Diffstat (limited to 'src/gallium'): file mode, path, line count
-rw-r--r--src/gallium/Makefile12
-rw-r--r--src/gallium/Makefile.template66
-rw-r--r--src/gallium/README.portability109
-rw-r--r--src/gallium/SConscript37
-rw-r--r--src/gallium/auxiliary/Makefile12
-rw-r--r--src/gallium/auxiliary/cso_cache/Makefile11
-rw-r--r--src/gallium/auxiliary/cso_cache/SConscript11
-rw-r--r--src/gallium/auxiliary/cso_cache/cso_cache.c410
-rw-r--r--src/gallium/auxiliary/cso_cache/cso_cache.h176
-rw-r--r--src/gallium/auxiliary/cso_cache/cso_context.c865
-rw-r--r--src/gallium/auxiliary/cso_cache/cso_context.h145
-rw-r--r--src/gallium/auxiliary/cso_cache/cso_hash.c439
-rw-r--r--src/gallium/auxiliary/cso_cache/cso_hash.h129
-rw-r--r--src/gallium/auxiliary/draw/Makefile46
-rw-r--r--src/gallium/auxiliary/draw/SConscript46
-rw-r--r--src/gallium/auxiliary/draw/draw_context.c430
-rw-r--r--src/gallium/auxiliary/draw/draw_context.h182
-rw-r--r--src/gallium/auxiliary/draw/draw_pipe.c319
-rw-r--r--src/gallium/auxiliary/draw/draw_pipe.h126
-rw-r--r--src/gallium/auxiliary/draw/draw_pipe_aaline.c921
-rw-r--r--src/gallium/auxiliary/draw/draw_pipe_aapoint.c884
-rw-r--r--src/gallium/auxiliary/draw/draw_pipe_clip.c516
-rw-r--r--src/gallium/auxiliary/draw/draw_pipe_cull.c148
-rw-r--r--src/gallium/auxiliary/draw/draw_pipe_flatshade.c282
-rw-r--r--src/gallium/auxiliary/draw/draw_pipe_offset.c185
-rw-r--r--src/gallium/auxiliary/draw/draw_pipe_pstipple.c777
-rw-r--r--src/gallium/auxiliary/draw/draw_pipe_stipple.c251
-rw-r--r--src/gallium/auxiliary/draw/draw_pipe_twoside.c200
-rw-r--r--src/gallium/auxiliary/draw/draw_pipe_unfilled.c204
-rw-r--r--src/gallium/auxiliary/draw/draw_pipe_util.c137
-rw-r--r--src/gallium/auxiliary/draw/draw_pipe_validate.c339
-rw-r--r--src/gallium/auxiliary/draw/draw_pipe_vbuf.c492
-rw-r--r--src/gallium/auxiliary/draw/draw_pipe_wide_line.c181
-rw-r--r--src/gallium/auxiliary/draw/draw_pipe_wide_point.c333
-rw-r--r--src/gallium/auxiliary/draw/draw_private.h319
-rw-r--r--src/gallium/auxiliary/draw/draw_pt.c324
-rw-r--r--src/gallium/auxiliary/draw/draw_pt.h233
-rw-r--r--src/gallium/auxiliary/draw/draw_pt_decompose.h182
-rw-r--r--src/gallium/auxiliary/draw/draw_pt_elts.c89
-rw-r--r--src/gallium/auxiliary/draw/draw_pt_emit.c313
-rw-r--r--src/gallium/auxiliary/draw/draw_pt_fetch.c234
-rw-r--r--src/gallium/auxiliary/draw/draw_pt_fetch_emit.c437
-rw-r--r--src/gallium/auxiliary/draw/draw_pt_fetch_shade_emit.c429
-rw-r--r--src/gallium/auxiliary/draw/draw_pt_fetch_shade_pipeline.c395
-rw-r--r--src/gallium/auxiliary/draw/draw_pt_post_vs.c233
-rw-r--r--src/gallium/auxiliary/draw/draw_pt_util.c77
-rw-r--r--src/gallium/auxiliary/draw/draw_pt_varray.c193
-rw-r--r--src/gallium/auxiliary/draw/draw_pt_varray_tmp.h238
-rw-r--r--src/gallium/auxiliary/draw/draw_pt_varray_tmp_linear.h91
-rw-r--r--src/gallium/auxiliary/draw/draw_pt_vcache.c516
-rw-r--r--src/gallium/auxiliary/draw/draw_pt_vcache_tmp.h214
-rw-r--r--src/gallium/auxiliary/draw/draw_vbuf.h129
-rw-r--r--src/gallium/auxiliary/draw/draw_vertex.c129
-rw-r--r--src/gallium/auxiliary/draw/draw_vertex.h162
-rw-r--r--src/gallium/auxiliary/draw/draw_vs.c254
-rw-r--r--src/gallium/auxiliary/draw/draw_vs.h224
-rw-r--r--src/gallium/auxiliary/draw/draw_vs_aos.c2274
-rw-r--r--src/gallium/auxiliary/draw/draw_vs_aos.h253
-rw-r--r--src/gallium/auxiliary/draw/draw_vs_aos_io.c462
-rw-r--r--src/gallium/auxiliary/draw/draw_vs_aos_machine.c324
-rw-r--r--src/gallium/auxiliary/draw/draw_vs_exec.c208
-rw-r--r--src/gallium/auxiliary/draw/draw_vs_llvm.c161
-rw-r--r--src/gallium/auxiliary/draw/draw_vs_ppc.c245
-rw-r--r--src/gallium/auxiliary/draw/draw_vs_sse.c217
-rw-r--r--src/gallium/auxiliary/draw/draw_vs_varient.c322
-rw-r--r--src/gallium/auxiliary/gallivm/Makefile92
-rw-r--r--src/gallium/auxiliary/gallivm/SConscript16
-rw-r--r--src/gallium/auxiliary/gallivm/gallivm.cpp332
-rw-r--r--src/gallium/auxiliary/gallivm/gallivm.h118
-rw-r--r--src/gallium/auxiliary/gallivm/gallivm_builtins.cpp140
-rw-r--r--src/gallium/auxiliary/gallivm/gallivm_cpu.cpp243
-rw-r--r--src/gallium/auxiliary/gallivm/gallivm_p.h110
-rw-r--r--src/gallium/auxiliary/gallivm/instructions.cpp1193
-rw-r--r--src/gallium/auxiliary/gallivm/instructions.h175
-rw-r--r--src/gallium/auxiliary/gallivm/instructionssoa.cpp525
-rw-r--r--src/gallium/auxiliary/gallivm/instructionssoa.h116
-rw-r--r--src/gallium/auxiliary/gallivm/llvm_builtins.c114
-rw-r--r--src/gallium/auxiliary/gallivm/loweringpass.cpp17
-rw-r--r--src/gallium/auxiliary/gallivm/loweringpass.h15
-rw-r--r--src/gallium/auxiliary/gallivm/soabuiltins.c210
-rw-r--r--src/gallium/auxiliary/gallivm/storage.cpp364
-rw-r--r--src/gallium/auxiliary/gallivm/storage.h133
-rw-r--r--src/gallium/auxiliary/gallivm/storagesoa.cpp438
-rw-r--r--src/gallium/auxiliary/gallivm/storagesoa.h107
-rw-r--r--src/gallium/auxiliary/gallivm/tgsitollvm.cpp1136
-rw-r--r--src/gallium/auxiliary/gallivm/tgsitollvm.h20
-rw-r--r--src/gallium/auxiliary/indices/Makefile16
-rw-r--r--src/gallium/auxiliary/indices/SConscript28
-rw-r--r--src/gallium/auxiliary/indices/u_indices.c253
-rw-r--r--src/gallium/auxiliary/indices/u_indices.h106
-rw-r--r--src/gallium/auxiliary/indices/u_indices_gen.c5129
-rw-r--r--src/gallium/auxiliary/indices/u_indices_gen.py319
-rw-r--r--src/gallium/auxiliary/indices/u_indices_priv.h43
-rw-r--r--src/gallium/auxiliary/indices/u_unfilled_gen.c992
-rw-r--r--src/gallium/auxiliary/indices/u_unfilled_gen.py246
-rw-r--r--src/gallium/auxiliary/indices/u_unfilled_indices.c186
-rw-r--r--src/gallium/auxiliary/pipebuffer/Makefile19
-rw-r--r--src/gallium/auxiliary/pipebuffer/SConscript19
-rw-r--r--src/gallium/auxiliary/pipebuffer/pb_buffer.h286
-rw-r--r--src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c597
-rw-r--r--src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h142
-rw-r--r--src/gallium/auxiliary/pipebuffer/pb_buffer_malloc.c188
-rw-r--r--src/gallium/auxiliary/pipebuffer/pb_bufmgr.h214
-rw-r--r--src/gallium/auxiliary/pipebuffer/pb_bufmgr_alt.c120
-rw-r--r--src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c385
-rw-r--r--src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c494
-rw-r--r--src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c152
-rw-r--r--src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c318
-rw-r--r--src/gallium/auxiliary/pipebuffer/pb_bufmgr_ondemand.c305
-rw-r--r--src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c321
-rw-r--r--src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c584
-rw-r--r--src/gallium/auxiliary/pipebuffer/pb_validate.c193
-rw-r--r--src/gallium/auxiliary/pipebuffer/pb_validate.h97
-rw-r--r--src/gallium/auxiliary/rbug/Makefile14
-rw-r--r--src/gallium/auxiliary/rbug/README21
-rw-r--r--src/gallium/auxiliary/rbug/SConscript14
-rw-r--r--src/gallium/auxiliary/rbug/rbug.h33
-rw-r--r--src/gallium/auxiliary/rbug/rbug_connection.c167
-rw-r--r--src/gallium/auxiliary/rbug/rbug_connection.h45
-rw-r--r--src/gallium/auxiliary/rbug/rbug_context.c759
-rw-r--r--src/gallium/auxiliary/rbug/rbug_context.h212
-rw-r--r--src/gallium/auxiliary/rbug/rbug_core.c359
-rw-r--r--src/gallium/auxiliary/rbug/rbug_core.h105
-rw-r--r--src/gallium/auxiliary/rbug/rbug_demarshal.c93
-rw-r--r--src/gallium/auxiliary/rbug/rbug_internal.h100
-rw-r--r--src/gallium/auxiliary/rbug/rbug_proto.h94
-rw-r--r--src/gallium/auxiliary/rbug/rbug_shader.c468
-rw-r--r--src/gallium/auxiliary/rbug/rbug_shader.h142
-rw-r--r--src/gallium/auxiliary/rbug/rbug_texture.c631
-rw-r--r--src/gallium/auxiliary/rbug/rbug_texture.h206
-rw-r--r--src/gallium/auxiliary/rtasm/Makefile13
-rw-r--r--src/gallium/auxiliary/rtasm/SConscript13
-rw-r--r--src/gallium/auxiliary/rtasm/rtasm_cpu.c67
-rw-r--r--src/gallium/auxiliary/rtasm/rtasm_cpu.h42
-rw-r--r--src/gallium/auxiliary/rtasm/rtasm_execmem.c141
-rw-r--r--src/gallium/auxiliary/rtasm/rtasm_execmem.h45
-rw-r--r--src/gallium/auxiliary/rtasm/rtasm_ppc.c1077
-rw-r--r--src/gallium/auxiliary/rtasm/rtasm_ppc.h342
-rw-r--r--src/gallium/auxiliary/rtasm/rtasm_ppc_spe.c1067
-rw-r--r--src/gallium/auxiliary/rtasm/rtasm_ppc_spe.h433
-rw-r--r--src/gallium/auxiliary/rtasm/rtasm_x86sse.c1757
-rw-r--r--src/gallium/auxiliary/rtasm/rtasm_x86sse.h320
-rw-r--r--src/gallium/auxiliary/sct/Makefile9
-rw-r--r--src/gallium/auxiliary/sct/SConscript9
-rw-r--r--src/gallium/auxiliary/sct/sct.c453
-rw-r--r--src/gallium/auxiliary/sct/sct.h123
-rw-r--r--src/gallium/auxiliary/sct/usage.c61
-rw-r--r--src/gallium/auxiliary/tgsi/Makefile22
-rw-r--r--src/gallium/auxiliary/tgsi/SConscript23
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi-instruction-set.txt1131
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_build.c1368
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_build.h339
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_dump.c617
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_dump.h70
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_dump_c.c720
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_dump_c.h49
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_exec.c3311
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_exec.h327
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_info.c181
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_info.h60
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_iterate.c85
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_iterate.h76
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_opcode_tmp.h172
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_parse.c352
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_parse.h149
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_ppc.c1375
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_ppc.h51
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_sanity.c385
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_sanity.h49
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_scan.c256
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_scan.h75
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_sse2.c2985
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_sse2.h77
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_text.c1157
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_text.h47
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_transform.c227
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_transform.h92
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_ureg.c938
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_ureg.h590
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_util.c299
-rw-r--r--src/gallium/auxiliary/tgsi/tgsi_util.h100
-rw-r--r--src/gallium/auxiliary/translate/Makefile12
-rw-r--r--src/gallium/auxiliary/translate/SConscript12
-rw-r--r--src/gallium/auxiliary/translate/translate.c50
-rw-r--r--src/gallium/auxiliary/translate/translate.h127
-rw-r--r--src/gallium/auxiliary/translate/translate_cache.c102
-rw-r--r--src/gallium/auxiliary/translate/translate_cache.h54
-rw-r--r--src/gallium/auxiliary/translate/translate_generic.c700
-rw-r--r--src/gallium/auxiliary/translate/translate_sse.c706
-rw-r--r--src/gallium/auxiliary/util/.gitignore2
-rw-r--r--src/gallium/auxiliary/util/Makefile44
-rw-r--r--src/gallium/auxiliary/util/SConscript58
-rw-r--r--src/gallium/auxiliary/util/u_bitmask.c320
-rw-r--r--src/gallium/auxiliary/util/u_bitmask.h114
-rw-r--r--src/gallium/auxiliary/util/u_blit.c600
-rw-r--r--src/gallium/auxiliary/util/u_blit.h93
-rw-r--r--src/gallium/auxiliary/util/u_cache.c217
-rw-r--r--src/gallium/auxiliary/util/u_cache.h87
-rw-r--r--src/gallium/auxiliary/util/u_clear.h60
-rw-r--r--src/gallium/auxiliary/util/u_cpu_detect.c506
-rw-r--r--src/gallium/auxiliary/util/u_cpu_detect.h78
-rw-r--r--src/gallium/auxiliary/util/u_debug.c823
-rw-r--r--src/gallium/auxiliary/util/u_debug.h384
-rw-r--r--src/gallium/auxiliary/util/u_debug_dump.c189
-rw-r--r--src/gallium/auxiliary/util/u_debug_dump.h65
-rw-r--r--src/gallium/auxiliary/util/u_debug_memory.c324
-rw-r--r--src/gallium/auxiliary/util/u_debug_profile.c320
-rw-r--r--src/gallium/auxiliary/util/u_debug_stack.c107
-rw-r--r--src/gallium/auxiliary/util/u_debug_stack.h65
-rw-r--r--src/gallium/auxiliary/util/u_debug_symbol.c250
-rw-r--r--src/gallium/auxiliary/util/u_debug_symbol.h53
-rw-r--r--src/gallium/auxiliary/util/u_double_list.h102
-rw-r--r--src/gallium/auxiliary/util/u_draw_quad.c134
-rw-r--r--src/gallium/auxiliary/util/u_draw_quad.h54
-rw-r--r--src/gallium/auxiliary/util/u_fifo.h94
-rw-r--r--src/gallium/auxiliary/util/u_format.c46
-rw-r--r--src/gallium/auxiliary/util/u_format.csv99
-rw-r--r--src/gallium/auxiliary/util/u_format.h138
-rw-r--r--src/gallium/auxiliary/util/u_format_access.py498
-rwxr-xr-xsrc/gallium/auxiliary/util/u_format_parse.py141
-rwxr-xr-xsrc/gallium/auxiliary/util/u_format_table.py144
-rw-r--r--src/gallium/auxiliary/util/u_gen_mipmap.c1613
-rw-r--r--src/gallium/auxiliary/util/u_gen_mipmap.h70
-rw-r--r--src/gallium/auxiliary/util/u_handle_table.c298
-rw-r--r--src/gallium/auxiliary/util/u_handle_table.h116
-rw-r--r--src/gallium/auxiliary/util/u_hash.c121
-rw-r--r--src/gallium/auxiliary/util/u_hash.h55
-rw-r--r--src/gallium/auxiliary/util/u_hash_table.c292
-rw-r--r--src/gallium/auxiliary/util/u_hash_table.h95
-rw-r--r--src/gallium/auxiliary/util/u_keymap.c319
-rw-r--r--src/gallium/auxiliary/util/u_keymap.h68
-rw-r--r--src/gallium/auxiliary/util/u_linear.c101
-rw-r--r--src/gallium/auxiliary/util/u_linear.h92
-rw-r--r--src/gallium/auxiliary/util/u_math.c73
-rw-r--r--src/gallium/auxiliary/util/u_math.h516
-rw-r--r--src/gallium/auxiliary/util/u_memory.h242
-rw-r--r--src/gallium/auxiliary/util/u_mm.c289
-rw-r--r--src/gallium/auxiliary/util/u_mm.h91
-rw-r--r--src/gallium/auxiliary/util/u_network.c188
-rw-r--r--src/gallium/auxiliary/util/u_network.h24
-rw-r--r--src/gallium/auxiliary/util/u_pack_color.h502
-rw-r--r--src/gallium/auxiliary/util/u_pointer.h107
-rw-r--r--src/gallium/auxiliary/util/u_prim.h138
-rw-r--r--src/gallium/auxiliary/util/u_rect.c301
-rw-r--r--src/gallium/auxiliary/util/u_rect.h72
-rw-r--r--src/gallium/auxiliary/util/u_simple_list.h197
-rw-r--r--src/gallium/auxiliary/util/u_simple_screen.c153
-rw-r--r--src/gallium/auxiliary/util/u_simple_screen.h47
-rw-r--r--src/gallium/auxiliary/util/u_simple_shaders.c153
-rw-r--r--src/gallium/auxiliary/util/u_simple_shaders.h69
-rw-r--r--src/gallium/auxiliary/util/u_snprintf.c1497
-rw-r--r--src/gallium/auxiliary/util/u_sse.h77
-rw-r--r--src/gallium/auxiliary/util/u_stream.h61
-rw-r--r--src/gallium/auxiliary/util/u_stream_stdc.c106
-rw-r--r--src/gallium/auxiliary/util/u_stream_wd.c224
-rw-r--r--src/gallium/auxiliary/util/u_string.h220
-rw-r--r--src/gallium/auxiliary/util/u_surface.c113
-rw-r--r--src/gallium/auxiliary/util/u_surface.h69
-rw-r--r--src/gallium/auxiliary/util/u_tile.c1526
-rw-r--r--src/gallium/auxiliary/util/u_tile.h101
-rw-r--r--src/gallium/auxiliary/util/u_time.c225
-rw-r--r--src/gallium/auxiliary/util/u_time.h109
-rw-r--r--src/gallium/auxiliary/util/u_timed_winsys.c318
-rw-r--r--src/gallium/auxiliary/util/u_timed_winsys.h41
-rw-r--r--src/gallium/auxiliary/util/u_upload_mgr.c230
-rw-r--r--src/gallium/auxiliary/util/u_upload_mgr.h75
-rw-r--r--src/gallium/drivers/Makefile12
-rw-r--r--src/gallium/drivers/cell/Makefile12
-rw-r--r--src/gallium/drivers/cell/common.h376
-rw-r--r--src/gallium/drivers/cell/ppu/Makefile86
-rw-r--r--src/gallium/drivers/cell/ppu/cell_batch.c260
-rw-r--r--src/gallium/drivers/cell/ppu/cell_batch.h54
-rw-r--r--src/gallium/drivers/cell/ppu/cell_clear.c92
-rw-r--r--src/gallium/drivers/cell/ppu/cell_clear.h41
-rw-r--r--src/gallium/drivers/cell/ppu/cell_context.c208
-rw-r--r--src/gallium/drivers/cell/ppu/cell_context.h202
-rw-r--r--src/gallium/drivers/cell/ppu/cell_draw_arrays.c191
-rw-r--r--src/gallium/drivers/cell/ppu/cell_draw_arrays.h36
-rw-r--r--src/gallium/drivers/cell/ppu/cell_fence.c164
-rw-r--r--src/gallium/drivers/cell/ppu/cell_fence.h57
-rw-r--r--src/gallium/drivers/cell/ppu/cell_flush.c112
-rw-r--r--src/gallium/drivers/cell/ppu/cell_flush.h45
-rw-r--r--src/gallium/drivers/cell/ppu/cell_gen_fp.c2046
-rw-r--r--src/gallium/drivers/cell/ppu/cell_gen_fp.h42
-rw-r--r--src/gallium/drivers/cell/ppu/cell_gen_fragment.c2181
-rw-r--r--src/gallium/drivers/cell/ppu/cell_gen_fragment.h38
-rw-r--r--src/gallium/drivers/cell/ppu/cell_pipe_state.c407
-rw-r--r--src/gallium/drivers/cell/ppu/cell_pipe_state.h39
-rw-r--r--src/gallium/drivers/cell/ppu/cell_render.c211
-rw-r--r--src/gallium/drivers/cell/ppu/cell_render.h39
-rw-r--r--src/gallium/drivers/cell/ppu/cell_screen.c178
-rw-r--r--src/gallium/drivers/cell/ppu/cell_screen.h41
-rw-r--r--src/gallium/drivers/cell/ppu/cell_spu.c219
-rw-r--r--src/gallium/drivers/cell/ppu/cell_spu.h79
-rw-r--r--src/gallium/drivers/cell/ppu/cell_state.h65
-rw-r--r--src/gallium/drivers/cell/ppu/cell_state_derived.c170
-rw-r--r--src/gallium/drivers/cell/ppu/cell_state_emit.c344
-rw-r--r--src/gallium/drivers/cell/ppu/cell_state_emit.h36
-rw-r--r--src/gallium/drivers/cell/ppu/cell_state_per_fragment.c1430
-rw-r--r--src/gallium/drivers/cell/ppu/cell_state_per_fragment.h39
-rw-r--r--src/gallium/drivers/cell/ppu/cell_state_shader.c217
-rw-r--r--src/gallium/drivers/cell/ppu/cell_state_vertex.c79
-rw-r--r--src/gallium/drivers/cell/ppu/cell_surface.c50
-rw-r--r--src/gallium/drivers/cell/ppu/cell_surface.h42
-rw-r--r--src/gallium/drivers/cell/ppu/cell_texture.c497
-rw-r--r--src/gallium/drivers/cell/ppu/cell_texture.h86
-rw-r--r--src/gallium/drivers/cell/ppu/cell_vbuf.c332
-rw-r--r--src/gallium/drivers/cell/ppu/cell_vbuf.h38
-rw-r--r--src/gallium/drivers/cell/ppu/cell_vertex_fetch.c346
-rw-r--r--src/gallium/drivers/cell/ppu/cell_vertex_shader.c146
-rw-r--r--src/gallium/drivers/cell/ppu/cell_winsys.h50
-rw-r--r--src/gallium/drivers/cell/spu/.gitignore1
-rw-r--r--src/gallium/drivers/cell/spu/Makefile83
-rw-r--r--src/gallium/drivers/cell/spu/spu_colorpack.h145
-rw-r--r--src/gallium/drivers/cell/spu/spu_command.c815
-rw-r--r--src/gallium/drivers/cell/spu/spu_command.h35
-rw-r--r--src/gallium/drivers/cell/spu/spu_dcache.c145
-rw-r--r--src/gallium/drivers/cell/spu/spu_dcache.h37
-rw-r--r--src/gallium/drivers/cell/spu/spu_exec.c1907
-rw-r--r--src/gallium/drivers/cell/spu/spu_exec.h172
-rw-r--r--src/gallium/drivers/cell/spu/spu_funcs.c173
-rw-r--r--src/gallium/drivers/cell/spu/spu_funcs.h35
-rw-r--r--src/gallium/drivers/cell/spu/spu_main.c117
-rw-r--r--src/gallium/drivers/cell/spu/spu_main.h254
-rw-r--r--src/gallium/drivers/cell/spu/spu_per_fragment_op.c631
-rw-r--r--src/gallium/drivers/cell/spu/spu_per_fragment_op.h44
-rw-r--r--src/gallium/drivers/cell/spu/spu_render.c356
-rw-r--r--src/gallium/drivers/cell/spu/spu_render.h38
-rw-r--r--src/gallium/drivers/cell/spu/spu_shuffle.h186
-rw-r--r--src/gallium/drivers/cell/spu/spu_texture.c641
-rw-r--r--src/gallium/drivers/cell/spu/spu_texture.h67
-rw-r--r--src/gallium/drivers/cell/spu/spu_tile.c126
-rw-r--r--src/gallium/drivers/cell/spu/spu_tile.h75
-rw-r--r--src/gallium/drivers/cell/spu/spu_tri.c843
-rw-r--r--src/gallium/drivers/cell/spu/spu_tri.h37
-rw-r--r--src/gallium/drivers/cell/spu/spu_util.c167
-rw-r--r--src/gallium/drivers/cell/spu/spu_vertex_fetch.c145
-rw-r--r--src/gallium/drivers/cell/spu/spu_vertex_shader.c244
-rw-r--r--src/gallium/drivers/cell/spu/spu_vertex_shader.h66
-rw-r--r--src/gallium/drivers/failover/Makefile11
-rw-r--r--src/gallium/drivers/failover/SConscript13
-rw-r--r--src/gallium/drivers/failover/fo_context.c182
-rw-r--r--src/gallium/drivers/failover/fo_context.h125
-rw-r--r--src/gallium/drivers/failover/fo_state.c481
-rw-r--r--src/gallium/drivers/failover/fo_state_emit.c117
-rw-r--r--src/gallium/drivers/failover/fo_winsys.h45
-rw-r--r--src/gallium/drivers/i915simple/Makefile28
-rw-r--r--src/gallium/drivers/i915simple/SConscript30
-rw-r--r--src/gallium/drivers/i915simple/i915_batch.h47
-rw-r--r--src/gallium/drivers/i915simple/i915_blit.c151
-rw-r--r--src/gallium/drivers/i915simple/i915_blit.h55
-rw-r--r--src/gallium/drivers/i915simple/i915_buffer.c136
-rw-r--r--src/gallium/drivers/i915simple/i915_buffer.h31
-rw-r--r--src/gallium/drivers/i915simple/i915_clear.c48
-rw-r--r--src/gallium/drivers/i915simple/i915_context.c237
-rw-r--r--src/gallium/drivers/i915simple/i915_context.h350
-rw-r--r--src/gallium/drivers/i915simple/i915_debug.c898
-rw-r--r--src/gallium/drivers/i915simple/i915_debug.h114
-rw-r--r--src/gallium/drivers/i915simple/i915_debug_fp.c363
-rw-r--r--src/gallium/drivers/i915simple/i915_flush.c86
-rw-r--r--src/gallium/drivers/i915simple/i915_fpc.h207
-rw-r--r--src/gallium/drivers/i915simple/i915_fpc_emit.c375
-rw-r--r--src/gallium/drivers/i915simple/i915_fpc_translate.c1202
-rw-r--r--src/gallium/drivers/i915simple/i915_prim_emit.c219
-rw-r--r--src/gallium/drivers/i915simple/i915_prim_vbuf.c616
-rw-r--r--src/gallium/drivers/i915simple/i915_reg.h978
-rw-r--r--src/gallium/drivers/i915simple/i915_screen.c300
-rw-r--r--src/gallium/drivers/i915simple/i915_screen.h80
-rw-r--r--src/gallium/drivers/i915simple/i915_state.c788
-rw-r--r--src/gallium/drivers/i915simple/i915_state.h50
-rw-r--r--src/gallium/drivers/i915simple/i915_state_derived.c183
-rw-r--r--src/gallium/drivers/i915simple/i915_state_dynamic.c310
-rw-r--r--src/gallium/drivers/i915simple/i915_state_emit.c402
-rw-r--r--src/gallium/drivers/i915simple/i915_state_immediate.c225
-rw-r--r--src/gallium/drivers/i915simple/i915_state_inlines.h230
-rw-r--r--src/gallium/drivers/i915simple/i915_state_sampler.c299
-rw-r--r--src/gallium/drivers/i915simple/i915_surface.c94
-rw-r--r--src/gallium/drivers/i915simple/i915_texture.c911
-rw-r--r--src/gallium/drivers/i915simple/i915_texture.h36
-rw-r--r--src/gallium/drivers/i915simple/intel_batchbuffer.h87
-rw-r--r--src/gallium/drivers/i915simple/intel_winsys.h230
-rw-r--r--src/gallium/drivers/i965simple/Makefile52
-rw-r--r--src/gallium/drivers/i965simple/SConscript54
-rw-r--r--src/gallium/drivers/i965simple/brw_batch.h59
-rw-r--r--src/gallium/drivers/i965simple/brw_blit.c218
-rw-r--r--src/gallium/drivers/i965simple/brw_blit.h33
-rw-r--r--src/gallium/drivers/i965simple/brw_cc.c269
-rw-r--r--src/gallium/drivers/i965simple/brw_clip.c206
-rw-r--r--src/gallium/drivers/i965simple/brw_clip.h170
-rw-r--r--src/gallium/drivers/i965simple/brw_clip_line.c245
-rw-r--r--src/gallium/drivers/i965simple/brw_clip_point.c47
-rw-r--r--src/gallium/drivers/i965simple/brw_clip_state.c93
-rw-r--r--src/gallium/drivers/i965simple/brw_clip_tri.c566
-rw-r--r--src/gallium/drivers/i965simple/brw_clip_unfilled.c477
-rw-r--r--src/gallium/drivers/i965simple/brw_clip_util.c351
-rw-r--r--src/gallium/drivers/i965simple/brw_context.c139
-rw-r--r--src/gallium/drivers/i965simple/brw_context.h684
-rw-r--r--src/gallium/drivers/i965simple/brw_curbe.c369
-rw-r--r--src/gallium/drivers/i965simple/brw_defines.h870
-rw-r--r--src/gallium/drivers/i965simple/brw_draw.c226
-rw-r--r--src/gallium/drivers/i965simple/brw_draw.h55
-rw-r--r--src/gallium/drivers/i965simple/brw_draw_upload.c300
-rw-r--r--src/gallium/drivers/i965simple/brw_eu.c130
-rw-r--r--src/gallium/drivers/i965simple/brw_eu.h888
-rw-r--r--src/gallium/drivers/i965simple/brw_eu_debug.c90
-rw-r--r--src/gallium/drivers/i965simple/brw_eu_emit.c1080
-rw-r--r--src/gallium/drivers/i965simple/brw_eu_util.c126
-rw-r--r--src/gallium/drivers/i965simple/brw_flush.c73
-rw-r--r--src/gallium/drivers/i965simple/brw_gs.c196
-rw-r--r--src/gallium/drivers/i965simple/brw_gs.h75
-rw-r--r--src/gallium/drivers/i965simple/brw_gs_emit.c148
-rw-r--r--src/gallium/drivers/i965simple/brw_gs_state.c90
-rw-r--r--src/gallium/drivers/i965simple/brw_misc_state.c488
-rw-r--r--src/gallium/drivers/i965simple/brw_reg.h76
-rw-r--r--src/gallium/drivers/i965simple/brw_screen.c246
-rw-r--r--src/gallium/drivers/i965simple/brw_screen.h68
-rw-r--r--src/gallium/drivers/i965simple/brw_sf.c351
-rw-r--r--src/gallium/drivers/i965simple/brw_sf.h122
-rw-r--r--src/gallium/drivers/i965simple/brw_sf_emit.c382
-rw-r--r--src/gallium/drivers/i965simple/brw_sf_state.c181
-rw-r--r--src/gallium/drivers/i965simple/brw_shader_info.c48
-rw-r--r--src/gallium/drivers/i965simple/brw_state.c469
-rw-r--r--src/gallium/drivers/i965simple/brw_state.h151
-rw-r--r--src/gallium/drivers/i965simple/brw_state_batch.c113
-rw-r--r--src/gallium/drivers/i965simple/brw_state_cache.c443
-rw-r--r--src/gallium/drivers/i965simple/brw_state_pool.c138
-rw-r--r--src/gallium/drivers/i965simple/brw_state_upload.c202
-rw-r--r--src/gallium/drivers/i965simple/brw_structs.h1348
-rw-r--r--src/gallium/drivers/i965simple/brw_surface.c126
-rw-r--r--src/gallium/drivers/i965simple/brw_tex_layout.c380
-rw-r--r--src/gallium/drivers/i965simple/brw_tex_layout.h44
-rw-r--r--src/gallium/drivers/i965simple/brw_urb.c186
-rw-r--r--src/gallium/drivers/i965simple/brw_util.c104
-rw-r--r--src/gallium/drivers/i965simple/brw_util.h43
-rw-r--r--src/gallium/drivers/i965simple/brw_vs.c120
-rw-r--r--src/gallium/drivers/i965simple/brw_vs.h82
-rw-r--r--src/gallium/drivers/i965simple/brw_vs_emit.c1330
-rw-r--r--src/gallium/drivers/i965simple/brw_vs_state.c103
-rw-r--r--src/gallium/drivers/i965simple/brw_winsys.h209
-rw-r--r--src/gallium/drivers/i965simple/brw_wm.c209
-rw-r--r--src/gallium/drivers/i965simple/brw_wm.h142
-rw-r--r--src/gallium/drivers/i965simple/brw_wm_decl.c392
-rw-r--r--src/gallium/drivers/i965simple/brw_wm_glsl.c1076
-rw-r--r--src/gallium/drivers/i965simple/brw_wm_iz.c214
-rw-r--r--src/gallium/drivers/i965simple/brw_wm_sampler_state.c275
-rw-r--r--src/gallium/drivers/i965simple/brw_wm_state.c195
-rw-r--r--src/gallium/drivers/i965simple/brw_wm_surface_state.c305
-rw-r--r--src/gallium/drivers/identity/Makefile12
-rw-r--r--src/gallium/drivers/identity/SConscript13
-rw-r--r--src/gallium/drivers/identity/id_context.c719
-rw-r--r--src/gallium/drivers/identity/id_context.h48
-rw-r--r--src/gallium/drivers/identity/id_drm.c173
-rw-r--r--src/gallium/drivers/identity/id_drm.h35
-rw-r--r--src/gallium/drivers/identity/id_objects.c182
-rw-r--r--src/gallium/drivers/identity/id_objects.h169
-rw-r--r--src/gallium/drivers/identity/id_public.h40
-rw-r--r--src/gallium/drivers/identity/id_screen.c483
-rw-r--r--src/gallium/drivers/identity/id_screen.h48
-rw-r--r--src/gallium/drivers/llvmpipe/Makefile55
-rw-r--r--src/gallium/drivers/llvmpipe/README149
-rw-r--r--src/gallium/drivers/llvmpipe/SConscript84
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_alpha.c64
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_alpha.h54
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_arit.c1082
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_arit.h175
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_blend.h107
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_blend_aos.c356
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_blend_logicop.c108
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_blend_soa.c298
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_const.c369
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_const.h108
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_conv.c702
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_conv.h73
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_debug.c125
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_debug.h64
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_depth.c216
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_depth.h63
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_flow.c493
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_flow.h129
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_format.h119
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_format_aos.c303
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_format_soa.c208
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_interp.c377
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_interp.h99
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_intr.c192
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_intr.h102
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_logic.c394
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_logic.h72
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_sample.h135
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_sample_soa.c411
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_struct.c72
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_struct.h75
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_swizzle.c238
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_swizzle.h91
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_tgsi.h84
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_tgsi_soa.c1339
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_type.c181
-rw-r--r--src/gallium/drivers/llvmpipe/lp_bld_type.h178
-rw-r--r--src/gallium/drivers/llvmpipe/lp_buffer.c150
-rw-r--r--src/gallium/drivers/llvmpipe/lp_buffer.h55
-rw-r--r--src/gallium/drivers/llvmpipe/lp_clear.c81
-rw-r--r--src/gallium/drivers/llvmpipe/lp_clear.h43
-rw-r--r--src/gallium/drivers/llvmpipe/lp_context.c295
-rw-r--r--src/gallium/drivers/llvmpipe/lp_context.h155
-rw-r--r--src/gallium/drivers/llvmpipe/lp_draw_arrays.c192
-rw-r--r--src/gallium/drivers/llvmpipe/lp_flush.c98
-rw-r--r--src/gallium/drivers/llvmpipe/lp_flush.h37
-rw-r--r--src/gallium/drivers/llvmpipe/lp_jit.c173
-rw-r--r--src/gallium/drivers/llvmpipe/lp_jit.h135
-rw-r--r--src/gallium/drivers/llvmpipe/lp_prim_setup.c190
-rw-r--r--src/gallium/drivers/llvmpipe/lp_prim_setup.h85
-rw-r--r--src/gallium/drivers/llvmpipe/lp_prim_vbuf.c607
-rw-r--r--src/gallium/drivers/llvmpipe/lp_prim_vbuf.h38
-rw-r--r--src/gallium/drivers/llvmpipe/lp_quad.h114
-rw-r--r--src/gallium/drivers/llvmpipe/lp_query.c111
-rw-r--r--src/gallium/drivers/llvmpipe/lp_query.h39
-rw-r--r--src/gallium/drivers/llvmpipe/lp_screen.c243
-rw-r--r--src/gallium/drivers/llvmpipe/lp_screen.h79
-rw-r--r--src/gallium/drivers/llvmpipe/lp_setup.c1483
-rw-r--r--src/gallium/drivers/llvmpipe/lp_setup.h53
-rw-r--r--src/gallium/drivers/llvmpipe/lp_state.h230
-rw-r--r--src/gallium/drivers/llvmpipe/lp_state_blend.c114
-rw-r--r--src/gallium/drivers/llvmpipe/lp_state_clip.c79
-rw-r--r--src/gallium/drivers/llvmpipe/lp_state_derived.c260
-rw-r--r--src/gallium/drivers/llvmpipe/lp_state_fs.c762
-rw-r--r--src/gallium/drivers/llvmpipe/lp_state_rasterizer.c62
-rw-r--r--src/gallium/drivers/llvmpipe/lp_state_sampler.c127
-rw-r--r--src/gallium/drivers/llvmpipe/lp_state_surface.c106
-rw-r--r--src/gallium/drivers/llvmpipe/lp_state_vertex.c73
-rw-r--r--src/gallium/drivers/llvmpipe/lp_state_vs.c96
-rw-r--r--src/gallium/drivers/llvmpipe/lp_surface.c50
-rw-r--r--src/gallium/drivers/llvmpipe/lp_surface.h42
-rw-r--r--src/gallium/drivers/llvmpipe/lp_test.h128
-rw-r--r--src/gallium/drivers/llvmpipe/lp_test_blend.c881
-rw-r--r--src/gallium/drivers/llvmpipe/lp_test_conv.c427
-rw-r--r--src/gallium/drivers/llvmpipe/lp_test_format.c272
-rw-r--r--src/gallium/drivers/llvmpipe/lp_test_main.c384
-rw-r--r--src/gallium/drivers/llvmpipe/lp_tex_cache.c304
-rw-r--r--src/gallium/drivers/llvmpipe/lp_tex_cache.h151
-rw-r--r--src/gallium/drivers/llvmpipe/lp_tex_sample.h101
-rw-r--r--src/gallium/drivers/llvmpipe/lp_tex_sample_c.c1713
-rw-r--r--src/gallium/drivers/llvmpipe/lp_tex_sample_llvm.c196
-rw-r--r--src/gallium/drivers/llvmpipe/lp_texture.c429
-rw-r--r--src/gallium/drivers/llvmpipe/lp_texture.h90
-rw-r--r--src/gallium/drivers/llvmpipe/lp_tile_cache.c299
-rw-r--r--src/gallium/drivers/llvmpipe/lp_tile_cache.h106
-rw-r--r--src/gallium/drivers/llvmpipe/lp_tile_soa.c931
-rw-r--r--src/gallium/drivers/llvmpipe/lp_tile_soa.h81
-rw-r--r--src/gallium/drivers/llvmpipe/lp_winsys.h128
-rwxr-xr-xsrc/gallium/drivers/llvmpipe/sp2lp.sh34
-rw-r--r--src/gallium/drivers/nouveau/Makefile8
-rw-r--r--src/gallium/drivers/nouveau/nouveau_gldefs.h196
-rw-r--r--src/gallium/drivers/nouveau/nouveau_push.h93
-rw-r--r--src/gallium/drivers/nouveau/nouveau_screen.c243
-rw-r--r--src/gallium/drivers/nouveau/nouveau_screen.h36
-rw-r--r--src/gallium/drivers/nouveau/nouveau_stateobj.h159
-rw-r--r--src/gallium/drivers/nouveau/nouveau_util.h91
-rw-r--r--src/gallium/drivers/nouveau/nouveau_winsys.h62
-rw-r--r--src/gallium/drivers/nv04/Makefile21
-rw-r--r--src/gallium/drivers/nv04/nv04_clear.c12
-rw-r--r--src/gallium/drivers/nv04/nv04_context.c134
-rw-r--r--src/gallium/drivers/nv04/nv04_context.h151
-rw-r--r--src/gallium/drivers/nv04/nv04_fragprog.c21
-rw-r--r--src/gallium/drivers/nv04/nv04_fragtex.c73
-rw-r--r--src/gallium/drivers/nv04/nv04_miptree.c153
-rw-r--r--src/gallium/drivers/nv04/nv04_prim_vbuf.c321
-rw-r--r--src/gallium/drivers/nv04/nv04_screen.c212
-rw-r--r--src/gallium/drivers/nv04/nv04_screen.h30
-rw-r--r--src/gallium/drivers/nv04/nv04_state.c459
-rw-r--r--src/gallium/drivers/nv04/nv04_state.h71
-rw-r--r--src/gallium/drivers/nv04/nv04_state_emit.c226
-rw-r--r--src/gallium/drivers/nv04/nv04_surface.c63
-rw-r--r--src/gallium/drivers/nv04/nv04_surface_2d.c558
-rw-r--r--src/gallium/drivers/nv04/nv04_surface_2d.h33
-rw-r--r--src/gallium/drivers/nv04/nv04_transfer.c195
-rw-r--r--src/gallium/drivers/nv04/nv04_vbo.c80
-rw-r--r--src/gallium/drivers/nv10/Makefile20
-rw-r--r--src/gallium/drivers/nv10/nv10_clear.c14
-rw-r--r--src/gallium/drivers/nv10/nv10_context.c322
-rw-r--r--src/gallium/drivers/nv10/nv10_context.h154
-rw-r--r--src/gallium/drivers/nv10/nv10_fragprog.c21
-rw-r--r--src/gallium/drivers/nv10/nv10_fragtex.c124
-rw-r--r--src/gallium/drivers/nv10/nv10_miptree.c167
-rw-r--r--src/gallium/drivers/nv10/nv10_prim_vbuf.c261
-rw-r--r--src/gallium/drivers/nv10/nv10_screen.c198
-rw-r--r--src/gallium/drivers/nv10/nv10_screen.h28
-rw-r--r--src/gallium/drivers/nv10/nv10_state.c590
-rw-r--r--src/gallium/drivers/nv10/nv10_state.h139
-rw-r--r--src/gallium/drivers/nv10/nv10_state_emit.c306
-rw-r--r--src/gallium/drivers/nv10/nv10_surface.c63
-rw-r--r--src/gallium/drivers/nv10/nv10_transfer.c195
-rw-r--r--src/gallium/drivers/nv10/nv10_vbo.c78
-rw-r--r--src/gallium/drivers/nv20/Makefile21
-rw-r--r--src/gallium/drivers/nv20/nv20_clear.c14
-rw-r--r--src/gallium/drivers/nv20/nv20_context.c446
-rw-r--r--src/gallium/drivers/nv20/nv20_context.h153
-rw-r--r--src/gallium/drivers/nv20/nv20_fragprog.c21
-rw-r--r--src/gallium/drivers/nv20/nv20_fragtex.c124
-rw-r--r--src/gallium/drivers/nv20/nv20_miptree.c206
-rw-r--r--src/gallium/drivers/nv20/nv20_prim_vbuf.c428
-rw-r--r--src/gallium/drivers/nv20/nv20_screen.c194
-rw-r--r--src/gallium/drivers/nv20/nv20_screen.h28
-rw-r--r--src/gallium/drivers/nv20/nv20_state.c583
-rw-r--r--src/gallium/drivers/nv20/nv20_state.h139
-rw-r--r--src/gallium/drivers/nv20/nv20_state_emit.c396
-rw-r--r--src/gallium/drivers/nv20/nv20_surface.c63
-rw-r--r--src/gallium/drivers/nv20/nv20_transfer.c195
-rw-r--r--src/gallium/drivers/nv20/nv20_vbo.c80
-rw-r--r--src/gallium/drivers/nv20/nv20_vertprog.c838
-rw-r--r--src/gallium/drivers/nv30/Makefile29
-rw-r--r--src/gallium/drivers/nv30/nv30_clear.c14
-rw-r--r--src/gallium/drivers/nv30/nv30_context.c98
-rw-r--r--src/gallium/drivers/nv30/nv30_context.h212
-rw-r--r--src/gallium/drivers/nv30/nv30_draw.c61
-rw-r--r--src/gallium/drivers/nv30/nv30_fragprog.c915
-rw-r--r--src/gallium/drivers/nv30/nv30_fragtex.c166
-rw-r--r--src/gallium/drivers/nv30/nv30_miptree.c213
-rw-r--r--src/gallium/drivers/nv30/nv30_query.c121
-rw-r--r--src/gallium/drivers/nv30/nv30_screen.c324
-rw-r--r--src/gallium/drivers/nv30/nv30_screen.h41
-rw-r--r--src/gallium/drivers/nv30/nv30_shader.h490
-rw-r--r--src/gallium/drivers/nv30/nv30_state.c728
-rw-r--r--src/gallium/drivers/nv30/nv30_state.h85
-rw-r--r--src/gallium/drivers/nv30/nv30_state_blend.c41
-rw-r--r--src/gallium/drivers/nv30/nv30_state_emit.c111
-rw-r--r--src/gallium/drivers/nv30/nv30_state_fb.c145
-rw-r--r--src/gallium/drivers/nv30/nv30_state_rasterizer.c17
-rw-r--r--src/gallium/drivers/nv30/nv30_state_scissor.c36
-rw-r--r--src/gallium/drivers/nv30/nv30_state_stipple.c40
-rw-r--r--src/gallium/drivers/nv30/nv30_state_viewport.c72
-rw-r--r--src/gallium/drivers/nv30/nv30_state_zsa.c17
-rw-r--r--src/gallium/drivers/nv30/nv30_surface.c63
-rw-r--r--src/gallium/drivers/nv30/nv30_transfer.c195
-rw-r--r--src/gallium/drivers/nv30/nv30_vbo.c561
-rw-r--r--src/gallium/drivers/nv30/nv30_vertprog.c837
-rw-r--r--src/gallium/drivers/nv40/Makefile29
-rw-r--r--src/gallium/drivers/nv40/nv40_clear.c14
-rw-r--r--src/gallium/drivers/nv40/nv40_context.c98
-rw-r--r--src/gallium/drivers/nv40/nv40_context.h233
-rw-r--r--src/gallium/drivers/nv40/nv40_draw.c352
-rw-r--r--src/gallium/drivers/nv40/nv40_fragprog.c995
-rw-r--r--src/gallium/drivers/nv40/nv40_fragtex.c170
-rw-r--r--src/gallium/drivers/nv40/nv40_miptree.c213
-rw-r--r--src/gallium/drivers/nv40/nv40_query.c121
-rw-r--r--src/gallium/drivers/nv40/nv40_screen.c303
-rw-r--r--src/gallium/drivers/nv40/nv40_screen.h40
-rw-r--r--src/gallium/drivers/nv40/nv40_shader.h556
-rw-r--r--src/gallium/drivers/nv40/nv40_state.c743
-rw-r--r--src/gallium/drivers/nv40/nv40_state.h88
-rw-r--r--src/gallium/drivers/nv40/nv40_state_blend.c41
-rw-r--r--src/gallium/drivers/nv40/nv40_state_emit.c177
-rw-r--r--src/gallium/drivers/nv40/nv40_state_fb.c172
-rw-r--r--src/gallium/drivers/nv40/nv40_state_rasterizer.c17
-rw-r--r--src/gallium/drivers/nv40/nv40_state_scissor.c36
-rw-r--r--src/gallium/drivers/nv40/nv40_state_stipple.c39
-rw-r--r--src/gallium/drivers/nv40/nv40_state_viewport.c69
-rw-r--r--src/gallium/drivers/nv40/nv40_state_zsa.c17
-rw-r--r--src/gallium/drivers/nv40/nv40_surface.c64
-rw-r--r--src/gallium/drivers/nv40/nv40_transfer.c195
-rw-r--r--src/gallium/drivers/nv40/nv40_vbo.c562
-rw-r--r--src/gallium/drivers/nv40/nv40_vertprog.c1069
-rw-r--r--src/gallium/drivers/nv50/Makefile21
-rw-r--r--src/gallium/drivers/nv50/nv50_clear.c68
-rw-r--r--src/gallium/drivers/nv50/nv50_context.c122
-rw-r--r--src/gallium/drivers/nv50/nv50_context.h201
-rw-r--r--src/gallium/drivers/nv50/nv50_draw.c89
-rw-r--r--src/gallium/drivers/nv50/nv50_miptree.c219
-rw-r--r--src/gallium/drivers/nv50/nv50_program.c2482
-rw-r--r--src/gallium/drivers/nv50/nv50_program.h50
-rw-r--r--src/gallium/drivers/nv50/nv50_query.c134
-rw-r--r--src/gallium/drivers/nv50/nv50_screen.c436
-rw-r--r--src/gallium/drivers/nv50/nv50_screen.h38
-rw-r--r--src/gallium/drivers/nv50/nv50_state.c683
-rw-r--r--src/gallium/drivers/nv50/nv50_state_validate.c359
-rw-r--r--src/gallium/drivers/nv50/nv50_surface.c195
-rw-r--r--src/gallium/drivers/nv50/nv50_tex.c176
-rw-r--r--src/gallium/drivers/nv50/nv50_texture.h150
-rw-r--r--src/gallium/drivers/nv50/nv50_transfer.c239
-rw-r--r--src/gallium/drivers/nv50/nv50_vbo.c400
-rw-r--r--src/gallium/drivers/r300/Makefile42
-rw-r--r--src/gallium/drivers/r300/SConscript29
-rw-r--r--src/gallium/drivers/r300/r300_chipset.c361
-rw-r--r--src/gallium/drivers/r300/r300_chipset.h82
-rw-r--r--src/gallium/drivers/r300/r300_clear.c35
-rw-r--r--src/gallium/drivers/r300/r300_clear.h36
-rw-r--r--src/gallium/drivers/r300/r300_context.c197
-rw-r--r--src/gallium/drivers/r300/r300_context.h327
-rw-r--r--src/gallium/drivers/r300/r300_cs.h154
-rw-r--r--src/gallium/drivers/r300/r300_debug.c88
-rw-r--r--src/gallium/drivers/r300/r300_emit.c813
-rw-r--r--src/gallium/drivers/r300/r300_emit.h96
-rw-r--r--src/gallium/drivers/r300/r300_flush.c49
-rw-r--r--src/gallium/drivers/r300/r300_flush.h35
-rw-r--r--src/gallium/drivers/r300/r300_fs.c137
-rw-r--r--src/gallium/drivers/r300/r300_fs.h51
-rw-r--r--src/gallium/drivers/r300/r300_query.c145
-rw-r--r--src/gallium/drivers/r300/r300_query.h39
-rw-r--r--src/gallium/drivers/r300/r300_reg.h3323
-rw-r--r--src/gallium/drivers/r300/r300_render.c298
-rw-r--r--src/gallium/drivers/r300/r300_screen.c416
-rw-r--r--src/gallium/drivers/r300/r300_screen.h66
-rw-r--r--src/gallium/drivers/r300/r300_shader_inlines.h47
-rw-r--r--src/gallium/drivers/r300/r300_state.c734
-rw-r--r--src/gallium/drivers/r300/r300_state_derived.c465
-rw-r--r--src/gallium/drivers/r300/r300_state_derived.h30
-rw-r--r--src/gallium/drivers/r300/r300_state_inlines.h403
-rw-r--r--src/gallium/drivers/r300/r300_state_invariant.c145
-rw-r--r--src/gallium/drivers/r300/r300_state_invariant.h34
-rw-r--r--src/gallium/drivers/r300/r300_surface.c372
-rw-r--r--src/gallium/drivers/r300/r300_surface.h124
-rw-r--r--src/gallium/drivers/r300/r300_texture.c228
-rw-r--r--src/gallium/drivers/r300/r300_texture.h89
-rw-r--r--src/gallium/drivers/r300/r300_tgsi_to_rc.c336
-rw-r--r--src/gallium/drivers/r300/r300_tgsi_to_rc.h41
-rw-r--r--src/gallium/drivers/r300/r300_vs.c234
-rw-r--r--src/gallium/drivers/r300/r300_vs.h54
-rw-r--r--src/gallium/drivers/r300/r300_winsys.h108
-rw-r--r--src/gallium/drivers/r300/r3xx_fs.c74
-rw-r--r--src/gallium/drivers/r300/r3xx_fs.h32
-rw-r--r--src/gallium/drivers/r300/r5xx_fs.c125
-rw-r--r--src/gallium/drivers/r300/r5xx_fs.h32
-rw-r--r--src/gallium/drivers/softpipe/Makefile44
-rw-r--r--src/gallium/drivers/softpipe/SConscript46
-rw-r--r--src/gallium/drivers/softpipe/sp_clear.c89
-rw-r--r--src/gallium/drivers/softpipe/sp_clear.h42
-rw-r--r--src/gallium/drivers/softpipe/sp_context.c330
-rw-r--r--src/gallium/drivers/softpipe/sp_context.h168
-rw-r--r--src/gallium/drivers/softpipe/sp_draw_arrays.c194
-rw-r--r--src/gallium/drivers/softpipe/sp_flush.c94
-rw-r--r--src/gallium/drivers/softpipe/sp_flush.h37
-rw-r--r--src/gallium/drivers/softpipe/sp_fs.h54
-rw-r--r--src/gallium/drivers/softpipe/sp_fs_exec.c162
-rw-r--r--src/gallium/drivers/softpipe/sp_fs_llvm.c205
-rw-r--r--src/gallium/drivers/softpipe/sp_fs_sse.c169
-rw-r--r--src/gallium/drivers/softpipe/sp_prim_setup.c190
-rw-r--r--src/gallium/drivers/softpipe/sp_prim_setup.h85
-rw-r--r--src/gallium/drivers/softpipe/sp_prim_vbuf.c585
-rw-r--r--src/gallium/drivers/softpipe/sp_prim_vbuf.h38
-rw-r--r--src/gallium/drivers/softpipe/sp_quad.h106
-rw-r--r--src/gallium/drivers/softpipe/sp_quad_alpha_test.c108
-rw-r--r--src/gallium/drivers/softpipe/sp_quad_blend.c777
-rw-r--r--src/gallium/drivers/softpipe/sp_quad_bufloop.c74
-rw-r--r--src/gallium/drivers/softpipe/sp_quad_colormask.c116
-rw-r--r--src/gallium/drivers/softpipe/sp_quad_coverage.c94
-rw-r--r--src/gallium/drivers/softpipe/sp_quad_depth_test.c290
-rw-r--r--src/gallium/drivers/softpipe/sp_quad_earlyz.c88
-rw-r--r--src/gallium/drivers/softpipe/sp_quad_fs.c192
-rw-r--r--src/gallium/drivers/softpipe/sp_quad_occlusion.c85
-rw-r--r--src/gallium/drivers/softpipe/sp_quad_output.c103
-rw-r--r--src/gallium/drivers/softpipe/sp_quad_pipe.c118
-rw-r--r--src/gallium/drivers/softpipe/sp_quad_pipe.h74
-rw-r--r--src/gallium/drivers/softpipe/sp_quad_stencil.c352
-rw-r--r--src/gallium/drivers/softpipe/sp_quad_stipple.c77
-rw-r--r--src/gallium/drivers/softpipe/sp_query.c111
-rw-r--r--src/gallium/drivers/softpipe/sp_query.h39
-rw-r--r--src/gallium/drivers/softpipe/sp_screen.c190
-rw-r--r--src/gallium/drivers/softpipe/sp_screen.h58
-rw-r--r--src/gallium/drivers/softpipe/sp_setup.c1550
-rw-r--r--src/gallium/drivers/softpipe/sp_setup.h53
-rw-r--r--src/gallium/drivers/softpipe/sp_state.h206
-rw-r--r--src/gallium/drivers/softpipe/sp_state_blend.c98
-rw-r--r--src/gallium/drivers/softpipe/sp_state_clip.c79
-rw-r--r--src/gallium/drivers/softpipe/sp_state_derived.c211
-rw-r--r--src/gallium/drivers/softpipe/sp_state_fs.c169
-rw-r--r--src/gallium/drivers/softpipe/sp_state_rasterizer.c62
-rw-r--r--src/gallium/drivers/softpipe/sp_state_sampler.c117
-rw-r--r--src/gallium/drivers/softpipe/sp_state_surface.c129
-rw-r--r--src/gallium/drivers/softpipe/sp_state_vertex.c73
-rw-r--r--src/gallium/drivers/softpipe/sp_surface.c50
-rw-r--r--src/gallium/drivers/softpipe/sp_surface.h42
-rw-r--r--src/gallium/drivers/softpipe/sp_tex_sample.c1295
-rw-r--r--src/gallium/drivers/softpipe/sp_tex_sample.h73
-rw-r--r--src/gallium/drivers/softpipe/sp_texture.c407
-rw-r--r--src/gallium/drivers/softpipe/sp_texture.h80
-rw-r--r--src/gallium/drivers/softpipe/sp_tile_cache.c626
-rw-r--r--src/gallium/drivers/softpipe/sp_tile_cache.h106
-rw-r--r--src/gallium/drivers/softpipe/sp_winsys.h65
-rw-r--r--src/gallium/drivers/trace/Makefile17
-rw-r--r--src/gallium/drivers/trace/README78
-rw-r--r--src/gallium/drivers/trace/SConscript19
-rw-r--r--src/gallium/drivers/trace/tr_buffer.c75
-rw-r--r--src/gallium/drivers/trace/tr_buffer.h70
-rw-r--r--src/gallium/drivers/trace/tr_context.c1302
-rw-r--r--src/gallium/drivers/trace/tr_context.h108
-rw-r--r--src/gallium/drivers/trace/tr_drm.c186
-rw-r--r--src/gallium/drivers/trace/tr_drm.h35
-rw-r--r--src/gallium/drivers/trace/tr_dump.c627
-rw-r--r--src/gallium/drivers/trace/tr_dump.h177
-rw-r--r--src/gallium/drivers/trace/tr_dump_state.c549
-rw-r--r--src/gallium/drivers/trace/tr_dump_state.h78
-rw-r--r--src/gallium/drivers/trace/tr_rbug.c864
-rw-r--r--src/gallium/drivers/trace/tr_screen.c958
-rw-r--r--src/gallium/drivers/trace/tr_screen.h130
-rw-r--r--src/gallium/drivers/trace/tr_state.c66
-rw-r--r--src/gallium/drivers/trace/tr_state.h68
-rw-r--r--src/gallium/drivers/trace/tr_texture.c170
-rw-r--r--src/gallium/drivers/trace/tr_texture.h122
-rw-r--r--src/gallium/drivers/trace/trace.xsl188
-rw-r--r--src/gallium/include/pipe/internal/p_winsys_screen.h190
-rw-r--r--src/gallium/include/pipe/p_atomic.h353
-rw-r--r--src/gallium/include/pipe/p_compiler.h179
-rw-r--r--src/gallium/include/pipe/p_config.h176
-rw-r--r--src/gallium/include/pipe/p_context.h271
-rw-r--r--src/gallium/include/pipe/p_defines.h322
-rw-r--r--src/gallium/include/pipe/p_error.h65
-rw-r--r--src/gallium/include/pipe/p_format.h620
-rw-r--r--src/gallium/include/pipe/p_inlines.h163
-rw-r--r--src/gallium/include/pipe/p_refcnt.h96
-rw-r--r--src/gallium/include/pipe/p_screen.h296
-rw-r--r--src/gallium/include/pipe/p_shader_tokens.h671
-rw-r--r--src/gallium/include/pipe/p_state.h412
-rw-r--r--src/gallium/include/pipe/p_thread.h279
-rw-r--r--src/gallium/include/state_tracker/dri1_api.h82
-rw-r--r--src/gallium/include/state_tracker/drm_api.h69
-rw-r--r--src/gallium/state_trackers/Makefile25
-rw-r--r--src/gallium/state_trackers/README2
-rw-r--r--src/gallium/state_trackers/dri/Makefile28
-rw-r--r--src/gallium/state_trackers/dri/SConscript23
-rw-r--r--src/gallium/state_trackers/dri/dri_context.c215
-rw-r--r--src/gallium/state_trackers/dri/dri_context.h123
-rw-r--r--src/gallium/state_trackers/dri/dri_drawable.c687
-rw-r--r--src/gallium/state_trackers/dri/dri_drawable.h111
-rw-r--r--src/gallium/state_trackers/dri/dri_extensions.c128
-rw-r--r--src/gallium/state_trackers/dri/dri_screen.c354
-rw-r--r--src/gallium/state_trackers/dri/dri_screen.h79
-rw-r--r--src/gallium/state_trackers/egl/Makefile19
-rw-r--r--src/gallium/state_trackers/egl/egl_context.c179
-rw-r--r--src/gallium/state_trackers/egl/egl_surface.c387
-rw-r--r--src/gallium/state_trackers/egl/egl_tracker.c271
-rw-r--r--src/gallium/state_trackers/egl/egl_tracker.h197
-rw-r--r--src/gallium/state_trackers/egl/egl_visual.c85
-rw-r--r--src/gallium/state_trackers/es/Makefile100
-rw-r--r--src/gallium/state_trackers/es/st_es1.c1
-rw-r--r--src/gallium/state_trackers/es/st_es2.c1
-rw-r--r--src/gallium/state_trackers/g3dvl/Makefile21
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_basic_csc.c714
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_basic_csc.h13
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_context.c205
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_context.h73
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_csc.h53
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_defs.h11
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_display.c48
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_display.h29
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_r16snorm_mc_buf.c1155
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_r16snorm_mc_buf.h18
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_r16snorm_mc_buf_shaders.inc1185
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_render.h38
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_screen.c115
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_screen.h63
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_shader_build.c215
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_shader_build.h61
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_surface.c242
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_surface.h86
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_types.h115
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_util.c16
-rw-r--r--src/gallium/state_trackers/g3dvl/vl_util.h6
-rw-r--r--src/gallium/state_trackers/glx/Makefile25
-rw-r--r--src/gallium/state_trackers/glx/xlib/Makefile16
-rw-r--r--src/gallium/state_trackers/glx/xlib/SConscript27
-rw-r--r--src/gallium/state_trackers/glx/xlib/glx_api.c2629
-rw-r--r--src/gallium/state_trackers/glx/xlib/glx_getproc.c214
-rw-r--r--src/gallium/state_trackers/glx/xlib/glx_usefont.c375
-rw-r--r--src/gallium/state_trackers/glx/xlib/xm_api.c1220
-rw-r--r--src/gallium/state_trackers/glx/xlib/xm_api.h394
-rw-r--r--src/gallium/state_trackers/glx/xlib/xm_winsys.h59
-rw-r--r--src/gallium/state_trackers/python/README43
-rw-r--r--src/gallium/state_trackers/python/SConscript47
-rw-r--r--src/gallium/state_trackers/python/gallium.i103
-rw-r--r--src/gallium/state_trackers/python/p_compiler.i29
-rw-r--r--src/gallium/state_trackers/python/p_context.i319
-rw-r--r--src/gallium/state_trackers/python/p_device.i130
-rw-r--r--src/gallium/state_trackers/python/p_format.i162
-rw-r--r--src/gallium/state_trackers/python/p_state.i137
-rw-r--r--src/gallium/state_trackers/python/p_texture.i450
-rw-r--r--src/gallium/state_trackers/python/retrace/README17
-rwxr-xr-xsrc/gallium/state_trackers/python/retrace/format.py173
-rwxr-xr-xsrc/gallium/state_trackers/python/retrace/interpreter.py748
-rwxr-xr-xsrc/gallium/state_trackers/python/retrace/model.py213
-rwxr-xr-xsrc/gallium/state_trackers/python/retrace/parse.py392
-rwxr-xr-xsrc/gallium/state_trackers/python/retrace/parser.py34
-rw-r--r--src/gallium/state_trackers/python/samples/tri.py232
-rw-r--r--src/gallium/state_trackers/python/st_device.c305
-rw-r--r--src/gallium/state_trackers/python/st_device.h106
-rw-r--r--src/gallium/state_trackers/python/st_hardpipe_winsys.c237
-rw-r--r--src/gallium/state_trackers/python/st_sample.c570
-rw-r--r--src/gallium/state_trackers/python/st_sample.h47
-rw-r--r--src/gallium/state_trackers/python/st_softpipe_winsys.c270
-rw-r--r--src/gallium/state_trackers/python/st_winsys.h52
-rw-r--r--src/gallium/state_trackers/python/tests/.gitignore3
-rwxr-xr-xsrc/gallium/state_trackers/python/tests/base.py344
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-abs.sh13
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-add.sh8
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-dp3.sh8
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-dp4.sh8
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-dst.sh8
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-ex2.sh11
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-flr.sh15
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-frc.sh13
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-lg2.sh15
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-lit.sh8
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-lrp.sh11
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-mad.sh11
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-max.sh10
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-min.sh10
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-mov.sh8
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-mul.sh10
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-rcp.sh15
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-rsq.sh15
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-sge.sh13
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-slt.sh13
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-srcmod-abs.sh13
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-srcmod-absneg.sh15
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-srcmod-neg.sh11
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-srcmod-swz.sh8
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-sub.sh8
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-xpd.sh8
-rw-r--r--src/gallium/state_trackers/python/tests/regress/fragment-shader/fragment-shader.py218
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-abs.sh15
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-add.sh13
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-arl.sh23
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-arr.sh23
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-dp3.sh16
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-dp4.sh16
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-dst.sh11
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-ex2.sh18
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-flr.sh23
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-frc.sh15
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-lg2.sh18
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-lit.sh11
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-lrp.sh14
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-mad.sh14
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-max.sh13
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-min.sh13
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-mov.sh11
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-mul.sh13
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-rcp.sh18
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-rsq.sh18
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-sge.sh16
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-slt.sh16
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-srcmod-abs.sh15
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-srcmod-absneg.sh16
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-srcmod-neg.sh12
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-srcmod-swz.sh11
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-sub.sh13
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-xpd.sh11
-rw-r--r--src/gallium/state_trackers/python/tests/regress/vertex-shader/vertex-shader.py247
-rwxr-xr-xsrc/gallium/state_trackers/python/tests/surface_copy.py199
-rwxr-xr-xsrc/gallium/state_trackers/python/tests/texture_render.py320
-rwxr-xr-xsrc/gallium/state_trackers/python/tests/texture_sample.py591
-rwxr-xr-xsrc/gallium/state_trackers/python/tests/texture_transfer.py179
-rwxr-xr-xsrc/gallium/state_trackers/python/tests/tree.py23
-rw-r--r--src/gallium/state_trackers/vega/Makefile128
-rw-r--r--src/gallium/state_trackers/vega/api_consts.h56
-rw-r--r--src/gallium/state_trackers/vega/api_context.c75
-rw-r--r--src/gallium/state_trackers/vega/api_filters.c805
-rw-r--r--src/gallium/state_trackers/vega/api_images.c489
-rw-r--r--src/gallium/state_trackers/vega/api_masks.c373
-rw-r--r--src/gallium/state_trackers/vega/api_misc.c83
-rw-r--r--src/gallium/state_trackers/vega/api_paint.c166
-rw-r--r--src/gallium/state_trackers/vega/api_params.c1673
-rw-r--r--src/gallium/state_trackers/vega/api_path.c488
-rw-r--r--src/gallium/state_trackers/vega/api_text.c258
-rw-r--r--src/gallium/state_trackers/vega/api_transform.c128
-rw-r--r--src/gallium/state_trackers/vega/arc.c708
-rw-r--r--src/gallium/state_trackers/vega/arc.h80
-rw-r--r--src/gallium/state_trackers/vega/asm_fill.h246
-rw-r--r--src/gallium/state_trackers/vega/asm_filters.h117
-rw-r--r--src/gallium/state_trackers/vega/asm_util.h136
-rw-r--r--src/gallium/state_trackers/vega/bezier.c704
-rw-r--r--src/gallium/state_trackers/vega/bezier.h81
-rw-r--r--src/gallium/state_trackers/vega/image.c654
-rw-r--r--src/gallium/state_trackers/vega/image.h104
-rw-r--r--src/gallium/state_trackers/vega/mask.c690
-rw-r--r--src/gallium/state_trackers/vega/mask.h68
-rw-r--r--src/gallium/state_trackers/vega/matrix.h462
-rw-r--r--src/gallium/state_trackers/vega/paint.c699
-rw-r--r--src/gallium/state_trackers/vega/paint.h118
-rw-r--r--src/gallium/state_trackers/vega/path.c2034
-rw-r--r--src/gallium/state_trackers/vega/path.h126
-rw-r--r--src/gallium/state_trackers/vega/path_utils.h109
-rw-r--r--src/gallium/state_trackers/vega/polygon.c550
-rw-r--r--src/gallium/state_trackers/vega/polygon.h75
-rw-r--r--src/gallium/state_trackers/vega/renderer.c592
-rw-r--r--src/gallium/state_trackers/vega/renderer.h76
-rw-r--r--src/gallium/state_trackers/vega/shader.c310
-rw-r--r--src/gallium/state_trackers/vega/shader.h56
-rw-r--r--src/gallium/state_trackers/vega/shaders_cache.c439
-rw-r--r--src/gallium/state_trackers/vega/shaders_cache.h77
-rw-r--r--src/gallium/state_trackers/vega/st_inlines.h159
-rw-r--r--src/gallium/state_trackers/vega/stroker.c1349
-rw-r--r--src/gallium/state_trackers/vega/stroker.h89
-rw-r--r--src/gallium/state_trackers/vega/util_array.h122
-rw-r--r--src/gallium/state_trackers/vega/vg_context.c543
-rw-r--r--src/gallium/state_trackers/vega/vg_context.h292
-rw-r--r--src/gallium/state_trackers/vega/vg_state.c124
-rw-r--r--src/gallium/state_trackers/vega/vg_state.h109
-rw-r--r--src/gallium/state_trackers/vega/vg_tracker.c416
-rw-r--r--src/gallium/state_trackers/vega/vg_tracker.h107
-rw-r--r--src/gallium/state_trackers/vega/vg_translate.c1030
-rw-r--r--src/gallium/state_trackers/vega/vg_translate.h49
-rw-r--r--src/gallium/state_trackers/vega/vgu.c450
-rw-r--r--src/gallium/state_trackers/wgl/SConscript42
-rw-r--r--src/gallium/state_trackers/wgl/icd/stw_icd.c617
-rw-r--r--src/gallium/state_trackers/wgl/icd/stw_icd.h489
-rw-r--r--src/gallium/state_trackers/wgl/opengl32.def388
-rw-r--r--src/gallium/state_trackers/wgl/opengl32.mingw.def387
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_arbpixelformat.c483
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_context.c382
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_context.h43
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_device.c225
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_device.h77
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_extensionsstring.c59
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_extgallium.c79
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_extgallium.h47
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_extswapinterval.c57
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_framebuffer.c493
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_framebuffer.h148
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_getprocaddress.c86
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_pixelformat.c370
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_pixelformat.h65
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_public.h73
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_tls.c139
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_tls.h59
-rw-r--r--src/gallium/state_trackers/wgl/shared/stw_winsys.h65
-rw-r--r--src/gallium/state_trackers/wgl/wgl/stw_wgl.c329
-rw-r--r--src/gallium/state_trackers/wgl/wgl/stw_wgl.h63
-rw-r--r--src/gallium/state_trackers/xorg/Makefile18
-rw-r--r--src/gallium/state_trackers/xorg/SConscript27
-rw-r--r--src/gallium/state_trackers/xorg/xorg_composite.c1019
-rw-r--r--src/gallium/state_trackers/xorg/xorg_composite.h37
-rw-r--r--src/gallium/state_trackers/xorg/xorg_crtc.c331
-rw-r--r--src/gallium/state_trackers/xorg/xorg_dri2.c380
-rw-r--r--src/gallium/state_trackers/xorg/xorg_driver.c744
-rw-r--r--src/gallium/state_trackers/xorg/xorg_exa.c771
-rw-r--r--src/gallium/state_trackers/xorg/xorg_exa.h57
-rw-r--r--src/gallium/state_trackers/xorg/xorg_exa_tgsi.c489
-rw-r--r--src/gallium/state_trackers/xorg/xorg_exa_tgsi.h43
-rw-r--r--src/gallium/state_trackers/xorg/xorg_output.c296
-rw-r--r--src/gallium/state_trackers/xorg/xorg_tracker.h155
-rw-r--r--src/gallium/state_trackers/xorg/xorg_winsys.h49
-rw-r--r--src/gallium/winsys/Makefile12
-rw-r--r--src/gallium/winsys/SConscript16
-rw-r--r--src/gallium/winsys/drm/Makefile12
-rw-r--r--src/gallium/winsys/drm/Makefile.template126
-rw-r--r--src/gallium/winsys/drm/SConscript59
-rw-r--r--src/gallium/winsys/drm/intel/Makefile12
-rw-r--r--src/gallium/winsys/drm/intel/SConscript7
-rw-r--r--src/gallium/winsys/drm/intel/dri/Makefile27
-rw-r--r--src/gallium/winsys/drm/intel/dri/SConscript20
-rw-r--r--src/gallium/winsys/drm/intel/egl/Makefile29
-rw-r--r--src/gallium/winsys/drm/intel/gem/Makefile16
-rw-r--r--src/gallium/winsys/drm/intel/gem/SConscript17
-rw-r--r--src/gallium/winsys/drm/intel/gem/intel_drm_api.c202
-rw-r--r--src/gallium/winsys/drm/intel/gem/intel_drm_batchbuffer.c229
-rw-r--r--src/gallium/winsys/drm/intel/gem/intel_drm_buffer.c148
-rw-r--r--src/gallium/winsys/drm/intel/gem/intel_drm_fence.c81
-rw-r--r--src/gallium/winsys/drm/intel/gem/intel_drm_winsys.h78
-rw-r--r--src/gallium/winsys/drm/intel/xorg/Makefile57
-rw-r--r--src/gallium/winsys/drm/intel/xorg/intel_xorg.c147
-rw-r--r--src/gallium/winsys/drm/nouveau/Makefile12
-rw-r--r--src/gallium/winsys/drm/nouveau/dri/Makefile27
-rw-r--r--src/gallium/winsys/drm/nouveau/drm/Makefile11
-rw-r--r--src/gallium/winsys/drm/nouveau/drm/nouveau_dri.h28
-rw-r--r--src/gallium/winsys/drm/nouveau/drm/nouveau_drm_api.c263
-rw-r--r--src/gallium/winsys/drm/nouveau/drm/nouveau_drm_api.h34
-rw-r--r--src/gallium/winsys/drm/radeon/Makefile12
-rw-r--r--src/gallium/winsys/drm/radeon/SConscript7
-rw-r--r--src/gallium/winsys/drm/radeon/core/Makefile18
-rw-r--r--src/gallium/winsys/drm/radeon/core/SConscript19
-rw-r--r--src/gallium/winsys/drm/radeon/core/radeon_buffer.c285
-rw-r--r--src/gallium/winsys/drm/radeon/core/radeon_buffer.h86
-rw-r--r--src/gallium/winsys/drm/radeon/core/radeon_drm.c188
-rw-r--r--src/gallium/winsys/drm/radeon/core/radeon_drm.h79
-rw-r--r--src/gallium/winsys/drm/radeon/core/radeon_r300.c210
-rw-r--r--src/gallium/winsys/drm/radeon/core/radeon_r300.h42
-rw-r--r--src/gallium/winsys/drm/radeon/core/radeon_winsys_softpipe.c41
-rw-r--r--src/gallium/winsys/drm/radeon/core/radeon_winsys_softpipe.h44
-rw-r--r--src/gallium/winsys/drm/radeon/dri/Makefile26
-rw-r--r--src/gallium/winsys/drm/radeon/dri/SConscript17
-rw-r--r--src/gallium/winsys/drm/radeon/egl/Makefile26
-rw-r--r--src/gallium/winsys/drm/radeon/python/README15
-rw-r--r--src/gallium/winsys/drm/radeon/python/SConscript33
-rw-r--r--src/gallium/winsys/drm/radeon/python/radeon_hardpipe_winsys.c140
-rw-r--r--src/gallium/winsys/drm/radeon/python/xf86dri.c605
-rw-r--r--src/gallium/winsys/drm/radeon/python/xf86dri.h123
-rw-r--r--src/gallium/winsys/drm/radeon/python/xf86dristr.h389
-rw-r--r--src/gallium/winsys/drm/radeon/xorg/Makefile42
-rw-r--r--src/gallium/winsys/drm/radeon/xorg/radeon_xorg.c148
-rw-r--r--src/gallium/winsys/egl_xlib/Makefile89
-rw-r--r--src/gallium/winsys/egl_xlib/egl_xlib.c869
-rw-r--r--src/gallium/winsys/egl_xlib/sw_winsys.c243
-rw-r--r--src/gallium/winsys/egl_xlib/sw_winsys.h40
-rw-r--r--src/gallium/winsys/g3dvl/nouveau/Makefile50
-rw-r--r--src/gallium/winsys/g3dvl/nouveau/nouveau_context_vl.c172
-rw-r--r--src/gallium/winsys/g3dvl/nouveau/nouveau_context_vl.h39
-rw-r--r--src/gallium/winsys/g3dvl/nouveau/nouveau_screen_vl.c88
-rw-r--r--src/gallium/winsys/g3dvl/nouveau/nouveau_screen_vl.h20
-rw-r--r--src/gallium/winsys/g3dvl/nouveau/nouveau_swapbuffers.c94
-rw-r--r--src/gallium/winsys/g3dvl/nouveau/nouveau_swapbuffers.h10
-rw-r--r--src/gallium/winsys/g3dvl/vl_winsys.h14
-rw-r--r--src/gallium/winsys/g3dvl/xsp_winsys.c290
-rw-r--r--src/gallium/winsys/gdi/SConscript39
-rw-r--r--src/gallium/winsys/gdi/gdi_softpipe_winsys.c334
-rw-r--r--src/gallium/winsys/xlib/Makefile105
-rw-r--r--src/gallium/winsys/xlib/SConscript66
-rw-r--r--src/gallium/winsys/xlib/xlib.c123
-rw-r--r--src/gallium/winsys/xlib/xlib.h15
-rw-r--r--src/gallium/winsys/xlib/xlib_brw.h30
-rw-r--r--src/gallium/winsys/xlib/xlib_brw_aub.c399
-rw-r--r--src/gallium/winsys/xlib/xlib_brw_aub.h114
-rw-r--r--src/gallium/winsys/xlib/xlib_brw_context.c209
-rw-r--r--src/gallium/winsys/xlib/xlib_brw_screen.c469
-rw-r--r--src/gallium/winsys/xlib/xlib_cell.c437
-rw-r--r--src/gallium/winsys/xlib/xlib_llvmpipe.c461
-rw-r--r--src/gallium/winsys/xlib/xlib_softpipe.c507
-rw-r--r--src/gallium/winsys/xlib/xlib_trace.c113
-rw-r--r--src/gallium/winsys/xlib/xmesa.h424
-rw-r--r--src/gallium/winsys/xlib/xmesa_x.h86
1118 files changed, 275956 insertions, 0 deletions
diff --git a/src/gallium/Makefile b/src/gallium/Makefile
new file mode 100644
index 0000000000..79ca767f7e
--- /dev/null
+++ b/src/gallium/Makefile
@@ -0,0 +1,12 @@
+# src/gallium/Makefile
+TOP = ../..
+include $(TOP)/configs/current
+
+SUBDIRS = $(GALLIUM_DIRS)
+
+default install clean:
+ @for dir in $(SUBDIRS) ; do \
+ if [ -d $$dir ] ; then \
+ (cd $$dir && $(MAKE) $@) || exit 1; \
+ fi \
+ done
diff --git a/src/gallium/Makefile.template b/src/gallium/Makefile.template
new file mode 100644
index 0000000000..63983c5220
--- /dev/null
+++ b/src/gallium/Makefile.template
@@ -0,0 +1,66 @@
+# src/gallium/Makefile.template
+
+# Template makefile for gallium libraries.
+#
+# Usage:
+# The minimum that the including makefile needs to define
+# is TOP, LIBNAME and one of the *_SOURCES.
+#
+# Optional defines:
+# LIBRARY_INCLUDES are appended to the list of include directories.
+# LIBRARY_DEFINES is not used for makedepend, but for compilation.
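+#
+# A minimal including makefile looks much like the per-module makefiles
+# later in this tree (e.g. cso_cache/Makefile).  Illustrative sketch only,
+# with made-up names:
+#
+#    TOP = ../../../..
+#    include $(TOP)/configs/current
+#
+#    LIBNAME = foo
+#    C_SOURCES = foo.c bar.c
+#
+#    include ../../Makefile.template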
+
+### Basic defines ###
+
+OBJECTS = $(C_SOURCES:.c=.o) \
+ $(CPP_SOURCES:.cpp=.o) \
+ $(ASM_SOURCES:.S=.o)
+
+INCLUDES = \
+ -I. \
+ -I$(TOP)/src/gallium/include \
+ -I$(TOP)/src/gallium/auxiliary \
+ -I$(TOP)/src/gallium/drivers \
+ -I$(GALLIUM)/src/gallium/include \
+ -I$(GALLIUM)/src/gallium/auxiliary \
+ -I$(GALLIUM)/src/gallium/drivers \
+ $(LIBRARY_INCLUDES)
+
+
+##### TARGETS #####
+
+default: depend lib$(LIBNAME).a
+
+lib$(LIBNAME).a: $(OBJECTS) $(EXTRA_OBJECTS) Makefile $(TOP)/src/gallium/Makefile.template
+ $(MKLIB) -o $(LIBNAME) -static $(OBJECTS) $(EXTRA_OBJECTS)
+
+depend: $(C_SOURCES) $(CPP_SOURCES) $(ASM_SOURCES) $(SYMLINKS)
+ rm -f depend
+ touch depend
+ $(MKDEP) $(MKDEP_OPTIONS) $(INCLUDES) $(C_SOURCES) $(CPP_SOURCES) $(ASM_SOURCES) 2> /dev/null
+
+# Emacs tags
+tags:
+ etags `find . -name \*.[ch]` `find $(TOP)/src/gallium/include -name \*.h`
+
+# Remove .o and backup files
+clean:
+ rm -f $(OBJECTS) lib$(LIBNAME).a depend depend.bak
+
+# Dummy target
+install:
+ @echo -n ""
+
+##### RULES #####
+
+.c.o:
+ $(CC) -c $(INCLUDES) $(CFLAGS) $(LIBRARY_DEFINES) $< -o $@
+
+.cpp.o:
+ $(CXX) -c $(INCLUDES) $(CXXFLAGS) $(LIBRARY_DEFINES) $< -o $@
+
+.S.o:
+ $(CC) -c $(INCLUDES) $(CFLAGS) $(LIBRARY_DEFINES) $< -o $@
+
+
+sinclude depend
diff --git a/src/gallium/README.portability b/src/gallium/README.portability
new file mode 100644
index 0000000000..adecf4bb79
--- /dev/null
+++ b/src/gallium/README.portability
@@ -0,0 +1,109 @@
+ CROSS-PLATFORM PORTABILITY GUIDELINES FOR GALLIUM3D
+
+
+= General Considerations =
+
+The state tracker and winsys driver support a rather limited number of
+platforms. However, the pipe drivers are meant to run on a wide range of
+platforms. Hence the pipe drivers, the auxiliary modules, and all public
+headers in general, should strictly follow these guidelines to ensure portability.
+
+
+= Compiler Support =
+
+* Include p_compiler.h.
+
+* Don't use the 'inline' keyword, use the INLINE macro in p_compiler.h instead.
+
+* Cast explicitly when converting to integer types of smaller sizes.
+
+* Cast explicitly when converting between float, double and integral types.
+
+* Don't use named struct initializers.
+
+* Don't use variable number of macro arguments. Use static inline functions
+instead.
+
+* Don't use C99 features.
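+
+For example, a small helper following these rules might look like this
+(an illustrative sketch only; the function name is made up):
+
+static INLINE unsigned short
+float_to_ushort(float f)
+{
+   /* explicit casts when converting from float and when narrowing */
+   return (unsigned short) (int) f;
+}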
+
+= Standard Library =
+
+* Avoid including standard library headers. Most standard library functions are
+not available in Windows Kernel Mode. Use the appropriate p_*.h include.
+
+== Memory Allocation ==
+
+* Use MALLOC, CALLOC, FREE instead of the malloc, calloc, free functions.
+
+* Use the align_pointer() function defined in u_memory.h for aligning pointers
+ in a portable way.
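+
+For example (an illustrative sketch; 'struct foo' and 'unaligned' are made up):
+
+   struct foo *f = MALLOC(sizeof(struct foo));   /* not malloc() */
+   void *p = align_pointer(unaligned, 16);       /* portable 16-byte alignment */
+   ...
+   FREE(f);                                      /* not free() */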
+
+== Debugging ==
+
+* Use the functions/macros in p_debug.h.
+
+* Don't include assert.h, and don't call abort(), printf(), etc.
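+
+For example (an illustrative sketch; the variable names are made up):
+
+   debug_printf("unexpected format %d\n", format);   /* instead of printf() */
+   debug_assert(width > 0);                          /* instead of assert() */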
+
+
+= Code Style =
+
+== Inheritance in C ==
+
+The main thing we do is mimic inheritance by structure containment.
+
+Here's a silly made-up example:
+
+/* base class */
+struct buffer
+{
+ int size;
+ void (*validate)(struct buffer *buf);
+};
+
+/* sub-class of buffer */
+struct texture_buffer
+{
+ struct buffer base; /* the base class, MUST COME FIRST! */
+ int format;
+ int width, height;
+};
+
+
+Then, we'll typically have cast-wrapper functions to convert base-class
+pointers to sub-class pointers where needed:
+
+static inline struct texture_buffer *texture_buffer(struct buffer *buf)
+{
+   return (struct texture_buffer *) buf;
+}
+
+
+To create/init a sub-classed object:
+
+struct buffer *create_texture_buffer(int w, int h, int format)
+{
+ struct texture_buffer *t = malloc(sizeof(*t));
+ t->format = format;
+ t->width = w;
+ t->height = h;
+ t->base.size = w * h;
+ t->base.validate = tex_validate;
+ return &t->base;
+}
+
+Example sub-class method:
+
+void tex_validate(struct buffer *buf)
+{
+ struct texture_buffer *tb = texture_buffer(buf);
+ assert(tb->format);
+ assert(tb->width);
+ assert(tb->height);
+}
+
+
+Note that we typically do not use typedefs to make "class names"; we use
+'struct whatever' everywhere.
+
+Gallium's pipe_context and the subclassed psb_context, etc. are prime examples
+of this. There are also many examples in Mesa and the Mesa state tracker.
diff --git a/src/gallium/SConscript b/src/gallium/SConscript
new file mode 100644
index 0000000000..89c69d7205
--- /dev/null
+++ b/src/gallium/SConscript
@@ -0,0 +1,37 @@
+import os
+
+Import('*')
+
+env = env.Clone()
+
+auxiliaries = []
+
+Export('auxiliaries')
+
+
+if llvm:
+ SConscript(['auxiliary/gallivm/SConscript'])
+
+SConscript([
+ # NOTE: order matters!
+ 'auxiliary/util/SConscript',
+ 'auxiliary/rtasm/SConscript',
+ 'auxiliary/tgsi/SConscript',
+ 'auxiliary/cso_cache/SConscript',
+ 'auxiliary/translate/SConscript',
+ 'auxiliary/draw/SConscript',
+ 'auxiliary/pipebuffer/SConscript',
+ 'auxiliary/indices/SConscript',
+ 'auxiliary/rbug/SConscript',
+])
+
+for driver in env['drivers']:
+ SConscript(os.path.join('drivers', driver, 'SConscript'))
+
+SConscript('state_trackers/python/SConscript')
+SConscript('state_trackers/glx/xlib/SConscript')
+SConscript('state_trackers/dri/SConscript')
+SConscript('state_trackers/xorg/SConscript')
+
+if platform == 'windows':
+ SConscript('state_trackers/wgl/SConscript')
diff --git a/src/gallium/auxiliary/Makefile b/src/gallium/auxiliary/Makefile
new file mode 100644
index 0000000000..5446eb68a9
--- /dev/null
+++ b/src/gallium/auxiliary/Makefile
@@ -0,0 +1,12 @@
+# src/gallium/auxiliary/Makefile
+TOP = ../../..
+include $(TOP)/configs/current
+
+SUBDIRS = $(GALLIUM_AUXILIARY_DIRS)
+
+default install clean:
+ @for dir in $(SUBDIRS) ; do \
+ if [ -d $$dir ] ; then \
+ (cd $$dir && $(MAKE) $@) || exit 1; \
+ fi \
+ done
diff --git a/src/gallium/auxiliary/cso_cache/Makefile b/src/gallium/auxiliary/cso_cache/Makefile
new file mode 100644
index 0000000000..8726afcd94
--- /dev/null
+++ b/src/gallium/auxiliary/cso_cache/Makefile
@@ -0,0 +1,11 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = cso_cache
+
+C_SOURCES = \
+ cso_context.c \
+ cso_cache.c \
+ cso_hash.c
+
+include ../../Makefile.template
diff --git a/src/gallium/auxiliary/cso_cache/SConscript b/src/gallium/auxiliary/cso_cache/SConscript
new file mode 100644
index 0000000000..651e68a191
--- /dev/null
+++ b/src/gallium/auxiliary/cso_cache/SConscript
@@ -0,0 +1,11 @@
+Import('*')
+
+cso_cache = env.ConvenienceLibrary(
+ target = 'cso_cache',
+ source = [
+ 'cso_context.c',
+ 'cso_cache.c',
+ 'cso_hash.c',
+ ])
+
+auxiliaries.insert(0, cso_cache)
diff --git a/src/gallium/auxiliary/cso_cache/cso_cache.c b/src/gallium/auxiliary/cso_cache/cso_cache.c
new file mode 100644
index 0000000000..e6dce3f0e5
--- /dev/null
+++ b/src/gallium/auxiliary/cso_cache/cso_cache.c
@@ -0,0 +1,410 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Zack Rusin <zack@tungstengraphics.com>
+ */
+
+#include "util/u_debug.h"
+
+#include "util/u_memory.h"
+
+#include "cso_cache.h"
+#include "cso_hash.h"
+
+
+struct cso_cache {
+ struct cso_hash *blend_hash;
+ struct cso_hash *depth_stencil_hash;
+ struct cso_hash *fs_hash;
+ struct cso_hash *vs_hash;
+ struct cso_hash *rasterizer_hash;
+ struct cso_hash *sampler_hash;
+ int max_size;
+
+ cso_sanitize_callback sanitize_cb;
+ void *sanitize_data;
+};
+
+#if 1
+static unsigned hash_key(const void *key, unsigned key_size)
+{
+ unsigned *ikey = (unsigned *)key;
+ unsigned hash = 0, i;
+
+ assert(key_size % 4 == 0);
+
+ /* I'm sure this can be improved on:
+ */
+ for (i = 0; i < key_size/4; i++)
+ hash ^= ikey[i];
+
+ return hash;
+}
+#else
+static unsigned hash_key(const unsigned char *p, int n)
+{
+ unsigned h = 0;
+ unsigned g;
+
+ while (n--) {
+ h = (h << 4) + *p++;
+ if ((g = (h & 0xf0000000)) != 0)
+ h ^= g >> 23;
+ h &= ~g;
+ }
+ return h;
+}
+#endif
+
+unsigned cso_construct_key(void *item, int item_size)
+{
+ return hash_key((item), item_size);
+}
+
+static struct cso_hash *_cso_hash_for_type(struct cso_cache *sc, enum cso_cache_type type)
+{
+ struct cso_hash *hash = 0;
+
+ switch(type) {
+ case CSO_BLEND:
+ hash = sc->blend_hash;
+ break;
+ case CSO_SAMPLER:
+ hash = sc->sampler_hash;
+ break;
+ case CSO_DEPTH_STENCIL_ALPHA:
+ hash = sc->depth_stencil_hash;
+ break;
+ case CSO_RASTERIZER:
+ hash = sc->rasterizer_hash;
+ break;
+ case CSO_FRAGMENT_SHADER:
+ hash = sc->fs_hash;
+ break;
+ case CSO_VERTEX_SHADER:
+ hash = sc->vs_hash;
+ break;
+ }
+
+ return hash;
+}
+
+static int _cso_size_for_type(enum cso_cache_type type)
+{
+ switch(type) {
+ case CSO_BLEND:
+ return sizeof(struct pipe_blend_state);
+ case CSO_SAMPLER:
+ return sizeof(struct pipe_sampler_state);
+ case CSO_DEPTH_STENCIL_ALPHA:
+ return sizeof(struct pipe_depth_stencil_alpha_state);
+ case CSO_RASTERIZER:
+ return sizeof(struct pipe_rasterizer_state);
+ case CSO_FRAGMENT_SHADER:
+ return sizeof(struct pipe_shader_state);
+ case CSO_VERTEX_SHADER:
+ return sizeof(struct pipe_shader_state);
+ }
+ return 0;
+}
+
+
+static void delete_blend_state(void *state, void *data)
+{
+ struct cso_blend *cso = (struct cso_blend *)state;
+ if (cso->delete_state)
+ cso->delete_state(cso->context, cso->data);
+ FREE(state);
+}
+
+static void delete_depth_stencil_state(void *state, void *data)
+{
+ struct cso_depth_stencil_alpha *cso = (struct cso_depth_stencil_alpha *)state;
+ if (cso->delete_state)
+ cso->delete_state(cso->context, cso->data);
+ FREE(state);
+}
+
+static void delete_sampler_state(void *state, void *data)
+{
+ struct cso_sampler *cso = (struct cso_sampler *)state;
+ if (cso->delete_state)
+ cso->delete_state(cso->context, cso->data);
+ FREE(state);
+}
+
+static void delete_rasterizer_state(void *state, void *data)
+{
+ struct cso_rasterizer *cso = (struct cso_rasterizer *)state;
+ if (cso->delete_state)
+ cso->delete_state(cso->context, cso->data);
+ FREE(state);
+}
+
+static void delete_fs_state(void *state, void *data)
+{
+ struct cso_fragment_shader *cso = (struct cso_fragment_shader *)state;
+ if (cso->delete_state)
+ cso->delete_state(cso->context, cso->data);
+ FREE(state);
+}
+
+static void delete_vs_state(void *state, void *data)
+{
+ struct cso_vertex_shader *cso = (struct cso_vertex_shader *)state;
+ if (cso->delete_state)
+ cso->delete_state(cso->context, cso->data);
+ FREE(state);
+}
+
+
+static INLINE void delete_cso(void *state, enum cso_cache_type type)
+{
+ switch (type) {
+ case CSO_BLEND:
+ delete_blend_state(state, 0);
+ break;
+ case CSO_SAMPLER:
+ delete_sampler_state(state, 0);
+ break;
+ case CSO_DEPTH_STENCIL_ALPHA:
+ delete_depth_stencil_state(state, 0);
+ break;
+ case CSO_RASTERIZER:
+ delete_rasterizer_state(state, 0);
+ break;
+ case CSO_FRAGMENT_SHADER:
+ delete_fs_state(state, 0);
+ break;
+ case CSO_VERTEX_SHADER:
+ delete_vs_state(state, 0);
+ break;
+ default:
+ assert(0);
+ FREE(state);
+ }
+}
+
+
+static INLINE void sanitize_hash(struct cso_cache *sc,
+ struct cso_hash *hash,
+ enum cso_cache_type type,
+ int max_size)
+{
+ if (sc->sanitize_cb)
+ sc->sanitize_cb(hash, type, max_size, sc->sanitize_data);
+}
+
+
+static INLINE void sanitize_cb(struct cso_hash *hash, enum cso_cache_type type,
+ int max_size, void *user_data)
+{
+   /* If we're approaching the maximum size, remove a fourth of the entries;
+    * otherwise every subsequent call will go through the same thing. */
+ int hash_size = cso_hash_size(hash);
+ int max_entries = (max_size > hash_size) ? max_size : hash_size;
+ int to_remove = (max_size < max_entries) * max_entries/4;
+ if (hash_size > max_size)
+ to_remove += hash_size - max_size;
+ while (to_remove) {
+      /* remove elements until we're good */
+      /* FIXME: currently we pick the nodes to remove at random */
+ struct cso_hash_iter iter = cso_hash_first_node(hash);
+ void *cso = cso_hash_take(hash, cso_hash_iter_key(iter));
+ delete_cso(cso, type);
+ --to_remove;
+ }
+}
+
+struct cso_hash_iter
+cso_insert_state(struct cso_cache *sc,
+ unsigned hash_key, enum cso_cache_type type,
+ void *state)
+{
+ struct cso_hash *hash = _cso_hash_for_type(sc, type);
+ sanitize_hash(sc, hash, type, sc->max_size);
+
+ return cso_hash_insert(hash, hash_key, state);
+}
+
+struct cso_hash_iter
+cso_find_state(struct cso_cache *sc,
+ unsigned hash_key, enum cso_cache_type type)
+{
+ struct cso_hash *hash = _cso_hash_for_type(sc, type);
+
+ return cso_hash_find(hash, hash_key);
+}
+
+
+void *cso_hash_find_data_from_template( struct cso_hash *hash,
+ unsigned hash_key,
+ void *templ,
+ int size )
+{
+ struct cso_hash_iter iter = cso_hash_find(hash, hash_key);
+ while (!cso_hash_iter_is_null(iter)) {
+ void *iter_data = cso_hash_iter_data(iter);
+ if (!memcmp(iter_data, templ, size)) {
+ /* We found a match
+ */
+ return iter_data;
+ }
+ iter = cso_hash_iter_next(iter);
+ }
+ return NULL;
+}
+
+
+struct cso_hash_iter cso_find_state_template(struct cso_cache *sc,
+ unsigned hash_key, enum cso_cache_type type,
+ void *templ)
+{
+ struct cso_hash_iter iter = cso_find_state(sc, hash_key, type);
+ int size = _cso_size_for_type(type);
+ while (!cso_hash_iter_is_null(iter)) {
+ void *iter_data = cso_hash_iter_data(iter);
+ if (!memcmp(iter_data, templ, size))
+ return iter;
+ iter = cso_hash_iter_next(iter);
+ }
+ return iter;
+}
+
+void * cso_take_state(struct cso_cache *sc,
+ unsigned hash_key, enum cso_cache_type type)
+{
+ struct cso_hash *hash = _cso_hash_for_type(sc, type);
+ return cso_hash_take(hash, hash_key);
+}
+
+struct cso_cache *cso_cache_create(void)
+{
+ struct cso_cache *sc = MALLOC_STRUCT(cso_cache);
+ if (sc == NULL)
+ return NULL;
+
+ sc->max_size = 4096;
+ sc->blend_hash = cso_hash_create();
+ sc->sampler_hash = cso_hash_create();
+ sc->depth_stencil_hash = cso_hash_create();
+ sc->rasterizer_hash = cso_hash_create();
+ sc->fs_hash = cso_hash_create();
+ sc->vs_hash = cso_hash_create();
+ sc->sanitize_cb = sanitize_cb;
+ sc->sanitize_data = 0;
+
+ return sc;
+}
+
+void cso_for_each_state(struct cso_cache *sc, enum cso_cache_type type,
+ cso_state_callback func, void *user_data)
+{
+ struct cso_hash *hash = 0;
+ struct cso_hash_iter iter;
+
+ switch (type) {
+ case CSO_BLEND:
+ hash = sc->blend_hash;
+ break;
+ case CSO_SAMPLER:
+ hash = sc->sampler_hash;
+ break;
+ case CSO_DEPTH_STENCIL_ALPHA:
+ hash = sc->depth_stencil_hash;
+ break;
+ case CSO_RASTERIZER:
+ hash = sc->rasterizer_hash;
+ break;
+ case CSO_FRAGMENT_SHADER:
+ hash = sc->fs_hash;
+ break;
+ case CSO_VERTEX_SHADER:
+ hash = sc->vs_hash;
+ break;
+ }
+
+ iter = cso_hash_first_node(hash);
+ while (!cso_hash_iter_is_null(iter)) {
+ void *state = cso_hash_iter_data(iter);
+ iter = cso_hash_iter_next(iter);
+ if (state) {
+ func(state, user_data);
+ }
+ }
+}
+
+void cso_cache_delete(struct cso_cache *sc)
+{
+ assert(sc);
+
+ if (!sc)
+ return;
+
+ /* delete driver data */
+ cso_for_each_state(sc, CSO_BLEND, delete_blend_state, 0);
+ cso_for_each_state(sc, CSO_DEPTH_STENCIL_ALPHA, delete_depth_stencil_state, 0);
+ cso_for_each_state(sc, CSO_FRAGMENT_SHADER, delete_fs_state, 0);
+ cso_for_each_state(sc, CSO_VERTEX_SHADER, delete_vs_state, 0);
+ cso_for_each_state(sc, CSO_RASTERIZER, delete_rasterizer_state, 0);
+ cso_for_each_state(sc, CSO_SAMPLER, delete_sampler_state, 0);
+
+ cso_hash_delete(sc->blend_hash);
+ cso_hash_delete(sc->sampler_hash);
+ cso_hash_delete(sc->depth_stencil_hash);
+ cso_hash_delete(sc->rasterizer_hash);
+ cso_hash_delete(sc->fs_hash);
+ cso_hash_delete(sc->vs_hash);
+ FREE(sc);
+}
+
+void cso_set_maximum_cache_size(struct cso_cache *sc, int number)
+{
+ sc->max_size = number;
+
+ sanitize_hash(sc, sc->blend_hash, CSO_BLEND, sc->max_size);
+ sanitize_hash(sc, sc->depth_stencil_hash, CSO_DEPTH_STENCIL_ALPHA,
+ sc->max_size);
+ sanitize_hash(sc, sc->fs_hash, CSO_FRAGMENT_SHADER, sc->max_size);
+ sanitize_hash(sc, sc->vs_hash, CSO_VERTEX_SHADER, sc->max_size);
+ sanitize_hash(sc, sc->rasterizer_hash, CSO_RASTERIZER, sc->max_size);
+ sanitize_hash(sc, sc->sampler_hash, CSO_SAMPLER, sc->max_size);
+}
+
+int cso_maximum_cache_size(const struct cso_cache *sc)
+{
+ return sc->max_size;
+}
+
+void cso_cache_set_sanitize_callback(struct cso_cache *sc,
+ cso_sanitize_callback cb,
+ void *user_data)
+{
+ sc->sanitize_cb = cb;
+ sc->sanitize_data = user_data;
+}
+
diff --git a/src/gallium/auxiliary/cso_cache/cso_cache.h b/src/gallium/auxiliary/cso_cache/cso_cache.h
new file mode 100644
index 0000000000..6b5c230e8f
--- /dev/null
+++ b/src/gallium/auxiliary/cso_cache/cso_cache.h
@@ -0,0 +1,176 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /**
+ * @file
+ * Constant State Object (CSO) cache.
+ *
+ * The basic idea is that the states are created via the
+ * create_state/bind_state/delete_state semantics. The driver is expected to
+ * perform as much of the Gallium state translation to whatever its internal
+ * representation is during the create call. Gallium then has a caching
+ * mechanism where it stores the created states. When the pipeline needs an
+ * actual state change, a bind call is issued. In the bind call the driver
+ * gets its already translated representation.
+ *
+ * Those semantics mean that the driver doesn't do the repeated translations
+ * of states on every frame, but only once, when a new state is actually
+ * created.
+ *
+ * Even on hardware that doesn't do any kind of state cache, it makes the
+ * driver look a lot neater, plus it avoids all the redundant state
+ * translations on every frame.
+ *
+ * Currently our constant state objects are:
+ * - alpha test
+ * - blend
+ * - depth stencil
+ * - fragment shader
+ * - rasterizer (old setup)
+ * - sampler
+ * - vertex shader
+ *
+ * Things that are not constant state objects include:
+ * - blend_color
+ * - clip_state
+ * - clear_color_state
+ * - constant_buffer
+ * - feedback_state
+ * - framebuffer_state
+ * - polygon_stipple
+ * - scissor_state
+ * - texture_state
+ * - viewport_state
+ *
+ * @author Zack Rusin <zack@tungstengraphics.com>
+ */
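+
+/*
+ * Illustrative sketch (not itself part of the API) of the lookup-or-create
+ * pattern this cache supports; cso_context.c implements the real thing in
+ * e.g. cso_set_blend():
+ *
+ *    key  = cso_construct_key((void *) templ, sizeof(*templ));
+ *    iter = cso_find_state_template(cache, key, CSO_BLEND, (void *) templ);
+ *    if (cso_hash_iter_is_null(iter)) {
+ *       ... translate 'templ' once, wrap it in a struct cso_blend ...
+ *       iter = cso_insert_state(cache, key, CSO_BLEND, cso);
+ *    }
+ */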
+
+#ifndef CSO_CACHE_H
+#define CSO_CACHE_H
+
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+
+/* cso_hash.h is necessary for cso_hash_iter, as MSVC requires structures
+ * returned by value to be fully defined */
+#include "cso_hash.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum cso_cache_type {
+ CSO_BLEND,
+ CSO_SAMPLER,
+ CSO_DEPTH_STENCIL_ALPHA,
+ CSO_RASTERIZER,
+ CSO_FRAGMENT_SHADER,
+ CSO_VERTEX_SHADER
+};
+
+typedef void (*cso_state_callback)(void *ctx, void *obj);
+
+typedef void (*cso_sanitize_callback)(struct cso_hash *hash,
+ enum cso_cache_type type,
+ int max_size,
+ void *user_data);
+
+struct cso_cache;
+
+struct cso_blend {
+ struct pipe_blend_state state;
+ void *data;
+ cso_state_callback delete_state;
+ struct pipe_context *context;
+};
+
+struct cso_depth_stencil_alpha {
+ struct pipe_depth_stencil_alpha_state state;
+ void *data;
+ cso_state_callback delete_state;
+ struct pipe_context *context;
+};
+
+struct cso_rasterizer {
+ struct pipe_rasterizer_state state;
+ void *data;
+ cso_state_callback delete_state;
+ struct pipe_context *context;
+};
+
+struct cso_fragment_shader {
+ struct pipe_shader_state state;
+ void *data;
+ cso_state_callback delete_state;
+ struct pipe_context *context;
+};
+
+struct cso_vertex_shader {
+ struct pipe_shader_state state;
+ void *data;
+ cso_state_callback delete_state;
+ struct pipe_context *context;
+};
+
+struct cso_sampler {
+ struct pipe_sampler_state state;
+ void *data;
+ cso_state_callback delete_state;
+ struct pipe_context *context;
+};
+
+unsigned cso_construct_key(void *item, int item_size);
+
+struct cso_cache *cso_cache_create(void);
+void cso_cache_delete(struct cso_cache *sc);
+
+void cso_cache_set_sanitize_callback(struct cso_cache *sc,
+ cso_sanitize_callback cb,
+ void *user_data);
+
+struct cso_hash_iter cso_insert_state(struct cso_cache *sc,
+ unsigned hash_key, enum cso_cache_type type,
+ void *state);
+struct cso_hash_iter cso_find_state(struct cso_cache *sc,
+ unsigned hash_key, enum cso_cache_type type);
+struct cso_hash_iter cso_find_state_template(struct cso_cache *sc,
+ unsigned hash_key, enum cso_cache_type type,
+ void *templ);
+void cso_for_each_state(struct cso_cache *sc, enum cso_cache_type type,
+ cso_state_callback func, void *user_data);
+void * cso_take_state(struct cso_cache *sc, unsigned hash_key,
+ enum cso_cache_type type);
+
+void cso_set_maximum_cache_size(struct cso_cache *sc, int number);
+int cso_maximum_cache_size(const struct cso_cache *sc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/gallium/auxiliary/cso_cache/cso_context.c b/src/gallium/auxiliary/cso_cache/cso_context.c
new file mode 100644
index 0000000000..36c882acb7
--- /dev/null
+++ b/src/gallium/auxiliary/cso_cache/cso_context.c
@@ -0,0 +1,865 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /**
+ * @file
+ *
+ * Wrap the cso cache & hash mechanisms in a simplified
+ * pipe-driver-specific interface.
+ *
+ * @author Zack Rusin <zack@tungstengraphics.com>
+ * @author Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "pipe/p_state.h"
+#include "util/u_memory.h"
+#include "tgsi/tgsi_parse.h"
+
+#include "cso_cache/cso_context.h"
+#include "cso_cache/cso_cache.h"
+#include "cso_cache/cso_hash.h"
+
+struct cso_context {
+ struct pipe_context *pipe;
+ struct cso_cache *cache;
+
+ struct {
+ void *samplers[PIPE_MAX_SAMPLERS];
+ unsigned nr_samplers;
+ } hw;
+
+ void *samplers[PIPE_MAX_SAMPLERS];
+ unsigned nr_samplers;
+
+ unsigned nr_samplers_saved;
+ void *samplers_saved[PIPE_MAX_SAMPLERS];
+
+ struct pipe_texture *textures[PIPE_MAX_SAMPLERS];
+ uint nr_textures;
+
+ uint nr_textures_saved;
+ struct pipe_texture *textures_saved[PIPE_MAX_SAMPLERS];
+
+ /** Current and saved state.
+ * The saved state is used as a 1-deep stack.
+ */
+ void *blend, *blend_saved;
+ void *depth_stencil, *depth_stencil_saved;
+ void *rasterizer, *rasterizer_saved;
+ void *fragment_shader, *fragment_shader_saved;
+ void *vertex_shader, *vertex_shader_saved;
+
+ struct pipe_framebuffer_state fb, fb_saved;
+ struct pipe_viewport_state vp, vp_saved;
+ struct pipe_blend_color blend_color;
+};
+
+
+static void
+free_framebuffer_state(struct pipe_framebuffer_state *fb);
+
+
+static boolean delete_blend_state(struct cso_context *ctx, void *state)
+{
+ struct cso_blend *cso = (struct cso_blend *)state;
+
+ if (ctx->blend == cso->data)
+ return FALSE;
+
+ if (cso->delete_state)
+ cso->delete_state(cso->context, cso->data);
+ FREE(state);
+ return TRUE;
+}
+
+static boolean delete_depth_stencil_state(struct cso_context *ctx, void *state)
+{
+ struct cso_depth_stencil_alpha *cso = (struct cso_depth_stencil_alpha *)state;
+
+ if (ctx->depth_stencil == cso->data)
+ return FALSE;
+
+ if (cso->delete_state)
+ cso->delete_state(cso->context, cso->data);
+ FREE(state);
+
+ return TRUE;
+}
+
+static boolean delete_sampler_state(struct cso_context *ctx, void *state)
+{
+ struct cso_sampler *cso = (struct cso_sampler *)state;
+ if (cso->delete_state)
+ cso->delete_state(cso->context, cso->data);
+ FREE(state);
+ return TRUE;
+}
+
+static boolean delete_rasterizer_state(struct cso_context *ctx, void *state)
+{
+ struct cso_rasterizer *cso = (struct cso_rasterizer *)state;
+
+ if (ctx->rasterizer == cso->data)
+ return FALSE;
+ if (cso->delete_state)
+ cso->delete_state(cso->context, cso->data);
+ FREE(state);
+ return TRUE;
+}
+
+static boolean delete_fs_state(struct cso_context *ctx, void *state)
+{
+ struct cso_fragment_shader *cso = (struct cso_fragment_shader *)state;
+ if (ctx->fragment_shader == cso->data)
+ return FALSE;
+ if (cso->delete_state)
+ cso->delete_state(cso->context, cso->data);
+ FREE(state);
+ return TRUE;
+}
+
+static boolean delete_vs_state(struct cso_context *ctx, void *state)
+{
+ struct cso_vertex_shader *cso = (struct cso_vertex_shader *)state;
+   if (ctx->vertex_shader == cso->data)
+      return FALSE;
+   if (cso->delete_state)
+      cso->delete_state(cso->context, cso->data);
+   FREE(state);
+   return TRUE;
+}
+
+
+static INLINE boolean delete_cso(struct cso_context *ctx,
+ void *state, enum cso_cache_type type)
+{
+ switch (type) {
+ case CSO_BLEND:
+ return delete_blend_state(ctx, state);
+ break;
+ case CSO_SAMPLER:
+ return delete_sampler_state(ctx, state);
+ break;
+ case CSO_DEPTH_STENCIL_ALPHA:
+ return delete_depth_stencil_state(ctx, state);
+ break;
+ case CSO_RASTERIZER:
+ return delete_rasterizer_state(ctx, state);
+ break;
+ case CSO_FRAGMENT_SHADER:
+ return delete_fs_state(ctx, state);
+ break;
+ case CSO_VERTEX_SHADER:
+ return delete_vs_state(ctx, state);
+ break;
+ default:
+ assert(0);
+ FREE(state);
+ }
+ return FALSE;
+}
+
+static INLINE void sanitize_hash(struct cso_hash *hash, enum cso_cache_type type,
+ int max_size, void *user_data)
+{
+ struct cso_context *ctx = (struct cso_context *)user_data;
+   /* If we're approaching the maximum size, remove a fourth of the entries;
+    * otherwise every subsequent call will go through the same thing. */
+ int hash_size = cso_hash_size(hash);
+ int max_entries = (max_size > hash_size) ? max_size : hash_size;
+ int to_remove = (max_size < max_entries) * max_entries/4;
+ struct cso_hash_iter iter = cso_hash_first_node(hash);
+ if (hash_size > max_size)
+ to_remove += hash_size - max_size;
+ while (to_remove) {
+      /* remove elements until we're good */
+      /* FIXME: currently we pick the nodes to remove at random */
+ void *cso = cso_hash_iter_data(iter);
+ if (delete_cso(ctx, cso, type)) {
+ iter = cso_hash_erase(hash, iter);
+ --to_remove;
+ } else
+ iter = cso_hash_iter_next(iter);
+ }
+}
+
+
+struct cso_context *cso_create_context( struct pipe_context *pipe )
+{
+ struct cso_context *ctx = CALLOC_STRUCT(cso_context);
+ if (ctx == NULL)
+ goto out;
+
+ ctx->cache = cso_cache_create();
+ if (ctx->cache == NULL)
+ goto out;
+ cso_cache_set_sanitize_callback(ctx->cache,
+ sanitize_hash,
+ ctx);
+
+ ctx->pipe = pipe;
+
+ /* Enable for testing: */
+ if (0) cso_set_maximum_cache_size( ctx->cache, 4 );
+
+ return ctx;
+
+out:
+ cso_destroy_context( ctx );
+ return NULL;
+}
+
+
+/**
+ * Prior to context destruction, this function unbinds all state objects.
+ */
+void cso_release_all( struct cso_context *ctx )
+{
+ unsigned i;
+
+ if (ctx->pipe) {
+ ctx->pipe->bind_blend_state( ctx->pipe, NULL );
+ ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL );
+ ctx->pipe->bind_sampler_states( ctx->pipe, 0, NULL );
+ ctx->pipe->bind_depth_stencil_alpha_state( ctx->pipe, NULL );
+ ctx->pipe->bind_fs_state( ctx->pipe, NULL );
+ ctx->pipe->bind_vs_state( ctx->pipe, NULL );
+ }
+
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
+ pipe_texture_reference(&ctx->textures[i], NULL);
+ pipe_texture_reference(&ctx->textures_saved[i], NULL);
+ }
+
+ free_framebuffer_state(&ctx->fb);
+ free_framebuffer_state(&ctx->fb_saved);
+
+ if (ctx->cache) {
+ cso_cache_delete( ctx->cache );
+ ctx->cache = NULL;
+ }
+}
+
+
+void cso_destroy_context( struct cso_context *ctx )
+{
+ if (ctx) {
+ //cso_release_all( ctx );
+ FREE( ctx );
+ }
+}
+
+
+/* These functions will either find the state matching the given template
+ * in the cache, or they will create a new state from the given
+ * template, insert it in the cache and return it.
+ */
+
+/*
+ * If the driver returns 0 from the create method, the data member of
+ * the cso will be set to the template itself.
+ */
+
+enum pipe_error cso_set_blend(struct cso_context *ctx,
+ const struct pipe_blend_state *templ)
+{
+ unsigned hash_key = cso_construct_key((void*)templ, sizeof(struct pipe_blend_state));
+ struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
+ hash_key, CSO_BLEND,
+ (void*)templ);
+ void *handle;
+
+ if (cso_hash_iter_is_null(iter)) {
+ struct cso_blend *cso = MALLOC(sizeof(struct cso_blend));
+ if (!cso)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ memcpy(&cso->state, templ, sizeof(*templ));
+ cso->data = ctx->pipe->create_blend_state(ctx->pipe, &cso->state);
+ cso->delete_state = (cso_state_callback)ctx->pipe->delete_blend_state;
+ cso->context = ctx->pipe;
+
+ iter = cso_insert_state(ctx->cache, hash_key, CSO_BLEND, cso);
+ if (cso_hash_iter_is_null(iter)) {
+ FREE(cso);
+ return PIPE_ERROR_OUT_OF_MEMORY;
+ }
+
+ handle = cso->data;
+ }
+ else {
+ handle = ((struct cso_blend *)cso_hash_iter_data(iter))->data;
+ }
+
+ if (ctx->blend != handle) {
+ ctx->blend = handle;
+ ctx->pipe->bind_blend_state(ctx->pipe, handle);
+ }
+ return PIPE_OK;
+}
+
+void cso_save_blend(struct cso_context *ctx)
+{
+ assert(!ctx->blend_saved);
+ ctx->blend_saved = ctx->blend;
+}
+
+void cso_restore_blend(struct cso_context *ctx)
+{
+ if (ctx->blend != ctx->blend_saved) {
+ ctx->blend = ctx->blend_saved;
+ ctx->pipe->bind_blend_state(ctx->pipe, ctx->blend_saved);
+ }
+ ctx->blend_saved = NULL;
+}
+
+
+
+enum pipe_error cso_single_sampler(struct cso_context *ctx,
+ unsigned idx,
+ const struct pipe_sampler_state *templ)
+{
+ void *handle = NULL;
+
+ if (templ != NULL) {
+ unsigned hash_key = cso_construct_key((void*)templ, sizeof(struct pipe_sampler_state));
+ struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
+ hash_key, CSO_SAMPLER,
+ (void*)templ);
+
+ if (cso_hash_iter_is_null(iter)) {
+ struct cso_sampler *cso = MALLOC(sizeof(struct cso_sampler));
+ if (!cso)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ memcpy(&cso->state, templ, sizeof(*templ));
+ cso->data = ctx->pipe->create_sampler_state(ctx->pipe, &cso->state);
+ cso->delete_state = (cso_state_callback)ctx->pipe->delete_sampler_state;
+ cso->context = ctx->pipe;
+
+ iter = cso_insert_state(ctx->cache, hash_key, CSO_SAMPLER, cso);
+ if (cso_hash_iter_is_null(iter)) {
+ FREE(cso);
+ return PIPE_ERROR_OUT_OF_MEMORY;
+ }
+
+ handle = cso->data;
+ }
+ else {
+ handle = ((struct cso_sampler *)cso_hash_iter_data(iter))->data;
+ }
+ }
+
+ ctx->samplers[idx] = handle;
+ return PIPE_OK;
+}
+
+void cso_single_sampler_done( struct cso_context *ctx )
+{
+ unsigned i;
+
+ /* find highest non-null sampler */
+ for (i = PIPE_MAX_SAMPLERS; i > 0; i--) {
+ if (ctx->samplers[i - 1] != NULL)
+ break;
+ }
+
+ ctx->nr_samplers = i;
+
+ if (ctx->hw.nr_samplers != ctx->nr_samplers ||
+ memcmp(ctx->hw.samplers,
+ ctx->samplers,
+ ctx->nr_samplers * sizeof(void *)) != 0)
+ {
+ memcpy(ctx->hw.samplers, ctx->samplers, ctx->nr_samplers * sizeof(void *));
+ ctx->hw.nr_samplers = ctx->nr_samplers;
+
+ ctx->pipe->bind_sampler_states(ctx->pipe, ctx->nr_samplers, ctx->samplers);
+ }
+}
+
+/*
+ * If the function encounters any errors it will return the
+ * last one. This is done so that we always try to set as
+ * many samplers as possible.
+ */
+enum pipe_error cso_set_samplers( struct cso_context *ctx,
+ unsigned nr,
+ const struct pipe_sampler_state **templates )
+{
+ unsigned i;
+ enum pipe_error temp, error = PIPE_OK;
+
+ /* TODO: fastpath
+ */
+
+ for (i = 0; i < nr; i++) {
+ temp = cso_single_sampler( ctx, i, templates[i] );
+ if (temp != PIPE_OK)
+ error = temp;
+ }
+
+ for ( ; i < ctx->nr_samplers; i++) {
+ temp = cso_single_sampler( ctx, i, NULL );
+ if (temp != PIPE_OK)
+ error = temp;
+ }
+
+ cso_single_sampler_done( ctx );
+
+ return error;
+}
+
+void cso_save_samplers(struct cso_context *ctx)
+{
+ ctx->nr_samplers_saved = ctx->nr_samplers;
+ memcpy(ctx->samplers_saved, ctx->samplers, sizeof(ctx->samplers));
+}
+
+void cso_restore_samplers(struct cso_context *ctx)
+{
+ ctx->nr_samplers = ctx->nr_samplers_saved;
+ memcpy(ctx->samplers, ctx->samplers_saved, sizeof(ctx->samplers));
+ cso_single_sampler_done( ctx );
+}
+
+
+enum pipe_error cso_set_sampler_textures( struct cso_context *ctx,
+ uint count,
+ struct pipe_texture **textures )
+{
+ uint i;
+
+ ctx->nr_textures = count;
+
+ for (i = 0; i < count; i++)
+ pipe_texture_reference(&ctx->textures[i], textures[i]);
+ for ( ; i < PIPE_MAX_SAMPLERS; i++)
+ pipe_texture_reference(&ctx->textures[i], NULL);
+
+ ctx->pipe->set_sampler_textures(ctx->pipe, count, textures);
+
+ return PIPE_OK;
+}
+
+void cso_save_sampler_textures( struct cso_context *ctx )
+{
+ uint i;
+
+ ctx->nr_textures_saved = ctx->nr_textures;
+ for (i = 0; i < ctx->nr_textures; i++) {
+ assert(!ctx->textures_saved[i]);
+ pipe_texture_reference(&ctx->textures_saved[i], ctx->textures[i]);
+ }
+}
+
+void cso_restore_sampler_textures( struct cso_context *ctx )
+{
+ uint i;
+
+ ctx->nr_textures = ctx->nr_textures_saved;
+
+ for (i = 0; i < ctx->nr_textures; i++) {
+ pipe_texture_reference(&ctx->textures[i], NULL);
+ ctx->textures[i] = ctx->textures_saved[i];
+ ctx->textures_saved[i] = NULL;
+ }
+ for ( ; i < PIPE_MAX_SAMPLERS; i++)
+ pipe_texture_reference(&ctx->textures[i], NULL);
+
+ ctx->pipe->set_sampler_textures(ctx->pipe, ctx->nr_textures, ctx->textures);
+
+ ctx->nr_textures_saved = 0;
+}
+
+
+
+enum pipe_error cso_set_depth_stencil_alpha(struct cso_context *ctx,
+ const struct pipe_depth_stencil_alpha_state *templ)
+{
+ unsigned hash_key = cso_construct_key((void*)templ,
+ sizeof(struct pipe_depth_stencil_alpha_state));
+ struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
+ hash_key,
+ CSO_DEPTH_STENCIL_ALPHA,
+ (void*)templ);
+ void *handle;
+
+ if (cso_hash_iter_is_null(iter)) {
+ struct cso_depth_stencil_alpha *cso = MALLOC(sizeof(struct cso_depth_stencil_alpha));
+ if (!cso)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ memcpy(&cso->state, templ, sizeof(*templ));
+ cso->data = ctx->pipe->create_depth_stencil_alpha_state(ctx->pipe, &cso->state);
+ cso->delete_state = (cso_state_callback)ctx->pipe->delete_depth_stencil_alpha_state;
+ cso->context = ctx->pipe;
+
+ iter = cso_insert_state(ctx->cache, hash_key, CSO_DEPTH_STENCIL_ALPHA, cso);
+ if (cso_hash_iter_is_null(iter)) {
+ FREE(cso);
+ return PIPE_ERROR_OUT_OF_MEMORY;
+ }
+
+ handle = cso->data;
+ }
+ else {
+ handle = ((struct cso_depth_stencil_alpha *)cso_hash_iter_data(iter))->data;
+ }
+
+ if (ctx->depth_stencil != handle) {
+ ctx->depth_stencil = handle;
+ ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, handle);
+ }
+ return PIPE_OK;
+}
+
+void cso_save_depth_stencil_alpha(struct cso_context *ctx)
+{
+ assert(!ctx->depth_stencil_saved);
+ ctx->depth_stencil_saved = ctx->depth_stencil;
+}
+
+void cso_restore_depth_stencil_alpha(struct cso_context *ctx)
+{
+ if (ctx->depth_stencil != ctx->depth_stencil_saved) {
+ ctx->depth_stencil = ctx->depth_stencil_saved;
+ ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, ctx->depth_stencil_saved);
+ }
+ ctx->depth_stencil_saved = NULL;
+}
+
+
+
+enum pipe_error cso_set_rasterizer(struct cso_context *ctx,
+ const struct pipe_rasterizer_state *templ)
+{
+ unsigned hash_key = cso_construct_key((void*)templ,
+ sizeof(struct pipe_rasterizer_state));
+ struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
+ hash_key, CSO_RASTERIZER,
+ (void*)templ);
+ void *handle = NULL;
+
+ if (cso_hash_iter_is_null(iter)) {
+ struct cso_rasterizer *cso = MALLOC(sizeof(struct cso_rasterizer));
+ if (!cso)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ memcpy(&cso->state, templ, sizeof(*templ));
+ cso->data = ctx->pipe->create_rasterizer_state(ctx->pipe, &cso->state);
+ cso->delete_state = (cso_state_callback)ctx->pipe->delete_rasterizer_state;
+ cso->context = ctx->pipe;
+
+ iter = cso_insert_state(ctx->cache, hash_key, CSO_RASTERIZER, cso);
+ if (cso_hash_iter_is_null(iter)) {
+ FREE(cso);
+ return PIPE_ERROR_OUT_OF_MEMORY;
+ }
+
+ handle = cso->data;
+ }
+ else {
+ handle = ((struct cso_rasterizer *)cso_hash_iter_data(iter))->data;
+ }
+
+ if (ctx->rasterizer != handle) {
+ ctx->rasterizer = handle;
+ ctx->pipe->bind_rasterizer_state(ctx->pipe, handle);
+ }
+ return PIPE_OK;
+}
+
+void cso_save_rasterizer(struct cso_context *ctx)
+{
+ assert(!ctx->rasterizer_saved);
+ ctx->rasterizer_saved = ctx->rasterizer;
+}
+
+void cso_restore_rasterizer(struct cso_context *ctx)
+{
+ if (ctx->rasterizer != ctx->rasterizer_saved) {
+ ctx->rasterizer = ctx->rasterizer_saved;
+ ctx->pipe->bind_rasterizer_state(ctx->pipe, ctx->rasterizer_saved);
+ }
+ ctx->rasterizer_saved = NULL;
+}
+
+
+
+enum pipe_error cso_set_fragment_shader_handle(struct cso_context *ctx,
+ void *handle )
+{
+ if (ctx->fragment_shader != handle) {
+ ctx->fragment_shader = handle;
+ ctx->pipe->bind_fs_state(ctx->pipe, handle);
+ }
+ return PIPE_OK;
+}
+
+void cso_delete_fragment_shader(struct cso_context *ctx, void *handle )
+{
+ if (handle == ctx->fragment_shader) {
+ /* unbind before deleting */
+ ctx->pipe->bind_fs_state(ctx->pipe, NULL);
+ ctx->fragment_shader = NULL;
+ }
+ ctx->pipe->delete_fs_state(ctx->pipe, handle);
+}
+
+/* Not really working:
+ */
+#if 0
+enum pipe_error cso_set_fragment_shader(struct cso_context *ctx,
+ const struct pipe_shader_state *templ)
+{
+ const struct tgsi_token *tokens = templ->tokens;
+ unsigned num_tokens = tgsi_num_tokens(tokens);
+ size_t tokens_size = num_tokens*sizeof(struct tgsi_token);
+ unsigned hash_key = cso_construct_key((void*)tokens, tokens_size);
+ struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
+ hash_key,
+ CSO_FRAGMENT_SHADER,
+ (void*)tokens);
+ void *handle = NULL;
+
+ if (cso_hash_iter_is_null(iter)) {
+ struct cso_fragment_shader *cso = MALLOC(sizeof(struct cso_fragment_shader) + tokens_size);
+ struct tgsi_token *cso_tokens = (struct tgsi_token *)((char *)cso + sizeof(*cso));
+
+ if (!cso)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ memcpy(cso_tokens, tokens, tokens_size);
+ cso->state.tokens = cso_tokens;
+ cso->data = ctx->pipe->create_fs_state(ctx->pipe, &cso->state);
+ cso->delete_state = (cso_state_callback)ctx->pipe->delete_fs_state;
+ cso->context = ctx->pipe;
+
+ iter = cso_insert_state(ctx->cache, hash_key, CSO_FRAGMENT_SHADER, cso);
+ if (cso_hash_iter_is_null(iter)) {
+ FREE(cso);
+ return PIPE_ERROR_OUT_OF_MEMORY;
+ }
+
+ handle = cso->data;
+ }
+ else {
+ handle = ((struct cso_fragment_shader *)cso_hash_iter_data(iter))->data;
+ }
+
+ return cso_set_fragment_shader_handle( ctx, handle );
+}
+#endif
+
+void cso_save_fragment_shader(struct cso_context *ctx)
+{
+ assert(!ctx->fragment_shader_saved);
+ ctx->fragment_shader_saved = ctx->fragment_shader;
+}
+
+void cso_restore_fragment_shader(struct cso_context *ctx)
+{
+ if (ctx->fragment_shader_saved != ctx->fragment_shader) {
+ ctx->pipe->bind_fs_state(ctx->pipe, ctx->fragment_shader_saved);
+ ctx->fragment_shader = ctx->fragment_shader_saved;
+ }
+ ctx->fragment_shader_saved = NULL;
+}
+
+
+enum pipe_error cso_set_vertex_shader_handle(struct cso_context *ctx,
+ void *handle )
+{
+ if (ctx->vertex_shader != handle) {
+ ctx->vertex_shader = handle;
+ ctx->pipe->bind_vs_state(ctx->pipe, handle);
+ }
+ return PIPE_OK;
+}
+
+void cso_delete_vertex_shader(struct cso_context *ctx, void *handle )
+{
+ if (handle == ctx->vertex_shader) {
+ /* unbind before deleting */
+ ctx->pipe->bind_vs_state(ctx->pipe, NULL);
+ ctx->vertex_shader = NULL;
+ }
+ ctx->pipe->delete_vs_state(ctx->pipe, handle);
+}
+
+
+/* Not really working:
+ */
+#if 0
+enum pipe_error cso_set_vertex_shader(struct cso_context *ctx,
+ const struct pipe_shader_state *templ)
+{
+ unsigned hash_key = cso_construct_key((void*)templ,
+ sizeof(struct pipe_shader_state));
+ struct cso_hash_iter iter = cso_find_state_template(ctx->cache,
+ hash_key, CSO_VERTEX_SHADER,
+ (void*)templ);
+ void *handle = NULL;
+
+ if (cso_hash_iter_is_null(iter)) {
+ struct cso_vertex_shader *cso = MALLOC(sizeof(struct cso_vertex_shader));
+
+ if (!cso)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+      memcpy(&cso->state, templ, sizeof(*templ));
+ cso->data = ctx->pipe->create_vs_state(ctx->pipe, &cso->state);
+ cso->delete_state = (cso_state_callback)ctx->pipe->delete_vs_state;
+ cso->context = ctx->pipe;
+
+ iter = cso_insert_state(ctx->cache, hash_key, CSO_VERTEX_SHADER, cso);
+ if (cso_hash_iter_is_null(iter)) {
+ FREE(cso);
+ return PIPE_ERROR_OUT_OF_MEMORY;
+ }
+
+ handle = cso->data;
+ }
+ else {
+ handle = ((struct cso_vertex_shader *)cso_hash_iter_data(iter))->data;
+ }
+
+ return cso_set_vertex_shader_handle( ctx, handle );
+}
+#endif
+
+
+
+void cso_save_vertex_shader(struct cso_context *ctx)
+{
+ assert(!ctx->vertex_shader_saved);
+ ctx->vertex_shader_saved = ctx->vertex_shader;
+}
+
+void cso_restore_vertex_shader(struct cso_context *ctx)
+{
+ if (ctx->vertex_shader_saved != ctx->vertex_shader) {
+ ctx->pipe->bind_vs_state(ctx->pipe, ctx->vertex_shader_saved);
+ ctx->vertex_shader = ctx->vertex_shader_saved;
+ }
+ ctx->vertex_shader_saved = NULL;
+}
+
+
+/**
+ * Copy framebuffer state from src to dst with refcounting of surfaces.
+ */
+static void
+copy_framebuffer_state(struct pipe_framebuffer_state *dst,
+ const struct pipe_framebuffer_state *src)
+{
+ uint i;
+
+ dst->width = src->width;
+ dst->height = src->height;
+ dst->nr_cbufs = src->nr_cbufs;
+ for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
+ pipe_surface_reference(&dst->cbufs[i], src->cbufs[i]);
+ }
+ pipe_surface_reference(&dst->zsbuf, src->zsbuf);
+}
+
+
+static void
+free_framebuffer_state(struct pipe_framebuffer_state *fb)
+{
+ uint i;
+
+ for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
+ pipe_surface_reference(&fb->cbufs[i], NULL);
+ }
+ pipe_surface_reference(&fb->zsbuf, NULL);
+}
+
+
+enum pipe_error cso_set_framebuffer(struct cso_context *ctx,
+ const struct pipe_framebuffer_state *fb)
+{
+ if (memcmp(&ctx->fb, fb, sizeof(*fb)) != 0) {
+ copy_framebuffer_state(&ctx->fb, fb);
+ ctx->pipe->set_framebuffer_state(ctx->pipe, fb);
+ }
+ return PIPE_OK;
+}
+
+void cso_save_framebuffer(struct cso_context *ctx)
+{
+ copy_framebuffer_state(&ctx->fb_saved, &ctx->fb);
+}
+
+void cso_restore_framebuffer(struct cso_context *ctx)
+{
+ if (memcmp(&ctx->fb, &ctx->fb_saved, sizeof(ctx->fb))) {
+ copy_framebuffer_state(&ctx->fb, &ctx->fb_saved);
+ ctx->pipe->set_framebuffer_state(ctx->pipe, &ctx->fb);
+ free_framebuffer_state(&ctx->fb_saved);
+ }
+}
+
+
+enum pipe_error cso_set_viewport(struct cso_context *ctx,
+ const struct pipe_viewport_state *vp)
+{
+ if (memcmp(&ctx->vp, vp, sizeof(*vp))) {
+ ctx->vp = *vp;
+ ctx->pipe->set_viewport_state(ctx->pipe, vp);
+ }
+ return PIPE_OK;
+}
+
+void cso_save_viewport(struct cso_context *ctx)
+{
+ ctx->vp_saved = ctx->vp;
+}
+
+
+void cso_restore_viewport(struct cso_context *ctx)
+{
+ if (memcmp(&ctx->vp, &ctx->vp_saved, sizeof(ctx->vp))) {
+ ctx->vp = ctx->vp_saved;
+ ctx->pipe->set_viewport_state(ctx->pipe, &ctx->vp);
+ }
+}
+
+
+
+
+enum pipe_error cso_set_blend_color(struct cso_context *ctx,
+ const struct pipe_blend_color *bc)
+{
+ if (memcmp(&ctx->blend_color, bc, sizeof(ctx->blend_color))) {
+ ctx->blend_color = *bc;
+ ctx->pipe->set_blend_color(ctx->pipe, bc);
+ }
+ return PIPE_OK;
+}
diff --git a/src/gallium/auxiliary/cso_cache/cso_context.h b/src/gallium/auxiliary/cso_cache/cso_context.h
new file mode 100644
index 0000000000..b04e98bfa1
--- /dev/null
+++ b/src/gallium/auxiliary/cso_cache/cso_context.h
@@ -0,0 +1,145 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef CSO_CONTEXT_H
+#define CSO_CONTEXT_H
+
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "pipe/p_error.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct cso_context;
+
+struct cso_context *cso_create_context( struct pipe_context *pipe );
+
+void cso_release_all( struct cso_context *ctx );
+
+void cso_destroy_context( struct cso_context *cso );
+
+
+
+enum pipe_error cso_set_blend( struct cso_context *cso,
+ const struct pipe_blend_state *blend );
+void cso_save_blend(struct cso_context *cso);
+void cso_restore_blend(struct cso_context *cso);
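+
+/* The saved state acts as a 1-deep stack.  A typical use, for a utility
+ * routine that temporarily overrides the blend state, is (illustrative
+ * sketch; 'temp_blend' is made up):
+ *
+ *    cso_save_blend(cso);
+ *    cso_set_blend(cso, &temp_blend);
+ *    ... draw ...
+ *    cso_restore_blend(cso);
+ */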
+
+
+
+enum pipe_error cso_set_depth_stencil_alpha( struct cso_context *cso,
+ const struct pipe_depth_stencil_alpha_state *dsa );
+void cso_save_depth_stencil_alpha(struct cso_context *cso);
+void cso_restore_depth_stencil_alpha(struct cso_context *cso);
+
+
+
+enum pipe_error cso_set_rasterizer( struct cso_context *cso,
+ const struct pipe_rasterizer_state *rasterizer );
+void cso_save_rasterizer(struct cso_context *cso);
+void cso_restore_rasterizer(struct cso_context *cso);
+
+
+
+enum pipe_error cso_set_samplers( struct cso_context *cso,
+ unsigned count,
+ const struct pipe_sampler_state **states );
+void cso_save_samplers(struct cso_context *cso);
+void cso_restore_samplers(struct cso_context *cso);
+
+/* Alternate interface to support state trackers that like to modify
+ * samplers one at a time:
+ */
+enum pipe_error cso_single_sampler( struct cso_context *cso,
+ unsigned nr,
+ const struct pipe_sampler_state *states );
+
+void cso_single_sampler_done( struct cso_context *cso );
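+
+/* A typical sequence for this one-at-a-time interface might look like
+ * the following sketch ("sampler0"/"sampler1" stand for caller-owned
+ * pipe_sampler_state templates):
+ *
+ *    cso_single_sampler(cso, 0, &sampler0);
+ *    cso_single_sampler(cso, 1, &sampler1);
+ *    cso_single_sampler_done(cso);
+ */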
+
+
+
+enum pipe_error cso_set_sampler_textures( struct cso_context *cso,
+ uint count,
+ struct pipe_texture **textures );
+void cso_save_sampler_textures( struct cso_context *cso );
+void cso_restore_sampler_textures( struct cso_context *cso );
+
+
+
+/* These aren't really sensible -- most of the time the API provides
+ * object semantics for shaders anyway, and in the cases where it doesn't
+ * (e.g. Mesa's internally generated texenv programs), it is up to
+ * the state tracker to implement its own specialized caching.
+ */
+enum pipe_error cso_set_fragment_shader_handle(struct cso_context *ctx,
+ void *handle );
+void cso_delete_fragment_shader(struct cso_context *ctx, void *handle );
+/*
+enum pipe_error cso_set_fragment_shader( struct cso_context *cso,
+ const struct pipe_shader_state *shader );
+*/
+void cso_save_fragment_shader(struct cso_context *cso);
+void cso_restore_fragment_shader(struct cso_context *cso);
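+
+/* A save/override/restore sequence, as used by code that temporarily
+ * substitutes its own shader, might look like this sketch ("my_fs" is a
+ * hypothetical handle obtained from the driver's shader creation hook):
+ *
+ *    cso_save_fragment_shader(cso);
+ *    cso_set_fragment_shader_handle(cso, my_fs);
+ *    ... draw ...
+ *    cso_restore_fragment_shader(cso);
+ */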
+
+
+enum pipe_error cso_set_vertex_shader_handle(struct cso_context *ctx,
+ void *handle );
+void cso_delete_vertex_shader(struct cso_context *ctx, void *handle );
+/*
+enum pipe_error cso_set_vertex_shader( struct cso_context *cso,
+ const struct pipe_shader_state *shader );
+*/
+void cso_save_vertex_shader(struct cso_context *cso);
+void cso_restore_vertex_shader(struct cso_context *cso);
+
+
+
+enum pipe_error cso_set_framebuffer(struct cso_context *cso,
+ const struct pipe_framebuffer_state *fb);
+void cso_save_framebuffer(struct cso_context *cso);
+void cso_restore_framebuffer(struct cso_context *cso);
+
+
+enum pipe_error cso_set_viewport(struct cso_context *cso,
+ const struct pipe_viewport_state *vp);
+void cso_save_viewport(struct cso_context *cso);
+void cso_restore_viewport(struct cso_context *cso);
+
+
+enum pipe_error cso_set_blend_color(struct cso_context *cso,
+ const struct pipe_blend_color *bc);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/gallium/auxiliary/cso_cache/cso_hash.c b/src/gallium/auxiliary/cso_cache/cso_hash.c
new file mode 100644
index 0000000000..288cef7b6f
--- /dev/null
+++ b/src/gallium/auxiliary/cso_cache/cso_hash.c
@@ -0,0 +1,439 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Zack Rusin <zack@tungstengraphics.com>
+ */
+
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+
+#include "cso_hash.h"
+
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+
+static const int MinNumBits = 4;
+
+static const unsigned char prime_deltas[] = {
+ 0, 0, 1, 3, 1, 5, 3, 3, 1, 9, 7, 5, 3, 9, 25, 3,
+ 1, 21, 3, 21, 7, 15, 9, 5, 3, 29, 15, 0, 0, 0, 0, 0
+};
+
+static int primeForNumBits(int numBits)
+{
+ return (1 << numBits) + prime_deltas[numBits];
+}
+
+/*
+ Returns the smallest integer n such that
+ primeForNumBits(n) >= hint.
+*/
+static int countBits(int hint)
+{
+ int numBits = 0;
+ int bits = hint;
+
+ while (bits > 1) {
+ bits >>= 1;
+ numBits++;
+ }
+
+ if (numBits >= (int)sizeof(prime_deltas)) {
+ numBits = sizeof(prime_deltas) - 1;
+ } else if (primeForNumBits(numBits) < hint) {
+ ++numBits;
+ }
+ return numBits;
+}
+
+struct cso_node {
+ struct cso_node *next;
+ unsigned key;
+ void *value;
+};
+
+struct cso_hash_data {
+ struct cso_node *fakeNext;
+ struct cso_node **buckets;
+ int size;
+ int nodeSize;
+ short userNumBits;
+ short numBits;
+ int numBuckets;
+};
+
+struct cso_hash {
+ union {
+ struct cso_hash_data *d;
+ struct cso_node *e;
+ } data;
+};
+
+static void *cso_data_allocate_node(struct cso_hash_data *hash)
+{
+ return MALLOC(hash->nodeSize);
+}
+
+static void cso_free_node(struct cso_node *node)
+{
+ FREE(node);
+}
+
+static struct cso_node *
+cso_hash_create_node(struct cso_hash *hash,
+ unsigned akey, void *avalue,
+ struct cso_node **anextNode)
+{
+ struct cso_node *node = cso_data_allocate_node(hash->data.d);
+
+ if (!node)
+ return NULL;
+
+ node->key = akey;
+ node->value = avalue;
+
+ node->next = (struct cso_node*)(*anextNode);
+ *anextNode = node;
+ ++hash->data.d->size;
+ return node;
+}
+
+static void cso_data_rehash(struct cso_hash_data *hash, int hint)
+{
+ if (hint < 0) {
+ hint = countBits(-hint);
+ if (hint < MinNumBits)
+ hint = MinNumBits;
+ hash->userNumBits = (short)hint;
+ while (primeForNumBits(hint) < (hash->size >> 1))
+ ++hint;
+ } else if (hint < MinNumBits) {
+ hint = MinNumBits;
+ }
+
+ if (hash->numBits != hint) {
+ struct cso_node *e = (struct cso_node *)(hash);
+ struct cso_node **oldBuckets = hash->buckets;
+ int oldNumBuckets = hash->numBuckets;
+ int i = 0;
+
+ hash->numBits = (short)hint;
+ hash->numBuckets = primeForNumBits(hint);
+ hash->buckets = MALLOC(sizeof(struct cso_node*) * hash->numBuckets);
+ for (i = 0; i < hash->numBuckets; ++i)
+ hash->buckets[i] = e;
+
+ for (i = 0; i < oldNumBuckets; ++i) {
+ struct cso_node *firstNode = oldBuckets[i];
+ while (firstNode != e) {
+ unsigned h = firstNode->key;
+ struct cso_node *lastNode = firstNode;
+ struct cso_node *afterLastNode;
+ struct cso_node **beforeFirstNode;
+
+ while (lastNode->next != e && lastNode->next->key == h)
+ lastNode = lastNode->next;
+
+ afterLastNode = lastNode->next;
+ beforeFirstNode = &hash->buckets[h % hash->numBuckets];
+ while (*beforeFirstNode != e)
+ beforeFirstNode = &(*beforeFirstNode)->next;
+ lastNode->next = *beforeFirstNode;
+ *beforeFirstNode = firstNode;
+ firstNode = afterLastNode;
+ }
+ }
+ FREE(oldBuckets);
+ }
+}
+
+static void cso_data_might_grow(struct cso_hash_data *hash)
+{
+ if (hash->size >= hash->numBuckets)
+ cso_data_rehash(hash, hash->numBits + 1);
+}
+
+static void cso_data_has_shrunk(struct cso_hash_data *hash)
+{
+ if (hash->size <= (hash->numBuckets >> 3) &&
+ hash->numBits > hash->userNumBits) {
+ int max = MAX(hash->numBits-2, hash->userNumBits);
+ cso_data_rehash(hash, max);
+ }
+}
+
+static struct cso_node *cso_data_first_node(struct cso_hash_data *hash)
+{
+ struct cso_node *e = (struct cso_node *)(hash);
+ struct cso_node **bucket = hash->buckets;
+ int n = hash->numBuckets;
+ while (n--) {
+ if (*bucket != e)
+ return *bucket;
+ ++bucket;
+ }
+ return e;
+}
+
+static struct cso_node **cso_hash_find_node(struct cso_hash *hash, unsigned akey)
+{
+ struct cso_node **node;
+
+ if (hash->data.d->numBuckets) {
+ node = (struct cso_node **)(&hash->data.d->buckets[akey % hash->data.d->numBuckets]);
+ assert(*node == hash->data.e || (*node)->next);
+ while (*node != hash->data.e && (*node)->key != akey)
+ node = &(*node)->next;
+ } else {
+ node = (struct cso_node **)((const struct cso_node * const *)(&hash->data.e));
+ }
+ return node;
+}
+
+struct cso_hash_iter cso_hash_insert(struct cso_hash *hash,
+ unsigned key, void *data)
+{
+ cso_data_might_grow(hash->data.d);
+
+ {
+ struct cso_node **nextNode = cso_hash_find_node(hash, key);
+ struct cso_node *node = cso_hash_create_node(hash, key, data, nextNode);
+ if (!node) {
+ struct cso_hash_iter null_iter = {hash, 0};
+ return null_iter;
+ }
+
+ {
+ struct cso_hash_iter iter = {hash, node};
+ return iter;
+ }
+ }
+}
+
+struct cso_hash * cso_hash_create(void)
+{
+ struct cso_hash *hash = MALLOC_STRUCT(cso_hash);
+ if (!hash)
+ return NULL;
+
+ hash->data.d = MALLOC_STRUCT(cso_hash_data);
+ if (!hash->data.d) {
+ FREE(hash);
+ return NULL;
+ }
+
+ hash->data.d->fakeNext = 0;
+ hash->data.d->buckets = 0;
+ hash->data.d->size = 0;
+ hash->data.d->nodeSize = sizeof(struct cso_node);
+ hash->data.d->userNumBits = (short)MinNumBits;
+ hash->data.d->numBits = 0;
+ hash->data.d->numBuckets = 0;
+
+ return hash;
+}
+
+void cso_hash_delete(struct cso_hash *hash)
+{
+ struct cso_node *e_for_x = (struct cso_node *)(hash->data.d);
+ struct cso_node **bucket = (struct cso_node **)(hash->data.d->buckets);
+ int n = hash->data.d->numBuckets;
+ while (n--) {
+ struct cso_node *cur = *bucket++;
+ while (cur != e_for_x) {
+ struct cso_node *next = cur->next;
+ cso_free_node(cur);
+ cur = next;
+ }
+ }
+ FREE(hash->data.d->buckets);
+ FREE(hash->data.d);
+ FREE(hash);
+}
+
+struct cso_hash_iter cso_hash_find(struct cso_hash *hash,
+ unsigned key)
+{
+ struct cso_node **nextNode = cso_hash_find_node(hash, key);
+ struct cso_hash_iter iter = {hash, *nextNode};
+ return iter;
+}
+
+unsigned cso_hash_iter_key(struct cso_hash_iter iter)
+{
+ if (!iter.node || iter.hash->data.e == iter.node)
+ return 0;
+ return iter.node->key;
+}
+
+void * cso_hash_iter_data(struct cso_hash_iter iter)
+{
+ if (!iter.node || iter.hash->data.e == iter.node)
+ return 0;
+ return iter.node->value;
+}
+
+static struct cso_node *cso_hash_data_next(struct cso_node *node)
+{
+ union {
+ struct cso_node *next;
+ struct cso_node *e;
+ struct cso_hash_data *d;
+ } a;
+ int start;
+ struct cso_node **bucket;
+ int n;
+
+ a.next = node->next;
+ if (!a.next) {
+ debug_printf("iterating beyond the last element\n");
+ return 0;
+ }
+ if (a.next->next)
+ return a.next;
+
+ start = (node->key % a.d->numBuckets) + 1;
+ bucket = a.d->buckets + start;
+ n = a.d->numBuckets - start;
+ while (n--) {
+ if (*bucket != a.e)
+ return *bucket;
+ ++bucket;
+ }
+ return a.e;
+}
+
+
+static struct cso_node *cso_hash_data_prev(struct cso_node *node)
+{
+ union {
+ struct cso_node *e;
+ struct cso_hash_data *d;
+ } a;
+ int start;
+ struct cso_node *sentinel;
+ struct cso_node **bucket;
+
+ a.e = node;
+ while (a.e->next)
+ a.e = a.e->next;
+
+ if (node == a.e)
+ start = a.d->numBuckets - 1;
+ else
+ start = node->key % a.d->numBuckets;
+
+ sentinel = node;
+ bucket = a.d->buckets + start;
+ while (start >= 0) {
+ if (*bucket != sentinel) {
+ struct cso_node *prev = *bucket;
+ while (prev->next != sentinel)
+ prev = prev->next;
+ return prev;
+ }
+
+ sentinel = a.e;
+ --bucket;
+ --start;
+ }
+ debug_printf("iterating backward beyond first element\n");
+ return a.e;
+}
+
+struct cso_hash_iter cso_hash_iter_next(struct cso_hash_iter iter)
+{
+ struct cso_hash_iter next = {iter.hash, cso_hash_data_next(iter.node)};
+ return next;
+}
+
+int cso_hash_iter_is_null(struct cso_hash_iter iter)
+{
+ if (!iter.node || iter.node == iter.hash->data.e)
+ return 1;
+ return 0;
+}
+
+void * cso_hash_take(struct cso_hash *hash,
+ unsigned akey)
+{
+ struct cso_node **node = cso_hash_find_node(hash, akey);
+ if (*node != hash->data.e) {
+ void *t = (*node)->value;
+ struct cso_node *next = (*node)->next;
+ cso_free_node(*node);
+ *node = next;
+ --hash->data.d->size;
+ cso_data_has_shrunk(hash->data.d);
+ return t;
+ }
+ return 0;
+}
+
+struct cso_hash_iter cso_hash_iter_prev(struct cso_hash_iter iter)
+{
+ struct cso_hash_iter prev = {iter.hash,
+ cso_hash_data_prev(iter.node)};
+ return prev;
+}
+
+struct cso_hash_iter cso_hash_first_node(struct cso_hash *hash)
+{
+ struct cso_hash_iter iter = {hash, cso_data_first_node(hash->data.d)};
+ return iter;
+}
+
+int cso_hash_size(struct cso_hash *hash)
+{
+ return hash->data.d->size;
+}
+
+struct cso_hash_iter cso_hash_erase(struct cso_hash *hash, struct cso_hash_iter iter)
+{
+ struct cso_hash_iter ret = iter;
+ struct cso_node *node = iter.node;
+ struct cso_node **node_ptr;
+
+ if (node == hash->data.e)
+ return iter;
+
+ ret = cso_hash_iter_next(ret);
+ node_ptr = (struct cso_node**)(&hash->data.d->buckets[node->key % hash->data.d->numBuckets]);
+ while (*node_ptr != node)
+ node_ptr = &(*node_ptr)->next;
+ *node_ptr = node->next;
+ cso_free_node(node);
+ --hash->data.d->size;
+ return ret;
+}
+
+boolean cso_hash_contains(struct cso_hash *hash, unsigned key)
+{
+ struct cso_node **node = cso_hash_find_node(hash, key);
+ return (*node != hash->data.e);
+}
diff --git a/src/gallium/auxiliary/cso_cache/cso_hash.h b/src/gallium/auxiliary/cso_cache/cso_hash.h
new file mode 100644
index 0000000000..5891c325fa
--- /dev/null
+++ b/src/gallium/auxiliary/cso_cache/cso_hash.h
@@ -0,0 +1,129 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Hash table implementation.
+ *
+ * This file provides a hash implementation that is capable of dealing
+ * with collisions. It stores colliding entries in a linked list. All
+ * functions operating on the hash return an iterator. The iterator
+ * itself points to the collision list. If there was no collision,
+ * the list will have just one entry; otherwise client code should
+ * iterate over the entries to find the exact entry among the ones that
+ * had the same key (e.g. memcmp could be used on the data to check
+ * that).
+ *
+ * @author Zack Rusin <zack@tungstengraphics.com>
+ */
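+
+/* A minimal lookup sketch following the scheme described above ("templ"
+ * and "size" are hypothetical caller-side template data and its size):
+ *
+ *    struct cso_hash_iter iter = cso_hash_find(hash, key);
+ *    while (!cso_hash_iter_is_null(iter) &&
+ *           cso_hash_iter_key(iter) == key) {
+ *       if (memcmp(cso_hash_iter_data(iter), templ, size) == 0)
+ *          break;
+ *       iter = cso_hash_iter_next(iter);
+ *    }
+ */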
+
+#ifndef CSO_HASH_H
+#define CSO_HASH_H
+
+#include "pipe/p_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct cso_hash;
+struct cso_node;
+
+
+struct cso_hash_iter {
+ struct cso_hash *hash;
+ struct cso_node *node;
+};
+
+
+struct cso_hash *cso_hash_create(void);
+void cso_hash_delete(struct cso_hash *hash);
+
+
+int cso_hash_size(struct cso_hash *hash);
+
+
+/**
+ * Adds data with the given key to the hash. If an entry with the given
+ * key is already in the hash, the new entry is inserted before it
+ * in the collision list.
+ * Returns an iterator pointing to the inserted item in the hash.
+ */
+struct cso_hash_iter cso_hash_insert(struct cso_hash *hash, unsigned key,
+ void *data);
+/**
+ * Removes the item pointed to by the given iterator from the hash.
+ * Note that the data itself is not erased; if it was a malloc'ed pointer
+ * it will have to be freed by the caller after this function returns.
+ * Returns an iterator pointing to the item after the removed one in
+ * the hash.
+ */
+struct cso_hash_iter cso_hash_erase(struct cso_hash *hash, struct cso_hash_iter iter);
+
+void *cso_hash_take(struct cso_hash *hash, unsigned key);
+
+
+
+struct cso_hash_iter cso_hash_first_node(struct cso_hash *hash);
+
+/**
+ * Return an iterator pointing to the first entry in the collision list.
+ */
+struct cso_hash_iter cso_hash_find(struct cso_hash *hash, unsigned key);
+
+/**
+ * Returns true if a value with the given key exists in the hash
+ */
+boolean cso_hash_contains(struct cso_hash *hash, unsigned key);
+
+
+int cso_hash_iter_is_null(struct cso_hash_iter iter);
+unsigned cso_hash_iter_key(struct cso_hash_iter iter);
+void *cso_hash_iter_data(struct cso_hash_iter iter);
+
+
+struct cso_hash_iter cso_hash_iter_next(struct cso_hash_iter iter);
+struct cso_hash_iter cso_hash_iter_prev(struct cso_hash_iter iter);
+
+
+/**
+ * Convenience routine that iterates over the collision list, doing a memory
+ * comparison to find the entry in the list that is a direct copy of the given
+ * template, and returns that entry.
+ */
+void *cso_hash_find_data_from_template( struct cso_hash *hash,
+ unsigned hash_key,
+ void *templ,
+ int size );
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/gallium/auxiliary/draw/Makefile b/src/gallium/auxiliary/draw/Makefile
new file mode 100644
index 0000000000..5041dcc072
--- /dev/null
+++ b/src/gallium/auxiliary/draw/Makefile
@@ -0,0 +1,46 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = draw
+
+C_SOURCES = \
+ draw_context.c \
+ draw_pipe.c \
+ draw_pipe_aaline.c \
+ draw_pipe_aapoint.c \
+ draw_pipe_clip.c \
+ draw_pipe_cull.c \
+ draw_pipe_flatshade.c \
+ draw_pipe_offset.c \
+ draw_pipe_pstipple.c \
+ draw_pipe_stipple.c \
+ draw_pipe_twoside.c \
+ draw_pipe_unfilled.c \
+ draw_pipe_util.c \
+ draw_pipe_validate.c \
+ draw_pipe_vbuf.c \
+ draw_pipe_wide_line.c \
+ draw_pipe_wide_point.c \
+ draw_pt.c \
+ draw_pt_elts.c \
+ draw_pt_emit.c \
+ draw_pt_fetch.c \
+ draw_pt_fetch_emit.c \
+ draw_pt_fetch_shade_emit.c \
+ draw_pt_fetch_shade_pipeline.c \
+ draw_pt_post_vs.c \
+ draw_pt_util.c \
+ draw_pt_varray.c \
+ draw_pt_vcache.c \
+ draw_vertex.c \
+ draw_vs.c \
+ draw_vs_varient.c \
+ draw_vs_aos.c \
+ draw_vs_aos_io.c \
+ draw_vs_aos_machine.c \
+ draw_vs_exec.c \
+ draw_vs_llvm.c \
+ draw_vs_ppc.c \
+ draw_vs_sse.c
+
+include ../../Makefile.template
diff --git a/src/gallium/auxiliary/draw/SConscript b/src/gallium/auxiliary/draw/SConscript
new file mode 100644
index 0000000000..5f05aa324a
--- /dev/null
+++ b/src/gallium/auxiliary/draw/SConscript
@@ -0,0 +1,46 @@
+Import('*')
+
+draw = env.ConvenienceLibrary(
+ target = 'draw',
+ source = [
+ 'draw_context.c',
+ 'draw_pipe.c',
+ 'draw_pipe_aaline.c',
+ 'draw_pipe_aapoint.c',
+ 'draw_pipe_clip.c',
+ 'draw_pipe_cull.c',
+ 'draw_pipe_flatshade.c',
+ 'draw_pipe_offset.c',
+ 'draw_pipe_pstipple.c',
+ 'draw_pipe_stipple.c',
+ 'draw_pipe_twoside.c',
+ 'draw_pipe_unfilled.c',
+ 'draw_pipe_util.c',
+ 'draw_pipe_validate.c',
+ 'draw_pipe_vbuf.c',
+ 'draw_pipe_wide_line.c',
+ 'draw_pipe_wide_point.c',
+ 'draw_pt.c',
+ 'draw_pt_elts.c',
+ 'draw_pt_emit.c',
+ 'draw_pt_fetch.c',
+ 'draw_pt_fetch_emit.c',
+ 'draw_pt_fetch_shade_emit.c',
+ 'draw_pt_fetch_shade_pipeline.c',
+ 'draw_pt_post_vs.c',
+ 'draw_pt_util.c',
+ 'draw_pt_varray.c',
+ 'draw_pt_vcache.c',
+ 'draw_vertex.c',
+ 'draw_vs.c',
+ 'draw_vs_aos.c',
+ 'draw_vs_aos_io.c',
+ 'draw_vs_aos_machine.c',
+ 'draw_vs_exec.c',
+ 'draw_vs_llvm.c',
+ 'draw_vs_ppc.c',
+ 'draw_vs_sse.c',
+ 'draw_vs_varient.c'
+ ])
+
+auxiliaries.insert(0, draw)
diff --git a/src/gallium/auxiliary/draw/draw_context.c b/src/gallium/auxiliary/draw/draw_context.c
new file mode 100644
index 0000000000..a4f1fcddc1
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_context.c
@@ -0,0 +1,430 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "util/u_memory.h"
+#include "util/u_math.h"
+#include "draw_context.h"
+#include "draw_vbuf.h"
+#include "draw_vs.h"
+#include "draw_pt.h"
+#include "draw_pipe.h"
+
+
+struct draw_context *draw_create( void )
+{
+ struct draw_context *draw = CALLOC_STRUCT( draw_context );
+ if (draw == NULL)
+ goto fail;
+
+ ASSIGN_4V( draw->plane[0], -1, 0, 0, 1 );
+ ASSIGN_4V( draw->plane[1], 1, 0, 0, 1 );
+ ASSIGN_4V( draw->plane[2], 0, -1, 0, 1 );
+ ASSIGN_4V( draw->plane[3], 0, 1, 0, 1 );
+ ASSIGN_4V( draw->plane[4], 0, 0, 1, 1 ); /* yes these are correct */
+ ASSIGN_4V( draw->plane[5], 0, 0, -1, 1 ); /* mesa's a bit wonky */
+ draw->nr_planes = 6;
+
+
+ draw->reduced_prim = ~0; /* != any of PIPE_PRIM_x */
+
+
+ if (!draw_pipeline_init( draw ))
+ goto fail;
+
+ if (!draw_pt_init( draw ))
+ goto fail;
+
+ if (!draw_vs_init( draw ))
+ goto fail;
+
+ return draw;
+
+fail:
+ draw_destroy( draw );
+ return NULL;
+}
+
+
+void draw_destroy( struct draw_context *draw )
+{
+ if (!draw)
+ return;
+
+
+
+ /* Not so fast -- we're just borrowing this at the moment.
+ *
+ if (draw->render)
+ draw->render->destroy( draw->render );
+ */
+
+ draw_pipeline_destroy( draw );
+ draw_pt_destroy( draw );
+ draw_vs_destroy( draw );
+
+ FREE( draw );
+}
+
+
+
+void draw_flush( struct draw_context *draw )
+{
+ draw_do_flush( draw, DRAW_FLUSH_BACKEND );
+}
+
+
+/**
+ * Specify the Minimum Resolvable Depth factor for polygon offset.
+ * This factor potentially depends on the number of Z buffer bits,
+ * the rasterization algorithm and the arithmetic performed on Z
+ * values between vertex shading and rasterization. It will vary
+ * from one driver to another.
+ */
+void draw_set_mrd(struct draw_context *draw, double mrd)
+{
+ draw->mrd = mrd;
+}
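+
+/* As an illustration only: a driver rendering to a 16-bit depth buffer
+ * might pass a value on the order of 1.0 / 65535.0 here; the exact
+ * factor is rasterizer- and driver-specific.
+ */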
+
+
+/**
+ * Register new primitive rasterization/rendering state.
+ * This causes the drawing pipeline to be rebuilt.
+ */
+void draw_set_rasterizer_state( struct draw_context *draw,
+ const struct pipe_rasterizer_state *raster )
+{
+ draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
+
+ draw->rasterizer = raster;
+ draw->bypass_clipping =
+ ((draw->rasterizer && draw->rasterizer->bypass_vs_clip_and_viewport) ||
+ draw->driver.bypass_clipping);
+}
+
+
+void draw_set_driver_clipping( struct draw_context *draw,
+ boolean bypass_clipping )
+{
+ draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
+
+ draw->driver.bypass_clipping = bypass_clipping;
+ draw->bypass_clipping =
+ ((draw->rasterizer && draw->rasterizer->bypass_vs_clip_and_viewport) ||
+ draw->driver.bypass_clipping);
+}
+
+
+/**
+ * Plug in the primitive rendering/rasterization stage (which is the last
+ * stage in the drawing pipeline).
+ * This is provided by the device driver.
+ */
+void draw_set_rasterize_stage( struct draw_context *draw,
+ struct draw_stage *stage )
+{
+ draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
+
+ draw->pipeline.rasterize = stage;
+}
+
+
+/**
+ * Set the draw module's clipping state.
+ */
+void draw_set_clip_state( struct draw_context *draw,
+ const struct pipe_clip_state *clip )
+{
+ draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
+
+ assert(clip->nr <= PIPE_MAX_CLIP_PLANES);
+ memcpy(&draw->plane[6], clip->ucp, clip->nr * sizeof(clip->ucp[0]));
+ draw->nr_planes = 6 + clip->nr;
+}
+
+
+/**
+ * Set the draw module's viewport state.
+ */
+void draw_set_viewport_state( struct draw_context *draw,
+ const struct pipe_viewport_state *viewport )
+{
+ draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
+ draw->viewport = *viewport; /* struct copy */
+ draw->identity_viewport = (viewport->scale[0] == 1.0f &&
+ viewport->scale[1] == 1.0f &&
+ viewport->scale[2] == 1.0f &&
+ viewport->scale[3] == 1.0f &&
+ viewport->translate[0] == 0.0f &&
+ viewport->translate[1] == 0.0f &&
+ viewport->translate[2] == 0.0f &&
+ viewport->translate[3] == 0.0f);
+
+ draw_vs_set_viewport( draw, viewport );
+}
+
+
+
+void
+draw_set_vertex_buffers(struct draw_context *draw,
+ unsigned count,
+ const struct pipe_vertex_buffer *buffers)
+{
+ assert(count <= PIPE_MAX_ATTRIBS);
+
+ memcpy(draw->pt.vertex_buffer, buffers, count * sizeof(buffers[0]));
+ draw->pt.nr_vertex_buffers = count;
+}
+
+
+void
+draw_set_vertex_elements(struct draw_context *draw,
+ unsigned count,
+ const struct pipe_vertex_element *elements)
+{
+ assert(count <= PIPE_MAX_ATTRIBS);
+
+ memcpy(draw->pt.vertex_element, elements, count * sizeof(elements[0]));
+ draw->pt.nr_vertex_elements = count;
+}
+
+
+/**
+ * Tell drawing context where to find mapped vertex buffers.
+ */
+void
+draw_set_mapped_vertex_buffer(struct draw_context *draw,
+ unsigned attr, const void *buffer)
+{
+ draw->pt.user.vbuffer[attr] = buffer;
+}
+
+
+void
+draw_set_mapped_constant_buffer(struct draw_context *draw,
+ const void *buffer,
+ unsigned size )
+{
+ draw->pt.user.constants = buffer;
+ draw_vs_set_constants( draw, (const float (*)[4])buffer, size );
+}
+
+
+/**
+ * Tells the draw module to draw points with triangles if their size
+ * is greater than this threshold.
+ */
+void
+draw_wide_point_threshold(struct draw_context *draw, float threshold)
+{
+ draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
+ draw->pipeline.wide_point_threshold = threshold;
+}
+
+
+/**
+ * Tells the draw module to draw lines with triangles if their width
+ * is greater than this threshold.
+ */
+void
+draw_wide_line_threshold(struct draw_context *draw, float threshold)
+{
+ draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
+ draw->pipeline.wide_line_threshold = threshold;
+}
+
+
+/**
+ * Tells the draw module whether or not to implement line stipple.
+ */
+void
+draw_enable_line_stipple(struct draw_context *draw, boolean enable)
+{
+ draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
+ draw->pipeline.line_stipple = enable;
+}
+
+
+/**
+ * Tells the draw module whether to convert points to quads for sprite mode.
+ */
+void
+draw_enable_point_sprites(struct draw_context *draw, boolean enable)
+{
+ draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
+ draw->pipeline.point_sprite = enable;
+}
+
+
+void
+draw_set_force_passthrough( struct draw_context *draw, boolean enable )
+{
+ draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
+ draw->force_passthrough = enable;
+}
+
+
+/**
+ * Ask the draw module for the location/slot of the given vertex attribute in
+ * a post-transformed vertex.
+ *
+ * With this function, drivers that use the draw module should have no reason
+ * to track the current vertex shader.
+ *
+ * Note that the draw module may sometimes generate vertices with extra
+ * attributes (such as texcoords for AA lines). The driver can call this
+ * function to find those attributes.
+ *
+ * Zero is returned if the attribute is not found since this is
+ * a don't care / undefined situation. Returning -1 would be a bit more
+ * work for the drivers.
+ */
+int
+draw_find_vs_output(const struct draw_context *draw,
+ uint semantic_name, uint semantic_index)
+{
+ const struct draw_vertex_shader *vs = draw->vs.vertex_shader;
+ uint i;
+ for (i = 0; i < vs->info.num_outputs; i++) {
+ if (vs->info.output_semantic_name[i] == semantic_name &&
+ vs->info.output_semantic_index[i] == semantic_index)
+ return i;
+ }
+
+ /* XXX there may be more than one extra vertex attrib.
+ * For example, simulated gl_FragCoord and gl_PointCoord.
+ */
+ if (draw->extra_vp_outputs.semantic_name == semantic_name &&
+ draw->extra_vp_outputs.semantic_index == semantic_index) {
+ return draw->extra_vp_outputs.slot;
+ }
+ return 0;
+}
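+
+/* For example, a driver building hardware vertices might look up the
+ * slot that carries the first generic texcoord (a sketch; the semantic
+ * names come from pipe/p_shader_tokens.h and "emit_attrib" is a
+ * hypothetical driver helper):
+ *
+ *    int slot = draw_find_vs_output(draw, TGSI_SEMANTIC_GENERIC, 0);
+ *    emit_attrib(hw_vertex, vertex->data[slot]);
+ */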
+
+
+/**
+ * Return number of vertex shader outputs.
+ */
+uint
+draw_num_vs_outputs(const struct draw_context *draw)
+{
+ uint count = draw->vs.vertex_shader->info.num_outputs;
+ if (draw->extra_vp_outputs.slot > 0)
+ count++;
+ return count;
+}
+
+
+/**
+ * Provide TGSI sampler objects for vertex shaders that use texture fetches.
+ * This might only be used by software drivers for the time being.
+ */
+void
+draw_texture_samplers(struct draw_context *draw,
+ uint num_samplers,
+ struct tgsi_sampler **samplers)
+{
+ draw->vs.num_samplers = num_samplers;
+ draw->vs.samplers = samplers;
+}
+
+
+
+
+void draw_set_render( struct draw_context *draw,
+ struct vbuf_render *render )
+{
+ draw->render = render;
+}
+
+void draw_set_edgeflags( struct draw_context *draw,
+ const unsigned *edgeflag )
+{
+ draw->pt.user.edgeflag = edgeflag;
+}
+
+
+
+
+/**
+ * Tell the drawing context about the index/element buffer to use
+ * (ala glDrawElements)
+ * If no element buffer is to be used (i.e. glDrawArrays) then this
+ * should be called with eltSize=0 and elements=NULL.
+ *
+ * \param draw the drawing context
+ * \param eltSize size of each element (1, 2 or 4 bytes)
+ * \param elements the element buffer ptr
+ */
+void
+draw_set_mapped_element_buffer_range( struct draw_context *draw,
+ unsigned eltSize,
+ unsigned min_index,
+ unsigned max_index,
+ const void *elements )
+{
+ draw->pt.user.elts = elements;
+ draw->pt.user.eltSize = eltSize;
+ draw->pt.user.min_index = min_index;
+ draw->pt.user.max_index = max_index;
+}
+
+
+void
+draw_set_mapped_element_buffer( struct draw_context *draw,
+ unsigned eltSize,
+ const void *elements )
+{
+ draw->pt.user.elts = elements;
+ draw->pt.user.eltSize = eltSize;
+ draw->pt.user.min_index = 0;
+ draw->pt.user.max_index = 0xffffffff;
+}
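+
+/* For an indexed draw with 16-bit indices, a driver might do the
+ * following (a sketch; "mapped_indices" is a hypothetical pointer to
+ * the mapped index buffer):
+ *
+ *    draw_set_mapped_element_buffer(draw, 2, mapped_indices);
+ *    draw_arrays(draw, PIPE_PRIM_TRIANGLES, 0, count);
+ */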
+
+
+/* Revamp me please:
+ */
+void draw_do_flush( struct draw_context *draw, unsigned flags )
+{
+ if (!draw->suspend_flushing)
+ {
+      assert(!draw->flushing); /* catch inadvertent recursion */
+
+ draw->flushing = TRUE;
+
+ draw_pipeline_flush( draw, flags );
+
+ draw->reduced_prim = ~0; /* is reduced_prim needed any more? */
+
+ draw->flushing = FALSE;
+ }
+}
diff --git a/src/gallium/auxiliary/draw/draw_context.h b/src/gallium/auxiliary/draw/draw_context.h
new file mode 100644
index 0000000000..d529e4e9a2
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_context.h
@@ -0,0 +1,182 @@
+
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \brief Public interface into the drawing module.
+ */
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#ifndef DRAW_CONTEXT_H
+#define DRAW_CONTEXT_H
+
+
+#include "pipe/p_state.h"
+
+
+struct pipe_context;
+struct draw_context;
+struct draw_stage;
+struct draw_vertex_shader;
+struct tgsi_sampler;
+
+
+struct draw_context *draw_create( void );
+
+void draw_destroy( struct draw_context *draw );
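+
+/* Rough order of operations for a driver using this module (a sketch;
+ * the state objects and the vbuf_render backend are driver-specific):
+ *
+ *    struct draw_context *draw = draw_create();
+ *    draw_set_render(draw, my_vbuf_render);
+ *    draw_set_rasterizer_state(draw, &rasterizer);
+ *    draw_set_viewport_state(draw, &viewport);
+ *    ... bind vertex shader, vertex buffers/elements, mapped buffers ...
+ *    draw_arrays(draw, PIPE_PRIM_TRIANGLES, 0, count);
+ *    draw_flush(draw);
+ *    draw_destroy(draw);
+ */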
+
+void draw_set_viewport_state( struct draw_context *draw,
+ const struct pipe_viewport_state *viewport );
+
+void draw_set_clip_state( struct draw_context *pipe,
+ const struct pipe_clip_state *clip );
+
+void draw_set_rasterizer_state( struct draw_context *draw,
+ const struct pipe_rasterizer_state *raster );
+
+void draw_set_rasterize_stage( struct draw_context *draw,
+ struct draw_stage *stage );
+
+void draw_wide_point_threshold(struct draw_context *draw, float threshold);
+
+void draw_wide_line_threshold(struct draw_context *draw, float threshold);
+
+void draw_enable_line_stipple(struct draw_context *draw, boolean enable);
+
+void draw_enable_point_sprites(struct draw_context *draw, boolean enable);
+
+void draw_set_mrd(struct draw_context *draw, double mrd);
+
+boolean
+draw_install_aaline_stage(struct draw_context *draw, struct pipe_context *pipe);
+
+boolean
+draw_install_aapoint_stage(struct draw_context *draw, struct pipe_context *pipe);
+
+boolean
+draw_install_pstipple_stage(struct draw_context *draw, struct pipe_context *pipe);
+
+
+int
+draw_find_vs_output(const struct draw_context *draw,
+ uint semantic_name, uint semantic_index);
+
+uint
+draw_num_vs_outputs(const struct draw_context *draw);
+
+
+void
+draw_texture_samplers(struct draw_context *draw,
+ uint num_samplers,
+ struct tgsi_sampler **samplers);
+
+
+
+/*
+ * Vertex shader functions
+ */
+
+struct draw_vertex_shader *
+draw_create_vertex_shader(struct draw_context *draw,
+ const struct pipe_shader_state *shader);
+void draw_bind_vertex_shader(struct draw_context *draw,
+ struct draw_vertex_shader *dvs);
+void draw_delete_vertex_shader(struct draw_context *draw,
+ struct draw_vertex_shader *dvs);
+
+
+
+/*
+ * Vertex data functions
+ */
+
+void draw_set_vertex_buffers(struct draw_context *draw,
+ unsigned count,
+ const struct pipe_vertex_buffer *buffers);
+
+void draw_set_vertex_elements(struct draw_context *draw,
+ unsigned count,
+ const struct pipe_vertex_element *elements);
+
+void
+draw_set_mapped_element_buffer_range( struct draw_context *draw,
+ unsigned eltSize,
+ unsigned min_index,
+ unsigned max_index,
+ const void *elements );
+
+void draw_set_mapped_element_buffer( struct draw_context *draw,
+ unsigned eltSize,
+ const void *elements );
+
+void draw_set_mapped_vertex_buffer(struct draw_context *draw,
+ unsigned attr, const void *buffer);
+
+void draw_set_mapped_constant_buffer(struct draw_context *draw,
+ const void *buffer,
+ unsigned size );
+
+void draw_set_edgeflags( struct draw_context *draw,
+ const unsigned *edgeflag );
+
+
+/***********************************************************************
+ * draw_prim.c
+ */
+
+void draw_arrays(struct draw_context *draw, unsigned prim,
+ unsigned start, unsigned count);
+
+void draw_flush(struct draw_context *draw);
+
+
+/*******************************************************************************
+ * Driver backend interface
+ */
+struct vbuf_render;
+void draw_set_render( struct draw_context *draw,
+ struct vbuf_render *render );
+
+void draw_set_driver_clipping( struct draw_context *draw,
+ boolean bypass_clipping );
+
+void draw_set_force_passthrough( struct draw_context *draw,
+ boolean enable );
+
+/*******************************************************************************
+ * Draw pipeline
+ */
+boolean draw_need_pipeline(const struct draw_context *draw,
+ const struct pipe_rasterizer_state *rasterizer,
+ unsigned prim );
+
+
+
+#endif /* DRAW_CONTEXT_H */
diff --git a/src/gallium/auxiliary/draw/draw_pipe.c b/src/gallium/auxiliary/draw/draw_pipe.c
new file mode 100644
index 0000000000..1c6d657297
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pipe.c
@@ -0,0 +1,319 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "draw/draw_private.h"
+#include "draw/draw_pipe.h"
+
+
+
+boolean draw_pipeline_init( struct draw_context *draw )
+{
+ /* create pipeline stages */
+ draw->pipeline.wide_line = draw_wide_line_stage( draw );
+ draw->pipeline.wide_point = draw_wide_point_stage( draw );
+ draw->pipeline.stipple = draw_stipple_stage( draw );
+ draw->pipeline.unfilled = draw_unfilled_stage( draw );
+ draw->pipeline.twoside = draw_twoside_stage( draw );
+ draw->pipeline.offset = draw_offset_stage( draw );
+ draw->pipeline.clip = draw_clip_stage( draw );
+ draw->pipeline.flatshade = draw_flatshade_stage( draw );
+ draw->pipeline.cull = draw_cull_stage( draw );
+ draw->pipeline.validate = draw_validate_stage( draw );
+ draw->pipeline.first = draw->pipeline.validate;
+
+ if (!draw->pipeline.wide_line ||
+ !draw->pipeline.wide_point ||
+ !draw->pipeline.stipple ||
+ !draw->pipeline.unfilled ||
+ !draw->pipeline.twoside ||
+ !draw->pipeline.offset ||
+ !draw->pipeline.clip ||
+ !draw->pipeline.flatshade ||
+ !draw->pipeline.cull ||
+ !draw->pipeline.validate)
+ return FALSE;
+
+ /* these defaults are oriented toward the needs of softpipe */
+ draw->pipeline.wide_point_threshold = 1000000.0; /* infinity */
+ draw->pipeline.wide_line_threshold = 1.0;
+ draw->pipeline.line_stipple = TRUE;
+ draw->pipeline.point_sprite = TRUE;
+
+ return TRUE;
+}
+
+
+void draw_pipeline_destroy( struct draw_context *draw )
+{
+ if (draw->pipeline.wide_line)
+ draw->pipeline.wide_line->destroy( draw->pipeline.wide_line );
+ if (draw->pipeline.wide_point)
+ draw->pipeline.wide_point->destroy( draw->pipeline.wide_point );
+ if (draw->pipeline.stipple)
+ draw->pipeline.stipple->destroy( draw->pipeline.stipple );
+ if (draw->pipeline.unfilled)
+ draw->pipeline.unfilled->destroy( draw->pipeline.unfilled );
+ if (draw->pipeline.twoside)
+ draw->pipeline.twoside->destroy( draw->pipeline.twoside );
+ if (draw->pipeline.offset)
+ draw->pipeline.offset->destroy( draw->pipeline.offset );
+ if (draw->pipeline.clip)
+ draw->pipeline.clip->destroy( draw->pipeline.clip );
+ if (draw->pipeline.flatshade)
+ draw->pipeline.flatshade->destroy( draw->pipeline.flatshade );
+ if (draw->pipeline.cull)
+ draw->pipeline.cull->destroy( draw->pipeline.cull );
+ if (draw->pipeline.validate)
+ draw->pipeline.validate->destroy( draw->pipeline.validate );
+ if (draw->pipeline.aaline)
+ draw->pipeline.aaline->destroy( draw->pipeline.aaline );
+ if (draw->pipeline.aapoint)
+ draw->pipeline.aapoint->destroy( draw->pipeline.aapoint );
+ if (draw->pipeline.pstipple)
+ draw->pipeline.pstipple->destroy( draw->pipeline.pstipple );
+ if (draw->pipeline.rasterize)
+ draw->pipeline.rasterize->destroy( draw->pipeline.rasterize );
+}
+
+
+
+
+
+
+
+static void do_point( struct draw_context *draw,
+ const char *v0 )
+{
+ struct prim_header prim;
+
+ prim.flags = 0;
+ prim.pad = 0;
+ prim.v[0] = (struct vertex_header *)v0;
+
+ draw->pipeline.first->point( draw->pipeline.first, &prim );
+}
+
+
+static void do_line( struct draw_context *draw,
+ ushort flags,
+ const char *v0,
+ const char *v1 )
+{
+ struct prim_header prim;
+
+ prim.flags = flags;
+ prim.pad = 0;
+ prim.v[0] = (struct vertex_header *)v0;
+ prim.v[1] = (struct vertex_header *)v1;
+
+ draw->pipeline.first->line( draw->pipeline.first, &prim );
+}
+
+
+static void do_triangle( struct draw_context *draw,
+ ushort flags,
+ char *v0,
+ char *v1,
+ char *v2 )
+{
+ struct prim_header prim;
+
+ prim.v[0] = (struct vertex_header *)v0;
+ prim.v[1] = (struct vertex_header *)v1;
+ prim.v[2] = (struct vertex_header *)v2;
+ prim.flags = flags;
+ prim.pad = 0;
+
+ draw->pipeline.first->tri( draw->pipeline.first, &prim );
+}
+
+
+
+#define QUAD(i0,i1,i2,i3) \
+ do_triangle( draw, \
+ ( DRAW_PIPE_RESET_STIPPLE | \
+ DRAW_PIPE_EDGE_FLAG_0 | \
+ DRAW_PIPE_EDGE_FLAG_2 ), \
+ verts + stride * elts[i0], \
+ verts + stride * elts[i1], \
+ verts + stride * elts[i3]); \
+ do_triangle( draw, \
+ ( DRAW_PIPE_EDGE_FLAG_0 | \
+ DRAW_PIPE_EDGE_FLAG_1 ), \
+ verts + stride * elts[i1], \
+ verts + stride * elts[i2], \
+ verts + stride * elts[i3])
+
+#define TRIANGLE(flags,i0,i1,i2) \
+ do_triangle( draw, \
+ elts[i0], /* flags */ \
+ verts + stride * (elts[i0] & ~DRAW_PIPE_FLAG_MASK), \
+ verts + stride * elts[i1], \
+ verts + stride * elts[i2])
+
+#define LINE(flags,i0,i1) \
+ do_line( draw, \
+ elts[i0], \
+ verts + stride * (elts[i0] & ~DRAW_PIPE_FLAG_MASK), \
+ verts + stride * elts[i1])
+
+#define POINT(i0) \
+ do_point( draw, \
+ verts + stride * elts[i0] )
+
+#define FUNC pipe_run
+#define ARGS \
+ struct draw_context *draw, \
+ unsigned prim, \
+ struct vertex_header *vertices, \
+ unsigned stride, \
+ const ushort *elts
+
+#define LOCAL_VARS \
+ char *verts = (char *)vertices; \
+ boolean flatfirst = (draw->rasterizer->flatshade && \
+ draw->rasterizer->flatshade_first); \
+ unsigned i; \
+ ushort flags
+
+#define FLUSH
+
+#include "draw_pt_decompose.h"
+#undef ARGS
+#undef LOCAL_VARS
+
+
+
+/* Code to run the pipeline on a fairly arbitrary collection of vertices.
+ *
+ * Vertex headers must be pre-initialized with UNDEFINED_VERTEX_ID; this
+ * code overwrites that id, so it may have to be reset if the vertices
+ * are to be reused.
+ *
+ * This code provides a callback to reset the vertex ids, which the
+ * draw_vbuf.c code uses when it has to perform a flush.
+ */
+void draw_pipeline_run( struct draw_context *draw,
+ unsigned prim,
+ struct vertex_header *vertices,
+ unsigned vertex_count,
+ unsigned stride,
+ const ushort *elts,
+ unsigned count )
+{
+ char *verts = (char *)vertices;
+
+ draw->pipeline.verts = verts;
+ draw->pipeline.vertex_stride = stride;
+ draw->pipeline.vertex_count = vertex_count;
+
+ pipe_run(draw, prim, vertices, stride, elts, count);
+
+ draw->pipeline.verts = NULL;
+ draw->pipeline.vertex_count = 0;
+}
+
+#define QUAD(i0,i1,i2,i3) \
+ do_triangle( draw, \
+ ( DRAW_PIPE_RESET_STIPPLE | \
+ DRAW_PIPE_EDGE_FLAG_0 | \
+ DRAW_PIPE_EDGE_FLAG_2 ), \
+ verts + stride * ((i0) & ~DRAW_PIPE_FLAG_MASK), \
+ verts + stride * (i1), \
+ verts + stride * (i3)); \
+ do_triangle( draw, \
+ ( DRAW_PIPE_EDGE_FLAG_0 | \
+ DRAW_PIPE_EDGE_FLAG_1 ), \
+ verts + stride * ((i1) & ~DRAW_PIPE_FLAG_MASK), \
+ verts + stride * (i2), \
+ verts + stride * (i3))
+
+#define TRIANGLE(flags,i0,i1,i2) \
+ do_triangle( draw, \
+ flags, /* flags */ \
+ verts + stride * ((i0) & ~DRAW_PIPE_FLAG_MASK), \
+ verts + stride * (i1), \
+ verts + stride * (i2))
+
+#define LINE(flags,i0,i1) \
+ do_line( draw, \
+ flags, \
+ verts + stride * ((i0) & ~DRAW_PIPE_FLAG_MASK), \
+ verts + stride * (i1))
+
+#define POINT(i0) \
+ do_point( draw, \
+ verts + stride * i0 )
+
+#define FUNC pipe_run_linear
+#define ARGS \
+ struct draw_context *draw, \
+ unsigned prim, \
+ struct vertex_header *vertices, \
+ unsigned stride
+
+#define LOCAL_VARS \
+ char *verts = (char *)vertices; \
+ boolean flatfirst = (draw->rasterizer->flatshade && \
+ draw->rasterizer->flatshade_first); \
+ unsigned i; \
+ ushort flags
+
+#define FLUSH
+
+#include "draw_pt_decompose.h"
+
+void draw_pipeline_run_linear( struct draw_context *draw,
+ unsigned prim,
+ struct vertex_header *vertices,
+ unsigned count,
+ unsigned stride )
+{
+ char *verts = (char *)vertices;
+ draw->pipeline.verts = verts;
+ draw->pipeline.vertex_stride = stride;
+ draw->pipeline.vertex_count = count;
+
+ pipe_run_linear(draw, prim, vertices, stride, count);
+
+ draw->pipeline.verts = NULL;
+ draw->pipeline.vertex_count = 0;
+}
+
+
+void draw_pipeline_flush( struct draw_context *draw,
+ unsigned flags )
+{
+ draw->pipeline.first->flush( draw->pipeline.first, flags );
+ draw->pipeline.first = draw->pipeline.validate;
+}
diff --git a/src/gallium/auxiliary/draw/draw_pipe.h b/src/gallium/auxiliary/draw/draw_pipe.h
new file mode 100644
index 0000000000..479250729f
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pipe.h
@@ -0,0 +1,126 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef DRAW_PIPE_H
+#define DRAW_PIPE_H
+
+#include "pipe/p_compiler.h"
+#include "draw_private.h" /* for sizeof(vertex_header) */
+
+
+/**
+ * Basic info for a point/line/triangle primitive.
+ */
+struct prim_header {
+ float det; /**< front/back face determinant */
+ ushort flags;
+ ushort pad;
+ struct vertex_header *v[3]; /**< 1 to 3 vertex pointers */
+};
+
+
+
+/**
+ * Base class for all primitive drawing stages.
+ */
+struct draw_stage
+{
+ struct draw_context *draw; /**< parent context */
+
+ struct draw_stage *next; /**< next stage in pipeline */
+ const char *name; /**< for debugging */
+
+ struct vertex_header **tmp; /**< temp vert storage, such as for clipping */
+ unsigned nr_tmps;
+
+ void (*point)( struct draw_stage *,
+ struct prim_header * );
+
+ void (*line)( struct draw_stage *,
+ struct prim_header * );
+
+ void (*tri)( struct draw_stage *,
+ struct prim_header * );
+
+ void (*flush)( struct draw_stage *,
+ unsigned flags );
+
+ void (*reset_stipple_counter)( struct draw_stage * );
+
+ void (*destroy)( struct draw_stage * );
+};
+
+
+extern struct draw_stage *draw_unfilled_stage( struct draw_context *context );
+extern struct draw_stage *draw_twoside_stage( struct draw_context *context );
+extern struct draw_stage *draw_offset_stage( struct draw_context *context );
+extern struct draw_stage *draw_clip_stage( struct draw_context *context );
+extern struct draw_stage *draw_flatshade_stage( struct draw_context *context );
+extern struct draw_stage *draw_cull_stage( struct draw_context *context );
+extern struct draw_stage *draw_stipple_stage( struct draw_context *context );
+extern struct draw_stage *draw_wide_line_stage( struct draw_context *context );
+extern struct draw_stage *draw_wide_point_stage( struct draw_context *context );
+extern struct draw_stage *draw_validate_stage( struct draw_context *context );
+
+
+extern void draw_free_temp_verts( struct draw_stage *stage );
+extern boolean draw_alloc_temp_verts( struct draw_stage *stage, unsigned nr );
+
+extern void draw_reset_vertex_ids( struct draw_context *draw );
+
+void draw_pipe_passthrough_tri(struct draw_stage *stage, struct prim_header *header);
+void draw_pipe_passthrough_line(struct draw_stage *stage, struct prim_header *header);
+void draw_pipe_passthrough_point(struct draw_stage *stage, struct prim_header *header);
+
+
+
+/**
+ * Get a writeable copy of a vertex.
+ * \param stage drawing stage info
+ * \param vert the vertex to copy (source)
+ * \param idx index into stage's tmp[] array to put the copy (dest)
+ * \return pointer to the copied vertex
+ */
+static INLINE struct vertex_header *
+dup_vert( struct draw_stage *stage,
+ const struct vertex_header *vert,
+ unsigned idx )
+{
+ struct vertex_header *tmp = stage->tmp[idx];
+ const uint vsize = sizeof(struct vertex_header)
+ + stage->draw->vs.num_vs_outputs * 4 * sizeof(float);
+ memcpy(tmp, vert, vsize);
+ tmp->vertex_id = UNDEFINED_VERTEX_ID;
+ return tmp;
+}
+
+#endif
diff --git a/src/gallium/auxiliary/draw/draw_pipe_aaline.c b/src/gallium/auxiliary/draw/draw_pipe_aaline.c
new file mode 100644
index 0000000000..9f956715a2
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pipe_aaline.c
@@ -0,0 +1,921 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * AA line stage: AA lines are converted to texture mapped triangles.
+ *
+ * Authors: Brian Paul
+ */
+
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_shader_tokens.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+#include "tgsi/tgsi_transform.h"
+#include "tgsi/tgsi_dump.h"
+
+#include "draw_context.h"
+#include "draw_private.h"
+#include "draw_pipe.h"
+
+
+/**
+ * Max texture level for the alpha texture used for antialiasing
+ */
+#define MAX_TEXTURE_LEVEL 5 /* 32 x 32 */
+
+
+/**
+ * Subclass of pipe_shader_state to carry extra fragment shader info.
+ */
+struct aaline_fragment_shader
+{
+ struct pipe_shader_state state;
+ void *driver_fs;
+ void *aaline_fs;
+ uint sampler_unit;
+ int generic_attrib; /**< texcoord/generic used for texture */
+};
+
+
+/**
+ * Subclass of draw_stage
+ */
+struct aaline_stage
+{
+ struct draw_stage stage;
+
+ float half_line_width;
+
+ /** For AA lines, this is the vertex attrib slot for the new texcoords */
+ uint tex_slot;
+ /** position, not necessarily output zero */
+ uint pos_slot;
+
+ void *sampler_cso;
+ struct pipe_texture *texture;
+ uint num_samplers;
+ uint num_textures;
+
+
+ /*
+ * Currently bound state
+ */
+ struct aaline_fragment_shader *fs;
+ struct {
+ void *sampler[PIPE_MAX_SAMPLERS];
+ struct pipe_texture *texture[PIPE_MAX_SAMPLERS];
+ } state;
+
+ /*
+ * Driver interface/override functions
+ */
+ void * (*driver_create_fs_state)(struct pipe_context *,
+ const struct pipe_shader_state *);
+ void (*driver_bind_fs_state)(struct pipe_context *, void *);
+ void (*driver_delete_fs_state)(struct pipe_context *, void *);
+
+ void (*driver_bind_sampler_states)(struct pipe_context *, unsigned,
+ void **);
+ void (*driver_set_sampler_textures)(struct pipe_context *, unsigned,
+ struct pipe_texture **);
+
+ struct pipe_context *pipe;
+};
+
+
+
+/**
+ * Subclass of tgsi_transform_context, used for transforming the
+ * user's fragment shader to add the special AA instructions.
+ */
+struct aa_transform_context {
+ struct tgsi_transform_context base;
+ uint tempsUsed; /**< bitmask */
+ int colorOutput; /**< which output is the primary color */
+ uint samplersUsed; /**< bitfield of samplers used */
+   int freeSampler;  /**< an available sampler for the AA texture */
+ int maxInput, maxGeneric; /**< max input index found */
+ int colorTemp, texTemp; /**< temp registers */
+ boolean firstInstruction;
+};
+
+
+/**
+ * TGSI declaration transform callback.
+ * Look for a free sampler, a free input attrib, and two free temp regs.
+ */
+static void
+aa_transform_decl(struct tgsi_transform_context *ctx,
+ struct tgsi_full_declaration *decl)
+{
+ struct aa_transform_context *aactx = (struct aa_transform_context *) ctx;
+
+ if (decl->Declaration.File == TGSI_FILE_OUTPUT &&
+ decl->Semantic.SemanticName == TGSI_SEMANTIC_COLOR &&
+ decl->Semantic.SemanticIndex == 0) {
+ aactx->colorOutput = decl->DeclarationRange.First;
+ }
+ else if (decl->Declaration.File == TGSI_FILE_SAMPLER) {
+ uint i;
+ for (i = decl->DeclarationRange.First;
+ i <= decl->DeclarationRange.Last; i++) {
+ aactx->samplersUsed |= 1 << i;
+ }
+ }
+ else if (decl->Declaration.File == TGSI_FILE_INPUT) {
+ if ((int) decl->DeclarationRange.Last > aactx->maxInput)
+ aactx->maxInput = decl->DeclarationRange.Last;
+ if (decl->Semantic.SemanticName == TGSI_SEMANTIC_GENERIC &&
+ (int) decl->Semantic.SemanticIndex > aactx->maxGeneric) {
+ aactx->maxGeneric = decl->Semantic.SemanticIndex;
+ }
+ }
+ else if (decl->Declaration.File == TGSI_FILE_TEMPORARY) {
+ uint i;
+ for (i = decl->DeclarationRange.First;
+ i <= decl->DeclarationRange.Last; i++) {
+ aactx->tempsUsed |= (1 << i);
+ }
+ }
+
+ ctx->emit_declaration(ctx, decl);
+}
+
+
+/**
+ * Find the lowest zero bit in the given word, or -1 if bitfield is all ones.
+ */
+static int
+free_bit(uint bitfield)
+{
+ int i;
+ for (i = 0; i < 32; i++) {
+ if ((bitfield & (1 << i)) == 0)
+ return i;
+ }
+ return -1;
+}
+
+
+/**
+ * TGSI instruction transform callback.
+ * Replace writes to result.color w/ a temp reg.
+ * Upon END instruction, insert texture sampling code for antialiasing.
+ */
+static void
+aa_transform_inst(struct tgsi_transform_context *ctx,
+ struct tgsi_full_instruction *inst)
+{
+ struct aa_transform_context *aactx = (struct aa_transform_context *) ctx;
+
+ if (aactx->firstInstruction) {
+ /* emit our new declarations before the first instruction */
+
+ struct tgsi_full_declaration decl;
+ uint i;
+
+ /* find free sampler */
+ aactx->freeSampler = free_bit(aactx->samplersUsed);
+ if (aactx->freeSampler >= PIPE_MAX_SAMPLERS)
+ aactx->freeSampler = PIPE_MAX_SAMPLERS - 1;
+
+ /* find two free temp regs */
+ for (i = 0; i < 32; i++) {
+ if ((aactx->tempsUsed & (1 << i)) == 0) {
+ /* found a free temp */
+ if (aactx->colorTemp < 0)
+ aactx->colorTemp = i;
+ else if (aactx->texTemp < 0)
+ aactx->texTemp = i;
+ else
+ break;
+ }
+ }
+ assert(aactx->colorTemp >= 0);
+ assert(aactx->texTemp >= 0);
+
+ /* declare new generic input/texcoord */
+ decl = tgsi_default_full_declaration();
+ decl.Declaration.File = TGSI_FILE_INPUT;
+ /* XXX this could be linear... */
+ decl.Declaration.Interpolate = TGSI_INTERPOLATE_PERSPECTIVE;
+ decl.Declaration.Semantic = 1;
+ decl.Semantic.SemanticName = TGSI_SEMANTIC_GENERIC;
+ decl.Semantic.SemanticIndex = aactx->maxGeneric + 1;
+ decl.DeclarationRange.First =
+ decl.DeclarationRange.Last = aactx->maxInput + 1;
+ ctx->emit_declaration(ctx, &decl);
+
+ /* declare new sampler */
+ decl = tgsi_default_full_declaration();
+ decl.Declaration.File = TGSI_FILE_SAMPLER;
+ decl.DeclarationRange.First =
+ decl.DeclarationRange.Last = aactx->freeSampler;
+ ctx->emit_declaration(ctx, &decl);
+
+ /* declare new temp regs */
+ decl = tgsi_default_full_declaration();
+ decl.Declaration.File = TGSI_FILE_TEMPORARY;
+ decl.DeclarationRange.First =
+ decl.DeclarationRange.Last = aactx->texTemp;
+ ctx->emit_declaration(ctx, &decl);
+
+ decl = tgsi_default_full_declaration();
+ decl.Declaration.File = TGSI_FILE_TEMPORARY;
+ decl.DeclarationRange.First =
+ decl.DeclarationRange.Last = aactx->colorTemp;
+ ctx->emit_declaration(ctx, &decl);
+
+ aactx->firstInstruction = FALSE;
+ }
+
+ if (inst->Instruction.Opcode == TGSI_OPCODE_END &&
+ aactx->colorOutput != -1) {
+ struct tgsi_full_instruction newInst;
+
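+ /*
+ * Sketch of the TGSI the code below appends in place of the user's END
+ * (approximate, for illustration only):
+ *
+ * TEX texTemp, input[maxInput+1], sampler[freeSampler], 2D
+ * MOV output[colorOutput].xyz, colorTemp
+ * MUL output[colorOutput].w, colorTemp, texTemp
+ * END
+ */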
+ /* TEX */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_TEX;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullDstRegisters[0].DstRegister.Index = aactx->texTemp;
+ newInst.Instruction.NumSrcRegs = 2;
+ newInst.InstructionExtTexture.Texture = TGSI_TEXTURE_2D;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_INPUT;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = aactx->maxInput + 1;
+ newInst.FullSrcRegisters[1].SrcRegister.File = TGSI_FILE_SAMPLER;
+ newInst.FullSrcRegisters[1].SrcRegister.Index = aactx->freeSampler;
+
+ ctx->emit_instruction(ctx, &newInst);
+
+ /* MOV rgb */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_MOV;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_OUTPUT;
+ newInst.FullDstRegisters[0].DstRegister.Index = aactx->colorOutput;
+ newInst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_XYZ;
+ newInst.Instruction.NumSrcRegs = 1;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = aactx->colorTemp;
+ ctx->emit_instruction(ctx, &newInst);
+
+ /* MUL alpha */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_MUL;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_OUTPUT;
+ newInst.FullDstRegisters[0].DstRegister.Index = aactx->colorOutput;
+ newInst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_W;
+ newInst.Instruction.NumSrcRegs = 2;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = aactx->colorTemp;
+ newInst.FullSrcRegisters[1].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[1].SrcRegister.Index = aactx->texTemp;
+ ctx->emit_instruction(ctx, &newInst);
+
+ /* END */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_END;
+ newInst.Instruction.NumDstRegs = 0;
+ newInst.Instruction.NumSrcRegs = 0;
+ ctx->emit_instruction(ctx, &newInst);
+ }
+ else {
+ /* Not an END instruction.
+ * Look for writes to result.color and replace with colorTemp reg.
+ */
+ uint i;
+
+ for (i = 0; i < inst->Instruction.NumDstRegs; i++) {
+ struct tgsi_full_dst_register *dst = &inst->FullDstRegisters[i];
+ if (dst->DstRegister.File == TGSI_FILE_OUTPUT &&
+ dst->DstRegister.Index == aactx->colorOutput) {
+ dst->DstRegister.File = TGSI_FILE_TEMPORARY;
+ dst->DstRegister.Index = aactx->colorTemp;
+ }
+ }
+
+ ctx->emit_instruction(ctx, inst);
+ }
+}
+
+
+/**
+ * Generate the frag shader we'll use for drawing AA lines.
+ * This will be the user's shader plus some texture/modulate instructions.
+ */
+static boolean
+generate_aaline_fs(struct aaline_stage *aaline)
+{
+ const struct pipe_shader_state *orig_fs = &aaline->fs->state;
+ struct pipe_shader_state aaline_fs;
+ struct aa_transform_context transform;
+
+#define MAX 1000
+
+ aaline_fs = *orig_fs; /* copy to init */
+ aaline_fs.tokens = MALLOC(sizeof(struct tgsi_token) * MAX);
+ if (aaline_fs.tokens == NULL)
+ return FALSE;
+
+ memset(&transform, 0, sizeof(transform));
+ transform.colorOutput = -1;
+ transform.maxInput = -1;
+ transform.maxGeneric = -1;
+ transform.colorTemp = -1;
+ transform.texTemp = -1;
+ transform.firstInstruction = TRUE;
+ transform.base.transform_instruction = aa_transform_inst;
+ transform.base.transform_declaration = aa_transform_decl;
+
+ tgsi_transform_shader(orig_fs->tokens,
+ (struct tgsi_token *) aaline_fs.tokens,
+ MAX, &transform.base);
+
+#if 0 /* DEBUG */
+ tgsi_dump(orig_fs->tokens, 0);
+ tgsi_dump(aaline_fs.tokens, 0);
+#endif
+
+ aaline->fs->sampler_unit = transform.freeSampler;
+
+ aaline->fs->aaline_fs
+ = aaline->driver_create_fs_state(aaline->pipe, &aaline_fs);
+ if (aaline->fs->aaline_fs == NULL)
+ goto fail;
+
+ aaline->fs->generic_attrib = transform.maxGeneric + 1;
+ FREE((void *)aaline_fs.tokens);
+ return TRUE;
+
+fail:
+ FREE((void *)aaline_fs.tokens);
+ return FALSE;
+}
+
+
+/**
+ * Create the texture map we'll use for antialiasing the lines.
+ */
+static boolean
+aaline_create_texture(struct aaline_stage *aaline)
+{
+ struct pipe_context *pipe = aaline->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ struct pipe_texture texTemp;
+ uint level;
+
+ memset(&texTemp, 0, sizeof(texTemp));
+ texTemp.target = PIPE_TEXTURE_2D;
+ texTemp.format = PIPE_FORMAT_A8_UNORM; /* XXX verify supported by driver! */
+ texTemp.last_level = MAX_TEXTURE_LEVEL;
+ texTemp.width[0] = 1 << MAX_TEXTURE_LEVEL;
+ texTemp.height[0] = 1 << MAX_TEXTURE_LEVEL;
+ texTemp.depth[0] = 1;
+ pf_get_block(texTemp.format, &texTemp.block);
+
+ aaline->texture = screen->texture_create(screen, &texTemp);
+ if (!aaline->texture)
+ return FALSE;
+
+ /* Fill in mipmap images.
+ * Basically each level is solid opaque, except for the outermost
+ * texels which are zero. Special case the 1x1 and 2x2 levels.
+ */
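+ /*
+ * For illustration (derived from the loop below): a 4x4 mipmap level
+ * ends up filled with these alpha values:
+ *
+ * 0 0 0 0
+ * 0 255 255 0
+ * 0 255 255 0
+ * 0 0 0 0
+ */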
+ for (level = 0; level <= MAX_TEXTURE_LEVEL; level++) {
+ struct pipe_transfer *transfer;
+ const uint size = aaline->texture->width[level];
+ ubyte *data;
+ uint i, j;
+
+ assert(aaline->texture->width[level] == aaline->texture->height[level]);
+
+ /* This texture is new, no need to flush.
+ */
+ transfer = screen->get_tex_transfer(screen, aaline->texture, 0, level, 0,
+ PIPE_TRANSFER_WRITE, 0, 0, size, size);
+ data = screen->transfer_map(screen, transfer);
+ if (data == NULL)
+ return FALSE;
+
+ for (i = 0; i < size; i++) {
+ for (j = 0; j < size; j++) {
+ ubyte d;
+ if (size == 1) {
+ d = 255;
+ }
+ else if (size == 2) {
+ d = 200; /* tuneable */
+ }
+ else if (i == 0 || j == 0 || i == size - 1 || j == size - 1) {
+ d = 0;
+ }
+ else {
+ d = 255;
+ }
+ data[i * transfer->stride + j] = d;
+ }
+ }
+
+ /* unmap */
+ screen->transfer_unmap(screen, transfer);
+ screen->tex_transfer_destroy(transfer);
+ }
+ return TRUE;
+}
+
+
+/**
+ * Create the sampler CSO that'll be used for antialiasing.
+ * By using a mipmapped texture, we don't have to generate a different
+ * texture image for each line size.
+ */
+static boolean
+aaline_create_sampler(struct aaline_stage *aaline)
+{
+ struct pipe_sampler_state sampler;
+ struct pipe_context *pipe = aaline->pipe;
+
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_LINEAR;
+ sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
+ sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
+ sampler.normalized_coords = 1;
+ sampler.min_lod = 0.0f;
+ sampler.max_lod = MAX_TEXTURE_LEVEL;
+
+ aaline->sampler_cso = pipe->create_sampler_state(pipe, &sampler);
+ if (aaline->sampler_cso == NULL)
+ return FALSE;
+
+ return TRUE;
+}
+
+
+/**
+ * When we're about to draw our first AA line in a batch, this function is
+ * called to tell the driver to bind our modified fragment shader.
+ */
+static boolean
+bind_aaline_fragment_shader(struct aaline_stage *aaline)
+{
+ struct draw_context *draw = aaline->stage.draw;
+
+ if (!aaline->fs->aaline_fs &&
+ !generate_aaline_fs(aaline))
+ return FALSE;
+
+ draw->suspend_flushing = TRUE;
+ aaline->driver_bind_fs_state(aaline->pipe, aaline->fs->aaline_fs);
+ draw->suspend_flushing = FALSE;
+
+ return TRUE;
+}
+
+
+
+static INLINE struct aaline_stage *
+aaline_stage( struct draw_stage *stage )
+{
+ return (struct aaline_stage *) stage;
+}
+
+
+/**
+ * Draw a wide line by drawing a quad, using geometry which will
+ * fulfill GL's antialiased line requirements.
+ */
+static void
+aaline_line(struct draw_stage *stage, struct prim_header *header)
+{
+ const struct aaline_stage *aaline = aaline_stage(stage);
+ const float half_width = aaline->half_line_width;
+ struct prim_header tri;
+ struct vertex_header *v[8];
+ uint texPos = aaline->tex_slot;
+ uint posPos = aaline->pos_slot;
+ float *pos, *tex;
+ float dx = header->v[1]->data[posPos][0] - header->v[0]->data[posPos][0];
+ float dy = header->v[1]->data[posPos][1] - header->v[0]->data[posPos][1];
+ double a = atan2(dy, dx);
+ float c_a = (float) cos(a), s_a = (float) sin(a);
+ uint i;
+
+ /* XXX the ends of lines aren't quite perfect yet, but probably passable */
+ dx = 0.5F * half_width;
+ dy = half_width;
+
+ /* allocate/dup new verts */
+ for (i = 0; i < 8; i++) {
+ v[i] = dup_vert(stage, header->v[i/4], i);
+ }
+
+ /*
+ * Quad strip for line from v0 to v1 (*=endpoints):
+ *
+ * 1 3 5 7
+ * +---+---------------------+---+
+ * | |
+ * | *v0 v1* |
+ * | |
+ * +---+---------------------+---+
+ * 0 2 4 6
+ */
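+ /*
+ * Note (added for clarity): each endpoint offset below is the vector
+ * (+-dx, +-dy) rotated by the line's angle 'a'; dx = half_width/2 forms
+ * the short end caps and dy = half_width is the strip's half-height.
+ */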
+
+ /* new verts */
+ pos = v[0]->data[posPos];
+ pos[0] += (-dx * c_a - dy * s_a);
+ pos[1] += (-dx * s_a + dy * c_a);
+
+ pos = v[1]->data[posPos];
+ pos[0] += (-dx * c_a - -dy * s_a);
+ pos[1] += (-dx * s_a + -dy * c_a);
+
+ pos = v[2]->data[posPos];
+ pos[0] += ( dx * c_a - dy * s_a);
+ pos[1] += ( dx * s_a + dy * c_a);
+
+ pos = v[3]->data[posPos];
+ pos[0] += ( dx * c_a - -dy * s_a);
+ pos[1] += ( dx * s_a + -dy * c_a);
+
+ pos = v[4]->data[posPos];
+ pos[0] += (-dx * c_a - dy * s_a);
+ pos[1] += (-dx * s_a + dy * c_a);
+
+ pos = v[5]->data[posPos];
+ pos[0] += (-dx * c_a - -dy * s_a);
+ pos[1] += (-dx * s_a + -dy * c_a);
+
+ pos = v[6]->data[posPos];
+ pos[0] += ( dx * c_a - dy * s_a);
+ pos[1] += ( dx * s_a + dy * c_a);
+
+ pos = v[7]->data[posPos];
+ pos[0] += ( dx * c_a - -dy * s_a);
+ pos[1] += ( dx * s_a + -dy * c_a);
+
+ /* new texcoords */
+ tex = v[0]->data[texPos];
+ ASSIGN_4V(tex, 0, 0, 0, 1);
+
+ tex = v[1]->data[texPos];
+ ASSIGN_4V(tex, 0, 1, 0, 1);
+
+ tex = v[2]->data[texPos];
+ ASSIGN_4V(tex, .5, 0, 0, 1);
+
+ tex = v[3]->data[texPos];
+ ASSIGN_4V(tex, .5, 1, 0, 1);
+
+ tex = v[4]->data[texPos];
+ ASSIGN_4V(tex, .5, 0, 0, 1);
+
+ tex = v[5]->data[texPos];
+ ASSIGN_4V(tex, .5, 1, 0, 1);
+
+ tex = v[6]->data[texPos];
+ ASSIGN_4V(tex, 1, 0, 0, 1);
+
+ tex = v[7]->data[texPos];
+ ASSIGN_4V(tex, 1, 1, 0, 1);
+
+ /* emit 6 tris for the quad strip */
+ tri.v[0] = v[2]; tri.v[1] = v[1]; tri.v[2] = v[0];
+ stage->next->tri( stage->next, &tri );
+
+ tri.v[0] = v[3]; tri.v[1] = v[1]; tri.v[2] = v[2];
+ stage->next->tri( stage->next, &tri );
+
+ tri.v[0] = v[4]; tri.v[1] = v[3]; tri.v[2] = v[2];
+ stage->next->tri( stage->next, &tri );
+
+ tri.v[0] = v[5]; tri.v[1] = v[3]; tri.v[2] = v[4];
+ stage->next->tri( stage->next, &tri );
+
+ tri.v[0] = v[6]; tri.v[1] = v[5]; tri.v[2] = v[4];
+ stage->next->tri( stage->next, &tri );
+
+ tri.v[0] = v[7]; tri.v[1] = v[5]; tri.v[2] = v[6];
+ stage->next->tri( stage->next, &tri );
+}
+
+
+static void
+aaline_first_line(struct draw_stage *stage, struct prim_header *header)
+{
+ struct aaline_stage *aaline = aaline_stage(stage);
+ struct draw_context *draw = stage->draw;
+ struct pipe_context *pipe = aaline->pipe;
+ uint num_samplers;
+
+ assert(draw->rasterizer->line_smooth);
+
+ if (draw->rasterizer->line_width <= 3.0)
+ aaline->half_line_width = 1.5f;
+ else
+ aaline->half_line_width = 0.5f * draw->rasterizer->line_width;
+
+ /*
+ * Bind (generate) our fragprog, sampler and texture
+ */
+ if (!bind_aaline_fragment_shader(aaline)) {
+ stage->line = draw_pipe_passthrough_line;
+ stage->line(stage, header);
+ return;
+ }
+
+ /* update vertex attrib info */
+ aaline->tex_slot = draw->vs.num_vs_outputs;
+ aaline->pos_slot = draw->vs.position_output;
+
+ /* advertise the extra post-transformed vertex attribute */
+ draw->extra_vp_outputs.semantic_name = TGSI_SEMANTIC_GENERIC;
+ draw->extra_vp_outputs.semantic_index = aaline->fs->generic_attrib;
+ draw->extra_vp_outputs.slot = aaline->tex_slot;
+
+ /* how many samplers? */
+ /* we'll use sampler/texture[aaline->fs->sampler_unit] for the AA texture */
+ num_samplers = MAX2(aaline->num_textures, aaline->num_samplers);
+ num_samplers = MAX2(num_samplers, aaline->fs->sampler_unit + 1);
+
+ aaline->state.sampler[aaline->fs->sampler_unit] = aaline->sampler_cso;
+ pipe_texture_reference(&aaline->state.texture[aaline->fs->sampler_unit],
+ aaline->texture);
+
+ draw->suspend_flushing = TRUE;
+ aaline->driver_bind_sampler_states(pipe, num_samplers, aaline->state.sampler);
+ aaline->driver_set_sampler_textures(pipe, num_samplers, aaline->state.texture);
+ draw->suspend_flushing = FALSE;
+
+ /* now really draw first line */
+ stage->line = aaline_line;
+ stage->line(stage, header);
+}
+
+
+static void
+aaline_flush(struct draw_stage *stage, unsigned flags)
+{
+ struct draw_context *draw = stage->draw;
+ struct aaline_stage *aaline = aaline_stage(stage);
+ struct pipe_context *pipe = aaline->pipe;
+
+ stage->line = aaline_first_line;
+ stage->next->flush( stage->next, flags );
+
+ /* restore original frag shader, texture, sampler state */
+ draw->suspend_flushing = TRUE;
+ aaline->driver_bind_fs_state(pipe, aaline->fs->driver_fs);
+ aaline->driver_bind_sampler_states(pipe, aaline->num_samplers,
+ aaline->state.sampler);
+ aaline->driver_set_sampler_textures(pipe, aaline->num_textures,
+ aaline->state.texture);
+ draw->suspend_flushing = FALSE;
+
+ draw->extra_vp_outputs.slot = 0;
+}
+
+
+static void
+aaline_reset_stipple_counter(struct draw_stage *stage)
+{
+ stage->next->reset_stipple_counter( stage->next );
+}
+
+
+static void
+aaline_destroy(struct draw_stage *stage)
+{
+ struct aaline_stage *aaline = aaline_stage(stage);
+ uint i;
+
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
+ pipe_texture_reference(&aaline->state.texture[i], NULL);
+ }
+
+ if (aaline->sampler_cso)
+ aaline->pipe->delete_sampler_state(aaline->pipe, aaline->sampler_cso);
+
+ if (aaline->texture)
+ pipe_texture_reference(&aaline->texture, NULL);
+
+ draw_free_temp_verts( stage );
+
+ FREE( stage );
+}
+
+
+static struct aaline_stage *
+draw_aaline_stage(struct draw_context *draw)
+{
+ struct aaline_stage *aaline = CALLOC_STRUCT(aaline_stage);
+ if (aaline == NULL)
+ return NULL;
+
+ if (!draw_alloc_temp_verts( &aaline->stage, 8 ))
+ goto fail;
+
+ aaline->stage.draw = draw;
+ aaline->stage.name = "aaline";
+ aaline->stage.next = NULL;
+ aaline->stage.point = draw_pipe_passthrough_point;
+ aaline->stage.line = aaline_first_line;
+ aaline->stage.tri = draw_pipe_passthrough_tri;
+ aaline->stage.flush = aaline_flush;
+ aaline->stage.reset_stipple_counter = aaline_reset_stipple_counter;
+ aaline->stage.destroy = aaline_destroy;
+
+ return aaline;
+
+ fail:
+ if (aaline)
+ aaline_destroy(&aaline->stage);
+
+ return NULL;
+}
+
+
+static struct aaline_stage *
+aaline_stage_from_pipe(struct pipe_context *pipe)
+{
+ struct draw_context *draw = (struct draw_context *) pipe->draw;
+ return aaline_stage(draw->pipeline.aaline);
+}
+
+
+/**
+ * This function overrides the driver's create_fs_state() function and
+ * will typically be called by the state tracker.
+ */
+static void *
+aaline_create_fs_state(struct pipe_context *pipe,
+ const struct pipe_shader_state *fs)
+{
+ struct aaline_stage *aaline = aaline_stage_from_pipe(pipe);
+ struct aaline_fragment_shader *aafs = CALLOC_STRUCT(aaline_fragment_shader);
+ if (aafs == NULL)
+ return NULL;
+
+ aafs->state = *fs;
+
+ /* pass-through */
+ aafs->driver_fs = aaline->driver_create_fs_state(aaline->pipe, fs);
+
+ return aafs;
+}
+
+
+static void
+aaline_bind_fs_state(struct pipe_context *pipe, void *fs)
+{
+ struct aaline_stage *aaline = aaline_stage_from_pipe(pipe);
+ struct aaline_fragment_shader *aafs = (struct aaline_fragment_shader *) fs;
+
+ /* save current */
+ aaline->fs = aafs;
+ /* pass-through */
+ aaline->driver_bind_fs_state(aaline->pipe,
+ (aafs ? aafs->driver_fs : NULL));
+}
+
+
+static void
+aaline_delete_fs_state(struct pipe_context *pipe, void *fs)
+{
+ struct aaline_stage *aaline = aaline_stage_from_pipe(pipe);
+ struct aaline_fragment_shader *aafs = (struct aaline_fragment_shader *) fs;
+ /* pass-through */
+ aaline->driver_delete_fs_state(aaline->pipe, aafs->driver_fs);
+
+ if (aafs->aaline_fs)
+ aaline->driver_delete_fs_state(aaline->pipe, aafs->aaline_fs);
+
+ FREE(aafs);
+}
+
+
+static void
+aaline_bind_sampler_states(struct pipe_context *pipe,
+ unsigned num, void **sampler)
+{
+ struct aaline_stage *aaline = aaline_stage_from_pipe(pipe);
+
+ /* save current */
+ memcpy(aaline->state.sampler, sampler, num * sizeof(void *));
+ aaline->num_samplers = num;
+
+ /* pass-through */
+ aaline->driver_bind_sampler_states(aaline->pipe, num, sampler);
+}
+
+
+static void
+aaline_set_sampler_textures(struct pipe_context *pipe,
+ unsigned num, struct pipe_texture **texture)
+{
+ struct aaline_stage *aaline = aaline_stage_from_pipe(pipe);
+ uint i;
+
+ /* save current */
+ for (i = 0; i < num; i++) {
+ pipe_texture_reference(&aaline->state.texture[i], texture[i]);
+ }
+ for ( ; i < PIPE_MAX_SAMPLERS; i++) {
+ pipe_texture_reference(&aaline->state.texture[i], NULL);
+ }
+ aaline->num_textures = num;
+
+ /* pass-through */
+ aaline->driver_set_sampler_textures(aaline->pipe, num, texture);
+}
+
+
+/**
+ * Called by drivers that want to install this AA line prim stage
+ * into the draw module's pipeline. This will not be used if the
+ * hardware has native support for AA lines.
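+ *
+ * Typical usage (illustrative sketch, not taken from this patch): a driver
+ * lacking native AA lines calls this once at context-creation time,
+ * e.g. draw_install_aaline_stage(ctx->draw, &ctx->pipe), where 'ctx'
+ * stands for the driver's own context structure.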
+ */
+boolean
+draw_install_aaline_stage(struct draw_context *draw, struct pipe_context *pipe)
+{
+ struct aaline_stage *aaline;
+
+ pipe->draw = (void *) draw;
+
+ /*
+ * Create / install AA line drawing / prim stage
+ */
+ aaline = draw_aaline_stage( draw );
+ if (!aaline)
+ goto fail;
+
+ aaline->pipe = pipe;
+
+ /* create special texture, sampler state */
+ if (!aaline_create_texture(aaline))
+ goto fail;
+
+ if (!aaline_create_sampler(aaline))
+ goto fail;
+
+ /* save original driver functions */
+ aaline->driver_create_fs_state = pipe->create_fs_state;
+ aaline->driver_bind_fs_state = pipe->bind_fs_state;
+ aaline->driver_delete_fs_state = pipe->delete_fs_state;
+
+ aaline->driver_bind_sampler_states = pipe->bind_sampler_states;
+ aaline->driver_set_sampler_textures = pipe->set_sampler_textures;
+
+ /* override the driver's functions */
+ pipe->create_fs_state = aaline_create_fs_state;
+ pipe->bind_fs_state = aaline_bind_fs_state;
+ pipe->delete_fs_state = aaline_delete_fs_state;
+
+ pipe->bind_sampler_states = aaline_bind_sampler_states;
+ pipe->set_sampler_textures = aaline_set_sampler_textures;
+
+ /* Install once everything is known to be OK:
+ */
+ draw->pipeline.aaline = &aaline->stage;
+
+ return TRUE;
+
+ fail:
+ if (aaline)
+ aaline->stage.destroy( &aaline->stage );
+
+ return FALSE;
+}
diff --git a/src/gallium/auxiliary/draw/draw_pipe_aapoint.c b/src/gallium/auxiliary/draw/draw_pipe_aapoint.c
new file mode 100644
index 0000000000..ae1712fe12
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pipe_aapoint.c
@@ -0,0 +1,884 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * AA point stage: AA points are converted to quads and rendered with a
+ * special fragment shader. Another approach would be to use a texture
+ * map image of a point, but experiments indicate the quality isn't nearly
+ * as good as this approach.
+ *
+ * Note: this looks a lot like draw_pipe_aaline.c but there's actually little
+ * if any code that can be shared.
+ *
+ * Authors: Brian Paul
+ */
+
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_shader_tokens.h"
+
+#include "tgsi/tgsi_transform.h"
+#include "tgsi/tgsi_dump.h"
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+#include "draw_context.h"
+#include "draw_vs.h"
+#include "draw_pipe.h"
+
+
+/*
+ * Enabling NORMALIZE might give _slightly_ better results.
+ * Basically, it controls whether we compute distance as d=sqrt(x*x+y*y) or
+ * d=x*x+y*y. Since we're working with a unit circle, the latter seems
+ * close enough and saves some costly instructions.
+ */
+#define NORMALIZE 0
+
+
+/**
+ * Subclass of pipe_shader_state to carry extra fragment shader info.
+ */
+struct aapoint_fragment_shader
+{
+ struct pipe_shader_state state;
+ void *driver_fs; /**< the regular shader */
+ void *aapoint_fs; /**< the aa point-augmented shader */
+ int generic_attrib; /**< The generic input attrib/texcoord we'll use */
+};
+
+
+/**
+ * Subclass of draw_stage
+ */
+struct aapoint_stage
+{
+ struct draw_stage stage;
+
+ int psize_slot;
+ float radius;
+
+ /** this is the vertex attrib slot for the new texcoords */
+ uint tex_slot;
+ uint pos_slot;
+
+ /*
+ * Currently bound state
+ */
+ struct aapoint_fragment_shader *fs;
+
+ /*
+ * Driver interface/override functions
+ */
+ void * (*driver_create_fs_state)(struct pipe_context *,
+ const struct pipe_shader_state *);
+ void (*driver_bind_fs_state)(struct pipe_context *, void *);
+ void (*driver_delete_fs_state)(struct pipe_context *, void *);
+
+ struct pipe_context *pipe;
+};
+
+
+
+/**
+ * Subclass of tgsi_transform_context, used for transforming the
+ * user's fragment shader to add the special AA instructions.
+ */
+struct aa_transform_context {
+ struct tgsi_transform_context base;
+ uint tempsUsed; /**< bitmask */
+ int colorOutput; /**< which output is the primary color */
+ int maxInput, maxGeneric; /**< max input index found */
+ int tmp0, colorTemp; /**< temp registers */
+ boolean firstInstruction;
+};
+
+
+/**
+ * TGSI declaration transform callback.
+ * Look for two free temp regs and available input reg for new texcoords.
+ */
+static void
+aa_transform_decl(struct tgsi_transform_context *ctx,
+ struct tgsi_full_declaration *decl)
+{
+ struct aa_transform_context *aactx = (struct aa_transform_context *) ctx;
+
+ if (decl->Declaration.File == TGSI_FILE_OUTPUT &&
+ decl->Semantic.SemanticName == TGSI_SEMANTIC_COLOR &&
+ decl->Semantic.SemanticIndex == 0) {
+ aactx->colorOutput = decl->DeclarationRange.First;
+ }
+ else if (decl->Declaration.File == TGSI_FILE_INPUT) {
+ if ((int) decl->DeclarationRange.Last > aactx->maxInput)
+ aactx->maxInput = decl->DeclarationRange.Last;
+ if (decl->Semantic.SemanticName == TGSI_SEMANTIC_GENERIC &&
+ (int) decl->Semantic.SemanticIndex > aactx->maxGeneric) {
+ aactx->maxGeneric = decl->Semantic.SemanticIndex;
+ }
+ }
+ else if (decl->Declaration.File == TGSI_FILE_TEMPORARY) {
+ uint i;
+ for (i = decl->DeclarationRange.First;
+ i <= decl->DeclarationRange.Last; i++) {
+ aactx->tempsUsed |= (1 << i);
+ }
+ }
+
+ ctx->emit_declaration(ctx, decl);
+}
+
+
+/**
+ * TGSI instruction transform callback.
+ * Replace writes to result.color w/ a temp reg.
+ * Upon END instruction, insert texture sampling code for antialiasing.
+ */
+static void
+aa_transform_inst(struct tgsi_transform_context *ctx,
+ struct tgsi_full_instruction *inst)
+{
+ struct aa_transform_context *aactx = (struct aa_transform_context *) ctx;
+ struct tgsi_full_instruction newInst;
+
+ if (aactx->firstInstruction) {
+ /* emit our new declarations before the first instruction */
+
+ struct tgsi_full_declaration decl;
+ const int texInput = aactx->maxInput + 1;
+ int tmp0;
+ uint i;
+
+ /* find two free temp regs */
+ for (i = 0; i < 32; i++) {
+ if ((aactx->tempsUsed & (1 << i)) == 0) {
+ /* found a free temp */
+ if (aactx->tmp0 < 0)
+ aactx->tmp0 = i;
+ else if (aactx->colorTemp < 0)
+ aactx->colorTemp = i;
+ else
+ break;
+ }
+ }
+
+ assert(aactx->colorTemp != aactx->tmp0);
+
+ tmp0 = aactx->tmp0;
+
+ /* declare new generic input/texcoord */
+ decl = tgsi_default_full_declaration();
+ decl.Declaration.File = TGSI_FILE_INPUT;
+ /* XXX this could be linear... */
+ decl.Declaration.Interpolate = TGSI_INTERPOLATE_PERSPECTIVE;
+ decl.Declaration.Semantic = 1;
+ decl.Semantic.SemanticName = TGSI_SEMANTIC_GENERIC;
+ decl.Semantic.SemanticIndex = aactx->maxGeneric + 1;
+ decl.DeclarationRange.First =
+ decl.DeclarationRange.Last = texInput;
+ ctx->emit_declaration(ctx, &decl);
+
+ /* declare new temp regs */
+ decl = tgsi_default_full_declaration();
+ decl.Declaration.File = TGSI_FILE_TEMPORARY;
+ decl.DeclarationRange.First =
+ decl.DeclarationRange.Last = tmp0;
+ ctx->emit_declaration(ctx, &decl);
+
+ decl = tgsi_default_full_declaration();
+ decl.Declaration.File = TGSI_FILE_TEMPORARY;
+ decl.DeclarationRange.First =
+ decl.DeclarationRange.Last = aactx->colorTemp;
+ ctx->emit_declaration(ctx, &decl);
+
+ aactx->firstInstruction = FALSE;
+
+
+ /*
+ * Emit code to compute fragment coverage, kill if outside point radius
+ *
+ * Temp reg0 usage:
+ * t0.x = distance of fragment from center point
+ * t0.y = boolean, is t0.x > 1.0, also misc temp usage
+ * t0.z = temporary for computing 1/(1-k) value
+ * t0.w = final coverage value
+ */
+
+ /* MUL t0.xy, tex, tex; # compute x^2, y^2 */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_MUL;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullDstRegisters[0].DstRegister.Index = tmp0;
+ newInst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_XY;
+ newInst.Instruction.NumSrcRegs = 2;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_INPUT;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = texInput;
+ newInst.FullSrcRegisters[1].SrcRegister.File = TGSI_FILE_INPUT;
+ newInst.FullSrcRegisters[1].SrcRegister.Index = texInput;
+ ctx->emit_instruction(ctx, &newInst);
+
+ /* ADD t0.x, t0.x, t0.y; # x^2 + y^2 */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_ADD;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullDstRegisters[0].DstRegister.Index = tmp0;
+ newInst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_X;
+ newInst.Instruction.NumSrcRegs = 2;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = tmp0;
+ newInst.FullSrcRegisters[0].SrcRegister.SwizzleX = TGSI_SWIZZLE_X;
+ newInst.FullSrcRegisters[1].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[1].SrcRegister.Index = tmp0;
+ newInst.FullSrcRegisters[1].SrcRegister.SwizzleX = TGSI_SWIZZLE_Y;
+ ctx->emit_instruction(ctx, &newInst);
+
+#if NORMALIZE /* OPTIONAL normalization of length */
+ /* RSQ t0.x, t0.x; */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_RSQ;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullDstRegisters[0].DstRegister.Index = tmp0;
+ newInst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_X;
+ newInst.Instruction.NumSrcRegs = 1;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = tmp0;
+ ctx->emit_instruction(ctx, &newInst);
+
+ /* RCP t0.x, t0.x; */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_RCP;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullDstRegisters[0].DstRegister.Index = tmp0;
+ newInst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_X;
+ newInst.Instruction.NumSrcRegs = 1;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = tmp0;
+ ctx->emit_instruction(ctx, &newInst);
+#endif
+
+ /* SGT t0.y, t0.xxxx, tex.wwww; # bool b = d > 1 (NOTE tex.w == 1) */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_SGT;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullDstRegisters[0].DstRegister.Index = tmp0;
+ newInst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_Y;
+ newInst.Instruction.NumSrcRegs = 2;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = tmp0;
+ newInst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_X;
+ newInst.FullSrcRegisters[1].SrcRegister.File = TGSI_FILE_INPUT;
+ newInst.FullSrcRegisters[1].SrcRegister.Index = texInput;
+ newInst.FullSrcRegisters[1].SrcRegister.SwizzleY = TGSI_SWIZZLE_W;
+ ctx->emit_instruction(ctx, &newInst);
+
+ /* KIL -tmp0.yyyy; # if -tmp0.y < 0, KILL */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_KIL;
+ newInst.Instruction.NumDstRegs = 0;
+ newInst.Instruction.NumSrcRegs = 1;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = tmp0;
+ newInst.FullSrcRegisters[0].SrcRegister.SwizzleX = TGSI_SWIZZLE_Y;
+ newInst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_Y;
+ newInst.FullSrcRegisters[0].SrcRegister.SwizzleZ = TGSI_SWIZZLE_Y;
+ newInst.FullSrcRegisters[0].SrcRegister.SwizzleW = TGSI_SWIZZLE_Y;
+ newInst.FullSrcRegisters[0].SrcRegister.Negate = 1;
+ ctx->emit_instruction(ctx, &newInst);
+
+
+ /* compute coverage factor = (1-d)/(1-k) */
+
+ /* SUB t0.z, tex.w, tex.z; # m = 1 - k */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_SUB;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullDstRegisters[0].DstRegister.Index = tmp0;
+ newInst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_Z;
+ newInst.Instruction.NumSrcRegs = 2;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_INPUT;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = texInput;
+ newInst.FullSrcRegisters[0].SrcRegister.SwizzleZ = TGSI_SWIZZLE_W;
+ newInst.FullSrcRegisters[1].SrcRegister.File = TGSI_FILE_INPUT;
+ newInst.FullSrcRegisters[1].SrcRegister.Index = texInput;
+ newInst.FullSrcRegisters[1].SrcRegister.SwizzleZ = TGSI_SWIZZLE_Z;
+ ctx->emit_instruction(ctx, &newInst);
+
+ /* RCP t0.z, t0.z; # t0.z = 1 / m */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_RCP;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullDstRegisters[0].DstRegister.Index = tmp0;
+ newInst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_Z;
+ newInst.Instruction.NumSrcRegs = 1;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = tmp0;
+ newInst.FullSrcRegisters[0].SrcRegister.SwizzleX = TGSI_SWIZZLE_Z;
+ ctx->emit_instruction(ctx, &newInst);
+
+ /* SUB t0.y, tex.w, t0.x; # t0.y = 1 - d (tex.w == 1) */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_SUB;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullDstRegisters[0].DstRegister.Index = tmp0;
+ newInst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_Y;
+ newInst.Instruction.NumSrcRegs = 2;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_INPUT;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = texInput;
+ newInst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_W;
+ newInst.FullSrcRegisters[1].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[1].SrcRegister.Index = tmp0;
+ newInst.FullSrcRegisters[1].SrcRegister.SwizzleY = TGSI_SWIZZLE_X;
+ ctx->emit_instruction(ctx, &newInst);
+
+ /* MUL t0.w, t0.y, t0.z; # coverage = (1-d) * 1/(1-k) */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_MUL;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullDstRegisters[0].DstRegister.Index = tmp0;
+ newInst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_W;
+ newInst.Instruction.NumSrcRegs = 2;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = tmp0;
+ newInst.FullSrcRegisters[0].SrcRegister.SwizzleW = TGSI_SWIZZLE_Y;
+ newInst.FullSrcRegisters[1].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[1].SrcRegister.Index = tmp0;
+ newInst.FullSrcRegisters[1].SrcRegister.SwizzleW = TGSI_SWIZZLE_Z;
+ ctx->emit_instruction(ctx, &newInst);
+
+ /* SLE t0.y, t0.x, tex.z; # bool b = distance <= k */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_SLE;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullDstRegisters[0].DstRegister.Index = tmp0;
+ newInst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_Y;
+ newInst.Instruction.NumSrcRegs = 2;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = tmp0;
+ newInst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_X;
+ newInst.FullSrcRegisters[1].SrcRegister.File = TGSI_FILE_INPUT;
+ newInst.FullSrcRegisters[1].SrcRegister.Index = texInput;
+ newInst.FullSrcRegisters[1].SrcRegister.SwizzleY = TGSI_SWIZZLE_Z;
+ ctx->emit_instruction(ctx, &newInst);
+
+ /* CMP t0.w, -t0.y, tex.w, t0.w;
+ * # if -t0.y < 0 then
+ * t0.w = 1
+ * else
+ * t0.w = t0.w
+ */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_CMP;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullDstRegisters[0].DstRegister.Index = tmp0;
+ newInst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_W;
+ newInst.Instruction.NumSrcRegs = 3;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = tmp0;
+ newInst.FullSrcRegisters[0].SrcRegister.SwizzleX = TGSI_SWIZZLE_Y;
+ newInst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_Y;
+ newInst.FullSrcRegisters[0].SrcRegister.SwizzleZ = TGSI_SWIZZLE_Y;
+ newInst.FullSrcRegisters[0].SrcRegister.SwizzleW = TGSI_SWIZZLE_Y;
+ newInst.FullSrcRegisters[0].SrcRegister.Negate = 1;
+ newInst.FullSrcRegisters[1].SrcRegister.File = TGSI_FILE_INPUT;
+ newInst.FullSrcRegisters[1].SrcRegister.Index = texInput;
+ newInst.FullSrcRegisters[1].SrcRegister.SwizzleX = TGSI_SWIZZLE_W;
+ newInst.FullSrcRegisters[1].SrcRegister.SwizzleY = TGSI_SWIZZLE_W;
+ newInst.FullSrcRegisters[1].SrcRegister.SwizzleZ = TGSI_SWIZZLE_W;
+ newInst.FullSrcRegisters[1].SrcRegister.SwizzleW = TGSI_SWIZZLE_W;
+ newInst.FullSrcRegisters[2].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[2].SrcRegister.Index = tmp0;
+ newInst.FullSrcRegisters[2].SrcRegister.SwizzleX = TGSI_SWIZZLE_W;
+ newInst.FullSrcRegisters[2].SrcRegister.SwizzleY = TGSI_SWIZZLE_W;
+ newInst.FullSrcRegisters[2].SrcRegister.SwizzleZ = TGSI_SWIZZLE_W;
+ newInst.FullSrcRegisters[2].SrcRegister.SwizzleW = TGSI_SWIZZLE_W;
+ ctx->emit_instruction(ctx, &newInst);
+
+ }
+
+ if (inst->Instruction.Opcode == TGSI_OPCODE_END) {
+ /* add alpha modulation code at tail of program */
+
+ /* MOV result.color.xyz, colorTemp; */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_MOV;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_OUTPUT;
+ newInst.FullDstRegisters[0].DstRegister.Index = aactx->colorOutput;
+ newInst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_XYZ;
+ newInst.Instruction.NumSrcRegs = 1;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = aactx->colorTemp;
+ ctx->emit_instruction(ctx, &newInst);
+
+ /* MUL result.color.w, colorTemp, tmp0.w; */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_MUL;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_OUTPUT;
+ newInst.FullDstRegisters[0].DstRegister.Index = aactx->colorOutput;
+ newInst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_W;
+ newInst.Instruction.NumSrcRegs = 2;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = aactx->colorTemp;
+ newInst.FullSrcRegisters[1].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[1].SrcRegister.Index = aactx->tmp0;
+ ctx->emit_instruction(ctx, &newInst);
+ }
+ else {
+ /* Not an END instruction.
+ * Look for writes to result.color and replace with colorTemp reg.
+ */
+ uint i;
+
+ for (i = 0; i < inst->Instruction.NumDstRegs; i++) {
+ struct tgsi_full_dst_register *dst = &inst->FullDstRegisters[i];
+ if (dst->DstRegister.File == TGSI_FILE_OUTPUT &&
+ dst->DstRegister.Index == aactx->colorOutput) {
+ dst->DstRegister.File = TGSI_FILE_TEMPORARY;
+ dst->DstRegister.Index = aactx->colorTemp;
+ }
+ }
+ }
+
+ ctx->emit_instruction(ctx, inst);
+}
+
+
+/**
+ * Generate the frag shader we'll use for drawing AA points.
+ * This will be the user's shader plus some texture/modulate instructions.
+ */
+static boolean
+generate_aapoint_fs(struct aapoint_stage *aapoint)
+{
+ const struct pipe_shader_state *orig_fs = &aapoint->fs->state;
+ struct pipe_shader_state aapoint_fs;
+ struct aa_transform_context transform;
+
+#define MAX 1000
+
+ aapoint_fs = *orig_fs; /* copy to init */
+ aapoint_fs.tokens = MALLOC(sizeof(struct tgsi_token) * MAX);
+ if (aapoint_fs.tokens == NULL)
+ return FALSE;
+
+ memset(&transform, 0, sizeof(transform));
+ transform.colorOutput = -1;
+ transform.maxInput = -1;
+ transform.maxGeneric = -1;
+ transform.colorTemp = -1;
+ transform.tmp0 = -1;
+ transform.firstInstruction = TRUE;
+ transform.base.transform_instruction = aa_transform_inst;
+ transform.base.transform_declaration = aa_transform_decl;
+
+ tgsi_transform_shader(orig_fs->tokens,
+ (struct tgsi_token *) aapoint_fs.tokens,
+ MAX, &transform.base);
+
+#if 0 /* DEBUG */
+ printf("draw_aapoint, orig shader:\n");
+ tgsi_dump(orig_fs->tokens, 0);
+ printf("draw_aapoint, new shader:\n");
+ tgsi_dump(aapoint_fs.tokens, 0);
+#endif
+
+ aapoint->fs->aapoint_fs
+ = aapoint->driver_create_fs_state(aapoint->pipe, &aapoint_fs);
+ if (aapoint->fs->aapoint_fs == NULL)
+ goto fail;
+
+ aapoint->fs->generic_attrib = transform.maxGeneric + 1;
+ FREE((void *)aapoint_fs.tokens);
+ return TRUE;
+
+fail:
+ FREE((void *)aapoint_fs.tokens);
+ return FALSE;
+}
+
+
+/**
+ * When we're about to draw our first AA point in a batch, this function is
+ * called to tell the driver to bind our modified fragment shader.
+ */
+static boolean
+bind_aapoint_fragment_shader(struct aapoint_stage *aapoint)
+{
+ struct draw_context *draw = aapoint->stage.draw;
+
+ if (!aapoint->fs->aapoint_fs &&
+ !generate_aapoint_fs(aapoint))
+ return FALSE;
+
+ draw->suspend_flushing = TRUE;
+ aapoint->driver_bind_fs_state(aapoint->pipe, aapoint->fs->aapoint_fs);
+ draw->suspend_flushing = FALSE;
+
+ return TRUE;
+}
+
+
+
+static INLINE struct aapoint_stage *
+aapoint_stage( struct draw_stage *stage )
+{
+ return (struct aapoint_stage *) stage;
+}
+
+
+
+
+/**
+ * Draw an AA point by drawing a quad.
+ */
+static void
+aapoint_point(struct draw_stage *stage, struct prim_header *header)
+{
+ const struct aapoint_stage *aapoint = aapoint_stage(stage);
+ struct prim_header tri;
+ struct vertex_header *v[4];
+ uint texPos = aapoint->tex_slot;
+ uint pos_slot = aapoint->pos_slot;
+ float radius, *pos, *tex;
+ uint i;
+ float k;
+
+ if (aapoint->psize_slot >= 0) {
+ radius = 0.5f * header->v[0]->data[aapoint->psize_slot][0];
+ }
+ else {
+ radius = aapoint->radius;
+ }
+
+ /*
+ * Note: the texcoords (generic attrib, really) we use are special:
+ * The S and T components simply vary from -1 to +1.
+ * The R component is k, below.
+ * The Q component is 1.0 and will be used as a handy constant in the
+ * fragment shader.
+ */
+
+ /*
+ * k is the threshold distance from the point's center at which
+ * we begin alpha attenuation (the coverage value).
+ * Operating within a unit circle, we'll compute the fragment's
+ * distance 'd' from the center point using the texcoords.
+ * IF d > 1.0 THEN
+ * KILL fragment
+ * ELSE IF d > k THEN
+ * compute coverage in [0,1] proportional to d in [k, 1].
+ * ELSE
+ * coverage = 1.0; // full coverage
+ * ENDIF
+ *
+ * Note: the ELSEIF and ELSE clauses are actually implemented with CMP to
+ * avoid using IF/ELSE/ENDIF TGSI opcodes.
+ */
+
+#if !NORMALIZE
+ k = 1.0f / radius;
+ k = 1.0f - 2.0f * k + k * k;
+#else
+ k = 1.0f - 1.0f / radius;
+#endif
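+ /*
+ * Worked example (illustrative, not from the original source): with
+ * radius = 4, the non-NORMALIZE path gives k = (1 - 1/4)^2 = 0.5625,
+ * while the NORMALIZE path gives k = 1 - 1/4 = 0.75; the former is the
+ * square of the latter, matching the squared distance 'd' used when
+ * NORMALIZE is 0.
+ */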
+
+ /* allocate/dup new verts */
+ for (i = 0; i < 4; i++) {
+ v[i] = dup_vert(stage, header->v[0], i);
+ }
+
+ /* new verts */
+ pos = v[0]->data[pos_slot];
+ pos[0] -= radius;
+ pos[1] -= radius;
+
+ pos = v[1]->data[pos_slot];
+ pos[0] += radius;
+ pos[1] -= radius;
+
+ pos = v[2]->data[pos_slot];
+ pos[0] += radius;
+ pos[1] += radius;
+
+ pos = v[3]->data[pos_slot];
+ pos[0] -= radius;
+ pos[1] += radius;
+
+ /* new texcoords */
+ tex = v[0]->data[texPos];
+ ASSIGN_4V(tex, -1, -1, k, 1);
+
+ tex = v[1]->data[texPos];
+ ASSIGN_4V(tex, 1, -1, k, 1);
+
+ tex = v[2]->data[texPos];
+ ASSIGN_4V(tex, 1, 1, k, 1);
+
+ tex = v[3]->data[texPos];
+ ASSIGN_4V(tex, -1, 1, k, 1);
+
+ /* emit 2 tris for the quad */
+ tri.v[0] = v[0];
+ tri.v[1] = v[1];
+ tri.v[2] = v[2];
+ stage->next->tri( stage->next, &tri );
+
+ tri.v[0] = v[0];
+ tri.v[1] = v[2];
+ tri.v[2] = v[3];
+ stage->next->tri( stage->next, &tri );
+}
+
+
+static void
+aapoint_first_point(struct draw_stage *stage, struct prim_header *header)
+{
+ struct aapoint_stage *aapoint = aapoint_stage(stage);
+ struct draw_context *draw = stage->draw;
+
+ assert(draw->rasterizer->point_smooth);
+
+ if (draw->rasterizer->point_size <= 2.0)
+ aapoint->radius = 1.0f;
+ else
+ aapoint->radius = 0.5f * draw->rasterizer->point_size;
+
+ /*
+ * Bind (generate) our fragprog.
+ */
+ bind_aapoint_fragment_shader(aapoint);
+
+ /* update vertex attrib info */
+ aapoint->tex_slot = draw->vs.num_vs_outputs;
+ assert(aapoint->tex_slot > 0); /* output[0] is vertex pos */
+
+ aapoint->pos_slot = draw->vs.position_output;
+
+ draw->extra_vp_outputs.semantic_name = TGSI_SEMANTIC_GENERIC;
+ draw->extra_vp_outputs.semantic_index = aapoint->fs->generic_attrib;
+ draw->extra_vp_outputs.slot = aapoint->tex_slot;
+
+ /* find psize slot in post-transform vertex */
+ aapoint->psize_slot = -1;
+ if (draw->rasterizer->point_size_per_vertex) {
+ /* find PSIZ vertex output */
+ const struct draw_vertex_shader *vs = draw->vs.vertex_shader;
+ uint i;
+ for (i = 0; i < vs->info.num_outputs; i++) {
+ if (vs->info.output_semantic_name[i] == TGSI_SEMANTIC_PSIZE) {
+ aapoint->psize_slot = i;
+ break;
+ }
+ }
+ }
+
+ /* now really draw first point */
+ stage->point = aapoint_point;
+ stage->point(stage, header);
+}
+
+
+static void
+aapoint_flush(struct draw_stage *stage, unsigned flags)
+{
+ struct draw_context *draw = stage->draw;
+ struct aapoint_stage *aapoint = aapoint_stage(stage);
+ struct pipe_context *pipe = aapoint->pipe;
+
+ stage->point = aapoint_first_point;
+ stage->next->flush( stage->next, flags );
+
+ /* restore original frag shader */
+ draw->suspend_flushing = TRUE;
+ aapoint->driver_bind_fs_state(pipe, aapoint->fs->driver_fs);
+ draw->suspend_flushing = FALSE;
+
+ draw->extra_vp_outputs.slot = 0;
+}
+
+
+static void
+aapoint_reset_stipple_counter(struct draw_stage *stage)
+{
+ stage->next->reset_stipple_counter( stage->next );
+}
+
+
+static void
+aapoint_destroy(struct draw_stage *stage)
+{
+ draw_free_temp_verts( stage );
+ FREE( stage );
+}
+
+
+static struct aapoint_stage *
+draw_aapoint_stage(struct draw_context *draw)
+{
+ struct aapoint_stage *aapoint = CALLOC_STRUCT(aapoint_stage);
+ if (aapoint == NULL)
+ goto fail;
+
+ if (!draw_alloc_temp_verts( &aapoint->stage, 4 ))
+ goto fail;
+
+ aapoint->stage.draw = draw;
+ aapoint->stage.name = "aapoint";
+ aapoint->stage.next = NULL;
+ aapoint->stage.point = aapoint_first_point;
+ aapoint->stage.line = draw_pipe_passthrough_line;
+ aapoint->stage.tri = draw_pipe_passthrough_tri;
+ aapoint->stage.flush = aapoint_flush;
+ aapoint->stage.reset_stipple_counter = aapoint_reset_stipple_counter;
+ aapoint->stage.destroy = aapoint_destroy;
+
+ return aapoint;
+
+ fail:
+ if (aapoint)
+ aapoint_destroy(&aapoint->stage);
+
+ return NULL;
+}
+
+
+static struct aapoint_stage *
+aapoint_stage_from_pipe(struct pipe_context *pipe)
+{
+ struct draw_context *draw = (struct draw_context *) pipe->draw;
+ return aapoint_stage(draw->pipeline.aapoint);
+}
+
+
+/**
+ * This function overrides the driver's create_fs_state() function and
+ * will typically be called by the state tracker.
+ */
+static void *
+aapoint_create_fs_state(struct pipe_context *pipe,
+ const struct pipe_shader_state *fs)
+{
+ struct aapoint_stage *aapoint = aapoint_stage_from_pipe(pipe);
+ struct aapoint_fragment_shader *aafs = CALLOC_STRUCT(aapoint_fragment_shader);
+ if (aafs == NULL)
+ return NULL;
+
+ aafs->state = *fs;
+
+ /* pass-through */
+ aafs->driver_fs = aapoint->driver_create_fs_state(aapoint->pipe, fs);
+
+ return aafs;
+}
+
+
+static void
+aapoint_bind_fs_state(struct pipe_context *pipe, void *fs)
+{
+ struct aapoint_stage *aapoint = aapoint_stage_from_pipe(pipe);
+ struct aapoint_fragment_shader *aafs = (struct aapoint_fragment_shader *) fs;
+ /* save current */
+ aapoint->fs = aafs;
+ /* pass-through */
+ aapoint->driver_bind_fs_state(aapoint->pipe,
+ (aafs ? aafs->driver_fs : NULL));
+}
+
+
+static void
+aapoint_delete_fs_state(struct pipe_context *pipe, void *fs)
+{
+ struct aapoint_stage *aapoint = aapoint_stage_from_pipe(pipe);
+ struct aapoint_fragment_shader *aafs = (struct aapoint_fragment_shader *) fs;
+
+ /* pass-through */
+ aapoint->driver_delete_fs_state(aapoint->pipe, aafs->driver_fs);
+
+ if (aafs->aapoint_fs)
+ aapoint->driver_delete_fs_state(aapoint->pipe, aafs->aapoint_fs);
+
+ FREE(aafs);
+}
+
+
+/**
+ * Called by drivers that want to install this AA point prim stage
+ * into the draw module's pipeline. This will not be used if the
+ * hardware has native support for AA points.
+ */
+boolean
+draw_install_aapoint_stage(struct draw_context *draw,
+ struct pipe_context *pipe)
+{
+ struct aapoint_stage *aapoint;
+
+ pipe->draw = (void *) draw;
+
+ /*
+ * Create / install AA point drawing / prim stage
+ */
+ aapoint = draw_aapoint_stage( draw );
+ if (aapoint == NULL)
+ goto fail;
+
+ aapoint->pipe = pipe;
+
+ /* save original driver functions */
+ aapoint->driver_create_fs_state = pipe->create_fs_state;
+ aapoint->driver_bind_fs_state = pipe->bind_fs_state;
+ aapoint->driver_delete_fs_state = pipe->delete_fs_state;
+
+ /* override the driver's functions */
+ pipe->create_fs_state = aapoint_create_fs_state;
+ pipe->bind_fs_state = aapoint_bind_fs_state;
+ pipe->delete_fs_state = aapoint_delete_fs_state;
+
+ draw->pipeline.aapoint = &aapoint->stage;
+
+ return TRUE;
+
+ fail:
+ if (aapoint)
+ aapoint->stage.destroy( &aapoint->stage );
+
+ return FALSE;
+}
diff --git a/src/gallium/auxiliary/draw/draw_pipe_clip.c b/src/gallium/auxiliary/draw/draw_pipe_clip.c
new file mode 100644
index 0000000000..0670268a19
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pipe_clip.c
@@ -0,0 +1,516 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \brief Clipping stage
+ *
+ * \author Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "util/u_memory.h"
+#include "util/u_math.h"
+
+#include "pipe/p_shader_tokens.h"
+
+#include "draw_vs.h"
+#include "draw_pipe.h"
+
+
+#ifndef IS_NEGATIVE
+#define IS_NEGATIVE(X) ((X) < 0.0)
+#endif
+
+#ifndef DIFFERENT_SIGNS
+#define DIFFERENT_SIGNS(x, y) ((x) * (y) <= 0.0F && (x) - (y) != 0.0F)
+#endif
+
+#ifndef MAX_CLIPPED_VERTICES
+#define MAX_CLIPPED_VERTICES ((2 * (6 + PIPE_MAX_CLIP_PLANES))+1)
+#endif
+
+
+
+struct clipper {
+ struct draw_stage stage; /**< base class */
+
+ /* Basically duplicate some of the flatshading logic here:
+ */
+ boolean flat;
+ uint num_color_attribs;
+ uint color_attribs[4]; /* front/back primary/secondary colors */
+
+ float (*plane)[4];
+};
+
+
+/* Cast the base draw_stage pointer back to our clipper subclass:
+ */
+static INLINE struct clipper *clipper_stage( struct draw_stage *stage )
+{
+ return (struct clipper *)stage;
+}
+
+
+#define LINTERP(T, OUT, IN) ((OUT) + (T) * ((IN) - (OUT)))
+
+
+/* All attributes are float[4], so this is easy:
+ */
+static void interp_attr( float *fdst,
+ float t,
+ const float *fin,
+ const float *fout )
+{
+ fdst[0] = LINTERP( t, fout[0], fin[0] );
+ fdst[1] = LINTERP( t, fout[1], fin[1] );
+ fdst[2] = LINTERP( t, fout[2], fin[2] );
+ fdst[3] = LINTERP( t, fout[3], fin[3] );
+}
+
+static void copy_colors( struct draw_stage *stage,
+ struct vertex_header *dst,
+ const struct vertex_header *src )
+{
+ const struct clipper *clipper = clipper_stage(stage);
+ uint i;
+ for (i = 0; i < clipper->num_color_attribs; i++) {
+ const uint attr = clipper->color_attribs[i];
+ COPY_4FV(dst->data[attr], src->data[attr]);
+ }
+}
+
+
+
+/* Interpolate between two vertices to produce a third.
+ */
+static void interp( const struct clipper *clip,
+ struct vertex_header *dst,
+ float t,
+ const struct vertex_header *out,
+ const struct vertex_header *in )
+{
+ const unsigned nr_attrs = clip->stage.draw->vs.num_vs_outputs;
+ const unsigned pos_attr = clip->stage.draw->vs.position_output;
+ unsigned j;
+
+ /* Vertex header.
+ */
+ {
+ dst->clipmask = 0;
+ dst->edgeflag = 0; /* will get overwritten later */
+ dst->pad = 0;
+ dst->vertex_id = UNDEFINED_VERTEX_ID;
+ }
+
+ /* Clip coordinates: interpolate normally
+ */
+ {
+ interp_attr(dst->clip, t, in->clip, out->clip);
+ }
+
+ /* Do the projective divide and insert window coordinates:
+ */
+ {
+ const float *pos = dst->clip;
+ const float *scale = clip->stage.draw->viewport.scale;
+ const float *trans = clip->stage.draw->viewport.translate;
+ const float oow = 1.0f / pos[3];
+
+ dst->data[pos_attr][0] = pos[0] * oow * scale[0] + trans[0];
+ dst->data[pos_attr][1] = pos[1] * oow * scale[1] + trans[1];
+ dst->data[pos_attr][2] = pos[2] * oow * scale[2] + trans[2];
+ dst->data[pos_attr][3] = oow;
+ }
+
+ /* Other attributes
+ */
+ for (j = 0; j < nr_attrs; j++) {
+ if (j != pos_attr)
+ interp_attr(dst->data[j], t, in->data[j], out->data[j]);
+ }
+}
+
+
+static void emit_poly( struct draw_stage *stage,
+ struct vertex_header **inlist,
+ unsigned n,
+ const struct prim_header *origPrim)
+{
+ struct prim_header header;
+ unsigned i;
+
+ const ushort edge_first = DRAW_PIPE_EDGE_FLAG_2;
+ const ushort edge_middle = DRAW_PIPE_EDGE_FLAG_0;
+ const ushort edge_last = DRAW_PIPE_EDGE_FLAG_1;
+
+ /* later stages may need the determinant, but only the sign matters */
+ header.det = origPrim->det;
+ header.flags = DRAW_PIPE_RESET_STIPPLE | edge_first | edge_middle;
+ header.pad = 0;
+
+ for (i = 2; i < n; i++, header.flags = edge_middle) {
+ header.v[0] = inlist[i-1];
+ header.v[1] = inlist[i];
+ header.v[2] = inlist[0]; /* keep in v[2] for flatshading */
+
+ if (i == n-1)
+ header.flags |= edge_last;
+
+ if (0) {
+ const struct draw_vertex_shader *vs = stage->draw->vs.vertex_shader;
+ uint j, k;
+ debug_printf("Clipped tri:\n");
+ for (j = 0; j < 3; j++) {
+ for (k = 0; k < vs->info.num_outputs; k++) {
+ debug_printf(" Vert %d: Attr %d: %f %f %f %f\n", j, k,
+ header.v[j]->data[k][0],
+ header.v[j]->data[k][1],
+ header.v[j]->data[k][2],
+ header.v[j]->data[k][3]);
+ }
+ }
+ }
+
+ stage->next->tri( stage->next, &header );
+ }
+}
+
+static INLINE float
+dot4(const float *a, const float *b)
+{
+ return (a[0]*b[0] +
+ a[1]*b[1] +
+ a[2]*b[2] +
+ a[3]*b[3]);
+}
+
+
+/* Clip a triangle against the viewport and user clip planes.
+ */
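+/*
+ * The loop below is essentially Sutherland-Hodgman polygon clipping:
+ * for each plane still set in 'clipmask', walk the current vertex list,
+ * keep the vertices that are inside, and insert an interpolated vertex
+ * wherever an edge crosses the plane, ping-ponging between the a[] and
+ * b[] lists.
+ */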
+static void
+do_clip_tri( struct draw_stage *stage,
+ struct prim_header *header,
+ unsigned clipmask )
+{
+ struct clipper *clipper = clipper_stage( stage );
+ struct vertex_header *a[MAX_CLIPPED_VERTICES];
+ struct vertex_header *b[MAX_CLIPPED_VERTICES];
+ struct vertex_header **inlist = a;
+ struct vertex_header **outlist = b;
+ unsigned tmpnr = 0;
+ unsigned n = 3;
+ unsigned i;
+
+ inlist[0] = header->v[0];
+ inlist[1] = header->v[1];
+ inlist[2] = header->v[2];
+
+ while (clipmask && n >= 3) {
+ const unsigned plane_idx = ffs(clipmask)-1;
+ const float *plane = clipper->plane[plane_idx];
+ struct vertex_header *vert_prev = inlist[0];
+ float dp_prev = dot4( vert_prev->clip, plane );
+ unsigned outcount = 0;
+
+ clipmask &= ~(1<<plane_idx);
+
+ inlist[n] = inlist[0]; /* prevent rotation of vertices */
+
+ for (i = 1; i <= n; i++) {
+ struct vertex_header *vert = inlist[i];
+
+ float dp = dot4( vert->clip, plane );
+
+ if (!IS_NEGATIVE(dp_prev)) {
+ outlist[outcount++] = vert_prev;
+ }
+
+ if (DIFFERENT_SIGNS(dp, dp_prev)) {
+ struct vertex_header *new_vert = clipper->stage.tmp[tmpnr++];
+ outlist[outcount++] = new_vert;
+
+ if (IS_NEGATIVE(dp)) {
+ /* Going out of bounds. Avoid division by zero as we
+ * know dp != dp_prev from DIFFERENT_SIGNS, above.
+ */
+ float t = dp / (dp - dp_prev);
+ interp( clipper, new_vert, t, vert, vert_prev );
+
+ /* Force edgeflag true in this case:
+ */
+ new_vert->edgeflag = 1;
+ } else {
+ /* Coming back in.
+ */
+ float t = dp_prev / (dp_prev - dp);
+ interp( clipper, new_vert, t, vert_prev, vert );
+
+ /* Copy starting vert's edgeflag:
+ */
+ new_vert->edgeflag = vert_prev->edgeflag;
+ }
+ }
+
+ vert_prev = vert;
+ dp_prev = dp;
+ }
+
+ {
+ struct vertex_header **tmp = inlist;
+ inlist = outlist;
+ outlist = tmp;
+ n = outcount;
+ }
+ }
+
+ /* If flat-shading, copy color to new provoking vertex.
+ */
+ if (clipper->flat && inlist[0] != header->v[2]) {
+ if (1) {
+ inlist[0] = dup_vert(stage, inlist[0], tmpnr++);
+ }
+
+ copy_colors(stage, inlist[0], header->v[2]);
+ }
+
+
+
+ /* Emit the polygon as triangles to the setup stage:
+ */
+ if (n >= 3)
+ emit_poly( stage, inlist, n, header );
+}
+
+
+/* Clip a line against the viewport and user clip planes.
+ */
+static void
+do_clip_line( struct draw_stage *stage,
+ struct prim_header *header,
+ unsigned clipmask )
+{
+ const struct clipper *clipper = clipper_stage( stage );
+ struct vertex_header *v0 = header->v[0];
+ struct vertex_header *v1 = header->v[1];
+ const float *pos0 = v0->clip;
+ const float *pos1 = v1->clip;
+ float t0 = 0.0F;
+ float t1 = 0.0F;
+ struct prim_header newprim;
+
+ while (clipmask) {
+ const unsigned plane_idx = ffs(clipmask)-1;
+ const float *plane = clipper->plane[plane_idx];
+ const float dp0 = dot4( pos0, plane );
+ const float dp1 = dot4( pos1, plane );
+
+ if (dp1 < 0.0F) {
+ float t = dp1 / (dp1 - dp0);
+ t1 = MAX2(t1, t);
+ }
+
+ if (dp0 < 0.0F) {
+ float t = dp0 / (dp0 - dp1);
+ t0 = MAX2(t0, t);
+ }
+
+ if (t0 + t1 >= 1.0F)
+ return; /* discard */
+
+ clipmask &= ~(1 << plane_idx); /* turn off this plane's bit */
+ }
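+   /* t0 and t1 are how much must be trimmed from the v0 and v1 ends
+    * respectively; once the trimmed portions overlap (t0 + t1 >= 1) the
+    * whole segment is outside and has been discarded above.
+    */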
+
+ if (v0->clipmask) {
+ interp( clipper, stage->tmp[0], t0, v0, v1 );
+
+ if (clipper->flat)
+ copy_colors(stage, stage->tmp[0], v0);
+
+ newprim.v[0] = stage->tmp[0];
+ }
+ else {
+ newprim.v[0] = v0;
+ }
+
+ if (v1->clipmask) {
+ interp( clipper, stage->tmp[1], t1, v1, v0 );
+ newprim.v[1] = stage->tmp[1];
+ }
+ else {
+ newprim.v[1] = v1;
+ }
+
+ stage->next->line( stage->next, &newprim );
+}
+
+
+static void
+clip_point( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ if (header->v[0]->clipmask == 0)
+ stage->next->point( stage->next, header );
+}
+
+
+static void
+clip_line( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ unsigned clipmask = (header->v[0]->clipmask |
+ header->v[1]->clipmask);
+
+ if (clipmask == 0) {
+ /* no clipping needed */
+ stage->next->line( stage->next, header );
+ }
+ else if ((header->v[0]->clipmask &
+ header->v[1]->clipmask) == 0) {
+ do_clip_line(stage, header, clipmask);
+ }
+ /* else, totally clipped */
+}
+
+
+static void
+clip_tri( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ unsigned clipmask = (header->v[0]->clipmask |
+ header->v[1]->clipmask |
+ header->v[2]->clipmask);
+
+ if (clipmask == 0) {
+ /* no clipping needed */
+ stage->next->tri( stage->next, header );
+ }
+ else if ((header->v[0]->clipmask &
+ header->v[1]->clipmask &
+ header->v[2]->clipmask) == 0) {
+ do_clip_tri(stage, header, clipmask);
+ }
+}
+
+/* Update state. Could further delay this until we hit the first
+ * primitive that really requires clipping.
+ */
+static void
+clip_init_state( struct draw_stage *stage )
+{
+ struct clipper *clipper = clipper_stage( stage );
+
+ clipper->flat = stage->draw->rasterizer->flatshade ? TRUE : FALSE;
+
+ if (clipper->flat) {
+ const struct draw_vertex_shader *vs = stage->draw->vs.vertex_shader;
+ uint i;
+
+ clipper->num_color_attribs = 0;
+ for (i = 0; i < vs->info.num_outputs; i++) {
+ if (vs->info.output_semantic_name[i] == TGSI_SEMANTIC_COLOR ||
+ vs->info.output_semantic_name[i] == TGSI_SEMANTIC_BCOLOR) {
+ clipper->color_attribs[clipper->num_color_attribs++] = i;
+ }
+ }
+ }
+
+ stage->tri = clip_tri;
+ stage->line = clip_line;
+}
+
+
+
+static void clip_first_tri( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ clip_init_state( stage );
+ stage->tri( stage, header );
+}
+
+static void clip_first_line( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ clip_init_state( stage );
+ stage->line( stage, header );
+}
+
+
+static void clip_flush( struct draw_stage *stage,
+ unsigned flags )
+{
+ stage->tri = clip_first_tri;
+ stage->line = clip_first_line;
+ stage->next->flush( stage->next, flags );
+}
+
+
+static void clip_reset_stipple_counter( struct draw_stage *stage )
+{
+ stage->next->reset_stipple_counter( stage->next );
+}
+
+
+static void clip_destroy( struct draw_stage *stage )
+{
+ draw_free_temp_verts( stage );
+ FREE( stage );
+}
+
+
+/**
+ * Allocate a new clipper stage.
+ * \return pointer to new stage object
+ */
+struct draw_stage *draw_clip_stage( struct draw_context *draw )
+{
+ struct clipper *clipper = CALLOC_STRUCT(clipper);
+ if (clipper == NULL)
+ goto fail;
+
+ if (!draw_alloc_temp_verts( &clipper->stage, MAX_CLIPPED_VERTICES+1 ))
+ goto fail;
+
+ clipper->stage.draw = draw;
+ clipper->stage.name = "clipper";
+ clipper->stage.point = clip_point;
+ clipper->stage.line = clip_first_line;
+ clipper->stage.tri = clip_first_tri;
+ clipper->stage.flush = clip_flush;
+ clipper->stage.reset_stipple_counter = clip_reset_stipple_counter;
+ clipper->stage.destroy = clip_destroy;
+
+ clipper->plane = draw->plane;
+
+ return &clipper->stage;
+
+ fail:
+ if (clipper)
+ clipper->stage.destroy( &clipper->stage );
+
+ return NULL;
+}
diff --git a/src/gallium/auxiliary/draw/draw_pipe_cull.c b/src/gallium/auxiliary/draw/draw_pipe_cull.c
new file mode 100644
index 0000000000..0a70483858
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pipe_cull.c
@@ -0,0 +1,148 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \brief Drawing stage for polygon culling
+ */
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "util/u_memory.h"
+#include "pipe/p_defines.h"
+#include "draw_pipe.h"
+
+
+struct cull_stage {
+ struct draw_stage stage;
+ unsigned winding; /**< which winding(s) to cull (one of PIPE_WINDING_x) */
+};
+
+
+static INLINE struct cull_stage *cull_stage( struct draw_stage *stage )
+{
+ return (struct cull_stage *)stage;
+}
+
+
+
+
+static void cull_tri( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ const unsigned pos = stage->draw->vs.position_output;
+
+ /* Window coords: */
+ const float *v0 = header->v[0]->data[pos];
+ const float *v1 = header->v[1]->data[pos];
+ const float *v2 = header->v[2]->data[pos];
+
+ /* edge vectors e = v0 - v2, f = v1 - v2 */
+ const float ex = v0[0] - v2[0];
+ const float ey = v0[1] - v2[1];
+ const float fx = v1[0] - v2[0];
+ const float fy = v1[1] - v2[1];
+
+ /* det = cross(e,f).z */
+ header->det = ex * fy - ey * fx;
+
+ if (header->det != 0) {
+      /* If det < 0 then Z points toward the camera and the triangle has
+       * counter-clockwise winding.
+       */
+ unsigned winding = (header->det < 0) ? PIPE_WINDING_CCW : PIPE_WINDING_CW;
+
+ if ((winding & cull_stage(stage)->winding) == 0) {
+ /* triangle is not culled, pass to next stage */
+ stage->next->tri( stage->next, header );
+ }
+ }
+}
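+
+/* Worked example (illustrative): v0=(0,0), v1=(1,0), v2=(0,1) gives
+ * e=(0,-1), f=(1,-1) and det = 0*(-1) - (-1)*1 = 1 > 0, so the test above
+ * classifies the triangle as PIPE_WINDING_CW.
+ */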
+
+static void cull_first_tri( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ struct cull_stage *cull = cull_stage(stage);
+
+ cull->winding = stage->draw->rasterizer->cull_mode;
+
+ stage->tri = cull_tri;
+ stage->tri( stage, header );
+}
+
+
+
+static void cull_flush( struct draw_stage *stage, unsigned flags )
+{
+ stage->tri = cull_first_tri;
+ stage->next->flush( stage->next, flags );
+}
+
+static void cull_reset_stipple_counter( struct draw_stage *stage )
+{
+ stage->next->reset_stipple_counter( stage->next );
+}
+
+
+static void cull_destroy( struct draw_stage *stage )
+{
+ draw_free_temp_verts( stage );
+ FREE( stage );
+}
+
+
+/**
+ * Create a new polygon culling stage.
+ */
+struct draw_stage *draw_cull_stage( struct draw_context *draw )
+{
+ struct cull_stage *cull = CALLOC_STRUCT(cull_stage);
+ if (cull == NULL)
+ goto fail;
+
+ if (!draw_alloc_temp_verts( &cull->stage, 0 ))
+ goto fail;
+
+ cull->stage.draw = draw;
+ cull->stage.name = "cull";
+ cull->stage.next = NULL;
+ cull->stage.point = draw_pipe_passthrough_point;
+ cull->stage.line = draw_pipe_passthrough_line;
+ cull->stage.tri = cull_first_tri;
+ cull->stage.flush = cull_flush;
+ cull->stage.reset_stipple_counter = cull_reset_stipple_counter;
+ cull->stage.destroy = cull_destroy;
+
+ return &cull->stage;
+
+ fail:
+ if (cull)
+ cull->stage.destroy( &cull->stage );
+
+ return NULL;
+}
diff --git a/src/gallium/auxiliary/draw/draw_pipe_flatshade.c b/src/gallium/auxiliary/draw/draw_pipe_flatshade.c
new file mode 100644
index 0000000000..34afb1a0b6
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pipe_flatshade.c
@@ -0,0 +1,282 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+#include "pipe/p_shader_tokens.h"
+#include "draw_vs.h"
+#include "draw_pipe.h"
+
+
+/** subclass of draw_stage */
+struct flat_stage
+{
+ struct draw_stage stage;
+
+ uint num_color_attribs;
+ uint color_attribs[2]; /* front/back primary colors */
+
+ uint num_spec_attribs;
+ uint spec_attribs[2]; /* front/back secondary colors */
+};
+
+#define COPY_3FV( DST, SRC ) \
+do { \
+ (DST)[0] = (SRC)[0]; \
+ (DST)[1] = (SRC)[1]; \
+ (DST)[2] = (SRC)[2]; \
+} while (0)
+
+
+static INLINE struct flat_stage *
+flat_stage(struct draw_stage *stage)
+{
+ return (struct flat_stage *) stage;
+}
+
+
+/** Copy all the color attributes from 'src' vertex to 'dst' vertex */
+static INLINE void copy_colors( struct draw_stage *stage,
+ struct vertex_header *dst,
+ const struct vertex_header *src )
+{
+ const struct flat_stage *flat = flat_stage(stage);
+ uint i;
+
+ for (i = 0; i < flat->num_color_attribs; i++) {
+ const uint attr = flat->color_attribs[i];
+ COPY_4FV(dst->data[attr], src->data[attr]);
+ }
+
+ for (i = 0; i < flat->num_spec_attribs; i++) {
+ const uint attr = flat->spec_attribs[i];
+ COPY_3FV(dst->data[attr], src->data[attr]);
+ }
+}
+
+
+/** Copy all the color attributes from src vertex to dst0 & dst1 vertices */
+static INLINE void copy_colors2( struct draw_stage *stage,
+ struct vertex_header *dst0,
+ struct vertex_header *dst1,
+ const struct vertex_header *src )
+{
+ const struct flat_stage *flat = flat_stage(stage);
+ uint i;
+ for (i = 0; i < flat->num_color_attribs; i++) {
+ const uint attr = flat->color_attribs[i];
+ COPY_4FV(dst0->data[attr], src->data[attr]);
+ COPY_4FV(dst1->data[attr], src->data[attr]);
+ }
+
+ for (i = 0; i < flat->num_spec_attribs; i++) {
+ const uint attr = flat->spec_attribs[i];
+ COPY_3FV(dst0->data[attr], src->data[attr]);
+ COPY_3FV(dst1->data[attr], src->data[attr]);
+ }
+}
+
+
+/**
+ * Flatshade tri. Required for clipping and when unfilled tris are
+ * active, otherwise handled by hardware.
+ */
+static void flatshade_tri_0( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ struct prim_header tmp;
+
+ tmp.det = header->det;
+ tmp.flags = header->flags;
+ tmp.pad = header->pad;
+ tmp.v[0] = header->v[0];
+ tmp.v[1] = dup_vert(stage, header->v[1], 0);
+ tmp.v[2] = dup_vert(stage, header->v[2], 1);
+
+ copy_colors2(stage, tmp.v[1], tmp.v[2], tmp.v[0]);
+
+ stage->next->tri( stage->next, &tmp );
+}
+
+
+static void flatshade_tri_2( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ struct prim_header tmp;
+
+ tmp.det = header->det;
+ tmp.flags = header->flags;
+ tmp.pad = header->pad;
+ tmp.v[0] = dup_vert(stage, header->v[0], 0);
+ tmp.v[1] = dup_vert(stage, header->v[1], 1);
+ tmp.v[2] = header->v[2];
+
+ copy_colors2(stage, tmp.v[0], tmp.v[1], tmp.v[2]);
+
+ stage->next->tri( stage->next, &tmp );
+}
+
+
+
+
+
+/**
+ * Flatshade line. Required for clipping.
+ */
+static void flatshade_line_0( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ struct prim_header tmp;
+
+ tmp.v[0] = header->v[0];
+ tmp.v[1] = dup_vert(stage, header->v[1], 0);
+
+ copy_colors(stage, tmp.v[1], tmp.v[0]);
+
+ stage->next->line( stage->next, &tmp );
+}
+
+static void flatshade_line_1( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ struct prim_header tmp;
+
+ tmp.v[0] = dup_vert(stage, header->v[0], 0);
+ tmp.v[1] = header->v[1];
+
+ copy_colors(stage, tmp.v[0], tmp.v[1]);
+
+ stage->next->line( stage->next, &tmp );
+}
+
+
+
+
+static void flatshade_init_state( struct draw_stage *stage )
+{
+ struct flat_stage *flat = flat_stage(stage);
+ const struct draw_vertex_shader *vs = stage->draw->vs.vertex_shader;
+ uint i;
+
+ /* Find which vertex shader outputs are colors, make a list */
+ flat->num_color_attribs = 0;
+ flat->num_spec_attribs = 0;
+ for (i = 0; i < vs->info.num_outputs; i++) {
+ if (vs->info.output_semantic_name[i] == TGSI_SEMANTIC_COLOR ||
+ vs->info.output_semantic_name[i] == TGSI_SEMANTIC_BCOLOR) {
+ if (vs->info.output_semantic_index[i] == 0)
+ flat->color_attribs[flat->num_color_attribs++] = i;
+ else
+ flat->spec_attribs[flat->num_spec_attribs++] = i;
+ }
+ }
+
+ /* Choose flatshade routine according to provoking vertex:
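+    * the *_0 variants copy colors from v[0] (first vertex provokes),
+    * while the *_1 / *_2 variants copy from the last vertex.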
+ */
+ if (stage->draw->rasterizer->flatshade_first) {
+ stage->line = flatshade_line_0;
+ stage->tri = flatshade_tri_0;
+ }
+ else {
+ stage->line = flatshade_line_1;
+ stage->tri = flatshade_tri_2;
+ }
+}
+
+static void flatshade_first_tri( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ flatshade_init_state( stage );
+ stage->tri( stage, header );
+}
+
+static void flatshade_first_line( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ flatshade_init_state( stage );
+ stage->line( stage, header );
+}
+
+
+static void flatshade_flush( struct draw_stage *stage,
+ unsigned flags )
+{
+ stage->tri = flatshade_first_tri;
+ stage->line = flatshade_first_line;
+ stage->next->flush( stage->next, flags );
+}
+
+
+static void flatshade_reset_stipple_counter( struct draw_stage *stage )
+{
+ stage->next->reset_stipple_counter( stage->next );
+}
+
+
+static void flatshade_destroy( struct draw_stage *stage )
+{
+ draw_free_temp_verts( stage );
+ FREE( stage );
+}
+
+
+/**
+ * Create flatshading drawing stage.
+ */
+struct draw_stage *draw_flatshade_stage( struct draw_context *draw )
+{
+ struct flat_stage *flatshade = CALLOC_STRUCT(flat_stage);
+ if (flatshade == NULL)
+ goto fail;
+
+ if (!draw_alloc_temp_verts( &flatshade->stage, 2 ))
+ goto fail;
+
+ flatshade->stage.draw = draw;
+ flatshade->stage.name = "flatshade";
+ flatshade->stage.next = NULL;
+ flatshade->stage.point = draw_pipe_passthrough_point;
+ flatshade->stage.line = flatshade_first_line;
+ flatshade->stage.tri = flatshade_first_tri;
+ flatshade->stage.flush = flatshade_flush;
+ flatshade->stage.reset_stipple_counter = flatshade_reset_stipple_counter;
+ flatshade->stage.destroy = flatshade_destroy;
+
+ return &flatshade->stage;
+
+ fail:
+ if (flatshade)
+ flatshade->stage.destroy( &flatshade->stage );
+
+ return NULL;
+}
+
+
diff --git a/src/gallium/auxiliary/draw/draw_pipe_offset.c b/src/gallium/auxiliary/draw/draw_pipe_offset.c
new file mode 100644
index 0000000000..40798a5d6e
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pipe_offset.c
@@ -0,0 +1,185 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \brief polygon offset state
+ *
+ * \author Keith Whitwell <keith@tungstengraphics.com>
+ * \author Brian Paul
+ */
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "draw_pipe.h"
+
+
+
+struct offset_stage {
+ struct draw_stage stage;
+
+ float scale;
+ float units;
+};
+
+
+
+static INLINE struct offset_stage *offset_stage( struct draw_stage *stage )
+{
+ return (struct offset_stage *) stage;
+}
+
+
+
+
+
+/**
+ * Offset tri Z. Some hardware can handle this, but not usually when
+ * doing unfilled rendering.
+ */
+static void do_offset_tri( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ const unsigned pos = stage->draw->vs.position_output;
+ struct offset_stage *offset = offset_stage(stage);
+ float inv_det = 1.0f / header->det;
+
+ /* Window coords:
+ */
+ float *v0 = header->v[0]->data[pos];
+ float *v1 = header->v[1]->data[pos];
+ float *v2 = header->v[2]->data[pos];
+
+ /* edge vectors e = v0 - v2, f = v1 - v2 */
+ float ex = v0[0] - v2[0];
+ float ey = v0[1] - v2[1];
+ float ez = v0[2] - v2[2];
+ float fx = v1[0] - v2[0];
+ float fy = v1[1] - v2[1];
+ float fz = v1[2] - v2[2];
+
+ /* (a,b) = cross(e,f).xy */
+ float a = ey*fz - ez*fy;
+ float b = ez*fx - ex*fz;
+
+ float dzdx = fabsf(a * inv_det);
+ float dzdy = fabsf(b * inv_det);
+
+ float zoffset = offset->units + MAX2(dzdx, dzdy) * offset->scale;
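+
+   /* Note: a/det and b/det are -dz/dx and -dz/dy of the triangle's plane
+    * (det being the z component of cross(e,f)), so MAX2(dzdx, dzdy)
+    * approximates the maximum depth slope m in the usual polygon-offset
+    * formula o = m * factor + r * units (scale plays the role of factor,
+    * mrd the role of r).
+    */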
+
+ /*
+ * Note: we're applying the offset and clamping per-vertex.
+    * Ideally, the offset would be applied per-fragment, prior to
+    * fragment shading.
+ */
+ v0[2] = CLAMP(v0[2] + zoffset, 0.0f, 1.0f);
+ v1[2] = CLAMP(v1[2] + zoffset, 0.0f, 1.0f);
+ v2[2] = CLAMP(v2[2] + zoffset, 0.0f, 1.0f);
+
+ stage->next->tri( stage->next, header );
+}
+
+
+static void offset_tri( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ struct prim_header tmp;
+
+ tmp.det = header->det;
+ tmp.flags = header->flags;
+ tmp.pad = header->pad;
+ tmp.v[0] = dup_vert(stage, header->v[0], 0);
+ tmp.v[1] = dup_vert(stage, header->v[1], 1);
+ tmp.v[2] = dup_vert(stage, header->v[2], 2);
+
+ do_offset_tri( stage, &tmp );
+}
+
+
+static void offset_first_tri( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ struct offset_stage *offset = offset_stage(stage);
+
+ offset->units = (float) (stage->draw->rasterizer->offset_units * stage->draw->mrd);
+ offset->scale = stage->draw->rasterizer->offset_scale;
+
+ stage->tri = offset_tri;
+ stage->tri( stage, header );
+}
+
+
+
+
+static void offset_flush( struct draw_stage *stage,
+ unsigned flags )
+{
+ stage->tri = offset_first_tri;
+ stage->next->flush( stage->next, flags );
+}
+
+
+static void offset_reset_stipple_counter( struct draw_stage *stage )
+{
+ stage->next->reset_stipple_counter( stage->next );
+}
+
+
+static void offset_destroy( struct draw_stage *stage )
+{
+ draw_free_temp_verts( stage );
+ FREE( stage );
+}
+
+
+/**
+ * Create polygon offset drawing stage.
+ */
+struct draw_stage *draw_offset_stage( struct draw_context *draw )
+{
+ struct offset_stage *offset = CALLOC_STRUCT(offset_stage);
+ if (offset == NULL)
+ goto fail;
+
+   if (!draw_alloc_temp_verts( &offset->stage, 3 ))
+      goto fail;
+
+ offset->stage.draw = draw;
+ offset->stage.name = "offset";
+ offset->stage.next = NULL;
+ offset->stage.point = draw_pipe_passthrough_point;
+ offset->stage.line = draw_pipe_passthrough_line;
+ offset->stage.tri = offset_first_tri;
+ offset->stage.flush = offset_flush;
+ offset->stage.reset_stipple_counter = offset_reset_stipple_counter;
+ offset->stage.destroy = offset_destroy;
+
+ return &offset->stage;
+
+ fail:
+ if (offset)
+ offset->stage.destroy( &offset->stage );
+
+ return NULL;
+}
diff --git a/src/gallium/auxiliary/draw/draw_pipe_pstipple.c b/src/gallium/auxiliary/draw/draw_pipe_pstipple.c
new file mode 100644
index 0000000000..283502cdf3
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pipe_pstipple.c
@@ -0,0 +1,777 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Polygon stipple stage: implement polygon stipple with texture map and
+ * fragment program. The fragment program samples the texture and does
+ * a fragment kill for the stipple-failing fragments.
+ *
+ * Authors: Brian Paul
+ */
+
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_shader_tokens.h"
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+#include "tgsi/tgsi_transform.h"
+#include "tgsi/tgsi_dump.h"
+
+#include "draw_context.h"
+#include "draw_pipe.h"
+
+
+
+/**
+ * Subclass of pipe_shader_state to carry extra fragment shader info.
+ */
+struct pstip_fragment_shader
+{
+ struct pipe_shader_state state;
+ void *driver_fs;
+ void *pstip_fs;
+ uint sampler_unit;
+};
+
+
+/**
+ * Subclass of draw_stage
+ */
+struct pstip_stage
+{
+ struct draw_stage stage;
+
+ void *sampler_cso;
+ struct pipe_texture *texture;
+ uint num_samplers;
+ uint num_textures;
+
+ /*
+ * Currently bound state
+ */
+ struct pstip_fragment_shader *fs;
+ struct {
+ void *samplers[PIPE_MAX_SAMPLERS];
+ struct pipe_texture *textures[PIPE_MAX_SAMPLERS];
+ const struct pipe_poly_stipple *stipple;
+ } state;
+
+ /*
+ * Driver interface/override functions
+ */
+ void * (*driver_create_fs_state)(struct pipe_context *,
+ const struct pipe_shader_state *);
+ void (*driver_bind_fs_state)(struct pipe_context *, void *);
+ void (*driver_delete_fs_state)(struct pipe_context *, void *);
+
+ void (*driver_bind_sampler_states)(struct pipe_context *, unsigned, void **);
+
+ void (*driver_set_sampler_textures)(struct pipe_context *, unsigned,
+ struct pipe_texture **);
+
+ void (*driver_set_polygon_stipple)(struct pipe_context *,
+ const struct pipe_poly_stipple *);
+
+ struct pipe_context *pipe;
+};
+
+
+
+/**
+ * Subclass of tgsi_transform_context, used for transforming the
+ * user's fragment shader to add the polygon stipple test instructions.
+ */
+struct pstip_transform_context {
+ struct tgsi_transform_context base;
+ uint tempsUsed; /**< bitmask */
+ int wincoordInput;
+ int maxInput;
+ uint samplersUsed; /**< bitfield of samplers used */
+   int freeSampler;  /**< an available sampler for the pstipple */
+   int texTemp;   /**< a free temp register */
+ int numImmed;
+ boolean firstInstruction;
+};
+
+
+/**
+ * TGSI declaration transform callback.
+ * Look for a free sampler, a free input attrib, and two free temp regs.
+ */
+static void
+pstip_transform_decl(struct tgsi_transform_context *ctx,
+ struct tgsi_full_declaration *decl)
+{
+ struct pstip_transform_context *pctx = (struct pstip_transform_context *) ctx;
+
+ if (decl->Declaration.File == TGSI_FILE_SAMPLER) {
+ uint i;
+ for (i = decl->DeclarationRange.First;
+ i <= decl->DeclarationRange.Last; i++) {
+ pctx->samplersUsed |= 1 << i;
+ }
+ }
+ else if (decl->Declaration.File == TGSI_FILE_INPUT) {
+ pctx->maxInput = MAX2(pctx->maxInput, (int) decl->DeclarationRange.Last);
+ if (decl->Semantic.SemanticName == TGSI_SEMANTIC_POSITION)
+ pctx->wincoordInput = (int) decl->DeclarationRange.First;
+ }
+ else if (decl->Declaration.File == TGSI_FILE_TEMPORARY) {
+ uint i;
+ for (i = decl->DeclarationRange.First;
+ i <= decl->DeclarationRange.Last; i++) {
+ pctx->tempsUsed |= (1 << i);
+ }
+ }
+
+ ctx->emit_declaration(ctx, decl);
+}
+
+
+static void
+pstip_transform_immed(struct tgsi_transform_context *ctx,
+ struct tgsi_full_immediate *immed)
+{
+ struct pstip_transform_context *pctx = (struct pstip_transform_context *) ctx;
+ pctx->numImmed++;
+}
+
+
+/**
+ * Find the lowest zero bit in the given word, or -1 if bitfield is all ones.
+ */
+static int
+free_bit(uint bitfield)
+{
+ int i;
+ for (i = 0; i < 32; i++) {
+ if ((bitfield & (1 << i)) == 0)
+ return i;
+ }
+ return -1;
+}
+
+
+/**
+ * TGSI instruction transform callback.
+ * Before emitting the first instruction, insert our new declarations plus
+ * the MUL/TEX/KIL instructions that implement the stipple test.
+ */
+static void
+pstip_transform_inst(struct tgsi_transform_context *ctx,
+ struct tgsi_full_instruction *inst)
+{
+ struct pstip_transform_context *pctx = (struct pstip_transform_context *) ctx;
+
+ if (pctx->firstInstruction) {
+ /* emit our new declarations before the first instruction */
+
+ struct tgsi_full_declaration decl;
+ struct tgsi_full_instruction newInst;
+ uint i;
+ int wincoordInput;
+
+ /* find free sampler */
+ pctx->freeSampler = free_bit(pctx->samplersUsed);
+ if (pctx->freeSampler >= PIPE_MAX_SAMPLERS)
+ pctx->freeSampler = PIPE_MAX_SAMPLERS - 1;
+
+ if (pctx->wincoordInput < 0)
+ wincoordInput = pctx->maxInput + 1;
+ else
+ wincoordInput = pctx->wincoordInput;
+
+ /* find one free temp reg */
+ for (i = 0; i < 32; i++) {
+ if ((pctx->tempsUsed & (1 << i)) == 0) {
+ /* found a free temp */
+ if (pctx->texTemp < 0)
+ pctx->texTemp = i;
+ else
+ break;
+ }
+ }
+ assert(pctx->texTemp >= 0);
+
+ if (pctx->wincoordInput < 0) {
+ /* declare new position input reg */
+ decl = tgsi_default_full_declaration();
+ decl.Declaration.File = TGSI_FILE_INPUT;
+ decl.Declaration.Interpolate = TGSI_INTERPOLATE_LINEAR; /* XXX? */
+ decl.Declaration.Semantic = 1;
+ decl.Semantic.SemanticName = TGSI_SEMANTIC_POSITION;
+ decl.Semantic.SemanticIndex = 0;
+ decl.DeclarationRange.First =
+ decl.DeclarationRange.Last = wincoordInput;
+ ctx->emit_declaration(ctx, &decl);
+ }
+
+ /* declare new sampler */
+ decl = tgsi_default_full_declaration();
+ decl.Declaration.File = TGSI_FILE_SAMPLER;
+ decl.DeclarationRange.First =
+ decl.DeclarationRange.Last = pctx->freeSampler;
+ ctx->emit_declaration(ctx, &decl);
+
+ /* declare new temp regs */
+ decl = tgsi_default_full_declaration();
+ decl.Declaration.File = TGSI_FILE_TEMPORARY;
+ decl.DeclarationRange.First =
+ decl.DeclarationRange.Last = pctx->texTemp;
+ ctx->emit_declaration(ctx, &decl);
+
+ /* emit immediate = {1/32, 1/32, 1, 1}
+ * The index/position of this immediate will be pctx->numImmed
+ */
+ {
+ static const float value[4] = { 1.0/32, 1.0/32, 1.0, 1.0 };
+ struct tgsi_full_immediate immed;
+ uint size = 4;
+ immed = tgsi_default_full_immediate();
+ immed.Immediate.NrTokens = 1 + size; /* one for the token itself */
+ immed.u[0].Float = value[0];
+ immed.u[1].Float = value[1];
+ immed.u[2].Float = value[2];
+ immed.u[3].Float = value[3];
+ ctx->emit_immediate(ctx, &immed);
+ }
+
+ pctx->firstInstruction = FALSE;
+
+
+ /*
+       * Insert new MUL/TEX/KIL instructions at start of program
+ * Take gl_FragCoord, divide by 32 (stipple size), sample the
+ * texture and kill fragment if needed.
+ *
+ * We'd like to use non-normalized texcoords to index into a RECT
+ * texture, but we can only use GL_REPEAT wrap mode with normalized
+ * texcoords. Darn.
+ */
+
+ /* MUL texTemp, INPUT[wincoord], 1/32; */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_MUL;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullDstRegisters[0].DstRegister.Index = pctx->texTemp;
+ newInst.Instruction.NumSrcRegs = 2;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_INPUT;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = wincoordInput;
+ newInst.FullSrcRegisters[1].SrcRegister.File = TGSI_FILE_IMMEDIATE;
+ newInst.FullSrcRegisters[1].SrcRegister.Index = pctx->numImmed;
+ ctx->emit_instruction(ctx, &newInst);
+
+ /* TEX texTemp, texTemp, sampler; */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_TEX;
+ newInst.Instruction.NumDstRegs = 1;
+ newInst.FullDstRegisters[0].DstRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullDstRegisters[0].DstRegister.Index = pctx->texTemp;
+ newInst.Instruction.NumSrcRegs = 2;
+ newInst.InstructionExtTexture.Texture = TGSI_TEXTURE_2D;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = pctx->texTemp;
+ newInst.FullSrcRegisters[1].SrcRegister.File = TGSI_FILE_SAMPLER;
+ newInst.FullSrcRegisters[1].SrcRegister.Index = pctx->freeSampler;
+ ctx->emit_instruction(ctx, &newInst);
+
+ /* KIL -texTemp; # if -texTemp < 0, KILL fragment */
+ newInst = tgsi_default_full_instruction();
+ newInst.Instruction.Opcode = TGSI_OPCODE_KIL;
+ newInst.Instruction.NumDstRegs = 0;
+ newInst.Instruction.NumSrcRegs = 1;
+ newInst.FullSrcRegisters[0].SrcRegister.File = TGSI_FILE_TEMPORARY;
+ newInst.FullSrcRegisters[0].SrcRegister.Index = pctx->texTemp;
+ newInst.FullSrcRegisters[0].SrcRegister.Negate = 1;
+ ctx->emit_instruction(ctx, &newInst);
+ }
+
+ /* emit this instruction */
+ ctx->emit_instruction(ctx, inst);
+}
+
+
+/**
+ * Generate the frag shader we'll use for doing polygon stipple.
+ * This will be the user's shader prefixed with MUL/TEX/KIL instructions.
+ */
+static boolean
+generate_pstip_fs(struct pstip_stage *pstip)
+{
+ const struct pipe_shader_state *orig_fs = &pstip->fs->state;
+ /*struct draw_context *draw = pstip->stage.draw;*/
+ struct pipe_shader_state pstip_fs;
+ struct pstip_transform_context transform;
+
+#define MAX 1000
+
+ pstip_fs = *orig_fs; /* copy to init */
+ pstip_fs.tokens = MALLOC(sizeof(struct tgsi_token) * MAX);
+ if (pstip_fs.tokens == NULL)
+ return FALSE;
+
+ memset(&transform, 0, sizeof(transform));
+ transform.wincoordInput = -1;
+ transform.maxInput = -1;
+ transform.texTemp = -1;
+ transform.firstInstruction = TRUE;
+ transform.base.transform_instruction = pstip_transform_inst;
+ transform.base.transform_declaration = pstip_transform_decl;
+ transform.base.transform_immediate = pstip_transform_immed;
+
+ tgsi_transform_shader(orig_fs->tokens,
+ (struct tgsi_token *) pstip_fs.tokens,
+ MAX, &transform.base);
+
+#if 0 /* DEBUG */
+ tgsi_dump(orig_fs->tokens, 0);
+ tgsi_dump(pstip_fs.tokens, 0);
+#endif
+
+ pstip->fs->sampler_unit = transform.freeSampler;
+ assert(pstip->fs->sampler_unit < PIPE_MAX_SAMPLERS);
+
+ pstip->fs->pstip_fs = pstip->driver_create_fs_state(pstip->pipe, &pstip_fs);
+
+ FREE((void *)pstip_fs.tokens);
+ return TRUE;
+}
+
+
+/**
+ * Load texture image with current stipple pattern.
+ */
+static void
+pstip_update_texture(struct pstip_stage *pstip)
+{
+ static const uint bit31 = 1 << 31;
+ struct pipe_context *pipe = pstip->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ struct pipe_transfer *transfer;
+ const uint *stipple = pstip->state.stipple->stipple;
+ uint i, j;
+ ubyte *data;
+
+ /* XXX: want to avoid flushing just because we use stipple:
+ */
+ pipe->flush( pipe, PIPE_FLUSH_TEXTURE_CACHE, NULL );
+
+ transfer = screen->get_tex_transfer(screen, pstip->texture, 0, 0, 0,
+ PIPE_TRANSFER_WRITE, 0, 0, 32, 32);
+ data = screen->transfer_map(screen, transfer);
+
+ /*
+ * Load alpha texture.
+ * Note: 0 means keep the fragment, 255 means kill it.
+    * We'll negate the texel value and use KIL, which kills the fragment
+    * if the value is negative.
+ */
+ for (i = 0; i < 32; i++) {
+ for (j = 0; j < 32; j++) {
+ if (stipple[i] & (bit31 >> j)) {
+ /* fragment "on" */
+ data[i * transfer->stride + j] = 0;
+ }
+ else {
+ /* fragment "off" */
+ data[i * transfer->stride + j] = 255;
+ }
+ }
+ }
+
+ /* unmap */
+ screen->transfer_unmap(screen, transfer);
+ screen->tex_transfer_destroy(transfer);
+}
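+
+/* Note: each of the 32x32 texels above maps to one bit of the GL stipple
+ * pattern; together with the REPEAT sampler and the 1/32 scaling of the
+ * window position in the generated shader, the pattern tiles every 32
+ * pixels in x and y.
+ */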
+
+
+/**
+ * Create the texture map we'll use for stippling.
+ */
+static boolean
+pstip_create_texture(struct pstip_stage *pstip)
+{
+ struct pipe_context *pipe = pstip->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ struct pipe_texture texTemp;
+
+ memset(&texTemp, 0, sizeof(texTemp));
+ texTemp.target = PIPE_TEXTURE_2D;
+ texTemp.format = PIPE_FORMAT_A8_UNORM; /* XXX verify supported by driver! */
+ texTemp.last_level = 0;
+ texTemp.width[0] = 32;
+ texTemp.height[0] = 32;
+ texTemp.depth[0] = 1;
+ pf_get_block(texTemp.format, &texTemp.block);
+
+ pstip->texture = screen->texture_create(screen, &texTemp);
+ if (pstip->texture == NULL)
+ return FALSE;
+
+ return TRUE;
+}
+
+
+/**
+ * Create the sampler CSO that'll be used for stippling.
+ */
+static boolean
+pstip_create_sampler(struct pstip_stage *pstip)
+{
+ struct pipe_sampler_state sampler;
+ struct pipe_context *pipe = pstip->pipe;
+
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_REPEAT;
+ sampler.wrap_t = PIPE_TEX_WRAP_REPEAT;
+ sampler.wrap_r = PIPE_TEX_WRAP_REPEAT;
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
+ sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
+ sampler.normalized_coords = 1;
+ sampler.min_lod = 0.0f;
+ sampler.max_lod = 0.0f;
+
+ pstip->sampler_cso = pipe->create_sampler_state(pipe, &sampler);
+ if (pstip->sampler_cso == NULL)
+ return FALSE;
+
+ return TRUE;
+}
+
+
+/**
+ * When we're about to draw our first stipple polygon in a batch, this function
+ * is called to tell the driver to bind our modified fragment shader.
+ */
+static boolean
+bind_pstip_fragment_shader(struct pstip_stage *pstip)
+{
+ struct draw_context *draw = pstip->stage.draw;
+ if (!pstip->fs->pstip_fs &&
+ !generate_pstip_fs(pstip))
+ return FALSE;
+
+ draw->suspend_flushing = TRUE;
+ pstip->driver_bind_fs_state(pstip->pipe, pstip->fs->pstip_fs);
+ draw->suspend_flushing = FALSE;
+ return TRUE;
+}
+
+
+static INLINE struct pstip_stage *
+pstip_stage( struct draw_stage *stage )
+{
+ return (struct pstip_stage *) stage;
+}
+
+
+static void
+pstip_first_tri(struct draw_stage *stage, struct prim_header *header)
+{
+ struct pstip_stage *pstip = pstip_stage(stage);
+ struct pipe_context *pipe = pstip->pipe;
+ struct draw_context *draw = stage->draw;
+ uint num_samplers;
+
+ assert(stage->draw->rasterizer->poly_stipple_enable);
+
+ /* bind our fragprog */
+ if (!bind_pstip_fragment_shader(pstip)) {
+ stage->tri = draw_pipe_passthrough_tri;
+ stage->tri(stage, header);
+ return;
+ }
+
+
+ /* how many samplers? */
+ /* we'll use sampler/texture[pstip->sampler_unit] for the stipple */
+ num_samplers = MAX2(pstip->num_textures, pstip->num_samplers);
+ num_samplers = MAX2(num_samplers, pstip->fs->sampler_unit + 1);
+
+ /* plug in our sampler, texture */
+ pstip->state.samplers[pstip->fs->sampler_unit] = pstip->sampler_cso;
+ pipe_texture_reference(&pstip->state.textures[pstip->fs->sampler_unit],
+ pstip->texture);
+
+ assert(num_samplers <= PIPE_MAX_SAMPLERS);
+
+ draw->suspend_flushing = TRUE;
+ pstip->driver_bind_sampler_states(pipe, num_samplers, pstip->state.samplers);
+ pstip->driver_set_sampler_textures(pipe, num_samplers, pstip->state.textures);
+ draw->suspend_flushing = FALSE;
+
+ /* now really draw first triangle */
+ stage->tri = draw_pipe_passthrough_tri;
+ stage->tri(stage, header);
+}
+
+
+static void
+pstip_flush(struct draw_stage *stage, unsigned flags)
+{
+ struct draw_context *draw = stage->draw;
+ struct pstip_stage *pstip = pstip_stage(stage);
+ struct pipe_context *pipe = pstip->pipe;
+
+ stage->tri = pstip_first_tri;
+ stage->next->flush( stage->next, flags );
+
+ /* restore original frag shader, texture, sampler state */
+ draw->suspend_flushing = TRUE;
+ pstip->driver_bind_fs_state(pipe, pstip->fs->driver_fs);
+ pstip->driver_bind_sampler_states(pipe, pstip->num_samplers,
+ pstip->state.samplers);
+ pstip->driver_set_sampler_textures(pipe, pstip->num_textures,
+ pstip->state.textures);
+ draw->suspend_flushing = FALSE;
+}
+
+
+static void
+pstip_reset_stipple_counter(struct draw_stage *stage)
+{
+ stage->next->reset_stipple_counter( stage->next );
+}
+
+
+static void
+pstip_destroy(struct draw_stage *stage)
+{
+ struct pstip_stage *pstip = pstip_stage(stage);
+ uint i;
+
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
+ pipe_texture_reference(&pstip->state.textures[i], NULL);
+ }
+
+ pstip->pipe->delete_sampler_state(pstip->pipe, pstip->sampler_cso);
+
+ pipe_texture_reference(&pstip->texture, NULL);
+
+ draw_free_temp_verts( stage );
+ FREE( stage );
+}
+
+
+static struct pstip_stage *
+draw_pstip_stage(struct draw_context *draw)
+{
+   struct pstip_stage *pstip = CALLOC_STRUCT(pstip_stage);
+
+   if (pstip == NULL || !draw_alloc_temp_verts( &pstip->stage, 8 )) {
+      FREE(pstip);
+      return NULL;
+   }
+
+ pstip->stage.draw = draw;
+ pstip->stage.name = "pstip";
+ pstip->stage.next = NULL;
+ pstip->stage.point = draw_pipe_passthrough_point;
+ pstip->stage.line = draw_pipe_passthrough_line;
+ pstip->stage.tri = pstip_first_tri;
+ pstip->stage.flush = pstip_flush;
+ pstip->stage.reset_stipple_counter = pstip_reset_stipple_counter;
+ pstip->stage.destroy = pstip_destroy;
+
+ return pstip;
+}
+
+
+static struct pstip_stage *
+pstip_stage_from_pipe(struct pipe_context *pipe)
+{
+ struct draw_context *draw = (struct draw_context *) pipe->draw;
+ return pstip_stage(draw->pipeline.pstipple);
+}
+
+
+/**
+ * This function overrides the driver's create_fs_state() function and
+ * will typically be called by the state tracker.
+ */
+static void *
+pstip_create_fs_state(struct pipe_context *pipe,
+ const struct pipe_shader_state *fs)
+{
+ struct pstip_stage *pstip = pstip_stage_from_pipe(pipe);
+ struct pstip_fragment_shader *aafs = CALLOC_STRUCT(pstip_fragment_shader);
+
+ if (aafs) {
+ aafs->state = *fs;
+
+ /* pass-through */
+ aafs->driver_fs = pstip->driver_create_fs_state(pstip->pipe, fs);
+ }
+
+ return aafs;
+}
+
+
+static void
+pstip_bind_fs_state(struct pipe_context *pipe, void *fs)
+{
+ struct pstip_stage *pstip = pstip_stage_from_pipe(pipe);
+ struct pstip_fragment_shader *aafs = (struct pstip_fragment_shader *) fs;
+ /* save current */
+ pstip->fs = aafs;
+ /* pass-through */
+ pstip->driver_bind_fs_state(pstip->pipe,
+ (aafs ? aafs->driver_fs : NULL));
+}
+
+
+static void
+pstip_delete_fs_state(struct pipe_context *pipe, void *fs)
+{
+ struct pstip_stage *pstip = pstip_stage_from_pipe(pipe);
+ struct pstip_fragment_shader *aafs = (struct pstip_fragment_shader *) fs;
+ /* pass-through */
+ pstip->driver_delete_fs_state(pstip->pipe, aafs->driver_fs);
+
+ if (aafs->pstip_fs)
+ pstip->driver_delete_fs_state(pstip->pipe, aafs->pstip_fs);
+
+ FREE(aafs);
+}
+
+
+static void
+pstip_bind_sampler_states(struct pipe_context *pipe,
+ unsigned num, void **sampler)
+{
+ struct pstip_stage *pstip = pstip_stage_from_pipe(pipe);
+ uint i;
+
+ /* save current */
+ memcpy(pstip->state.samplers, sampler, num * sizeof(void *));
+ for (i = num; i < PIPE_MAX_SAMPLERS; i++) {
+ pstip->state.samplers[i] = NULL;
+ }
+
+ pstip->num_samplers = num;
+ /* pass-through */
+ pstip->driver_bind_sampler_states(pstip->pipe, num, sampler);
+}
+
+
+static void
+pstip_set_sampler_textures(struct pipe_context *pipe,
+ unsigned num, struct pipe_texture **texture)
+{
+ struct pstip_stage *pstip = pstip_stage_from_pipe(pipe);
+ uint i;
+
+ /* save current */
+ for (i = 0; i < num; i++) {
+ pipe_texture_reference(&pstip->state.textures[i], texture[i]);
+ }
+ for (; i < PIPE_MAX_SAMPLERS; i++) {
+ pipe_texture_reference(&pstip->state.textures[i], NULL);
+ }
+
+ pstip->num_textures = num;
+
+ /* pass-through */
+ pstip->driver_set_sampler_textures(pstip->pipe, num, texture);
+}
+
+
+static void
+pstip_set_polygon_stipple(struct pipe_context *pipe,
+ const struct pipe_poly_stipple *stipple)
+{
+ struct pstip_stage *pstip = pstip_stage_from_pipe(pipe);
+
+ /* save current */
+ pstip->state.stipple = stipple;
+
+ /* pass-through */
+ pstip->driver_set_polygon_stipple(pstip->pipe, stipple);
+
+ pstip_update_texture(pstip);
+}
+
+
+/**
+ * Called by drivers that want to install this polygon stipple stage
+ * into the draw module's pipeline. This will not be used if the
+ * hardware has native support for polygon stipple.
+ */
+boolean
+draw_install_pstipple_stage(struct draw_context *draw,
+ struct pipe_context *pipe)
+{
+ struct pstip_stage *pstip;
+
+ pipe->draw = (void *) draw;
+
+ /*
+ * Create / install pgon stipple drawing / prim stage
+ */
+ pstip = draw_pstip_stage( draw );
+ if (pstip == NULL)
+ goto fail;
+
+ draw->pipeline.pstipple = &pstip->stage;
+
+ pstip->pipe = pipe;
+
+ /* create special texture, sampler state */
+ if (!pstip_create_texture(pstip))
+ goto fail;
+
+ if (!pstip_create_sampler(pstip))
+ goto fail;
+
+ /* save original driver functions */
+ pstip->driver_create_fs_state = pipe->create_fs_state;
+ pstip->driver_bind_fs_state = pipe->bind_fs_state;
+ pstip->driver_delete_fs_state = pipe->delete_fs_state;
+
+ pstip->driver_bind_sampler_states = pipe->bind_sampler_states;
+ pstip->driver_set_sampler_textures = pipe->set_sampler_textures;
+ pstip->driver_set_polygon_stipple = pipe->set_polygon_stipple;
+
+ /* override the driver's functions */
+ pipe->create_fs_state = pstip_create_fs_state;
+ pipe->bind_fs_state = pstip_bind_fs_state;
+ pipe->delete_fs_state = pstip_delete_fs_state;
+
+ pipe->bind_sampler_states = pstip_bind_sampler_states;
+ pipe->set_sampler_textures = pstip_set_sampler_textures;
+ pipe->set_polygon_stipple = pstip_set_polygon_stipple;
+
+ return TRUE;
+
+ fail:
+ if (pstip)
+ pstip->stage.destroy( &pstip->stage );
+
+ return FALSE;
+}
diff --git a/src/gallium/auxiliary/draw/draw_pipe_stipple.c b/src/gallium/auxiliary/draw/draw_pipe_stipple.c
new file mode 100644
index 0000000000..6e921bac27
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pipe_stipple.c
@@ -0,0 +1,251 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+/* Implement line stipple by cutting lines up into smaller lines.
+ * There are many ways to implement line stipple; this approach works in
+ * all situations and requires no state manipulation, but at the cost of
+ * generating a large amount of extra geometry.
+ */
+
+
+#include "pipe/p_defines.h"
+#include "pipe/p_shader_tokens.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+#include "draw/draw_pipe.h"
+
+
+/** Subclass of draw_stage */
+struct stipple_stage {
+ struct draw_stage stage;
+ float counter;
+ uint pattern;
+ uint factor;
+};
+
+
+static INLINE struct stipple_stage *
+stipple_stage(struct draw_stage *stage)
+{
+ return (struct stipple_stage *) stage;
+}
+
+
+/**
+ * Compute interpolated vertex attributes for 'dst' at position 't'
+ * between 'v0' and 'v1'.
+ * XXX using linear interpolation for all attribs at this time.
+ */
+static void
+screen_interp( struct draw_context *draw,
+ struct vertex_header *dst,
+ float t,
+ const struct vertex_header *v0,
+ const struct vertex_header *v1 )
+{
+ uint attr;
+ for (attr = 0; attr < draw->vs.num_vs_outputs; attr++) {
+ const float *val0 = v0->data[attr];
+ const float *val1 = v1->data[attr];
+ float *newv = dst->data[attr];
+ uint i;
+ for (i = 0; i < 4; i++) {
+ newv[i] = val0[i] + t * (val1[i] - val0[i]);
+ }
+ }
+}
+
+
+static void
+emit_segment(struct draw_stage *stage, struct prim_header *header,
+ float t0, float t1)
+{
+ struct vertex_header *v0new = dup_vert(stage, header->v[0], 0);
+ struct vertex_header *v1new = dup_vert(stage, header->v[1], 1);
+ struct prim_header newprim = *header;
+
+ if (t0 > 0.0) {
+ screen_interp( stage->draw, v0new, t0, header->v[0], header->v[1] );
+ newprim.v[0] = v0new;
+ }
+
+ if (t1 < 1.0) {
+ screen_interp( stage->draw, v1new, t1, header->v[0], header->v[1] );
+ newprim.v[1] = v1new;
+ }
+
+ stage->next->line( stage->next, &newprim );
+}
+
+
+static INLINE unsigned
+stipple_test(int counter, ushort pattern, int factor)
+{
+ int b = (counter / factor) & 0xf;
+ return (1 << b) & pattern;
+}
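+
+/* Example (illustrative): with factor=1 and pattern=0x00ff, counter values
+ * 0..7 select set bits (segment on) and 8..15 select clear bits (segment
+ * off); a factor of 2 stretches each pattern bit over two pixels.
+ */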
+
+
+static void
+stipple_line(struct draw_stage *stage, struct prim_header *header)
+{
+ struct stipple_stage *stipple = stipple_stage(stage);
+ struct vertex_header *v0 = header->v[0];
+ struct vertex_header *v1 = header->v[1];
+ const unsigned pos = stage->draw->vs.position_output;
+ const float *pos0 = v0->data[pos];
+ const float *pos1 = v1->data[pos];
+ float start = 0;
+ int state = 0;
+
+ float x0 = pos0[0];
+ float x1 = pos1[0];
+ float y0 = pos0[1];
+ float y1 = pos1[1];
+
+ float dx = x0 > x1 ? x0 - x1 : x1 - x0;
+ float dy = y0 > y1 ? y0 - y1 : y1 - y0;
+
+ float length = MAX2(dx, dy);
+ int i;
+
+ if (header->flags & DRAW_PIPE_RESET_STIPPLE)
+ stipple->counter = 0;
+
+
+   /* XXX ToDo: instead of iterating pixel-by-pixel, use a look-up table.
+ */
+ for (i = 0; i < length; i++) {
+ int result = stipple_test( (int) stipple->counter+i,
+ (ushort) stipple->pattern, stipple->factor );
+ if (result != state) {
+ /* changing from "off" to "on" or vice versa */
+ if (state) {
+ if (start != i) {
+ /* finishing an "on" segment */
+ emit_segment( stage, header, start / length, i / length );
+ }
+ }
+ else {
+ /* starting an "on" segment */
+ start = (float) i;
+ }
+ state = result;
+ }
+ }
+
+ if (state && start < length)
+ emit_segment( stage, header, start / length, 1.0 );
+
+ stipple->counter += length;
+}
+
+
+static void
+reset_stipple_counter(struct draw_stage *stage)
+{
+ struct stipple_stage *stipple = stipple_stage(stage);
+ stipple->counter = 0;
+ stage->next->reset_stipple_counter( stage->next );
+}
+
+static void
+stipple_reset_point(struct draw_stage *stage, struct prim_header *header)
+{
+ struct stipple_stage *stipple = stipple_stage(stage);
+ stipple->counter = 0;
+ stage->next->point(stage->next, header);
+}
+
+static void
+stipple_reset_tri(struct draw_stage *stage, struct prim_header *header)
+{
+ struct stipple_stage *stipple = stipple_stage(stage);
+ stipple->counter = 0;
+ stage->next->tri(stage->next, header);
+}
+
+
+static void
+stipple_first_line(struct draw_stage *stage,
+ struct prim_header *header)
+{
+ struct stipple_stage *stipple = stipple_stage(stage);
+ struct draw_context *draw = stage->draw;
+
+ stipple->pattern = draw->rasterizer->line_stipple_pattern;
+ stipple->factor = draw->rasterizer->line_stipple_factor + 1;
+
+ stage->line = stipple_line;
+ stage->line( stage, header );
+}
+
+
+static void
+stipple_flush(struct draw_stage *stage, unsigned flags)
+{
+ stage->line = stipple_first_line;
+ stage->next->flush( stage->next, flags );
+}
+
+
+
+
+static void
+stipple_destroy( struct draw_stage *stage )
+{
+ draw_free_temp_verts( stage );
+ FREE( stage );
+}
+
+
+/**
+ * Create line stippler stage
+ */
+struct draw_stage *draw_stipple_stage( struct draw_context *draw )
+{
+   struct stipple_stage *stipple = CALLOC_STRUCT(stipple_stage);
+
+   if (stipple == NULL || !draw_alloc_temp_verts( &stipple->stage, 2 )) {
+      FREE(stipple);
+      return NULL;
+   }
+
+ stipple->stage.draw = draw;
+ stipple->stage.name = "stipple";
+ stipple->stage.next = NULL;
+ stipple->stage.point = stipple_reset_point;
+ stipple->stage.line = stipple_first_line;
+ stipple->stage.tri = stipple_reset_tri;
+ stipple->stage.reset_stipple_counter = reset_stipple_counter;
+ stipple->stage.flush = stipple_flush;
+ stipple->stage.destroy = stipple_destroy;
+
+ return &stipple->stage;
+}
diff --git a/src/gallium/auxiliary/draw/draw_pipe_twoside.c b/src/gallium/auxiliary/draw/draw_pipe_twoside.c
new file mode 100644
index 0000000000..eef0238b15
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pipe_twoside.c
@@ -0,0 +1,200 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_shader_tokens.h"
+#include "draw_vs.h"
+#include "draw_pipe.h"
+
+struct twoside_stage {
+ struct draw_stage stage;
+ float sign; /**< +1 or -1 */
+ uint attrib_front0, attrib_back0;
+ uint attrib_front1, attrib_back1;
+};
+
+
+static INLINE struct twoside_stage *twoside_stage( struct draw_stage *stage )
+{
+ return (struct twoside_stage *)stage;
+}
+
+
+
+
+/**
+ * Copy back color(s) to front color(s).
+ */
+static INLINE struct vertex_header *
+copy_bfc( struct twoside_stage *twoside,
+ const struct vertex_header *v,
+ unsigned idx )
+{
+ struct vertex_header *tmp = dup_vert( &twoside->stage, v, idx );
+
+ if (twoside->attrib_back0) {
+ COPY_4FV(tmp->data[twoside->attrib_front0],
+ tmp->data[twoside->attrib_back0]);
+ }
+ if (twoside->attrib_back1) {
+ COPY_4FV(tmp->data[twoside->attrib_front1],
+ tmp->data[twoside->attrib_back1]);
+ }
+
+ return tmp;
+}
+
+
+/* Twoside tri:
+ */
+static void twoside_tri( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ struct twoside_stage *twoside = twoside_stage(stage);
+
+ if (header->det * twoside->sign < 0.0) {
+ /* this is a back-facing triangle */
+ struct prim_header tmp;
+
+ tmp.det = header->det;
+ tmp.flags = header->flags;
+ tmp.pad = header->pad;
+ /* copy back attribs to front attribs */
+ tmp.v[0] = copy_bfc(twoside, header->v[0], 0);
+ tmp.v[1] = copy_bfc(twoside, header->v[1], 1);
+ tmp.v[2] = copy_bfc(twoside, header->v[2], 2);
+
+ stage->next->tri( stage->next, &tmp );
+ }
+ else {
+ stage->next->tri( stage->next, header );
+ }
+}
+
+
+
+static void twoside_first_tri( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ struct twoside_stage *twoside = twoside_stage(stage);
+ const struct draw_vertex_shader *vs = stage->draw->vs.vertex_shader;
+ uint i;
+
+ twoside->attrib_front0 = 0;
+ twoside->attrib_front1 = 0;
+ twoside->attrib_back0 = 0;
+ twoside->attrib_back1 = 0;
+
+ /* Find which vertex shader outputs are front/back colors */
+ for (i = 0; i < vs->info.num_outputs; i++) {
+ if (vs->info.output_semantic_name[i] == TGSI_SEMANTIC_COLOR) {
+ if (vs->info.output_semantic_index[i] == 0)
+ twoside->attrib_front0 = i;
+ else
+ twoside->attrib_front1 = i;
+ }
+ if (vs->info.output_semantic_name[i] == TGSI_SEMANTIC_BCOLOR) {
+ if (vs->info.output_semantic_index[i] == 0)
+ twoside->attrib_back0 = i;
+ else
+ twoside->attrib_back1 = i;
+ }
+ }
+
+ if (!twoside->attrib_back0)
+ twoside->attrib_front0 = 0;
+
+ if (!twoside->attrib_back1)
+ twoside->attrib_front1 = 0;
+
+ /*
+ * We'll multiply the primitive's determinant by this sign to determine
+ * if the triangle is back-facing (negative).
+ * sign = -1 for CCW, +1 for CW
+ */
+ twoside->sign = (stage->draw->rasterizer->front_winding == PIPE_WINDING_CCW) ? -1.0f : 1.0f;
+
+ stage->tri = twoside_tri;
+ stage->tri( stage, header );
+}
+
+
+static void twoside_flush( struct draw_stage *stage, unsigned flags )
+{
+ stage->tri = twoside_first_tri;
+ stage->next->flush( stage->next, flags );
+}
+
+
+static void twoside_reset_stipple_counter( struct draw_stage *stage )
+{
+ stage->next->reset_stipple_counter( stage->next );
+}
+
+
+static void twoside_destroy( struct draw_stage *stage )
+{
+ draw_free_temp_verts( stage );
+ FREE( stage );
+}
+
+
+/**
+ * Create twoside pipeline stage.
+ */
+struct draw_stage *draw_twoside_stage( struct draw_context *draw )
+{
+ struct twoside_stage *twoside = CALLOC_STRUCT(twoside_stage);
+ if (twoside == NULL)
+ goto fail;
+
+ if (!draw_alloc_temp_verts( &twoside->stage, 3 ))
+ goto fail;
+
+ twoside->stage.draw = draw;
+ twoside->stage.name = "twoside";
+ twoside->stage.next = NULL;
+ twoside->stage.point = draw_pipe_passthrough_point;
+ twoside->stage.line = draw_pipe_passthrough_line;
+ twoside->stage.tri = twoside_first_tri;
+ twoside->stage.flush = twoside_flush;
+ twoside->stage.reset_stipple_counter = twoside_reset_stipple_counter;
+ twoside->stage.destroy = twoside_destroy;
+
+ return &twoside->stage;
+
+ fail:
+ if (twoside)
+ twoside->stage.destroy( &twoside->stage );
+
+ return NULL;
+}
diff --git a/src/gallium/auxiliary/draw/draw_pipe_unfilled.c b/src/gallium/auxiliary/draw/draw_pipe_unfilled.c
new file mode 100644
index 0000000000..03bb842e20
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pipe_unfilled.c
@@ -0,0 +1,204 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \brief Drawing stage for handling glPolygonMode(line/point).
+ * Convert triangles to points or lines as needed.
+ */
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "util/u_memory.h"
+#include "pipe/p_defines.h"
+#include "draw_private.h"
+#include "draw_pipe.h"
+
+
+struct unfilled_stage {
+ struct draw_stage stage;
+
+ /** [0] = front face, [1] = back face.
+ * Legal values: PIPE_POLYGON_MODE_FILL, PIPE_POLYGON_MODE_LINE,
+ * and PIPE_POLYGON_MODE_POINT.
+ */
+ unsigned mode[2];
+};
+
+
+static INLINE struct unfilled_stage *unfilled_stage( struct draw_stage *stage )
+{
+ return (struct unfilled_stage *)stage;
+}
+
+
+
+static void point( struct draw_stage *stage,
+ struct vertex_header *v0 )
+{
+ struct prim_header tmp;
+ tmp.v[0] = v0;
+ stage->next->point( stage->next, &tmp );
+}
+
+static void line( struct draw_stage *stage,
+ struct vertex_header *v0,
+ struct vertex_header *v1 )
+{
+ struct prim_header tmp;
+ tmp.v[0] = v0;
+ tmp.v[1] = v1;
+ stage->next->line( stage->next, &tmp );
+}
+
+
+static void points( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ struct vertex_header *v0 = header->v[0];
+ struct vertex_header *v1 = header->v[1];
+ struct vertex_header *v2 = header->v[2];
+
+ if ((header->flags & DRAW_PIPE_EDGE_FLAG_0) && v0->edgeflag) point( stage, v0 );
+ if ((header->flags & DRAW_PIPE_EDGE_FLAG_1) && v1->edgeflag) point( stage, v1 );
+ if ((header->flags & DRAW_PIPE_EDGE_FLAG_2) && v2->edgeflag) point( stage, v2 );
+}
+
+
+static void lines( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ struct vertex_header *v0 = header->v[0];
+ struct vertex_header *v1 = header->v[1];
+ struct vertex_header *v2 = header->v[2];
+
+ if (header->flags & DRAW_PIPE_RESET_STIPPLE)
+ stage->next->reset_stipple_counter( stage->next );
+
+ if ((header->flags & DRAW_PIPE_EDGE_FLAG_2) && v2->edgeflag) line( stage, v2, v0 );
+ if ((header->flags & DRAW_PIPE_EDGE_FLAG_0) && v0->edgeflag) line( stage, v0, v1 );
+ if ((header->flags & DRAW_PIPE_EDGE_FLAG_1) && v1->edgeflag) line( stage, v1, v2 );
+}
+
+
+/* Unfilled tri:
+ *
+ * Note that edgeflags in the vertex struct are not sufficient, as we will
+ * need to manipulate them when decomposing primitives.
+ *
+ * We currently keep the vertex edgeflag and primitive edgeflag mask
+ * separate until the last possible moment.
+ */
+static void unfilled_tri( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ struct unfilled_stage *unfilled = unfilled_stage(stage);
+ unsigned mode = unfilled->mode[header->det >= 0.0];
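+ /* Indexing by (det >= 0.0) selects mode[1] (the fill_cw value) when the
+ * determinant is non-negative, and mode[0] (fill_ccw) otherwise; see
+ * unfilled_first_tri() below for how those two slots are filled in.
+ */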
+
+ switch (mode) {
+ case PIPE_POLYGON_MODE_FILL:
+ stage->next->tri( stage->next, header );
+ break;
+ case PIPE_POLYGON_MODE_LINE:
+ lines( stage, header );
+ break;
+ case PIPE_POLYGON_MODE_POINT:
+ points( stage, header );
+ break;
+ default:
+ assert(0);
+ }
+}
+
+
+static void unfilled_first_tri( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ struct unfilled_stage *unfilled = unfilled_stage(stage);
+
+ unfilled->mode[0] = stage->draw->rasterizer->fill_ccw; /* front */
+ unfilled->mode[1] = stage->draw->rasterizer->fill_cw; /* back */
+
+ stage->tri = unfilled_tri;
+ stage->tri( stage, header );
+}
+
+
+
+static void unfilled_flush( struct draw_stage *stage,
+ unsigned flags )
+{
+ stage->next->flush( stage->next, flags );
+
+ stage->tri = unfilled_first_tri;
+}
+
+
+static void unfilled_reset_stipple_counter( struct draw_stage *stage )
+{
+ stage->next->reset_stipple_counter( stage->next );
+}
+
+
+static void unfilled_destroy( struct draw_stage *stage )
+{
+ draw_free_temp_verts( stage );
+ FREE( stage );
+}
+
+
+/**
+ * Create unfilled triangle stage.
+ */
+struct draw_stage *draw_unfilled_stage( struct draw_context *draw )
+{
+ struct unfilled_stage *unfilled = CALLOC_STRUCT(unfilled_stage);
+ if (unfilled == NULL)
+ goto fail;
+
+ if (!draw_alloc_temp_verts( &unfilled->stage, 0 ))
+ goto fail;
+
+ unfilled->stage.draw = draw;
+ unfilled->stage.name = "unfilled";
+ unfilled->stage.next = NULL;
+ unfilled->stage.tmp = NULL;
+ unfilled->stage.point = draw_pipe_passthrough_point;
+ unfilled->stage.line = draw_pipe_passthrough_line;
+ unfilled->stage.tri = unfilled_first_tri;
+ unfilled->stage.flush = unfilled_flush;
+ unfilled->stage.reset_stipple_counter = unfilled_reset_stipple_counter;
+ unfilled->stage.destroy = unfilled_destroy;
+
+ return &unfilled->stage;
+
+ fail:
+ if (unfilled)
+ unfilled->stage.destroy( &unfilled->stage );
+
+ return NULL;
+}
diff --git a/src/gallium/auxiliary/draw/draw_pipe_util.c b/src/gallium/auxiliary/draw/draw_pipe_util.c
new file mode 100644
index 0000000000..e22e5fed0c
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pipe_util.c
@@ -0,0 +1,137 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "util/u_memory.h"
+#include "draw/draw_private.h"
+#include "draw/draw_pipe.h"
+
+
+
+void
+draw_pipe_passthrough_point(struct draw_stage *stage, struct prim_header *header)
+{
+ stage->next->point(stage->next, header);
+}
+
+void
+draw_pipe_passthrough_line(struct draw_stage *stage, struct prim_header *header)
+{
+ stage->next->line(stage->next, header);
+}
+
+void
+draw_pipe_passthrough_tri(struct draw_stage *stage, struct prim_header *header)
+{
+ stage->next->tri(stage->next, header);
+}
+
+
+
+
+
+/* This is only used for temporary verts.
+ */
+#define MAX_VERTEX_SIZE ((2 + PIPE_MAX_SHADER_OUTPUTS) * 4 * sizeof(float))
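+
+/* The "2 +" above leaves room for the vertex_header word and the 4-float
+ * clip position that precede the data[] attributes (see draw_private.h),
+ * rounded up to whole 4-float slots.
+ */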
+
+
+/**
+ * Allocate space for temporary post-transform vertices, such as for clipping.
+ */
+boolean draw_alloc_temp_verts( struct draw_stage *stage, unsigned nr )
+{
+ assert(!stage->tmp);
+
+ stage->tmp = NULL;
+ stage->nr_tmps = nr;
+
+ if (nr != 0)
+ {
+ unsigned i;
+ ubyte *store = (ubyte *) MALLOC( MAX_VERTEX_SIZE * nr );
+
+ if (store == NULL)
+ return FALSE;
+
+ stage->tmp = (struct vertex_header **) MALLOC( sizeof(struct vertex_header *) * nr );
+ if (stage->tmp == NULL) {
+ FREE(store);
+ return FALSE;
+ }
+
+ for (i = 0; i < nr; i++)
+ stage->tmp[i] = (struct vertex_header *)(store + i * MAX_VERTEX_SIZE);
+ }
+
+ return TRUE;
+}
+
+
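+/* Note: stage->tmp[0] points at the start of the single block MALLOCed in
+ * draw_alloc_temp_verts() above, so freeing tmp[0] releases the storage for
+ * every temporary vertex at once.
+ */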
+void draw_free_temp_verts( struct draw_stage *stage )
+{
+ if (stage->tmp) {
+ FREE( stage->tmp[0] );
+ FREE( stage->tmp );
+ stage->tmp = NULL;
+ }
+}
+
+
+/* Reset vertex ids. This is basically a type of flush.
+ *
+ * Called only from draw_pipe_vbuf.c
+ */
+void draw_reset_vertex_ids(struct draw_context *draw)
+{
+ struct draw_stage *stage = draw->pipeline.first;
+
+ while (stage) {
+ unsigned i;
+
+ for (i = 0; i < stage->nr_tmps; i++)
+ stage->tmp[i]->vertex_id = UNDEFINED_VERTEX_ID;
+
+ stage = stage->next;
+ }
+
+ if (draw->pipeline.verts)
+ {
+ unsigned i;
+ char *verts = draw->pipeline.verts;
+ unsigned stride = draw->pipeline.vertex_stride;
+
+ for (i = 0; i < draw->pipeline.vertex_count; i++) {
+ ((struct vertex_header *)verts)->vertex_id = UNDEFINED_VERTEX_ID;
+ verts += stride;
+ }
+ }
+}
+
diff --git a/src/gallium/auxiliary/draw/draw_pipe_validate.c b/src/gallium/auxiliary/draw/draw_pipe_validate.c
new file mode 100644
index 0000000000..bea90e50d3
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pipe_validate.c
@@ -0,0 +1,339 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "util/u_memory.h"
+#include "pipe/p_defines.h"
+#include "draw_private.h"
+#include "draw_pipe.h"
+#include "draw_context.h"
+#include "draw_vbuf.h"
+
+static boolean points( unsigned prim )
+{
+ return (prim == PIPE_PRIM_POINTS);
+}
+
+static boolean lines( unsigned prim )
+{
+ return (prim == PIPE_PRIM_LINES ||
+ prim == PIPE_PRIM_LINE_STRIP ||
+ prim == PIPE_PRIM_LINE_LOOP);
+}
+
+static boolean triangles( unsigned prim )
+{
+ return prim >= PIPE_PRIM_TRIANGLES;
+}
+
+/**
+ * Default version of a function to check if we need any special
+ * pipeline stages, or whether prims/verts can go through untouched.
+ * Don't test for bypass clipping or vs modes; this function is just
+ * about the primitive pipeline stages.
+ *
+ * This can be overridden by the driver.
+ */
+boolean
+draw_need_pipeline(const struct draw_context *draw,
+ const struct pipe_rasterizer_state *rasterizer,
+ unsigned int prim )
+{
+ /* If the driver has overridden this, use that version:
+ */
+ if (draw->render &&
+ draw->render->need_pipeline)
+ {
+ return draw->render->need_pipeline( draw->render,
+ rasterizer,
+ prim );
+ }
+
+ /* Don't have to worry about triangles turning into lines/points
+ * and triggering the pipeline, because we have to trigger the
+ * pipeline *anyway* if unfilled mode is active.
+ */
+ if (lines(prim))
+ {
+ /* line stipple */
+ if (rasterizer->line_stipple_enable && draw->pipeline.line_stipple)
+ return TRUE;
+
+ /* wide lines */
+ if (rasterizer->line_width > draw->pipeline.wide_line_threshold)
+ return TRUE;
+
+ /* AA lines */
+ if (rasterizer->line_smooth && draw->pipeline.aaline)
+ return TRUE;
+ }
+
+ if (points(prim))
+ {
+ /* large points */
+ if (rasterizer->point_size > draw->pipeline.wide_point_threshold)
+ return TRUE;
+
+ /* AA points */
+ if (rasterizer->point_smooth && draw->pipeline.aapoint)
+ return TRUE;
+
+ /* point sprites */
+ if (rasterizer->point_sprite && draw->pipeline.point_sprite)
+ return TRUE;
+ }
+
+
+ if (triangles(prim))
+ {
+ /* polygon stipple */
+ if (rasterizer->poly_stipple_enable && draw->pipeline.pstipple)
+ return TRUE;
+
+ /* unfilled polygons */
+ if (rasterizer->fill_cw != PIPE_POLYGON_MODE_FILL ||
+ rasterizer->fill_ccw != PIPE_POLYGON_MODE_FILL)
+ return TRUE;
+
+ /* polygon offset */
+ if (rasterizer->offset_cw || rasterizer->offset_ccw)
+ return TRUE;
+
+ /* two-side lighting */
+ if (rasterizer->light_twoside)
+ return TRUE;
+ }
+
+ /* polygon cull - this is difficult - hardware can cull just fine
+ * most of the time (though sometimes CULL_NEITHER is unsupported).
+ *
+ * Generally this isn't a reason to require the pipeline, though.
+ *
+ if (rasterizer->cull_mode)
+ return TRUE;
+ */
+
+ return FALSE;
+}
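+
+/* A minimal usage sketch (hypothetical caller, not taken from these sources):
+ * the point of the check above is to let a driver pick between the fast
+ * vertex path and the full primitive pipeline, e.g.:
+ *
+ *    if (draw_need_pipeline(draw, draw->rasterizer, prim)) {
+ *       // run prims through the stages assembled by validate_pipeline()
+ *    }
+ *    else {
+ *       // fast path: vertices can go straight to the backend
+ *    }
+ */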
+
+
+
+/**
+ * Rebuild the rendering pipeline.
+ */
+static struct draw_stage *validate_pipeline( struct draw_stage *stage )
+{
+ struct draw_context *draw = stage->draw;
+ struct draw_stage *next = draw->pipeline.rasterize;
+ int need_det = 0;
+ int precalc_flat = 0;
+ boolean wide_lines, wide_points;
+
+ /* Set the validate's next stage to the rasterize stage, so that it
+ * can be found later if needed for flushing.
+ */
+ stage->next = next;
+
+ /* drawing wide lines? */
+ wide_lines = (draw->rasterizer->line_width > draw->pipeline.wide_line_threshold
+ && !draw->rasterizer->line_smooth);
+
+ /* drawing large points? */
+ if (draw->rasterizer->point_sprite && draw->pipeline.point_sprite)
+ wide_points = TRUE;
+ else if (draw->rasterizer->point_smooth && draw->pipeline.aapoint)
+ wide_points = FALSE;
+ else if (draw->rasterizer->point_size > draw->pipeline.wide_point_threshold)
+ wide_points = TRUE;
+ else
+ wide_points = FALSE;
+
+ /*
+ * NOTE: we build up the pipeline in end-to-start order.
+ *
+ * TODO: make the current primitive part of the state and build
+ * shorter pipelines for lines & points.
+ */
+
+ if (draw->rasterizer->line_smooth && draw->pipeline.aaline) {
+ draw->pipeline.aaline->next = next;
+ next = draw->pipeline.aaline;
+ }
+
+ if (draw->rasterizer->point_smooth && draw->pipeline.aapoint) {
+ draw->pipeline.aapoint->next = next;
+ next = draw->pipeline.aapoint;
+ }
+
+ if (wide_lines) {
+ draw->pipeline.wide_line->next = next;
+ next = draw->pipeline.wide_line;
+ precalc_flat = 1;
+ }
+
+ if (wide_points || draw->rasterizer->point_sprite) {
+ draw->pipeline.wide_point->next = next;
+ next = draw->pipeline.wide_point;
+ }
+
+ if (draw->rasterizer->line_stipple_enable && draw->pipeline.line_stipple) {
+ draw->pipeline.stipple->next = next;
+ next = draw->pipeline.stipple;
+ precalc_flat = 1; /* only needed for lines really */
+ }
+
+ if (draw->rasterizer->poly_stipple_enable
+ && draw->pipeline.pstipple) {
+ draw->pipeline.pstipple->next = next;
+ next = draw->pipeline.pstipple;
+ }
+
+ if (draw->rasterizer->fill_cw != PIPE_POLYGON_MODE_FILL ||
+ draw->rasterizer->fill_ccw != PIPE_POLYGON_MODE_FILL) {
+ draw->pipeline.unfilled->next = next;
+ next = draw->pipeline.unfilled;
+ precalc_flat = 1; /* only needed for triangles really */
+ need_det = 1;
+ }
+
+ if (draw->rasterizer->flatshade && precalc_flat) {
+ draw->pipeline.flatshade->next = next;
+ next = draw->pipeline.flatshade;
+ }
+
+ if (draw->rasterizer->offset_cw ||
+ draw->rasterizer->offset_ccw) {
+ draw->pipeline.offset->next = next;
+ next = draw->pipeline.offset;
+ need_det = 1;
+ }
+
+ if (draw->rasterizer->light_twoside) {
+ draw->pipeline.twoside->next = next;
+ next = draw->pipeline.twoside;
+ need_det = 1;
+ }
+
+ /* Always run the cull stage as we calculate determinant there
+ * also.
+ *
+ * This can actually be a win as culling out the triangles can lead
+ * to less work emitting vertices, smaller vertex buffers, etc.
+ * It's difficult to say whether this will be true in general.
+ */
+ if (need_det || draw->rasterizer->cull_mode) {
+ draw->pipeline.cull->next = next;
+ next = draw->pipeline.cull;
+ }
+
+ /* Clip stage
+ */
+ if (!draw->bypass_clipping)
+ {
+ draw->pipeline.clip->next = next;
+ next = draw->pipeline.clip;
+ }
+
+
+ draw->pipeline.first = next;
+
+ if (0) {
+ debug_printf("draw pipeline:\n");
+ for (next = draw->pipeline.first; next ; next = next->next )
+ debug_printf(" %s\n", next->name);
+ debug_printf("\n");
+ }
+
+ return draw->pipeline.first;
+}
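+
+/* The insertion logic above always repeats one end-to-start pattern, shown
+ * here in isolation (illustrative pseudo-code, not a real stage):
+ *
+ *    next = draw->pipeline.rasterize;     // start from the final stage
+ *    if (feature_enabled) {
+ *       feature_stage->next = next;       // link new stage ahead of 'next'
+ *       next = feature_stage;             // it becomes the current head
+ *    }
+ *    ...repeat for each optional stage...
+ *    draw->pipeline.first = next;         // where primitives enter
+ */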
+
+static void validate_tri( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ struct draw_stage *pipeline = validate_pipeline( stage );
+ pipeline->tri( pipeline, header );
+}
+
+static void validate_line( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ struct draw_stage *pipeline = validate_pipeline( stage );
+ pipeline->line( pipeline, header );
+}
+
+static void validate_point( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ struct draw_stage *pipeline = validate_pipeline( stage );
+ pipeline->point( pipeline, header );
+}
+
+static void validate_reset_stipple_counter( struct draw_stage *stage )
+{
+ struct draw_stage *pipeline = validate_pipeline( stage );
+ pipeline->reset_stipple_counter( pipeline );
+}
+
+static void validate_flush( struct draw_stage *stage,
+ unsigned flags )
+{
+ /* May need to pass a backend flush on to the rasterize stage.
+ */
+ if (stage->next)
+ stage->next->flush( stage->next, flags );
+}
+
+
+static void validate_destroy( struct draw_stage *stage )
+{
+ FREE( stage );
+}
+
+
+/**
+ * Create validate pipeline stage.
+ */
+struct draw_stage *draw_validate_stage( struct draw_context *draw )
+{
+ struct draw_stage *stage = CALLOC_STRUCT(draw_stage);
+ if (stage == NULL)
+ return NULL;
+
+ stage->draw = draw;
+ stage->name = "validate";
+ stage->next = NULL;
+ stage->point = validate_point;
+ stage->line = validate_line;
+ stage->tri = validate_tri;
+ stage->flush = validate_flush;
+ stage->reset_stipple_counter = validate_reset_stipple_counter;
+ stage->destroy = validate_destroy;
+
+ return stage;
+}
diff --git a/src/gallium/auxiliary/draw/draw_pipe_vbuf.c b/src/gallium/auxiliary/draw/draw_pipe_vbuf.c
new file mode 100644
index 0000000000..1a5269c0de
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pipe_vbuf.c
@@ -0,0 +1,492 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Vertex buffer drawing stage.
+ *
+ * \author Jose Fonseca <jrfonsec@tungstengraphics.com>
+ * \author Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "util/u_debug.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+#include "draw_vbuf.h"
+#include "draw_private.h"
+#include "draw_vertex.h"
+#include "draw_pipe.h"
+#include "translate/translate.h"
+#include "translate/translate_cache.h"
+
+
+/**
+ * Vertex buffer emit stage.
+ */
+struct vbuf_stage {
+ struct draw_stage stage; /**< This must be first (base class) */
+
+ struct vbuf_render *render;
+
+ const struct vertex_info *vinfo;
+
+ /** Vertex size in bytes */
+ unsigned vertex_size;
+
+ struct translate *translate;
+
+ /* FIXME: we have no guarantee that 'unsigned' is 32 bits wide */
+
+ /** Vertices in hardware format */
+ unsigned *vertices;
+ unsigned *vertex_ptr;
+ unsigned max_vertices;
+ unsigned nr_vertices;
+
+ /** Indices */
+ ushort *indices;
+ unsigned max_indices;
+ unsigned nr_indices;
+
+ /* Cache the point size somewhere its address won't change:
+ */
+ float point_size;
+
+ struct translate_cache *cache;
+};
+
+
+/**
+ * Basically a cast wrapper.
+ */
+static INLINE struct vbuf_stage *
+vbuf_stage( struct draw_stage *stage )
+{
+ assert(stage);
+ return (struct vbuf_stage *)stage;
+}
+
+
+static void vbuf_flush_vertices( struct vbuf_stage *vbuf );
+static void vbuf_alloc_vertices( struct vbuf_stage *vbuf );
+
+
+static INLINE boolean
+overflow( void *map, void *ptr, unsigned bytes, unsigned bufsz )
+{
+ unsigned long used = (unsigned long) ((char *)ptr - (char *)map);
+ return (used + bytes) > bufsz;
+}
+
+
+static INLINE void
+check_space( struct vbuf_stage *vbuf, unsigned nr )
+{
+ if (vbuf->nr_vertices + nr > vbuf->max_vertices ||
+ vbuf->nr_indices + nr > vbuf->max_indices)
+ {
+ vbuf_flush_vertices( vbuf );
+ vbuf_alloc_vertices( vbuf );
+ }
+}
+
+
+
+
+/**
+ * Extract the needed fields from post-transformed vertex and emit
+ * a hardware (driver) vertex.
+ * Recall that the vertices are constructed by the 'draw' module and
+ * have a couple of slots at the beginning (1-dword header, 4-dword
+ * clip pos) that we ignore here. We only use the vertex->data[] fields.
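+ * If the vertex already has a defined vertex_id it was emitted earlier into
+ * the current buffer, so only that id is returned and no data is re-emitted.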
+ */
+static INLINE ushort
+emit_vertex( struct vbuf_stage *vbuf,
+ struct vertex_header *vertex )
+{
+ if(vertex->vertex_id == UNDEFINED_VERTEX_ID) {
+ /* Hmm - vertices are emitted one at a time - better make sure
+ * set_buffer is efficient. Consider a special one-shot mode for
+ * translate.
+ */
+ /* Note: we really do want data[0] here, not data[pos]:
+ */
+ vbuf->translate->set_buffer(vbuf->translate, 0, vertex->data[0], 0);
+ vbuf->translate->run(vbuf->translate, 0, 1, vbuf->vertex_ptr);
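+ /* translate converts this one vertex (the 4-float attribs starting at
+ * data[0]) into the hardware layout at vertex_ptr, according to the key
+ * built in vbuf_start_prim().
+ */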
+
+ if (0) draw_dump_emitted_vertex(vbuf->vinfo, (uint8_t *)vbuf->vertex_ptr);
+
+ vbuf->vertex_ptr += vbuf->vertex_size/4;
+ vertex->vertex_id = vbuf->nr_vertices++;
+ }
+
+ return (ushort)vertex->vertex_id;
+}
+
+
+static void
+vbuf_tri( struct draw_stage *stage,
+ struct prim_header *prim )
+{
+ struct vbuf_stage *vbuf = vbuf_stage( stage );
+ unsigned i;
+
+ check_space( vbuf, 3 );
+
+ if (vbuf->stage.draw->rasterizer->flatshade_first) {
+ /* Put provoking vertex in position expected by the driver.
+ * Emit last provoking vertex in first pos.
+ * Swap verts 0 & 1 to preserve polygon winding.
+ */
+ vbuf->indices[vbuf->nr_indices++] = emit_vertex( vbuf, prim->v[2] );
+ vbuf->indices[vbuf->nr_indices++] = emit_vertex( vbuf, prim->v[0] );
+ vbuf->indices[vbuf->nr_indices++] = emit_vertex( vbuf, prim->v[1] );
+ }
+ else {
+ for (i = 0; i < 3; i++) {
+ vbuf->indices[vbuf->nr_indices++] = emit_vertex( vbuf, prim->v[i] );
+ }
+ }
+}
+
+
+static void
+vbuf_line( struct draw_stage *stage,
+ struct prim_header *prim )
+{
+ struct vbuf_stage *vbuf = vbuf_stage( stage );
+ unsigned i;
+
+ check_space( vbuf, 2 );
+
+ for (i = 0; i < 2; i++) {
+ vbuf->indices[vbuf->nr_indices++] = emit_vertex( vbuf, prim->v[i] );
+ }
+}
+
+
+static void
+vbuf_point( struct draw_stage *stage,
+ struct prim_header *prim )
+{
+ struct vbuf_stage *vbuf = vbuf_stage( stage );
+
+ check_space( vbuf, 1 );
+
+ vbuf->indices[vbuf->nr_indices++] = emit_vertex( vbuf, prim->v[0] );
+}
+
+
+
+
+/**
+ * Set the prim type for subsequent vertices.
+ * This may result in a new vertex size. The existing vbuffer (if any)
+ * will be flushed if needed and a new one allocated.
+ */
+static void
+vbuf_start_prim( struct vbuf_stage *vbuf, uint prim )
+{
+ struct translate_key hw_key;
+ unsigned dst_offset;
+ unsigned i;
+
+ vbuf->render->set_primitive(vbuf->render, prim);
+
+ /* Must do this after set_primitive() above:
+ *
+ * XXX: need some state management to track when this needs to be
+ * recalculated. The driver should tell us whether there was a
+ * state change.
+ */
+ vbuf->vinfo = vbuf->render->get_vertex_info(vbuf->render);
+ vbuf->vertex_size = vbuf->vinfo->size * sizeof(float);
+
+ /* Translate from pipeline vertices to hw vertices.
+ */
+ dst_offset = 0;
+
+ for (i = 0; i < vbuf->vinfo->num_attribs; i++) {
+ unsigned emit_sz = 0;
+ unsigned src_buffer = 0;
+ unsigned output_format;
+ unsigned src_offset = (vbuf->vinfo->attrib[i].src_index * 4 * sizeof(float) );
+
+ switch (vbuf->vinfo->attrib[i].emit) {
+ case EMIT_4F:
+ output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+ emit_sz = 4 * sizeof(float);
+ break;
+ case EMIT_3F:
+ output_format = PIPE_FORMAT_R32G32B32_FLOAT;
+ emit_sz = 3 * sizeof(float);
+ break;
+ case EMIT_2F:
+ output_format = PIPE_FORMAT_R32G32_FLOAT;
+ emit_sz = 2 * sizeof(float);
+ break;
+ case EMIT_1F:
+ output_format = PIPE_FORMAT_R32_FLOAT;
+ emit_sz = 1 * sizeof(float);
+ break;
+ case EMIT_1F_PSIZE:
+ output_format = PIPE_FORMAT_R32_FLOAT;
+ emit_sz = 1 * sizeof(float);
+ src_buffer = 1;
+ src_offset = 0;
+ break;
+ case EMIT_4UB:
+ output_format = PIPE_FORMAT_B8G8R8A8_UNORM;
+ emit_sz = 4 * sizeof(ubyte);
+ break;
+ default:
+ assert(0);
+ output_format = PIPE_FORMAT_NONE;
+ emit_sz = 0;
+ break;
+ }
+
+ hw_key.element[i].input_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+ hw_key.element[i].input_buffer = src_buffer;
+ hw_key.element[i].input_offset = src_offset;
+ hw_key.element[i].output_format = output_format;
+ hw_key.element[i].output_offset = dst_offset;
+
+ dst_offset += emit_sz;
+ }
+
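+ /* nr_elements/output_stride describe the whole hw vertex; output_stride is
+ * in bytes and matches vbuf->vertex_size computed above.
+ */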
+ hw_key.nr_elements = vbuf->vinfo->num_attribs;
+ hw_key.output_stride = vbuf->vinfo->size * 4;
+
+ /* Don't bother with caching at this stage:
+ */
+ if (!vbuf->translate ||
+ translate_key_compare(&vbuf->translate->key, &hw_key) != 0)
+ {
+ translate_key_sanitize(&hw_key);
+ vbuf->translate = translate_cache_find(vbuf->cache, &hw_key);
+
+ vbuf->translate->set_buffer(vbuf->translate, 1, &vbuf->point_size, 0);
+ }
+
+ vbuf->point_size = vbuf->stage.draw->rasterizer->point_size;
+
+ /* Allocate new buffer?
+ */
+ assert(vbuf->vertices == NULL);
+ vbuf_alloc_vertices(vbuf);
+}
+
+
+static void
+vbuf_first_tri( struct draw_stage *stage,
+ struct prim_header *prim )
+{
+ struct vbuf_stage *vbuf = vbuf_stage( stage );
+
+ vbuf_flush_vertices( vbuf );
+ vbuf_start_prim(vbuf, PIPE_PRIM_TRIANGLES);
+ stage->tri = vbuf_tri;
+ stage->tri( stage, prim );
+}
+
+
+static void
+vbuf_first_line( struct draw_stage *stage,
+ struct prim_header *prim )
+{
+ struct vbuf_stage *vbuf = vbuf_stage( stage );
+
+ vbuf_flush_vertices( vbuf );
+ vbuf_start_prim(vbuf, PIPE_PRIM_LINES);
+ stage->line = vbuf_line;
+ stage->line( stage, prim );
+}
+
+
+static void
+vbuf_first_point( struct draw_stage *stage,
+ struct prim_header *prim )
+{
+ struct vbuf_stage *vbuf = vbuf_stage( stage );
+
+ vbuf_flush_vertices(vbuf);
+ vbuf_start_prim(vbuf, PIPE_PRIM_POINTS);
+ stage->point = vbuf_point;
+ stage->point( stage, prim );
+}
+
+
+
+/**
+ * Flush existing vertex buffer and allocate a new one.
+ */
+static void
+vbuf_flush_vertices( struct vbuf_stage *vbuf )
+{
+ if(vbuf->vertices) {
+
+ vbuf->render->unmap_vertices( vbuf->render, 0, vbuf->nr_vertices - 1 );
+
+ if (vbuf->nr_indices)
+ {
+ vbuf->render->draw(vbuf->render,
+ vbuf->indices,
+ vbuf->nr_indices );
+
+ vbuf->nr_indices = 0;
+ }
+
+ /* Reset temporary vertices ids */
+ if(vbuf->nr_vertices)
+ draw_reset_vertex_ids( vbuf->stage.draw );
+
+ /* Free the vertex buffer */
+ vbuf->render->release_vertices( vbuf->render );
+
+ vbuf->max_vertices = vbuf->nr_vertices = 0;
+ vbuf->vertex_ptr = vbuf->vertices = NULL;
+ }
+}
+
+
+static void
+vbuf_alloc_vertices( struct vbuf_stage *vbuf )
+{
+ assert(!vbuf->nr_indices);
+ assert(!vbuf->vertices);
+
+ /* Allocate a new vertex buffer */
+ vbuf->max_vertices = vbuf->render->max_vertex_buffer_bytes / vbuf->vertex_size;
+
+ /* even number */
+ vbuf->max_vertices = vbuf->max_vertices & ~1;
+
+ if(vbuf->max_vertices >= UNDEFINED_VERTEX_ID)
+ vbuf->max_vertices = UNDEFINED_VERTEX_ID - 1;
+
+ /* Must always succeed -- driver gives us a
+ * 'max_vertex_buffer_bytes' which it guarantees it can allocate,
+ * and it will flush itself if necessary to do so. If this does
+ * fail, we are basically without usable hardware.
+ */
+ vbuf->render->allocate_vertices(vbuf->render,
+ (ushort) vbuf->vertex_size,
+ (ushort) vbuf->max_vertices);
+
+ vbuf->vertices = (uint *) vbuf->render->map_vertices( vbuf->render );
+
+ vbuf->vertex_ptr = vbuf->vertices;
+}
+
+
+
+static void
+vbuf_flush( struct draw_stage *stage, unsigned flags )
+{
+ struct vbuf_stage *vbuf = vbuf_stage( stage );
+
+ vbuf_flush_vertices( vbuf );
+
+ stage->point = vbuf_first_point;
+ stage->line = vbuf_first_line;
+ stage->tri = vbuf_first_tri;
+}
+
+
+static void
+vbuf_reset_stipple_counter( struct draw_stage *stage )
+{
+ /* XXX: Need to do something here for hardware with linestipple.
+ */
+ (void) stage;
+}
+
+
+static void vbuf_destroy( struct draw_stage *stage )
+{
+ struct vbuf_stage *vbuf = vbuf_stage( stage );
+
+ if(vbuf->indices)
+ align_free( vbuf->indices );
+
+ if (vbuf->render)
+ vbuf->render->destroy( vbuf->render );
+
+ if (vbuf->cache)
+ translate_cache_destroy(vbuf->cache);
+
+ FREE( stage );
+}
+
+
+/**
+ * Create a new primitive vbuf/render stage.
+ */
+struct draw_stage *draw_vbuf_stage( struct draw_context *draw,
+ struct vbuf_render *render )
+{
+ struct vbuf_stage *vbuf = CALLOC_STRUCT(vbuf_stage);
+ if (vbuf == NULL)
+ goto fail;
+
+ vbuf->stage.draw = draw;
+ vbuf->stage.name = "vbuf";
+ vbuf->stage.point = vbuf_first_point;
+ vbuf->stage.line = vbuf_first_line;
+ vbuf->stage.tri = vbuf_first_tri;
+ vbuf->stage.flush = vbuf_flush;
+ vbuf->stage.reset_stipple_counter = vbuf_reset_stipple_counter;
+ vbuf->stage.destroy = vbuf_destroy;
+
+ vbuf->render = render;
+ /* Clamp to what the backend can draw in a single call: */
+ vbuf->max_indices = MIN2(render->max_indices, UNDEFINED_VERTEX_ID-1);
+
+ vbuf->indices = (ushort *) align_malloc( vbuf->max_indices *
+ sizeof(vbuf->indices[0]),
+ 16 );
+ if (!vbuf->indices)
+ goto fail;
+
+ vbuf->cache = translate_cache_create();
+ if (!vbuf->cache)
+ goto fail;
+
+
+ vbuf->vertices = NULL;
+ vbuf->vertex_ptr = vbuf->vertices;
+
+ return &vbuf->stage;
+
+ fail:
+ if (vbuf)
+ vbuf_destroy(&vbuf->stage);
+
+ return NULL;
+}
diff --git a/src/gallium/auxiliary/draw/draw_pipe_wide_line.c b/src/gallium/auxiliary/draw/draw_pipe_wide_line.c
new file mode 100644
index 0000000000..f32cbef983
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pipe_wide_line.c
@@ -0,0 +1,181 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "pipe/p_defines.h"
+#include "pipe/p_shader_tokens.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "draw_private.h"
+#include "draw_pipe.h"
+
+
+struct wideline_stage {
+ struct draw_stage stage;
+
+ float half_line_width;
+};
+
+
+
+static INLINE struct wideline_stage *wideline_stage( struct draw_stage *stage )
+{
+ return (struct wideline_stage *)stage;
+}
+
+
+
+/**
+ * Draw a wide line by drawing a quad (two triangles).
+ * XXX need to disable polygon stipple.
+ */
+static void wideline_line( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ /*const struct wideline_stage *wide = wideline_stage(stage);*/
+ const unsigned pos = stage->draw->vs.position_output;
+ const float half_width = 0.5f * stage->draw->rasterizer->line_width;
+
+ struct prim_header tri;
+
+ struct vertex_header *v0 = dup_vert(stage, header->v[0], 0);
+ struct vertex_header *v1 = dup_vert(stage, header->v[0], 1);
+ struct vertex_header *v2 = dup_vert(stage, header->v[1], 2);
+ struct vertex_header *v3 = dup_vert(stage, header->v[1], 3);
+
+ float *pos0 = v0->data[pos];
+ float *pos1 = v1->data[pos];
+ float *pos2 = v2->data[pos];
+ float *pos3 = v3->data[pos];
+
+ const float dx = fabsf(pos0[0] - pos2[0]);
+ const float dy = fabsf(pos0[1] - pos2[1]);
+
+ /* small tweak to meet GL specification */
+ const float bias = 0.125f;
+
+ /*
+ * Draw wide line as a quad (two tris) by "stretching" the line along
+ * X or Y.
+ * We need to tweak coords in several ways to be conformant here.
+ */
+
+ if (dx > dy) {
+ /* x-major line */
+ pos0[1] = pos0[1] - half_width - bias;
+ pos1[1] = pos1[1] + half_width - bias;
+ pos2[1] = pos2[1] - half_width - bias;
+ pos3[1] = pos3[1] + half_width - bias;
+ if (pos0[0] < pos2[0]) {
+ /* left to right line */
+ pos0[0] -= 0.5f;
+ pos1[0] -= 0.5f;
+ pos2[0] -= 0.5f;
+ pos3[0] -= 0.5f;
+ }
+ else {
+ /* right to left line */
+ pos0[0] += 0.5f;
+ pos1[0] += 0.5f;
+ pos2[0] += 0.5f;
+ pos3[0] += 0.5f;
+ }
+ }
+ else {
+ /* y-major line */
+ pos0[0] = pos0[0] - half_width + bias;
+ pos1[0] = pos1[0] + half_width + bias;
+ pos2[0] = pos2[0] - half_width + bias;
+ pos3[0] = pos3[0] + half_width + bias;
+ if (pos0[1] < pos2[1]) {
+ /* top to bottom line */
+ pos0[1] -= 0.5f;
+ pos1[1] -= 0.5f;
+ pos2[1] -= 0.5f;
+ pos3[1] -= 0.5f;
+ }
+ else {
+ /* bottom to top line */
+ pos0[1] += 0.5f;
+ pos1[1] += 0.5f;
+ pos2[1] += 0.5f;
+ pos3[1] += 0.5f;
+ }
+ }
+
+ tri.det = header->det; /* only the sign matters */
+ tri.v[0] = v0;
+ tri.v[1] = v2;
+ tri.v[2] = v3;
+ stage->next->tri( stage->next, &tri );
+
+ tri.v[0] = v0;
+ tri.v[1] = v3;
+ tri.v[2] = v1;
+ stage->next->tri( stage->next, &tri );
+}
+
+
+static void wideline_flush( struct draw_stage *stage, unsigned flags )
+{
+ stage->next->flush( stage->next, flags );
+}
+
+
+static void wideline_reset_stipple_counter( struct draw_stage *stage )
+{
+ stage->next->reset_stipple_counter( stage->next );
+}
+
+
+static void wideline_destroy( struct draw_stage *stage )
+{
+ draw_free_temp_verts( stage );
+ FREE( stage );
+}
+
+
+struct draw_stage *draw_wide_line_stage( struct draw_context *draw )
+{
+ struct wideline_stage *wide = CALLOC_STRUCT(wideline_stage);
+ if (wide == NULL)
+ goto fail;
+
+ if (!draw_alloc_temp_verts( &wide->stage, 4 ))
+ goto fail;
+
+ wide->stage.draw = draw;
+ wide->stage.name = "wide-line";
+ wide->stage.next = NULL;
+ wide->stage.point = draw_pipe_passthrough_point;
+ wide->stage.line = wideline_line;
+ wide->stage.tri = draw_pipe_passthrough_tri;
+ wide->stage.flush = wideline_flush;
+ wide->stage.reset_stipple_counter = wideline_reset_stipple_counter;
+ wide->stage.destroy = wideline_destroy;
+
+ return &wide->stage;
+
+ fail:
+ if (wide)
+ wide->stage.destroy( &wide->stage );
+
+ return NULL;
+}
diff --git a/src/gallium/auxiliary/draw/draw_pipe_wide_point.c b/src/gallium/auxiliary/draw/draw_pipe_wide_point.c
new file mode 100644
index 0000000000..7d76a7dbf3
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pipe_wide_point.c
@@ -0,0 +1,333 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+/**
+ * Notes on wide points and sprite mode:
+ *
+ * In wide point/sprite mode we effectively need to convert each incoming
+ * vertex into four outgoing vertices specifying the corners of a quad.
+ * Since we don't (yet) have geometry shaders, we have to handle this here
+ * in the draw module.
+ *
+ * For sprites, it also means that this is where we have to handle texcoords
+ * for the vertices of the quad. OpenGL's GL_COORD_REPLACE state specifies
+ * if/how enabled texcoords are automatically generated for sprites. We pass
+ * that info through gallium in the pipe_rasterizer_state::sprite_coord_mode
+ * array.
+ *
+ * Additionally, GLSL's gl_PointCoord fragment attribute has to be handled
+ * here as well. This is basically an additional texture/generic attribute
+ * that varies .x from 0 to 1 horizontally across the point and varies .y
+ * vertically from 0 to 1 down the sprite.
+ *
+ * With geometry shaders, the state tracker could create a GS to do
+ * most/all of this.
+ */
+
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_shader_tokens.h"
+#include "draw_vs.h"
+#include "draw_pipe.h"
+
+
+struct widepoint_stage {
+ struct draw_stage stage;
+
+ float half_point_size;
+ float point_size_min;
+ float point_size_max;
+
+ float xbias;
+ float ybias;
+
+ uint texcoord_slot[PIPE_MAX_SHADER_OUTPUTS];
+ uint texcoord_mode[PIPE_MAX_SHADER_OUTPUTS];
+ uint num_texcoords;
+
+ int psize_slot;
+
+ int point_coord_fs_input; /**< input for pointcoord */
+};
+
+
+
+static INLINE struct widepoint_stage *
+widepoint_stage( struct draw_stage *stage )
+{
+ return (struct widepoint_stage *)stage;
+}
+
+
+/**
+ * Set the vertex texcoords for sprite mode.
+ * Coords may be left untouched or set to a right-side-up or upside-down
+ * orientation.
+ */
+static void set_texcoords(const struct widepoint_stage *wide,
+ struct vertex_header *v, const float tc[4])
+{
+ uint i;
+ for (i = 0; i < wide->num_texcoords; i++) {
+ if (wide->texcoord_mode[i] != PIPE_SPRITE_COORD_NONE) {
+ uint j = wide->texcoord_slot[i];
+ v->data[j][0] = tc[0];
+ if (wide->texcoord_mode[i] == PIPE_SPRITE_COORD_LOWER_LEFT)
+ v->data[j][1] = 1.0f - tc[1];
+ else
+ v->data[j][1] = tc[1];
+ v->data[j][2] = tc[2];
+ v->data[j][3] = tc[3];
+ }
+ }
+
+ if (wide->point_coord_fs_input >= 0) {
+ /* put gl_PointCoord into the extra vertex slot */
+ uint slot = wide->stage.draw->extra_vp_outputs.slot;
+ v->data[slot][0] = tc[0];
+ v->data[slot][1] = tc[1];
+ v->data[slot][2] = 0.0F;
+ v->data[slot][3] = 1.0F;
+ }
+}
+
+
+/* If there are lots of sprite points (and why wouldn't there be?) it
+ * would probably be more sensible to change hardware setup to
+ * optimize this rather than doing the whole thing in software like
+ * this.
+ */
+static void widepoint_point( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ const struct widepoint_stage *wide = widepoint_stage(stage);
+ const unsigned pos = stage->draw->vs.position_output;
+ const boolean sprite = (boolean) stage->draw->rasterizer->point_sprite;
+ float half_size;
+ float left_adj, right_adj, bot_adj, top_adj;
+
+ struct prim_header tri;
+
+ /* four dups of original vertex */
+ struct vertex_header *v0 = dup_vert(stage, header->v[0], 0);
+ struct vertex_header *v1 = dup_vert(stage, header->v[0], 1);
+ struct vertex_header *v2 = dup_vert(stage, header->v[0], 2);
+ struct vertex_header *v3 = dup_vert(stage, header->v[0], 3);
+
+ float *pos0 = v0->data[pos];
+ float *pos1 = v1->data[pos];
+ float *pos2 = v2->data[pos];
+ float *pos3 = v3->data[pos];
+
+ /* point size is either per-vertex or fixed size */
+ if (wide->psize_slot >= 0) {
+ half_size = header->v[0]->data[wide->psize_slot][0];
+
+ /* XXX: temporary -- do this in the vertex shader??
+ */
+ half_size = CLAMP(half_size,
+ wide->point_size_min,
+ wide->point_size_max);
+
+ half_size *= 0.5f;
+ }
+ else {
+ half_size = wide->half_point_size;
+ }
+
+ left_adj = -half_size + wide->xbias;
+ right_adj = half_size + wide->xbias;
+ bot_adj = half_size + wide->ybias;
+ top_adj = -half_size + wide->ybias;
+
+ pos0[0] += left_adj;
+ pos0[1] += top_adj;
+
+ pos1[0] += left_adj;
+ pos1[1] += bot_adj;
+
+ pos2[0] += right_adj;
+ pos2[1] += top_adj;
+
+ pos3[0] += right_adj;
+ pos3[1] += bot_adj;
+
+ if (sprite) {
+ static const float tex00[4] = { 0, 0, 0, 1 };
+ static const float tex01[4] = { 0, 1, 0, 1 };
+ static const float tex11[4] = { 1, 1, 0, 1 };
+ static const float tex10[4] = { 1, 0, 0, 1 };
+ set_texcoords( wide, v0, tex00 );
+ set_texcoords( wide, v1, tex01 );
+ set_texcoords( wide, v2, tex10 );
+ set_texcoords( wide, v3, tex11 );
+ }
+
+ tri.det = header->det; /* only the sign matters */
+ tri.v[0] = v0;
+ tri.v[1] = v2;
+ tri.v[2] = v3;
+ stage->next->tri( stage->next, &tri );
+
+ tri.v[0] = v0;
+ tri.v[1] = v3;
+ tri.v[2] = v1;
+ stage->next->tri( stage->next, &tri );
+}
+
+
+static int
+find_pntc_input_attrib(struct draw_context *draw)
+{
+ /* Scan the fragment program's input decls to find the pointcoord
+ * attribute. The xy components will store the point coord.
+ */
+ return 0; /* XXX fix this */
+}
+
+
+static void widepoint_first_point( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ struct widepoint_stage *wide = widepoint_stage(stage);
+ struct draw_context *draw = stage->draw;
+
+ wide->half_point_size = 0.5f * draw->rasterizer->point_size;
+ wide->point_size_min = draw->rasterizer->point_size_min;
+ wide->point_size_max = draw->rasterizer->point_size_max;
+ wide->xbias = 0.0;
+ wide->ybias = 0.0;
+
+ if (draw->rasterizer->gl_rasterization_rules) {
+ wide->xbias = 0.125;
+ }
+
+ /* XXX we won't know the real size if it's computed by the vertex shader! */
+ if ((draw->rasterizer->point_size > draw->pipeline.wide_point_threshold) ||
+ (draw->rasterizer->point_sprite && draw->pipeline.point_sprite)) {
+ stage->point = widepoint_point;
+ }
+ else {
+ stage->point = draw_pipe_passthrough_point;
+ }
+
+ if (draw->rasterizer->point_sprite) {
+ /* find vertex shader texcoord outputs */
+ const struct draw_vertex_shader *vs = draw->vs.vertex_shader;
+ uint i, j = 0;
+ for (i = 0; i < vs->info.num_outputs; i++) {
+ if (vs->info.output_semantic_name[i] == TGSI_SEMANTIC_GENERIC) {
+ wide->texcoord_slot[j] = i;
+ wide->texcoord_mode[j] = draw->rasterizer->sprite_coord_mode[j];
+ j++;
+ }
+ }
+ wide->num_texcoords = j;
+
+ /* find fragment shader PointCoord input */
+ wide->point_coord_fs_input = find_pntc_input_attrib(draw);
+
+ /* setup extra vp output (point coord implemented as a texcoord) */
+ draw->extra_vp_outputs.semantic_name = TGSI_SEMANTIC_GENERIC;
+ draw->extra_vp_outputs.semantic_index = 0;
+ draw->extra_vp_outputs.slot = draw->vs.num_vs_outputs;
+ }
+ else {
+ wide->point_coord_fs_input = -1;
+ draw->extra_vp_outputs.slot = 0;
+ }
+
+ wide->psize_slot = -1;
+ if (draw->rasterizer->point_size_per_vertex) {
+ /* find PSIZ vertex output */
+ const struct draw_vertex_shader *vs = draw->vs.vertex_shader;
+ uint i;
+ for (i = 0; i < vs->info.num_outputs; i++) {
+ if (vs->info.output_semantic_name[i] == TGSI_SEMANTIC_PSIZE) {
+ wide->psize_slot = i;
+ break;
+ }
+ }
+ }
+
+ stage->point( stage, header );
+}
+
+
+static void widepoint_flush( struct draw_stage *stage, unsigned flags )
+{
+ stage->point = widepoint_first_point;
+ stage->next->flush( stage->next, flags );
+ stage->draw->extra_vp_outputs.slot = 0;
+}
+
+
+static void widepoint_reset_stipple_counter( struct draw_stage *stage )
+{
+ stage->next->reset_stipple_counter( stage->next );
+}
+
+
+static void widepoint_destroy( struct draw_stage *stage )
+{
+ draw_free_temp_verts( stage );
+ FREE( stage );
+}
+
+
+struct draw_stage *draw_wide_point_stage( struct draw_context *draw )
+{
+ struct widepoint_stage *wide = CALLOC_STRUCT(widepoint_stage);
+ if (wide == NULL)
+ goto fail;
+
+ if (!draw_alloc_temp_verts( &wide->stage, 4 ))
+ goto fail;
+
+ wide->stage.draw = draw;
+ wide->stage.name = "wide-point";
+ wide->stage.next = NULL;
+ wide->stage.point = widepoint_first_point;
+ wide->stage.line = draw_pipe_passthrough_line;
+ wide->stage.tri = draw_pipe_passthrough_tri;
+ wide->stage.flush = widepoint_flush;
+ wide->stage.reset_stipple_counter = widepoint_reset_stipple_counter;
+ wide->stage.destroy = widepoint_destroy;
+
+ return &wide->stage;
+
+ fail:
+ if (wide)
+ wide->stage.destroy( &wide->stage );
+
+ return NULL;
+}
diff --git a/src/gallium/auxiliary/draw/draw_private.h b/src/gallium/auxiliary/draw/draw_private.h
new file mode 100644
index 0000000000..41fcb16a0a
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_private.h
@@ -0,0 +1,319 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Private data structures, etc for the draw module.
+ */
+
+
+/**
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Brian Paul
+ */
+
+
+#ifndef DRAW_PRIVATE_H
+#define DRAW_PRIVATE_H
+
+
+#include "pipe/p_state.h"
+#include "pipe/p_defines.h"
+
+#include "tgsi/tgsi_scan.h"
+
+
+struct pipe_context;
+struct gallivm_prog;
+struct gallivm_cpu_engine;
+struct draw_vertex_shader;
+struct draw_context;
+struct draw_stage;
+struct vbuf_render;
+struct tgsi_exec_machine;
+struct tgsi_sampler;
+
+
+/**
+ * Basic vertex info.
+ * Carry some useful information around with the vertices in the prim pipe.
+ */
+struct vertex_header {
+ unsigned clipmask:12;
+ unsigned edgeflag:1;
+ unsigned pad:3;
+ unsigned vertex_id:16;
+
+ float clip[4];
+
+ /* This will probably become float (*data)[4] soon:
+ */
+ float data[][4];
+};
+
+/* NOTE: It should match vertex_id size above */
+#define UNDEFINED_VERTEX_ID 0xffff
+
+
+/**
+ * Private context for the drawing module.
+ */
+struct draw_context
+{
+ /** Drawing/primitive pipeline stages */
+ struct {
+ struct draw_stage *first; /**< one of the following */
+
+ struct draw_stage *validate;
+
+ /* stages (in logical order) */
+ struct draw_stage *flatshade;
+ struct draw_stage *clip;
+ struct draw_stage *cull;
+ struct draw_stage *twoside;
+ struct draw_stage *offset;
+ struct draw_stage *unfilled;
+ struct draw_stage *stipple;
+ struct draw_stage *aapoint;
+ struct draw_stage *aaline;
+ struct draw_stage *pstipple;
+ struct draw_stage *wide_line;
+ struct draw_stage *wide_point;
+ struct draw_stage *rasterize;
+
+ float wide_point_threshold; /**< convert points to tris if larger than this */
+ float wide_line_threshold; /**< convert lines to tris if wider than this */
+ boolean line_stipple; /**< do line stipple? */
+ boolean point_sprite; /**< convert points to quads for sprites? */
+
+ /* Temporary storage while the pipeline is being run:
+ */
+ char *verts;
+ unsigned vertex_stride;
+ unsigned vertex_count;
+ } pipeline;
+
+
+ struct vbuf_render *render;
+
+ /* Support prototype passthrough path:
+ */
+ struct {
+ struct {
+ struct draw_pt_middle_end *fetch_emit;
+ struct draw_pt_middle_end *fetch_shade_emit;
+ struct draw_pt_middle_end *general;
+ } middle;
+
+ struct {
+ struct draw_pt_front_end *vcache;
+ struct draw_pt_front_end *varray;
+ } front;
+
+ struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
+ unsigned nr_vertex_buffers;
+
+ struct pipe_vertex_element vertex_element[PIPE_MAX_ATTRIBS];
+ unsigned nr_vertex_elements;
+
+ /* user-space vertex data, buffers */
+ struct {
+ const unsigned *edgeflag;
+
+ /** vertex element/index buffer (ex: glDrawElements) */
+ const void *elts;
+ /** bytes per index (0, 1, 2 or 4) */
+ unsigned eltSize;
+ unsigned min_index;
+ unsigned max_index;
+
+ /** vertex arrays */
+ const void *vbuffer[PIPE_MAX_ATTRIBS];
+
+ /** constant buffer (for vertex shader) */
+ const void *constants;
+ } user;
+
+ boolean test_fse; /* enable FSE even though it's not correct (e.g. for softpipe) */
+ boolean no_fse; /* disable FSE even when it is correct */
+ } pt;
+
+ struct {
+ boolean bypass_clipping;
+ boolean bypass_vs;
+ } driver;
+
+ boolean flushing; /**< debugging/sanity */
+ boolean suspend_flushing; /**< internally set */
+ boolean bypass_clipping; /**< set if either api or driver bypass_clipping true */
+
+ boolean force_passthrough; /**< never clip or shade */
+
+ double mrd; /**< minimum resolvable depth value, for polygon offset */
+
+ /* pipe state that we need: */
+ const struct pipe_rasterizer_state *rasterizer;
+ struct pipe_viewport_state viewport;
+ boolean identity_viewport;
+
+ struct {
+ struct draw_vertex_shader *vertex_shader;
+ uint num_vs_outputs; /**< convenience, from vertex_shader */
+ uint position_output;
+
+ /** TGSI program interpreter runtime state */
+ struct tgsi_exec_machine *machine;
+
+ uint num_samplers;
+ struct tgsi_sampler **samplers;
+
+ /* This (and the tgsi_exec_machine struct) probably needs to be moved somewhere private.
+ */
+ struct gallivm_cpu_engine *engine;
+
+ /* Here's another one:
+ */
+ struct aos_machine *aos_machine;
+
+
+ const float (*aligned_constants)[4];
+
+ const float (*aligned_constant_storage)[4];
+ unsigned const_storage_size;
+
+
+ struct translate *fetch;
+ struct translate_cache *fetch_cache;
+ struct translate *emit;
+ struct translate_cache *emit_cache;
+ } vs;
+
+ /* Clip derived state:
+ */
+ float plane[12][4];
+ unsigned nr_planes;
+
+ /* If a prim stage introduces new vertex attributes, they'll be stored here
+ */
+ struct {
+ uint semantic_name;
+ uint semantic_index;
+ int slot;
+ } extra_vp_outputs;
+
+ unsigned reduced_prim;
+
+ void *driver_private;
+};
+
+
+/*******************************************************************************
+ * Vertex shader code:
+ */
+boolean draw_vs_init( struct draw_context *draw );
+void draw_vs_destroy( struct draw_context *draw );
+
+void draw_vs_set_viewport( struct draw_context *,
+ const struct pipe_viewport_state * );
+
+void draw_vs_set_constants( struct draw_context *,
+ const float (*constants)[4],
+ unsigned size );
+
+
+
+
+/*******************************************************************************
+ * Vertex processing (was passthrough) code:
+ */
+boolean draw_pt_init( struct draw_context *draw );
+void draw_pt_destroy( struct draw_context *draw );
+void draw_pt_reset_vertex_ids( struct draw_context *draw );
+
+
+/*******************************************************************************
+ * Primitive processing (pipeline) code:
+ */
+
+boolean draw_pipeline_init( struct draw_context *draw );
+void draw_pipeline_destroy( struct draw_context *draw );
+
+
+
+
+
+/* We use the top few bits in the elts[] parameter to convey a little
+ * API information. This limits the number of vertices we can address
+ * to only 4096 -- if that becomes a problem, we can switch to 32-bit
+ * draw indices.
+ *
+ * These flags are expected at the first vertex of lines & triangles when
+ * unfilled and/or line stipple modes are operational.
+ */
+#define DRAW_PIPE_MAX_VERTICES (0x1<<12)
+#define DRAW_PIPE_EDGE_FLAG_0 (0x1<<12)
+#define DRAW_PIPE_EDGE_FLAG_1 (0x2<<12)
+#define DRAW_PIPE_EDGE_FLAG_2 (0x4<<12)
+#define DRAW_PIPE_EDGE_FLAG_ALL (0x7<<12)
+#define DRAW_PIPE_RESET_STIPPLE (0x8<<12)
+#define DRAW_PIPE_FLAG_MASK (0xf<<12)
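As a standalone illustration of how these flag bits share a ushort element with the 12-bit vertex index, the following sketch (independent of the draw module, with the relevant macros repeated locally) packs a flagged index and then recovers both halves:

#include <assert.h>
#include <stdio.h>

typedef unsigned short ushort;

#define DRAW_PIPE_MAX_VERTICES  (0x1<<12)
#define DRAW_PIPE_EDGE_FLAG_0   (0x1<<12)
#define DRAW_PIPE_RESET_STIPPLE (0x8<<12)
#define DRAW_PIPE_FLAG_MASK     (0xf<<12)

int main(void)
{
   ushort vert = 1234;              /* must stay below 4096 */
   ushort elt;

   assert(vert < DRAW_PIPE_MAX_VERTICES);
   elt = (ushort)(vert | DRAW_PIPE_RESET_STIPPLE | DRAW_PIPE_EDGE_FLAG_0);

   printf("index = %u, flags = 0x%x\n",
          (unsigned)(elt & ~DRAW_PIPE_FLAG_MASK),  /* recover the vertex index */
          (unsigned)(elt & DRAW_PIPE_FLAG_MASK));  /* recover the flag bits */
   return 0;
}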
+
+void draw_pipeline_run( struct draw_context *draw,
+ unsigned prim,
+ struct vertex_header *vertices,
+ unsigned vertex_count,
+ unsigned stride,
+ const ushort *elts,
+ unsigned count );
+
+void draw_pipeline_run_linear( struct draw_context *draw,
+ unsigned prim,
+ struct vertex_header *vertices,
+ unsigned count,
+ unsigned stride );
+
+
+
+void draw_pipeline_flush( struct draw_context *draw,
+ unsigned flags );
+
+
+
+/*******************************************************************************
+ * Flushing
+ */
+
+#define DRAW_FLUSH_STATE_CHANGE 0x8
+#define DRAW_FLUSH_BACKEND 0x10
+
+
+void draw_do_flush( struct draw_context *draw, unsigned flags );
+
+
+
+
+#endif /* DRAW_PRIVATE_H */
diff --git a/src/gallium/auxiliary/draw/draw_pt.c b/src/gallium/auxiliary/draw/draw_pt.c
new file mode 100644
index 0000000000..dbb5ac7182
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pt.c
@@ -0,0 +1,324 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "draw/draw_context.h"
+#include "draw/draw_private.h"
+#include "draw/draw_pt.h"
+#include "draw/draw_vs.h"
+#include "tgsi/tgsi_dump.h"
+#include "util/u_math.h"
+#include "util/u_prim.h"
+
+static unsigned trim( unsigned count, unsigned first, unsigned incr )
+{
+ if (count < first)
+ return 0;
+ return count - (count - first) % incr;
+}
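A quick worked example of what trim() does, under the assumption that draw_pt_split_prim() returns first = 3, incr = 3 for PIPE_PRIM_TRIANGLES and first = 3, incr = 1 for PIPE_PRIM_TRIANGLE_STRIP; a local copy of the function is used so the sketch stands alone:

#include <assert.h>

static unsigned trim_example(unsigned count, unsigned first, unsigned incr)
{
   if (count < first)
      return 0;
   return count - (count - first) % incr;
}

int main(void)
{
   assert(trim_example(8, 3, 3) == 6);   /* TRIANGLES: drop 2 dangling verts */
   assert(trim_example(2, 3, 3) == 0);   /* not enough for one triangle */
   assert(trim_example(8, 3, 1) == 8);   /* TRIANGLE_STRIP: keep everything */
   return 0;
}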
+
+
+
+/* Overall we split things into:
+ * - frontend -- prepare fetch_elts, draw_elts - e.g. vcache
+ * - middle -- fetch, shade, cliptest, viewport
+ * - pipeline -- the prim pipeline: clipping, wide lines, etc.
+ * - backend -- the vbuf_render provided by the driver.
+ */
+static boolean
+draw_pt_arrays(struct draw_context *draw,
+ unsigned prim,
+ unsigned start,
+ unsigned count)
+{
+ struct draw_pt_front_end *frontend = NULL;
+ struct draw_pt_middle_end *middle = NULL;
+ unsigned opt = 0;
+
+ /* Sanitize primitive length:
+ */
+ {
+ unsigned first, incr;
+ draw_pt_split_prim(prim, &first, &incr);
+ count = trim(count, first, incr);
+ if (count < first)
+ return TRUE;
+ }
+
+ if (!draw->force_passthrough) {
+ if (!draw->render) {
+ opt |= PT_PIPELINE;
+ }
+
+ if (draw_need_pipeline(draw,
+ draw->rasterizer,
+ prim)) {
+ opt |= PT_PIPELINE;
+ }
+
+ if (!draw->bypass_clipping && !draw->pt.test_fse) {
+ opt |= PT_CLIPTEST;
+ }
+
+ if (!draw->rasterizer->bypass_vs_clip_and_viewport) {
+ opt |= PT_SHADE;
+ }
+ }
+
+ if (opt == 0)
+ middle = draw->pt.middle.fetch_emit;
+ else if (opt == PT_SHADE && !draw->pt.no_fse)
+ middle = draw->pt.middle.fetch_shade_emit;
+ else
+ middle = draw->pt.middle.general;
+
+
+ /* Pick the right frontend
+ */
+ if (draw->pt.user.elts || (opt & PT_PIPELINE)) {
+ frontend = draw->pt.front.vcache;
+ } else {
+ frontend = draw->pt.front.varray;
+ }
+
+ frontend->prepare( frontend, prim, middle, opt );
+
+ frontend->run(frontend,
+ draw_pt_elt_func(draw),
+ draw_pt_elt_ptr(draw, start),
+ count);
+
+ frontend->finish( frontend );
+
+ return TRUE;
+}
+
+
+boolean draw_pt_init( struct draw_context *draw )
+{
+ draw->pt.test_fse = debug_get_bool_option("DRAW_FSE", FALSE);
+ draw->pt.no_fse = debug_get_bool_option("DRAW_NO_FSE", FALSE);
+
+ draw->pt.front.vcache = draw_pt_vcache( draw );
+ if (!draw->pt.front.vcache)
+ return FALSE;
+
+ draw->pt.front.varray = draw_pt_varray(draw);
+ if (!draw->pt.front.varray)
+ return FALSE;
+
+ draw->pt.middle.fetch_emit = draw_pt_fetch_emit( draw );
+ if (!draw->pt.middle.fetch_emit)
+ return FALSE;
+
+ draw->pt.middle.fetch_shade_emit = draw_pt_middle_fse( draw );
+ if (!draw->pt.middle.fetch_shade_emit)
+ return FALSE;
+
+ draw->pt.middle.general = draw_pt_fetch_pipeline_or_emit( draw );
+ if (!draw->pt.middle.general)
+ return FALSE;
+
+ return TRUE;
+}
+
+
+void draw_pt_destroy( struct draw_context *draw )
+{
+ if (draw->pt.middle.general) {
+ draw->pt.middle.general->destroy( draw->pt.middle.general );
+ draw->pt.middle.general = NULL;
+ }
+
+ if (draw->pt.middle.fetch_emit) {
+ draw->pt.middle.fetch_emit->destroy( draw->pt.middle.fetch_emit );
+ draw->pt.middle.fetch_emit = NULL;
+ }
+
+ if (draw->pt.middle.fetch_shade_emit) {
+ draw->pt.middle.fetch_shade_emit->destroy( draw->pt.middle.fetch_shade_emit );
+ draw->pt.middle.fetch_shade_emit = NULL;
+ }
+
+ if (draw->pt.front.vcache) {
+ draw->pt.front.vcache->destroy( draw->pt.front.vcache );
+ draw->pt.front.vcache = NULL;
+ }
+
+ if (draw->pt.front.varray) {
+ draw->pt.front.varray->destroy( draw->pt.front.varray );
+ draw->pt.front.varray = NULL;
+ }
+}
+
+
+/**
+ * Debug helper: print the first 'count' vertices.
+ */
+static void
+draw_print_arrays(struct draw_context *draw, uint prim, int start, uint count)
+{
+ uint i;
+
+ debug_printf("Draw arrays(prim = %u, start = %u, count = %u)\n",
+ prim, start, count);
+
+ for (i = 0; i < count; i++) {
+ uint ii, j;
+
+ if (draw->pt.user.elts) {
+ /* indexed arrays */
+ switch (draw->pt.user.eltSize) {
+ case 1:
+ {
+ const ubyte *elem = (const ubyte *) draw->pt.user.elts;
+ ii = elem[start + i];
+ }
+ break;
+ case 2:
+ {
+ const ushort *elem = (const ushort *) draw->pt.user.elts;
+ ii = elem[start + i];
+ }
+ break;
+ case 4:
+ {
+ const uint *elem = (const uint *) draw->pt.user.elts;
+ ii = elem[start + i];
+ }
+ break;
+ default:
+ assert(0);
+ }
+ debug_printf("Element[%u + %u] -> Vertex %u:\n", start, i, ii);
+ }
+ else {
+ /* non-indexed arrays */
+ ii = start + i;
+ debug_printf("Vertex %u:\n", ii);
+ }
+
+ for (j = 0; j < draw->pt.nr_vertex_elements; j++) {
+ uint buf = draw->pt.vertex_element[j].vertex_buffer_index;
+ ubyte *ptr = (ubyte *) draw->pt.user.vbuffer[buf];
+ ptr += draw->pt.vertex_buffer[buf].stride * ii;
+ ptr += draw->pt.vertex_element[j].src_offset;
+
+ debug_printf(" Attr %u: ", j);
+ switch (draw->pt.vertex_element[j].src_format) {
+ case PIPE_FORMAT_R32_FLOAT:
+ {
+ float *v = (float *) ptr;
+ debug_printf("%f @ %p\n", v[0], (void *) v);
+ }
+ break;
+ case PIPE_FORMAT_R32G32_FLOAT:
+ {
+ float *v = (float *) ptr;
+ debug_printf("%f %f @ %p\n", v[0], v[1], (void *) v);
+ }
+ break;
+ case PIPE_FORMAT_R32G32B32_FLOAT:
+ {
+ float *v = (float *) ptr;
+ debug_printf("%f %f %f @ %p\n", v[0], v[1], v[2], (void *) v);
+ }
+ break;
+ case PIPE_FORMAT_R32G32B32A32_FLOAT:
+ {
+ float *v = (float *) ptr;
+ debug_printf("%f %f %f %f @ %p\n", v[0], v[1], v[2], v[3],
+ (void *) v);
+ }
+ break;
+ default:
+ debug_printf("other format (fix me)\n");
+ ;
+ }
+ }
+ }
+}
+
+
+/**
+ * Draw vertex arrays
+ * This is the main entrypoint into the drawing module.
+ * \param prim one of PIPE_PRIM_x
+ * \param start index of first vertex to draw
+ * \param count number of vertices to draw
+ */
+void
+draw_arrays(struct draw_context *draw, unsigned prim,
+ unsigned start, unsigned count)
+{
+ unsigned reduced_prim = u_reduced_prim(prim);
+ if (reduced_prim != draw->reduced_prim) {
+ draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
+ draw->reduced_prim = reduced_prim;
+ }
+
+ if (0)
+ draw_print_arrays(draw, prim, start, MIN2(count, 20));
+
+#if 0
+ {
+ int i;
+ debug_printf("draw_arrays(prim=%u start=%u count=%u):\n",
+ prim, start, count);
+ tgsi_dump(draw->vs.vertex_shader->state.tokens, 0);
+ debug_printf("Elements:\n");
+ for (i = 0; i < draw->pt.nr_vertex_elements; i++) {
+ debug_printf(" format=%s comps=%u\n",
+ pf_name(draw->pt.vertex_element[i].src_format),
+ draw->pt.vertex_element[i].nr_components);
+ }
+ debug_printf("Buffers:\n");
+ for (i = 0; i < draw->pt.nr_vertex_buffers; i++) {
+ debug_printf(" stride=%u offset=%u ptr=%p\n",
+ draw->pt.vertex_buffer[i].stride,
+ draw->pt.vertex_buffer[i].buffer_offset,
+ draw->pt.user.vbuffer[i]);
+ }
+ }
+#endif
+
+ /* drawing done here: */
+ draw_pt_arrays(draw, prim, start, count);
+}
+
+boolean draw_pt_get_edgeflag( struct draw_context *draw,
+ unsigned idx )
+{
+ if (draw->pt.user.edgeflag)
+ return (draw->pt.user.edgeflag[idx/32] & (1 << (idx%32))) != 0;
+ else
+ return 1;
+}
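The edgeflag array queried above packs one bit per vertex, 32 to a uint word. A tiny standalone sketch of the same indexing (the array and index here are made up for illustration):

#include <assert.h>

int main(void)
{
   unsigned edgeflag[2] = { 0, 0 };
   unsigned idx = 37;                          /* lands in word 1, bit 5 */

   edgeflag[idx / 32] |= 1u << (idx % 32);     /* set the flag */
   assert((edgeflag[idx / 32] & (1u << (idx % 32))) != 0);
   assert(edgeflag[1] == (1u << 5));
   return 0;
}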
diff --git a/src/gallium/auxiliary/draw/draw_pt.h b/src/gallium/auxiliary/draw/draw_pt.h
new file mode 100644
index 0000000000..7a17a9fb6b
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pt.h
@@ -0,0 +1,233 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef DRAW_PT_H
+#define DRAW_PT_H
+
+#include "pipe/p_compiler.h"
+
+typedef unsigned (*pt_elt_func)( const void *elts, unsigned idx );
+
+struct draw_pt_middle_end;
+struct draw_context;
+
+
+#define PT_SHADE 0x1
+#define PT_CLIPTEST 0x2
+#define PT_PIPELINE 0x4
+#define PT_MAX_MIDDLE 0x8
+
+
+/* The "front end" - prepare sets of fetch, draw elements for the
+ * middle end.
+ *
+ * Currenly one version of this:
+ * - vcache - catchall implementation, decomposes to TRI/LINE/POINT prims
+ * Later:
+ * - varray, varray_split
+ * - velement, velement_split
+ *
+ * Currenly only using the vcache version.
+ */
+struct draw_pt_front_end {
+ void (*prepare)( struct draw_pt_front_end *,
+ unsigned prim,
+ struct draw_pt_middle_end *,
+ unsigned opt );
+
+ void (*run)( struct draw_pt_front_end *,
+ pt_elt_func elt_func,
+ const void *elt_ptr,
+ unsigned count );
+
+ void (*finish)( struct draw_pt_front_end * );
+ void (*destroy)( struct draw_pt_front_end * );
+};
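A usage sketch of this interface, mirroring the prepare/run/finish sequence used by draw_pt_arrays() in draw_pt.c above; the helper name is made up and is not part of the module:

/* Hypothetical helper -- not part of the draw module. */
static void example_run_frontend(struct draw_pt_front_end *fe,
                                 struct draw_pt_middle_end *me,
                                 pt_elt_func elt_func,
                                 const void *elt_ptr,
                                 unsigned prim,
                                 unsigned opt,
                                 unsigned count)
{
   fe->prepare(fe, prim, me, opt);        /* bind middle end + options */
   fe->run(fe, elt_func, elt_ptr, count); /* feed it the element stream */
   fe->finish(fe);                        /* flush any buffered prims */
}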
+
+
+/* The "middle end" - prepares actual hardware vertices for the
+ * hardware backend.
+ *
+ * Currently two versions of this:
+ * - fetch, vertex shade, cliptest, prim-pipeline
+ * - fetch, emit (ie passthrough)
+ */
+struct draw_pt_middle_end {
+ void (*prepare)( struct draw_pt_middle_end *,
+ unsigned prim,
+ unsigned opt,
+ unsigned *max_vertices );
+
+ void (*run)( struct draw_pt_middle_end *,
+ const unsigned *fetch_elts,
+ unsigned fetch_count,
+ const ushort *draw_elts,
+ unsigned draw_count );
+
+ void (*run_linear)(struct draw_pt_middle_end *,
+ unsigned start,
+ unsigned count);
+
+ /* Transform all vertices in a linear range and then draw them with
+ * the supplied element list. May fail and return FALSE.
+ */
+ boolean (*run_linear_elts)( struct draw_pt_middle_end *,
+ unsigned fetch_start,
+ unsigned fetch_count,
+ const ushort *draw_elts,
+ unsigned draw_count );
+
+ int (*get_max_vertex_count)( struct draw_pt_middle_end * );
+
+ void (*finish)( struct draw_pt_middle_end * );
+ void (*destroy)( struct draw_pt_middle_end * );
+};
+
+
+/* The "back end" - supplied by the driver, defined in draw_vbuf.h.
+ */
+struct vbuf_render;
+struct vertex_header;
+
+
+/* Helper functions.
+ */
+pt_elt_func draw_pt_elt_func( struct draw_context *draw );
+const void *draw_pt_elt_ptr( struct draw_context *draw,
+ unsigned start );
+
+/* Frontends:
+ *
+ * Currently the general-purpose vcache implementation and a simpler
+ * varray path for linear (non-indexed) draws; a special case for tiny
+ * vertex buffers could still be added.
+ */
+struct draw_pt_front_end *draw_pt_vcache( struct draw_context *draw );
+struct draw_pt_front_end *draw_pt_varray(struct draw_context *draw);
+
+
+/* Middle-ends:
+ *
+ * Currently one general-purpose case which can do all possibilities,
+ * at the slight expense of creating a vertex_header in some cases
+ * unnecessarily.
+ *
+ * The special case fetch_emit code avoids pipeline vertices
+ * altogether and builds hardware vertices directly from API
+ * vertex_elements.
+ */
+struct draw_pt_middle_end *draw_pt_fetch_emit( struct draw_context *draw );
+struct draw_pt_middle_end *draw_pt_middle_fse( struct draw_context *draw );
+struct draw_pt_middle_end *draw_pt_fetch_pipeline_or_emit(struct draw_context *draw);
+
+
+/* More helpers:
+ */
+boolean draw_pt_get_edgeflag( struct draw_context *draw,
+ unsigned idx );
+
+
+/*******************************************************************************
+ * HW vertex emit:
+ */
+struct pt_emit;
+
+void draw_pt_emit_prepare( struct pt_emit *emit,
+ unsigned prim,
+ unsigned *max_vertices );
+
+void draw_pt_emit( struct pt_emit *emit,
+ const float (*vertex_data)[4],
+ unsigned vertex_count,
+ unsigned stride,
+ const ushort *elts,
+ unsigned count );
+
+void draw_pt_emit_linear( struct pt_emit *emit,
+ const float (*vertex_data)[4],
+ unsigned stride,
+ unsigned count );
+
+void draw_pt_emit_destroy( struct pt_emit *emit );
+
+struct pt_emit *draw_pt_emit_create( struct draw_context *draw );
+
+
+/*******************************************************************************
+ * API vertex fetch:
+ */
+
+struct pt_fetch;
+void draw_pt_fetch_prepare( struct pt_fetch *fetch,
+ unsigned vertex_input_count,
+ unsigned vertex_size );
+
+void draw_pt_fetch_run( struct pt_fetch *fetch,
+ const unsigned *elts,
+ unsigned count,
+ char *verts );
+
+void draw_pt_fetch_run_linear( struct pt_fetch *fetch,
+ unsigned start,
+ unsigned count,
+ char *verts );
+
+void draw_pt_fetch_destroy( struct pt_fetch *fetch );
+
+struct pt_fetch *draw_pt_fetch_create( struct draw_context *draw );
+
+/*******************************************************************************
+ * Post-VS: cliptest, rhw, viewport
+ */
+struct pt_post_vs;
+
+boolean draw_pt_post_vs_run( struct pt_post_vs *pvs,
+ struct vertex_header *pipeline_verts,
+ unsigned stride,
+ unsigned count );
+
+void draw_pt_post_vs_prepare( struct pt_post_vs *pvs,
+ boolean bypass_clipping,
+ boolean bypass_viewport,
+ boolean opengl );
+
+struct pt_post_vs *draw_pt_post_vs_create( struct draw_context *draw );
+
+void draw_pt_post_vs_destroy( struct pt_post_vs *pvs );
+
+
+/*******************************************************************************
+ * Utils:
+ */
+void draw_pt_split_prim(unsigned prim, unsigned *first, unsigned *incr);
+
+
+#endif
diff --git a/src/gallium/auxiliary/draw/draw_pt_decompose.h b/src/gallium/auxiliary/draw/draw_pt_decompose.h
new file mode 100644
index 0000000000..4ca5b52020
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pt_decompose.h
@@ -0,0 +1,182 @@
+
+
+static void FUNC( ARGS,
+ unsigned count )
+{
+ LOCAL_VARS;
+
+ switch (prim) {
+ case PIPE_PRIM_POINTS:
+ for (i = 0; i < count; i ++) {
+ POINT( (i + 0) );
+ }
+ break;
+
+ case PIPE_PRIM_LINES:
+ for (i = 0; i+1 < count; i += 2) {
+ LINE( DRAW_PIPE_RESET_STIPPLE,
+ (i + 0),
+ (i + 1));
+ }
+ break;
+
+ case PIPE_PRIM_LINE_LOOP:
+ if (count >= 2) {
+ flags = DRAW_PIPE_RESET_STIPPLE;
+
+ for (i = 1; i < count; i++, flags = 0) {
+ LINE( flags,
+ (i - 1),
+ (i ));
+ }
+
+ LINE( flags,
+ (i - 1),
+ (0 ));
+ }
+ break;
+
+ case PIPE_PRIM_LINE_STRIP:
+ flags = DRAW_PIPE_RESET_STIPPLE;
+ for (i = 1; i < count; i++, flags = 0) {
+ LINE( flags,
+ (i - 1),
+ (i ));
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLES:
+ for (i = 0; i+2 < count; i += 3) {
+ if (flatfirst) {
+ /* put provoking vertex in last pos for clipper */
+ TRIANGLE( DRAW_PIPE_RESET_STIPPLE | DRAW_PIPE_EDGE_FLAG_ALL,
+ (i + 1),
+ (i + 2),
+ (i + 0 ));
+ }
+ else {
+ TRIANGLE( DRAW_PIPE_RESET_STIPPLE | DRAW_PIPE_EDGE_FLAG_ALL,
+ (i + 0),
+ (i + 1),
+ (i + 2 ));
+ }
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ if (flatfirst) {
+ for (i = 0; i+2 < count; i++) {
+ TRIANGLE( DRAW_PIPE_RESET_STIPPLE | DRAW_PIPE_EDGE_FLAG_ALL,
+ (i + 1 + (i&1)),
+ (i + 2 - (i&1)),
+ (i + 0) );
+ }
+ }
+ else {
+ for (i = 0; i+2 < count; i++) {
+ TRIANGLE( DRAW_PIPE_RESET_STIPPLE | DRAW_PIPE_EDGE_FLAG_ALL,
+ (i + 0 + (i&1)),
+ (i + 1 - (i&1)),
+ (i + 2 ));
+ }
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLE_FAN:
+ if (count >= 3) {
+ if (flatfirst) {
+ for (i = 0; i+2 < count; i++) {
+ TRIANGLE( DRAW_PIPE_RESET_STIPPLE | DRAW_PIPE_EDGE_FLAG_ALL,
+ (i + 2),
+ 0,
+ (i + 1) );
+ }
+ }
+ else {
+ for (i = 0; i+2 < count; i++) {
+ TRIANGLE( DRAW_PIPE_RESET_STIPPLE | DRAW_PIPE_EDGE_FLAG_ALL,
+ (0),
+ (i + 1),
+ (i + 2 ));
+ }
+ }
+ }
+ break;
+
+
+ case PIPE_PRIM_QUADS:
+ if (flatfirst) {
+ for (i = 0; i+3 < count; i += 4) {
+ QUAD( (i + 1),
+ (i + 2),
+ (i + 3),
+ (i + 0) );
+ }
+ }
+ else {
+ for (i = 0; i+3 < count; i += 4) {
+ QUAD( (i + 0),
+ (i + 1),
+ (i + 2),
+ (i + 3));
+ }
+ }
+ break;
+
+ case PIPE_PRIM_QUAD_STRIP:
+ if (flatfirst) {
+ for (i = 0; i+3 < count; i += 2) {
+ QUAD( (i + 1),
+ (i + 3),
+ (i + 2),
+ (i + 0) );
+ }
+ }
+ else {
+ for (i = 0; i+3 < count; i += 2) {
+ QUAD( (i + 2),
+ (i + 0),
+ (i + 1),
+ (i + 3));
+ }
+ }
+ break;
+
+ case PIPE_PRIM_POLYGON:
+ {
+ /* These bitflags look a little odd because we submit the
+ * vertices as (1,2,0) to satisfy flatshade requirements.
+ */
+ const ushort edge_first = DRAW_PIPE_EDGE_FLAG_2;
+ const ushort edge_middle = DRAW_PIPE_EDGE_FLAG_0;
+ const ushort edge_last = DRAW_PIPE_EDGE_FLAG_1;
+
+ flags = DRAW_PIPE_RESET_STIPPLE | edge_first | edge_middle;
+
+ for (i = 0; i+2 < count; i++, flags = edge_middle) {
+
+ if (i + 3 == count)
+ flags |= edge_last;
+
+ TRIANGLE( flags,
+ (i + 1),
+ (i + 2),
+ (0));
+ }
+ }
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+
+ FLUSH;
+}
+
+
+#undef TRIANGLE
+#undef QUAD
+#undef POINT
+#undef LINE
+#undef FUNC
diff --git a/src/gallium/auxiliary/draw/draw_pt_elts.c b/src/gallium/auxiliary/draw/draw_pt_elts.c
new file mode 100644
index 0000000000..88f4d9f495
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pt_elts.c
@@ -0,0 +1,89 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "draw/draw_pt.h"
+#include "draw/draw_private.h"
+
+/* get_elt functions for each index size. The eltSize == 0 variant also
+ * handles varray (non-indexed) drawing by encoding the start value into
+ * the pointer itself.
+ */
+
+static unsigned elt_uint( const void *elts, unsigned idx )
+{
+ return *(((const uint *)elts) + idx);
+}
+
+static unsigned elt_ushort( const void *elts, unsigned idx )
+{
+ return *(((const ushort *)elts) + idx);
+}
+
+static unsigned elt_ubyte( const void *elts, unsigned idx )
+{
+ return *(((const ubyte *)elts) + idx);
+}
+
+static unsigned elt_vert( const void *elts, unsigned idx )
+{
+ /* unsigned index is packed in the pointer */
+ return (unsigned)(uintptr_t)elts + idx;
+}
+
+pt_elt_func draw_pt_elt_func( struct draw_context *draw )
+{
+ switch (draw->pt.user.eltSize) {
+ case 0: return &elt_vert;
+ case 1: return &elt_ubyte;
+ case 2: return &elt_ushort;
+ case 4: return &elt_uint;
+ default: return NULL;
+ }
+}
+
+const void *draw_pt_elt_ptr( struct draw_context *draw,
+ unsigned start )
+{
+ const char *elts = draw->pt.user.elts;
+
+ switch (draw->pt.user.eltSize) {
+ case 0:
+ return (const void *)(((const ubyte *)NULL) + start);
+ case 1:
+ return (const void *)(((const ubyte *)elts) + start);
+ case 2:
+ return (const void *)(((const ushort *)elts) + start);
+ case 4:
+ return (const void *)(((const uint *)elts) + start);
+ default:
+ return NULL;
+ }
+}
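A standalone sketch of the eltSize == 0 (varray) trick used above: draw_pt_elt_ptr() encodes the start index as the pointer itself, and the fetch function adds idx back on. Local copies are used so the sketch compiles on its own:

#include <assert.h>
#include <stdint.h>

static unsigned elt_vert_example(const void *elts, unsigned idx)
{
   /* the "pointer" is really just the start index */
   return (unsigned)(uintptr_t)elts + idx;
}

int main(void)
{
   unsigned start = 100;
   const void *elts = (const void *)(uintptr_t)start;  /* as draw_pt_elt_ptr() would build it */

   assert(elt_vert_example(elts, 0) == 100);
   assert(elt_vert_example(elts, 7) == 107);
   return 0;
}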
diff --git a/src/gallium/auxiliary/draw/draw_pt_emit.c b/src/gallium/auxiliary/draw/draw_pt_emit.c
new file mode 100644
index 0000000000..064e16c295
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pt_emit.c
@@ -0,0 +1,313 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_memory.h"
+#include "draw/draw_context.h"
+#include "draw/draw_private.h"
+#include "draw/draw_vbuf.h"
+#include "draw/draw_vertex.h"
+#include "draw/draw_pt.h"
+#include "translate/translate.h"
+#include "translate/translate_cache.h"
+
+struct pt_emit {
+ struct draw_context *draw;
+
+ struct translate *translate;
+
+ struct translate_cache *cache;
+ unsigned prim;
+
+ const struct vertex_info *vinfo;
+};
+
+void draw_pt_emit_prepare( struct pt_emit *emit,
+ unsigned prim,
+ unsigned *max_vertices )
+{
+ struct draw_context *draw = emit->draw;
+ const struct vertex_info *vinfo;
+ unsigned dst_offset;
+ struct translate_key hw_key;
+ unsigned i;
+ boolean ok;
+
+ /* XXX: need to flush to get prim_vbuf.c to release its allocation??
+ */
+ draw_do_flush( draw, DRAW_FLUSH_BACKEND );
+
+
+ /* XXX: may need to defensively reset this later on as clipping can
+ * clobber this state in the render backend.
+ */
+ emit->prim = prim;
+
+ ok = draw->render->set_primitive(draw->render, emit->prim);
+ if (!ok) {
+ assert(0);
+ return;
+ }
+
+ /* Must do this after set_primitive() above:
+ */
+ emit->vinfo = vinfo = draw->render->get_vertex_info(draw->render);
+
+
+ /* Translate from pipeline vertices to hw vertices.
+ */
+ dst_offset = 0;
+ for (i = 0; i < vinfo->num_attribs; i++) {
+ unsigned emit_sz = 0;
+ unsigned src_buffer = 0;
+ unsigned output_format;
+ unsigned src_offset = (vinfo->attrib[i].src_index * 4 * sizeof(float) );
+
+
+
+ switch (vinfo->attrib[i].emit) {
+ case EMIT_4F:
+ output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+ emit_sz = 4 * sizeof(float);
+ break;
+ case EMIT_3F:
+ output_format = PIPE_FORMAT_R32G32B32_FLOAT;
+ emit_sz = 3 * sizeof(float);
+ break;
+ case EMIT_2F:
+ output_format = PIPE_FORMAT_R32G32_FLOAT;
+ emit_sz = 2 * sizeof(float);
+ break;
+ case EMIT_1F:
+ output_format = PIPE_FORMAT_R32_FLOAT;
+ emit_sz = 1 * sizeof(float);
+ break;
+ case EMIT_1F_PSIZE:
+ output_format = PIPE_FORMAT_R32_FLOAT;
+ emit_sz = 1 * sizeof(float);
+ src_buffer = 1;
+ src_offset = 0;
+ break;
+ case EMIT_4UB:
+ output_format = PIPE_FORMAT_B8G8R8A8_UNORM;
+ emit_sz = 4 * sizeof(ubyte);
+ break;
+ default:
+ assert(0);
+ output_format = PIPE_FORMAT_NONE;
+ emit_sz = 0;
+ break;
+ }
+
+ hw_key.element[i].input_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+ hw_key.element[i].input_buffer = src_buffer;
+ hw_key.element[i].input_offset = src_offset;
+ hw_key.element[i].output_format = output_format;
+ hw_key.element[i].output_offset = dst_offset;
+
+ dst_offset += emit_sz;
+ }
+
+ hw_key.nr_elements = vinfo->num_attribs;
+ hw_key.output_stride = vinfo->size * 4;
+
+ if (!emit->translate ||
+ translate_key_compare(&emit->translate->key, &hw_key) != 0)
+ {
+ translate_key_sanitize(&hw_key);
+ emit->translate = translate_cache_find(emit->cache, &hw_key);
+ }
+
+ *max_vertices = (draw->render->max_vertex_buffer_bytes /
+ (vinfo->size * 4));
+
+ /* Round down to an even number of verts so that splitting long strips
+ * doesn't flip front/back orientation (see fetch_emit_prepare).
+ */
+ *max_vertices = *max_vertices & ~1;
+}
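As a consistency check on the layout built above, here is the arithmetic for a hypothetical vertex_info with two EMIT_4F attributes and one EMIT_4UB attribute (so vinfo->size would be 9 dwords): the accumulated dst_offset and vinfo->size * 4 agree.

#include <assert.h>

int main(void)
{
   /* assumed example layout: EMIT_4F + EMIT_4F + EMIT_4UB */
   unsigned dst_offset = 4 * sizeof(float)          /* EMIT_4F  -> 16 bytes */
                       + 4 * sizeof(float)          /* EMIT_4F  -> 16 bytes */
                       + 4 * sizeof(unsigned char); /* EMIT_4UB ->  4 bytes */
   unsigned vinfo_size = 9;                         /* dwords per vertex */

   assert(dst_offset == 36);
   assert(vinfo_size * 4 == dst_offset);            /* output_stride matches */
   return 0;
}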
+
+
+void draw_pt_emit( struct pt_emit *emit,
+ const float (*vertex_data)[4],
+ unsigned vertex_count,
+ unsigned stride,
+ const ushort *elts,
+ unsigned count )
+{
+ struct draw_context *draw = emit->draw;
+ struct translate *translate = emit->translate;
+ struct vbuf_render *render = draw->render;
+ void *hw_verts;
+
+ /* XXX: need to flush to get prim_vbuf.c to release its allocation??
+ */
+ draw_do_flush( draw, DRAW_FLUSH_BACKEND );
+
+ if (vertex_count == 0)
+ return;
+
+ if (vertex_count >= UNDEFINED_VERTEX_ID) {
+ assert(0);
+ return;
+ }
+
+ /* XXX: and work out some way to coordinate the render primitive
+ * between vbuf.c and here...
+ */
+ if (!draw->render->set_primitive(draw->render, emit->prim)) {
+ assert(0);
+ return;
+ }
+
+ render->allocate_vertices(render,
+ (ushort)translate->key.output_stride,
+ (ushort)vertex_count);
+
+ hw_verts = render->map_vertices( render );
+ if (!hw_verts) {
+ assert(0);
+ return;
+ }
+
+ translate->set_buffer(translate,
+ 0,
+ vertex_data,
+ stride );
+
+ translate->set_buffer(translate,
+ 1,
+ &draw->rasterizer->point_size,
+ 0);
+
+ translate->run( translate,
+ 0,
+ vertex_count,
+ hw_verts );
+
+ render->unmap_vertices( render,
+ 0,
+ vertex_count - 1 );
+
+ render->draw(render,
+ elts,
+ count);
+
+ render->release_vertices(render);
+}
+
+
+void draw_pt_emit_linear(struct pt_emit *emit,
+ const float (*vertex_data)[4],
+ unsigned stride,
+ unsigned count)
+{
+ struct draw_context *draw = emit->draw;
+ struct translate *translate = emit->translate;
+ struct vbuf_render *render = draw->render;
+ void *hw_verts;
+
+#if 0
+ debug_printf("Linear emit\n");
+#endif
+ /* XXX: need to flush to get prim_vbuf.c to release its allocation??
+ */
+ draw_do_flush( draw, DRAW_FLUSH_BACKEND );
+
+ if (count >= UNDEFINED_VERTEX_ID)
+ goto fail;
+
+ /* XXX: and work out some way to coordinate the render primitive
+ * between vbuf.c and here...
+ */
+ if (!draw->render->set_primitive(draw->render, emit->prim))
+ goto fail;
+
+ if (!render->allocate_vertices(render,
+ (ushort)translate->key.output_stride,
+ (ushort)count))
+ goto fail;
+
+ hw_verts = render->map_vertices( render );
+ if (!hw_verts)
+ goto fail;
+
+ translate->set_buffer(translate, 0,
+ vertex_data, stride);
+
+ translate->set_buffer(translate, 1,
+ &draw->rasterizer->point_size,
+ 0);
+
+ translate->run(translate,
+ 0,
+ count,
+ hw_verts);
+
+ if (0) {
+ unsigned i;
+ for (i = 0; i < count; i++) {
+ debug_printf("\n\n%s vertex %d:\n", __FUNCTION__, i);
+ draw_dump_emitted_vertex( emit->vinfo,
+ (const uint8_t *)hw_verts +
+ translate->key.output_stride * i );
+ }
+ }
+
+ render->unmap_vertices( render, 0, count - 1 );
+
+ render->draw_arrays(render, 0, count);
+
+ render->release_vertices(render);
+
+ return;
+
+fail:
+ assert(0);
+ return;
+}
+
+struct pt_emit *draw_pt_emit_create( struct draw_context *draw )
+{
+ struct pt_emit *emit = CALLOC_STRUCT(pt_emit);
+ if (!emit)
+ return NULL;
+
+ emit->draw = draw;
+ emit->cache = translate_cache_create();
+ if (!emit->cache) {
+ FREE(emit);
+ return NULL;
+ }
+
+ return emit;
+}
+
+void draw_pt_emit_destroy( struct pt_emit *emit )
+{
+ if (emit->cache)
+ translate_cache_destroy(emit->cache);
+
+ FREE(emit);
+}
diff --git a/src/gallium/auxiliary/draw/draw_pt_fetch.c b/src/gallium/auxiliary/draw/draw_pt_fetch.c
new file mode 100644
index 0000000000..65c3a34c34
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pt_fetch.c
@@ -0,0 +1,234 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_memory.h"
+#include "util/u_math.h"
+#include "draw/draw_context.h"
+#include "draw/draw_private.h"
+#include "draw/draw_vbuf.h"
+#include "draw/draw_vertex.h"
+#include "draw/draw_pt.h"
+#include "translate/translate.h"
+#include "translate/translate_cache.h"
+
+
+struct pt_fetch {
+ struct draw_context *draw;
+
+ struct translate *translate;
+
+ unsigned vertex_size;
+ boolean need_edgeflags;
+
+ struct translate_cache *cache;
+};
+
+/* Perform the fetch from API vertex elements & vertex buffers into a
+ * contiguous set of float[4] attributes, as required by the
+ * vertex_shader->run_linear() method.
+ *
+ * This is used in all cases except pure passthrough
+ * (draw_pt_fetch_emit.c), which has its own version that translates
+ * directly to hw vertices.
+ */
+void draw_pt_fetch_prepare( struct pt_fetch *fetch,
+ unsigned vs_input_count,
+ unsigned vertex_size )
+{
+ struct draw_context *draw = fetch->draw;
+ unsigned nr_inputs;
+ unsigned i, nr = 0;
+ unsigned dst_offset = 0;
+ struct translate_key key;
+
+ fetch->vertex_size = vertex_size;
+
+ /* Always emit/leave space for a vertex header.
+ *
+ * It's worth considering whether the vertex headers should contain
+ * a pointer to the 'data', rather than having it inline.
+ * Something to look at after we've fully switched over to the pt
+ * paths.
+ */
+ {
+ /* Need to set header->vertex_id = 0xffff somehow.
+ */
+ key.element[nr].input_format = PIPE_FORMAT_R32_FLOAT;
+ key.element[nr].input_buffer = draw->pt.nr_vertex_buffers;
+ key.element[nr].input_offset = 0;
+ key.element[nr].output_format = PIPE_FORMAT_R32_FLOAT;
+ key.element[nr].output_offset = dst_offset;
+ dst_offset += 1 * sizeof(float);
+ nr++;
+
+
+ /* Just leave the clip[] array untouched.
+ */
+ dst_offset += 4 * sizeof(float);
+ }
+
+ assert( draw->pt.nr_vertex_elements >= vs_input_count );
+
+ nr_inputs = MIN2( vs_input_count, draw->pt.nr_vertex_elements );
+
+ for (i = 0; i < nr_inputs; i++) {
+ key.element[nr].input_format = draw->pt.vertex_element[i].src_format;
+ key.element[nr].input_buffer = draw->pt.vertex_element[i].vertex_buffer_index;
+ key.element[nr].input_offset = draw->pt.vertex_element[i].src_offset;
+ key.element[nr].output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+ key.element[nr].output_offset = dst_offset;
+
+ dst_offset += 4 * sizeof(float);
+ nr++;
+ }
+
+ assert(dst_offset <= vertex_size);
+
+ key.nr_elements = nr;
+ key.output_stride = vertex_size;
+
+
+ if (!fetch->translate ||
+ translate_key_compare(&fetch->translate->key, &key) != 0)
+ {
+ translate_key_sanitize(&key);
+ fetch->translate = translate_cache_find(fetch->cache, &key);
+
+ {
+ static struct vertex_header vh = { 0, 1, 0, UNDEFINED_VERTEX_ID, { .0f, .0f, .0f, .0f } };
+ fetch->translate->set_buffer(fetch->translate,
+ draw->pt.nr_vertex_buffers,
+ &vh,
+ 0);
+ }
+ }
+
+ fetch->need_edgeflags = ((draw->rasterizer->fill_cw != PIPE_POLYGON_MODE_FILL ||
+ draw->rasterizer->fill_ccw != PIPE_POLYGON_MODE_FILL) &&
+ draw->pt.user.edgeflag);
+}
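For reference, a sketch of the output layout that the prepare step above generates for a vertex shader with three inputs; the sizes are assumptions for illustration only, and the real vertex_size is computed elsewhere in the module:

#include <assert.h>

int main(void)
{
   unsigned nr_inputs = 3;                            /* example VS input count */
   unsigned dst_offset = 1 * sizeof(float)            /* header word */
                       + 4 * sizeof(float)            /* clip[] left untouched */
                       + nr_inputs * 4 * sizeof(float); /* one float[4] per input */

   assert(dst_offset == 68);                          /* must be <= vertex_size */
   return 0;
}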
+
+
+
+
+void draw_pt_fetch_run( struct pt_fetch *fetch,
+ const unsigned *elts,
+ unsigned count,
+ char *verts )
+{
+ struct draw_context *draw = fetch->draw;
+ struct translate *translate = fetch->translate;
+ unsigned i;
+
+ for (i = 0; i < draw->pt.nr_vertex_buffers; i++) {
+ translate->set_buffer(translate,
+ i,
+ ((char *)draw->pt.user.vbuffer[i] +
+ draw->pt.vertex_buffer[i].buffer_offset),
+ draw->pt.vertex_buffer[i].stride );
+ }
+
+ translate->run_elts( translate,
+ elts,
+ count,
+ verts );
+
+ /* Edgeflags are hard to fit into a translate program; populate
+ * them separately if required. In the setup above they are
+ * defaulted to one, so this is only needed if there is reason to
+ * change that default:
+ */
+ if (fetch->need_edgeflags) {
+ for (i = 0; i < count; i++) {
+ struct vertex_header *vh = (struct vertex_header *)(verts + i * fetch->vertex_size);
+ vh->edgeflag = draw_pt_get_edgeflag( draw, elts[i] );
+ }
+ }
+}
+
+
+void draw_pt_fetch_run_linear( struct pt_fetch *fetch,
+ unsigned start,
+ unsigned count,
+ char *verts )
+{
+ struct draw_context *draw = fetch->draw;
+ struct translate *translate = fetch->translate;
+ unsigned i;
+
+ for (i = 0; i < draw->pt.nr_vertex_buffers; i++) {
+ translate->set_buffer(translate,
+ i,
+ ((char *)draw->pt.user.vbuffer[i] +
+ draw->pt.vertex_buffer[i].buffer_offset),
+ draw->pt.vertex_buffer[i].stride );
+ }
+
+ translate->run( translate,
+ start,
+ count,
+ verts );
+
+ /* Edgeflags are hard to fit into a translate program; populate
+ * them separately if required. In the setup above they are
+ * defaulted to one, so this is only needed if there is reason to
+ * change that default:
+ */
+ if (fetch->need_edgeflags) {
+ for (i = 0; i < count; i++) {
+ struct vertex_header *vh = (struct vertex_header *)(verts + i * fetch->vertex_size);
+ vh->edgeflag = draw_pt_get_edgeflag( draw, start + i );
+ }
+ }
+}
+
+
+struct pt_fetch *draw_pt_fetch_create( struct draw_context *draw )
+{
+ struct pt_fetch *fetch = CALLOC_STRUCT(pt_fetch);
+ if (!fetch)
+ return NULL;
+
+ fetch->draw = draw;
+ fetch->cache = translate_cache_create();
+ if (!fetch->cache) {
+ FREE(fetch);
+ return NULL;
+ }
+
+ return fetch;
+}
+
+void draw_pt_fetch_destroy( struct pt_fetch *fetch )
+{
+ if (fetch->cache)
+ translate_cache_destroy(fetch->cache);
+
+ FREE(fetch);
+}
+
diff --git a/src/gallium/auxiliary/draw/draw_pt_fetch_emit.c b/src/gallium/auxiliary/draw/draw_pt_fetch_emit.c
new file mode 100644
index 0000000000..e7fe6b3b76
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pt_fetch_emit.c
@@ -0,0 +1,437 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "util/u_memory.h"
+#include "draw/draw_context.h"
+#include "draw/draw_private.h"
+#include "draw/draw_vbuf.h"
+#include "draw/draw_vertex.h"
+#include "draw/draw_pt.h"
+#include "translate/translate.h"
+#include "translate/translate_cache.h"
+
+/* The simplest 'middle end' in the new vertex code.
+ *
+ * The responsibilities of a middle end are to:
+ * - perform vertex fetch using
+ * - draw vertex element/buffer state
+ * - a list of fetch indices we received as an input
+ * - run the vertex shader
+ * - cliptest,
+ * - clip coord calculation
+ * - viewport transformation
+ * - if necessary, run the primitive pipeline, passing it:
+ * - a linear array of vertex_header vertices constructed here
+ * - a set of draw indices we received as an input
+ * - otherwise, drive the hw backend,
+ * - allocate space for hardware format vertices
+ * - translate the vertex-shader output vertices to hw format
+ * - calling the backend draw functions.
+ *
+ * For convenience, we provide a helper function to drive the hardware
+ * backend given similar inputs to those required to run the pipeline.
+ *
+ * In the case of passthrough mode, many of these actions are disabled
+ * or noops, so we end up doing:
+ *
+ * - perform vertex fetch
+ * - drive the hw backend
+ *
+ * I.e., basically just vertex fetch straight to hw-format vertices,
+ * followed by a call to the backend helper function.
+ */
+
+
+struct fetch_emit_middle_end {
+ struct draw_pt_middle_end base;
+ struct draw_context *draw;
+
+ struct translate *translate;
+ const struct vertex_info *vinfo;
+
+ /* Cache point size somewhere its address won't change:
+ */
+ float point_size;
+
+ struct translate_cache *cache;
+};
+
+
+
+
+static void fetch_emit_prepare( struct draw_pt_middle_end *middle,
+ unsigned prim,
+ unsigned opt,
+ unsigned *max_vertices )
+{
+ struct fetch_emit_middle_end *feme = (struct fetch_emit_middle_end *)middle;
+ struct draw_context *draw = feme->draw;
+ const struct vertex_info *vinfo;
+ unsigned i, dst_offset;
+ boolean ok;
+ struct translate_key key;
+
+
+ ok = draw->render->set_primitive( draw->render,
+ prim );
+ if (!ok) {
+ assert(0);
+ return;
+ }
+
+ /* Must do this after set_primitive() above:
+ */
+ vinfo = feme->vinfo = draw->render->get_vertex_info(draw->render);
+
+
+
+ /* Transform from API vertices to HW vertices, skipping the
+ * pipeline_vertex intermediate step.
+ */
+ dst_offset = 0;
+ memset(&key, 0, sizeof(key));
+
+ for (i = 0; i < vinfo->num_attribs; i++) {
+ const struct pipe_vertex_element *src = &draw->pt.vertex_element[vinfo->attrib[i].src_index];
+
+ unsigned emit_sz = 0;
+ unsigned input_format = src->src_format;
+ unsigned input_buffer = src->vertex_buffer_index;
+ unsigned input_offset = src->src_offset;
+ unsigned output_format;
+
+ switch (vinfo->attrib[i].emit) {
+ case EMIT_4UB:
+ output_format = PIPE_FORMAT_R8G8B8A8_UNORM;
+ emit_sz = 4 * sizeof(unsigned char);
+ break;
+ case EMIT_4F:
+ output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+ emit_sz = 4 * sizeof(float);
+ break;
+ case EMIT_3F:
+ output_format = PIPE_FORMAT_R32G32B32_FLOAT;
+ emit_sz = 3 * sizeof(float);
+ break;
+ case EMIT_2F:
+ output_format = PIPE_FORMAT_R32G32_FLOAT;
+ emit_sz = 2 * sizeof(float);
+ break;
+ case EMIT_1F:
+ output_format = PIPE_FORMAT_R32_FLOAT;
+ emit_sz = 1 * sizeof(float);
+ break;
+ case EMIT_1F_PSIZE:
+ input_format = PIPE_FORMAT_R32_FLOAT;
+ input_buffer = draw->pt.nr_vertex_buffers;
+ input_offset = 0;
+ output_format = PIPE_FORMAT_R32_FLOAT;
+ emit_sz = 1 * sizeof(float);
+ break;
+ case EMIT_OMIT:
+ continue;
+ default:
+ assert(0);
+ output_format = PIPE_FORMAT_NONE;
+ emit_sz = 0;
+ continue;
+ }
+
+ key.element[i].input_format = input_format;
+ key.element[i].input_buffer = input_buffer;
+ key.element[i].input_offset = input_offset;
+ key.element[i].output_format = output_format;
+ key.element[i].output_offset = dst_offset;
+
+ dst_offset += emit_sz;
+ }
+
+ key.nr_elements = vinfo->num_attribs;
+ key.output_stride = vinfo->size * 4;
+
+ /* Look up (or build) the translate object for this key:
+ */
+ if (!feme->translate ||
+ translate_key_compare(&feme->translate->key, &key) != 0)
+ {
+ translate_key_sanitize(&key);
+ feme->translate = translate_cache_find(feme->cache,
+ &key);
+
+
+ feme->translate->set_buffer(feme->translate,
+ draw->pt.nr_vertex_buffers,
+ &feme->point_size,
+ 0);
+ }
+
+ feme->point_size = draw->rasterizer->point_size;
+
+ for (i = 0; i < draw->pt.nr_vertex_buffers; i++) {
+ feme->translate->set_buffer(feme->translate,
+ i,
+ ((char *)draw->pt.user.vbuffer[i] +
+ draw->pt.vertex_buffer[i].buffer_offset),
+ draw->pt.vertex_buffer[i].stride );
+ }
+
+ *max_vertices = (draw->render->max_vertex_buffer_bytes /
+ (vinfo->size * 4));
+
+ /* Return an even number of verts.
+ * This prevents "parity" errors when splitting long triangle strips which
+ * can lead to front/back culling mix-ups.
+ * Every other triangle in a strip has an alternate front/back orientation
+ * so splitting at an odd position can cause the orientation of subsequent
+ * triangles to get reversed.
+ */
+ *max_vertices = *max_vertices & ~1;
+}
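A numeric illustration of that rounding, under the assumption of a 64 KB backend vertex buffer and a 12-dword (48-byte) hardware vertex:

#include <assert.h>

int main(void)
{
   unsigned max_vertex_buffer_bytes = 65536;  /* assumed backend limit */
   unsigned vinfo_size = 12;                  /* dwords per HW vertex */
   unsigned max_vertices = max_vertex_buffer_bytes / (vinfo_size * 4);

   assert(max_vertices == 1365);              /* odd: would flip strip parity */
   max_vertices &= ~1;
   assert(max_vertices == 1364);              /* even: strips split safely */
   return 0;
}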
+
+
+
+
+
+static void fetch_emit_run( struct draw_pt_middle_end *middle,
+ const unsigned *fetch_elts,
+ unsigned fetch_count,
+ const ushort *draw_elts,
+ unsigned draw_count )
+{
+ struct fetch_emit_middle_end *feme = (struct fetch_emit_middle_end *)middle;
+ struct draw_context *draw = feme->draw;
+ void *hw_verts;
+
+ /* XXX: need to flush to get prim_vbuf.c to release its allocation??
+ */
+ draw_do_flush( draw, DRAW_FLUSH_BACKEND );
+
+ if (fetch_count >= UNDEFINED_VERTEX_ID) {
+ assert(0);
+ return;
+ }
+
+ draw->render->allocate_vertices( draw->render,
+ (ushort)feme->translate->key.output_stride,
+ (ushort)fetch_count );
+
+ hw_verts = draw->render->map_vertices( draw->render );
+ if (!hw_verts) {
+ assert(0);
+ return;
+ }
+
+
+ /* Single routine to fetch vertices and emit HW verts.
+ */
+ feme->translate->run_elts( feme->translate,
+ fetch_elts,
+ fetch_count,
+ hw_verts );
+
+ if (0) {
+ unsigned i;
+ for (i = 0; i < fetch_count; i++) {
+ debug_printf("\n\nvertex %d:\n", i);
+ draw_dump_emitted_vertex( feme->vinfo,
+ (const uint8_t *)hw_verts + feme->vinfo->size * 4 * i );
+ }
+ }
+
+ draw->render->unmap_vertices( draw->render,
+ 0,
+ (ushort)(fetch_count - 1) );
+
+ /* XXX: Draw arrays path to avoid re-emitting index list again and
+ * again.
+ */
+ draw->render->draw( draw->render,
+ draw_elts,
+ draw_count );
+
+ /* Done -- that was easy, wasn't it:
+ */
+ draw->render->release_vertices( draw->render );
+
+}
+
+
+static void fetch_emit_run_linear( struct draw_pt_middle_end *middle,
+ unsigned start,
+ unsigned count )
+{
+ struct fetch_emit_middle_end *feme = (struct fetch_emit_middle_end *)middle;
+ struct draw_context *draw = feme->draw;
+ void *hw_verts;
+
+ /* XXX: need to flush to get prim_vbuf.c to release its allocation??
+ */
+ draw_do_flush( draw, DRAW_FLUSH_BACKEND );
+
+ if (count >= UNDEFINED_VERTEX_ID)
+ goto fail;
+
+ if (!draw->render->allocate_vertices( draw->render,
+ (ushort)feme->translate->key.output_stride,
+ (ushort)count ))
+ goto fail;
+
+ hw_verts = draw->render->map_vertices( draw->render );
+ if (!hw_verts)
+ goto fail;
+
+ /* Single routine to fetch vertices and emit HW verts.
+ */
+ feme->translate->run( feme->translate,
+ start,
+ count,
+ hw_verts );
+
+ if (0) {
+ unsigned i;
+ for (i = 0; i < count; i++) {
+ debug_printf("\n\nvertex %d:\n", i);
+ draw_dump_emitted_vertex( feme->vinfo,
+ (const uint8_t *)hw_verts + feme->vinfo->size * 4 * i );
+ }
+ }
+
+ draw->render->unmap_vertices( draw->render, 0, count - 1 );
+
+ /* XXX: Draw arrays path to avoid re-emitting index list again and
+ * again.
+ */
+ draw->render->draw_arrays( draw->render, 0, count );
+
+ /* Done -- that was easy, wasn't it:
+ */
+ draw->render->release_vertices( draw->render );
+ return;
+
+fail:
+ assert(0);
+ return;
+}
+
+
+static boolean fetch_emit_run_linear_elts( struct draw_pt_middle_end *middle,
+ unsigned start,
+ unsigned count,
+ const ushort *draw_elts,
+ unsigned draw_count )
+{
+ struct fetch_emit_middle_end *feme = (struct fetch_emit_middle_end *)middle;
+ struct draw_context *draw = feme->draw;
+ void *hw_verts;
+
+ /* XXX: need to flush to get prim_vbuf.c to release its allocation??
+ */
+ draw_do_flush( draw, DRAW_FLUSH_BACKEND );
+
+ if (count >= UNDEFINED_VERTEX_ID)
+ return FALSE;
+
+ if (!draw->render->allocate_vertices( draw->render,
+ (ushort)feme->translate->key.output_stride,
+ (ushort)count ))
+ return FALSE;
+
+ hw_verts = draw->render->map_vertices( draw->render );
+ if (!hw_verts)
+ return FALSE;
+
+ /* Single routine to fetch vertices and emit HW verts.
+ */
+ feme->translate->run( feme->translate,
+ start,
+ count,
+ hw_verts );
+
+ draw->render->unmap_vertices( draw->render, 0, (ushort)(count - 1) );
+
+ /* XXX: Draw arrays path to avoid re-emitting index list again and
+ * again.
+ */
+ draw->render->draw( draw->render,
+ draw_elts,
+ draw_count );
+
+ /* Done -- that was easy, wasn't it:
+ */
+ draw->render->release_vertices( draw->render );
+
+ return TRUE;
+}
+
+
+
+
+static void fetch_emit_finish( struct draw_pt_middle_end *middle )
+{
+ /* nothing to do */
+}
+
+static void fetch_emit_destroy( struct draw_pt_middle_end *middle )
+{
+ struct fetch_emit_middle_end *feme = (struct fetch_emit_middle_end *)middle;
+
+ if (feme->cache)
+ translate_cache_destroy(feme->cache);
+
+ FREE(middle);
+}
+
+
+struct draw_pt_middle_end *draw_pt_fetch_emit( struct draw_context *draw )
+{
+ struct fetch_emit_middle_end *fetch_emit = CALLOC_STRUCT( fetch_emit_middle_end );
+ if (fetch_emit == NULL)
+ return NULL;
+
+ fetch_emit->cache = translate_cache_create();
+ if (!fetch_emit->cache) {
+ FREE(fetch_emit);
+ return NULL;
+ }
+
+ fetch_emit->base.prepare = fetch_emit_prepare;
+ fetch_emit->base.run = fetch_emit_run;
+ fetch_emit->base.run_linear = fetch_emit_run_linear;
+ fetch_emit->base.run_linear_elts = fetch_emit_run_linear_elts;
+ fetch_emit->base.finish = fetch_emit_finish;
+ fetch_emit->base.destroy = fetch_emit_destroy;
+
+ fetch_emit->draw = draw;
+
+ return &fetch_emit->base;
+}
+
diff --git a/src/gallium/auxiliary/draw/draw_pt_fetch_shade_emit.c b/src/gallium/auxiliary/draw/draw_pt_fetch_shade_emit.c
new file mode 100644
index 0000000000..44147aed9b
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pt_fetch_shade_emit.c
@@ -0,0 +1,429 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "draw/draw_context.h"
+#include "draw/draw_private.h"
+#include "draw/draw_vbuf.h"
+#include "draw/draw_vertex.h"
+#include "draw/draw_pt.h"
+#include "draw/draw_vs.h"
+
+#include "translate/translate.h"
+
+struct fetch_shade_emit;
+
+
+/* Prototype fetch, shade, emit-hw-verts all in one go.
+ */
+struct fetch_shade_emit {
+ struct draw_pt_middle_end base;
+ struct draw_context *draw;
+
+
+ /* Temporaries:
+ */
+ const float *constants;
+ unsigned pitch[PIPE_MAX_ATTRIBS];
+ const ubyte *src[PIPE_MAX_ATTRIBS];
+ unsigned prim;
+
+ struct draw_vs_varient_key key;
+ struct draw_vs_varient *active;
+
+
+ const struct vertex_info *vinfo;
+};
+
+
+
+
+static void fse_prepare( struct draw_pt_middle_end *middle,
+ unsigned prim,
+ unsigned opt,
+ unsigned *max_vertices )
+{
+ struct fetch_shade_emit *fse = (struct fetch_shade_emit *)middle;
+ struct draw_context *draw = fse->draw;
+ unsigned num_vs_inputs = draw->vs.vertex_shader->info.num_inputs;
+ const struct vertex_info *vinfo;
+ unsigned i;
+ unsigned nr_vbs = 0;
+
+
+ if (!draw->render->set_primitive( draw->render,
+ prim )) {
+ assert(0);
+ return;
+ }
+
+ /* Must do this after set_primitive() above:
+ */
+ fse->vinfo = vinfo = draw->render->get_vertex_info(draw->render);
+
+
+
+ fse->key.output_stride = vinfo->size * 4;
+ fse->key.nr_outputs = vinfo->num_attribs;
+ fse->key.nr_inputs = num_vs_inputs;
+
+ fse->key.nr_elements = MAX2(fse->key.nr_outputs, /* outputs - translate to hw format */
+ fse->key.nr_inputs); /* inputs - fetch from api format */
+
+ fse->key.viewport = (!draw->rasterizer->bypass_vs_clip_and_viewport &&
+ !draw->identity_viewport);
+ fse->key.clip = !draw->bypass_clipping;
+ fse->key.const_vbuffers = 0;
+
+ memset(fse->key.element, 0,
+ fse->key.nr_elements * sizeof(fse->key.element[0]));
+
+ for (i = 0; i < num_vs_inputs; i++) {
+ const struct pipe_vertex_element *src = &draw->pt.vertex_element[i];
+ fse->key.element[i].in.format = src->src_format;
+
+ /* Consider ignoring these, i.e. make generated programs
+ * independent of this state:
+ */
+ fse->key.element[i].in.buffer = src->vertex_buffer_index;
+ fse->key.element[i].in.offset = src->src_offset;
+ nr_vbs = MAX2(nr_vbs, src->vertex_buffer_index + 1);
+ }
+
+ for (i = 0; i < 5 && i < nr_vbs; i++) {
+ if (draw->pt.vertex_buffer[i].stride == 0)
+ fse->key.const_vbuffers |= (1<<i);
+ }
+
+ if (0) debug_printf("%s: lookup const_vbuffers: %x\n", __FUNCTION__, fse->key.const_vbuffers);
+
+ {
+ unsigned dst_offset = 0;
+
+ for (i = 0; i < vinfo->num_attribs; i++) {
+ unsigned emit_sz = 0;
+
+ switch (vinfo->attrib[i].emit) {
+ case EMIT_4F:
+ emit_sz = 4 * sizeof(float);
+ break;
+ case EMIT_3F:
+ emit_sz = 3 * sizeof(float);
+ break;
+ case EMIT_2F:
+ emit_sz = 2 * sizeof(float);
+ break;
+ case EMIT_1F:
+ emit_sz = 1 * sizeof(float);
+ break;
+ case EMIT_1F_PSIZE:
+ emit_sz = 1 * sizeof(float);
+ break;
+ case EMIT_4UB:
+ emit_sz = 4 * sizeof(ubyte);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ /* The elements in the key correspond to vertex shader output
+ * numbers, not to positions in the hw vertex description --
+ * that's handled by the output_offset field.
+ */
+ fse->key.element[i].out.format = vinfo->attrib[i].emit;
+ fse->key.element[i].out.vs_output = vinfo->attrib[i].src_index;
+ fse->key.element[i].out.offset = dst_offset;
+
+ dst_offset += emit_sz;
+ assert(fse->key.output_stride >= dst_offset);
+ }
+ }
+
+
+ fse->active = draw_vs_lookup_varient( draw->vs.vertex_shader,
+ &fse->key );
+
+ if (!fse->active) {
+ assert(0);
+ return ;
+ }
+
+ if (0) debug_printf("%s: found const_vbuffers: %x\n", __FUNCTION__,
+ fse->active->key.const_vbuffers);
+
+ /* Now set buffer pointers:
+ */
+ for (i = 0; i < draw->pt.nr_vertex_buffers; i++) {
+ fse->active->set_buffer( fse->active,
+ i,
+ ((const ubyte *) draw->pt.user.vbuffer[i] +
+ draw->pt.vertex_buffer[i].buffer_offset),
+ draw->pt.vertex_buffer[i].stride );
+ }
+
+ *max_vertices = (draw->render->max_vertex_buffer_bytes /
+ (vinfo->size * 4));
+
+ /* Return an even number of verts.
+ * This prevents "parity" errors when splitting long triangle strips which
+ * can lead to front/back culling mix-ups.
+ * Every other triangle in a strip has an alternate front/back orientation
+ * so splitting at an odd position can cause the orientation of subsequent
+ * triangles to get reversed.
+ */
+ *max_vertices = *max_vertices & ~1;
+
+ /* Probably need to do this somewhere (or fix exec shader not to
+ * need it):
+ */
+ if (1) {
+ struct draw_vertex_shader *vs = draw->vs.vertex_shader;
+ vs->prepare(vs, draw);
+ }
+
+
+ //return TRUE;
+}
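The const_vbuffers bitmask built above simply records which vertex buffers have a zero stride. A standalone sketch with made-up strides:

#include <assert.h>

int main(void)
{
   unsigned stride[3] = { 32, 0, 16 };   /* example vertex buffer strides */
   unsigned const_vbuffers = 0;
   unsigned i;

   for (i = 0; i < 3; i++) {
      if (stride[i] == 0)                /* stride 0 => constant buffer */
         const_vbuffers |= (1 << i);
   }
   assert(const_vbuffers == 0x2);        /* only buffer 1 is constant */
   return 0;
}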
+
+
+
+
+
+
+
+static void fse_run_linear( struct draw_pt_middle_end *middle,
+ unsigned start,
+ unsigned count )
+{
+ struct fetch_shade_emit *fse = (struct fetch_shade_emit *)middle;
+ struct draw_context *draw = fse->draw;
+ char *hw_verts;
+
+ /* XXX: need to flush to get prim_vbuf.c to release its allocation??
+ */
+ draw_do_flush( draw, DRAW_FLUSH_BACKEND );
+
+ if (count >= UNDEFINED_VERTEX_ID)
+ goto fail;
+
+ if (!draw->render->allocate_vertices( draw->render,
+ (ushort)fse->key.output_stride,
+ (ushort)count ))
+ goto fail;
+
+ hw_verts = draw->render->map_vertices( draw->render );
+ if (!hw_verts)
+ goto fail;
+
+ /* Single routine to fetch vertices, run shader and emit HW verts.
+ * Clipping is done elsewhere -- either by the API or on hardware,
+ * or for some other reason not required...
+ */
+ fse->active->run_linear( fse->active,
+ start, count,
+ hw_verts );
+
+
+ if (0) {
+ unsigned i;
+ for (i = 0; i < count; i++) {
+ debug_printf("\n\n%s vertex %d: (stride %d, offset %d)\n", __FUNCTION__, i,
+ fse->key.output_stride,
+ fse->key.output_stride * i);
+
+ draw_dump_emitted_vertex( fse->vinfo,
+ (const uint8_t *)hw_verts + fse->key.output_stride * i );
+ }
+ }
+
+ draw->render->unmap_vertices( draw->render, 0, (ushort)(count - 1) );
+
+ /* Draw arrays path to avoid re-emitting index list again and
+ * again.
+ */
+ draw->render->draw_arrays( draw->render,
+ 0,
+ count );
+
+
+ draw->render->release_vertices( draw->render );
+
+ return;
+
+fail:
+ assert(0);
+ return;
+}
+
+
+static void
+fse_run(struct draw_pt_middle_end *middle,
+ const unsigned *fetch_elts,
+ unsigned fetch_count,
+ const ushort *draw_elts,
+ unsigned draw_count )
+{
+ struct fetch_shade_emit *fse = (struct fetch_shade_emit *)middle;
+ struct draw_context *draw = fse->draw;
+ void *hw_verts;
+
+ /* XXX: need to flush to get prim_vbuf.c to release its allocation??
+ */
+ draw_do_flush( draw, DRAW_FLUSH_BACKEND );
+
+ if (fetch_count >= UNDEFINED_VERTEX_ID)
+ goto fail;
+
+ if (!draw->render->allocate_vertices( draw->render,
+ (ushort)fse->key.output_stride,
+ (ushort)fetch_count ))
+ goto fail;
+
+ hw_verts = draw->render->map_vertices( draw->render );
+ if (!hw_verts)
+ goto fail;
+
+
+ /* Single routine to fetch vertices, run shader and emit HW verts.
+ */
+ fse->active->run_elts( fse->active,
+ fetch_elts,
+ fetch_count,
+ hw_verts );
+
+
+ if (0) {
+ unsigned i;
+ for (i = 0; i < fetch_count; i++) {
+ debug_printf("\n\n%s vertex %d:\n", __FUNCTION__, i);
+ draw_dump_emitted_vertex( fse->vinfo,
+ (const uint8_t *)hw_verts +
+ fse->key.output_stride * i );
+ }
+ }
+
+ draw->render->unmap_vertices( draw->render, 0, (ushort)(fetch_count - 1) );
+
+ draw->render->draw( draw->render,
+ draw_elts,
+ draw_count );
+
+
+ draw->render->release_vertices( draw->render );
+ return;
+
+fail:
+ assert(0);
+ return;
+}
+
+
+
+static boolean fse_run_linear_elts( struct draw_pt_middle_end *middle,
+ unsigned start,
+ unsigned count,
+ const ushort *draw_elts,
+ unsigned draw_count )
+{
+ struct fetch_shade_emit *fse = (struct fetch_shade_emit *)middle;
+ struct draw_context *draw = fse->draw;
+ char *hw_verts;
+
+ /* XXX: need to flush to get prim_vbuf.c to release its allocation??
+ */
+ draw_do_flush( draw, DRAW_FLUSH_BACKEND );
+
+ if (count >= UNDEFINED_VERTEX_ID)
+ return FALSE;
+
+ if (!draw->render->allocate_vertices( draw->render,
+ (ushort)fse->key.output_stride,
+ (ushort)count ))
+ return FALSE;
+
+ hw_verts = draw->render->map_vertices( draw->render );
+ if (!hw_verts)
+ return FALSE;
+
+ /* Single routine to fetch vertices, run shader and emit HW verts.
+ * Clipping is done elsewhere -- either by the API or on hardware,
+ * or for some other reason not required...
+ */
+ fse->active->run_linear( fse->active,
+ start, count,
+ hw_verts );
+
+
+ draw->render->draw( draw->render,
+ draw_elts,
+ draw_count );
+
+
+ draw->render->unmap_vertices( draw->render, 0, (ushort)(count - 1) );
+
+ draw->render->release_vertices( draw->render );
+
+ return TRUE;
+}
+
+
+
+static void fse_finish( struct draw_pt_middle_end *middle )
+{
+}
+
+
+static void
+fse_destroy( struct draw_pt_middle_end *middle )
+{
+ FREE(middle);
+}
+
+struct draw_pt_middle_end *draw_pt_middle_fse( struct draw_context *draw )
+{
+ struct fetch_shade_emit *fse = CALLOC_STRUCT(fetch_shade_emit);
+ if (!fse)
+ return NULL;
+
+ fse->base.prepare = fse_prepare;
+ fse->base.run = fse_run;
+ fse->base.run_linear = fse_run_linear;
+ fse->base.run_linear_elts = fse_run_linear_elts;
+ fse->base.finish = fse_finish;
+ fse->base.destroy = fse_destroy;
+ fse->draw = draw;
+
+ return &fse->base;
+}
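+
+
+/* A minimal sketch of how the per-element offsets above accumulate, assuming
+ * a hypothetical three-attribute layout (EMIT_4F position, EMIT_4UB color,
+ * EMIT_2F texcoord).  Using the sizes from the emit_sz switch in
+ * fse_prepare(), 16 + 4 + 8 bytes gives offsets 0, 16 and 20 and an output
+ * stride of 28.  sketch_output_layout() is illustrative only and is not
+ * called by the driver code.
+ */
+static INLINE unsigned
+sketch_output_layout(const unsigned emit_sizes[], unsigned offsets[], unsigned n)
+{
+   unsigned i, dst_offset = 0;
+
+   for (i = 0; i < n; i++) {
+      offsets[i] = dst_offset;      /* cf. fse->key.element[i].out.offset */
+      dst_offset += emit_sizes[i];
+   }
+   return dst_offset;               /* must not exceed key.output_stride */
+}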
diff --git a/src/gallium/auxiliary/draw/draw_pt_fetch_shade_pipeline.c b/src/gallium/auxiliary/draw/draw_pt_fetch_shade_pipeline.c
new file mode 100644
index 0000000000..df6c265b7e
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pt_fetch_shade_pipeline.c
@@ -0,0 +1,395 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "draw/draw_context.h"
+#include "draw/draw_vbuf.h"
+#include "draw/draw_vertex.h"
+#include "draw/draw_pt.h"
+#include "draw/draw_vs.h"
+#include "translate/translate.h"
+
+
+struct fetch_pipeline_middle_end {
+ struct draw_pt_middle_end base;
+ struct draw_context *draw;
+
+ struct pt_emit *emit;
+ struct pt_fetch *fetch;
+ struct pt_post_vs *post_vs;
+
+ unsigned vertex_data_offset;
+ unsigned vertex_size;
+ unsigned prim;
+ unsigned opt;
+};
+
+
+static void fetch_pipeline_prepare( struct draw_pt_middle_end *middle,
+ unsigned prim,
+ unsigned opt,
+ unsigned *max_vertices )
+{
+ struct fetch_pipeline_middle_end *fpme = (struct fetch_pipeline_middle_end *)middle;
+ struct draw_context *draw = fpme->draw;
+ struct draw_vertex_shader *vs = draw->vs.vertex_shader;
+
+ /* Add one to num_outputs because the pipeline occasionally tags on
+ * an additional texcoord, eg for AA lines.
+ */
+ unsigned nr = MAX2( vs->info.num_inputs,
+ vs->info.num_outputs + 1 );
+
+ fpme->prim = prim;
+ fpme->opt = opt;
+
+ /* Always leave room for the vertex header whether we need it or
+ * not. It's hard to get rid of it in particular because of the
+ * viewport code in draw_pt_post_vs.c.
+ */
+ fpme->vertex_size = sizeof(struct vertex_header) + nr * 4 * sizeof(float);
+
+
+
+ draw_pt_fetch_prepare( fpme->fetch,
+ vs->info.num_inputs,
+ fpme->vertex_size );
+ /* XXX: it's not really gl rasterization rules we care about here,
+ * but gl vs dx9 clip spaces.
+ */
+ draw_pt_post_vs_prepare( fpme->post_vs,
+ (boolean)draw->bypass_clipping,
+ (boolean)(draw->identity_viewport ||
+ draw->rasterizer->bypass_vs_clip_and_viewport),
+ (boolean)draw->rasterizer->gl_rasterization_rules );
+
+
+ if (!(opt & PT_PIPELINE)) {
+ draw_pt_emit_prepare( fpme->emit,
+ prim,
+ max_vertices );
+
+ *max_vertices = MAX2( *max_vertices,
+ DRAW_PIPE_MAX_VERTICES );
+ }
+ else {
+ *max_vertices = DRAW_PIPE_MAX_VERTICES;
+ }
+
+ /* return even number */
+ *max_vertices = *max_vertices & ~1;
+
+ /* Prepare the vertex shader.
+ */
+ vs->prepare(vs, draw);
+}
+
+
+
+static void fetch_pipeline_run( struct draw_pt_middle_end *middle,
+ const unsigned *fetch_elts,
+ unsigned fetch_count,
+ const ushort *draw_elts,
+ unsigned draw_count )
+{
+ struct fetch_pipeline_middle_end *fpme = (struct fetch_pipeline_middle_end *)middle;
+ struct draw_context *draw = fpme->draw;
+ struct draw_vertex_shader *shader = draw->vs.vertex_shader;
+ unsigned opt = fpme->opt;
+ unsigned alloc_count = align( fetch_count, 4 );
+
+ struct vertex_header *pipeline_verts =
+ (struct vertex_header *)MALLOC(fpme->vertex_size * alloc_count);
+
+ if (!pipeline_verts) {
+ /* Not much we can do here - just skip the rendering.
+ */
+ assert(0);
+ return;
+ }
+
+ /* Fetch into our vertex buffer
+ */
+ draw_pt_fetch_run( fpme->fetch,
+ fetch_elts,
+ fetch_count,
+ (char *)pipeline_verts );
+
+ /* Run the shader, note that this overwrites the data[] parts of
+ * the pipeline verts. If there is no shader, eg if
+ * bypass_vs_clip_and_viewport, then the inputs == outputs, and are
+ * already in the correct place.
+ */
+ if (opt & PT_SHADE)
+ {
+ shader->run_linear(shader,
+ (const float (*)[4])pipeline_verts->data,
+ ( float (*)[4])pipeline_verts->data,
+ (const float (*)[4])draw->pt.user.constants,
+ fetch_count,
+ fpme->vertex_size,
+ fpme->vertex_size);
+ }
+
+ if (draw_pt_post_vs_run( fpme->post_vs,
+ pipeline_verts,
+ fetch_count,
+ fpme->vertex_size ))
+ {
+ opt |= PT_PIPELINE;
+ }
+
+ /* Do we need to run the pipeline?
+ */
+ if (opt & PT_PIPELINE) {
+ draw_pipeline_run( fpme->draw,
+ fpme->prim,
+ pipeline_verts,
+ fetch_count,
+ fpme->vertex_size,
+ draw_elts,
+ draw_count );
+ }
+ else {
+ draw_pt_emit( fpme->emit,
+ (const float (*)[4])pipeline_verts->data,
+ fetch_count,
+ fpme->vertex_size,
+ draw_elts,
+ draw_count );
+ }
+
+
+ FREE(pipeline_verts);
+}
+
+
+static void fetch_pipeline_linear_run( struct draw_pt_middle_end *middle,
+ unsigned start,
+ unsigned count)
+{
+ struct fetch_pipeline_middle_end *fpme = (struct fetch_pipeline_middle_end *)middle;
+ struct draw_context *draw = fpme->draw;
+ struct draw_vertex_shader *shader = draw->vs.vertex_shader;
+ unsigned opt = fpme->opt;
+ unsigned alloc_count = align( count, 4 );
+
+ struct vertex_header *pipeline_verts =
+ (struct vertex_header *)MALLOC(fpme->vertex_size * alloc_count);
+
+ if (!pipeline_verts) {
+ /* Not much we can do here - just skip the rendering.
+ */
+ assert(0);
+ return;
+ }
+
+ /* Fetch into our vertex buffer
+ */
+ draw_pt_fetch_run_linear( fpme->fetch,
+ start,
+ count,
+ (char *)pipeline_verts );
+
+ /* Run the shader, note that this overwrites the data[] parts of
+ * the pipeline verts. If there is no shader, ie if
+ * bypass_vs_clip_and_viewport, then the inputs == outputs, and are
+ * already in the correct place.
+ */
+ if (opt & PT_SHADE)
+ {
+ shader->run_linear(shader,
+ (const float (*)[4])pipeline_verts->data,
+ ( float (*)[4])pipeline_verts->data,
+ (const float (*)[4])draw->pt.user.constants,
+ count,
+ fpme->vertex_size,
+ fpme->vertex_size);
+ }
+
+ if (draw_pt_post_vs_run( fpme->post_vs,
+ pipeline_verts,
+ count,
+ fpme->vertex_size ))
+ {
+ opt |= PT_PIPELINE;
+ }
+
+ /* Do we need to run the pipeline?
+ */
+ if (opt & PT_PIPELINE) {
+ draw_pipeline_run_linear( fpme->draw,
+ fpme->prim,
+ pipeline_verts,
+ count,
+ fpme->vertex_size);
+ }
+ else {
+ draw_pt_emit_linear( fpme->emit,
+ (const float (*)[4])pipeline_verts->data,
+ fpme->vertex_size,
+ count );
+ }
+
+ FREE(pipeline_verts);
+}
+
+
+
+static boolean fetch_pipeline_linear_run_elts( struct draw_pt_middle_end *middle,
+ unsigned start,
+ unsigned count,
+ const ushort *draw_elts,
+ unsigned draw_count )
+{
+ struct fetch_pipeline_middle_end *fpme = (struct fetch_pipeline_middle_end *)middle;
+ struct draw_context *draw = fpme->draw;
+ struct draw_vertex_shader *shader = draw->vs.vertex_shader;
+ unsigned opt = fpme->opt;
+ unsigned alloc_count = align( count, 4 );
+
+ struct vertex_header *pipeline_verts =
+ (struct vertex_header *)MALLOC(fpme->vertex_size * alloc_count);
+
+ if (!pipeline_verts)
+ return FALSE;
+
+ /* Fetch into our vertex buffer
+ */
+ draw_pt_fetch_run_linear( fpme->fetch,
+ start,
+ count,
+ (char *)pipeline_verts );
+
+ /* Run the shader, note that this overwrites the data[] parts of
+ * the pipeline verts. If there is no shader, ie if
+ * bypass_vs_clip_and_viewport, then the inputs == outputs, and are
+ * already in the correct place.
+ */
+ if (opt & PT_SHADE)
+ {
+ shader->run_linear(shader,
+ (const float (*)[4])pipeline_verts->data,
+ ( float (*)[4])pipeline_verts->data,
+ (const float (*)[4])draw->pt.user.constants,
+ count,
+ fpme->vertex_size,
+ fpme->vertex_size);
+ }
+
+ if (draw_pt_post_vs_run( fpme->post_vs,
+ pipeline_verts,
+ count,
+ fpme->vertex_size ))
+ {
+ opt |= PT_PIPELINE;
+ }
+
+ /* Do we need to run the pipeline?
+ */
+ if (opt & PT_PIPELINE) {
+ draw_pipeline_run( fpme->draw,
+ fpme->prim,
+ pipeline_verts,
+ count,
+ fpme->vertex_size,
+ draw_elts,
+ draw_count );
+ }
+ else {
+ draw_pt_emit( fpme->emit,
+ (const float (*)[4])pipeline_verts->data,
+ count,
+ fpme->vertex_size,
+ draw_elts,
+ draw_count );
+ }
+
+ FREE(pipeline_verts);
+ return TRUE;
+}
+
+
+
+static void fetch_pipeline_finish( struct draw_pt_middle_end *middle )
+{
+ /* nothing to do */
+}
+
+static void fetch_pipeline_destroy( struct draw_pt_middle_end *middle )
+{
+ struct fetch_pipeline_middle_end *fpme = (struct fetch_pipeline_middle_end *)middle;
+
+ if (fpme->fetch)
+ draw_pt_fetch_destroy( fpme->fetch );
+
+ if (fpme->emit)
+ draw_pt_emit_destroy( fpme->emit );
+
+ if (fpme->post_vs)
+ draw_pt_post_vs_destroy( fpme->post_vs );
+
+ FREE(middle);
+}
+
+
+struct draw_pt_middle_end *draw_pt_fetch_pipeline_or_emit( struct draw_context *draw )
+{
+ struct fetch_pipeline_middle_end *fpme = CALLOC_STRUCT( fetch_pipeline_middle_end );
+ if (!fpme)
+ goto fail;
+
+ fpme->base.prepare = fetch_pipeline_prepare;
+ fpme->base.run = fetch_pipeline_run;
+ fpme->base.run_linear = fetch_pipeline_linear_run;
+ fpme->base.run_linear_elts = fetch_pipeline_linear_run_elts;
+ fpme->base.finish = fetch_pipeline_finish;
+ fpme->base.destroy = fetch_pipeline_destroy;
+
+ fpme->draw = draw;
+
+ fpme->fetch = draw_pt_fetch_create( draw );
+ if (!fpme->fetch)
+ goto fail;
+
+ fpme->post_vs = draw_pt_post_vs_create( draw );
+ if (!fpme->post_vs)
+ goto fail;
+
+ fpme->emit = draw_pt_emit_create( draw );
+ if (!fpme->emit)
+ goto fail;
+
+ return &fpme->base;
+
+ fail:
+ if (fpme)
+ fetch_pipeline_destroy( &fpme->base );
+
+ return NULL;
+}
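+
+
+/* A minimal sketch of the vertex_size computation in fetch_pipeline_prepare()
+ * above.  One buffer holds both the fetched inputs and the shaded outputs, so
+ * room is reserved for the vertex header plus MAX2(num_inputs, num_outputs + 1)
+ * float[4] attribute slots.  sketch_vertex_header merely stands in for
+ * struct vertex_header here; the sketch is illustrative only.
+ */
+struct sketch_vertex_header {
+   unsigned flags;
+   float clip[4];
+   float data[][4];                 /* variable-length attribute array */
+};
+
+static INLINE unsigned
+sketch_vertex_size(unsigned num_inputs, unsigned num_outputs)
+{
+   unsigned nr = MAX2(num_inputs, num_outputs + 1);
+
+   return sizeof(struct sketch_vertex_header) + nr * 4 * sizeof(float);
+}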
diff --git a/src/gallium/auxiliary/draw/draw_pt_post_vs.c b/src/gallium/auxiliary/draw/draw_pt_post_vs.c
new file mode 100644
index 0000000000..00d7197b13
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pt_post_vs.c
@@ -0,0 +1,233 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_memory.h"
+#include "pipe/p_context.h"
+#include "draw/draw_context.h"
+#include "draw/draw_private.h"
+#include "draw/draw_vbuf.h"
+#include "draw/draw_vertex.h"
+#include "draw/draw_pt.h"
+
+struct pt_post_vs {
+ struct draw_context *draw;
+
+ boolean (*run)( struct pt_post_vs *pvs,
+ struct vertex_header *vertices,
+ unsigned count,
+ unsigned stride );
+};
+
+
+
+static INLINE float
+dot4(const float *a, const float *b)
+{
+ return (a[0]*b[0] +
+ a[1]*b[1] +
+ a[2]*b[2] +
+ a[3]*b[3]);
+}
+
+
+
+static INLINE unsigned
+compute_clipmask_gl(const float *clip, /*const*/ float plane[][4], unsigned nr)
+{
+ unsigned mask = 0x0;
+ unsigned i;
+
+#if 0
+ debug_printf("compute clipmask %f %f %f %f\n",
+ clip[0], clip[1], clip[2], clip[3]);
+ assert(clip[3] != 0.0);
+#endif
+
+ /* Do the hardwired planes first:
+ */
+ if (-clip[0] + clip[3] < 0) mask |= (1<<0);
+ if ( clip[0] + clip[3] < 0) mask |= (1<<1);
+ if (-clip[1] + clip[3] < 0) mask |= (1<<2);
+ if ( clip[1] + clip[3] < 0) mask |= (1<<3);
+ if ( clip[2] + clip[3] < 0) mask |= (1<<4); /* match mesa clipplane numbering - for now */
+ if (-clip[2] + clip[3] < 0) mask |= (1<<5); /* match mesa clipplane numbering - for now */
+
+ /* Followed by any remaining ones:
+ */
+ for (i = 6; i < nr; i++) {
+ if (dot4(clip, plane[i]) < 0)
+ mask |= (1<<i);
+ }
+
+ return mask;
+}
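+
+
+/* A small illustrative check of the clip test above (never called): a vertex
+ * with clip coordinates (2, 0, 0, 1) has x > w, so with only the six
+ * hardwired planes (nr == 6) exactly bit 0 of the mask should be set.
+ */
+static INLINE boolean
+sketch_clipmask_example(void)
+{
+   const float clip[4] = { 2.0f, 0.0f, 0.0f, 1.0f };
+   float planes[6][4] = { { 0.0f } };   /* ignored by the hardwired planes */
+
+   return compute_clipmask_gl(clip, planes, 6) == (1 << 0);
+}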
+
+
+/* The normal case - cliptest, rhw divide, viewport transform.
+ *
+ * Also handle identity viewport here at the expense of a few wasted
+ * instructions
+ */
+static boolean post_vs_cliptest_viewport_gl( struct pt_post_vs *pvs,
+ struct vertex_header *vertices,
+ unsigned count,
+ unsigned stride )
+{
+ struct vertex_header *out = vertices;
+ const float *scale = pvs->draw->viewport.scale;
+ const float *trans = pvs->draw->viewport.translate;
+ const unsigned pos = pvs->draw->vs.position_output;
+ unsigned clipped = 0;
+ unsigned j;
+
+ if (0) debug_printf("%s\n", __FUNCTION__);
+
+ for (j = 0; j < count; j++) {
+ float *position = out->data[pos];
+
+ out->clip[0] = position[0];
+ out->clip[1] = position[1];
+ out->clip[2] = position[2];
+ out->clip[3] = position[3];
+
+ out->vertex_id = 0xffff;
+ out->clipmask = compute_clipmask_gl(out->clip,
+ pvs->draw->plane,
+ pvs->draw->nr_planes);
+ clipped += out->clipmask;
+
+ if (out->clipmask == 0)
+ {
+ /* divide by w */
+ float w = 1.0f / position[3];
+
+ /* Viewport mapping */
+ position[0] = position[0] * w * scale[0] + trans[0];
+ position[1] = position[1] * w * scale[1] + trans[1];
+ position[2] = position[2] * w * scale[2] + trans[2];
+ position[3] = w;
+#if 0
+ debug_printf("post viewport: %f %f %f %f\n",
+ position[0],
+ position[1],
+ position[2],
+ position[3]);
+#endif
+ }
+
+ out = (struct vertex_header *)( (char *)out + stride );
+ }
+
+ return clipped != 0;
+}
+
+
+
+/* If bypass_clipping is set, skip cliptest and rhw divide.
+ */
+static boolean post_vs_viewport( struct pt_post_vs *pvs,
+ struct vertex_header *vertices,
+ unsigned count,
+ unsigned stride )
+{
+ struct vertex_header *out = vertices;
+ const float *scale = pvs->draw->viewport.scale;
+ const float *trans = pvs->draw->viewport.translate;
+ const unsigned pos = pvs->draw->vs.position_output;
+ unsigned j;
+
+ if (0) debug_printf("%s\n", __FUNCTION__);
+ for (j = 0; j < count; j++) {
+ float *position = out->data[pos];
+
+ /* Viewport mapping only, no cliptest/rhw divide
+ */
+ position[0] = position[0] * scale[0] + trans[0];
+ position[1] = position[1] * scale[1] + trans[1];
+ position[2] = position[2] * scale[2] + trans[2];
+
+ out = (struct vertex_header *)((char *)out + stride);
+ }
+
+ return FALSE;
+}
+
+
+/* If bypass_clipping is set and we have an identity viewport, nothing
+ * to do.
+ */
+static boolean post_vs_none( struct pt_post_vs *pvs,
+ struct vertex_header *vertices,
+ unsigned count,
+ unsigned stride )
+{
+ if (0) debug_printf("%s\n", __FUNCTION__);
+ return FALSE;
+}
+
+boolean draw_pt_post_vs_run( struct pt_post_vs *pvs,
+ struct vertex_header *pipeline_verts,
+ unsigned count,
+ unsigned stride )
+{
+ return pvs->run( pvs, pipeline_verts, count, stride );
+}
+
+
+void draw_pt_post_vs_prepare( struct pt_post_vs *pvs,
+ boolean bypass_clipping,
+ boolean bypass_viewport,
+ boolean opengl )
+{
+ if (bypass_clipping) {
+ if (bypass_viewport)
+ pvs->run = post_vs_none;
+ else
+ pvs->run = post_vs_viewport;
+ }
+ else {
+ //if (opengl)
+ pvs->run = post_vs_cliptest_viewport_gl;
+ }
+}
+
+
+struct pt_post_vs *draw_pt_post_vs_create( struct draw_context *draw )
+{
+ struct pt_post_vs *pvs = CALLOC_STRUCT( pt_post_vs );
+ if (!pvs)
+ return NULL;
+
+ pvs->draw = draw;
+
+ return pvs;
+}
+
+void draw_pt_post_vs_destroy( struct pt_post_vs *pvs )
+{
+ FREE(pvs);
+}
diff --git a/src/gallium/auxiliary/draw/draw_pt_util.c b/src/gallium/auxiliary/draw/draw_pt_util.c
new file mode 100644
index 0000000000..b61fa29143
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pt_util.c
@@ -0,0 +1,77 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "draw/draw_context.h"
+#include "draw/draw_private.h"
+#include "draw/draw_pt.h"
+
+void draw_pt_split_prim(unsigned prim, unsigned *first, unsigned *incr)
+{
+ switch (prim) {
+ case PIPE_PRIM_POINTS:
+ *first = 1;
+ *incr = 1;
+ break;
+ case PIPE_PRIM_LINES:
+ *first = 2;
+ *incr = 2;
+ break;
+ case PIPE_PRIM_LINE_STRIP:
+ case PIPE_PRIM_LINE_LOOP:
+ *first = 2;
+ *incr = 1;
+ break;
+ case PIPE_PRIM_TRIANGLES:
+ *first = 3;
+ *incr = 3;
+ break;
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ case PIPE_PRIM_TRIANGLE_FAN:
+ case PIPE_PRIM_POLYGON:
+ *first = 3;
+ *incr = 1;
+ break;
+ case PIPE_PRIM_QUADS:
+ *first = 4;
+ *incr = 4;
+ break;
+ case PIPE_PRIM_QUAD_STRIP:
+ *first = 4;
+ *incr = 2;
+ break;
+ default:
+ assert(0);
+ *first = 0;
+ *incr = 1; /* set to one so that count % incr works */
+ break;
+ }
+}
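+
+
+/* A minimal sketch of how the (first, incr) pair is typically used: a vertex
+ * count is trimmed to describe only whole primitives, i.e.
+ * count - (count - first) % incr.  For a triangle strip (first 3, incr 1) a
+ * count of 7 stays 7; for independent triangles (first 3, incr 3) it is
+ * trimmed to 6.  sketch_trim_count() is illustrative only -- the varray
+ * frontend implements the same idea in its trim() helper.
+ */
+static INLINE unsigned
+sketch_trim_count(unsigned prim, unsigned count)
+{
+   unsigned first, incr;
+
+   draw_pt_split_prim(prim, &first, &incr);
+   if (count < first)
+      return 0;
+   return count - (count - first) % incr;
+}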
diff --git a/src/gallium/auxiliary/draw/draw_pt_varray.c b/src/gallium/auxiliary/draw/draw_pt_varray.c
new file mode 100644
index 0000000000..d0e16c9bc3
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pt_varray.c
@@ -0,0 +1,193 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+#include "draw/draw_context.h"
+#include "draw/draw_private.h"
+#include "draw/draw_pt.h"
+
+#define FETCH_MAX 256
+#define DRAW_MAX (FETCH_MAX+8)
+
+struct varray_frontend {
+ struct draw_pt_front_end base;
+ struct draw_context *draw;
+
+ ushort draw_elts[DRAW_MAX];
+ unsigned fetch_elts[FETCH_MAX];
+
+ unsigned driver_fetch_max;
+ unsigned fetch_max;
+
+ struct draw_pt_middle_end *middle;
+
+ unsigned input_prim;
+ unsigned output_prim;
+};
+
+
+static void varray_flush_linear(struct varray_frontend *varray,
+ unsigned start, unsigned count)
+{
+ if (count) {
+ assert(varray->middle->run_linear);
+ varray->middle->run_linear(varray->middle, start, count);
+ }
+}
+
+static void varray_line_loop_segment(struct varray_frontend *varray,
+ unsigned start,
+ unsigned segment_start,
+ unsigned segment_count,
+ boolean end )
+{
+ assert(segment_count < varray->fetch_max);
+ if (segment_count >= 1) {
+ unsigned nr = 0, i;
+
+ for (i = 0; i < segment_count; i++)
+ varray->fetch_elts[nr++] = start + segment_start + i;
+
+ if (end)
+ varray->fetch_elts[nr++] = start;
+
+ assert(nr <= FETCH_MAX);
+
+ varray->middle->run(varray->middle,
+ varray->fetch_elts,
+ nr,
+ varray->draw_elts, /* ie. linear */
+ nr);
+ }
+}
+
+
+
+static void varray_fan_segment(struct varray_frontend *varray,
+ unsigned start,
+ unsigned segment_start,
+ unsigned segment_count )
+{
+ assert(segment_count < varray->fetch_max);
+ if (segment_count >= 2) {
+ unsigned nr = 0, i;
+
+ if (segment_start != 0)
+ varray->fetch_elts[nr++] = start;
+
+ for (i = 0 ; i < segment_count; i++)
+ varray->fetch_elts[nr++] = start + segment_start + i;
+
+ assert(nr <= FETCH_MAX);
+
+ varray->middle->run(varray->middle,
+ varray->fetch_elts,
+ nr,
+ varray->draw_elts, /* ie. linear */
+ nr);
+ }
+}
+
+
+
+
+#define FUNC varray_run
+#include "draw_pt_varray_tmp_linear.h"
+
+static unsigned decompose_prim[PIPE_PRIM_POLYGON + 1] = {
+ PIPE_PRIM_POINTS,
+ PIPE_PRIM_LINES,
+ PIPE_PRIM_LINE_STRIP, /* decomposed LINELOOP */
+ PIPE_PRIM_LINE_STRIP,
+ PIPE_PRIM_TRIANGLES,
+ PIPE_PRIM_TRIANGLE_STRIP,
+ PIPE_PRIM_TRIANGLE_FAN,
+ PIPE_PRIM_QUADS,
+ PIPE_PRIM_QUAD_STRIP,
+ PIPE_PRIM_POLYGON
+};
+
+
+
+static void varray_prepare(struct draw_pt_front_end *frontend,
+ unsigned prim,
+ struct draw_pt_middle_end *middle,
+ unsigned opt)
+{
+ struct varray_frontend *varray = (struct varray_frontend *)frontend;
+
+ varray->base.run = varray_run;
+
+ varray->input_prim = prim;
+ varray->output_prim = decompose_prim[prim];
+
+ varray->middle = middle;
+ middle->prepare(middle, varray->output_prim, opt, &varray->driver_fetch_max );
+
+ /* check that the max is even */
+ assert((varray->driver_fetch_max & 1) == 0);
+
+ varray->fetch_max = MIN2(FETCH_MAX, varray->driver_fetch_max);
+}
+
+
+
+
+static void varray_finish(struct draw_pt_front_end *frontend)
+{
+ struct varray_frontend *varray = (struct varray_frontend *)frontend;
+ varray->middle->finish(varray->middle);
+ varray->middle = NULL;
+}
+
+static void varray_destroy(struct draw_pt_front_end *frontend)
+{
+ FREE(frontend);
+}
+
+
+struct draw_pt_front_end *draw_pt_varray(struct draw_context *draw)
+{
+ ushort i;
+ struct varray_frontend *varray = CALLOC_STRUCT(varray_frontend);
+ if (varray == NULL)
+ return NULL;
+
+ varray->base.prepare = varray_prepare;
+ varray->base.run = NULL;
+ varray->base.finish = varray_finish;
+ varray->base.destroy = varray_destroy;
+ varray->draw = draw;
+
+ for (i = 0; i < DRAW_MAX; i++) {
+ varray->draw_elts[i] = i;
+ }
+
+ return &varray->base;
+}
diff --git a/src/gallium/auxiliary/draw/draw_pt_varray_tmp.h b/src/gallium/auxiliary/draw/draw_pt_varray_tmp.h
new file mode 100644
index 0000000000..7c722457c3
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pt_varray_tmp.h
@@ -0,0 +1,238 @@
+
+static void FUNC(struct draw_pt_front_end *frontend,
+ pt_elt_func get_elt,
+ const void *elts,
+ unsigned count)
+{
+ struct varray_frontend *varray = (struct varray_frontend *)frontend;
+ struct draw_context *draw = varray->draw;
+ unsigned start = (unsigned)elts;
+
+ boolean flatfirst = (draw->rasterizer->flatshade &&
+ draw->rasterizer->flatshade_first);
+ unsigned i, j;
+ ushort flags;
+ unsigned first, incr;
+
+ varray->fetch_start = start;
+
+ draw_pt_split_prim(varray->input_prim, &first, &incr);
+
+#if 0
+ debug_printf("%s (%d) %d/%d\n", __FUNCTION__,
+ varray->input_prim,
+ start, count);
+#endif
+
+ switch (varray->input_prim) {
+ case PIPE_PRIM_POINTS:
+ for (j = 0; j + first <= count; j += i) {
+ unsigned end = MIN2(FETCH_MAX, count - j);
+ end -= (end % incr);
+ for (i = 0; i < end; i++) {
+ POINT(varray, i + 0);
+ }
+ i = end;
+ fetch_init(varray, end);
+ varray_flush(varray);
+ }
+ break;
+
+ case PIPE_PRIM_LINES:
+ for (j = 0; j + first <= count; j += i) {
+ unsigned end = MIN2(FETCH_MAX, count - j);
+ end -= (end % incr);
+ for (i = 0; i+1 < end; i += 2) {
+ LINE(varray, DRAW_PIPE_RESET_STIPPLE,
+ i + 0, i + 1);
+ }
+ i = end;
+ fetch_init(varray, end);
+ varray_flush(varray);
+ }
+ break;
+
+ case PIPE_PRIM_LINE_LOOP:
+ if (count >= 2) {
+ flags = DRAW_PIPE_RESET_STIPPLE;
+
+ for (j = 0; j + first <= count; j += i) {
+ unsigned end = MIN2(FETCH_MAX, count - j);
+ end -= (end % incr);
+ for (i = 1; i < end; i++, flags = 0) {
+ LINE(varray, flags, i - 1, i);
+ }
+ LINE(varray, flags, i - 1, 0);
+ i = end;
+ fetch_init(varray, end);
+ varray_flush(varray);
+ }
+ }
+ break;
+
+ case PIPE_PRIM_LINE_STRIP:
+ flags = DRAW_PIPE_RESET_STIPPLE;
+ for (j = 0; j + first <= count; j += i) {
+ unsigned end = MIN2(FETCH_MAX, count - j);
+ end -= (end % incr);
+ for (i = 1; i < end; i++, flags = 0) {
+ LINE(varray, flags, i - 1, i);
+ }
+ i = end;
+ fetch_init(varray, end);
+ varray_flush(varray);
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLES:
+ for (j = 0; j + first <= count; j += i) {
+ unsigned end = MIN2(FETCH_MAX, count - j);
+ end -= (end % incr);
+ for (i = 0; i+2 < end; i += 3) {
+ TRIANGLE(varray, DRAW_PIPE_RESET_STIPPLE | DRAW_PIPE_EDGE_FLAG_ALL,
+ i + 0, i + 1, i + 2);
+ }
+ i = end;
+ fetch_init(varray, end);
+ varray_flush(varray);
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ if (flatfirst) {
+ for (j = 0; j + first <= count; j += i) {
+ unsigned end = MIN2(FETCH_MAX, count - j);
+ end -= (end % incr);
+ for (i = 0; i+2 < end; i++) {
+ TRIANGLE(varray, DRAW_PIPE_RESET_STIPPLE | DRAW_PIPE_EDGE_FLAG_ALL,
+ i + 0, i + 1 + (i&1), i + 2 - (i&1));
+ }
+ i = end;
+ fetch_init(varray, end);
+ varray_flush(varray);
+ if (j + first + i <= count) {
+ varray->fetch_start -= 2;
+ i -= 2;
+ }
+ }
+ }
+ else {
+ for (j = 0; j + first <= count; j += i) {
+ unsigned end = MIN2(FETCH_MAX, count - j);
+ end -= (end % incr);
+ for (i = 0; i + 2 < end; i++) {
+ TRIANGLE(varray, DRAW_PIPE_RESET_STIPPLE | DRAW_PIPE_EDGE_FLAG_ALL,
+ i + 0 + (i&1), i + 1 - (i&1), i + 2);
+ }
+ i = end;
+ fetch_init(varray, end);
+ varray_flush(varray);
+ if (j + first + i <= count) {
+ varray->fetch_start -= 2;
+ i -= 2;
+ }
+ }
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLE_FAN:
+ if (count >= 3) {
+ if (flatfirst) {
+ flags = DRAW_PIPE_RESET_STIPPLE | DRAW_PIPE_EDGE_FLAG_ALL;
+ for (j = 0; j + first <= count; j += i) {
+ unsigned end = MIN2(FETCH_MAX, count - j);
+ end -= (end % incr);
+ for (i = 0; i+2 < end; i++) {
+ TRIANGLE(varray, flags, i + 1, i + 2, 0);
+ }
+ i = end;
+ fetch_init(varray, end);
+ varray_flush(varray);
+ }
+ }
+ else {
+ flags = DRAW_PIPE_RESET_STIPPLE | DRAW_PIPE_EDGE_FLAG_ALL;
+ for (j = 0; j + first <= count; j += i) {
+ unsigned end = MIN2(FETCH_MAX, count - j);
+ end -= (end % incr);
+ for (i = 0; i+2 < end; i++) {
+ TRIANGLE(varray, flags, 0, i + 1, i + 2);
+ }
+ i = end;
+ fetch_init(varray, end);
+ varray_flush(varray);
+ }
+ }
+ }
+ break;
+
+ case PIPE_PRIM_QUADS:
+ for (j = 0; j + first <= count; j += i) {
+ unsigned end = MIN2(FETCH_MAX, count - j);
+ end -= (end % incr);
+ for (i = 0; i+3 < end; i += 4) {
+ QUAD(varray, i + 0, i + 1, i + 2, i + 3);
+ }
+ i = end;
+ fetch_init(varray, end);
+ varray_flush(varray);
+ }
+ break;
+
+ case PIPE_PRIM_QUAD_STRIP:
+ for (j = 0; j + first <= count; j += i) {
+ unsigned end = MIN2(FETCH_MAX, count - j);
+ end -= (end % incr);
+ for (i = 0; i+3 < end; i += 2) {
+ QUAD(varray, i + 2, i + 0, i + 1, i + 3);
+ }
+ i = end;
+ fetch_init(varray, end);
+ varray_flush(varray);
+ if (j + first + i <= count) {
+ varray->fetch_start -= 2;
+ i -= 2;
+ }
+ }
+ break;
+
+ case PIPE_PRIM_POLYGON:
+ {
+ /* These bitflags look a little odd because we submit the
+ * vertices as (1,2,0) to satisfy flatshade requirements.
+ */
+ const ushort edge_first = DRAW_PIPE_EDGE_FLAG_2;
+ const ushort edge_middle = DRAW_PIPE_EDGE_FLAG_0;
+ const ushort edge_last = DRAW_PIPE_EDGE_FLAG_1;
+
+ flags = DRAW_PIPE_RESET_STIPPLE | edge_first | edge_middle;
+ for (j = 0; j + first <= count; j += i) {
+ unsigned end = MIN2(FETCH_MAX, count - j);
+ end -= (end % incr);
+ for (i = 0; i+2 < end; i++, flags = edge_middle) {
+
+ if (i + 3 == count)
+ flags |= edge_last;
+
+ TRIANGLE(varray, flags, i + 1, i + 2, 0);
+ }
+ i = end;
+ fetch_init(varray, end);
+ varray_flush(varray);
+ }
+ }
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+
+ varray_flush(varray);
+}
+
+#undef TRIANGLE
+#undef QUAD
+#undef POINT
+#undef LINE
+#undef FUNC
diff --git a/src/gallium/auxiliary/draw/draw_pt_varray_tmp_linear.h b/src/gallium/auxiliary/draw/draw_pt_varray_tmp_linear.h
new file mode 100644
index 0000000000..010c7a18a7
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pt_varray_tmp_linear.h
@@ -0,0 +1,91 @@
+static unsigned trim( unsigned count, unsigned first, unsigned incr )
+{
+ return count - (count - first) % incr;
+}
+
+static void FUNC(struct draw_pt_front_end *frontend,
+ pt_elt_func get_elt,
+ const void *elts,
+ unsigned count)
+{
+ struct varray_frontend *varray = (struct varray_frontend *)frontend;
+ unsigned start = (unsigned) ((char *) elts - (char *) NULL);
+
+ unsigned j;
+ unsigned first, incr;
+
+ draw_pt_split_prim(varray->input_prim, &first, &incr);
+
+ /* Sanitize primitive length:
+ */
+ count = trim(count, first, incr);
+ if (count < first)
+ return;
+
+#if 0
+ debug_printf("%s (%d) %d/%d\n", __FUNCTION__,
+ varray->input_prim,
+ start, count);
+#endif
+
+ switch (varray->input_prim) {
+ case PIPE_PRIM_POINTS:
+ case PIPE_PRIM_LINES:
+ case PIPE_PRIM_TRIANGLES:
+ case PIPE_PRIM_LINE_STRIP:
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ case PIPE_PRIM_QUADS:
+ case PIPE_PRIM_QUAD_STRIP:
+ for (j = 0; j < count;) {
+ unsigned remaining = count - j;
+ unsigned nr = trim( MIN2(varray->driver_fetch_max, remaining), first, incr );
+ varray_flush_linear(varray, start + j, nr);
+ j += nr;
+ if (nr != remaining)
+ j -= (first - incr);
+ }
+ break;
+
+ case PIPE_PRIM_LINE_LOOP:
+ /* Always have to decompose as we've stated that this will be
+ * emitted as a line-strip.
+ */
+ for (j = 0; j < count;) {
+ unsigned remaining = count - j;
+ unsigned nr = trim( MIN2(varray->fetch_max-1, remaining), first, incr );
+ varray_line_loop_segment(varray, start, j, nr, nr == remaining);
+ j += nr;
+ if (nr != remaining)
+ j -= (first - incr);
+ }
+ break;
+
+
+ case PIPE_PRIM_POLYGON:
+ case PIPE_PRIM_TRIANGLE_FAN:
+ if (count < varray->driver_fetch_max) {
+ varray_flush_linear(varray, start, count);
+ }
+ else {
+ for ( j = 0; j < count;) {
+ unsigned remaining = count - j;
+ unsigned nr = trim( MIN2(varray->fetch_max-1, remaining), first, incr );
+ varray_fan_segment(varray, start, j, nr);
+ j += nr;
+ if (nr != remaining)
+ j -= (first - incr);
+ }
+ }
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+}
+
+#undef TRIANGLE
+#undef QUAD
+#undef POINT
+#undef LINE
+#undef FUNC
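+
+/* A minimal sketch of the batch-advance arithmetic used in the loops above,
+ * assuming a hypothetical fetch limit such as 256 vertices: after emitting
+ * nr of the remaining vertices, the cursor backs up by first - incr so the
+ * next batch re-fetches the vertices shared across the split (two for a
+ * triangle strip).  sketch_advance() is illustrative only.
+ */
+static INLINE unsigned
+sketch_advance(unsigned j, unsigned nr, unsigned remaining,
+               unsigned first, unsigned incr)
+{
+   j += nr;
+   if (nr != remaining)
+      j -= (first - incr);
+   return j;
+}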
diff --git a/src/gallium/auxiliary/draw/draw_pt_vcache.c b/src/gallium/auxiliary/draw/draw_pt_vcache.c
new file mode 100644
index 0000000000..1a0527be63
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pt_vcache.c
@@ -0,0 +1,516 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "util/u_memory.h"
+#include "util/u_prim.h"
+#include "draw/draw_context.h"
+#include "draw/draw_private.h"
+#include "draw/draw_pt.h"
+
+
+#define CACHE_MAX 256
+#define FETCH_MAX 256
+#define DRAW_MAX (16*1024)
+
+struct vcache_frontend {
+ struct draw_pt_front_end base;
+ struct draw_context *draw;
+
+ unsigned in[CACHE_MAX];
+ ushort out[CACHE_MAX];
+
+ ushort draw_elts[DRAW_MAX];
+ unsigned fetch_elts[FETCH_MAX];
+
+ unsigned draw_count;
+ unsigned fetch_count;
+ unsigned fetch_max;
+
+ struct draw_pt_middle_end *middle;
+
+ unsigned input_prim;
+ unsigned output_prim;
+
+ unsigned middle_prim;
+ unsigned opt;
+};
+
+static INLINE void
+vcache_flush( struct vcache_frontend *vcache )
+{
+ if (vcache->middle_prim != vcache->output_prim) {
+ vcache->middle_prim = vcache->output_prim;
+ vcache->middle->prepare( vcache->middle,
+ vcache->middle_prim,
+ vcache->opt,
+ &vcache->fetch_max );
+ }
+
+ if (vcache->draw_count) {
+ vcache->middle->run( vcache->middle,
+ vcache->fetch_elts,
+ vcache->fetch_count,
+ vcache->draw_elts,
+ vcache->draw_count );
+ }
+
+ memset(vcache->in, ~0, sizeof(vcache->in));
+ vcache->fetch_count = 0;
+ vcache->draw_count = 0;
+}
+
+static INLINE void
+vcache_check_flush( struct vcache_frontend *vcache )
+{
+ if ( vcache->draw_count + 6 >= DRAW_MAX ||
+ vcache->fetch_count + 4 >= FETCH_MAX )
+ {
+ vcache_flush( vcache );
+ }
+}
+
+
+static INLINE void
+vcache_elt( struct vcache_frontend *vcache,
+ unsigned felt,
+ ushort flags )
+{
+ unsigned idx = felt % CACHE_MAX;
+
+ if (vcache->in[idx] != felt) {
+ assert(vcache->fetch_count < FETCH_MAX);
+
+ vcache->in[idx] = felt;
+ vcache->out[idx] = (ushort)vcache->fetch_count;
+ vcache->fetch_elts[vcache->fetch_count++] = felt;
+ }
+
+ vcache->draw_elts[vcache->draw_count++] = vcache->out[idx] | flags;
+}
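+
+
+/* The cache above is direct-mapped: element felt lands in slot
+ * felt % CACHE_MAX and simply evicts any previous occupant on a conflict.
+ * A minimal sketch of a lookup under that scheme, with in[] shaped like the
+ * array in struct vcache_frontend (illustrative only):
+ */
+static INLINE boolean
+sketch_cache_hit(const unsigned in[CACHE_MAX], unsigned felt)
+{
+   return in[felt % CACHE_MAX] == felt;
+}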
+
+
+
+static INLINE void
+vcache_triangle( struct vcache_frontend *vcache,
+ unsigned i0,
+ unsigned i1,
+ unsigned i2 )
+{
+ vcache_elt(vcache, i0, 0);
+ vcache_elt(vcache, i1, 0);
+ vcache_elt(vcache, i2, 0);
+ vcache_check_flush(vcache);
+}
+
+
+static INLINE void
+vcache_triangle_flags( struct vcache_frontend *vcache,
+ ushort flags,
+ unsigned i0,
+ unsigned i1,
+ unsigned i2 )
+{
+ vcache_elt(vcache, i0, flags);
+ vcache_elt(vcache, i1, 0);
+ vcache_elt(vcache, i2, 0);
+ vcache_check_flush(vcache);
+}
+
+static INLINE void
+vcache_line( struct vcache_frontend *vcache,
+ unsigned i0,
+ unsigned i1 )
+{
+ vcache_elt(vcache, i0, 0);
+ vcache_elt(vcache, i1, 0);
+ vcache_check_flush(vcache);
+}
+
+
+static INLINE void
+vcache_line_flags( struct vcache_frontend *vcache,
+ ushort flags,
+ unsigned i0,
+ unsigned i1 )
+{
+ vcache_elt(vcache, i0, flags);
+ vcache_elt(vcache, i1, 0);
+ vcache_check_flush(vcache);
+}
+
+
+static INLINE void
+vcache_point( struct vcache_frontend *vcache,
+ unsigned i0 )
+{
+ vcache_elt(vcache, i0, 0);
+ vcache_check_flush(vcache);
+}
+
+static INLINE void
+vcache_quad( struct vcache_frontend *vcache,
+ unsigned i0,
+ unsigned i1,
+ unsigned i2,
+ unsigned i3 )
+{
+ vcache_triangle( vcache, i0, i1, i3 );
+ vcache_triangle( vcache, i1, i2, i3 );
+}
+
+static INLINE void
+vcache_ef_quad( struct vcache_frontend *vcache,
+ unsigned i0,
+ unsigned i1,
+ unsigned i2,
+ unsigned i3 )
+{
+ if (vcache->draw->rasterizer->flatshade_first) {
+ vcache_triangle_flags( vcache,
+ ( DRAW_PIPE_RESET_STIPPLE |
+ DRAW_PIPE_EDGE_FLAG_0 |
+ DRAW_PIPE_EDGE_FLAG_1 ),
+ i0, i1, i2 );
+
+ vcache_triangle_flags( vcache,
+ ( DRAW_PIPE_EDGE_FLAG_2 |
+ DRAW_PIPE_EDGE_FLAG_1 ),
+ i0, i2, i3 );
+ }
+ else {
+ vcache_triangle_flags( vcache,
+ ( DRAW_PIPE_RESET_STIPPLE |
+ DRAW_PIPE_EDGE_FLAG_0 |
+ DRAW_PIPE_EDGE_FLAG_2 ),
+ i0, i1, i3 );
+
+ vcache_triangle_flags( vcache,
+ ( DRAW_PIPE_EDGE_FLAG_0 |
+ DRAW_PIPE_EDGE_FLAG_1 ),
+ i1, i2, i3 );
+ }
+}
+
+/* At least for now, we're back to using a template include file for
+ * this. The two paths aren't too different though - it may be
+ * possible to reunify them.
+ */
+#define TRIANGLE(vc,flags,i0,i1,i2) vcache_triangle_flags(vc,flags,i0,i1,i2)
+#define QUAD(vc,i0,i1,i2,i3) vcache_ef_quad(vc,i0,i1,i2,i3)
+#define LINE(vc,flags,i0,i1) vcache_line_flags(vc,flags,i0,i1)
+#define POINT(vc,i0) vcache_point(vc,i0)
+#define FUNC vcache_run_extras
+#include "draw_pt_vcache_tmp.h"
+
+#define TRIANGLE(vc,flags,i0,i1,i2) vcache_triangle(vc,i0,i1,i2)
+#define QUAD(vc,i0,i1,i2,i3) vcache_quad(vc,i0,i1,i2,i3)
+#define LINE(vc,flags,i0,i1) vcache_line(vc,i0,i1)
+#define POINT(vc,i0) vcache_point(vc,i0)
+#define FUNC vcache_run
+#include "draw_pt_vcache_tmp.h"
+
+static INLINE void
+rebase_uint_elts( const unsigned *src,
+ unsigned count,
+ int delta,
+ ushort *dest )
+{
+ unsigned i;
+
+ for (i = 0; i < count; i++)
+ dest[i] = (ushort)(src[i] + delta);
+}
+
+static INLINE void
+rebase_ushort_elts( const ushort *src,
+ unsigned count,
+ int delta,
+ ushort *dest )
+{
+ unsigned i;
+
+ for (i = 0; i < count; i++)
+ dest[i] = (ushort)(src[i] + delta);
+}
+
+static INLINE void
+rebase_ubyte_elts( const ubyte *src,
+ unsigned count,
+ int delta,
+ ushort *dest )
+{
+ unsigned i;
+
+ for (i = 0; i < count; i++)
+ dest[i] = (ushort)(src[i] + delta);
+}
+
+
+
+static INLINE void
+translate_uint_elts( const unsigned *src,
+ unsigned count,
+ ushort *dest )
+{
+ unsigned i;
+
+ for (i = 0; i < count; i++)
+ dest[i] = (ushort)(src[i]);
+}
+
+static INLINE void
+translate_ushort_elts( const ushort *src,
+ unsigned count,
+ ushort *dest )
+{
+ unsigned i;
+
+ for (i = 0; i < count; i++)
+ dest[i] = (ushort)(src[i]);
+}
+
+static INLINE void
+translate_ubyte_elts( const ubyte *src,
+ unsigned count,
+ ushort *dest )
+{
+ unsigned i;
+
+ for (i = 0; i < count; i++)
+ dest[i] = (ushort)(src[i]);
+}
+
+
+
+
+#if 0
+static INLINE enum pipe_format
+format_from_get_elt( pt_elt_func get_elt )
+{
+ switch (draw->pt.user.eltSize) {
+ case 1: return PIPE_FORMAT_R8_UNORM;
+ case 2: return PIPE_FORMAT_R16_UNORM;
+ case 4: return PIPE_FORMAT_R32_UNORM;
+ default: return PIPE_FORMAT_NONE;
+ }
+}
+#endif
+
+static INLINE void
+vcache_check_run( struct draw_pt_front_end *frontend,
+ pt_elt_func get_elt,
+ const void *elts,
+ unsigned draw_count )
+{
+ struct vcache_frontend *vcache = (struct vcache_frontend *)frontend;
+ struct draw_context *draw = vcache->draw;
+ unsigned min_index = draw->pt.user.min_index;
+ unsigned max_index = draw->pt.user.max_index;
+ unsigned index_size = draw->pt.user.eltSize;
+ unsigned fetch_count = max_index + 1 - min_index;
+ const ushort *transformed_elts;
+ ushort *storage = NULL;
+ boolean ok = FALSE;
+
+
+ if (0) debug_printf("fetch_count %d fetch_max %d draw_count %d\n", fetch_count,
+ vcache->fetch_max,
+ draw_count);
+
+ if (max_index == 0xffffffff ||
+ fetch_count > draw_count) {
+ if (0) debug_printf("fail\n");
+ goto fail;
+ }
+
+ if (vcache->middle_prim != vcache->input_prim) {
+ vcache->middle_prim = vcache->input_prim;
+ vcache->middle->prepare( vcache->middle,
+ vcache->middle_prim,
+ vcache->opt,
+ &vcache->fetch_max );
+ }
+
+
+ if (min_index == 0 &&
+ index_size == 2)
+ {
+ transformed_elts = (const ushort *)elts;
+ }
+ else
+ {
+ storage = MALLOC( draw_count * sizeof(ushort) );
+ if (!storage)
+ goto fail;
+
+ if (min_index == 0) {
+ switch(index_size) {
+ case 1:
+ translate_ubyte_elts( (const ubyte *)elts,
+ draw_count,
+ storage );
+ break;
+
+ case 2:
+ translate_ushort_elts( (const ushort *)elts,
+ draw_count,
+ storage );
+ break;
+
+ case 4:
+ translate_uint_elts( (const uint *)elts,
+ draw_count,
+ storage );
+ break;
+
+ default:
+ assert(0);
+ FREE(storage);
+ return;
+ }
+ }
+ else {
+ switch(index_size) {
+ case 1:
+ rebase_ubyte_elts( (const ubyte *)elts,
+ draw_count,
+ 0 - (int)min_index,
+ storage );
+ break;
+
+ case 2:
+ rebase_ushort_elts( (const ushort *)elts,
+ draw_count,
+ 0 - (int)min_index,
+ storage );
+ break;
+
+ case 4:
+ rebase_uint_elts( (const uint *)elts,
+ draw_count,
+ 0 - (int)min_index,
+ storage );
+ break;
+
+ default:
+ assert(0);
+ FREE(storage);
+ return;
+ }
+ }
+ transformed_elts = storage;
+ }
+
+ if (fetch_count < UNDEFINED_VERTEX_ID)
+ ok = vcache->middle->run_linear_elts( vcache->middle,
+ min_index, /* start */
+ fetch_count,
+ transformed_elts,
+ draw_count );
+
+ FREE(storage);
+
+ if (ok)
+ return;
+
+ debug_printf("failed to execute atomic draw elts for %d/%d, splitting up\n",
+ fetch_count, draw_count);
+
+ fail:
+ vcache_run( frontend, get_elt, elts, draw_count );
+}
+
+
+
+
+static void
+vcache_prepare( struct draw_pt_front_end *frontend,
+ unsigned prim,
+ struct draw_pt_middle_end *middle,
+ unsigned opt )
+{
+ struct vcache_frontend *vcache = (struct vcache_frontend *)frontend;
+
+ if (opt & PT_PIPELINE)
+ {
+ vcache->base.run = vcache_run_extras;
+ }
+ else
+ {
+ vcache->base.run = vcache_check_run;
+ }
+
+ vcache->input_prim = prim;
+ vcache->output_prim = u_reduced_prim(prim);
+
+ vcache->middle = middle;
+ vcache->opt = opt;
+
+ /* Have to run prepare here, but try to guess a good prim for
+ * doing so:
+ */
+ vcache->middle_prim = (opt & PT_PIPELINE) ? vcache->output_prim : vcache->input_prim;
+ middle->prepare( middle, vcache->middle_prim, opt, &vcache->fetch_max );
+}
+
+
+
+
+static void
+vcache_finish( struct draw_pt_front_end *frontend )
+{
+ struct vcache_frontend *vcache = (struct vcache_frontend *)frontend;
+ vcache->middle->finish( vcache->middle );
+ vcache->middle = NULL;
+}
+
+static void
+vcache_destroy( struct draw_pt_front_end *frontend )
+{
+ FREE(frontend);
+}
+
+
+struct draw_pt_front_end *draw_pt_vcache( struct draw_context *draw )
+{
+ struct vcache_frontend *vcache = CALLOC_STRUCT( vcache_frontend );
+ if (vcache == NULL)
+ return NULL;
+
+ vcache->base.prepare = vcache_prepare;
+ vcache->base.run = NULL;
+ vcache->base.finish = vcache_finish;
+ vcache->base.destroy = vcache_destroy;
+ vcache->draw = draw;
+
+ memset(vcache->in, ~0, sizeof(vcache->in));
+
+ return &vcache->base;
+}
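+
+
+/* A minimal sketch of the decision vcache_check_run() makes above: when
+ * max_index is known and the index range is dense (no more unique vertices
+ * than indices), the whole range can be fetched linearly and the indices
+ * rebased to start at zero; otherwise the cached per-element path is used.
+ * sketch_use_linear_path() is illustrative only.
+ */
+static INLINE boolean
+sketch_use_linear_path(unsigned min_index, unsigned max_index,
+                       unsigned draw_count)
+{
+   unsigned fetch_count = max_index + 1 - min_index;
+
+   return max_index != 0xffffffff && fetch_count <= draw_count;
+}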
diff --git a/src/gallium/auxiliary/draw/draw_pt_vcache_tmp.h b/src/gallium/auxiliary/draw/draw_pt_vcache_tmp.h
new file mode 100644
index 0000000000..62822a3d56
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_pt_vcache_tmp.h
@@ -0,0 +1,214 @@
+
+
+static void FUNC( struct draw_pt_front_end *frontend,
+ pt_elt_func get_elt,
+ const void *elts,
+ unsigned count )
+{
+ struct vcache_frontend *vcache = (struct vcache_frontend *)frontend;
+ struct draw_context *draw = vcache->draw;
+
+ boolean flatfirst = (draw->rasterizer->flatshade &&
+ draw->rasterizer->flatshade_first);
+ unsigned i;
+ ushort flags;
+
+ if (0) debug_printf("%s %d\n", __FUNCTION__, count);
+
+
+ switch (vcache->input_prim) {
+ case PIPE_PRIM_POINTS:
+ for (i = 0; i < count; i ++) {
+ POINT( vcache,
+ get_elt(elts, i + 0) );
+ }
+ break;
+
+ case PIPE_PRIM_LINES:
+ for (i = 0; i+1 < count; i += 2) {
+ LINE( vcache,
+ DRAW_PIPE_RESET_STIPPLE,
+ get_elt(elts, i + 0),
+ get_elt(elts, i + 1));
+ }
+ break;
+
+ case PIPE_PRIM_LINE_LOOP:
+ if (count >= 2) {
+ flags = DRAW_PIPE_RESET_STIPPLE;
+
+ for (i = 1; i < count; i++, flags = 0) {
+ LINE( vcache,
+ flags,
+ get_elt(elts, i - 1),
+ get_elt(elts, i ));
+ }
+
+ LINE( vcache,
+ flags,
+ get_elt(elts, i - 1),
+ get_elt(elts, 0 ));
+ }
+ break;
+
+ case PIPE_PRIM_LINE_STRIP:
+ flags = DRAW_PIPE_RESET_STIPPLE;
+ for (i = 1; i < count; i++, flags = 0) {
+ LINE( vcache,
+ flags,
+ get_elt(elts, i - 1),
+ get_elt(elts, i ));
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLES:
+ for (i = 0; i+2 < count; i += 3) {
+ TRIANGLE( vcache,
+ DRAW_PIPE_RESET_STIPPLE | DRAW_PIPE_EDGE_FLAG_ALL,
+ get_elt(elts, i + 0),
+ get_elt(elts, i + 1),
+ get_elt(elts, i + 2 ));
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ if (flatfirst) {
+ for (i = 0; i+2 < count; i++) {
+ TRIANGLE( vcache,
+ DRAW_PIPE_RESET_STIPPLE | DRAW_PIPE_EDGE_FLAG_ALL,
+ get_elt(elts, i + 0),
+ get_elt(elts, i + 1 + (i&1)),
+ get_elt(elts, i + 2 - (i&1)));
+ }
+ }
+ else {
+ for (i = 0; i+2 < count; i++) {
+ TRIANGLE( vcache,
+ DRAW_PIPE_RESET_STIPPLE | DRAW_PIPE_EDGE_FLAG_ALL,
+ get_elt(elts, i + 0 + (i&1)),
+ get_elt(elts, i + 1 - (i&1)),
+ get_elt(elts, i + 2 ));
+ }
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLE_FAN:
+ if (count >= 3) {
+ if (flatfirst) {
+ for (i = 0; i+2 < count; i++) {
+ TRIANGLE( vcache,
+ DRAW_PIPE_RESET_STIPPLE | DRAW_PIPE_EDGE_FLAG_ALL,
+ get_elt(elts, i + 1),
+ get_elt(elts, i + 2),
+ get_elt(elts, 0 ));
+ }
+ }
+ else {
+ for (i = 0; i+2 < count; i++) {
+ TRIANGLE( vcache,
+ DRAW_PIPE_RESET_STIPPLE | DRAW_PIPE_EDGE_FLAG_ALL,
+ get_elt(elts, 0),
+ get_elt(elts, i + 1),
+ get_elt(elts, i + 2 ));
+ }
+ }
+ }
+ break;
+
+
+ case PIPE_PRIM_QUADS:
+ for (i = 0; i+3 < count; i += 4) {
+ if (flatfirst) {
+ QUAD( vcache,
+ get_elt(elts, i + 0),
+ get_elt(elts, i + 1),
+ get_elt(elts, i + 2),
+ get_elt(elts, i + 3) );
+ }
+ else {
+ QUAD( vcache,
+ get_elt(elts, i + 0),
+ get_elt(elts, i + 1),
+ get_elt(elts, i + 2),
+ get_elt(elts, i + 3) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_QUAD_STRIP:
+ for (i = 0; i+3 < count; i += 2) {
+ if (flatfirst) {
+ QUAD( vcache,
+ get_elt(elts, i + 0),
+ get_elt(elts, i + 1),
+ get_elt(elts, i + 3),
+ get_elt(elts, i + 2) );
+ }
+ else {
+ QUAD( vcache,
+ get_elt(elts, i + 2),
+ get_elt(elts, i + 0),
+ get_elt(elts, i + 1),
+ get_elt(elts, i + 3) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_POLYGON:
+ {
+ /* These bitflags look a little odd because we submit the
+ * vertices as (1,2,0) to satisfy flatshade requirements.
+ */
+ const ushort edge_first = DRAW_PIPE_EDGE_FLAG_2;
+ const ushort edge_middle = DRAW_PIPE_EDGE_FLAG_0;
+ const ushort edge_last = DRAW_PIPE_EDGE_FLAG_1;
+ ushort edge_next, edge_finish;
+
+ if (flatfirst) {
+ flags = DRAW_PIPE_RESET_STIPPLE | edge_middle | edge_last;
+ edge_next = edge_last;
+ edge_finish = edge_first;
+ }
+ else {
+ flags = DRAW_PIPE_RESET_STIPPLE | edge_first | edge_middle;
+ edge_next = edge_middle;
+ edge_finish = edge_last;
+ }
+
+ for (i = 0; i+2 < count; i++, flags = edge_next) {
+
+ if (i + 3 == count)
+ flags |= edge_finish;
+
+ if (flatfirst) {
+ TRIANGLE( vcache,
+ flags,
+ get_elt(elts, 0),
+ get_elt(elts, i + 1),
+ get_elt(elts, i + 2) );
+ }
+ else {
+ TRIANGLE( vcache,
+ flags,
+ get_elt(elts, i + 1),
+ get_elt(elts, i + 2),
+ get_elt(elts, 0));
+ }
+ }
+ }
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+
+ vcache_flush( vcache );
+}
+
+
+#undef TRIANGLE
+#undef QUAD
+#undef POINT
+#undef LINE
+#undef FUNC
diff --git a/src/gallium/auxiliary/draw/draw_vbuf.h b/src/gallium/auxiliary/draw/draw_vbuf.h
new file mode 100644
index 0000000000..cccd3bf435
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_vbuf.h
@@ -0,0 +1,129 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Vertex buffer drawing stage.
+ *
+ * \author Keith Whitwell <keith@tungstengraphics.com>
+ * \author Jose Fonseca <jrfonsec@tungstengraphics.com>
+ */
+
+#ifndef DRAW_VBUF_H_
+#define DRAW_VBUF_H_
+
+
+#include "pipe/p_compiler.h"
+
+
+struct pipe_rasterizer_state;
+struct draw_context;
+struct vertex_info;
+
+
+/**
+ * Interface for hardware vertex buffer rendering.
+ */
+struct vbuf_render {
+
+ /**
+ * Driver limits. May be tuned lower to improve cache hits on
+ * index list.
+ */
+ unsigned max_indices;
+ unsigned max_vertex_buffer_bytes;
+
+ /**
+ * Query if the hardware driver needs assistance for a particular
+ * combination of rasterizer state and primitive.
+ *
+ * Currently optional.
+ */
+ boolean (*need_pipeline)(const struct vbuf_render *render,
+ const struct pipe_rasterizer_state *rasterizer,
+ unsigned int prim );
+
+
+ /**
+ * Get the hardware vertex format.
+ *
+ * XXX: have this in draw_context instead?
+ */
+ const struct vertex_info *(*get_vertex_info)( struct vbuf_render * );
+
+ /**
+ * Request a destination for vertices.
+ * Hardware renderers will use ttm memory, others will just malloc
+ * something.
+ */
+ boolean (*allocate_vertices)( struct vbuf_render *,
+ ushort vertex_size,
+ ushort nr_vertices );
+
+ void *(*map_vertices)( struct vbuf_render * );
+ void (*unmap_vertices)( struct vbuf_render *,
+ ushort min_index,
+ ushort max_index );
+
+ /**
+ * Notify the renderer of the current primitive when it changes.
+ * Must succeed for TRIANGLES, LINES and POINTS. Other prims at
+ * the discretion of the driver, for the benefit of the passthrough
+ * path.
+ */
+ boolean (*set_primitive)( struct vbuf_render *, unsigned prim );
+
+ /**
+ * DrawElements, note indices are ushort. The driver must complete
+ * this call, if necessary splitting the index list itself.
+ */
+ void (*draw)( struct vbuf_render *,
+ const ushort *indices,
+ uint nr_indices );
+
+ /* Draw Arrays path too.
+ */
+ void (*draw_arrays)( struct vbuf_render *,
+ unsigned start,
+ uint nr );
+
+ /**
+ * Called when vbuf is done with this set of vertices:
+ */
+ void (*release_vertices)( struct vbuf_render * );
+
+ void (*destroy)( struct vbuf_render * );
+};
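+
+
+/* A minimal sketch of how the draw module drives this interface for one
+ * batch of vertices (compare the fetch_emit / fetch_shade_emit middle ends):
+ * allocate, map, fill, unmap, draw, release.  The fill_vertices callback is
+ * hypothetical and stands in for the middle end's fetch/shader run.
+ */
+static INLINE void
+sketch_vbuf_batch( struct vbuf_render *render,
+                   ushort vertex_size,
+                   ushort nr_vertices,
+                   const ushort *indices,
+                   uint nr_indices,
+                   void (*fill_vertices)( void *dest, ushort nr ) )
+{
+   if (render->allocate_vertices( render, vertex_size, nr_vertices )) {
+      void *dest = render->map_vertices( render );
+      if (dest) {
+         fill_vertices( dest, nr_vertices );
+         render->unmap_vertices( render, 0, (ushort)(nr_vertices - 1) );
+         render->draw( render, indices, nr_indices );
+      }
+      render->release_vertices( render );
+   }
+}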
+
+
+
+struct draw_stage *
+draw_vbuf_stage( struct draw_context *draw,
+ struct vbuf_render *render );
+
+
+#endif /*DRAW_VBUF_H_*/
diff --git a/src/gallium/auxiliary/draw/draw_vertex.c b/src/gallium/auxiliary/draw/draw_vertex.c
new file mode 100644
index 0000000000..3214213e44
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_vertex.c
@@ -0,0 +1,129 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/*
+ * Functions for specifying the post-transformation vertex layout.
+ *
+ * Author:
+ * Brian Paul
+ * Keith Whitwell
+ */
+
+
+#include "draw/draw_private.h"
+#include "draw/draw_vertex.h"
+
+
+/**
+ * Compute the size of a vertex, in dwords/floats, and store it in
+ * the vinfo->size field.
+ */
+void
+draw_compute_vertex_size(struct vertex_info *vinfo)
+{
+ uint i;
+
+ vinfo->size = 0;
+ for (i = 0; i < vinfo->num_attribs; i++) {
+ switch (vinfo->attrib[i].emit) {
+ case EMIT_OMIT:
+ break;
+ case EMIT_4UB:
+ /* fall-through */
+ case EMIT_1F_PSIZE:
+ /* fall-through */
+ case EMIT_1F:
+ vinfo->size += 1;
+ break;
+ case EMIT_2F:
+ vinfo->size += 2;
+ break;
+ case EMIT_3F:
+ vinfo->size += 3;
+ break;
+ case EMIT_4F:
+ vinfo->size += 4;
+ break;
+ default:
+ assert(0);
+ }
+ }
+}
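+
+
+/*
+ * Worked example (illustrative): a layout emitting a 4F position, a
+ * 4UB color and a 2F texcoord gives size = 4 + 1 + 2 = 7 dwords,
+ * i.e. 28 bytes per emitted vertex.
+ */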
+
+
+void
+draw_dump_emitted_vertex(const struct vertex_info *vinfo, const uint8_t *data)
+{
+ unsigned i, j;
+
+ for (i = 0; i < vinfo->num_attribs; i++) {
+ j = vinfo->attrib[i].src_index;
+ switch (vinfo->attrib[i].emit) {
+ case EMIT_OMIT:
+ debug_printf("EMIT_OMIT:");
+ break;
+ case EMIT_1F:
+ debug_printf("EMIT_1F:\t");
+ debug_printf("%f ", *(float *)data); data += sizeof(float);
+ break;
+ case EMIT_1F_PSIZE:
+ debug_printf("EMIT_1F_PSIZE:\t");
+ debug_printf("%f ", *(float *)data); data += sizeof(float);
+ break;
+ case EMIT_2F:
+ debug_printf("EMIT_2F:\t");
+ debug_printf("%f ", *(float *)data); data += sizeof(float);
+ debug_printf("%f ", *(float *)data); data += sizeof(float);
+ break;
+ case EMIT_3F:
+ debug_printf("EMIT_3F:\t");
+ debug_printf("%f ", *(float *)data); data += sizeof(float);
+ debug_printf("%f ", *(float *)data); data += sizeof(float);
+ debug_printf("%f ", *(float *)data); data += sizeof(float);
+ break;
+ case EMIT_4F:
+ debug_printf("EMIT_4F:\t");
+ debug_printf("%f ", *(float *)data); data += sizeof(float);
+ debug_printf("%f ", *(float *)data); data += sizeof(float);
+ debug_printf("%f ", *(float *)data); data += sizeof(float);
+ debug_printf("%f ", *(float *)data); data += sizeof(float);
+ break;
+ case EMIT_4UB:
+ debug_printf("EMIT_4UB:\t");
+ debug_printf("%u ", *data++);
+ debug_printf("%u ", *data++);
+ debug_printf("%u ", *data++);
+ debug_printf("%u ", *data++);
+ break;
+ default:
+ assert(0);
+ }
+ debug_printf("\n");
+ }
+ debug_printf("\n");
+}
diff --git a/src/gallium/auxiliary/draw/draw_vertex.h b/src/gallium/auxiliary/draw/draw_vertex.h
new file mode 100644
index 0000000000..554f4ac3c1
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_vertex.h
@@ -0,0 +1,162 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Post-transform vertex format info. The vertex_info struct is used by
+ * the draw_vbuf code to emit hardware-specific vertex layouts into hw
+ * vertex buffers.
+ *
+ * Author:
+ * Brian Paul
+ */
+
+
+#ifndef DRAW_VERTEX_H
+#define DRAW_VERTEX_H
+
+
+#include "pipe/p_state.h"
+
+
+/**
+ * Vertex attribute emit modes
+ */
+enum attrib_emit {
+ EMIT_OMIT, /**< don't emit the attribute */
+ EMIT_1F,
+ EMIT_1F_PSIZE, /**< insert constant point size */
+ EMIT_2F,
+ EMIT_3F,
+ EMIT_4F,
+ EMIT_4UB /**< XXX may need variations for RGBA vs BGRA, etc */
+};
+
+
+/**
+ * Attribute interpolation mode
+ */
+enum interp_mode {
+ INTERP_NONE, /**< never interpolate vertex header info */
+ INTERP_POS, /**< special case for frag position */
+ INTERP_CONSTANT,
+ INTERP_LINEAR,
+ INTERP_PERSPECTIVE
+};
+
+
+/**
+ * Information about hardware/rasterization vertex layout.
+ */
+struct vertex_info
+{
+ uint num_attribs;
+ uint hwfmt[4]; /**< hardware format info for this format */
+ uint size; /**< total vertex size in dwords */
+
+ /* Keep this small and at the end of the struct to allow quick
+ * memcmp() comparisons.
+ */
+ struct {
+ unsigned interp_mode:4; /**< INTERP_x */
+ unsigned emit:4; /**< EMIT_x */
+ unsigned src_index:8; /**< map to post-xform attribs */
+ } attrib[PIPE_MAX_SHADER_INPUTS];
+};
+
+static INLINE size_t
+draw_vinfo_size( const struct vertex_info *a )
+{
+ return offsetof(const struct vertex_info, attrib[a->num_attribs]);
+}
+
+static INLINE int
+draw_vinfo_compare( const struct vertex_info *a,
+ const struct vertex_info *b )
+{
+ size_t sizea = draw_vinfo_size( a );
+ return memcmp( a, b, sizea );
+}
+
+static INLINE void
+draw_vinfo_copy( struct vertex_info *dst,
+ const struct vertex_info *src )
+{
+ size_t size = draw_vinfo_size( src );
+ memcpy( dst, src, size );
+}
+
+
+
+/**
+ * Add another attribute to the given vertex_info object.
+ * \param src_index indicates which post-transformed vertex attrib slot
+ * corresponds to this attribute.
+ * \return slot in which the attribute was added
+ */
+static INLINE uint
+draw_emit_vertex_attr(struct vertex_info *vinfo,
+ enum attrib_emit emit,
+ enum interp_mode interp, /* only used by softpipe??? */
+ uint src_index)
+{
+ const uint n = vinfo->num_attribs;
+ assert(n < PIPE_MAX_SHADER_INPUTS);
+ vinfo->attrib[n].emit = emit;
+ vinfo->attrib[n].interp_mode = interp;
+ vinfo->attrib[n].src_index = src_index;
+ vinfo->num_attribs++;
+ return n;
+}
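+
+
+/*
+ * Example usage (illustrative sketch only, not part of the build):
+ * building the layout for a position + color vertex.  Slots 0 and 1
+ * are assumed to be the vertex shader's position and color outputs.
+ */
+#if 0
+static void setup_example_layout( struct vertex_info *vinfo )
+{
+   vinfo->num_attribs = 0;
+   draw_emit_vertex_attr(vinfo, EMIT_4F,  INTERP_POS,    0);  /* position */
+   draw_emit_vertex_attr(vinfo, EMIT_4UB, INTERP_LINEAR, 1);  /* color */
+   draw_compute_vertex_size(vinfo);   /* vinfo->size == 4 + 1 == 5 dwords */
+}
+#endif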
+
+
+extern void draw_compute_vertex_size(struct vertex_info *vinfo);
+
+void draw_dump_emitted_vertex(const struct vertex_info *vinfo,
+ const uint8_t *data);
+
+
+static INLINE unsigned draw_translate_vinfo_format(unsigned format )
+{
+ switch (format) {
+ case EMIT_1F:
+ case EMIT_1F_PSIZE:
+ return PIPE_FORMAT_R32_FLOAT;
+ case EMIT_2F:
+ return PIPE_FORMAT_R32G32_FLOAT;
+ case EMIT_3F:
+ return PIPE_FORMAT_R32G32B32_FLOAT;
+ case EMIT_4F:
+ return PIPE_FORMAT_R32G32B32A32_FLOAT;
+ case EMIT_4UB:
+ return PIPE_FORMAT_R8G8B8A8_UNORM;
+ default:
+ return PIPE_FORMAT_NONE;
+ }
+}
+
+
+#endif /* DRAW_VERTEX_H */
diff --git a/src/gallium/auxiliary/draw/draw_vs.c b/src/gallium/auxiliary/draw/draw_vs.c
new file mode 100644
index 0000000000..790e89ed82
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_vs.c
@@ -0,0 +1,254 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Brian Paul
+ */
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+#include "pipe/p_shader_tokens.h"
+
+#include "draw_private.h"
+#include "draw_context.h"
+#include "draw_vs.h"
+
+#include "translate/translate.h"
+#include "translate/translate_cache.h"
+
+#include "tgsi/tgsi_exec.h"
+
+
+
+
+void draw_vs_set_constants( struct draw_context *draw,
+ const float (*constants)[4],
+ unsigned size )
+{
+ if (((uintptr_t)constants) & 0xf) {
+      if (size > draw->vs.const_storage_size) {
+         if (draw->vs.aligned_constant_storage)
+            align_free((void *)draw->vs.aligned_constant_storage);
+         draw->vs.aligned_constant_storage = align_malloc( size, 16 );
+         /* Remember the allocation size so we only reallocate when the
+          * constant buffer actually grows.
+          */
+         draw->vs.const_storage_size = size;
+      }
+ memcpy( (void*)draw->vs.aligned_constant_storage,
+ constants,
+ size );
+ constants = draw->vs.aligned_constant_storage;
+ }
+
+ draw->vs.aligned_constants = constants;
+ draw_vs_aos_machine_constants( draw->vs.aos_machine, constants );
+}
+
+
+void draw_vs_set_viewport( struct draw_context *draw,
+ const struct pipe_viewport_state *viewport )
+{
+ draw_vs_aos_machine_viewport( draw->vs.aos_machine, viewport );
+}
+
+
+
+struct draw_vertex_shader *
+draw_create_vertex_shader(struct draw_context *draw,
+ const struct pipe_shader_state *shader)
+{
+ struct draw_vertex_shader *vs;
+
+ vs = draw_create_vs_llvm( draw, shader );
+ if (!vs) {
+ vs = draw_create_vs_sse( draw, shader );
+ if (!vs) {
+ vs = draw_create_vs_ppc( draw, shader );
+ if (!vs) {
+ vs = draw_create_vs_exec( draw, shader );
+ }
+ }
+ }
+
+ if (vs)
+ {
+ uint i;
+ for (i = 0; i < vs->info.num_outputs; i++) {
+ if (vs->info.output_semantic_name[i] == TGSI_SEMANTIC_POSITION &&
+ vs->info.output_semantic_index[i] == 0)
+ vs->position_output = i;
+ }
+ }
+
+ assert(vs);
+ return vs;
+}
+
+
+void
+draw_bind_vertex_shader(struct draw_context *draw,
+ struct draw_vertex_shader *dvs)
+{
+ draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
+
+ if (dvs)
+ {
+ draw->vs.vertex_shader = dvs;
+ draw->vs.num_vs_outputs = dvs->info.num_outputs;
+ draw->vs.position_output = dvs->position_output;
+ dvs->prepare( dvs, draw );
+ }
+ else {
+ draw->vs.vertex_shader = NULL;
+ draw->vs.num_vs_outputs = 0;
+ }
+}
+
+
+void
+draw_delete_vertex_shader(struct draw_context *draw,
+ struct draw_vertex_shader *dvs)
+{
+ unsigned i;
+
+ for (i = 0; i < dvs->nr_varients; i++)
+ dvs->varient[i]->destroy( dvs->varient[i] );
+
+ dvs->nr_varients = 0;
+
+ dvs->delete( dvs );
+}
+
+
+
+boolean
+draw_vs_init( struct draw_context *draw )
+{
+ draw->vs.machine = tgsi_exec_machine_create();
+ if (!draw->vs.machine)
+ return FALSE;
+
+ draw->vs.emit_cache = translate_cache_create();
+ if (!draw->vs.emit_cache)
+ return FALSE;
+
+ draw->vs.fetch_cache = translate_cache_create();
+ if (!draw->vs.fetch_cache)
+ return FALSE;
+
+ draw->vs.aos_machine = draw_vs_aos_machine();
+#ifdef PIPE_ARCH_X86
+ if (!draw->vs.aos_machine)
+ return FALSE;
+#endif
+
+ return TRUE;
+}
+
+void
+draw_vs_destroy( struct draw_context *draw )
+{
+ if (draw->vs.fetch_cache)
+ translate_cache_destroy(draw->vs.fetch_cache);
+
+ if (draw->vs.emit_cache)
+ translate_cache_destroy(draw->vs.emit_cache);
+
+ if (draw->vs.aos_machine)
+ draw_vs_aos_machine_destroy(draw->vs.aos_machine);
+
+ if (draw->vs.aligned_constant_storage)
+ align_free((void*)draw->vs.aligned_constant_storage);
+
+ tgsi_exec_machine_destroy(draw->vs.machine);
+}
+
+
+struct draw_vs_varient *
+draw_vs_lookup_varient( struct draw_vertex_shader *vs,
+ const struct draw_vs_varient_key *key )
+{
+ struct draw_vs_varient *varient;
+ unsigned i;
+
+ /* Lookup existing varient:
+ */
+ for (i = 0; i < vs->nr_varients; i++)
+ if (draw_vs_varient_key_compare(key, &vs->varient[i]->key) == 0)
+ return vs->varient[i];
+
+ /* Else have to create a new one:
+ */
+ varient = vs->create_varient( vs, key );
+ if (varient == NULL)
+ return NULL;
+
+ /* Add it to our list, could be smarter:
+ */
+ if (vs->nr_varients < Elements(vs->varient)) {
+ vs->varient[vs->nr_varients++] = varient;
+ }
+ else {
+ vs->last_varient++;
+ vs->last_varient %= Elements(vs->varient);
+ vs->varient[vs->last_varient]->destroy(vs->varient[vs->last_varient]);
+ vs->varient[vs->last_varient] = varient;
+ }
+
+ /* Done
+ */
+ return varient;
+}
+
+
+struct translate *
+draw_vs_get_fetch( struct draw_context *draw,
+ struct translate_key *key )
+{
+ if (!draw->vs.fetch ||
+ translate_key_compare(&draw->vs.fetch->key, key) != 0)
+ {
+ translate_key_sanitize(key);
+ draw->vs.fetch = translate_cache_find(draw->vs.fetch_cache, key);
+ }
+
+ return draw->vs.fetch;
+}
+
+struct translate *
+draw_vs_get_emit( struct draw_context *draw,
+ struct translate_key *key )
+{
+ if (!draw->vs.emit ||
+ translate_key_compare(&draw->vs.emit->key, key) != 0)
+ {
+ translate_key_sanitize(key);
+ draw->vs.emit = translate_cache_find(draw->vs.emit_cache, key);
+ }
+
+ return draw->vs.emit;
+}
diff --git a/src/gallium/auxiliary/draw/draw_vs.h b/src/gallium/auxiliary/draw/draw_vs.h
new file mode 100644
index 0000000000..89ae158751
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_vs.h
@@ -0,0 +1,224 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef DRAW_VS_H
+#define DRAW_VS_H
+
+#include "draw_context.h"
+#include "draw_private.h"
+
+
+struct draw_context;
+struct pipe_shader_state;
+
+struct draw_varient_input
+{
+ enum pipe_format format;
+ unsigned buffer;
+ unsigned offset;
+};
+
+struct draw_varient_output
+{
+ enum pipe_format format; /* output format */
+ unsigned vs_output:8; /* which vertex shader output is this? */
+ unsigned offset:24; /* offset into output vertex */
+};
+
+struct draw_varient_element {
+ struct draw_varient_input in;
+ struct draw_varient_output out;
+};
+
+struct draw_vs_varient_key {
+ unsigned output_stride;
+ unsigned nr_elements:8; /* max2(nr_inputs, nr_outputs) */
+ unsigned nr_inputs:8;
+ unsigned nr_outputs:8;
+ unsigned viewport:1;
+ unsigned clip:1;
+ unsigned const_vbuffers:5;
+ struct draw_varient_element element[PIPE_MAX_ATTRIBS];
+};
+
+struct draw_vs_varient;
+
+
+struct draw_vs_varient {
+ struct draw_vs_varient_key key;
+
+ struct draw_vertex_shader *vs;
+
+ void (*set_buffer)( struct draw_vs_varient *,
+ unsigned i,
+ const void *ptr,
+ unsigned stride );
+
+ void (PIPE_CDECL *run_linear)( struct draw_vs_varient *shader,
+ unsigned start,
+ unsigned count,
+ void *output_buffer );
+
+ void (PIPE_CDECL *run_elts)( struct draw_vs_varient *shader,
+ const unsigned *elts,
+ unsigned count,
+ void *output_buffer );
+
+ void (*destroy)( struct draw_vs_varient * );
+};
+
+
+/**
+ * Private version of the compiled vertex_shader
+ */
+struct draw_vertex_shader {
+ struct draw_context *draw;
+
+ /* This member will disappear shortly:
+ */
+ struct pipe_shader_state state;
+
+ struct tgsi_shader_info info;
+ unsigned position_output;
+
+ /* Extracted from shader:
+ */
+ const float (*immediates)[4];
+
+   /* Small cache of recently compiled varients of this shader:
+    */
+ struct draw_vs_varient *varient[16];
+ unsigned nr_varients;
+ unsigned last_varient;
+ struct draw_vs_varient *(*create_varient)( struct draw_vertex_shader *shader,
+ const struct draw_vs_varient_key *key );
+
+
+ void (*prepare)( struct draw_vertex_shader *shader,
+ struct draw_context *draw );
+
+ /* Run the shader - this interface will get cleaned up in the
+ * future:
+ */
+ void (*run_linear)( struct draw_vertex_shader *shader,
+ const float (*input)[4],
+ float (*output)[4],
+ const float (*constants)[4],
+ unsigned count,
+ unsigned input_stride,
+ unsigned output_stride );
+
+
+ void (*delete)( struct draw_vertex_shader * );
+};
+
+
+struct draw_vs_varient *
+draw_vs_lookup_varient( struct draw_vertex_shader *base,
+ const struct draw_vs_varient_key *key );
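+
+
+/*
+ * Example usage (illustrative sketch only, not part of the build):
+ * describe a single float4 position attribute, look up (or build) a
+ * matching varient and run it over a linear range of vertices.  The
+ * "input_verts"/"output_verts" buffers and strides are hypothetical.
+ */
+#if 0
+static void run_example( struct draw_vertex_shader *vs,
+                         const void *input_verts,
+                         void *output_verts,
+                         unsigned count )
+{
+   struct draw_vs_varient_key key;
+   struct draw_vs_varient *v;
+
+   /* Zero the key first: varient lookup compares keys with memcmp(),
+    * so stale padding/bitfield bits would defeat the cache.
+    */
+   memset(&key, 0, sizeof(key));
+   key.nr_elements = 1;
+   key.nr_inputs = 1;
+   key.nr_outputs = 1;
+   key.output_stride = 4 * sizeof(float);
+   key.element[0].in.format  = PIPE_FORMAT_R32G32B32A32_FLOAT;
+   key.element[0].in.buffer  = 0;
+   key.element[0].in.offset  = 0;
+   key.element[0].out.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+   key.element[0].out.vs_output = 0;
+   key.element[0].out.offset = 0;
+
+   v = draw_vs_lookup_varient(vs, &key);
+   v->set_buffer(v, 0, input_verts, 4 * sizeof(float));
+   v->run_linear(v, 0, count, output_verts);
+}
+#endif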
+
+
+/********************************************************************************
+ * Internal functions:
+ */
+
+struct draw_vertex_shader *
+draw_create_vs_exec(struct draw_context *draw,
+ const struct pipe_shader_state *templ);
+
+struct draw_vertex_shader *
+draw_create_vs_sse(struct draw_context *draw,
+ const struct pipe_shader_state *templ);
+
+struct draw_vertex_shader *
+draw_create_vs_ppc(struct draw_context *draw,
+ const struct pipe_shader_state *templ);
+
+struct draw_vertex_shader *
+draw_create_vs_llvm(struct draw_context *draw,
+ const struct pipe_shader_state *templ);
+
+
+
+struct draw_vs_varient_key;
+struct draw_vertex_shader;
+
+struct draw_vs_varient *draw_vs_varient_aos_sse( struct draw_vertex_shader *vs,
+ const struct draw_vs_varient_key *key );
+
+
+
+/********************************************************************************
+ * Helpers for vs implementations that don't do their own fetch/emit varients.
+ * This allows the fetch/emit translate objects to be shared between shaders.
+ */
+struct translate;
+struct translate_key;
+
+struct translate *draw_vs_get_fetch( struct draw_context *draw,
+ struct translate_key *key );
+
+
+struct translate *draw_vs_get_emit( struct draw_context *draw,
+ struct translate_key *key );
+
+struct draw_vs_varient *draw_vs_varient_generic( struct draw_vertex_shader *vs,
+ const struct draw_vs_varient_key *key );
+
+
+
+static INLINE int draw_vs_varient_keysize( const struct draw_vs_varient_key *key )
+{
+ return 2 * sizeof(int) + key->nr_elements * sizeof(struct draw_varient_element);
+}
+
+static INLINE int draw_vs_varient_key_compare( const struct draw_vs_varient_key *a,
+ const struct draw_vs_varient_key *b )
+{
+ int keysize = draw_vs_varient_keysize(a);
+ return memcmp(a, b, keysize);
+}
+
+
+struct aos_machine *draw_vs_aos_machine( void );
+void draw_vs_aos_machine_destroy( struct aos_machine *machine );
+
+void draw_vs_aos_machine_constants( struct aos_machine *machine,
+ const float (*constants)[4] );
+
+void draw_vs_aos_machine_viewport( struct aos_machine *machine,
+ const struct pipe_viewport_state *viewport );
+
+
+#define MAX_TGSI_VERTICES 4
+
+
+
+#endif
diff --git a/src/gallium/auxiliary/draw/draw_vs_aos.c b/src/gallium/auxiliary/draw/draw_vs_aos.c
new file mode 100644
index 0000000000..62e04a65f3
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_vs_aos.c
@@ -0,0 +1,2274 @@
+/*
+ * Mesa 3-D graphics library
+ * Version: 6.3
+ *
+ * Copyright (C) 1999-2004 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * Translate tgsi vertex programs to x86/x87/SSE/SSE2 machine code
+ * using the rtasm runtime assembler. Based on the old
+ * t_vb_arb_program_sse.c
+ */
+
+
+#include "util/u_memory.h"
+#include "util/u_math.h"
+#include "pipe/p_shader_tokens.h"
+#include "util/u_debug.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_util.h"
+#include "tgsi/tgsi_exec.h"
+#include "tgsi/tgsi_dump.h"
+
+#include "draw_vs.h"
+#include "draw_vs_aos.h"
+
+#include "rtasm/rtasm_x86sse.h"
+
+#ifdef PIPE_ARCH_X86
+#define DISASSEM 0
+#define FAST_MATH 1
+
+static const char *files[] =
+{
+ "NULL",
+ "CONST",
+ "IN",
+ "OUT",
+ "TEMP",
+ "SAMP",
+ "ADDR",
+ "IMM",
+ "INTERNAL",
+};
+
+static INLINE boolean eq( struct x86_reg a,
+ struct x86_reg b )
+{
+ return (a.file == b.file &&
+ a.idx == b.idx &&
+ a.mod == b.mod &&
+ a.disp == b.disp);
+}
+
+struct x86_reg aos_get_x86( struct aos_compilation *cp,
+ unsigned which_reg, /* quick hack */
+ unsigned value )
+{
+ struct x86_reg reg;
+
+ if (which_reg == 0)
+ reg = cp->temp_EBP;
+ else
+ reg = cp->tmp_EAX;
+
+ if (cp->x86_reg[which_reg] != value) {
+ unsigned offset;
+
+ switch (value) {
+ case X86_IMMEDIATES:
+ assert(which_reg == 0);
+ offset = Offset(struct aos_machine, immediates);
+ break;
+ case X86_CONSTANTS:
+ assert(which_reg == 1);
+ offset = Offset(struct aos_machine, constants);
+ break;
+ case X86_BUFFERS:
+ assert(which_reg == 0);
+ offset = Offset(struct aos_machine, buffer);
+ break;
+ default:
+ assert(0);
+ offset = 0;
+ }
+
+
+ x86_mov(cp->func, reg,
+ x86_make_disp(cp->machine_EDX, offset));
+
+ cp->x86_reg[which_reg] = value;
+ }
+
+ return reg;
+}
+
+
+static struct x86_reg get_reg_ptr(struct aos_compilation *cp,
+ unsigned file,
+ unsigned idx )
+{
+ struct x86_reg ptr = cp->machine_EDX;
+
+ switch (file) {
+ case TGSI_FILE_INPUT:
+ assert(idx < MAX_INPUTS);
+ return x86_make_disp(ptr, Offset(struct aos_machine, input[idx]));
+
+ case TGSI_FILE_OUTPUT:
+ return x86_make_disp(ptr, Offset(struct aos_machine, output[idx]));
+
+ case TGSI_FILE_TEMPORARY:
+ assert(idx < MAX_TEMPS);
+ return x86_make_disp(ptr, Offset(struct aos_machine, temp[idx]));
+
+ case AOS_FILE_INTERNAL:
+ assert(idx < MAX_INTERNALS);
+ return x86_make_disp(ptr, Offset(struct aos_machine, internal[idx]));
+
+ case TGSI_FILE_IMMEDIATE:
+ assert(idx < MAX_IMMEDIATES); /* just a sanity check */
+ return x86_make_disp(aos_get_x86(cp, 0, X86_IMMEDIATES), idx * 4 * sizeof(float));
+
+ case TGSI_FILE_CONSTANT:
+ assert(idx < MAX_CONSTANTS); /* just a sanity check */
+ return x86_make_disp(aos_get_x86(cp, 1, X86_CONSTANTS), idx * 4 * sizeof(float));
+
+ default:
+ AOS_ERROR(cp, "unknown reg file");
+ return x86_make_reg(0,0);
+ }
+}
+
+
+
+#define X87_CW_EXCEPTION_INV_OP (1<<0)
+#define X87_CW_EXCEPTION_DENORM_OP (1<<1)
+#define X87_CW_EXCEPTION_ZERO_DIVIDE (1<<2)
+#define X87_CW_EXCEPTION_OVERFLOW (1<<3)
+#define X87_CW_EXCEPTION_UNDERFLOW (1<<4)
+#define X87_CW_EXCEPTION_PRECISION (1<<5)
+#define X87_CW_PRECISION_SINGLE (0<<8)
+#define X87_CW_PRECISION_RESERVED (1<<8)
+#define X87_CW_PRECISION_DOUBLE (2<<8)
+#define X87_CW_PRECISION_DOUBLE_EXT (3<<8)
+#define X87_CW_PRECISION_MASK (3<<8)
+#define X87_CW_ROUND_NEAREST (0<<10)
+#define X87_CW_ROUND_DOWN (1<<10)
+#define X87_CW_ROUND_UP (2<<10)
+#define X87_CW_ROUND_ZERO (3<<10)
+#define X87_CW_ROUND_MASK (3<<10)
+#define X87_CW_INFINITY (1<<12)
+
+
+
+
+static void spill( struct aos_compilation *cp, unsigned idx )
+{
+ if (!cp->xmm[idx].dirty ||
+ (cp->xmm[idx].file != TGSI_FILE_INPUT && /* inputs are fetched into xmm & set dirty */
+ cp->xmm[idx].file != TGSI_FILE_OUTPUT &&
+ cp->xmm[idx].file != TGSI_FILE_TEMPORARY)) {
+ AOS_ERROR(cp, "invalid spill");
+ return;
+ }
+ else {
+ struct x86_reg oldval = get_reg_ptr(cp,
+ cp->xmm[idx].file,
+ cp->xmm[idx].idx);
+
+ if (0) debug_printf("\nspill %s[%d]",
+ files[cp->xmm[idx].file],
+ cp->xmm[idx].idx);
+
+ assert(cp->xmm[idx].dirty);
+ sse_movaps(cp->func, oldval, x86_make_reg(file_XMM, idx));
+ cp->xmm[idx].dirty = 0;
+ }
+}
+
+
+void aos_spill_all( struct aos_compilation *cp )
+{
+ unsigned i;
+
+ for (i = 0; i < 8; i++) {
+ if (cp->xmm[i].dirty)
+ spill(cp, i);
+ aos_release_xmm_reg(cp, i);
+ }
+}
+
+
+static struct x86_reg get_xmm_writable( struct aos_compilation *cp,
+ struct x86_reg reg )
+{
+ if (reg.file != file_XMM ||
+ cp->xmm[reg.idx].file != TGSI_FILE_NULL)
+ {
+ struct x86_reg tmp = aos_get_xmm_reg(cp);
+ sse_movaps(cp->func, tmp, reg);
+ reg = tmp;
+ }
+
+ cp->xmm[reg.idx].last_used = cp->insn_counter;
+ return reg;
+}
+
+static struct x86_reg get_xmm( struct aos_compilation *cp,
+ struct x86_reg reg )
+{
+ if (reg.file != file_XMM)
+ {
+ struct x86_reg tmp = aos_get_xmm_reg(cp);
+ sse_movaps(cp->func, tmp, reg);
+ reg = tmp;
+ }
+
+ cp->xmm[reg.idx].last_used = cp->insn_counter;
+ return reg;
+}
+
+
+/* Allocate an empty xmm register, either as a temporary or to later
+ * "adopt" as a shader reg.
+ */
+struct x86_reg aos_get_xmm_reg( struct aos_compilation *cp )
+{
+ unsigned i;
+ unsigned oldest = 0;
+ boolean found = FALSE;
+
+ for (i = 0; i < 8; i++)
+ if (cp->xmm[i].last_used != cp->insn_counter &&
+ cp->xmm[i].file == TGSI_FILE_NULL) {
+ oldest = i;
+ found = TRUE;
+ }
+
+ if (!found) {
+ for (i = 0; i < 8; i++)
+ if (cp->xmm[i].last_used < cp->xmm[oldest].last_used)
+ oldest = i;
+ }
+
+ /* Need to write out the old value?
+ */
+ if (cp->xmm[oldest].dirty)
+ spill(cp, oldest);
+
+ assert(cp->xmm[oldest].last_used != cp->insn_counter);
+
+ cp->xmm[oldest].file = TGSI_FILE_NULL;
+ cp->xmm[oldest].idx = 0;
+ cp->xmm[oldest].dirty = 0;
+ cp->xmm[oldest].last_used = cp->insn_counter;
+ return x86_make_reg(file_XMM, oldest);
+}
+
+void aos_release_xmm_reg( struct aos_compilation *cp,
+ unsigned idx )
+{
+ cp->xmm[idx].file = TGSI_FILE_NULL;
+ cp->xmm[idx].idx = 0;
+ cp->xmm[idx].dirty = 0;
+ cp->xmm[idx].last_used = 0;
+}
+
+
+static void aos_soft_release_xmm( struct aos_compilation *cp,
+ struct x86_reg reg )
+{
+ if (reg.file == file_XMM) {
+ assert(cp->xmm[reg.idx].last_used == cp->insn_counter);
+ cp->xmm[reg.idx].last_used = cp->insn_counter - 1;
+ }
+}
+
+
+
+/* Mark an xmm reg as holding the current copy of a shader reg.
+ */
+void aos_adopt_xmm_reg( struct aos_compilation *cp,
+ struct x86_reg reg,
+ unsigned file,
+ unsigned idx,
+ unsigned dirty )
+{
+ unsigned i;
+
+ if (reg.file != file_XMM) {
+ assert(0);
+ return;
+ }
+
+
+ /* If any xmm reg thinks it holds this shader reg, break the
+ * illusion.
+ */
+ for (i = 0; i < 8; i++) {
+ if (cp->xmm[i].file == file &&
+ cp->xmm[i].idx == idx)
+ {
+ /* If an xmm reg is already holding this shader reg, take into account its
+ * dirty flag...
+ */
+ dirty |= cp->xmm[i].dirty;
+ aos_release_xmm_reg(cp, i);
+ }
+ }
+
+ cp->xmm[reg.idx].file = file;
+ cp->xmm[reg.idx].idx = idx;
+ cp->xmm[reg.idx].dirty = dirty;
+ cp->xmm[reg.idx].last_used = cp->insn_counter;
+}
+
+
+/* Return a pointer to the in-memory copy of the reg, making sure it is up to date.
+ */
+static struct x86_reg aos_get_shader_reg_ptr( struct aos_compilation *cp,
+ unsigned file,
+ unsigned idx )
+{
+ unsigned i;
+
+ /* Ensure the in-memory copy of this reg is up-to-date
+ */
+ for (i = 0; i < 8; i++) {
+ if (cp->xmm[i].file == file &&
+ cp->xmm[i].idx == idx &&
+ cp->xmm[i].dirty) {
+ spill(cp, i);
+ }
+ }
+
+ return get_reg_ptr( cp, file, idx );
+}
+
+
+/* As above, but return a pointer. Note - this pointer may alias
+ * those returned by get_arg_ptr().
+ */
+static struct x86_reg get_dst_ptr( struct aos_compilation *cp,
+ const struct tgsi_full_dst_register *dst )
+{
+ unsigned file = dst->DstRegister.File;
+ unsigned idx = dst->DstRegister.Index;
+ unsigned i;
+
+
+ /* Ensure in-memory copy of this reg is up-to-date and invalidate
+ * any xmm copies.
+ */
+ for (i = 0; i < 8; i++) {
+ if (cp->xmm[i].file == file &&
+ cp->xmm[i].idx == idx)
+ {
+ if (cp->xmm[i].dirty)
+ spill(cp, i);
+
+ aos_release_xmm_reg(cp, i);
+ }
+ }
+
+ return get_reg_ptr( cp, file, idx );
+}
+
+
+
+
+
+/* Return an XMM reg if the argument is resident, otherwise return a
+ * base+offset pointer to the saved value.
+ */
+struct x86_reg aos_get_shader_reg( struct aos_compilation *cp,
+ unsigned file,
+ unsigned idx )
+{
+ unsigned i;
+
+ for (i = 0; i < 8; i++) {
+ if (cp->xmm[i].file == file &&
+ cp->xmm[i].idx == idx)
+ {
+ cp->xmm[i].last_used = cp->insn_counter;
+ return x86_make_reg(file_XMM, i);
+ }
+ }
+
+ /* If not found in the XMM register file, return an indirect
+ * reference to the in-memory copy:
+ */
+ return get_reg_ptr( cp, file, idx );
+}
+
+
+
+static struct x86_reg aos_get_shader_reg_xmm( struct aos_compilation *cp,
+ unsigned file,
+ unsigned idx )
+{
+ struct x86_reg reg = get_xmm( cp,
+ aos_get_shader_reg( cp, file, idx ) );
+
+ aos_adopt_xmm_reg( cp,
+ reg,
+ file,
+ idx,
+ FALSE );
+
+ return reg;
+}
+
+
+
+struct x86_reg aos_get_internal_xmm( struct aos_compilation *cp,
+ unsigned imm )
+{
+ return aos_get_shader_reg_xmm( cp, AOS_FILE_INTERNAL, imm );
+}
+
+
+struct x86_reg aos_get_internal( struct aos_compilation *cp,
+ unsigned imm )
+{
+ return aos_get_shader_reg( cp, AOS_FILE_INTERNAL, imm );
+}
+
+
+
+
+
+/* Emulate pshufd insn in regular SSE, if necessary:
+ */
+static void emit_pshufd( struct aos_compilation *cp,
+ struct x86_reg dst,
+ struct x86_reg arg0,
+ ubyte shuf )
+{
+ if (cp->have_sse2) {
+ sse2_pshufd(cp->func, dst, arg0, shuf);
+ }
+ else {
+ if (!eq(dst, arg0))
+ sse_movaps(cp->func, dst, arg0);
+
+ sse_shufps(cp->func, dst, dst, shuf);
+ }
+}
+
+/* Implement a partial write mask: shuffle the IMM_SWZ constant
+ * according to the writemask to build a per-channel mask, then
+ *    dst = (dst & mask) | (~mask & result)
+ * so written channels take the new result and unwritten channels
+ * keep their old value.
+ */
+static boolean mask_write( struct aos_compilation *cp,
+ struct x86_reg dst,
+ struct x86_reg result,
+ unsigned mask )
+{
+ struct x86_reg imm_swz = aos_get_internal_xmm(cp, IMM_SWZ);
+ struct x86_reg tmp = aos_get_xmm_reg(cp);
+
+ emit_pshufd(cp, tmp, imm_swz,
+ SHUF((mask & 1) ? 2 : 3,
+ (mask & 2) ? 2 : 3,
+ (mask & 4) ? 2 : 3,
+ (mask & 8) ? 2 : 3));
+
+ sse_andps(cp->func, dst, tmp);
+ sse_andnps(cp->func, tmp, result);
+ sse_orps(cp->func, dst, tmp);
+
+ aos_release_xmm_reg(cp, tmp.idx);
+ return TRUE;
+}
+
+
+
+
+/* Helper for writemask:
+ */
+static boolean emit_shuf_copy2( struct aos_compilation *cp,
+ struct x86_reg dst,
+ struct x86_reg arg0,
+ struct x86_reg arg1,
+ ubyte shuf )
+{
+ struct x86_reg tmp = aos_get_xmm_reg(cp);
+
+ emit_pshufd(cp, dst, arg1, shuf);
+ emit_pshufd(cp, tmp, arg0, shuf);
+ sse_shufps(cp->func, dst, tmp, SHUF(X, Y, Z, W));
+ emit_pshufd(cp, dst, dst, shuf);
+
+ aos_release_xmm_reg(cp, tmp.idx);
+ return TRUE;
+}
+
+
+
+#define SSE_SWIZZLE_NOOP ((0<<0) | (1<<2) | (2<<4) | (3<<6))
+
+
+/* Locate a source register and perform any required (simple) swizzle.
+ *
+ * Just fail on complex swizzles at this point.
+ */
+static struct x86_reg fetch_src( struct aos_compilation *cp,
+ const struct tgsi_full_src_register *src )
+{
+ struct x86_reg arg0 = aos_get_shader_reg(cp,
+ src->SrcRegister.File,
+ src->SrcRegister.Index);
+ unsigned i;
+ ubyte swz = 0;
+ unsigned negs = 0;
+ unsigned abs = 0;
+
+ for (i = 0; i < 4; i++) {
+ unsigned swizzle = tgsi_util_get_full_src_register_extswizzle( src, i );
+ unsigned neg = tgsi_util_get_full_src_register_sign_mode( src, i );
+
+ switch (swizzle) {
+ case TGSI_EXTSWIZZLE_ZERO:
+ case TGSI_EXTSWIZZLE_ONE:
+ AOS_ERROR(cp, "not supporting full swizzles yet in tgsi_aos_sse2");
+ break;
+
+ default:
+ swz |= (swizzle & 0x3) << (i * 2);
+ break;
+ }
+
+ switch (neg) {
+ case TGSI_UTIL_SIGN_TOGGLE:
+ negs |= (1<<i);
+ break;
+
+ case TGSI_UTIL_SIGN_KEEP:
+ break;
+
+ case TGSI_UTIL_SIGN_CLEAR:
+ abs |= (1<<i);
+ break;
+
+ default:
+ AOS_ERROR(cp, "unsupported sign-mode");
+ break;
+ }
+ }
+
+ if (swz != SSE_SWIZZLE_NOOP || negs != 0 || abs != 0) {
+ struct x86_reg dst = aos_get_xmm_reg(cp);
+
+ if (swz != SSE_SWIZZLE_NOOP)
+ emit_pshufd(cp, dst, arg0, swz);
+ else
+ sse_movaps(cp->func, dst, arg0);
+
+ if (negs && negs != 0xf) {
+ struct x86_reg imm_swz = aos_get_internal_xmm(cp, IMM_SWZ);
+ struct x86_reg tmp = aos_get_xmm_reg(cp);
+
+ /* Load 1,-1,0,0
+ * Use neg as arg to pshufd
+ * Multiply
+ */
+ emit_pshufd(cp, tmp, imm_swz,
+ SHUF((negs & 1) ? 1 : 0,
+ (negs & 2) ? 1 : 0,
+ (negs & 4) ? 1 : 0,
+ (negs & 8) ? 1 : 0));
+ sse_mulps(cp->func, dst, tmp);
+
+ aos_release_xmm_reg(cp, tmp.idx);
+ aos_soft_release_xmm(cp, imm_swz);
+ }
+ else if (negs) {
+ struct x86_reg imm_negs = aos_get_internal_xmm(cp, IMM_NEGS);
+ sse_mulps(cp->func, dst, imm_negs);
+ aos_soft_release_xmm(cp, imm_negs);
+ }
+
+
+ if (abs && abs != 0xf) {
+ AOS_ERROR(cp, "unsupported partial abs");
+ }
+ else if (abs) {
+ struct x86_reg neg = aos_get_internal(cp, IMM_NEGS);
+ struct x86_reg tmp = aos_get_xmm_reg(cp);
+
+ sse_movaps(cp->func, tmp, dst);
+ sse_mulps(cp->func, tmp, neg);
+ sse_maxps(cp->func, dst, tmp);
+
+ aos_release_xmm_reg(cp, tmp.idx);
+ aos_soft_release_xmm(cp, neg);
+ }
+
+ aos_soft_release_xmm(cp, arg0);
+ return dst;
+ }
+
+ return arg0;
+}
+
+static void x87_fld_src( struct aos_compilation *cp,
+ const struct tgsi_full_src_register *src,
+ unsigned channel )
+{
+ struct x86_reg arg0 = aos_get_shader_reg_ptr(cp,
+ src->SrcRegister.File,
+ src->SrcRegister.Index);
+
+ unsigned swizzle = tgsi_util_get_full_src_register_extswizzle( src, channel );
+ unsigned neg = tgsi_util_get_full_src_register_sign_mode( src, channel );
+
+ switch (swizzle) {
+ case TGSI_EXTSWIZZLE_ZERO:
+ x87_fldz( cp->func );
+ break;
+
+ case TGSI_EXTSWIZZLE_ONE:
+ x87_fld1( cp->func );
+ break;
+
+ default:
+ x87_fld( cp->func, x86_make_disp(arg0, (swizzle & 3) * sizeof(float)) );
+ break;
+ }
+
+
+ switch (neg) {
+ case TGSI_UTIL_SIGN_TOGGLE:
+ /* Flip the sign:
+ */
+ x87_fchs( cp->func );
+ break;
+
+ case TGSI_UTIL_SIGN_KEEP:
+ break;
+
+ case TGSI_UTIL_SIGN_CLEAR:
+ x87_fabs( cp->func );
+ break;
+
+ case TGSI_UTIL_SIGN_SET:
+ x87_fabs( cp->func );
+ x87_fchs( cp->func );
+ break;
+
+ default:
+ AOS_ERROR(cp, "unsupported sign-mode");
+ break;
+ }
+}
+
+
+
+
+
+
+/* Used to implement write masking. This and most of the other instructions
+ * here would be easier to implement if there had been a translation
+ * to a 2 argument format (dst/arg0, arg1) at the shader level before
+ * attempting to translate to x86/sse code.
+ */
+static void store_dest( struct aos_compilation *cp,
+ const struct tgsi_full_dst_register *reg,
+ struct x86_reg result )
+{
+ struct x86_reg dst;
+
+ switch (reg->DstRegister.WriteMask) {
+ case 0:
+ return;
+
+ case TGSI_WRITEMASK_XYZW:
+ aos_adopt_xmm_reg(cp,
+ get_xmm_writable(cp, result),
+ reg->DstRegister.File,
+ reg->DstRegister.Index,
+ TRUE);
+ return;
+ default:
+ break;
+ }
+
+ dst = aos_get_shader_reg_xmm(cp,
+ reg->DstRegister.File,
+ reg->DstRegister.Index);
+
+ switch (reg->DstRegister.WriteMask) {
+ case TGSI_WRITEMASK_X:
+ sse_movss(cp->func, dst, get_xmm(cp, result));
+ break;
+
+ case TGSI_WRITEMASK_ZW:
+ sse_shufps(cp->func, dst, get_xmm(cp, result), SHUF(X, Y, Z, W));
+ break;
+
+ case TGSI_WRITEMASK_XY:
+ result = get_xmm_writable(cp, result);
+ sse_shufps(cp->func, result, dst, SHUF(X, Y, Z, W));
+ dst = result;
+ break;
+
+ case TGSI_WRITEMASK_YZW:
+ result = get_xmm_writable(cp, result);
+ sse_movss(cp->func, result, dst);
+ dst = result;
+ break;
+
+ default:
+ mask_write(cp, dst, result, reg->DstRegister.WriteMask);
+ break;
+ }
+
+ aos_adopt_xmm_reg(cp,
+ dst,
+ reg->DstRegister.File,
+ reg->DstRegister.Index,
+ TRUE);
+
+}
+
+static void inject_scalar( struct aos_compilation *cp,
+ struct x86_reg dst,
+ struct x86_reg result,
+ ubyte swizzle )
+{
+ sse_shufps(cp->func, dst, dst, swizzle);
+ sse_movss(cp->func, dst, result);
+ sse_shufps(cp->func, dst, dst, swizzle);
+}
+
+
+static void store_scalar_dest( struct aos_compilation *cp,
+ const struct tgsi_full_dst_register *reg,
+ struct x86_reg result )
+{
+ unsigned writemask = reg->DstRegister.WriteMask;
+ struct x86_reg dst;
+
+ if (writemask != TGSI_WRITEMASK_X &&
+ writemask != TGSI_WRITEMASK_Y &&
+ writemask != TGSI_WRITEMASK_Z &&
+ writemask != TGSI_WRITEMASK_W &&
+ writemask != 0)
+ {
+ result = get_xmm_writable(cp, result); /* already true, right? */
+ sse_shufps(cp->func, result, result, SHUF(X,X,X,X));
+ store_dest(cp, reg, result);
+ return;
+ }
+
+ result = get_xmm(cp, result);
+ dst = aos_get_shader_reg_xmm(cp,
+ reg->DstRegister.File,
+ reg->DstRegister.Index);
+
+
+
+ switch (reg->DstRegister.WriteMask) {
+ case TGSI_WRITEMASK_X:
+ sse_movss(cp->func, dst, result);
+ break;
+
+ case TGSI_WRITEMASK_Y:
+ inject_scalar(cp, dst, result, SHUF(Y, X, Z, W));
+ break;
+
+ case TGSI_WRITEMASK_Z:
+ inject_scalar(cp, dst, result, SHUF(Z, Y, X, W));
+ break;
+
+ case TGSI_WRITEMASK_W:
+ inject_scalar(cp, dst, result, SHUF(W, Y, Z, X));
+ break;
+
+ default:
+ break;
+ }
+
+ aos_adopt_xmm_reg(cp,
+ dst,
+ reg->DstRegister.File,
+ reg->DstRegister.Index,
+ TRUE);
+}
+
+
+
+static void x87_fst_or_nop( struct x86_function *func,
+ unsigned writemask,
+ unsigned channel,
+ struct x86_reg ptr )
+{
+ assert(ptr.file == file_REG32);
+ if (writemask & (1<<channel))
+ x87_fst( func, x86_make_disp(ptr, channel * sizeof(float)) );
+}
+
+static void x87_fstp_or_pop( struct x86_function *func,
+ unsigned writemask,
+ unsigned channel,
+ struct x86_reg ptr )
+{
+ assert(ptr.file == file_REG32);
+ if (writemask & (1<<channel))
+ x87_fstp( func, x86_make_disp(ptr, channel * sizeof(float)) );
+ else
+ x87_fstp( func, x86_make_reg( file_x87, 0 ));
+}
+
+
+
+/* Store the value on top of the x87 stack to every channel selected
+ * by the destination writemask, popping it from the stack.
+ */
+static void x87_fstp_dest4( struct aos_compilation *cp,
+ const struct tgsi_full_dst_register *dst )
+{
+ struct x86_reg ptr = get_dst_ptr(cp, dst);
+ unsigned writemask = dst->DstRegister.WriteMask;
+
+ x87_fst_or_nop(cp->func, writemask, 0, ptr);
+ x87_fst_or_nop(cp->func, writemask, 1, ptr);
+ x87_fst_or_nop(cp->func, writemask, 2, ptr);
+ x87_fstp_or_pop(cp->func, writemask, 3, ptr);
+}
+
+/* Save the current x87 control word so that it can be restored after
+ * the generated code has switched rounding modes.
+ */
+static void save_fpu_state( struct aos_compilation *cp )
+{
+ x87_fnstcw( cp->func, x86_make_disp(cp->machine_EDX,
+ Offset(struct aos_machine, fpu_restore)));
+}
+
+static void restore_fpu_state( struct aos_compilation *cp )
+{
+ x87_fnclex(cp->func);
+ x87_fldcw( cp->func, x86_make_disp(cp->machine_EDX,
+ Offset(struct aos_machine, fpu_restore)));
+}
+
+static void set_fpu_round_neg_inf( struct aos_compilation *cp )
+{
+ if (cp->fpucntl != FPU_RND_NEG) {
+ cp->fpucntl = FPU_RND_NEG;
+ x87_fnclex(cp->func);
+ x87_fldcw( cp->func, x86_make_disp(cp->machine_EDX,
+ Offset(struct aos_machine, fpu_rnd_neg_inf)));
+ }
+}
+
+static void set_fpu_round_nearest( struct aos_compilation *cp )
+{
+ if (cp->fpucntl != FPU_RND_NEAREST) {
+ cp->fpucntl = FPU_RND_NEAREST;
+ x87_fnclex(cp->func);
+ x87_fldcw( cp->func, x86_make_disp(cp->machine_EDX,
+ Offset(struct aos_machine, fpu_rnd_nearest)));
+ }
+}
+
+#if 0
+static void x87_emit_ex2( struct aos_compilation *cp )
+{
+ struct x86_reg st0 = x86_make_reg(file_x87, 0);
+ struct x86_reg st1 = x86_make_reg(file_x87, 1);
+ int stack = cp->func->x87_stack;
+
+// set_fpu_round_neg_inf( cp );
+
+ x87_fld(cp->func, st0); /* a a */
+ x87_fprndint( cp->func ); /* int(a) a*/
+ x87_fsubr(cp->func, st1, st0); /* int(a) frc(a) */
+ x87_fxch(cp->func, st1); /* frc(a) int(a) */
+ x87_f2xm1(cp->func); /* (2^frc(a))-1 int(a) */
+ x87_fld1(cp->func); /* 1 (2^frc(a))-1 int(a) */
+ x87_faddp(cp->func, st1); /* 2^frac(a) int(a) */
+ x87_fscale(cp->func); /* (2^frac(a)*2^int(int(a))) int(a) */
+ /* 2^a int(a) */
+ x87_fstp(cp->func, st1); /* 2^a */
+
+ assert( stack == cp->func->x87_stack);
+
+}
+#endif
+
+#if 0
+static void PIPE_CDECL print_reg( const char *msg,
+ const float *reg )
+{
+ debug_printf("%s: %f %f %f %f\n", msg, reg[0], reg[1], reg[2], reg[3]);
+}
+#endif
+
+#if 0
+static void emit_print( struct aos_compilation *cp,
+ const char *message, /* must point to a static string! */
+ unsigned file,
+ unsigned idx )
+{
+ struct x86_reg ecx = x86_make_reg( file_REG32, reg_CX );
+ struct x86_reg arg = aos_get_shader_reg_ptr( cp, file, idx );
+ unsigned i;
+
+ /* There shouldn't be anything on the x87 stack. Can add this
+ * capacity later if need be.
+ */
+ assert(cp->func->x87_stack == 0);
+
+ /* For absolute correctness, need to spill/invalidate all XMM regs
+ * too. We're obviously not concerned about performance on this
+ * debug path, so here goes:
+ */
+ for (i = 0; i < 8; i++) {
+ if (cp->xmm[i].dirty)
+ spill(cp, i);
+
+ aos_release_xmm_reg(cp, i);
+ }
+
+ /* Push caller-save (ie scratch) regs.
+ */
+ x86_cdecl_caller_push_regs( cp->func );
+
+
+ /* Push the arguments:
+ */
+ x86_lea( cp->func, ecx, arg );
+ x86_push( cp->func, ecx );
+ x86_push_imm32( cp->func, (int)message );
+
+ /* Call the helper. Could call debug_printf directly, but
+ * print_reg is a nice place to put a breakpoint if need be.
+ */
+ x86_mov_reg_imm( cp->func, ecx, (int)print_reg );
+ x86_call( cp->func, ecx );
+ x86_pop( cp->func, ecx );
+ x86_pop( cp->func, ecx );
+
+ /* Pop caller-save regs
+ */
+ x86_cdecl_caller_pop_regs( cp->func );
+
+ /* Done...
+ */
+}
+#endif
+
+/**
+ * The traditional instructions. All operate on internal registers
+ * and ignore write masks and swizzling issues.
+ */
+
+static boolean emit_ABS( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg neg = aos_get_internal(cp, IMM_NEGS);
+ struct x86_reg tmp = aos_get_xmm_reg(cp);
+
+ sse_movaps(cp->func, tmp, arg0);
+ sse_mulps(cp->func, tmp, neg);
+ sse_maxps(cp->func, tmp, arg0);
+
+ store_dest(cp, &op->FullDstRegisters[0], tmp);
+ return TRUE;
+}
+
+static boolean emit_ADD( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg arg1 = fetch_src(cp, &op->FullSrcRegisters[1]);
+ struct x86_reg dst = get_xmm_writable(cp, arg0);
+
+ sse_addps(cp->func, dst, arg1);
+
+ store_dest(cp, &op->FullDstRegisters[0], dst);
+ return TRUE;
+}
+
+static boolean emit_COS( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ x87_fld_src(cp, &op->FullSrcRegisters[0], 0);
+ x87_fcos(cp->func);
+ x87_fstp_dest4(cp, &op->FullDstRegisters[0]);
+ return TRUE;
+}
+
+/* The dot-product instructions don't map that well to SSE:
+ * XXX: produces wrong results -- disabled.
+ */
+static boolean emit_DP3( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg arg1 = fetch_src(cp, &op->FullSrcRegisters[1]);
+ struct x86_reg tmp = aos_get_xmm_reg(cp);
+ struct x86_reg dst = get_xmm_writable(cp, arg0);
+
+ sse_mulps(cp->func, dst, arg1);
+ /* Now the hard bit: sum the first 3 values:
+ */
+ sse_movhlps(cp->func, tmp, dst);
+ sse_addss(cp->func, dst, tmp); /* a*x+c*z, b*y, ?, ? */
+ emit_pshufd(cp, tmp, dst, SHUF(Y,X,W,Z));
+ sse_addss(cp->func, dst, tmp);
+
+ aos_release_xmm_reg(cp, tmp.idx);
+ store_scalar_dest(cp, &op->FullDstRegisters[0], dst);
+ return TRUE;
+}
+
+static boolean emit_DP4( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg arg1 = fetch_src(cp, &op->FullSrcRegisters[1]);
+ struct x86_reg tmp = aos_get_xmm_reg(cp);
+ struct x86_reg dst = get_xmm_writable(cp, arg0);
+
+ sse_mulps(cp->func, dst, arg1);
+
+ /* Now the hard bit: sum the values:
+ */
+ sse_movhlps(cp->func, tmp, dst);
+ sse_addps(cp->func, dst, tmp); /* a*x+c*z, b*y+d*w, a*x+c*z, b*y+d*w */
+ emit_pshufd(cp, tmp, dst, SHUF(Y,X,W,Z));
+ sse_addss(cp->func, dst, tmp);
+
+ aos_release_xmm_reg(cp, tmp.idx);
+ store_scalar_dest(cp, &op->FullDstRegisters[0], dst);
+ return TRUE;
+}
+
+static boolean emit_DPH( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg arg1 = fetch_src(cp, &op->FullSrcRegisters[1]);
+ struct x86_reg tmp = aos_get_xmm_reg(cp);
+ struct x86_reg dst = get_xmm_writable(cp, arg0);
+
+ sse_mulps(cp->func, dst, arg1);
+
+ /* Now the hard bit: sum the values (from DP3):
+ */
+ sse_movhlps(cp->func, tmp, dst);
+ sse_addss(cp->func, dst, tmp); /* a*x+c*z, b*y, ?, ? */
+ emit_pshufd(cp, tmp, dst, SHUF(Y,X,W,Z));
+ sse_addss(cp->func, dst, tmp);
+ emit_pshufd(cp, tmp, arg1, SHUF(W,W,W,W));
+ sse_addss(cp->func, dst, tmp);
+
+ aos_release_xmm_reg(cp, tmp.idx);
+ store_scalar_dest(cp, &op->FullDstRegisters[0], dst);
+ return TRUE;
+}
+
+static boolean emit_DST( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg arg1 = fetch_src(cp, &op->FullSrcRegisters[1]);
+ struct x86_reg dst = aos_get_xmm_reg(cp);
+ struct x86_reg tmp = aos_get_xmm_reg(cp);
+ struct x86_reg ones = aos_get_internal(cp, IMM_ONES);
+
+/* dst[0] = 1.0 * 1.0F; */
+/* dst[1] = arg0[1] * arg1[1]; */
+/* dst[2] = arg0[2] * 1.0; */
+/* dst[3] = 1.0 * arg1[3]; */
+
+ emit_shuf_copy2(cp, dst, arg0, ones, SHUF(X,W,Z,Y));
+ emit_shuf_copy2(cp, tmp, arg1, ones, SHUF(X,Z,Y,W));
+ sse_mulps(cp->func, dst, tmp);
+
+ aos_release_xmm_reg(cp, tmp.idx);
+ store_dest(cp, &op->FullDstRegisters[0], dst);
+ return TRUE;
+}
+
+static boolean emit_LG2( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ x87_fld1(cp->func); /* 1 */
+ x87_fld_src(cp, &op->FullSrcRegisters[0], 0); /* a0 1 */
+ x87_fyl2x(cp->func); /* log2(a0) */
+ x87_fstp_dest4(cp, &op->FullDstRegisters[0]);
+ return TRUE;
+}
+
+#if 0
+static boolean emit_EX2( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ x87_fld_src(cp, &op->FullSrcRegisters[0], 0);
+ x87_emit_ex2(cp);
+ x87_fstp_dest4(cp, &op->FullDstRegisters[0]);
+ return TRUE;
+}
+#endif
+
+
+static boolean emit_FLR( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg dst = get_dst_ptr(cp, &op->FullDstRegisters[0]);
+ unsigned writemask = op->FullDstRegisters[0].DstRegister.WriteMask;
+ int i;
+
+ set_fpu_round_neg_inf( cp );
+
+ /* Load all sources first to avoid aliasing
+ */
+ for (i = 3; i >= 0; i--) {
+ if (writemask & (1<<i)) {
+ x87_fld_src(cp, &op->FullSrcRegisters[0], i);
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (writemask & (1<<i)) {
+ x87_fprndint( cp->func );
+ x87_fstp(cp->func, x86_make_disp(dst, i*4));
+ }
+ }
+
+ return TRUE;
+}
+
+
+static boolean emit_RND( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg dst = get_dst_ptr(cp, &op->FullDstRegisters[0]);
+ unsigned writemask = op->FullDstRegisters[0].DstRegister.WriteMask;
+ int i;
+
+ set_fpu_round_nearest( cp );
+
+ /* Load all sources first to avoid aliasing
+ */
+ for (i = 3; i >= 0; i--) {
+ if (writemask & (1<<i)) {
+ x87_fld_src(cp, &op->FullSrcRegisters[0], i);
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (writemask & (1<<i)) {
+ x87_fprndint( cp->func );
+ x87_fstp(cp->func, x86_make_disp(dst, i*4));
+ }
+ }
+
+ return TRUE;
+}
+
+
+static boolean emit_FRC( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg dst = get_dst_ptr(cp, &op->FullDstRegisters[0]);
+ struct x86_reg st0 = x86_make_reg(file_x87, 0);
+ struct x86_reg st1 = x86_make_reg(file_x87, 1);
+ unsigned writemask = op->FullDstRegisters[0].DstRegister.WriteMask;
+ int i;
+
+ set_fpu_round_neg_inf( cp );
+
+ /* suck all the source values onto the stack before writing out any
+ * dst, which may alias...
+ */
+ for (i = 3; i >= 0; i--) {
+ if (writemask & (1<<i)) {
+ x87_fld_src(cp, &op->FullSrcRegisters[0], i);
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (writemask & (1<<i)) {
+ x87_fld(cp->func, st0); /* a a */
+ x87_fprndint( cp->func ); /* flr(a) a */
+ x87_fsubp(cp->func, st1); /* frc(a) */
+ x87_fstp(cp->func, x86_make_disp(dst, i*4));
+ }
+ }
+
+ return TRUE;
+}
+
+
+
+
+
+
+static boolean emit_LIT( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg ecx = x86_make_reg( file_REG32, reg_CX );
+ unsigned writemask = op->FullDstRegisters[0].DstRegister.WriteMask;
+ unsigned lit_count = cp->lit_count++;
+ struct x86_reg result, arg0;
+ unsigned i;
+
+#if 1
+ /* For absolute correctness, need to spill/invalidate all XMM regs
+ * too.
+ */
+ for (i = 0; i < 8; i++) {
+ if (cp->xmm[i].dirty)
+ spill(cp, i);
+ aos_release_xmm_reg(cp, i);
+ }
+#endif
+
+ if (writemask != TGSI_WRITEMASK_XYZW)
+ result = x86_make_disp(cp->machine_EDX, Offset(struct aos_machine, tmp[0]));
+ else
+ result = get_dst_ptr(cp, &op->FullDstRegisters[0]);
+
+
+ arg0 = fetch_src( cp, &op->FullSrcRegisters[0] );
+ if (arg0.file == file_XMM) {
+ struct x86_reg tmp = x86_make_disp(cp->machine_EDX,
+ Offset(struct aos_machine, tmp[1]));
+ sse_movaps( cp->func, tmp, arg0 );
+ arg0 = tmp;
+ }
+
+
+
+ /* Push caller-save (ie scratch) regs.
+ */
+ x86_cdecl_caller_push_regs( cp->func );
+
+ /* Push the arguments:
+ */
+ x86_push_imm32( cp->func, lit_count );
+
+ x86_lea( cp->func, ecx, arg0 );
+ x86_push( cp->func, ecx );
+
+ x86_lea( cp->func, ecx, result );
+ x86_push( cp->func, ecx );
+
+ x86_push( cp->func, cp->machine_EDX );
+
+ if (lit_count < MAX_LIT_INFO) {
+ x86_mov( cp->func, ecx, x86_make_disp( cp->machine_EDX,
+ Offset(struct aos_machine, lit_info) +
+ lit_count * sizeof(struct lit_info) +
+ Offset(struct lit_info, func)));
+ }
+ else {
+ x86_mov_reg_imm( cp->func, ecx, (int)aos_do_lit );
+ }
+
+ x86_call( cp->func, ecx );
+
+ x86_pop( cp->func, ecx ); /* fixme... */
+ x86_pop( cp->func, ecx );
+ x86_pop( cp->func, ecx );
+ x86_pop( cp->func, ecx );
+
+ x86_cdecl_caller_pop_regs( cp->func );
+
+ if (writemask != TGSI_WRITEMASK_XYZW) {
+ store_dest( cp,
+ &op->FullDstRegisters[0],
+ get_xmm_writable( cp, result ) );
+ }
+
+ return TRUE;
+}
+
+#if 0
+static boolean emit_inline_LIT( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg dst = get_dst_ptr(cp, &op->FullDstRegisters[0]);
+ unsigned writemask = op->FullDstRegisters[0].DstRegister.WriteMask;
+
+ if (writemask & TGSI_WRITEMASK_YZ) {
+ struct x86_reg st1 = x86_make_reg(file_x87, 1);
+ struct x86_reg st2 = x86_make_reg(file_x87, 2);
+
+ /* a1' = a1 <= 0 ? 1 : a1;
+ */
+ x87_fldz(cp->func); /* 1 0 */
+#if 1
+ x87_fld1(cp->func); /* 1 0 */
+#else
+ /* Correct but slow due to fp exceptions generated in fyl2x - fix me.
+ */
+ x87_fldz(cp->func); /* 1 0 */
+#endif
+ x87_fld_src(cp, &op->FullSrcRegisters[0], 1); /* a1 1 0 */
+ x87_fcomi(cp->func, st2); /* a1 1 0 */
+ x87_fcmovb(cp->func, st1); /* a1' 1 0 */
+ x87_fstp(cp->func, st1); /* a1' 0 */
+ x87_fstp(cp->func, st1); /* a1' */
+
+ x87_fld_src(cp, &op->FullSrcRegisters[0], 3); /* a3 a1' */
+ x87_fxch(cp->func, st1); /* a1' a3 */
+
+
+ /* Compute pow(a1, a3)
+ */
+ x87_fyl2x(cp->func); /* a3*log2(a1) */
+ x87_emit_ex2( cp ); /* 2^(a3*log2(a1)) */
+
+
+ /* a0' = max2(a0, 0):
+ */
+ x87_fldz(cp->func); /* 0 r2 */
+ x87_fld_src(cp, &op->FullSrcRegisters[0], 0); /* a0 0 r2 */
+ x87_fcomi(cp->func, st1);
+ x87_fcmovb(cp->func, st1); /* a0' 0 r2 */
+
+ x87_fst_or_nop(cp->func, writemask, 1, dst); /* result[1] = a0' */
+
+ x87_fcomi(cp->func, st1); /* a0' 0 r2 */
+ x87_fcmovnbe(cp->func, st2); /* r2' 0' r2 */
+
+ x87_fstp_or_pop(cp->func, writemask, 2, dst); /* 0 r2 */
+ x87_fpop(cp->func); /* r2 */
+ x87_fpop(cp->func);
+ }
+
+ if (writemask & TGSI_WRITEMASK_XW) {
+ x87_fld1(cp->func);
+ x87_fst_or_nop(cp->func, writemask, 0, dst);
+ x87_fstp_or_pop(cp->func, writemask, 3, dst);
+ }
+
+ return TRUE;
+}
+#endif
+
+
+
+static boolean emit_MAX( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg arg1 = fetch_src(cp, &op->FullSrcRegisters[1]);
+ struct x86_reg dst = get_xmm_writable(cp, arg0);
+
+ sse_maxps(cp->func, dst, arg1);
+
+ store_dest(cp, &op->FullDstRegisters[0], dst);
+ return TRUE;
+}
+
+
+static boolean emit_MIN( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg arg1 = fetch_src(cp, &op->FullSrcRegisters[1]);
+ struct x86_reg dst = get_xmm_writable(cp, arg0);
+
+ sse_minps(cp->func, dst, arg1);
+
+ store_dest(cp, &op->FullDstRegisters[0], dst);
+ return TRUE;
+}
+
+static boolean emit_MOV( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg dst = get_xmm_writable(cp, arg0);
+
+ /* potentially nothing to do */
+
+ store_dest(cp, &op->FullDstRegisters[0], dst);
+ return TRUE;
+}
+
+static boolean emit_MUL( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg arg1 = fetch_src(cp, &op->FullSrcRegisters[1]);
+ struct x86_reg dst = get_xmm_writable(cp, arg0);
+
+ sse_mulps(cp->func, dst, arg1);
+
+ store_dest(cp, &op->FullDstRegisters[0], dst);
+ return TRUE;
+}
+
+
+static boolean emit_MAD( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg arg1 = fetch_src(cp, &op->FullSrcRegisters[1]);
+ struct x86_reg arg2 = fetch_src(cp, &op->FullSrcRegisters[2]);
+
+ /* If we can't clobber old contents of arg0, get a temporary & copy
+ * it there, then clobber it...
+ */
+ arg0 = get_xmm_writable(cp, arg0);
+
+ sse_mulps(cp->func, arg0, arg1);
+ sse_addps(cp->func, arg0, arg2);
+ store_dest(cp, &op->FullDstRegisters[0], arg0);
+ return TRUE;
+}
+
+
+
+/* A wrapper for powf().
+ * Makes sure it is cdecl and operates on floats.
+ */
+static float PIPE_CDECL _powerf( float x, float y )
+{
+#if FAST_MATH
+ return util_fast_pow(x, y);
+#else
+ return powf( x, y );
+#endif
+}
+
+#if FAST_MATH
+static float PIPE_CDECL _exp2(float x)
+{
+ return util_fast_exp2(x);
+}
+#endif
+
+
+/* Really not sufficient -- need to check for conditions that could
+ * generate inf/nan values, which will slow things down hugely.
+ */
+static boolean emit_POW( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+#if 0
+ x87_fld_src(cp, &op->FullSrcRegisters[1], 0); /* a1.x */
+ x87_fld_src(cp, &op->FullSrcRegisters[0], 0); /* a0.x a1.x */
+ x87_fyl2x(cp->func); /* a1*log2(a0) */
+
+ x87_emit_ex2( cp ); /* 2^(a1*log2(a0)) */
+
+ x87_fstp_dest4(cp, &op->FullDstRegisters[0]);
+#else
+ uint i;
+
+ /* For absolute correctness, need to spill/invalidate all XMM regs
+ * too.
+ */
+ for (i = 0; i < 8; i++) {
+ if (cp->xmm[i].dirty)
+ spill(cp, i);
+ aos_release_xmm_reg(cp, i);
+ }
+
+ /* Push caller-save (ie scratch) regs.
+ */
+ x86_cdecl_caller_push_regs( cp->func );
+
+ x86_lea( cp->func, cp->stack_ESP, x86_make_disp(cp->stack_ESP, -8) );
+
+ x87_fld_src( cp, &op->FullSrcRegisters[1], 0 );
+ x87_fstp( cp->func, x86_make_disp( cp->stack_ESP, 4 ) );
+ x87_fld_src( cp, &op->FullSrcRegisters[0], 0 );
+ x87_fstp( cp->func, x86_make_disp( cp->stack_ESP, 0 ) );
+
+ /* tmp_EAX has been pushed & will be restored below */
+ x86_mov_reg_imm( cp->func, cp->tmp_EAX, (unsigned long) _powerf );
+ x86_call( cp->func, cp->tmp_EAX );
+
+ x86_lea( cp->func, cp->stack_ESP, x86_make_disp(cp->stack_ESP, 8) );
+
+ x86_cdecl_caller_pop_regs( cp->func );
+
+ /* Note retval on x87 stack:
+ */
+ cp->func->x87_stack++;
+
+ x87_fstp_dest4( cp, &op->FullDstRegisters[0] );
+#endif
+ return TRUE;
+}
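+
+/* For reference, a scalar C sketch of what the sequence emitted above
+ * computes per vertex (illustration only, not generated code; _powerf is
+ * the wrapper defined earlier, and the x87 store broadcasts the scalar
+ * result to the written components):
+ */
+#if 0
+static void pow_reference( float *dst, const float *src0, const float *src1 )
+{
+   float r = _powerf( src0[0], src1[0] );   /* cdecl call through tmp_EAX */
+   dst[0] = dst[1] = dst[2] = dst[3] = r;   /* x87_fstp_dest4 */
+}
+#endif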
+
+
+#if FAST_MATH
+static boolean emit_EXPBASE2( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ uint i;
+
+ /* For absolute correctness, need to spill/invalidate all XMM regs
+ * too.
+ */
+ for (i = 0; i < 8; i++) {
+ if (cp->xmm[i].dirty)
+ spill(cp, i);
+ aos_release_xmm_reg(cp, i);
+ }
+
+ /* Push caller-save (ie scratch) regs.
+ */
+ x86_cdecl_caller_push_regs( cp->func );
+
+ x86_lea( cp->func, cp->stack_ESP, x86_make_disp(cp->stack_ESP, -4) );
+
+ x87_fld_src( cp, &op->FullSrcRegisters[0], 0 );
+ x87_fstp( cp->func, x86_make_disp( cp->stack_ESP, 0 ) );
+
+ /* tmp_EAX has been pushed & will be restored below */
+ x86_mov_reg_imm( cp->func, cp->tmp_EAX, (unsigned long) _exp2 );
+ x86_call( cp->func, cp->tmp_EAX );
+
+ x86_lea( cp->func, cp->stack_ESP, x86_make_disp(cp->stack_ESP, 4) );
+
+ x86_cdecl_caller_pop_regs( cp->func );
+
+ /* Note retval on x87 stack:
+ */
+ cp->func->x87_stack++;
+
+ x87_fstp_dest4( cp, &op->FullDstRegisters[0] );
+
+ return TRUE;
+}
+#endif
+
+
+static boolean emit_RCP( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg dst = aos_get_xmm_reg(cp);
+
+ if (cp->have_sse2) {
+ sse2_rcpss(cp->func, dst, arg0);
+ /* extend precision here...
+ */
+ }
+ else {
+ struct x86_reg ones = aos_get_internal(cp, IMM_ONES);
+ sse_movss(cp->func, dst, ones);
+ sse_divss(cp->func, dst, arg0);
+ }
+
+ store_scalar_dest(cp, &op->FullDstRegisters[0], dst);
+ return TRUE;
+}
+
+
+/* Although rsqrtps() and rcpps() are low precision on some/all SSE
+ * implementations, it is possible to improve their precision at
+ * fairly low cost, using a Newton-Raphson step, as below:
+ *
+ * x1 = 2 * rcpps(a) - a * rcpps(a) * rcpps(a)
+ * x1 = 0.5 * rsqrtps(a) * [3.0 - (a * rsqrtps(a))* rsqrtps(a)]
+ * or:
+ * x1 = rsqrtps(a) * [1.5 - .5 * a * rsqrtps(a) * rsqrtps(a)]
+ *
+ *
+ * See: http://softwarecommunity.intel.com/articles/eng/1818.htm
+ */
+static boolean emit_RSQ( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ if (0) {
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg r = aos_get_xmm_reg(cp);
+ sse_rsqrtss(cp->func, r, arg0);
+ store_scalar_dest(cp, &op->FullDstRegisters[0], r);
+ return TRUE;
+ }
+ else {
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg r = aos_get_xmm_reg(cp);
+
+ struct x86_reg neg_half = get_reg_ptr( cp, AOS_FILE_INTERNAL, IMM_RSQ );
+ struct x86_reg one_point_five = x86_make_disp( neg_half, 4 );
+ struct x86_reg src = get_xmm_writable( cp, arg0 );
+ struct x86_reg neg = aos_get_internal(cp, IMM_NEGS);
+ struct x86_reg tmp = aos_get_xmm_reg(cp);
+
+ sse_movaps(cp->func, tmp, src);
+ sse_mulps(cp->func, tmp, neg);
+ sse_maxps(cp->func, tmp, src);
+
+ sse_rsqrtss( cp->func, r, tmp ); /* rsqrtss(a) */
+ sse_mulss( cp->func, tmp, neg_half ); /* -.5 * a */
+ sse_mulss( cp->func, tmp, r ); /* -.5 * a * r */
+ sse_mulss( cp->func, tmp, r ); /* -.5 * a * r * r */
+ sse_addss( cp->func, tmp, one_point_five ); /* 1.5 - .5 * a * r * r */
+ sse_mulss( cp->func, r, tmp ); /* r * (1.5 - .5 * a * r * r) */
+
+ store_scalar_dest(cp, &op->FullDstRegisters[0], r);
+
+ aos_release_xmm_reg(cp, tmp.idx);
+
+ return TRUE;
+ }
+}
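+
+/* Scalar sketch of the refinement above, for reference (illustration only;
+ * rsqrt_estimate() stands in for the low-precision rsqrtss result):
+ */
+#if 0
+static float rsq_reference( float a )
+{
+   float x = fabsf( a );                    /* the mulps(-1)/maxps pair above */
+   float r = rsqrt_estimate( x );
+   return r * (1.5f - 0.5f * x * r * r);    /* one Newton-Raphson step */
+}
+#endif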
+
+
+static boolean emit_SGE( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg arg1 = fetch_src(cp, &op->FullSrcRegisters[1]);
+ struct x86_reg ones = aos_get_internal(cp, IMM_ONES);
+ struct x86_reg dst = get_xmm_writable(cp, arg0);
+
+ sse_cmpps(cp->func, dst, arg1, cc_NotLessThan);
+ sse_andps(cp->func, dst, ones);
+
+ store_dest(cp, &op->FullDstRegisters[0], dst);
+ return TRUE;
+}
+
+static boolean emit_SIN( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ x87_fld_src(cp, &op->FullSrcRegisters[0], 0);
+ x87_fsin(cp->func);
+ x87_fstp_dest4(cp, &op->FullDstRegisters[0]);
+ return TRUE;
+}
+
+
+
+static boolean emit_SLT( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg arg1 = fetch_src(cp, &op->FullSrcRegisters[1]);
+ struct x86_reg ones = aos_get_internal(cp, IMM_ONES);
+ struct x86_reg dst = get_xmm_writable(cp, arg0);
+
+ sse_cmpps(cp->func, dst, arg1, cc_LessThan);
+ sse_andps(cp->func, dst, ones);
+
+ store_dest(cp, &op->FullDstRegisters[0], dst);
+ return TRUE;
+}
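+
+/* The cmpps/andps pairs in emit_SGE/emit_SLT rely on the compare producing
+ * an all-ones or all-zero mask per channel; ANDing that mask with 1.0f
+ * yields the 1.0/0.0 result TGSI expects.  Scalar sketch (illustration
+ * only):
+ */
+#if 0
+static float sge_reference( float a, float b )
+{
+   union { float f; unsigned u; } one, r;
+   one.f = 1.0f;
+   r.u = (a >= b ? 0xffffffffu : 0u) & one.u;
+   return r.f;                              /* 1.0f when a >= b, else 0.0f */
+}
+#endif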
+
+static boolean emit_SUB( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg arg1 = fetch_src(cp, &op->FullSrcRegisters[1]);
+ struct x86_reg dst = get_xmm_writable(cp, arg0);
+
+ sse_subps(cp->func, dst, arg1);
+
+ store_dest(cp, &op->FullDstRegisters[0], dst);
+ return TRUE;
+}
+
+static boolean emit_TRUNC( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg tmp0 = aos_get_xmm_reg(cp);
+
+ sse2_cvttps2dq(cp->func, tmp0, arg0);
+ sse2_cvtdq2ps(cp->func, tmp0, tmp0);
+
+ store_dest(cp, &op->FullDstRegisters[0], tmp0);
+ return TRUE;
+}
+
+static boolean emit_XPD( struct aos_compilation *cp, const struct tgsi_full_instruction *op )
+{
+ struct x86_reg arg0 = fetch_src(cp, &op->FullSrcRegisters[0]);
+ struct x86_reg arg1 = fetch_src(cp, &op->FullSrcRegisters[1]);
+ struct x86_reg tmp0 = aos_get_xmm_reg(cp);
+ struct x86_reg tmp1 = aos_get_xmm_reg(cp);
+
+ emit_pshufd(cp, tmp1, arg1, SHUF(Y, Z, X, W));
+ sse_mulps(cp->func, tmp1, arg0);
+ emit_pshufd(cp, tmp0, arg0, SHUF(Y, Z, X, W));
+ sse_mulps(cp->func, tmp0, arg1);
+ sse_subps(cp->func, tmp1, tmp0);
+ sse_shufps(cp->func, tmp1, tmp1, SHUF(Y, Z, X, W));
+
+/* dst[2] = arg0[0] * arg1[1] - arg0[1] * arg1[0]; */
+/* dst[0] = arg0[1] * arg1[2] - arg0[2] * arg1[1]; */
+/* dst[1] = arg0[2] * arg1[0] - arg0[0] * arg1[2]; */
+/* dst[3] is undef */
+
+
+ aos_release_xmm_reg(cp, tmp0.idx);
+ store_dest(cp, &op->FullDstRegisters[0], tmp1);
+ return TRUE;
+}
+
+
+
+static boolean
+emit_instruction( struct aos_compilation *cp,
+ struct tgsi_full_instruction *inst )
+{
+ x87_assert_stack_empty(cp->func);
+
+ switch( inst->Instruction.Opcode ) {
+ case TGSI_OPCODE_MOV:
+ return emit_MOV( cp, inst );
+
+ case TGSI_OPCODE_LIT:
+ return emit_LIT(cp, inst);
+
+ case TGSI_OPCODE_RCP:
+ return emit_RCP(cp, inst);
+
+ case TGSI_OPCODE_RSQ:
+ return emit_RSQ(cp, inst);
+
+ case TGSI_OPCODE_EXP:
+ /*return emit_EXP(cp, inst);*/
+ return FALSE;
+
+ case TGSI_OPCODE_LOG:
+ /*return emit_LOG(cp, inst);*/
+ return FALSE;
+
+ case TGSI_OPCODE_MUL:
+ return emit_MUL(cp, inst);
+
+ case TGSI_OPCODE_ADD:
+ return emit_ADD(cp, inst);
+
+ case TGSI_OPCODE_DP3:
+ return emit_DP3(cp, inst);
+
+ case TGSI_OPCODE_DP4:
+ return emit_DP4(cp, inst);
+
+ case TGSI_OPCODE_DST:
+ return emit_DST(cp, inst);
+
+ case TGSI_OPCODE_MIN:
+ return emit_MIN(cp, inst);
+
+ case TGSI_OPCODE_MAX:
+ return emit_MAX(cp, inst);
+
+ case TGSI_OPCODE_SLT:
+ return emit_SLT(cp, inst);
+
+ case TGSI_OPCODE_SGE:
+ return emit_SGE(cp, inst);
+
+ case TGSI_OPCODE_MAD:
+ return emit_MAD(cp, inst);
+
+ case TGSI_OPCODE_SUB:
+ return emit_SUB(cp, inst);
+
+ case TGSI_OPCODE_LRP:
+// return emit_LERP(cp, inst);
+ return FALSE;
+
+ case TGSI_OPCODE_FRC:
+ return emit_FRC(cp, inst);
+
+ case TGSI_OPCODE_CLAMP:
+// return emit_CLAMP(cp, inst);
+ return FALSE;
+
+ case TGSI_OPCODE_FLR:
+ return emit_FLR(cp, inst);
+
+ case TGSI_OPCODE_ROUND:
+ return emit_RND(cp, inst);
+
+ case TGSI_OPCODE_EX2:
+#if FAST_MATH
+ return emit_EXPBASE2(cp, inst);
+#elif 0
+ /* this seems to fail for "larger" exponents.
+ * See glean tvertProg1's EX2 test.
+ */
+ return emit_EX2(cp, inst);
+#else
+ return FALSE;
+#endif
+
+ case TGSI_OPCODE_LG2:
+ return emit_LG2(cp, inst);
+
+ case TGSI_OPCODE_POW:
+ return emit_POW(cp, inst);
+
+ case TGSI_OPCODE_XPD:
+ return emit_XPD(cp, inst);
+
+ case TGSI_OPCODE_ABS:
+ return emit_ABS(cp, inst);
+
+ case TGSI_OPCODE_DPH:
+ return emit_DPH(cp, inst);
+
+ case TGSI_OPCODE_COS:
+ return emit_COS(cp, inst);
+
+ case TGSI_OPCODE_SIN:
+ return emit_SIN(cp, inst);
+
+ case TGSI_OPCODE_TRUNC:
+ return emit_TRUNC(cp, inst);
+
+ case TGSI_OPCODE_END:
+ return TRUE;
+
+ default:
+ return FALSE;
+ }
+}
+
+
+static boolean emit_viewport( struct aos_compilation *cp )
+{
+ struct x86_reg pos = aos_get_shader_reg_xmm(cp,
+ TGSI_FILE_OUTPUT,
+ cp->vaos->draw->vs.position_output );
+
+ struct x86_reg scale = x86_make_disp(cp->machine_EDX,
+ Offset(struct aos_machine, scale));
+
+ struct x86_reg translate = x86_make_disp(cp->machine_EDX,
+ Offset(struct aos_machine, translate));
+
+ sse_mulps(cp->func, pos, scale);
+ sse_addps(cp->func, pos, translate);
+
+ aos_adopt_xmm_reg( cp,
+ pos,
+ TGSI_FILE_OUTPUT,
+ cp->vaos->draw->vs.position_output,
+ TRUE );
+ return TRUE;
+}
+
+
+/* This is useful for seeing the results on softpipe.  It doesn't
+ * do proper clipping, just assumes the backend can do it during
+ * rasterization -- for debug only...
+ */
+static boolean emit_rhw_viewport( struct aos_compilation *cp )
+{
+ struct x86_reg tmp = aos_get_xmm_reg(cp);
+ struct x86_reg pos = aos_get_shader_reg_xmm(cp,
+ TGSI_FILE_OUTPUT,
+ cp->vaos->draw->vs.position_output);
+
+ struct x86_reg scale = x86_make_disp(cp->machine_EDX,
+ Offset(struct aos_machine, scale));
+
+ struct x86_reg translate = x86_make_disp(cp->machine_EDX,
+ Offset(struct aos_machine, translate));
+
+
+
+ emit_pshufd(cp, tmp, pos, SHUF(W, W, W, W));
+ sse2_rcpss(cp->func, tmp, tmp);
+ sse_shufps(cp->func, tmp, tmp, SHUF(X, X, X, X));
+
+ sse_mulps(cp->func, pos, scale);
+ sse_mulps(cp->func, pos, tmp);
+ sse_addps(cp->func, pos, translate);
+
+   /* Set pos[3] = 1/w
+    */
+ mask_write(cp, pos, tmp, TGSI_WRITEMASK_W);
+
+ aos_adopt_xmm_reg( cp,
+ pos,
+ TGSI_FILE_OUTPUT,
+ cp->vaos->draw->vs.position_output,
+ TRUE );
+ return TRUE;
+}
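+
+/* Scalar sketch of the debug-only transform above (illustration; the real
+ * code uses the approximate rcpss result for 1/w):
+ */
+#if 0
+static void rhw_viewport_reference( float pos[4],
+                                    const float scale[4],
+                                    const float translate[4] )
+{
+   float rhw = 1.0f / pos[3];
+   unsigned i;
+   for (i = 0; i < 3; i++)
+      pos[i] = pos[i] * scale[i] * rhw + translate[i];
+   pos[3] = rhw;
+}
+#endif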
+
+
+#if 0
+static boolean note_immediate( struct aos_compilation *cp,
+ struct tgsi_full_immediate *imm )
+{
+ unsigned pos = cp->num_immediates++;
+ unsigned j;
+
+ assert( imm->Immediate.NrTokens <= 4 + 1 );
+ for (j = 0; j < imm->Immediate.NrTokens - 1; j++) {
+ cp->vaos->machine->immediate[pos][j] = imm->u[j].Float;
+ }
+
+ return TRUE;
+}
+#endif
+
+
+
+
+static void find_last_write_outputs( struct aos_compilation *cp )
+{
+ struct tgsi_parse_context parse;
+ unsigned this_instruction = 0;
+ unsigned i;
+
+ tgsi_parse_init( &parse, cp->vaos->base.vs->state.tokens );
+
+ while (!tgsi_parse_end_of_tokens( &parse )) {
+
+ tgsi_parse_token( &parse );
+
+ if (parse.FullToken.Token.Type != TGSI_TOKEN_TYPE_INSTRUCTION)
+ continue;
+
+ for (i = 0; i < TGSI_FULL_MAX_DST_REGISTERS; i++) {
+ if (parse.FullToken.FullInstruction.FullDstRegisters[i].DstRegister.File ==
+ TGSI_FILE_OUTPUT)
+ {
+ unsigned idx = parse.FullToken.FullInstruction.FullDstRegisters[i].DstRegister.Index;
+ cp->output_last_write[idx] = this_instruction;
+ }
+ }
+
+ this_instruction++;
+ }
+
+ tgsi_parse_free( &parse );
+}
+
+
+#define ARG_MACHINE 1
+#define ARG_START_ELTS 2
+#define ARG_COUNT 3
+#define ARG_OUTBUF 4
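+
+/* These are x86_fn_arg() slot numbers; the generated functions take the
+ * (machine, start-or-elts, count, output_buffer) arguments described by the
+ * vaos_run_linear_func / vaos_run_elts_func typedefs in draw_vs_aos.h.
+ */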
+
+
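+/* Rough shape of the code this emits (descriptive only):
+ *
+ *    push the callee-saved regs, load the four arguments,
+ *    skip everything if count == 0,
+ *    save the x87 control word, force round-to-nearest and set up the
+ *    input pointers, then per vertex:  fetch inputs, run the translated
+ *    TGSI instructions, apply the rhw/viewport transform if requested,
+ *    emit outputs to the output buffer, advance the output pointer by
+ *    output_stride and the input index/pointers, and loop while
+ *    --count != 0; finally restore the FPU state, pop the regs and return.
+ */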
+static boolean build_vertex_program( struct draw_vs_varient_aos_sse *varient,
+ boolean linear )
+{
+ struct tgsi_parse_context parse;
+ struct aos_compilation cp;
+ unsigned fixup, label;
+
+ util_init_math();
+
+ tgsi_parse_init( &parse, varient->base.vs->state.tokens );
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.insn_counter = 1;
+ cp.vaos = varient;
+ cp.have_sse2 = 1;
+ cp.func = &varient->func[ linear ? 0 : 1 ];
+
+ cp.tmp_EAX = x86_make_reg(file_REG32, reg_AX);
+ cp.idx_EBX = x86_make_reg(file_REG32, reg_BX);
+ cp.outbuf_ECX = x86_make_reg(file_REG32, reg_CX);
+ cp.machine_EDX = x86_make_reg(file_REG32, reg_DX);
+ cp.count_ESI = x86_make_reg(file_REG32, reg_SI);
+ cp.temp_EBP = x86_make_reg(file_REG32, reg_BP);
+ cp.stack_ESP = x86_make_reg( file_REG32, reg_SP );
+
+ x86_init_func(cp.func);
+
+ find_last_write_outputs(&cp);
+
+ x86_push(cp.func, cp.idx_EBX);
+ x86_push(cp.func, cp.count_ESI);
+ x86_push(cp.func, cp.temp_EBP);
+
+
+ /* Load arguments into regs:
+ */
+ x86_mov(cp.func, cp.machine_EDX, x86_fn_arg(cp.func, ARG_MACHINE));
+ x86_mov(cp.func, cp.idx_EBX, x86_fn_arg(cp.func, ARG_START_ELTS));
+ x86_mov(cp.func, cp.count_ESI, x86_fn_arg(cp.func, ARG_COUNT));
+ x86_mov(cp.func, cp.outbuf_ECX, x86_fn_arg(cp.func, ARG_OUTBUF));
+
+
+ /* Compare count to zero and possibly bail.
+ */
+ x86_xor(cp.func, cp.tmp_EAX, cp.tmp_EAX);
+ x86_cmp(cp.func, cp.count_ESI, cp.tmp_EAX);
+ fixup = x86_jcc_forward(cp.func, cc_E);
+
+
+ save_fpu_state( &cp );
+ set_fpu_round_nearest( &cp );
+
+ aos_init_inputs( &cp, linear );
+
+ cp.x86_reg[0] = 0;
+ cp.x86_reg[1] = 0;
+
+ /* Note address for loop jump
+ */
+ label = x86_get_label(cp.func);
+ {
+ /* Fetch inputs... TODO: fetch lazily...
+ */
+ if (!aos_fetch_inputs( &cp, linear ))
+ goto fail;
+
+ /* Emit the shader:
+ */
+ while( !tgsi_parse_end_of_tokens( &parse ) && !cp.error )
+ {
+ tgsi_parse_token( &parse );
+
+ switch (parse.FullToken.Token.Type) {
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+#if 0
+ if (!note_immediate( &cp, &parse.FullToken.FullImmediate ))
+ goto fail;
+#endif
+ break;
+
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ if (DISASSEM)
+ tgsi_dump_instruction( &parse.FullToken.FullInstruction, cp.insn_counter );
+
+ if (!emit_instruction( &cp, &parse.FullToken.FullInstruction ))
+ goto fail;
+ break;
+ }
+
+ x87_assert_stack_empty(cp.func);
+ cp.insn_counter++;
+
+ if (DISASSEM)
+ debug_printf("\n");
+ }
+
+
+ {
+ unsigned i;
+ for (i = 0; i < 8; i++) {
+ if (cp.xmm[i].file != TGSI_FILE_OUTPUT) {
+ cp.xmm[i].file = TGSI_FILE_NULL;
+ cp.xmm[i].dirty = 0;
+ }
+ }
+ }
+
+ if (cp.error)
+ goto fail;
+
+ if (cp.vaos->base.key.clip) {
+ /* not really handling clipping, just do the rhw so we can
+ * see the results...
+ */
+ emit_rhw_viewport(&cp);
+ }
+ else if (cp.vaos->base.key.viewport) {
+ emit_viewport(&cp);
+ }
+
+ /* Emit output... TODO: do this eagerly after the last write to a
+ * given output.
+ */
+ if (!aos_emit_outputs( &cp ))
+ goto fail;
+
+
+ /* Next vertex:
+ */
+ x86_lea(cp.func,
+ cp.outbuf_ECX,
+ x86_make_disp(cp.outbuf_ECX,
+ cp.vaos->base.key.output_stride));
+
+ /* Incr index
+ */
+ aos_incr_inputs( &cp, linear );
+ }
+ /* decr count, loop if not zero
+ */
+ x86_dec(cp.func, cp.count_ESI);
+ x86_jcc(cp.func, cc_NZ, label);
+
+ restore_fpu_state(&cp);
+
+ /* Land forward jump here:
+ */
+ x86_fixup_fwd_jump(cp.func, fixup);
+
+ /* Exit mmx state?
+ */
+ if (cp.func->need_emms)
+ mmx_emms(cp.func);
+
+ x86_pop(cp.func, cp.temp_EBP);
+ x86_pop(cp.func, cp.count_ESI);
+ x86_pop(cp.func, cp.idx_EBX);
+
+ x87_assert_stack_empty(cp.func);
+ x86_ret(cp.func);
+
+ tgsi_parse_free( &parse );
+ return !cp.error;
+
+ fail:
+ tgsi_parse_free( &parse );
+ return FALSE;
+}
+
+
+
+static void vaos_set_buffer( struct draw_vs_varient *varient,
+ unsigned buf,
+ const void *ptr,
+ unsigned stride )
+{
+ struct draw_vs_varient_aos_sse *vaos = (struct draw_vs_varient_aos_sse *)varient;
+
+ if (buf < vaos->nr_vb) {
+ vaos->buffer[buf].base_ptr = (char *)ptr;
+ vaos->buffer[buf].stride = stride;
+ }
+
+ if (0) debug_printf("%s %d/%d: %p %d\n", __FUNCTION__, buf, vaos->nr_vb, ptr, stride);
+}
+
+
+
+static void PIPE_CDECL vaos_run_elts( struct draw_vs_varient *varient,
+ const unsigned *elts,
+ unsigned count,
+ void *output_buffer )
+{
+ struct draw_vs_varient_aos_sse *vaos = (struct draw_vs_varient_aos_sse *)varient;
+ struct aos_machine *machine = vaos->draw->vs.aos_machine;
+
+ if (0) debug_printf("%s %d\n", __FUNCTION__, count);
+
+ machine->internal[IMM_PSIZE][0] = vaos->draw->rasterizer->point_size;
+ machine->constants = vaos->draw->vs.aligned_constants;
+ machine->immediates = vaos->base.vs->immediates;
+ machine->buffer = vaos->buffer;
+
+ vaos->gen_run_elts( machine,
+ elts,
+ count,
+ output_buffer );
+}
+
+static void PIPE_CDECL vaos_run_linear( struct draw_vs_varient *varient,
+ unsigned start,
+ unsigned count,
+ void *output_buffer )
+{
+ struct draw_vs_varient_aos_sse *vaos = (struct draw_vs_varient_aos_sse *)varient;
+ struct aos_machine *machine = vaos->draw->vs.aos_machine;
+
+ if (0) debug_printf("%s %d %d const: %x\n", __FUNCTION__, start, count,
+ vaos->base.key.const_vbuffers);
+
+ machine->internal[IMM_PSIZE][0] = vaos->draw->rasterizer->point_size;
+ machine->constants = vaos->draw->vs.aligned_constants;
+ machine->immediates = vaos->base.vs->immediates;
+ machine->buffer = vaos->buffer;
+
+ vaos->gen_run_linear( machine,
+ start,
+ count,
+ output_buffer );
+
+ /* Sanity spot checks to make sure we didn't trash our constants */
+ assert(machine->internal[IMM_ONES][0] == 1.0f);
+ assert(machine->internal[IMM_IDENTITY][0] == 0.0f);
+ assert(machine->internal[IMM_NEGS][0] == -1.0f);
+}
+
+
+
+static void vaos_destroy( struct draw_vs_varient *varient )
+{
+ struct draw_vs_varient_aos_sse *vaos = (struct draw_vs_varient_aos_sse *)varient;
+
+ FREE( vaos->buffer );
+
+ x86_release_func( &vaos->func[0] );
+ x86_release_func( &vaos->func[1] );
+
+ FREE(vaos);
+}
+
+
+
+static struct draw_vs_varient *varient_aos_sse( struct draw_vertex_shader *vs,
+ const struct draw_vs_varient_key *key )
+{
+ unsigned i;
+ struct draw_vs_varient_aos_sse *vaos = CALLOC_STRUCT(draw_vs_varient_aos_sse);
+
+ if (!vaos)
+ goto fail;
+
+ vaos->base.key = *key;
+ vaos->base.vs = vs;
+ vaos->base.set_buffer = vaos_set_buffer;
+ vaos->base.destroy = vaos_destroy;
+ vaos->base.run_linear = vaos_run_linear;
+ vaos->base.run_elts = vaos_run_elts;
+
+ vaos->draw = vs->draw;
+
+ for (i = 0; i < key->nr_inputs; i++)
+ vaos->nr_vb = MAX2( vaos->nr_vb, key->element[i].in.buffer + 1 );
+
+ vaos->buffer = MALLOC( vaos->nr_vb * sizeof(vaos->buffer[0]) );
+ if (!vaos->buffer)
+ goto fail;
+
+ if (0)
+ debug_printf("nr_vb: %d const: %x\n", vaos->nr_vb, vaos->base.key.const_vbuffers);
+
+#if 0
+ tgsi_dump(vs->state.tokens, 0);
+#endif
+
+ if (!build_vertex_program( vaos, TRUE ))
+ goto fail;
+
+ if (!build_vertex_program( vaos, FALSE ))
+ goto fail;
+
+ vaos->gen_run_linear = (vaos_run_linear_func)x86_get_func(&vaos->func[0]);
+ if (!vaos->gen_run_linear)
+ goto fail;
+
+ vaos->gen_run_elts = (vaos_run_elts_func)x86_get_func(&vaos->func[1]);
+ if (!vaos->gen_run_elts)
+ goto fail;
+
+ return &vaos->base;
+
+ fail:
+ if (vaos && vaos->buffer)
+ FREE(vaos->buffer);
+
+ if (vaos)
+ x86_release_func( &vaos->func[0] );
+
+ if (vaos)
+ x86_release_func( &vaos->func[1] );
+
+ FREE(vaos);
+
+ return NULL;
+}
+
+
+struct draw_vs_varient *draw_vs_varient_aos_sse( struct draw_vertex_shader *vs,
+ const struct draw_vs_varient_key *key )
+{
+ struct draw_vs_varient *varient = varient_aos_sse( vs, key );
+
+ if (varient == NULL) {
+ varient = draw_vs_varient_generic( vs, key );
+ }
+
+ return varient;
+}
+
+
+
+#endif /* PIPE_ARCH_X86 */
diff --git a/src/gallium/auxiliary/draw/draw_vs_aos.h b/src/gallium/auxiliary/draw/draw_vs_aos.h
new file mode 100644
index 0000000000..2cf72ddf7b
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_vs_aos.h
@@ -0,0 +1,253 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef DRAW_VS_AOS_H
+#define DRAW_VS_AOS_H
+
+#include "pipe/p_config.h"
+
+#ifdef PIPE_ARCH_X86
+
+struct tgsi_token;
+struct x86_function;
+
+#include "pipe/p_state.h"
+#include "rtasm/rtasm_x86sse.h"
+
+
+
+
+
+#define X 0
+#define Y 1
+#define Z 2
+#define W 3
+
+#define MAX_INPUTS PIPE_MAX_ATTRIBS
+#define MAX_OUTPUTS PIPE_MAX_SHADER_OUTPUTS
+#define MAX_TEMPS TGSI_EXEC_NUM_TEMPS
+#define MAX_CONSTANTS 1024 /** only used for sanity checking */
+#define MAX_IMMEDIATES 1024 /** only used for sanity checking */
+#define MAX_INTERNALS 8 /** see IMM_x values below */
+
+#define AOS_FILE_INTERNAL TGSI_FILE_COUNT
+
+#define FPU_RND_NEG 1
+#define FPU_RND_NEAREST 2
+
+struct aos_machine;
+typedef void (PIPE_CDECL *lit_func)( struct aos_machine *,
+ float *result,
+ const float *in,
+ unsigned count );
+
+void PIPE_CDECL aos_do_lit( struct aos_machine *machine,
+ float *result,
+ const float *in,
+ unsigned count );
+
+struct shine_tab {
+ float exponent;
+ float values[258];
+ unsigned last_used;
+};
+
+struct lit_info {
+ lit_func func;
+ struct shine_tab *shine_tab;
+};
+
+#define MAX_SHINE_TAB 4
+#define MAX_LIT_INFO 16
+
+struct aos_buffer {
+ const void *base_ptr;
+ unsigned stride;
+ void *ptr; /* updated per vertex */
+};
+
+
+
+
+/* This is the temporary storage used by all the aos_sse vs variants.
+ * Create one per context and reuse it by passing a pointer in at
+ * vs_varient creation?
+ */
+struct aos_machine {
+ float input [MAX_INPUTS ][4];
+ float output [MAX_OUTPUTS ][4];
+ float temp [MAX_TEMPS ][4];
+ float internal [MAX_INTERNALS ][4];
+
+ float scale[4]; /* viewport */
+ float translate[4]; /* viewport */
+
+ float tmp[2][4]; /* scratch space for LIT */
+
+ struct shine_tab shine_tab[MAX_SHINE_TAB];
+ struct lit_info lit_info[MAX_LIT_INFO];
+ unsigned now;
+
+
+ ushort fpu_rnd_nearest;
+ ushort fpu_rnd_neg_inf;
+ ushort fpu_restore;
+ ushort fpucntl; /* one of FPU_* above */
+
+ const float (*immediates)[4]; /* points to shader data */
+ const float (*constants)[4]; /* points to draw data */
+
+ const struct aos_buffer *buffer; /* points to ? */
+};
+
+
+
+
+struct aos_compilation {
+ struct x86_function *func;
+ struct draw_vs_varient_aos_sse *vaos;
+
+ unsigned insn_counter;
+ unsigned num_immediates;
+ unsigned count;
+ unsigned lit_count;
+
+ struct {
+ unsigned idx:16;
+ unsigned file:8;
+ unsigned dirty:8;
+ unsigned last_used;
+ } xmm[8];
+
+ unsigned x86_reg[2]; /* one of X86_* */
+
+ boolean input_fetched[PIPE_MAX_ATTRIBS];
+ unsigned output_last_write[PIPE_MAX_ATTRIBS];
+
+ boolean have_sse2;
+ boolean error;
+ short fpucntl;
+
+ /* these are actually known values, but putting them in a struct
+ * like this is helpful to keep them in sync across the file.
+ */
+ struct x86_reg tmp_EAX;
+ struct x86_reg idx_EBX; /* either start+i or &elt[i] */
+ struct x86_reg outbuf_ECX;
+ struct x86_reg machine_EDX;
+ struct x86_reg count_ESI; /* decrements to zero */
+ struct x86_reg temp_EBP;
+ struct x86_reg stack_ESP;
+};
+
+struct x86_reg aos_get_xmm_reg( struct aos_compilation *cp );
+void aos_release_xmm_reg( struct aos_compilation *cp, unsigned idx );
+
+void aos_adopt_xmm_reg( struct aos_compilation *cp,
+ struct x86_reg reg,
+ unsigned file,
+ unsigned idx,
+ unsigned dirty );
+
+void aos_spill_all( struct aos_compilation *cp );
+
+struct x86_reg aos_get_shader_reg( struct aos_compilation *cp,
+ unsigned file,
+ unsigned idx );
+
+boolean aos_init_inputs( struct aos_compilation *cp, boolean linear );
+boolean aos_fetch_inputs( struct aos_compilation *cp, boolean linear );
+boolean aos_incr_inputs( struct aos_compilation *cp, boolean linear );
+
+boolean aos_emit_outputs( struct aos_compilation *cp );
+
+
+#define IMM_ONES 0 /* 1, 1,1,1 */
+#define IMM_SWZ 1 /* 1,-1,0, 0xffffffff */
+#define IMM_IDENTITY 2 /* 0, 0,0,1 */
+#define IMM_INV_255 3 /* 1/255, 1/255, 1/255, 1/255 */
+#define IMM_255 4 /* 255, 255, 255, 255 */
+#define IMM_NEGS 5 /* -1,-1,-1,-1 */
+#define IMM_RSQ 6 /* -.5,1.5,_,_ */
+#define IMM_PSIZE 7 /* not really an immediate - updated each run */
+
+struct x86_reg aos_get_internal( struct aos_compilation *cp,
+ unsigned imm );
+struct x86_reg aos_get_internal_xmm( struct aos_compilation *cp,
+ unsigned imm );
+
+
+#define AOS_ERROR(cp, msg) \
+do { \
+ if (0) debug_printf("%s: x86 translation failed: %s\n", __FUNCTION__, msg); \
+ cp->error = 1; \
+} while (0)
+
+
+#define X86_NULL 0
+#define X86_IMMEDIATES 1
+#define X86_CONSTANTS 2
+#define X86_BUFFERS 3
+
+struct x86_reg aos_get_x86( struct aos_compilation *cp,
+ unsigned which_reg,
+ unsigned value );
+
+
+typedef void (PIPE_CDECL *vaos_run_elts_func)( struct aos_machine *,
+ const unsigned *elts,
+ unsigned count,
+ void *output_buffer);
+
+typedef void (PIPE_CDECL *vaos_run_linear_func)( struct aos_machine *,
+ unsigned start,
+ unsigned count,
+ void *output_buffer);
+
+
+struct draw_vs_varient_aos_sse {
+ struct draw_vs_varient base;
+ struct draw_context *draw;
+
+ struct aos_buffer *buffer;
+ unsigned nr_vb;
+
+ vaos_run_linear_func gen_run_linear;
+ vaos_run_elts_func gen_run_elts;
+
+
+ struct x86_function func[2];
+};
+
+
+#endif
+
+#endif
+
diff --git a/src/gallium/auxiliary/draw/draw_vs_aos_io.c b/src/gallium/auxiliary/draw/draw_vs_aos_io.c
new file mode 100644
index 0000000000..a6eb37d128
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_vs_aos_io.c
@@ -0,0 +1,462 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "util/u_memory.h"
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_util.h"
+#include "tgsi/tgsi_exec.h"
+#include "draw_vs.h"
+#include "draw_vs_aos.h"
+#include "draw_vertex.h"
+
+#include "rtasm/rtasm_x86sse.h"
+
+#ifdef PIPE_ARCH_X86
+
+/* Note - don't yet have to worry about interacting with the code in
+ * draw_vs_aos.c as there is no intermingling of generated code...
+ * That may have to change, we'll see.
+ */
+static void emit_load_R32G32B32A32( struct aos_compilation *cp,
+ struct x86_reg data,
+ struct x86_reg src_ptr )
+{
+ sse_movups(cp->func, data, src_ptr);
+}
+
+static void emit_load_R32G32B32( struct aos_compilation *cp,
+ struct x86_reg data,
+ struct x86_reg src_ptr )
+{
+#if 1
+ sse_movss(cp->func, data, x86_make_disp(src_ptr, 8));
+ /* data = z ? ? ? */
+ sse_shufps(cp->func, data, aos_get_internal_xmm( cp, IMM_IDENTITY ), SHUF(X,Y,Z,W) );
+ /* data = z ? 0 1 */
+ sse_shufps(cp->func, data, data, SHUF(Y,Z,X,W) );
+ /* data = ? 0 z 1 */
+ sse_movlps(cp->func, data, src_ptr);
+ /* data = x y z 1 */
+#else
+ sse_movups(cp->func, data, src_ptr);
+ /* data = x y z ? */
+ sse2_pshufd(cp->func, data, data, SHUF(W,X,Y,Z) );
+ /* data = ? x y z */
+ sse_movss(cp->func, data, aos_get_internal_xmm( cp, IMM_ONES ) );
+ /* data = 1 x y z */
+ sse2_pshufd(cp->func, data, data, SHUF(Y,Z,W,X) );
+ /* data = x y z 1 */
+#endif
+}
+
+static void emit_load_R32G32( struct aos_compilation *cp,
+ struct x86_reg data,
+ struct x86_reg src_ptr )
+{
+ sse_movups(cp->func, data, aos_get_internal_xmm( cp, IMM_IDENTITY ) );
+ sse_movlps(cp->func, data, src_ptr);
+}
+
+
+static void emit_load_R32( struct aos_compilation *cp,
+ struct x86_reg data,
+ struct x86_reg src_ptr )
+{
+ sse_movss(cp->func, data, src_ptr);
+ sse_orps(cp->func, data, aos_get_internal_xmm( cp, IMM_IDENTITY ) );
+}
+
+
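+/* movss pulls in the packed RGBA dword; the two punpcklbw's against the
+ * (all-zero) low half of IMM_IDENTITY widen each byte to a dword, cvtdq2ps
+ * converts to float and the final multiply rescales 0..255 to 0..1.
+ */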
+static void emit_load_R8G8B8A8_UNORM( struct aos_compilation *cp,
+ struct x86_reg data,
+ struct x86_reg src_ptr )
+{
+ sse_movss(cp->func, data, src_ptr);
+ sse2_punpcklbw(cp->func, data, aos_get_internal_xmm( cp, IMM_IDENTITY ));
+ sse2_punpcklbw(cp->func, data, aos_get_internal_xmm( cp, IMM_IDENTITY ));
+ sse2_cvtdq2ps(cp->func, data, data);
+ sse_mulps(cp->func, data, aos_get_internal(cp, IMM_INV_255));
+}
+
+
+
+/* Extended swizzles? Maybe later.
+ */
+static void emit_swizzle( struct aos_compilation *cp,
+ struct x86_reg dest,
+ struct x86_reg src,
+ ubyte shuffle )
+{
+ sse_shufps(cp->func, dest, src, shuffle);
+}
+
+
+
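+/* Compute the address of the current vertex in one buffer.  The indexed
+ * path does base_ptr + elt * stride; the linear path instead reads the
+ * per-buffer running pointer and post-increments it by one stride, so the
+ * next iteration starts from the advanced address.
+ */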
+static boolean get_buffer_ptr( struct aos_compilation *cp,
+ boolean linear,
+ unsigned buf_idx,
+ struct x86_reg elt,
+ struct x86_reg ptr)
+{
+ struct x86_reg buf = x86_make_disp(aos_get_x86( cp, 0, X86_BUFFERS ),
+ buf_idx * sizeof(struct aos_buffer));
+
+ struct x86_reg buf_stride = x86_make_disp(buf,
+ Offset(struct aos_buffer, stride));
+ if (linear) {
+ struct x86_reg buf_ptr = x86_make_disp(buf,
+ Offset(struct aos_buffer, ptr));
+
+
+ /* Calculate pointer to current attrib:
+ */
+ x86_mov(cp->func, ptr, buf_ptr);
+ x86_mov(cp->func, elt, buf_stride);
+ x86_add(cp->func, elt, ptr);
+ if (buf_idx == 0) sse_prefetchnta(cp->func, x86_make_disp(elt, 192));
+ x86_mov(cp->func, buf_ptr, elt);
+ }
+ else {
+ struct x86_reg buf_base_ptr = x86_make_disp(buf,
+ Offset(struct aos_buffer, base_ptr));
+
+
+ /* Calculate pointer to current attrib:
+ */
+ x86_mov(cp->func, ptr, buf_stride);
+ x86_imul(cp->func, ptr, elt);
+ x86_add(cp->func, ptr, buf_base_ptr);
+ }
+
+ cp->insn_counter++;
+
+ return TRUE;
+}
+
+
+static boolean load_input( struct aos_compilation *cp,
+ unsigned idx,
+ struct x86_reg bufptr )
+{
+ unsigned format = cp->vaos->base.key.element[idx].in.format;
+ unsigned offset = cp->vaos->base.key.element[idx].in.offset;
+ struct x86_reg dataXMM = aos_get_xmm_reg(cp);
+
+ /* Figure out source pointer address:
+ */
+ struct x86_reg src = x86_make_disp(bufptr, offset);
+
+ aos_adopt_xmm_reg( cp,
+ dataXMM,
+ TGSI_FILE_INPUT,
+ idx,
+ TRUE );
+
+ switch (format) {
+ case PIPE_FORMAT_R32_FLOAT:
+ emit_load_R32(cp, dataXMM, src);
+ break;
+ case PIPE_FORMAT_R32G32_FLOAT:
+ emit_load_R32G32(cp, dataXMM, src);
+ break;
+ case PIPE_FORMAT_R32G32B32_FLOAT:
+ emit_load_R32G32B32(cp, dataXMM, src);
+ break;
+ case PIPE_FORMAT_R32G32B32A32_FLOAT:
+ emit_load_R32G32B32A32(cp, dataXMM, src);
+ break;
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ emit_load_R8G8B8A8_UNORM(cp, dataXMM, src);
+ emit_swizzle(cp, dataXMM, dataXMM, SHUF(Z,Y,X,W));
+ break;
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ emit_load_R8G8B8A8_UNORM(cp, dataXMM, src);
+ break;
+ default:
+ AOS_ERROR(cp, "unhandled input format");
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static boolean load_inputs( struct aos_compilation *cp,
+ unsigned buffer,
+ struct x86_reg ptr )
+{
+ unsigned i;
+
+ for (i = 0; i < cp->vaos->base.key.nr_inputs; i++) {
+ if (cp->vaos->base.key.element[i].in.buffer == buffer) {
+
+ if (!load_input( cp, i, ptr ))
+ return FALSE;
+
+ cp->insn_counter++;
+ }
+ }
+
+ return TRUE;
+}
+
+boolean aos_init_inputs( struct aos_compilation *cp, boolean linear )
+{
+ unsigned i;
+ for (i = 0; i < cp->vaos->nr_vb; i++) {
+ struct x86_reg buf = x86_make_disp(aos_get_x86( cp, 0, X86_BUFFERS ),
+ i * sizeof(struct aos_buffer));
+
+ struct x86_reg buf_base_ptr = x86_make_disp(buf,
+ Offset(struct aos_buffer, base_ptr));
+
+ if (cp->vaos->base.key.const_vbuffers & (1<<i)) {
+ struct x86_reg ptr = cp->tmp_EAX;
+
+ x86_mov(cp->func, ptr, buf_base_ptr);
+
+ /* Load all inputs for this constant vertex buffer
+ */
+ load_inputs( cp, i, x86_deref(ptr) );
+
+ /* Then just force them out to aos_machine.input[]
+ */
+ aos_spill_all( cp );
+
+ }
+ else if (linear) {
+
+ struct x86_reg elt = cp->idx_EBX;
+ struct x86_reg ptr = cp->tmp_EAX;
+
+ struct x86_reg buf_stride = x86_make_disp(buf,
+ Offset(struct aos_buffer, stride));
+
+ struct x86_reg buf_ptr = x86_make_disp(buf,
+ Offset(struct aos_buffer, ptr));
+
+
+ /* Calculate pointer to current attrib:
+ */
+ x86_mov(cp->func, ptr, buf_stride);
+ x86_imul(cp->func, ptr, elt);
+ x86_add(cp->func, ptr, buf_base_ptr);
+
+
+ /* In the linear case, keep the buffer pointer instead of the
+ * index number.
+ */
+ if (cp->vaos->nr_vb == 1)
+ x86_mov( cp->func, elt, ptr );
+ else
+ x86_mov( cp->func, buf_ptr, ptr );
+
+ cp->insn_counter++;
+ }
+ }
+
+ return TRUE;
+}
+
+boolean aos_fetch_inputs( struct aos_compilation *cp, boolean linear )
+{
+ unsigned j;
+
+ for (j = 0; j < cp->vaos->nr_vb; j++) {
+ if (cp->vaos->base.key.const_vbuffers & (1<<j)) {
+         /* just retrieve pre-transformed input */
+ }
+ else if (linear && cp->vaos->nr_vb == 1) {
+ load_inputs( cp, 0, cp->idx_EBX );
+ }
+ else {
+ struct x86_reg elt = linear ? cp->idx_EBX : x86_deref(cp->idx_EBX);
+ struct x86_reg ptr = cp->tmp_EAX;
+
+ if (!get_buffer_ptr( cp, linear, j, elt, ptr ))
+ return FALSE;
+
+ if (!load_inputs( cp, j, ptr ))
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+boolean aos_incr_inputs( struct aos_compilation *cp, boolean linear )
+{
+ if (linear && cp->vaos->nr_vb == 1) {
+ struct x86_reg stride = x86_make_disp(aos_get_x86( cp, 0, X86_BUFFERS ),
+ (0 * sizeof(struct aos_buffer) +
+ Offset(struct aos_buffer, stride)));
+
+ x86_add(cp->func, cp->idx_EBX, stride);
+ sse_prefetchnta(cp->func, x86_make_disp(cp->idx_EBX, 192));
+ }
+ else if (linear) {
+ /* Nothing to do */
+ }
+ else {
+ x86_lea(cp->func, cp->idx_EBX, x86_make_disp(cp->idx_EBX, 4));
+ }
+
+ return TRUE;
+}
+
+
+
+
+
+
+static void emit_store_R32G32B32A32( struct aos_compilation *cp,
+ struct x86_reg dst_ptr,
+ struct x86_reg dataXMM )
+{
+ sse_movups(cp->func, dst_ptr, dataXMM);
+}
+
+static void emit_store_R32G32B32( struct aos_compilation *cp,
+ struct x86_reg dst_ptr,
+ struct x86_reg dataXMM )
+{
+ sse_movlps(cp->func, dst_ptr, dataXMM);
+ sse_shufps(cp->func, dataXMM, dataXMM, SHUF(Z,Z,Z,Z) ); /* NOTE! destructive */
+ sse_movss(cp->func, x86_make_disp(dst_ptr,8), dataXMM);
+}
+
+static void emit_store_R32G32( struct aos_compilation *cp,
+ struct x86_reg dst_ptr,
+ struct x86_reg dataXMM )
+{
+ sse_movlps(cp->func, dst_ptr, dataXMM);
+}
+
+static void emit_store_R32( struct aos_compilation *cp,
+ struct x86_reg dst_ptr,
+ struct x86_reg dataXMM )
+{
+ sse_movss(cp->func, dst_ptr, dataXMM);
+}
+
+
+
+static void emit_store_R8G8B8A8_UNORM( struct aos_compilation *cp,
+ struct x86_reg dst_ptr,
+ struct x86_reg dataXMM )
+{
+ sse_mulps(cp->func, dataXMM, aos_get_internal(cp, IMM_255));
+ sse2_cvtps2dq(cp->func, dataXMM, dataXMM);
+ sse2_packssdw(cp->func, dataXMM, dataXMM);
+ sse2_packuswb(cp->func, dataXMM, dataXMM);
+ sse_movss(cp->func, dst_ptr, dataXMM);
+}
+
+
+
+
+
+static boolean emit_output( struct aos_compilation *cp,
+ struct x86_reg ptr,
+ struct x86_reg dataXMM,
+ unsigned format )
+{
+ switch (format) {
+ case EMIT_1F:
+ case EMIT_1F_PSIZE:
+ emit_store_R32(cp, ptr, dataXMM);
+ break;
+ case EMIT_2F:
+ emit_store_R32G32(cp, ptr, dataXMM);
+ break;
+ case EMIT_3F:
+ emit_store_R32G32B32(cp, ptr, dataXMM);
+ break;
+ case EMIT_4F:
+ emit_store_R32G32B32A32(cp, ptr, dataXMM);
+ break;
+ case EMIT_4UB:
+ if (1) {
+ emit_swizzle(cp, dataXMM, dataXMM, SHUF(Z,Y,X,W));
+ emit_store_R8G8B8A8_UNORM(cp, ptr, dataXMM);
+ }
+ else {
+ emit_store_R8G8B8A8_UNORM(cp, ptr, dataXMM);
+ }
+ break;
+ default:
+ AOS_ERROR(cp, "unhandled output format");
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+
+
+boolean aos_emit_outputs( struct aos_compilation *cp )
+{
+ unsigned i;
+
+ for (i = 0; i < cp->vaos->base.key.nr_outputs; i++) {
+ unsigned format = cp->vaos->base.key.element[i].out.format;
+ unsigned offset = cp->vaos->base.key.element[i].out.offset;
+ unsigned vs_output = cp->vaos->base.key.element[i].out.vs_output;
+
+ struct x86_reg data;
+
+ if (format == EMIT_1F_PSIZE) {
+ data = aos_get_internal_xmm( cp, IMM_PSIZE );
+ }
+ else {
+ data = aos_get_shader_reg( cp,
+ TGSI_FILE_OUTPUT,
+ vs_output );
+ }
+
+ if (data.file != file_XMM) {
+ struct x86_reg tmp = aos_get_xmm_reg( cp );
+ sse_movaps(cp->func, tmp, data);
+ data = tmp;
+ }
+
+ if (!emit_output( cp,
+ x86_make_disp( cp->outbuf_ECX, offset ),
+ data,
+ format ))
+ return FALSE;
+
+ aos_release_xmm_reg( cp, data.idx );
+
+ cp->insn_counter++;
+ }
+
+ return TRUE;
+}
+
+#endif
diff --git a/src/gallium/auxiliary/draw/draw_vs_aos_machine.c b/src/gallium/auxiliary/draw/draw_vs_aos_machine.c
new file mode 100644
index 0000000000..3240e3745d
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_vs_aos_machine.c
@@ -0,0 +1,324 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "pipe/p_config.h"
+
+
+#include "pipe/p_shader_tokens.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_util.h"
+#include "tgsi/tgsi_exec.h"
+#include "draw_vs.h"
+#include "draw_vs_aos.h"
+#include "draw_vertex.h"
+
+#ifdef PIPE_ARCH_X86
+
+#include "rtasm/rtasm_x86sse.h"
+
+
+#define X87_CW_EXCEPTION_INV_OP (1<<0)
+#define X87_CW_EXCEPTION_DENORM_OP (1<<1)
+#define X87_CW_EXCEPTION_ZERO_DIVIDE (1<<2)
+#define X87_CW_EXCEPTION_OVERFLOW (1<<3)
+#define X87_CW_EXCEPTION_UNDERFLOW (1<<4)
+#define X87_CW_EXCEPTION_PRECISION (1<<5)
+#define X87_CW_PRECISION_SINGLE (0<<8)
+#define X87_CW_PRECISION_RESERVED (1<<8)
+#define X87_CW_PRECISION_DOUBLE (2<<8)
+#define X87_CW_PRECISION_DOUBLE_EXT (3<<8)
+#define X87_CW_PRECISION_MASK (3<<8)
+#define X87_CW_ROUND_NEAREST (0<<10)
+#define X87_CW_ROUND_DOWN (1<<10)
+#define X87_CW_ROUND_UP (2<<10)
+#define X87_CW_ROUND_ZERO (3<<10)
+#define X87_CW_ROUND_MASK (3<<10)
+#define X87_CW_INFINITY (1<<12)
+
+
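+/* Fallback for the LIT opcode: result is (1, max(src.x, 0),
+ * (src.x > 0 && src.y > 0) ? pow(src.y, e) : 0, 1), with the exponent e
+ * clamped to just inside +/-128.
+ */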
+void PIPE_CDECL aos_do_lit( struct aos_machine *machine,
+ float *result,
+ const float *in,
+ unsigned count )
+{
+ if (in[0] > 0)
+ {
+ if (in[1] <= 0.0)
+ {
+ result[0] = 1.0F;
+ result[1] = in[0];
+ result[2] = 0.0F;
+ result[3] = 1.0F;
+ }
+ else
+ {
+ const float epsilon = 1.0F / 256.0F;
+ float exponent = CLAMP(in[3], -(128.0F - epsilon), (128.0F - epsilon));
+ result[0] = 1.0F;
+ result[1] = in[0];
+ result[2] = powf(in[1], exponent);
+ result[3] = 1.0;
+ }
+ }
+ else
+ {
+ result[0] = 1.0F;
+ result[1] = 0.0;
+ result[2] = 0.0;
+ result[3] = 1.0F;
+ }
+}
+
+
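+/* LIT via a small lookup table: the 258-entry shine_tab samples pow(x, e)
+ * at x = i/256, so for src.y in [0,1] we take k = (int)(src.y * 256) and
+ * linearly interpolate between tab[k] and tab[k+1] (k can reach 256, hence
+ * the extra entries).  If the exponent has changed or src.y > 1, fall back
+ * to powf().
+ */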
+static void PIPE_CDECL do_lit_lut( struct aos_machine *machine,
+ float *result,
+ const float *in,
+ unsigned count )
+{
+ if (in[0] > 0)
+ {
+ if (in[1] <= 0.0)
+ {
+ result[0] = 1.0F;
+ result[1] = in[0];
+ result[2] = 0.0F;
+ result[3] = 1.0F;
+ return;
+ }
+
+ if (machine->lit_info[count].shine_tab->exponent != in[3]) {
+ machine->lit_info[count].func = aos_do_lit;
+ goto no_luck;
+ }
+
+ if (in[1] <= 1.0)
+ {
+ const float *tab = machine->lit_info[count].shine_tab->values;
+ float f = in[1] * 256;
+ int k = (int)f;
+ float frac = f - (float)k;
+
+ result[0] = 1.0F;
+ result[1] = in[0];
+ result[2] = tab[k] + frac*(tab[k+1]-tab[k]);
+ result[3] = 1.0;
+ return;
+ }
+
+ no_luck:
+ {
+ const float epsilon = 1.0F / 256.0F;
+ float exponent = CLAMP(in[3], -(128.0F - epsilon), (128.0F - epsilon));
+ result[0] = 1.0F;
+ result[1] = in[0];
+ result[2] = powf(in[1], exponent);
+ result[3] = 1.0;
+ }
+ }
+ else
+ {
+ result[0] = 1.0F;
+ result[1] = 0.0;
+ result[2] = 0.0;
+ result[3] = 1.0F;
+ }
+}
+
+
+static void do_populate_lut( struct shine_tab *tab,
+ float unclamped_exponent )
+{
+ const float epsilon = 1.0F / 256.0F;
+ float exponent = CLAMP(unclamped_exponent, -(128.0F - epsilon), (128.0F - epsilon));
+ unsigned i;
+
+ tab->exponent = unclamped_exponent; /* for later comparison */
+
+ tab->values[0] = 0;
+ if (exponent == 0) {
+ for (i = 1; i < 258; i++) {
+ tab->values[i] = 1.0;
+ }
+ }
+ else {
+ for (i = 1; i < 258; i++) {
+ tab->values[i] = powf((float)i * epsilon, exponent);
+ }
+ }
+}
+
+
+
+
+static void PIPE_CDECL populate_lut( struct aos_machine *machine,
+ float *result,
+ const float *in,
+ unsigned count )
+{
+ unsigned i, tab;
+
+ /* Search for an existing table for this value. Note that without
+ * static analysis we don't really know if in[3] will be constant,
+ * but it usually is...
+ */
+ for (tab = 0; tab < 4; tab++) {
+ if (machine->shine_tab[tab].exponent == in[3]) {
+ goto found;
+ }
+ }
+
+ for (tab = 0, i = 1; i < 4; i++) {
+ if (machine->shine_tab[i].last_used < machine->shine_tab[tab].last_used)
+ tab = i;
+ }
+
+ if (machine->shine_tab[tab].last_used == machine->now) {
+ /* No unused tables (this is not a ffvertex program...). Just
+ * call pow each time:
+ */
+ machine->lit_info[count].func = aos_do_lit;
+ machine->lit_info[count].func( machine, result, in, count );
+ return;
+ }
+ else {
+ do_populate_lut( &machine->shine_tab[tab], in[3] );
+ }
+
+ found:
+ machine->shine_tab[tab].last_used = machine->now;
+ machine->lit_info[count].shine_tab = &machine->shine_tab[tab];
+ machine->lit_info[count].func = do_lit_lut;
+ machine->lit_info[count].func( machine, result, in, count );
+}
+
+
+void draw_vs_aos_machine_constants( struct aos_machine *machine,
+ const float (*constants)[4] )
+{
+ machine->constants = constants;
+
+ {
+ unsigned i;
+ for (i = 0; i < MAX_LIT_INFO; i++) {
+ machine->lit_info[i].func = populate_lut;
+ machine->now++;
+ }
+ }
+}
+
+
+void draw_vs_aos_machine_viewport( struct aos_machine *machine,
+ const struct pipe_viewport_state *viewport )
+{
+ memcpy(machine->scale, viewport->scale, 4 * sizeof(float));
+ memcpy(machine->translate, viewport->translate, 4 * sizeof(float));
+}
+
+
+
+void draw_vs_aos_machine_destroy( struct aos_machine *machine )
+{
+ align_free(machine);
+}
+
+struct aos_machine *draw_vs_aos_machine( void )
+{
+ struct aos_machine *machine;
+ unsigned i;
+ float inv = 1.0f/255.0f;
+ float f255 = 255.0f;
+
+ machine = align_malloc(sizeof(struct aos_machine), 16);
+ if (!machine)
+ return NULL;
+
+ memset(machine, 0, sizeof(*machine));
+
+ ASSIGN_4V(machine->internal[IMM_SWZ], 1.0f, -1.0f, 0.0f, 1.0f);
+ *(unsigned *)&machine->internal[IMM_SWZ][3] = 0xffffffff;
+
+ ASSIGN_4V(machine->internal[IMM_ONES], 1.0f, 1.0f, 1.0f, 1.0f);
+ ASSIGN_4V(machine->internal[IMM_NEGS], -1.0f, -1.0f, -1.0f, -1.0f);
+ ASSIGN_4V(machine->internal[IMM_IDENTITY], 0.0f, 0.0f, 0.0f, 1.0f);
+ ASSIGN_4V(machine->internal[IMM_INV_255], inv, inv, inv, inv);
+ ASSIGN_4V(machine->internal[IMM_255], f255, f255, f255, f255);
+ ASSIGN_4V(machine->internal[IMM_RSQ], -.5f, 1.5f, 0.0f, 0.0f);
+
+
+ machine->fpu_rnd_nearest = (X87_CW_EXCEPTION_INV_OP |
+ X87_CW_EXCEPTION_DENORM_OP |
+ X87_CW_EXCEPTION_ZERO_DIVIDE |
+ X87_CW_EXCEPTION_OVERFLOW |
+ X87_CW_EXCEPTION_UNDERFLOW |
+ X87_CW_EXCEPTION_PRECISION |
+ (1<<6) |
+ X87_CW_ROUND_NEAREST |
+ X87_CW_PRECISION_DOUBLE_EXT);
+
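+   /* 0x3f masks all six exceptions, bit 6 is a reserved bit (set here to
+    * match the conventional 0x37f default), 0x300 selects double-extended
+    * precision and round-to-nearest contributes 0, giving 0x037f.
+    */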
+ assert(machine->fpu_rnd_nearest == 0x37f);
+
+ machine->fpu_rnd_neg_inf = (X87_CW_EXCEPTION_INV_OP |
+ X87_CW_EXCEPTION_DENORM_OP |
+ X87_CW_EXCEPTION_ZERO_DIVIDE |
+ X87_CW_EXCEPTION_OVERFLOW |
+ X87_CW_EXCEPTION_UNDERFLOW |
+ X87_CW_EXCEPTION_PRECISION |
+ (1<<6) |
+ X87_CW_ROUND_DOWN |
+ X87_CW_PRECISION_DOUBLE_EXT);
+
+ for (i = 0; i < MAX_SHINE_TAB; i++)
+ do_populate_lut( &machine->shine_tab[i], 1.0f );
+
+ return machine;
+}
+
+#else
+
+void draw_vs_aos_machine_viewport( struct aos_machine *machine,
+ const struct pipe_viewport_state *viewport )
+{
+}
+
+void draw_vs_aos_machine_constants( struct aos_machine *machine,
+ const float (*constants)[4] )
+{
+}
+
+void draw_vs_aos_machine_destroy( struct aos_machine *machine )
+{
+}
+
+struct aos_machine *draw_vs_aos_machine( void )
+{
+ return NULL;
+}
+#endif
+
diff --git a/src/gallium/auxiliary/draw/draw_vs_exec.c b/src/gallium/auxiliary/draw/draw_vs_exec.c
new file mode 100644
index 0000000000..41cc802613
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_vs_exec.c
@@ -0,0 +1,208 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Brian Paul
+ */
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "pipe/p_shader_tokens.h"
+
+#include "draw_private.h"
+#include "draw_context.h"
+#include "draw_vs.h"
+
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_scan.h"
+#include "tgsi/tgsi_exec.h"
+
+
+struct exec_vertex_shader {
+ struct draw_vertex_shader base;
+ struct tgsi_exec_machine *machine;
+};
+
+static struct exec_vertex_shader *exec_vertex_shader( struct draw_vertex_shader *vs )
+{
+ return (struct exec_vertex_shader *)vs;
+}
+
+
+/* Not required for run_linear.
+ */
+static void
+vs_exec_prepare( struct draw_vertex_shader *shader,
+ struct draw_context *draw )
+{
+ struct exec_vertex_shader *evs = exec_vertex_shader(shader);
+
+ /* Specify the vertex program to interpret/execute.
+ * Avoid rebinding when possible.
+ */
+ if (evs->machine->Tokens != shader->state.tokens) {
+ tgsi_exec_machine_bind_shader(evs->machine,
+ shader->state.tokens,
+ draw->vs.num_samplers,
+ draw->vs.samplers);
+ }
+}
+
+
+
+
+/* Simplified vertex shader interface for the pt paths. Given the
+ * complexity of code-generating all the above operations together,
+ * it's time to try doing all the other stuff separately.
+ */
+static void
+vs_exec_run_linear( struct draw_vertex_shader *shader,
+ const float (*input)[4],
+ float (*output)[4],
+ const float (*constants)[4],
+ unsigned count,
+ unsigned input_stride,
+ unsigned output_stride )
+{
+ struct exec_vertex_shader *evs = exec_vertex_shader(shader);
+ struct tgsi_exec_machine *machine = evs->machine;
+ unsigned int i, j;
+ unsigned slot;
+
+ machine->Consts = constants;
+
+ for (i = 0; i < count; i += MAX_TGSI_VERTICES) {
+ unsigned int max_vertices = MIN2(MAX_TGSI_VERTICES, count - i);
+
+ /* Swizzle inputs.
+ */
+ for (j = 0; j < max_vertices; j++) {
+#if 0
+ debug_printf("%d) Input vert:\n", i + j);
+ for (slot = 0; slot < shader->info.num_inputs; slot++) {
+ debug_printf("\t%d: %f %f %f %f\n", slot,
+ input[slot][0],
+ input[slot][1],
+ input[slot][2],
+ input[slot][3]);
+ }
+#endif
+
+ for (slot = 0; slot < shader->info.num_inputs; slot++) {
+#if 0
+ assert(!util_is_inf_or_nan(input[slot][0]));
+ assert(!util_is_inf_or_nan(input[slot][1]));
+ assert(!util_is_inf_or_nan(input[slot][2]));
+ assert(!util_is_inf_or_nan(input[slot][3]));
+#endif
+ machine->Inputs[slot].xyzw[0].f[j] = input[slot][0];
+ machine->Inputs[slot].xyzw[1].f[j] = input[slot][1];
+ machine->Inputs[slot].xyzw[2].f[j] = input[slot][2];
+ machine->Inputs[slot].xyzw[3].f[j] = input[slot][3];
+ }
+
+ input = (const float (*)[4])((const char *)input + input_stride);
+ }
+
+ tgsi_set_exec_mask(machine,
+ 1,
+ max_vertices > 1,
+ max_vertices > 2,
+ max_vertices > 3);
+
+ /* run interpreter */
+ tgsi_exec_machine_run( machine );
+
+ /* Unswizzle all output results.
+ */
+ for (j = 0; j < max_vertices; j++) {
+ for (slot = 0; slot < shader->info.num_outputs; slot++) {
+ output[slot][0] = machine->Outputs[slot].xyzw[0].f[j];
+ output[slot][1] = machine->Outputs[slot].xyzw[1].f[j];
+ output[slot][2] = machine->Outputs[slot].xyzw[2].f[j];
+ output[slot][3] = machine->Outputs[slot].xyzw[3].f[j];
+
+ }
+
+#if 0
+ debug_printf("%d) Post xform vert:\n", i + j);
+ for (slot = 0; slot < shader->info.num_outputs; slot++) {
+ debug_printf("\t%d: %f %f %f %f\n", slot,
+ output[slot][0],
+ output[slot][1],
+ output[slot][2],
+ output[slot][3]);
+ assert(!util_is_inf_or_nan(output[slot][0]));
+ }
+#endif
+
+ output = (float (*)[4])((char *)output + output_stride);
+ }
+
+ }
+}
+
+
+
+
+static void
+vs_exec_delete( struct draw_vertex_shader *dvs )
+{
+ FREE((void*) dvs->state.tokens);
+ FREE( dvs );
+}
+
+
+struct draw_vertex_shader *
+draw_create_vs_exec(struct draw_context *draw,
+ const struct pipe_shader_state *state)
+{
+ struct exec_vertex_shader *vs = CALLOC_STRUCT( exec_vertex_shader );
+
+ if (vs == NULL)
+ return NULL;
+
+ /* we make a private copy of the tokens */
+ vs->base.state.tokens = tgsi_dup_tokens(state->tokens);
+ if (!vs->base.state.tokens) {
+ FREE(vs);
+ return NULL;
+ }
+
+ tgsi_scan_shader(state->tokens, &vs->base.info);
+
+ vs->base.draw = draw;
+ vs->base.prepare = vs_exec_prepare;
+ vs->base.run_linear = vs_exec_run_linear;
+ vs->base.delete = vs_exec_delete;
+ vs->base.create_varient = draw_vs_varient_generic;
+ vs->machine = draw->vs.machine;
+
+ return &vs->base;
+}
diff --git a/src/gallium/auxiliary/draw/draw_vs_llvm.c b/src/gallium/auxiliary/draw/draw_vs_llvm.c
new file mode 100644
index 0000000000..b3535c0e48
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_vs_llvm.c
@@ -0,0 +1,161 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Zack Rusin
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Brian Paul
+ */
+
+#include "util/u_memory.h"
+#include "pipe/p_shader_tokens.h"
+#include "draw_private.h"
+#include "draw_context.h"
+#include "draw_vs.h"
+
+#include "tgsi/tgsi_parse.h"
+
+#ifdef MESA_LLVM
+
+#include "gallivm/gallivm.h"
+
+struct draw_llvm_vertex_shader {
+ struct draw_vertex_shader base;
+ struct gallivm_prog *llvm_prog;
+ struct tgsi_exec_machine *machine;
+};
+
+
+static void
+vs_llvm_prepare( struct draw_vertex_shader *base,
+ struct draw_context *draw )
+{
+}
+
+
+
+
+static void
+vs_llvm_run_linear( struct draw_vertex_shader *base,
+ const float (*input)[4],
+ float (*output)[4],
+ const float (*constants)[4],
+ unsigned count,
+ unsigned input_stride,
+ unsigned output_stride )
+{
+ struct draw_llvm_vertex_shader *shader =
+ (struct draw_llvm_vertex_shader *)base;
+
+ gallivm_cpu_vs_exec(shader->llvm_prog, shader->machine,
+ input, base->info.num_inputs, output, base->info.num_outputs,
+ constants, count, input_stride, output_stride);
+}
+
+
+
+static void
+vs_llvm_delete( struct draw_vertex_shader *base )
+{
+ struct draw_llvm_vertex_shader *shader =
+ (struct draw_llvm_vertex_shader *)base;
+
+ /* Do something to free compiled shader:
+ */
+
+ FREE( (void*) shader->base.state.tokens );
+ FREE( shader );
+}
+
+
+
+
+struct draw_vertex_shader *
+draw_create_vs_llvm(struct draw_context *draw,
+ const struct pipe_shader_state *templ)
+{
+ struct draw_llvm_vertex_shader *vs;
+
+ vs = CALLOC_STRUCT( draw_llvm_vertex_shader );
+ if (vs == NULL)
+ return NULL;
+
+ /* we make a private copy of the tokens */
+ vs->base.state.tokens = tgsi_dup_tokens(templ->tokens);
+ if (!vs->base.state.tokens) {
+ FREE(vs);
+ return NULL;
+ }
+
+ tgsi_scan_shader(vs->base.state.tokens, &vs->base.info);
+
+ vs->base.draw = draw;
+ vs->base.prepare = vs_llvm_prepare;
+ vs->base.create_varient = draw_vs_varient_generic;
+ vs->base.run_linear = vs_llvm_run_linear;
+ vs->base.delete = vs_llvm_delete;
+ vs->machine = draw->vs.machine;
+
+ {
+ struct gallivm_ir *ir = gallivm_ir_new(GALLIVM_VS);
+ gallivm_ir_set_layout(ir, GALLIVM_SOA);
+ gallivm_ir_set_components(ir, 4);
+ gallivm_ir_fill_from_tgsi(ir, vs->base.state.tokens);
+ vs->llvm_prog = gallivm_ir_compile(ir);
+ gallivm_ir_delete(ir);
+ }
+
+ draw->vs.engine = gallivm_global_cpu_engine();
+
+ /* XXX: Why are there two versions of this? Shouldn't creating the
+    * engine be a separate operation from compiling a shader?
+ */
+ if (!draw->vs.engine) {
+ draw->vs.engine = gallivm_cpu_engine_create(vs->llvm_prog);
+ }
+ else {
+ gallivm_cpu_jit_compile(draw->vs.engine, vs->llvm_prog);
+ }
+
+ return &vs->base;
+}
+
+
+
+
+
+#else
+
+struct draw_vertex_shader *
+draw_create_vs_llvm(struct draw_context *draw,
+ const struct pipe_shader_state *shader)
+{
+ return NULL;
+}
+
+#endif
diff --git a/src/gallium/auxiliary/draw/draw_vs_ppc.c b/src/gallium/auxiliary/draw/draw_vs_ppc.c
new file mode 100644
index 0000000000..ad184bd696
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_vs_ppc.c
@@ -0,0 +1,245 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Brian Paul
+ */
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "pipe/p_config.h"
+
+#include "draw_vs.h"
+
+#if defined(PIPE_ARCH_PPC)
+
+#include "pipe/p_shader_tokens.h"
+
+#include "draw_private.h"
+#include "draw_context.h"
+
+#include "rtasm/rtasm_cpu.h"
+#include "rtasm/rtasm_ppc.h"
+#include "tgsi/tgsi_ppc.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_exec.h"
+
+
+
+typedef void (PIPE_CDECL *codegen_function) (float (*inputs)[4][4],
+ float (*outputs)[4][4],
+ float (*temps)[4][4],
+ float (*immeds)[4],
+ float (*consts)[4],
+ const float *builtins);
+
+
+struct draw_ppc_vertex_shader {
+ struct draw_vertex_shader base;
+ struct ppc_function ppc_program;
+
+ codegen_function func;
+};
+
+
+static void
+vs_ppc_prepare( struct draw_vertex_shader *base,
+ struct draw_context *draw )
+{
+ /* nothing */
+}
+
+
+/**
+ * Simplified vertex shader interface for the pt paths. Given the
+ * complexity of code-generating vertex fetch, shading and emit together,
+ * this path runs just the shader and leaves the other stages to be
+ * handled separately.
+ */
+static void
+vs_ppc_run_linear( struct draw_vertex_shader *base,
+ const float (*input)[4],
+ float (*output)[4],
+ const float (*constants)[4],
+ unsigned count,
+ unsigned input_stride,
+ unsigned output_stride )
+{
+ struct draw_ppc_vertex_shader *shader = (struct draw_ppc_vertex_shader *)base;
+ unsigned int i;
+
+#define MAX_VERTICES 4
+
+ /* loop over verts */
+ for (i = 0; i < count; i += MAX_VERTICES) {
+ const uint max_vertices = MIN2(MAX_VERTICES, count - i);
+ float inputs_soa[PIPE_MAX_SHADER_INPUTS][4][4] ALIGN16_ATTRIB;
+ float outputs_soa[PIPE_MAX_SHADER_OUTPUTS][4][4] ALIGN16_ATTRIB;
+ float temps_soa[TGSI_EXEC_NUM_TEMPS][4][4] ALIGN16_ATTRIB;
+ uint attr;
+
+ /* convert (up to) four input verts to SoA format */
+ for (attr = 0; attr < base->info.num_inputs; attr++) {
+ const float *vIn = (const float *) input;
+ uint vert;
+ for (vert = 0; vert < max_vertices; vert++) {
+#if 0
+ if (attr==0)
+ printf("Input v%d a%d: %f %f %f %f\n",
+ vert, attr, vIn[0], vIn[1], vIn[2], vIn[3]);
+#endif
+ inputs_soa[attr][0][vert] = vIn[attr * 4 + 0];
+ inputs_soa[attr][1][vert] = vIn[attr * 4 + 1];
+ inputs_soa[attr][2][vert] = vIn[attr * 4 + 2];
+ inputs_soa[attr][3][vert] = vIn[attr * 4 + 3];
+ vIn += input_stride / 4;
+ }
+ }
+
+ /* run compiled shader
+ */
+ shader->func(inputs_soa, outputs_soa, temps_soa,
+ (float (*)[4]) shader->base.immediates,
+ (float (*)[4]) constants,
+ ppc_builtin_constants);
+
+ /* convert (up to) four output verts from SoA back to AoS format */
+ for (attr = 0; attr < base->info.num_outputs; attr++) {
+ float *vOut = (float *) output;
+ uint vert;
+ for (vert = 0; vert < max_vertices; vert++) {
+ vOut[attr * 4 + 0] = outputs_soa[attr][0][vert];
+ vOut[attr * 4 + 1] = outputs_soa[attr][1][vert];
+ vOut[attr * 4 + 2] = outputs_soa[attr][2][vert];
+ vOut[attr * 4 + 3] = outputs_soa[attr][3][vert];
+#if 0
+ if (attr==0)
+ printf("Output v%d a%d: %f %f %f %f\n",
+ vert, attr, vOut[0], vOut[1], vOut[2], vOut[3]);
+#endif
+ vOut += output_stride / 4;
+ }
+ }
+
+ /* advance to next group of four input/output verts */
+ input = (const float (*)[4])((const char *)input + input_stride * max_vertices);
+ output = (float (*)[4])((char *)output + output_stride * max_vertices);
+ }
+}
+
+
+static void
+vs_ppc_delete( struct draw_vertex_shader *base )
+{
+ struct draw_ppc_vertex_shader *shader = (struct draw_ppc_vertex_shader *)base;
+
+ ppc_release_func( &shader->ppc_program );
+
+ align_free( (void *) shader->base.immediates );
+
+ FREE( (void*) shader->base.state.tokens );
+ FREE( shader );
+}
+
+
+struct draw_vertex_shader *
+draw_create_vs_ppc(struct draw_context *draw,
+ const struct pipe_shader_state *templ)
+{
+ struct draw_ppc_vertex_shader *vs;
+
+ vs = CALLOC_STRUCT( draw_ppc_vertex_shader );
+ if (vs == NULL)
+ return NULL;
+
+ /* we make a private copy of the tokens */
+ vs->base.state.tokens = tgsi_dup_tokens(templ->tokens);
+ if (!vs->base.state.tokens)
+ goto fail;
+
+ tgsi_scan_shader(templ->tokens, &vs->base.info);
+
+ vs->base.draw = draw;
+#if 0
+ if (1)
+ vs->base.create_varient = draw_vs_varient_aos_ppc;
+ else
+#endif
+ vs->base.create_varient = draw_vs_varient_generic;
+ vs->base.prepare = vs_ppc_prepare;
+ vs->base.run_linear = vs_ppc_run_linear;
+ vs->base.delete = vs_ppc_delete;
+
+ vs->base.immediates = align_malloc(TGSI_EXEC_NUM_IMMEDIATES * 4 *
+ sizeof(float), 16);
+
+ ppc_init_func( &vs->ppc_program );
+
+#if 0
+ ppc_print_code(&vs->ppc_program, TRUE);
+ ppc_indent(&vs->ppc_program, 8);
+#endif
+
+ if (!tgsi_emit_ppc( (struct tgsi_token *) vs->base.state.tokens,
+ &vs->ppc_program,
+ (float (*)[4]) vs->base.immediates,
+ TRUE ))
+ goto fail;
+
+ vs->func = (codegen_function) ppc_get_func( &vs->ppc_program );
+ if (!vs->func) {
+ goto fail;
+ }
+
+ return &vs->base;
+
+fail:
+ /*
+ debug_error("tgsi_emit_ppc() failed, falling back to interpreter\n");
+ */
+
+ ppc_release_func( &vs->ppc_program );
+
+ FREE(vs);
+ return NULL;
+}
+
+
+
+#else /* PIPE_ARCH_PPC */
+
+
+struct draw_vertex_shader *
+draw_create_vs_ppc( struct draw_context *draw,
+ const struct pipe_shader_state *templ )
+{
+ return (void *) 0;
+}
+
+
+#endif /* PIPE_ARCH_PPC */
diff --git a/src/gallium/auxiliary/draw/draw_vs_sse.c b/src/gallium/auxiliary/draw/draw_vs_sse.c
new file mode 100644
index 0000000000..702051387a
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_vs_sse.c
@@ -0,0 +1,217 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Brian Paul
+ */
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "pipe/p_config.h"
+
+#include "draw_vs.h"
+
+#if defined(PIPE_ARCH_X86)
+
+#include "pipe/p_shader_tokens.h"
+
+#include "draw_private.h"
+#include "draw_context.h"
+
+#include "rtasm/rtasm_cpu.h"
+#include "rtasm/rtasm_x86sse.h"
+#include "tgsi/tgsi_sse2.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_exec.h"
+
+#define SSE_MAX_VERTICES 4
+
+
+struct draw_sse_vertex_shader {
+ struct draw_vertex_shader base;
+ struct x86_function sse2_program;
+
+ tgsi_sse2_vs_func func;
+
+ struct tgsi_exec_machine *machine;
+};
+
+
+static void
+vs_sse_prepare( struct draw_vertex_shader *base,
+ struct draw_context *draw )
+{
+ struct draw_sse_vertex_shader *shader = (struct draw_sse_vertex_shader *)base;
+ struct tgsi_exec_machine *machine = shader->machine;
+
+ machine->Samplers = draw->vs.samplers;
+}
+
+
+
+/* Simplified vertex shader interface for the pt paths. Given the
+ * complexity of code-generating vertex fetch, shading and emit together,
+ * this path runs just the shader and leaves the other stages to be
+ * handled separately.
+ */
+static void
+vs_sse_run_linear( struct draw_vertex_shader *base,
+ const float (*input)[4],
+ float (*output)[4],
+ const float (*constants)[4],
+ unsigned count,
+ unsigned input_stride,
+ unsigned output_stride )
+{
+ struct draw_sse_vertex_shader *shader = (struct draw_sse_vertex_shader *)base;
+ struct tgsi_exec_machine *machine = shader->machine;
+ unsigned int i;
+
+ /* By default, execute all channels. XXX move this inside the loop
+ * below when we support shader conditionals/loops.
+ */
+ tgsi_set_exec_mask(machine, 1, 1, 1, 1);
+
+ for (i = 0; i < count; i += MAX_TGSI_VERTICES) {
+ unsigned int max_vertices = MIN2(MAX_TGSI_VERTICES, count - i);
+
+ if (max_vertices < 4) {
+ /* disable the unused execution channels */
+ tgsi_set_exec_mask(machine,
+ 1,
+ max_vertices > 1,
+ max_vertices > 2,
+ 0);
+ }
+
+ /* run compiled shader
+ */
+ shader->func(machine,
+ constants,
+ shader->base.immediates,
+ input,
+ base->info.num_inputs,
+ input_stride,
+ output,
+ base->info.num_outputs,
+ output_stride );
+
+ input = (const float (*)[4])((const char *)input + input_stride * max_vertices);
+ output = (float (*)[4])((char *)output + output_stride * max_vertices);
+ }
+}
+
+
+
+
+static void
+vs_sse_delete( struct draw_vertex_shader *base )
+{
+ struct draw_sse_vertex_shader *shader = (struct draw_sse_vertex_shader *)base;
+
+ x86_release_func( &shader->sse2_program );
+
+ align_free( (void *) shader->base.immediates );
+
+ FREE( (void*) shader->base.state.tokens );
+ FREE( shader );
+}
+
+
+struct draw_vertex_shader *
+draw_create_vs_sse(struct draw_context *draw,
+ const struct pipe_shader_state *templ)
+{
+ struct draw_sse_vertex_shader *vs;
+
+ if (!rtasm_cpu_has_sse2())
+ return NULL;
+
+ vs = CALLOC_STRUCT( draw_sse_vertex_shader );
+ if (vs == NULL)
+ return NULL;
+
+ /* we make a private copy of the tokens */
+ vs->base.state.tokens = tgsi_dup_tokens(templ->tokens);
+ if (!vs->base.state.tokens)
+ goto fail;
+
+ tgsi_scan_shader(templ->tokens, &vs->base.info);
+
+ vs->base.draw = draw;
+ if (1)
+ vs->base.create_varient = draw_vs_varient_aos_sse;
+ else
+ vs->base.create_varient = draw_vs_varient_generic;
+ vs->base.prepare = vs_sse_prepare;
+ vs->base.run_linear = vs_sse_run_linear;
+ vs->base.delete = vs_sse_delete;
+
+ vs->base.immediates = align_malloc(TGSI_EXEC_NUM_IMMEDIATES * 4 *
+ sizeof(float), 16);
+
+ vs->machine = draw->vs.machine;
+
+ x86_init_func( &vs->sse2_program );
+
+ if (!tgsi_emit_sse2( (struct tgsi_token *) vs->base.state.tokens,
+ &vs->sse2_program,
+ (float (*)[4])vs->base.immediates,
+ TRUE ))
+ goto fail;
+
+ vs->func = (tgsi_sse2_vs_func) x86_get_func( &vs->sse2_program );
+ if (!vs->func) {
+ goto fail;
+ }
+
+ return &vs->base;
+
+fail:
+ debug_error("tgsi_emit_sse2() failed, falling back to interpreter\n");
+
+ x86_release_func( &vs->sse2_program );
+
+ FREE(vs);
+ return NULL;
+}
+
+
+
+#else
+
+struct draw_vertex_shader *
+draw_create_vs_sse( struct draw_context *draw,
+ const struct pipe_shader_state *templ )
+{
+ return (void *) 0;
+}
+
+
+#endif
+
diff --git a/src/gallium/auxiliary/draw/draw_vs_varient.c b/src/gallium/auxiliary/draw/draw_vs_varient.c
new file mode 100644
index 0000000000..7ee567d478
--- /dev/null
+++ b/src/gallium/auxiliary/draw/draw_vs_varient.c
@@ -0,0 +1,322 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "util/u_memory.h"
+#include "util/u_math.h"
+#include "draw/draw_context.h"
+#include "draw/draw_private.h"
+#include "draw/draw_vbuf.h"
+#include "draw/draw_vertex.h"
+#include "draw/draw_vs.h"
+#include "translate/translate.h"
+#include "translate/translate_cache.h"
+
+/* A first pass at incorporating vertex fetch/emit functionality into
+ * the vertex shader itself.
+ */
+struct draw_vs_varient_generic {
+ struct draw_vs_varient base;
+
+ struct draw_vertex_shader *shader;
+ struct draw_context *draw;
+
+ /* Basic plan is to run these two translate functions before/after
+ * the vertex shader's existing run_linear() routine to simulate
+ * the inclusion of this functionality into the shader...
+ *
+ * Next will look at actually including it.
+ */
+ struct translate *fetch;
+ struct translate *emit;
+
+ unsigned temp_vertex_stride;
+};
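+
+/* Per-draw flow implemented by the run functions below (a summary of the
+ * existing code, not additional behaviour):
+ *
+ *   fetch->run()/run_elts()  gather vertex attributes into a temp buffer
+ *   vs->run_linear()         run the shader in place on that buffer
+ *   do_[rhw_]viewport()      optional w-divide and viewport transform
+ *   emit->run()              translate into the final vertex layout
+ */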
+
+
+
+
+
+static void vsvg_set_buffer( struct draw_vs_varient *varient,
+ unsigned buffer,
+ const void *ptr,
+ unsigned stride )
+{
+ struct draw_vs_varient_generic *vsvg = (struct draw_vs_varient_generic *)varient;
+
+ vsvg->fetch->set_buffer(vsvg->fetch,
+ buffer,
+ ptr,
+ stride);
+}
+
+
+/* Mainly for debug at this stage:
+ */
+static void do_rhw_viewport( struct draw_vs_varient_generic *vsvg,
+ unsigned count,
+ void *output_buffer )
+{
+ char *ptr = (char *)output_buffer;
+ const float *scale = vsvg->base.vs->draw->viewport.scale;
+ const float *trans = vsvg->base.vs->draw->viewport.translate;
+ unsigned stride = vsvg->temp_vertex_stride;
+ unsigned j;
+
+ ptr += vsvg->base.vs->position_output * 4 * sizeof(float);
+
+ for (j = 0; j < count; j++, ptr += stride) {
+ float *data = (float *)ptr;
+ float w = 1.0f / data[3];
+
+ data[0] = data[0] * w * scale[0] + trans[0];
+ data[1] = data[1] * w * scale[1] + trans[1];
+ data[2] = data[2] * w * scale[2] + trans[2];
+ data[3] = w;
+ }
+}
+
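+/* As do_rhw_viewport(), but without the reciprocal-w divide. */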
+static void do_viewport( struct draw_vs_varient_generic *vsvg,
+ unsigned count,
+ void *output_buffer )
+{
+ char *ptr = (char *)output_buffer;
+ const float *scale = vsvg->base.vs->draw->viewport.scale;
+ const float *trans = vsvg->base.vs->draw->viewport.translate;
+ unsigned stride = vsvg->temp_vertex_stride;
+ unsigned j;
+
+ ptr += vsvg->base.vs->position_output * 4 * sizeof(float);
+
+ for (j = 0; j < count; j++, ptr += stride) {
+ float *data = (float *)ptr;
+
+ data[0] = data[0] * scale[0] + trans[0];
+ data[1] = data[1] * scale[1] + trans[1];
+ data[2] = data[2] * scale[2] + trans[2];
+ }
+}
+
+
+static void PIPE_CDECL vsvg_run_elts( struct draw_vs_varient *varient,
+ const unsigned *elts,
+ unsigned count,
+ void *output_buffer)
+{
+ struct draw_vs_varient_generic *vsvg = (struct draw_vs_varient_generic *)varient;
+ unsigned temp_vertex_stride = vsvg->temp_vertex_stride;
+ void *temp_buffer = MALLOC( align(count,4) * temp_vertex_stride );
+
+ if (0) debug_printf("%s %d \n", __FUNCTION__, count);
+
+ /* Want to do this in small batches for cache locality?
+ */
+
+ vsvg->fetch->run_elts( vsvg->fetch,
+ elts,
+ count,
+ temp_buffer );
+
+ vsvg->base.vs->run_linear( vsvg->base.vs,
+ temp_buffer,
+ temp_buffer,
+ (const float (*)[4])vsvg->base.vs->draw->pt.user.constants,
+ count,
+ temp_vertex_stride,
+ temp_vertex_stride);
+
+
+ if (vsvg->base.key.clip) {
+ /* not really handling clipping, just do the rhw so we can
+ * see the results...
+ */
+ do_rhw_viewport( vsvg,
+ count,
+ temp_buffer );
+ }
+ else if (vsvg->base.key.viewport) {
+ do_viewport( vsvg,
+ count,
+ temp_buffer );
+ }
+
+
+ vsvg->emit->set_buffer( vsvg->emit,
+ 0,
+ temp_buffer,
+ temp_vertex_stride );
+
+ vsvg->emit->set_buffer( vsvg->emit,
+ 1,
+ &vsvg->draw->rasterizer->point_size,
+ 0);
+
+ vsvg->emit->run( vsvg->emit,
+ 0, count,
+ output_buffer );
+
+ FREE(temp_buffer);
+}
+
+
+static void PIPE_CDECL vsvg_run_linear( struct draw_vs_varient *varient,
+ unsigned start,
+ unsigned count,
+ void *output_buffer )
+{
+ struct draw_vs_varient_generic *vsvg = (struct draw_vs_varient_generic *)varient;
+ unsigned temp_vertex_stride = vsvg->temp_vertex_stride;
+ void *temp_buffer = MALLOC( align(count,4) * temp_vertex_stride );
+
+ if (0) debug_printf("%s %d %d (sz %d, %d)\n", __FUNCTION__, start, count,
+ vsvg->base.key.output_stride,
+ temp_vertex_stride);
+
+ vsvg->fetch->run( vsvg->fetch,
+ start,
+ count,
+ temp_buffer );
+
+ vsvg->base.vs->run_linear( vsvg->base.vs,
+ temp_buffer,
+ temp_buffer,
+ (const float (*)[4])vsvg->base.vs->draw->pt.user.constants,
+ count,
+ temp_vertex_stride,
+ temp_vertex_stride);
+
+ if (vsvg->base.key.clip) {
+ /* not really handling clipping, just do the rhw so we can
+ * see the results...
+ */
+ do_rhw_viewport( vsvg,
+ count,
+ temp_buffer );
+ }
+ else if (vsvg->base.key.viewport) {
+ do_viewport( vsvg,
+ count,
+ temp_buffer );
+ }
+
+ vsvg->emit->set_buffer( vsvg->emit,
+ 0,
+ temp_buffer,
+ temp_vertex_stride );
+
+ vsvg->emit->set_buffer( vsvg->emit,
+ 1,
+ &vsvg->draw->rasterizer->point_size,
+ 0);
+
+ vsvg->emit->run( vsvg->emit,
+ 0, count,
+ output_buffer );
+
+ FREE(temp_buffer);
+}
+
+
+
+
+
+static void vsvg_destroy( struct draw_vs_varient *varient )
+{
+ FREE(varient);
+}
+
+
+struct draw_vs_varient *draw_vs_varient_generic( struct draw_vertex_shader *vs,
+ const struct draw_vs_varient_key *key )
+{
+ unsigned i;
+ struct translate_key fetch, emit;
+
+ struct draw_vs_varient_generic *vsvg = CALLOC_STRUCT( draw_vs_varient_generic );
+ if (vsvg == NULL)
+ return NULL;
+
+ vsvg->base.key = *key;
+ vsvg->base.vs = vs;
+ vsvg->base.set_buffer = vsvg_set_buffer;
+ vsvg->base.run_elts = vsvg_run_elts;
+ vsvg->base.run_linear = vsvg_run_linear;
+ vsvg->base.destroy = vsvg_destroy;
+
+ vsvg->draw = vs->draw;
+
+ vsvg->temp_vertex_stride = MAX2(key->nr_inputs,
+ vsvg->base.vs->info.num_outputs) * 4 * sizeof(float);
+
+ /* Build free-standing fetch and emit functions:
+ */
+ fetch.nr_elements = key->nr_inputs;
+ fetch.output_stride = vsvg->temp_vertex_stride;
+ for (i = 0; i < key->nr_inputs; i++) {
+ fetch.element[i].input_format = key->element[i].in.format;
+ fetch.element[i].input_buffer = key->element[i].in.buffer;
+ fetch.element[i].input_offset = key->element[i].in.offset;
+ fetch.element[i].output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+ fetch.element[i].output_offset = i * 4 * sizeof(float);
+ assert(fetch.element[i].output_offset < fetch.output_stride);
+ }
+
+
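+ /* Build the emit key. EMIT_1F_PSIZE outputs are not taken from the
+ * shader results; they are read from buffer 1, which the run functions
+ * above bind to the rasterizer's point_size.
+ */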
+ emit.nr_elements = key->nr_outputs;
+ emit.output_stride = key->output_stride;
+ for (i = 0; i < key->nr_outputs; i++) {
+ if (key->element[i].out.format != EMIT_1F_PSIZE)
+ {
+ emit.element[i].input_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+ emit.element[i].input_buffer = 0;
+ emit.element[i].input_offset = key->element[i].out.vs_output * 4 * sizeof(float);
+ emit.element[i].output_format = draw_translate_vinfo_format(key->element[i].out.format);
+ emit.element[i].output_offset = key->element[i].out.offset;
+ assert(emit.element[i].input_offset <= fetch.output_stride);
+ }
+ else {
+ emit.element[i].input_format = PIPE_FORMAT_R32_FLOAT;
+ emit.element[i].input_buffer = 1;
+ emit.element[i].input_offset = 0;
+ emit.element[i].output_format = PIPE_FORMAT_R32_FLOAT;
+ emit.element[i].output_offset = key->element[i].out.offset;
+ }
+ }
+
+ vsvg->fetch = draw_vs_get_fetch( vs->draw, &fetch );
+ vsvg->emit = draw_vs_get_emit( vs->draw, &emit );
+
+ return &vsvg->base;
+}
+
+
+
+
+
diff --git a/src/gallium/auxiliary/gallivm/Makefile b/src/gallium/auxiliary/gallivm/Makefile
new file mode 100644
index 0000000000..5a96d94ec3
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/Makefile
@@ -0,0 +1,92 @@
+# -*-makefile-*-
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = gallivm
+
+
+GALLIVM_SOURCES = \
+ gallivm.cpp \
+ gallivm_cpu.cpp \
+ instructions.cpp \
+ loweringpass.cpp \
+ tgsitollvm.cpp \
+ storage.cpp \
+ storagesoa.cpp \
+ instructionssoa.cpp
+
+INC_SOURCES = gallivm_builtins.cpp gallivmsoabuiltins.cpp
+
+CPP_SOURCES = \
+ $(GALLIVM_SOURCES)
+
+C_SOURCES =
+ASM_SOURCES =
+
+OBJECTS = $(C_SOURCES:.c=.o) \
+ $(CPP_SOURCES:.cpp=.o) \
+ $(ASM_SOURCES:.S=.o)
+
+### Include directories
+INCLUDES = \
+ -I. \
+ -I$(TOP)/src/gallium/drivers \
+ -I$(TOP)/src/gallium/auxiliary \
+ -I$(TOP)/src/gallium/include \
+ -I$(TOP)/src/mesa \
+ -I$(TOP)/include
+
+
+##### RULES #####
+
+.c.o:
+ $(CC) -c $(INCLUDES) $(LLVM_CFLAGS) $(CFLAGS) $(DRIVER_DEFINES) $< -o $@
+
+.cpp.o:
+ $(CXX) -c $(INCLUDES) $(LLVM_CXXFLAGS) $(CXXFLAGS) $(DRIVER_DEFINES) $< -o $@
+
+.S.o:
+ $(CC) -c $(INCLUDES) $(CFLAGS) $(DRIVER_DEFINES) $< -o $@
+
+##### TARGETS #####
+
+default:: depend symlinks $(LIBNAME)
+
+
+$(LIBNAME): $(OBJECTS) Makefile
+ $(TOP)/bin/mklib -o $@ -static $(OBJECTS)
+
+
+depend: $(C_SOURCES) $(CPP_SOURCES) $(ASM_SOURCES) $(INC_SOURCES)
+ rm -f depend
+ touch depend
+ $(MKDEP) $(MKDEP_OPTIONS) $(DRIVER_DEFINES) $(INCLUDES) $(C_SOURCES) $(CPP_SOURCES) \
+ $(ASM_SOURCES) $(INC_SOURCES) 2> /dev/null
+
+
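+# The builtin C sources below are compiled to LLVM bitcode and wrapped in
+# C byte arrays (gallivm_builtins.cpp / gallivmsoabuiltins.cpp) so the
+# bitcode can be linked straight into the library.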
+gallivm_builtins.cpp: llvm_builtins.c
+ clang --emit-llvm < $< |llvm-as|opt -std-compile-opts > temp1.bin
+ (echo "static const unsigned char llvm_builtins_data[] = {"; od -txC temp1.bin | sed -e "s/^[0-9]*//" -e s"/ \([0-9a-f][0-9a-f]\)/0x\1,/g" -e"\$$d" | sed -e"\$$s/,$$/,0x00};/") >$@
+ rm temp1.bin
+
+gallivmsoabuiltins.cpp: soabuiltins.c
+ clang --emit-llvm < $< |llvm-as|opt -std-compile-opts > temp2.bin
+ (echo "static const unsigned char soabuiltins_data[] = {"; od -txC temp2.bin | sed -e "s/^[0-9]*//" -e s"/ \([0-9a-f][0-9a-f]\)/0x\1,/g" -e"\$$d" | sed -e"\$$s/,$$/,0x00};/") >$@
+ rm temp2.bin
+
+# Emacs tags
+tags:
+ etags `find . -name \*.[ch]` `find ../include`
+
+
+# Remove .o and backup files
+clean:
+ -rm -f *.o */*.o *~ *.so *~ server/*.o
+ -rm -f depend depend.bak
+ -rm -f gallivm_builtins.cpp
+ -rm -f gallivmsoabuiltins.cpp
+
+symlinks:
+
+
+include depend
diff --git a/src/gallium/auxiliary/gallivm/SConscript b/src/gallium/auxiliary/gallivm/SConscript
new file mode 100644
index 0000000000..c0aa51b90a
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/SConscript
@@ -0,0 +1,16 @@
+Import('*')
+
+gallivm = env.ConvenienceLibrary(
+ target = 'gallivm',
+ source = [
+ 'gallivm.cpp',
+ 'gallivm_cpu.cpp',
+ 'instructions.cpp',
+ 'loweringpass.cpp',
+ 'tgsitollvm.cpp',
+ 'storage.cpp',
+ 'storagesoa.cpp',
+ 'instructionssoa.cpp',
+ ])
+
+auxiliaries.insert(0, gallivm)
diff --git a/src/gallium/auxiliary/gallivm/gallivm.cpp b/src/gallium/auxiliary/gallivm/gallivm.cpp
new file mode 100644
index 0000000000..f4af5cc8ad
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/gallivm.cpp
@@ -0,0 +1,332 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Zack Rusin zack@tungstengraphics.com
+ */
+#ifdef MESA_LLVM
+
+#include "gallivm.h"
+#include "gallivm_p.h"
+
+#include "instructions.h"
+#include "loweringpass.h"
+#include "storage.h"
+#include "tgsitollvm.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_shader_tokens.h"
+
+#include "tgsi/tgsi_exec.h"
+#include "tgsi/tgsi_dump.h"
+
+#include <llvm/Module.h>
+#include <llvm/CallingConv.h>
+#include <llvm/Constants.h>
+#include <llvm/DerivedTypes.h>
+#include <llvm/Instructions.h>
+#include <llvm/ModuleProvider.h>
+#include <llvm/Pass.h>
+#include <llvm/PassManager.h>
+#include <llvm/Attributes.h>
+#include <llvm/Support/PatternMatch.h>
+#include <llvm/ExecutionEngine/JIT.h>
+#include <llvm/ExecutionEngine/Interpreter.h>
+#include <llvm/ExecutionEngine/GenericValue.h>
+#include <llvm/Support/MemoryBuffer.h>
+#include <llvm/LinkAllPasses.h>
+#include <llvm/Analysis/Verifier.h>
+#include <llvm/Analysis/LoopPass.h>
+#include <llvm/Target/TargetData.h>
+#include <llvm/Bitcode/ReaderWriter.h>
+#include <llvm/Transforms/Utils/Cloning.h>
+
+#include <sstream>
+#include <fstream>
+#include <iostream>
+
+static int GLOBAL_ID = 0;
+
+using namespace llvm;
+
+static inline
+void AddStandardCompilePasses(PassManager &PM)
+{
+ PM.add(new LoweringPass());
+ PM.add(createVerifierPass()); // Verify that input is correct
+
+ PM.add(createLowerSetJmpPass()); // Lower llvm.setjmp/.longjmp
+
+ //PM.add(createStripSymbolsPass(true));
+
+ PM.add(createRaiseAllocationsPass()); // call %malloc -> malloc inst
+ PM.add(createCFGSimplificationPass()); // Clean up disgusting code
+ PM.add(createPromoteMemoryToRegisterPass());// Kill useless allocas
+ PM.add(createGlobalOptimizerPass()); // Optimize out global vars
+ PM.add(createGlobalDCEPass()); // Remove unused fns and globs
+ PM.add(createIPConstantPropagationPass());// IP Constant Propagation
+ PM.add(createDeadArgEliminationPass()); // Dead argument elimination
+ PM.add(createInstructionCombiningPass()); // Clean up after IPCP & DAE
+ PM.add(createCFGSimplificationPass()); // Clean up after IPCP & DAE
+
+ PM.add(createPruneEHPass()); // Remove dead EH info
+
+ PM.add(createFunctionInliningPass()); // Inline small functions
+ PM.add(createArgumentPromotionPass()); // Scalarize uninlined fn args
+
+ PM.add(createTailDuplicationPass()); // Simplify cfg by copying code
+ PM.add(createInstructionCombiningPass()); // Cleanup for scalarrepl.
+ PM.add(createCFGSimplificationPass()); // Merge & remove BBs
+ PM.add(createScalarReplAggregatesPass()); // Break up aggregate allocas
+ PM.add(createInstructionCombiningPass()); // Combine silly seq's
+ PM.add(createCondPropagationPass()); // Propagate conditionals
+
+ PM.add(createTailCallEliminationPass()); // Eliminate tail calls
+ PM.add(createCFGSimplificationPass()); // Merge & remove BBs
+ PM.add(createReassociatePass()); // Reassociate expressions
+ PM.add(createLoopRotatePass());
+ PM.add(createLICMPass()); // Hoist loop invariants
+ PM.add(createLoopUnswitchPass()); // Unswitch loops.
+ PM.add(createLoopIndexSplitPass()); // Index split loops.
+ PM.add(createInstructionCombiningPass()); // Clean up after LICM/reassoc
+ PM.add(createIndVarSimplifyPass()); // Canonicalize indvars
+ PM.add(createLoopUnrollPass()); // Unroll small loops
+ PM.add(createInstructionCombiningPass()); // Clean up after the unroller
+ PM.add(createGVNPass()); // Remove redundancies
+ PM.add(createSCCPPass()); // Constant prop with SCCP
+
+ // Run instcombine after redundancy elimination to exploit opportunities
+ // opened up by them.
+ PM.add(createInstructionCombiningPass());
+ PM.add(createCondPropagationPass()); // Propagate conditionals
+
+ PM.add(createDeadStoreEliminationPass()); // Delete dead stores
+ PM.add(createAggressiveDCEPass()); // SSA based 'Aggressive DCE'
+ PM.add(createCFGSimplificationPass()); // Merge & remove BBs
+ PM.add(createSimplifyLibCallsPass()); // Library Call Optimizations
+ PM.add(createDeadTypeEliminationPass()); // Eliminate dead types
+ PM.add(createConstantMergePass()); // Merge dup global constants
+}
+
+void gallivm_prog_delete(struct gallivm_prog *prog)
+{
+ delete prog->module;
+ prog->module = 0;
+ prog->function = 0;
+ free(prog);
+}
+
+static inline void
+constant_interpolation(float (*inputs)[16][4],
+ const struct tgsi_interp_coef *coefs,
+ unsigned attrib,
+ unsigned chan)
+{
+ unsigned i;
+
+ for (i = 0; i < QUAD_SIZE; ++i) {
+ inputs[i][attrib][chan] = coefs[attrib].a0[chan];
+ }
+}
+
+static inline void
+linear_interpolation(float (*inputs)[16][4],
+ const struct tgsi_interp_coef *coefs,
+ unsigned attrib,
+ unsigned chan)
+{
+ unsigned i;
+
+ for( i = 0; i < QUAD_SIZE; i++ ) {
+ const float x = inputs[i][0][0];
+ const float y = inputs[i][0][1];
+
+ inputs[i][attrib][chan] =
+ coefs[attrib].a0[chan] +
+ coefs[attrib].dadx[chan] * x +
+ coefs[attrib].dady[chan] * y;
+ }
+}
+
+static inline void
+perspective_interpolation(float (*inputs)[16][4],
+ const struct tgsi_interp_coef *coefs,
+ unsigned attrib,
+ unsigned chan )
+{
+ unsigned i;
+
+ for( i = 0; i < QUAD_SIZE; i++ ) {
+ const float x = inputs[i][0][0];
+ const float y = inputs[i][0][1];
+ /* WPOS.w here is really 1/w */
+ assert(inputs[i][0][3] != 0.0);
+ const float w = 1.0f / inputs[i][0][3];
+
+ inputs[i][attrib][chan] =
+ (coefs[attrib].a0[chan] +
+ coefs[attrib].dadx[chan] * x +
+ coefs[attrib].dady[chan] * y) * w;
+ }
+}
+
+void gallivm_ir_dump(struct gallivm_ir *ir, const char *file_prefix)
+{
+ if (!ir || !ir->module)
+ return;
+
+ if (file_prefix) {
+ std::ostringstream stream;
+ stream << file_prefix;
+ stream << ir->id;
+ stream << ".ll";
+ std::string name = stream.str();
+ std::ofstream out(name.c_str());
+ if (!out) {
+ std::cerr << "Can't open file: " << stream.str() << std::endl;
+ return;
+ }
+ out << (*ir->module);
+ out.close();
+ } else {
+ const llvm::Module::FunctionListType &funcs = ir->module->getFunctionList();
+ llvm::Module::FunctionListType::const_iterator itr;
+ std::cout<<"; ---------- Start shader "<<ir->id<<std::endl;
+ for (itr = funcs.begin(); itr != funcs.end(); ++itr) {
+ const llvm::Function &func = (*itr);
+ std::string name = func.getName();
+ const llvm::Function *found = 0;
+ if (name.find("vs_shader") != std::string::npos ||
+ name.find("fs_shader") != std::string::npos ||
+ name.find("function") != std::string::npos)
+ found = &func;
+ if (found) {
+ std::cout<<*found<<std::endl;
+ }
+ }
+ std::cout<<"; ---------- End shader "<<ir->id<<std::endl;
+ }
+}
+
+
+void gallivm_prog_inputs_interpolate(struct gallivm_prog *prog,
+ float (*inputs)[16][4],
+ const struct tgsi_interp_coef *coef)
+{
+ for (int i = 0; i < prog->num_interp; ++i) {
+ const gallivm_interpolate &interp = prog->interpolators[i];
+ switch (interp.type) {
+ case TGSI_INTERPOLATE_CONSTANT:
+ constant_interpolation(inputs, coef, interp.attrib, interp.chan);
+ break;
+
+ case TGSI_INTERPOLATE_LINEAR:
+ linear_interpolation(inputs, coef, interp.attrib, interp.chan);
+ break;
+
+ case TGSI_INTERPOLATE_PERSPECTIVE:
+ perspective_interpolation(inputs, coef, interp.attrib, interp.chan);
+ break;
+
+ default:
+ assert( 0 );
+ }
+ }
+}
+
+
+struct gallivm_ir * gallivm_ir_new(enum gallivm_shader_type type)
+{
+ struct gallivm_ir *ir =
+ (struct gallivm_ir *)calloc(1, sizeof(struct gallivm_ir));
+ ++GLOBAL_ID;
+ ir->id = GLOBAL_ID;
+ ir->type = type;
+
+ return ir;
+}
+
+void gallivm_ir_set_layout(struct gallivm_ir *ir,
+ enum gallivm_vector_layout layout)
+{
+ ir->layout = layout;
+}
+
+void gallivm_ir_set_components(struct gallivm_ir *ir, int num)
+{
+ ir->num_components = num;
+}
+
+void gallivm_ir_fill_from_tgsi(struct gallivm_ir *ir,
+ const struct tgsi_token *tokens)
+{
+ std::cout << "Creating llvm from: " <<std::endl;
+ tgsi_dump(tokens, 0);
+
+ llvm::Module *mod = tgsi_to_llvmir(ir, tokens);
+ ir->module = mod;
+ gallivm_ir_dump(ir, 0);
+}
+
+void gallivm_ir_delete(struct gallivm_ir *ir)
+{
+ delete ir->module;
+ free(ir);
+}
+
+struct gallivm_prog * gallivm_ir_compile(struct gallivm_ir *ir)
+{
+ struct gallivm_prog *prog =
+ (struct gallivm_prog *)calloc(1, sizeof(struct gallivm_prog));
+
+ std::cout << "Before optimizations:"<<std::endl;
+ ir->module->dump();
+ std::cout<<"-------------------------------"<<std::endl;
+
+ PassManager veri;
+ veri.add(createVerifierPass());
+ veri.run(*ir->module);
+ llvm::Module *mod = llvm::CloneModule(ir->module);
+ prog->num_consts = ir->num_consts;
+ memcpy(prog->interpolators, ir->interpolators, sizeof(prog->interpolators));
+ prog->num_interp = ir->num_interp;
+
+ /* Run optimization passes over it */
+ PassManager passes;
+ passes.add(new TargetData(mod));
+ AddStandardCompilePasses(passes);
+ passes.run(*mod);
+ prog->module = mod;
+
+ std::cout << "After optimizations:"<<std::endl;
+ mod->dump();
+
+ return prog;
+}
+
+#endif /* MESA_LLVM */
diff --git a/src/gallium/auxiliary/gallivm/gallivm.h b/src/gallium/auxiliary/gallivm/gallivm.h
new file mode 100644
index 0000000000..36a64a7747
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/gallivm.h
@@ -0,0 +1,118 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Zack Rusin zack@tungstengraphics.com
+ */
+
+#ifndef GALLIVM_H
+#define GALLIVM_H
+
+/*
+ The LLVM representation consists of two stages: a layout-independent
+ intermediate representation, gallivm_ir, and a driver-specific
+ gallivm_prog. TGSI is first translated into gallivm_ir; after that the
+ driver can set a number of options on the gallivm_ir and have it
+ compiled into a gallivm_prog. The gallivm_prog can then either be
+ executed directly (assuming there's an LLVM JIT backend for the current
+ target) or used for machine code generation (assuming there's an LLVM
+ code generator for the current target).
+ */
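+
+/*
+ Typical usage, as a sketch only (this mirrors what draw_vs_llvm.c does in
+ this change for a SoA vertex shader; error handling and the CPU engine
+ setup are omitted, and "tokens" stands for the shader's TGSI tokens):
+
+   struct gallivm_ir *ir = gallivm_ir_new(GALLIVM_VS);
+   gallivm_ir_set_layout(ir, GALLIVM_SOA);
+   gallivm_ir_set_components(ir, 4);
+   gallivm_ir_fill_from_tgsi(ir, tokens);
+   struct gallivm_prog *prog = gallivm_ir_compile(ir);
+   gallivm_ir_delete(ir);
+ */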
+#if defined __cplusplus
+extern "C" {
+#endif
+
+#include "pipe/p_state.h"
+
+#ifdef MESA_LLVM
+
+struct tgsi_token;
+
+struct gallivm_ir;
+struct gallivm_prog;
+struct gallivm_cpu_engine;
+struct tgsi_interp_coef;
+struct tgsi_sampler;
+struct tgsi_exec_vector;
+
+enum gallivm_shader_type {
+ GALLIVM_VS,
+ GALLIVM_FS
+};
+
+enum gallivm_vector_layout {
+ GALLIVM_AOS,
+ GALLIVM_SOA
+};
+
+struct gallivm_ir *gallivm_ir_new(enum gallivm_shader_type type);
+void gallivm_ir_set_layout(struct gallivm_ir *ir,
+ enum gallivm_vector_layout layout);
+void gallivm_ir_set_components(struct gallivm_ir *ir, int num);
+void gallivm_ir_fill_from_tgsi(struct gallivm_ir *ir,
+ const struct tgsi_token *tokens);
+void gallivm_ir_delete(struct gallivm_ir *ir);
+
+
+struct gallivm_prog *gallivm_ir_compile(struct gallivm_ir *ir);
+
+void gallivm_prog_inputs_interpolate(struct gallivm_prog *prog,
+ float (*inputs)[PIPE_MAX_SHADER_INPUTS][4],
+ const struct tgsi_interp_coef *coefs);
+void gallivm_prog_dump(struct gallivm_prog *prog, const char *file_prefix);
+
+
+struct gallivm_cpu_engine *gallivm_cpu_engine_create(struct gallivm_prog *prog);
+struct gallivm_cpu_engine *gallivm_global_cpu_engine();
+int gallivm_cpu_vs_exec(struct gallivm_prog *prog,
+ struct tgsi_exec_machine *machine,
+ const float (*input)[4],
+ unsigned num_inputs,
+ float (*output)[4],
+ unsigned num_outputs,
+ const float (*constants)[4],
+ unsigned count,
+ unsigned input_stride,
+ unsigned output_stride);
+int gallivm_cpu_fs_exec(struct gallivm_prog *prog,
+ float x, float y,
+ float (*dests)[PIPE_MAX_SHADER_INPUTS][4],
+ float (*inputs)[PIPE_MAX_SHADER_INPUTS][4],
+ float (*consts)[4],
+ struct tgsi_sampler *samplers);
+void gallivm_cpu_jit_compile(struct gallivm_cpu_engine *ee, struct gallivm_prog *prog);
+void gallivm_cpu_engine_delete(struct gallivm_cpu_engine *ee);
+
+
+#endif /* MESA_LLVM */
+
+#if defined __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/gallium/auxiliary/gallivm/gallivm_builtins.cpp b/src/gallium/auxiliary/gallivm/gallivm_builtins.cpp
new file mode 100644
index 0000000000..634bac0150
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/gallivm_builtins.cpp
@@ -0,0 +1,140 @@
+static const unsigned char llvm_builtins_data[] = {
+0x42,0x43,0xc0,0xde,0x21,0x0c,0x00,0x00,0x27,0x02,0x00,0x00,0x01,0x10,0x00,0x00,
+0x10,0x00,0x00,0x00,0x07,0x81,0x23,0x91,0x41,0xc8,0x04,0x49,0x06,0x10,0x32,0x39,
+0x92,0x01,0x84,0x0c,0x25,0x05,0x08,0x19,0x1e,0x04,0x8b,0x62,0x80,0x14,0x45,0x02,
+0x42,0x92,0x0b,0x42,0xa4,0x10,0x32,0x14,0x38,0x08,0x18,0x49,0x0a,0x32,0x44,0x24,
+0x48,0x0a,0x90,0x21,0x23,0x44,0x72,0x80,0x8c,0x14,0x21,0x86,0x0a,0x8a,0x0a,0x64,
+0x0c,0x1f,0x00,0x00,0x49,0x18,0x00,0x00,0x03,0x00,0x00,0x00,0x0b,0x84,0xff,0xff,
+0xff,0xff,0x1f,0xc0,0x00,0x00,0x00,0x00,0x51,0x20,0x00,0x00,0x12,0x00,0x00,0x00,
+0x32,0x22,0x48,0x09,0x20,0x65,0x82,0x84,0x00,0x26,0x45,0x48,0x05,0x09,0x26,0x45,
+0xc6,0x05,0x42,0x52,0x26,0x08,0xae,0x19,0x80,0x61,0x04,0x02,0x98,0x23,0x00,0x83,
+0x29,0x80,0x21,0x00,0xb2,0x73,0x04,0x01,0x51,0x8a,0xf4,0x08,0x92,0xa4,0x39,0x47,
+0x80,0x50,0x2b,0x03,0x00,0xa0,0x08,0x21,0x5c,0x46,0x2b,0x44,0x08,0x21,0xd4,0x40,
+0x14,0x01,0x80,0x11,0x80,0x22,0x88,0x00,0x13,0x30,0x7c,0xc0,0x03,0x3b,0xf8,0x05,
+0x3b,0xa0,0x83,0x36,0xa8,0x07,0x77,0x58,0x07,0x77,0x78,0x87,0x7b,0x70,0x87,0x36,
+0x60,0x87,0x74,0x70,0x87,0x7a,0xc0,0x87,0x36,0x38,0x07,0x77,0xa8,0x87,0x0d,0xf7,
+0x50,0x0e,0x6d,0x00,0x0f,0x7a,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x7a,0x60,
+0x07,0x74,0xd0,0x06,0xe9,0x10,0x07,0x7a,0x80,0x07,0x7a,0x80,0x07,0x6d,0x90,0x0e,
+0x78,0xa0,0x07,0x78,0xa0,0x07,0x78,0xd0,0x06,0xe9,0x10,0x07,0x76,0xa0,0x07,0x71,
+0x60,0x07,0x7a,0x10,0x07,0x76,0xd0,0x06,0xe9,0x30,0x07,0x72,0xa0,0x07,0x73,0x20,
+0x07,0x7a,0x30,0x07,0x72,0xd0,0x06,0xe9,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,
+0x7a,0x60,0x07,0x74,0xd0,0x06,0xe6,0x30,0x07,0x72,0xa0,0x07,0x73,0x20,0x07,0x7a,
+0x30,0x07,0x72,0xd0,0x06,0xe6,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x7a,0x60,
+0x07,0x74,0xd0,0x06,0xf6,0x60,0x07,0x74,0xa0,0x07,0x76,0x40,0x07,0x7a,0x60,0x07,
+0x74,0xd0,0x06,0xf6,0x10,0x07,0x72,0x80,0x07,0x7a,0x10,0x07,0x72,0x80,0x07,0x7a,
+0x10,0x07,0x72,0x80,0x07,0x6d,0x10,0x0e,0x70,0xa0,0x07,0x70,0xa0,0x07,0x76,0x40,
+0x07,0x6d,0x60,0x0e,0x78,0x00,0x07,0x7a,0x10,0x07,0x72,0x80,0x07,0x7a,0x10,0x07,
+0x72,0x80,0x07,0x3a,0x0f,0x84,0x48,0x20,0x23,0x24,0x40,0x00,0x62,0x67,0x88,0x9f,
+0x19,0x92,0x24,0x00,0x10,0x04,0x00,0x00,0x00,0x43,0x92,0x04,0x08,0x00,0x00,0x00,
+0x00,0x60,0x48,0xa2,0x00,0x40,0x10,0x00,0x00,0x00,0x0c,0x49,0x16,0x00,0x08,0x02,
+0x00,0x00,0x80,0x21,0x89,0x02,0x00,0x41,0x00,0x00,0x00,0x30,0x24,0x61,0x80,0x00,
+0x00,0x00,0x00,0x00,0x86,0x24,0x07,0x10,0x00,0x00,0x00,0x00,0xc0,0x90,0x44,0x01,
+0x80,0x20,0x00,0x00,0x00,0x18,0x92,0x1c,0x40,0x00,0x00,0x00,0x00,0x00,0x43,0x12,
+0x05,0x00,0x82,0x00,0x00,0x00,0x60,0x48,0x52,0x00,0x40,0x10,0x00,0x00,0x00,0x64,
+0x81,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x32,0x1e,0x98,0x10,0x19,0x11,0x4c,0x90,
+0x8c,0x09,0x26,0x47,0xc6,0x04,0x43,0x8a,0x8a,0x59,0x8b,0x43,0x50,0xd2,0x09,0x02,
+0x81,0xd2,0x73,0x50,0xc9,0x0c,0x2a,0x99,0x41,0x25,0x33,0xa8,0x64,0x56,0x28,0x66,
+0x2d,0x0e,0x41,0xcf,0x2a,0x15,0x04,0x4a,0xcf,0x41,0x25,0x33,0xa8,0x64,0x06,0x95,
+0xcc,0xa0,0x92,0x59,0x01,0x00,0x00,0x00,0x53,0x82,0x26,0x0c,0x04,0x00,0x00,0x00,
+0x22,0x00,0x00,0x00,0x13,0x04,0x41,0x2c,0x10,0x00,0x00,0x00,0x05,0x00,0x00,0x00,
+0x04,0xc6,0x08,0x40,0x10,0x04,0xe1,0x70,0x18,0x23,0x00,0x41,0x10,0x84,0xc3,0x60,
+0x04,0x00,0x00,0x00,0xc3,0x0d,0xce,0x43,0x4c,0x37,0x3c,0x8e,0x34,0xdc,0x30,0x41,
+0xc2,0x74,0x03,0x34,0x51,0xc3,0x0d,0x4d,0x44,0x4c,0x37,0x44,0x8d,0x35,0x56,0x01,
+0x04,0xc3,0x55,0x21,0x16,0x0e,0x04,0x00,0x0f,0x00,0x00,0x00,0xd6,0x10,0x00,0xe6,
+0x10,0x04,0x76,0x81,0x00,0x3e,0x30,0x0c,0x91,0x4f,0x1b,0x05,0x21,0x30,0x8f,0x6d,
+0x13,0x48,0xe0,0x03,0xc3,0x10,0xf9,0xb4,0x55,0x20,0x81,0x0f,0x0c,0x43,0xe4,0xd7,
+0x66,0x41,0x08,0xcc,0xa3,0x1f,0x40,0x41,0x34,0x53,0x84,0x99,0xc4,0x20,0x30,0x8f,
+0x61,0x10,0x02,0xb0,0x2c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x61,0x20,0x00,0x00,
+0x27,0x00,0x00,0x00,0x13,0x04,0x43,0x2c,0x10,0x00,0x00,0x00,0x08,0x00,0x00,0x00,
+0x24,0x8a,0xa0,0x0c,0x46,0x00,0x4a,0x80,0xc2,0x1c,0x84,0x55,0x55,0xd6,0x1c,0x84,
+0x45,0x51,0x16,0x81,0x19,0x80,0x11,0x80,0x31,0x02,0x10,0x04,0x41,0xfc,0x03,0x00,
+0x63,0x08,0x0d,0x34,0xdc,0x70,0x55,0xc2,0x2c,0x43,0x20,0x60,0x73,0x0c,0xd3,0x15,
+0x8d,0x21,0x34,0xd1,0x18,0x42,0xf3,0x8c,0x55,0x00,0x81,0xa0,0x6d,0x73,0x0c,0x19,
+0xe7,0x60,0x87,0x52,0x38,0x10,0x00,0x00,0x10,0x00,0x00,0x00,0x27,0x50,0x20,0x05,
+0xd1,0x0c,0x17,0x60,0x20,0xc5,0x74,0x10,0x8d,0x65,0x14,0x13,0xf3,0xd4,0xb4,0x6d,
+0x14,0x13,0xf3,0xd4,0xb8,0x69,0x14,0x13,0xf3,0xd4,0xb6,0x75,0x14,0x13,0xf3,0xd4,
+0xba,0x35,0x0c,0x13,0xf3,0xd8,0x05,0x31,0x31,0x8f,0x6e,0x1c,0x84,0x00,0x2c,0xcb,
+0x01,0x14,0x44,0x33,0x45,0x98,0x61,0x0c,0x02,0xf3,0x00,0x00,0x00,0x00,0x00,0x00,
+0x61,0x20,0x00,0x00,0x81,0x00,0x00,0x00,0x13,0x04,0x4d,0x2c,0x10,0x00,0x00,0x00,
+0x04,0x00,0x00,0x00,0x24,0xca,0x60,0x04,0xa0,0x04,0x8a,0x80,0xc2,0x0c,0x00,0x91,
+0x11,0x00,0x00,0x00,0x63,0x08,0x4d,0x64,0x16,0xc1,0xe1,0x86,0xab,0x22,0x66,0x19,
+0x02,0x01,0x1b,0x43,0x70,0xa2,0x59,0x82,0x61,0x0c,0xe1,0x89,0x66,0x09,0x86,0x81,
+0x0a,0x20,0x0b,0x34,0x61,0x8e,0x81,0xda,0xa2,0x31,0x84,0x46,0xb2,0x8e,0xe0,0x70,
+0x83,0x57,0x11,0xb3,0x0c,0x44,0xf1,0x8d,0x21,0x38,0xd2,0x2c,0x81,0x31,0x86,0xf0,
+0x48,0xb3,0x04,0xc6,0x40,0x05,0x00,0x06,0x44,0x18,0x14,0x73,0x0c,0x9c,0x18,0x48,
+0x63,0x08,0xcd,0x64,0x64,0x40,0x70,0xb8,0xa1,0x0c,0x2a,0x62,0x96,0xe1,0x40,0xcc,
+0x60,0x0c,0xc1,0x99,0x66,0x09,0x92,0x31,0x84,0x67,0x9a,0x25,0x48,0x06,0x2a,0x80,
+0x33,0x38,0xd0,0x00,0x99,0x63,0x18,0x83,0x34,0x98,0xc6,0x10,0x1a,0xc8,0xd6,0x80,
+0xe0,0x70,0x03,0x1b,0x54,0xc4,0x2c,0x83,0xb2,0xb4,0xc1,0x18,0x82,0x03,0xcd,0x12,
+0x30,0x63,0x08,0x0f,0x34,0x4b,0xc0,0x0c,0x54,0x00,0x6e,0xa0,0xbc,0xc1,0x32,0xc7,
+0xa0,0x06,0x70,0x00,0x61,0x1c,0x84,0x03,0x01,0x00,0x00,0x00,0x4e,0x00,0x00,0x00,
+0x76,0x52,0x4c,0xcc,0x73,0xd3,0x24,0x05,0x64,0xec,0xcd,0x8d,0xcc,0xe5,0x87,0x46,
+0xc6,0x50,0x8a,0x89,0x79,0xee,0xdb,0x54,0x8a,0x89,0x79,0xee,0xdd,0x1a,0x88,0x89,
+0x79,0x68,0x73,0x20,0x26,0xe6,0xa9,0xed,0x81,0x98,0x98,0xc7,0x36,0x0b,0x62,0x62,
+0x9e,0xdb,0x32,0x88,0x89,0x79,0x72,0xd3,0x20,0x26,0xe6,0xd9,0x8d,0x83,0x98,0x98,
+0xa7,0xb7,0x95,0x62,0x62,0x9e,0xbb,0x27,0x2d,0x20,0x63,0x6f,0x6e,0x64,0x2e,0x3a,
+0x34,0x35,0x56,0x62,0x08,0x4e,0x53,0xd9,0xba,0xb5,0x14,0x02,0xf3,0xe0,0xf5,0x25,
+0x2c,0x82,0xd3,0x0c,0xbe,0xe0,0x34,0xd3,0x8d,0x9b,0x88,0x21,0x38,0xcd,0x60,0xd7,
+0x24,0x01,0x63,0xec,0xcd,0x8d,0xcc,0x45,0x87,0x44,0x80,0x8c,0xbd,0xb9,0x91,0xb9,
+0xfc,0xc4,0xd0,0x90,0x02,0x8c,0xb1,0x37,0x37,0x32,0x97,0x1f,0x73,0x29,0x26,0xe6,
+0xc1,0x71,0x7b,0x29,0x26,0xe6,0xc1,0x77,0xfb,0x28,0x04,0xe6,0xa9,0x6f,0x52,0x01,
+0x32,0xf6,0xe6,0x46,0xe6,0xa2,0x13,0x73,0x63,0x18,0x83,0xc0,0x3c,0xb6,0x41,0x08,
+0x4e,0x33,0x58,0x47,0x31,0x31,0x4f,0x5d,0x1f,0xc3,0x22,0x38,0xcd,0xe0,0x0b,0x4e,
+0x33,0xe1,0xbc,0xa5,0x18,0x82,0xd3,0x0c,0x77,0x6e,0x20,0xc5,0xc4,0x3c,0xb5,0x4e,
+0x3a,0x40,0xc6,0xde,0xdc,0xc8,0x5c,0x7e,0x64,0x70,0x2c,0xa4,0x98,0x98,0xa7,0xee,
+0x6f,0x20,0x11,0x9c,0x66,0xf0,0x05,0xa7,0x99,0xec,0x82,0x10,0x9c,0xa6,0x32,0x93,
+0x42,0x60,0x1e,0x7b,0xb7,0x98,0x62,0x62,0x9e,0xbc,0x36,0x16,0x43,0x70,0x9a,0x0a,
+0xa7,0x6d,0xa4,0x98,0x98,0xc7,0xbe,0x8d,0xa4,0x98,0x98,0xc7,0xce,0x0d,0xc6,0x10,
+0x9c,0x66,0xc0,0x7b,0x12,0x02,0x32,0xf6,0xe6,0x46,0xe6,0xa2,0x33,0x13,0x73,0x06,
+0x8b,0xe0,0x34,0x83,0x2f,0x38,0xcd,0x64,0xd3,0x07,0x50,0x10,0xcd,0x14,0x61,0xe6,
+0x61,0x08,0x4e,0x53,0xd5,0x36,0x00,0x00,0x00,0x00,0x00,0x00,0x61,0x20,0x00,0x00,
+0x4a,0x00,0x00,0x00,0x13,0x04,0x41,0x2c,0x10,0x00,0x00,0x00,0x07,0x00,0x00,0x00,
+0x24,0xca,0x60,0x04,0xa0,0x04,0x8a,0x80,0xc2,0x0c,0x00,0xb9,0x61,0x0c,0x04,0x10,
+0x1e,0xe1,0x19,0xc6,0x40,0x02,0xe1,0x11,0x1e,0x00,0x00,0x00,0x63,0x08,0xcd,0x63,
+0x15,0xc1,0x31,0x84,0x06,0xb2,0x8b,0xe0,0x18,0x42,0x13,0x59,0x46,0x70,0x0c,0xa1,
+0x71,0x6c,0x23,0x38,0x16,0x02,0x04,0xc7,0x64,0x61,0x1a,0x37,0x16,0x01,0x04,0x48,
+0x35,0xc7,0x20,0x79,0xcf,0x58,0x04,0x10,0x20,0xd5,0x1c,0xc3,0x07,0x06,0xd0,0x58,
+0x04,0x10,0x20,0xd5,0x1c,0x43,0x18,0x88,0x41,0x34,0x16,0x01,0x04,0x48,0x35,0xc7,
+0x30,0x06,0x64,0xe0,0x98,0x47,0xd0,0xc0,0x80,0xa0,0x89,0x01,0x41,0x23,0x03,0x82,
+0x63,0x21,0x40,0x70,0x50,0x66,0x70,0x06,0x68,0x90,0x06,0x58,0x06,0xe1,0x40,0x00,
+0x25,0x00,0x00,0x00,0x56,0x52,0x4c,0xcc,0x73,0xd3,0x56,0x41,0x4c,0xcc,0x53,0xdb,
+0x05,0x31,0x31,0xcf,0x6d,0x19,0xc4,0xc4,0x3c,0xba,0x6d,0x10,0x13,0xf3,0xf4,0xd6,
+0x41,0x08,0xc0,0xb2,0x18,0x46,0x21,0x38,0x4d,0x85,0x9b,0x46,0x21,0x38,0x4d,0xb5,
+0x9b,0x8a,0x21,0x00,0xcb,0x82,0xdf,0x66,0x62,0x08,0x4e,0x53,0xdd,0xb7,0x9d,0x18,
+0x82,0xd3,0x54,0xb7,0x6e,0x28,0x86,0xe0,0x34,0xd5,0xdd,0xdb,0x47,0x31,0x31,0x4f,
+0x9d,0x9b,0x87,0x21,0x00,0xcb,0x52,0xdf,0x06,0x62,0x08,0xc0,0xb2,0xd4,0xbc,0x59,
+0x10,0x82,0xd3,0x54,0x96,0x62,0x08,0x4e,0x53,0xe1,0xb6,0x85,0x14,0x13,0xf3,0xd8,
+0xb4,0x8d,0x14,0x13,0xf3,0xd8,0xb9,0x89,0x18,0x02,0xb0,0x2c,0xf6,0x6d,0x24,0x86,
+0x00,0x2c,0x8b,0xcd,0x1b,0x87,0x21,0x38,0x4d,0x55,0xd3,0xd6,0x30,0x54,0xc0,0x72,
+0x00,0x05,0xd1,0x4c,0x11,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x61,0x20,0x00,0x00,
+0x19,0x00,0x00,0x00,0x13,0x04,0x41,0x2c,0x10,0x00,0x00,0x00,0x03,0x00,0x00,0x00,
+0x24,0x4a,0x60,0x04,0x80,0xc2,0x0c,0x00,0x00,0x00,0x00,0x00,0x63,0x08,0xcd,0x33,
+0x16,0x01,0x04,0x48,0x34,0xc7,0x00,0x49,0xcf,0x58,0x04,0x10,0x28,0xd1,0x1c,0xc3,
+0x44,0x39,0x58,0x85,0x03,0x01,0x00,0x00,0x0a,0x00,0x00,0x00,0x26,0x41,0x08,0xc0,
+0xb2,0x18,0x45,0x21,0x00,0xcb,0xb2,0x5b,0x04,0x31,0x31,0x8f,0x6d,0x13,0xc4,0xc4,
+0x3c,0xb9,0x35,0x0c,0x15,0xb0,0x58,0x05,0x31,0x31,0x4f,0x7f,0x00,0x05,0xd1,0x4c,
+0x11,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x61,0x20,0x00,0x00,0x1b,0x00,0x00,0x00,
+0x13,0x04,0x41,0x2c,0x10,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x24,0xca,0x60,0x04,
+0xa0,0x04,0x8a,0x80,0xc2,0x0c,0x00,0x00,0x63,0x08,0xcd,0x33,0x16,0x01,0x04,0xca,
+0x34,0xc7,0x20,0x51,0xcf,0x1c,0x43,0x45,0x41,0x73,0x0c,0x16,0x15,0xcd,0x31,0x5c,
+0x94,0x83,0x58,0x38,0x10,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x76,0x51,0x4c,0xcc,
+0x53,0xdb,0x86,0x51,0x4c,0xcc,0x53,0xe7,0x36,0x41,0x4c,0xcc,0x63,0x5b,0x05,0x31,
+0x31,0x8f,0x6e,0x16,0xc4,0xc4,0x3c,0xbd,0x51,0x10,0x02,0xb0,0x2c,0xd6,0x30,0x54,
+0xc0,0x72,0x00,0x05,0xd1,0x4c,0x11,0x06,0x00,0x00,0x00,0x00,0x61,0x20,0x00,0x00,
+0x2c,0x00,0x00,0x00,0x13,0x04,0x45,0x2c,0x10,0x00,0x00,0x00,0x03,0x00,0x00,0x00,
+0x24,0xca,0xa0,0x04,0x46,0x00,0x8a,0x80,0xc0,0x08,0x00,0x00,0x63,0x08,0x0d,0x34,
+0xdc,0x30,0x49,0xc4,0x2c,0x03,0x11,0x50,0x63,0x08,0xcd,0x33,0xdc,0x50,0x49,0xc4,
+0x2c,0x03,0x21,0x58,0x63,0x08,0x4d,0x34,0xdc,0x70,0x49,0xc4,0x2c,0x03,0x31,0x60,
+0x63,0x08,0x8d,0x33,0xdc,0x90,0x49,0x84,0x69,0x22,0x70,0xc3,0x27,0x1c,0x08,0x00,
+0x17,0x00,0x00,0x00,0x96,0x51,0x4c,0xcc,0x53,0xdf,0x66,0x41,0x08,0xcc,0x83,0xdb,
+0x04,0x31,0x31,0x4f,0x6d,0x15,0xc4,0xc4,0x3c,0xb7,0x61,0x10,0x02,0xf3,0xf0,0x76,
+0x41,0x4c,0xcc,0xb3,0x1f,0x81,0x11,0x11,0x13,0x15,0x35,0x37,0x90,0x2c,0x4e,0xf4,
+0x47,0x87,0x54,0xd7,0x17,0x70,0x2c,0x4e,0xf4,0x47,0x87,0x74,0x02,0xc8,0xe2,0x44,
+0x7f,0x74,0x48,0xb9,0x69,0x14,0x02,0xf3,0xd4,0xb8,0x6d,0x18,0x11,0x31,0x55,0xc0,
+0x62,0x0d,0x43,0x05,0x2c,0x07,0x50,0x10,0xcd,0x14,0x61,0x46,0x31,0x08,0xcc,0x03,
+0x00,0x00,0x00,0x00,0x71,0x20,0x00,0x00,0x12,0x00,0x00,0x00,0x66,0x40,0x54,0x82,
+0x23,0x19,0xc3,0xa0,0x20,0x8b,0x1d,0x18,0x4f,0x84,0x34,0x53,0x61,0x03,0xc4,0xe3,
+0x58,0x85,0x05,0x14,0xbe,0x34,0x45,0xb5,0x21,0x10,0x82,0x23,0x15,0x46,0x30,0x2c,
+0xc8,0x64,0x02,0x06,0xf0,0x3c,0x91,0x73,0x19,0x00,0xe1,0x4b,0x53,0x64,0x0a,0x84,
+0x84,0x34,0x85,0x25,0x0c,0x92,0x20,0x59,0xc1,0x20,0x30,0x8f,0x2d,0x10,0x95,0x84,
+0x34,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00};
diff --git a/src/gallium/auxiliary/gallivm/gallivm_cpu.cpp b/src/gallium/auxiliary/gallivm/gallivm_cpu.cpp
new file mode 100644
index 0000000000..1bd00a0c2a
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/gallivm_cpu.cpp
@@ -0,0 +1,243 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Zack Rusin zack@tungstengraphics.com
+ */
+#ifdef MESA_LLVM
+
+#include "gallivm.h"
+#include "gallivm_p.h"
+
+#include "instructions.h"
+#include "loweringpass.h"
+#include "storage.h"
+#include "tgsitollvm.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_shader_tokens.h"
+
+#include "tgsi/tgsi_exec.h"
+#include "tgsi/tgsi_dump.h"
+
+#include "util/u_memory.h"
+#include "util/u_math.h"
+
+#include <llvm/Module.h>
+#include <llvm/CallingConv.h>
+#include <llvm/Constants.h>
+#include <llvm/DerivedTypes.h>
+#include <llvm/Instructions.h>
+#include <llvm/ModuleProvider.h>
+#include <llvm/Pass.h>
+#include <llvm/PassManager.h>
+#include <llvm/Attributes.h>
+#include <llvm/Support/PatternMatch.h>
+#include <llvm/ExecutionEngine/JIT.h>
+#include <llvm/ExecutionEngine/Interpreter.h>
+#include <llvm/ExecutionEngine/GenericValue.h>
+#include <llvm/Support/MemoryBuffer.h>
+#include <llvm/LinkAllPasses.h>
+#include <llvm/Analysis/Verifier.h>
+#include <llvm/Analysis/LoopPass.h>
+#include <llvm/Target/TargetData.h>
+#include <llvm/Bitcode/ReaderWriter.h>
+#include <llvm/Transforms/Utils/Cloning.h>
+
+#include <sstream>
+#include <fstream>
+#include <iostream>
+
+struct gallivm_cpu_engine {
+ llvm::ExecutionEngine *engine;
+};
+
+static struct gallivm_cpu_engine *CPU = 0;
+
+typedef int (*fragment_shader_runner)(float x, float y,
+ float (*dests)[16][4],
+ float (*inputs)[16][4],
+ int num_attribs,
+ float (*consts)[4], int num_consts,
+ struct tgsi_sampler *samplers);
+
+int gallivm_cpu_fs_exec(struct gallivm_prog *prog,
+ float fx, float fy,
+ float (*dests)[16][4],
+ float (*inputs)[16][4],
+ float (*consts)[4],
+ struct tgsi_sampler *samplers)
+{
+ fragment_shader_runner runner = reinterpret_cast<fragment_shader_runner>(prog->function);
+ assert(runner);
+
+ return runner(fx, fy, dests, inputs, prog->num_interp,
+ consts, prog->num_consts,
+ samplers);
+}
+
+static inline llvm::Function *func_for_shader(struct gallivm_prog *prog)
+{
+ llvm::Module *mod = prog->module;
+ llvm::Function *func = 0;
+
+ switch (prog->type) {
+ case GALLIVM_VS:
+ func = mod->getFunction("vs_shader");
+ break;
+ case GALLIVM_FS:
+ func = mod->getFunction("fs_shader");
+ break;
+ default:
+ assert(!"Unknown shader type!");
+ break;
+ }
+ return func;
+}
+
+/*!
+ This function creates a CPU-based execution engine for the given gallivm_prog.
+ gallivm_cpu_engine should be used as a singleton throughout the library.
+ Before executing a program with gallivm_prog_exec one needs to call
+ gallivm_cpu_jit_compile. The gallivm_prog passed to this constructor is
+ JIT compiled automatically, so gallivm_cpu_jit_compile should not be called
+ on it again. (See the usage sketch after this function.)
+ */
+struct gallivm_cpu_engine * gallivm_cpu_engine_create(struct gallivm_prog *prog)
+{
+ struct gallivm_cpu_engine *cpu = (struct gallivm_cpu_engine *)
+ calloc(1, sizeof(struct gallivm_cpu_engine));
+ llvm::Module *mod = static_cast<llvm::Module*>(prog->module);
+ llvm::ExistingModuleProvider *mp = new llvm::ExistingModuleProvider(mod);
+ llvm::ExecutionEngine *ee = llvm::ExecutionEngine::create(mp, false);
+ ee->DisableLazyCompilation();
+ cpu->engine = ee;
+
+ llvm::Function *func = func_for_shader(prog);
+
+ prog->function = ee->getPointerToFunction(func);
+ CPU = cpu;
+ return cpu;
+}
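+
+/*
+ Usage sketch (this is how draw_vs_llvm.c drives it in this change): reuse
+ the global engine when it already exists, otherwise create one from the
+ first compiled program.
+
+   struct gallivm_cpu_engine *ee = gallivm_global_cpu_engine();
+   if (!ee)
+      ee = gallivm_cpu_engine_create(prog);
+   else
+      gallivm_cpu_jit_compile(ee, prog);
+ */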
+
+
+/*!
+ This function JIT compiles the given gallivm_prog with the given CPU-based
+ execution engine. A reference to the generated machine code entry point is
+ stored in the gallivm_prog. After this call one can use gallivm_prog_exec
+ to execute the gallivm_prog on the CPU.
+ */
+void gallivm_cpu_jit_compile(struct gallivm_cpu_engine *cpu, struct gallivm_prog *prog)
+{
+ llvm::Module *mod = static_cast<llvm::Module*>(prog->module);
+ llvm::ExistingModuleProvider *mp = new llvm::ExistingModuleProvider(mod);
+ llvm::ExecutionEngine *ee = cpu->engine;
+ assert(ee);
+ /* FIXME: why was this disabled? We need it for pow/sqrt/... */
+ ee->DisableLazyCompilation(false);
+ ee->addModuleProvider(mp);
+
+ llvm::Function *func = func_for_shader(prog);
+ prog->function = ee->getPointerToFunction(func);
+}
+
+void gallivm_cpu_engine_delete(struct gallivm_cpu_engine *cpu)
+{
+ free(cpu);
+}
+
+struct gallivm_cpu_engine * gallivm_global_cpu_engine()
+{
+ return CPU;
+}
+
+
+typedef void (*vertex_shader_runner)(void *ainputs,
+ void *dests,
+ float (*aconsts)[4]);
+
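+/* The TGSI exec machine is four-wide, so vertices are processed in groups
+ * of four, one per SoA channel.
+ */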
+#define MAX_TGSI_VERTICES 4
+/*!
+ This function executes the gallivm_prog in software. Before calling it,
+ the gallivm_prog has to be JIT compiled with the gallivm_cpu_jit_compile
+ function.
+ */
+int gallivm_cpu_vs_exec(struct gallivm_prog *prog,
+ struct tgsi_exec_machine *machine,
+ const float (*input)[4],
+ unsigned num_inputs,
+ float (*output)[4],
+ unsigned num_outputs,
+ const float (*constants)[4],
+ unsigned count,
+ unsigned input_stride,
+ unsigned output_stride )
+{
+ unsigned int i, j;
+ unsigned slot;
+ vertex_shader_runner runner = reinterpret_cast<vertex_shader_runner>(prog->function);
+ assert(runner);
+
+ for (i = 0; i < count; i += MAX_TGSI_VERTICES) {
+ unsigned int max_vertices = MIN2(MAX_TGSI_VERTICES, count - i);
+
+ /* Swizzle inputs.
+ */
+ for (j = 0; j < max_vertices; j++) {
+ for (slot = 0; slot < num_inputs; slot++) {
+ machine->Inputs[slot].xyzw[0].f[j] = input[slot][0];
+ machine->Inputs[slot].xyzw[1].f[j] = input[slot][1];
+ machine->Inputs[slot].xyzw[2].f[j] = input[slot][2];
+ machine->Inputs[slot].xyzw[3].f[j] = input[slot][3];
+ }
+
+ input = (const float (*)[4])((const char *)input + input_stride);
+ }
+
+ /* run shader */
+ runner(machine->Inputs,
+ machine->Outputs,
+ (float (*)[4]) constants);
+
+ /* Unswizzle all output results
+ */
+ for (j = 0; j < max_vertices; j++) {
+ for (slot = 0; slot < num_outputs; slot++) {
+ output[slot][0] = machine->Outputs[slot].xyzw[0].f[j];
+ output[slot][1] = machine->Outputs[slot].xyzw[1].f[j];
+ output[slot][2] = machine->Outputs[slot].xyzw[2].f[j];
+ output[slot][3] = machine->Outputs[slot].xyzw[3].f[j];
+ }
+ output = (float (*)[4])((char *)output + output_stride);
+ }
+ }
+
+ return 0;
+}
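+
+/*
+ * Data layout note for the loops above: the caller supplies one vertex every
+ * input_stride bytes, each holding num_inputs float[4] attributes (AoS), while
+ * the generated code reads machine->Inputs[slot].xyzw[chan].f[vertex], i.e. up
+ * to MAX_TGSI_VERTICES vertices per attribute channel (SoA). The copy loops
+ * transpose between the two layouts before running the shader, and the same
+ * transposition happens in reverse for machine->Outputs afterwards.
+ */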
+
+#endif
diff --git a/src/gallium/auxiliary/gallivm/gallivm_p.h b/src/gallium/auxiliary/gallivm/gallivm_p.h
new file mode 100644
index 0000000000..d2c5852bdf
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/gallivm_p.h
@@ -0,0 +1,110 @@
+#ifndef GALLIVM_P_H
+#define GALLIVM_P_H
+
+#ifdef MESA_LLVM
+
+#include "gallivm.h"
+#include "pipe/p_shader_tokens.h"
+#include "pipe/p_compiler.h"
+
+namespace llvm {
+ class Module;
+}
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+enum gallivm_shader_type;
+enum gallivm_vector_layout;
+
+struct gallivm_interpolate {
+ int attrib;
+ int chan;
+ int type;
+};
+
+struct gallivm_ir {
+ llvm::Module *module;
+ int id;
+ enum gallivm_shader_type type;
+ enum gallivm_vector_layout layout;
+ int num_components;
+ int num_consts;
+
+ /* FIXME: this might not be enough for some shaders */
+ struct gallivm_interpolate interpolators[32*4];
+ int num_interp;
+};
+
+struct gallivm_prog {
+ llvm::Module *module;
+ void *function;
+
+ int id;
+ enum gallivm_shader_type type;
+
+ int num_consts;
+
+ /* FIXME: this might not be enough for some shaders */
+ struct gallivm_interpolate interpolators[32*4];
+ int num_interp;
+};
+
+static INLINE void gallivm_swizzle_components(int swizzle,
+ int *xc, int *yc,
+ int *zc, int *wc)
+{
+ int x = swizzle / 1000; swizzle -= x * 1000;
+ int y = swizzle / 100; swizzle -= y * 100;
+ int z = swizzle / 10; swizzle -= z * 10;
+ int w = swizzle;
+
+ if (xc) *xc = x;
+ if (yc) *yc = y;
+ if (zc) *zc = z;
+ if (wc) *wc = w;
+}
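+
+/*
+ * Example (for illustration): a .yzwx swizzle is encoded as the decimal value
+ * TGSI_SWIZZLE_Y*1000 + TGSI_SWIZZLE_Z*100 + TGSI_SWIZZLE_W*10 + TGSI_SWIZZLE_X
+ * = 1230, which the helper above decodes back into its four component indices.
+ */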
+
+static INLINE boolean gallivm_is_swizzle(int swizzle)
+{
+ const int NO_SWIZZLE = TGSI_SWIZZLE_X * 1000 + TGSI_SWIZZLE_Y * 100 +
+ TGSI_SWIZZLE_Z * 10 + TGSI_SWIZZLE_W;
+ return swizzle != NO_SWIZZLE;
+}
+
+static INLINE int gallivm_x_swizzle(int swizzle)
+{
+ int x;
+ gallivm_swizzle_components(swizzle, &x, 0, 0, 0);
+ return x;
+}
+
+static INLINE int gallivm_y_swizzle(int swizzle)
+{
+ int y;
+ gallivm_swizzle_components(swizzle, 0, &y, 0, 0);
+ return y;
+}
+
+static INLINE int gallivm_z_swizzle(int swizzle)
+{
+ int z;
+ gallivm_swizzle_components(swizzle, 0, 0, &z, 0);
+ return z;
+}
+
+static INLINE int gallivm_w_swizzle(int swizzle)
+{
+ int w;
+ gallivm_swizzle_components(swizzle, 0, 0, 0, &w);
+ return w;
+}
+
+#if defined __cplusplus
+}
+#endif
+
+#endif /* MESA_LLVM */
+
+#endif
diff --git a/src/gallium/auxiliary/gallivm/instructions.cpp b/src/gallium/auxiliary/gallivm/instructions.cpp
new file mode 100644
index 0000000000..ee8162efce
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/instructions.cpp
@@ -0,0 +1,1193 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Zack Rusin zack@tungstengraphics.com
+ */
+#ifdef MESA_LLVM
+
+#include "instructions.h"
+
+#include "storage.h"
+
+#include "util/u_memory.h"
+
+#include <llvm/CallingConv.h>
+#include <llvm/Constants.h>
+#include <llvm/DerivedTypes.h>
+#include <llvm/Function.h>
+#include <llvm/InstrTypes.h>
+#include <llvm/Instructions.h>
+#include <llvm/Attributes.h>
+#include <llvm/Support/MemoryBuffer.h>
+#include <llvm/Bitcode/ReaderWriter.h>
+
+#include <sstream>
+#include <fstream>
+#include <iostream>
+
+#include <cstdio>  /* snprintf */
+#include <cstring> /* strlen */
+
+using namespace llvm;
+
+#include "gallivm_builtins.cpp"
+
+#if 0
+llvm::Value *arrayFromChannels(std::vector<llvm::Value*> &vals)
+{
+ VectorType *vectorType = VectorType::get(Type::FloatTy, 4);
+ ArrayType *vectorArray = ArrayType::get(vectorType, 4);
+}
+#endif
+
+static inline std::string createFuncName(int label)
+{
+ std::ostringstream stream;
+ stream << "function";
+ stream << label;
+ return stream.str();
+}
+
+Instructions::Instructions(llvm::Module *mod, llvm::Function *func, llvm::BasicBlock *block,
+ Storage *storage)
+ : m_mod(mod), m_func(func), m_builder(block), m_idx(0),
+ m_storage(storage)
+{
+ m_floatVecType = VectorType::get(Type::FloatTy, 4);
+
+ m_llvmCeil = 0;
+ m_llvmFSqrt = 0;
+ m_llvmFAbs = 0;
+ m_llvmPow = 0;
+ m_llvmFloor = 0;
+ m_llvmFlog = 0;
+ m_llvmFexp = 0;
+ m_llvmLit = 0;
+ m_fmtPtr = 0;
+
+ MemoryBuffer *buffer = MemoryBuffer::getMemBuffer(
+ (const char*)&llvm_builtins_data[0],
+ (const char*)&llvm_builtins_data[Elements(llvm_builtins_data)-1]);
+ m_mod = ParseBitcodeFile(buffer);
+}
+
+llvm::BasicBlock * Instructions::currentBlock() const
+{
+ return m_builder.GetInsertBlock();
+}
+
+llvm::Value * Instructions::abs(llvm::Value *in)
+{
+ std::vector<llvm::Value*> vec = extractVector(in);
+ Value *xabs = callFAbs(vec[0]);
+ Value *yabs = callFAbs(vec[1]);
+ Value *zabs = callFAbs(vec[2]);
+ Value *wabs = callFAbs(vec[3]);
+ return vectorFromVals(xabs, yabs, zabs, wabs);
+}
+
+llvm::Value * Instructions::add(llvm::Value *in1, llvm::Value *in2)
+{
+ return m_builder.CreateAdd(in1, in2, name("add"));
+}
+
+llvm::Value * Instructions::arl(llvm::Value *in)
+{
+ return floor(in);
+}
+
+void Instructions::beginLoop()
+{
+ BasicBlock *begin = BasicBlock::Create(name("loop"), m_func,0);
+ BasicBlock *end = BasicBlock::Create(name("endloop"), m_func,0);
+
+ m_builder.CreateBr(begin);
+ Loop loop;
+ loop.begin = begin;
+ loop.end = end;
+ m_builder.SetInsertPoint(begin);
+ m_loopStack.push(loop);
+}
+
+void Instructions::bgnSub(unsigned label)
+{
+ llvm::Function *func = findFunction(label);
+
+ Function::arg_iterator args = func->arg_begin();
+ Value *ptr_INPUT = args++;
+ ptr_INPUT->setName("INPUT");
+ m_storage->pushArguments(ptr_INPUT);
+
+ llvm::BasicBlock *entry = BasicBlock::Create("entry", func, 0);
+
+ m_func = func;
+ m_builder.SetInsertPoint(entry);
+}
+
+void Instructions::brk()
+{
+ assert(!m_loopStack.empty());
+ BasicBlock *unr = BasicBlock::Create(name("unreachable"), m_func,0);
+ m_builder.CreateBr(m_loopStack.top().end);
+ m_builder.SetInsertPoint(unr);
+}
+
+void Instructions::cal(int label, llvm::Value *input)
+{
+ std::vector<Value*> params;
+ params.push_back(input);
+ llvm::Function *func = findFunction(label);
+
+ m_builder.CreateCall(func, params.begin(), params.end());
+}
+
+llvm::Value * Instructions::ceil(llvm::Value *in)
+{
+ std::vector<llvm::Value*> vec = extractVector(in);
+ return vectorFromVals(callCeil(vec[0]), callCeil(vec[1]),
+ callCeil(vec[2]), callCeil(vec[3]));
+}
+
+llvm::Value * Instructions::clamp(llvm::Value *in1)
+{
+ llvm::Value *zero = constVector(0.0f, 0.0f, 0.0f, 0.0f);
+ llvm::Value *one = constVector(1.0f, 1.0f, 1.0f, 1.0f);
+ return min( max(zero, in1), one);
+}
+
+llvm::Value * Instructions::cmp(llvm::Value *in1, llvm::Value *in2, llvm::Value *in3)
+{
+ llvm::Function *func = m_mod->getFunction("cmp");
+ assert(func);
+
+ std::vector<Value*> params;
+ params.push_back(in1);
+ params.push_back(in2);
+ params.push_back(in3);
+ CallInst *call = m_builder.CreateCall(func, params.begin(), params.end(), name("cmpres"));
+ call->setTailCall(false);
+ return call;
+}
+
+llvm::Value * Instructions::cnd(llvm::Value *in1, llvm::Value *in2, llvm::Value *in3)
+{
+ std::vector<llvm::Value*> vec1 = extractVector(in1);
+ std::vector<llvm::Value*> vec2 = extractVector(in2);
+ std::vector<llvm::Value*> vec3 = extractVector(in3);
+ Constant *half = ConstantFP::get(APFloat(0.5f));
+
+ Value *xcmp = m_builder.CreateFCmpOGT(vec1[0], half, name("xcmp"));
+ Value *selx = m_builder.CreateSelect(xcmp, vec2[0], vec3[0],
+ name("selx"));
+
+ Value *ycmp = m_builder.CreateFCmpOGT(vec1[1], half, name("ycmp"));
+ Value *sely = m_builder.CreateSelect(ycmp, vec2[1], vec3[1],
+ name("sely"));
+
+ Value *zcmp = m_builder.CreateFCmpOGT(vec1[2], half, name("zcmp"));
+ Value *selz = m_builder.CreateSelect(zcmp, vec2[2], vec3[2],
+ name("selz"));
+
+ Value *wcmp = m_builder.CreateFCmpOGT(vec1[3], half, name("wcmp"));
+ Value *selw = m_builder.CreateSelect(wcmp, vec2[3], vec3[3],
+ name("selw"));
+
+ return vectorFromVals(selx, sely, selz, selw);
+}
+
+llvm::Value * Instructions::cnd0(llvm::Value *in1, llvm::Value *in2, llvm::Value *in3)
+{
+ std::vector<llvm::Value*> vec1 = extractVector(in1);
+ std::vector<llvm::Value*> vec2 = extractVector(in2);
+ std::vector<llvm::Value*> vec3 = extractVector(in3);
+ Constant *zero = Constant::getNullValue(Type::FloatTy);
+
+ Value *xcmp = m_builder.CreateFCmpOGE(vec1[0], zero, name("xcmp"));
+ Value *selx = m_builder.CreateSelect(xcmp, vec2[0], vec3[0],
+ name("selx"));
+
+ Value *ycmp = m_builder.CreateFCmpOGE(vec1[1], zero, name("ycmp"));
+ Value *sely = m_builder.CreateSelect(ycmp, vec2[1], vec3[1],
+ name("sely"));
+
+ Value *zcmp = m_builder.CreateFCmpOGE(vec1[2], zero, name("zcmp"));
+ Value *selz = m_builder.CreateSelect(zcmp, vec2[2], vec3[2],
+ name("selz"));
+
+ Value *wcmp = m_builder.CreateFCmpOGE(vec1[3], zero, name("wcmp"));
+ Value *selw = m_builder.CreateSelect(wcmp, vec2[3], vec3[3],
+ name("selw"));
+
+ return vectorFromVals(selx, sely, selz, selw);
+}
+
+llvm::Value * Instructions::cos(llvm::Value *in)
+{
+#if 0
+ llvm::Function *func = m_mod->getFunction("vcos");
+ assert(func);
+
+ CallInst *call = m_builder.CreateCall(func, in, name("cosres"));
+ call->setTailCall(false);
+ return call;
+#else
+ std::vector<llvm::Value*> elems = extractVector(in);
+ Function *func = m_mod->getFunction("cosf");
+ assert(func);
+ CallInst *cos = m_builder.CreateCall(func, elems[0], name("cosres"));
+ cos->setCallingConv(CallingConv::C);
+ cos->setTailCall(true);
+ return vectorFromVals(cos, cos, cos, cos);
+#endif
+}
+
+llvm::Value * Instructions::cross(llvm::Value *in1, llvm::Value *in2)
+{
+ Value *x1 = m_builder.CreateExtractElement(in1,
+ m_storage->constantInt(0),
+ name("x1"));
+ Value *y1 = m_builder.CreateExtractElement(in1,
+ m_storage->constantInt(1),
+ name("y1"));
+ Value *z1 = m_builder.CreateExtractElement(in1,
+ m_storage->constantInt(2),
+ name("z1"));
+
+ Value *x2 = m_builder.CreateExtractElement(in2,
+ m_storage->constantInt(0),
+ name("x2"));
+ Value *y2 = m_builder.CreateExtractElement(in2,
+ m_storage->constantInt(1),
+ name("y2"));
+ Value *z2 = m_builder.CreateExtractElement(in2,
+ m_storage->constantInt(2),
+ name("z2"));
+ Value *y1z2 = mul(y1, z2);
+ Value *z1y2 = mul(z1, y2);
+
+ Value *z1x2 = mul(z1, x2);
+ Value *x1z2 = mul(x1, z2);
+
+ Value *x1y2 = mul(x1, y2);
+ Value *y1x2 = mul(y1, x2);
+
+ return vectorFromVals(sub(y1z2, z1y2), sub(z1x2, x1z2), sub(x1y2, y1x2));
+}
+
+llvm::Value * Instructions::ddx(llvm::Value *in)
+{
+ // FIXME: derivatives are not implemented yet
+ assert(0);
+ return 0;
+}
+
+llvm::Value * Instructions::ddy(llvm::Value *in)
+{
+ // FIXME: derivatives are not implemented yet
+ assert(0);
+ return 0;
+}
+
+llvm::Value * Instructions::div(llvm::Value *in1, llvm::Value *in2)
+{
+ return m_builder.CreateFDiv(in1, in2, name("div"));
+}
+
+llvm::Value * Instructions::dot2add(llvm::Value *in1, llvm::Value *in2, llvm::Value *in3)
+{
+ Value *mulRes = mul(in1, in2);
+ Value *x = m_builder.CreateExtractElement(mulRes,
+ m_storage->constantInt(0),
+ name("extractx"));
+ Value *y = m_builder.CreateExtractElement(mulRes,
+ m_storage->constantInt(1),
+ name("extracty"));
+ Value *z = m_builder.CreateExtractElement(in3,
+ m_storage->constantInt(2),
+ name("extractz"));
+ Value *xy = m_builder.CreateAdd(x, y,name("xy"));
+ Value *dot2add = m_builder.CreateAdd(xy, z, name("dot2add"));
+ return vectorFromVals(dot2add, dot2add, dot2add, dot2add);
+}
+
+llvm::Value * Instructions::dp2(llvm::Value *in1, llvm::Value *in2)
+{
+ Value *mulRes = mul(in1, in2);
+ Value *x = m_builder.CreateExtractElement(mulRes,
+ m_storage->constantInt(0),
+ name("extractx"));
+ Value *y = m_builder.CreateExtractElement(mulRes,
+ m_storage->constantInt(1),
+ name("extracty"));
+ Value *xy = m_builder.CreateAdd(x, y,name("xy"));
+ return vectorFromVals(xy, xy, xy, xy);
+}
+
+llvm::Value * Instructions::dp3(llvm::Value *in1, llvm::Value *in2)
+{
+ Value *mulRes = mul(in1, in2);
+ Value *x = m_builder.CreateExtractElement(mulRes,
+ m_storage->constantInt(0),
+ name("extractx"));
+ Value *y = m_builder.CreateExtractElement(mulRes,
+ m_storage->constantInt(1),
+ name("extracty"));
+ Value *z = m_builder.CreateExtractElement(mulRes,
+ m_storage->constantInt(2),
+ name("extractz"));
+ Value *xy = m_builder.CreateAdd(x, y,name("xy"));
+ Value *dot3 = m_builder.CreateAdd(xy, z, name("dot3"));
+ return vectorFromVals(dot3, dot3, dot3, dot3);
+}
+
+llvm::Value * Instructions::dp4(llvm::Value *in1, llvm::Value *in2)
+{
+ Value *mulRes = mul(in1, in2);
+ std::vector<llvm::Value*> vec = extractVector(mulRes);
+ Value *xy = m_builder.CreateAdd(vec[0], vec[1], name("xy"));
+ Value *xyz = m_builder.CreateAdd(xy, vec[2], name("xyz"));
+ Value *dot4 = m_builder.CreateAdd(xyz, vec[3], name("dot4"));
+ return vectorFromVals(dot4, dot4, dot4, dot4);
+}
+
+llvm::Value * Instructions::dph(llvm::Value *in1, llvm::Value *in2)
+{
+ Value *mulRes = mul(in1, in2);
+ std::vector<llvm::Value*> vec1 = extractVector(mulRes);
+ Value *xy = m_builder.CreateAdd(vec1[0], vec1[1], name("xy"));
+ Value *xyz = m_builder.CreateAdd(xy, vec1[2], name("xyz"));
+ Value *dph = m_builder.CreateAdd(xyz, vec1[3], name("dph"));
+ return vectorFromVals(dph, dph, dph, dph);
+}
+
+llvm::Value * Instructions::dst(llvm::Value *in1, llvm::Value *in2)
+{
+ Value *y1 = m_builder.CreateExtractElement(in1,
+ m_storage->constantInt(1),
+ name("y1"));
+ Value *z = m_builder.CreateExtractElement(in1,
+ m_storage->constantInt(2),
+ name("z"));
+ Value *y2 = m_builder.CreateExtractElement(in2,
+ m_storage->constantInt(1),
+ name("y2"));
+ Value *w = m_builder.CreateExtractElement(in2,
+ m_storage->constantInt(3),
+ name("w"));
+ Value *ry = m_builder.CreateMul(y1, y2, name("tyuy"));
+ return vectorFromVals(ConstantFP::get(APFloat(1.f)),
+ ry, z, w);
+}
+
+void Instructions::elseop()
+{
+ assert(!m_ifStack.empty());
+ BasicBlock *ifend = BasicBlock::Create(name("ifend"), m_func,0);
+ m_builder.CreateBr(ifend);
+ m_builder.SetInsertPoint(m_ifStack.top());
+ currentBlock()->setName(name("ifelse"));
+ m_ifStack.pop();
+ m_ifStack.push(ifend);
+}
+
+void Instructions::endif()
+{
+ assert(!m_ifStack.empty());
+ m_builder.CreateBr(m_ifStack.top());
+ m_builder.SetInsertPoint(m_ifStack.top());
+ m_ifStack.pop();
+}
+
+void Instructions::endLoop()
+{
+ assert(!m_loopStack.empty());
+ Loop loop = m_loopStack.top();
+ m_builder.CreateBr(loop.begin);
+ loop.end->moveAfter(currentBlock());
+ m_builder.SetInsertPoint(loop.end);
+ m_loopStack.pop();
+}
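+
+/*
+ * Rough shape of the blocks emitted by beginLoop()/brk()/endLoop():
+ *
+ * br label %loop ; beginLoop
+ * loop:
+ * ... ; loop body
+ * br label %endloop ; brk (subsequent code lands in an 'unreachable' block)
+ * br label %loop ; endLoop back edge
+ * endloop: ; emission continues here
+ */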
+
+void Instructions::end()
+{
+ m_builder.CreateRetVoid();
+}
+
+void Instructions::endSub()
+{
+ m_func = 0;
+ m_builder.SetInsertPoint(0);
+}
+
+llvm::Value * Instructions::exp(llvm::Value *in)
+{
+ std::vector<llvm::Value*> vec = extractVector(in);
+ return vectorFromVals(callFExp(vec[0]), callFExp(vec[1]),
+ callFExp(vec[2]), callFExp(vec[3]));
+}
+
+llvm::Value * Instructions::ex2(llvm::Value *in)
+{
+ llvm::Value *val = callPow(ConstantFP::get(APFloat(2.f)),
+ m_builder.CreateExtractElement(
+ in, m_storage->constantInt(0),
+ name("x1")));
+ return vectorFromVals(val, val, val, val);
+}
+
+llvm::Value * Instructions::floor(llvm::Value *in)
+{
+ std::vector<llvm::Value*> vec = extractVector(in);
+ return vectorFromVals(callFloor(vec[0]), callFloor(vec[1]),
+ callFloor(vec[2]), callFloor(vec[3]));
+}
+
+llvm::Value * Instructions::frc(llvm::Value *in)
+{
+ llvm::Value *flr = floor(in);
+ return sub(in, flr);
+}
+
+void Instructions::ifop(llvm::Value *in)
+{
+ BasicBlock *ifthen = BasicBlock::Create(name("ifthen"), m_func,0);
+ BasicBlock *ifend = BasicBlock::Create(name("ifthenend"), m_func,0);
+
+ //BasicBlock *yblock = new BasicBlock(name("yblock"), m_func,0);
+ //BasicBlock *zblock = new BasicBlock(name("zblock"), m_func,0);
+ //BasicBlock *wblock = new BasicBlock(name("wblock"), m_func,0);
+
+ Constant *float0 = Constant::getNullValue(Type::FloatTy);
+
+ Value *x = m_builder.CreateExtractElement(in, m_storage->constantInt(0),
+ name("extractx"));
+ Value *xcmp = m_builder.CreateFCmpUNE(x, float0, name("xcmp"));
+ m_builder.CreateCondBr(xcmp, ifthen, ifend);
+ //m_builder.SetInsertPoint(yblock);
+
+ m_builder.SetInsertPoint(ifthen);
+ m_ifStack.push(ifend);
+}
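+
+/*
+ * Note: only the x component of the condition is tested. The block pushed on
+ * m_ifStack is the join point; elseop() repurposes it as the else block
+ * (renaming it "ifelse") and pushes a fresh join block, and endif() branches
+ * to the top of the stack and resumes emission there.
+ */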
+
+llvm::Value * Instructions::kil(llvm::Value *in)
+{
+ llvm::Function *func = m_mod->getFunction("kil");
+ assert(func);
+
+ CallInst *call = m_builder.CreateCall(func, in, name("kilpres"));
+ call->setTailCall(false);
+ return call;
+}
+
+llvm::Value * Instructions::lerp(llvm::Value *in1, llvm::Value *in2,
+ llvm::Value *in3)
+{
+ llvm::Value *m = mul(in1, in2);
+ llvm::Value *vec1 = constVector(1.f, 1.f, 1.f, 1.f);
+ llvm::Value *s = sub(vec1, in1);
+ return add(m, mul(s, in3));
+}
+
+llvm::Value * Instructions::lg2(llvm::Value *in)
+{
+ std::vector<llvm::Value*> vec = extractVector(in);
+ llvm::Value *const_vec = constVector(1.442695f, 1.442695f,
+ 1.442695f, 1.442695f);
+ return mul(vectorFromVals(callFLog(vec[0]), callFLog(vec[1]),
+ callFLog(vec[2]), callFLog(vec[3])), const_vec);
+}
+
+llvm::Value * Instructions::lit(llvm::Value *in)
+{
+ if (!m_llvmLit) {
+ m_llvmLit = m_mod->getFunction("lit");
+ }
+ CallInst *call = m_builder.CreateCall(m_llvmLit, in, name("litres"));
+ call->setCallingConv(CallingConv::C);
+ call->setTailCall(false);
+ return call;
+}
+
+llvm::Value * Instructions::log(llvm::Value *in)
+{
+ std::vector<llvm::Value*> vec = extractVector(in);
+ return vectorFromVals(callFLog(vec[0]), callFLog(vec[1]),
+ callFLog(vec[2]), callFLog(vec[3]));
+}
+
+llvm::Value * Instructions::madd(llvm::Value *in1, llvm::Value *in2,
+ llvm::Value *in3)
+{
+ Value *mulRes = mul(in1, in2);
+ return add(mulRes, in3);
+}
+
+llvm::Value * Instructions::max(llvm::Value *in1, llvm::Value *in2)
+{
+ std::vector<llvm::Value*> vec1 = extractVector(in1);
+ std::vector<llvm::Value*> vec2 = extractVector(in2);
+
+ Value *xcmp = m_builder.CreateFCmpOGT(vec1[0], vec2[0],
+ name("xcmp"));
+ Value *selx = m_builder.CreateSelect(xcmp, vec1[0], vec2[0],
+ name("selx"));
+
+ Value *ycmp = m_builder.CreateFCmpOGT(vec1[1], vec2[1],
+ name("ycmp"));
+ Value *sely = m_builder.CreateSelect(ycmp, vec1[1], vec2[1],
+ name("sely"));
+
+ Value *zcmp = m_builder.CreateFCmpOGT(vec1[2], vec2[2],
+ name("zcmp"));
+ Value *selz = m_builder.CreateSelect(zcmp, vec1[2], vec2[2],
+ name("selz"));
+
+ Value *wcmp = m_builder.CreateFCmpOGT(vec1[3], vec2[3],
+ name("wcmp"));
+ Value *selw = m_builder.CreateSelect(wcmp, vec1[3], vec2[3],
+ name("selw"));
+
+ return vectorFromVals(selx, sely, selz, selw);
+}
+
+llvm::Value * Instructions::min(llvm::Value *in1, llvm::Value *in2)
+{
+ std::vector<llvm::Value*> vec1 = extractVector(in1);
+ std::vector<llvm::Value*> vec2 = extractVector(in2);
+
+ Value *xcmp = m_builder.CreateFCmpOLT(vec1[0], vec2[0], name("xcmp"));
+ Value *selx = m_builder.CreateSelect(xcmp, vec1[0], vec2[0],
+ name("selx"));
+
+ Value *ycmp = m_builder.CreateFCmpOLT(vec1[1], vec2[1], name("ycmp"));
+ Value *sely = m_builder.CreateSelect(ycmp, vec1[1], vec2[1],
+ name("sely"));
+
+ Value *zcmp = m_builder.CreateFCmpOLT(vec1[2], vec2[2], name("zcmp"));
+ Value *selz = m_builder.CreateSelect(zcmp, vec1[2], vec2[2],
+ name("selz"));
+
+ Value *wcmp = m_builder.CreateFCmpOLT(vec1[3], vec2[3], name("wcmp"));
+ Value *selw = m_builder.CreateSelect(wcmp, vec1[3], vec2[3],
+ name("selw"));
+
+ return vectorFromVals(selx, sely, selz, selw);
+}
+
+llvm::Value * Instructions::mul(llvm::Value *in1, llvm::Value *in2)
+{
+ return m_builder.CreateMul(in1, in2, name("mul"));
+}
+
+llvm::Value * Instructions::neg(llvm::Value *in)
+{
+ Value *neg = m_builder.CreateNeg(in, name("neg"));
+ return neg;
+}
+
+llvm::Value * Instructions::nrm(llvm::Value *in)
+{
+ llvm::Value *v = rsq(in);
+ return mul(v, in);
+}
+
+llvm::Value * Instructions::pow(llvm::Value *in1, llvm::Value *in2)
+{
+ Value *x1 = m_builder.CreateExtractElement(in1,
+ m_storage->constantInt(0),
+ name("x1"));
+ Value *x2 = m_builder.CreateExtractElement(in2,
+ m_storage->constantInt(0),
+ name("x2"));
+ llvm::Value *val = callPow(x1, x2);
+ return vectorFromVals(val, val, val, val);
+}
+
+llvm::Value * Instructions::rcp(llvm::Value *in1)
+{
+ Value *x1 = m_builder.CreateExtractElement(in1,
+ m_storage->constantInt(0),
+ name("x1"));
+ Value *res = m_builder.CreateFDiv(ConstantFP::get(APFloat(1.f)),
+ x1, name("rcp"));
+ return vectorFromVals(res, res, res, res);
+}
+
+llvm::Value * Instructions::rsq(llvm::Value *in1)
+{
+ Value *x = m_builder.CreateExtractElement(in1,
+ m_storage->constantInt(0),
+ name("extractx"));
+ Value *abs = callFAbs(x);
+ Value *sqrt = callFSqrt(abs);
+
+ Value *rsqrt = m_builder.CreateFDiv(ConstantFP::get(APFloat(1.f)),
+ sqrt,
+ name("rsqrt"));
+ return vectorFromVals(rsqrt, rsqrt, rsqrt, rsqrt);
+}
+
+llvm::Value * Instructions::scs(llvm::Value *in)
+{
+ llvm::Function *func = m_mod->getFunction("scs");
+ assert(func);
+
+ CallInst *call = m_builder.CreateCall(func, in, name("scsres"));
+ call->setTailCall(false);
+ return call;
+}
+
+llvm::Value * Instructions::seq(llvm::Value *in1, llvm::Value *in2)
+{
+ Constant *const1f = ConstantFP::get(APFloat(1.000000e+00f));
+ Constant *const0f = Constant::getNullValue(Type::FloatTy);
+
+ std::vector<llvm::Value*> vec1 = extractVector(in1);
+ std::vector<llvm::Value*> vec2 = extractVector(in2);
+
+ Value *xcmp = m_builder.CreateFCmpOEQ(vec1[0], vec2[0], name("xcmp"));
+ Value *x = m_builder.CreateSelect(xcmp, const1f, const0f, name("xsel"));
+
+ Value *ycmp = m_builder.CreateFCmpOEQ(vec1[1], vec2[1], name("ycmp"));
+ Value *y = m_builder.CreateSelect(ycmp, const1f, const0f, name("ysel"));
+
+ Value *zcmp = m_builder.CreateFCmpOEQ(vec1[2], vec2[2], name("zcmp"));
+ Value *z = m_builder.CreateSelect(zcmp, const1f, const0f, name("zsel"));
+
+ Value *wcmp = m_builder.CreateFCmpOEQ(vec1[3], vec2[3], name("wcmp"));
+ Value *w = m_builder.CreateSelect(wcmp, const1f, const0f, name("wsel"));
+
+ return vectorFromVals(x, y, z, w);
+}
+
+llvm::Value * Instructions::sfl(llvm::Value *in1, llvm::Value *in2)
+{
+ Constant *const0f = Constant::getNullValue(Type::FloatTy);
+
+ return vectorFromVals(const0f, const0f, const0f, const0f);
+}
+
+llvm::Value * Instructions::sge(llvm::Value *in1, llvm::Value *in2)
+{
+ Constant *const1f = ConstantFP::get(APFloat(1.000000e+00f));
+ Constant *const0f = Constant::getNullValue(Type::FloatTy);
+
+ std::vector<llvm::Value*> vec1 = extractVector(in1);
+ std::vector<llvm::Value*> vec2 = extractVector(in2);
+
+ Value *xcmp = m_builder.CreateFCmpOGE(vec1[0], vec2[0], name("xcmp"));
+ Value *x = m_builder.CreateSelect(xcmp, const1f, const0f, name("xsel"));
+
+ Value *ycmp = m_builder.CreateFCmpOGE(vec1[1], vec2[1], name("ycmp"));
+ Value *y = m_builder.CreateSelect(ycmp, const1f, const0f, name("ysel"));
+
+ Value *zcmp = m_builder.CreateFCmpOGE(vec1[2], vec2[2], name("zcmp"));
+ Value *z = m_builder.CreateSelect(zcmp, const1f, const0f, name("zsel"));
+
+ Value *wcmp = m_builder.CreateFCmpOGE(vec1[3], vec2[3], name("wcmp"));
+ Value *w = m_builder.CreateSelect(wcmp, const1f, const0f, name("wsel"));
+
+ return vectorFromVals(x, y, z, w);
+}
+
+llvm::Value * Instructions::sgt(llvm::Value *in1, llvm::Value *in2)
+{
+ Constant *const1f = ConstantFP::get(APFloat(1.000000e+00f));
+ Constant *const0f = Constant::getNullValue(Type::FloatTy);
+
+ std::vector<llvm::Value*> vec1 = extractVector(in1);
+ std::vector<llvm::Value*> vec2 = extractVector(in2);
+ Value *xcmp = m_builder.CreateFCmpOGT(vec1[0], vec2[0], name("xcmp"));
+ Value *x = m_builder.CreateSelect(xcmp, const1f, const0f, name("xsel"));
+
+ Value *ycmp = m_builder.CreateFCmpOGT(vec1[1], vec2[1], name("ycmp"));
+ Value *y = m_builder.CreateSelect(ycmp, const1f, const0f, name("ysel"));
+
+ Value *zcmp = m_builder.CreateFCmpOGT(vec1[2], vec2[2], name("zcmp"));
+ Value *z = m_builder.CreateSelect(zcmp, const1f, const0f, name("zsel"));
+
+ Value *wcmp = m_builder.CreateFCmpOGT(vec1[3], vec2[3], name("wcmp"));
+ Value *w = m_builder.CreateSelect(wcmp, const1f, const0f, name("wsel"));
+
+ return vectorFromVals(x, y, z, w);
+}
+
+llvm::Value * Instructions::sin(llvm::Value *in)
+{
+ llvm::Function *func = m_mod->getFunction("vsin");
+ assert(func);
+
+ CallInst *call = m_builder.CreateCall(func, in, name("sinres"));
+ call->setTailCall(false);
+ return call;
+}
+
+llvm::Value * Instructions::sle(llvm::Value *in1, llvm::Value *in2)
+{
+ Constant *const1f = ConstantFP::get(APFloat(1.000000e+00f));
+ Constant *const0f = Constant::getNullValue(Type::FloatTy);
+
+ std::vector<llvm::Value*> vec1 = extractVector(in1);
+ std::vector<llvm::Value*> vec2 = extractVector(in2);
+
+ Value *xcmp = m_builder.CreateFCmpOLE(vec1[0], vec2[0], name("xcmp"));
+ Value *x = m_builder.CreateSelect(xcmp, const1f, const0f, name("xsel"));
+
+ Value *ycmp = m_builder.CreateFCmpOLE(vec1[1], vec2[1], name("ycmp"));
+ Value *y = m_builder.CreateSelect(ycmp, const1f, const0f, name("ysel"));
+
+ Value *zcmp = m_builder.CreateFCmpOLE(vec1[2], vec2[2], name("zcmp"));
+ Value *z = m_builder.CreateSelect(zcmp, const1f, const0f, name("zsel"));
+
+ Value *wcmp = m_builder.CreateFCmpOLE(vec1[3], vec2[3], name("wcmp"));
+ Value *w = m_builder.CreateSelect(wcmp, const1f, const0f, name("wsel"));
+
+ return vectorFromVals(x, y, z, w);
+}
+
+llvm::Value * Instructions::slt(llvm::Value *in1, llvm::Value *in2)
+{
+ Constant *const1f = ConstantFP::get(APFloat(1.000000e+00f));
+ Constant *const0f = Constant::getNullValue(Type::FloatTy);
+
+ std::vector<llvm::Value*> vec1 = extractVector(in1);
+ std::vector<llvm::Value*> vec2 = extractVector(in2);
+
+ Value *xcmp = m_builder.CreateFCmpOLT(vec1[0], vec2[0], name("xcmp"));
+ Value *x = m_builder.CreateSelect(xcmp, const1f, const0f, name("xsel"));
+
+ Value *ycmp = m_builder.CreateFCmpOLT(vec1[1], vec2[1], name("ycmp"));
+ Value *y = m_builder.CreateSelect(ycmp, const1f, const0f, name("ysel"));
+
+ Value *zcmp = m_builder.CreateFCmpOLT(vec1[2], vec2[2], name("zcmp"));
+ Value *z = m_builder.CreateSelect(zcmp, const1f, const0f, name("zsel"));
+
+ Value *wcmp = m_builder.CreateFCmpOLT(vec1[3], vec2[3], name("wcmp"));
+ Value *w = m_builder.CreateSelect(wcmp, const1f, const0f, name("wsel"));
+
+ return vectorFromVals(x, y, z, w);
+}
+
+llvm::Value * Instructions::sne(llvm::Value *in1, llvm::Value *in2)
+{
+ Constant *const1f = ConstantFP::get(APFloat(1.000000e+00f));
+ Constant *const0f = Constant::getNullValue(Type::FloatTy);
+
+ std::vector<llvm::Value*> vec1 = extractVector(in1);
+ std::vector<llvm::Value*> vec2 = extractVector(in2);
+
+ Value *xcmp = m_builder.CreateFCmpONE(vec1[0], vec2[0], name("xcmp"));
+ Value *x = m_builder.CreateSelect(xcmp, const1f, const0f, name("xsel"));
+
+ Value *ycmp = m_builder.CreateFCmpONE(vec1[1], vec2[1], name("ycmp"));
+ Value *y = m_builder.CreateSelect(ycmp, const1f, const0f, name("ysel"));
+
+ Value *zcmp = m_builder.CreateFCmpONE(vec1[2], vec2[2], name("zcmp"));
+ Value *z = m_builder.CreateSelect(zcmp, const1f, const0f, name("zsel"));
+
+ Value *wcmp = m_builder.CreateFCmpONE(vec1[3], vec2[3], name("wcmp"));
+ Value *w = m_builder.CreateSelect(wcmp, const1f, const0f, name("wsel"));
+
+ return vectorFromVals(x, y, z, w);
+}
+
+llvm::Value * Instructions::str(llvm::Value *in1, llvm::Value *in2)
+{
+ Constant *const1f = ConstantFP::get(APFloat(1.000000e+00f));
+
+ return vectorFromVals(const1f, const1f, const1f, const1f);
+}
+
+llvm::Value * Instructions::sub(llvm::Value *in1, llvm::Value *in2)
+{
+ Value *res = m_builder.CreateSub(in1, in2, name("sub"));
+ return res;
+}
+
+llvm::Value * Instructions::trunc(llvm::Value *in)
+{
+ std::vector<llvm::Value*> vec = extractVector(in);
+ Value *icastx = m_builder.CreateFPToSI(vec[0], IntegerType::get(32),
+ name("ftoix"));
+ Value *icasty = m_builder.CreateFPToSI(vec[1], IntegerType::get(32),
+ name("ftoiy"));
+ Value *icastz = m_builder.CreateFPToSI(vec[2], IntegerType::get(32),
+ name("ftoiz"));
+ Value *icastw = m_builder.CreateFPToSI(vec[3], IntegerType::get(32),
+ name("ftoiw"));
+ Value *fx = m_builder.CreateSIToFP(icastx, Type::FloatTy,
+ name("fx"));
+ Value *fy = m_builder.CreateSIToFP(icasty, Type::FloatTy,
+ name("fy"));
+ Value *fz = m_builder.CreateSIToFP(icastz, Type::FloatTy,
+ name("fz"));
+ Value *fw = m_builder.CreateSIToFP(icastw, Type::FloatTy,
+ name("fw"));
+ return vectorFromVals(fx, fy, fz, fw);
+}
+
+llvm::Value * Instructions::x2d(llvm::Value *in1, llvm::Value *in2, llvm::Value *in3)
+{
+ std::vector<llvm::Value*> vec1 = extractVector(in1);
+ std::vector<llvm::Value*> vec2 = extractVector(in2);
+ std::vector<llvm::Value*> vec3 = extractVector(in3);
+
+ Value *x2x3 = m_builder.CreateMul( vec2[0], vec3[0], name("x2x3"));
+ Value *y2y3 = m_builder.CreateMul( vec2[1], vec3[1], name("y2y3"));
+ Value *x1px2x3 = m_builder.CreateAdd (vec1[0], x2x3, name("x1 + x2x3"));
+ Value *x1px2x3py2y3 = m_builder.CreateAdd (x1px2x3, y2y3, name("x1 + x2x3 + y2y3"));
+
+ Value *x2z3 = m_builder.CreateMul( vec2[0], vec3[2], name("x2z3"));
+ Value *y2w3 = m_builder.CreateMul( vec2[1], vec3[3], name("y2w3"));
+ Value *y1px2z3 = m_builder.CreateAdd (vec1[1], x2z3, name("y1 + x2z3"));
+ Value *y1px2z3py2w3 = m_builder.CreateAdd (y1px2z3, y2w3, name("y1 + x2z3 + y2w3"));
+
+ return vectorFromVals(x1px2x3py2y3, y1px2z3py2w3, x1px2x3py2y3, y1px2z3py2w3);
+}
+
+void Instructions::printVector(llvm::Value *val)
+{
+ static const char *frmt = "Vector is [%f, %f, %f, %f]\x0A";
+
+ if (!m_fmtPtr) {
+ Constant *format = ConstantArray::get(frmt, true);
+ ArrayType *arrayTy = ArrayType::get(IntegerType::get(8), strlen(frmt) + 1);
+ GlobalVariable* globalFormat = new GlobalVariable(
+ /*Type=*/arrayTy,
+ /*isConstant=*/true,
+ /*Linkage=*/GlobalValue::InternalLinkage,
+ /*Initializer=*/0, // has initializer, specified below
+ /*Name=*/name(".str"),
+ m_mod);
+ globalFormat->setInitializer(format);
+
+ Constant* const_int0 = Constant::getNullValue(IntegerType::get(32));
+ std::vector<Constant*> const_ptr_21_indices;
+ const_ptr_21_indices.push_back(const_int0);
+ const_ptr_21_indices.push_back(const_int0);
+ m_fmtPtr = ConstantExpr::getGetElementPtr(globalFormat,
+ &const_ptr_21_indices[0], const_ptr_21_indices.size());
+ }
+
+ Function *func_printf = m_mod->getFunction("printf");
+ if (!func_printf)
+ func_printf = declarePrintf();
+ assert(func_printf);
+ std::vector<llvm::Value*> vec = extractVector(val);
+ Value *dx = m_builder.CreateFPExt(vec[0], Type::DoubleTy, name("dx"));
+ Value *dy = m_builder.CreateFPExt(vec[1], Type::DoubleTy, name("dy"));
+ Value *dz = m_builder.CreateFPExt(vec[2], Type::DoubleTy, name("dz"));
+ Value *dw = m_builder.CreateFPExt(vec[3], Type::DoubleTy, name("dw"));
+ std::vector<Value*> params;
+ params.push_back(m_fmtPtr);
+ params.push_back(dx);
+ params.push_back(dy);
+ params.push_back(dz);
+ params.push_back(dw);
+ CallInst *call = m_builder.CreateCall(func_printf, params.begin(), params.end(),
+ name("printf"));
+ call->setCallingConv(CallingConv::C);
+ call->setTailCall(true);
+}
+
+const char * Instructions::name(const char *prefix)
+{
+ ++m_idx;
+ snprintf(m_name, 32, "%s%d", prefix, m_idx);
+ return m_name;
+}
+
+llvm::Value * Instructions::callCeil(llvm::Value *val)
+{
+ if (!m_llvmCeil) {
+ // predeclare the intrinsic
+ std::vector<const Type*> ceilArgs;
+ ceilArgs.push_back(Type::FloatTy);
+ AttrListPtr ceilPal;
+ FunctionType* ceilType = FunctionType::get(
+ /*Result=*/Type::FloatTy,
+ /*Params=*/ceilArgs,
+ /*isVarArg=*/false);
+ m_llvmCeil = Function::Create(
+ /*Type=*/ceilType,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"ceilf", m_mod);
+ m_llvmCeil->setCallingConv(CallingConv::C);
+ m_llvmCeil->setAttributes(ceilPal);
+ }
+ CallInst *call = m_builder.CreateCall(m_llvmCeil, val,
+ name("ceilf"));
+ call->setCallingConv(CallingConv::C);
+ call->setTailCall(false);
+ return call;
+}
+
+llvm::Value *Instructions::callFAbs(llvm::Value *val)
+{
+ if (!m_llvmFAbs) {
+ // predeclare the intrinsic
+ std::vector<const Type*> fabsArgs;
+ fabsArgs.push_back(Type::FloatTy);
+ AttrListPtr fabsPal;
+ FunctionType* fabsType = FunctionType::get(
+ /*Result=*/Type::FloatTy,
+ /*Params=*/fabsArgs,
+ /*isVarArg=*/false);
+ m_llvmFAbs = Function::Create(
+ /*Type=*/fabsType,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"fabs", m_mod);
+ m_llvmFAbs->setCallingConv(CallingConv::C);
+ m_llvmFAbs->setAttributes(fabsPal);
+ }
+ CallInst *call = m_builder.CreateCall(m_llvmFAbs, val,
+ name("fabs"));
+ call->setCallingConv(CallingConv::C);
+ call->setTailCall(false);
+ return call;
+}
+
+llvm::Value * Instructions::callFExp(llvm::Value *val)
+{
+ if (!m_llvmFexp) {
+ // predeclare the intrinsic
+ std::vector<const Type*> fexpArgs;
+ fexpArgs.push_back(Type::FloatTy);
+ AttrListPtr fexpPal;
+ FunctionType* fexpType = FunctionType::get(
+ /*Result=*/Type::FloatTy,
+ /*Params=*/fexpArgs,
+ /*isVarArg=*/false);
+ m_llvmFexp = Function::Create(
+ /*Type=*/fexpType,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"expf", m_mod);
+ m_llvmFexp->setCallingConv(CallingConv::C);
+ m_llvmFexp->setAttributes(fexpPal);
+ }
+ CallInst *call = m_builder.CreateCall(m_llvmFexp, val,
+ name("expf"));
+ call->setCallingConv(CallingConv::C);
+ call->setTailCall(false);
+ return call;
+}
+
+llvm::Value * Instructions::callFLog(llvm::Value *val)
+{
+ if (!m_llvmFlog) {
+ // predeclare the intrinsic
+ std::vector<const Type*> flogArgs;
+ flogArgs.push_back(Type::FloatTy);
+ AttrListPtr flogPal;
+ FunctionType* flogType = FunctionType::get(
+ /*Result=*/Type::FloatTy,
+ /*Params=*/flogArgs,
+ /*isVarArg=*/false);
+ m_llvmFlog = Function::Create(
+ /*Type=*/flogType,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"logf", m_mod);
+ m_llvmFlog->setCallingConv(CallingConv::C);
+ m_llvmFlog->setAttributes(flogPal);
+ }
+ CallInst *call = m_builder.CreateCall(m_llvmFlog, val,
+ name("logf"));
+ call->setCallingConv(CallingConv::C);
+ call->setTailCall(false);
+ return call;
+}
+
+llvm::Value * Instructions::callFloor(llvm::Value *val)
+{
+ if (!m_llvmFloor) {
+ // predeclare the intrinsic
+ std::vector<const Type*> floorArgs;
+ floorArgs.push_back(Type::FloatTy);
+ AttrListPtr floorPal;
+ FunctionType* floorType = FunctionType::get(
+ /*Result=*/Type::FloatTy,
+ /*Params=*/floorArgs,
+ /*isVarArg=*/false);
+ m_llvmFloor = Function::Create(
+ /*Type=*/floorType,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"floorf", m_mod);
+ m_llvmFloor->setCallingConv(CallingConv::C);
+ m_llvmFloor->setAttributes(floorPal);
+ }
+ CallInst *call = m_builder.CreateCall(m_llvmFloor, val,
+ name("floorf"));
+ call->setCallingConv(CallingConv::C);
+ call->setTailCall(false);
+ return call;
+}
+
+llvm::Value *Instructions::callFSqrt(llvm::Value *val)
+{
+ if (!m_llvmFSqrt) {
+ // predeclare the intrinsic
+ std::vector<const Type*> fsqrtArgs;
+ fsqrtArgs.push_back(Type::FloatTy);
+ AttrListPtr fsqrtPal;
+ FunctionType* fsqrtType = FunctionType::get(
+ /*Result=*/Type::FloatTy,
+ /*Params=*/fsqrtArgs,
+ /*isVarArg=*/false);
+ m_llvmFSqrt = Function::Create(
+ /*Type=*/fsqrtType,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"llvm.sqrt.f32", m_mod);
+ m_llvmFSqrt->setCallingConv(CallingConv::C);
+ m_llvmFSqrt->setAttributes(fsqrtPal);
+ }
+ CallInst *call = m_builder.CreateCall(m_llvmFSqrt, val,
+ name("sqrt"));
+ call->setCallingConv(CallingConv::C);
+ call->setTailCall(false);
+ return call;
+}
+
+llvm::Value * Instructions::callPow(llvm::Value *val1, llvm::Value *val2)
+{
+ if (!m_llvmPow) {
+ // predeclare the intrinsic
+ std::vector<const Type*> powArgs;
+ powArgs.push_back(Type::FloatTy);
+ powArgs.push_back(Type::FloatTy);
+ AttrListPtr powPal;
+ FunctionType* powType = FunctionType::get(
+ /*Result=*/Type::FloatTy,
+ /*Params=*/powArgs,
+ /*isVarArg=*/false);
+ m_llvmPow = Function::Create(
+ /*Type=*/powType,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"llvm.pow.f32", m_mod);
+ m_llvmPow->setCallingConv(CallingConv::C);
+ m_llvmPow->setAttributes(powPal);
+ }
+ std::vector<Value*> params;
+ params.push_back(val1);
+ params.push_back(val2);
+ CallInst *call = m_builder.CreateCall(m_llvmPow, params.begin(), params.end(),
+ name("pow"));
+ call->setCallingConv(CallingConv::C);
+ call->setTailCall(false);
+ return call;
+}
+
+llvm::Value * Instructions::vectorFromVals(llvm::Value *x, llvm::Value *y,
+ llvm::Value *z, llvm::Value *w)
+{
+ Constant *const_vec = Constant::getNullValue(m_floatVecType);
+ Value *res = m_builder.CreateInsertElement(const_vec, x,
+ m_storage->constantInt(0),
+ name("vecx"));
+ res = m_builder.CreateInsertElement(res, y, m_storage->constantInt(1),
+ name("vecxy"));
+ res = m_builder.CreateInsertElement(res, z, m_storage->constantInt(2),
+ name("vecxyz"));
+ if (w)
+ res = m_builder.CreateInsertElement(res, w, m_storage->constantInt(3),
+ name("vecxyzw"));
+ return res;
+}
+
+llvm::Value * Instructions::constVector(float x, float y, float z, float w)
+{
+ std::vector<Constant*> vec(4);
+ vec[0] = ConstantFP::get(APFloat(x));
+ vec[1] = ConstantFP::get(APFloat(y));
+ vec[2] = ConstantFP::get(APFloat(z));
+ vec[3] = ConstantFP::get(APFloat(w));
+ return ConstantVector::get(m_floatVecType, vec);
+}
+
+llvm::Function * Instructions::declarePrintf()
+{
+ std::vector<const Type*> args;
+ AttrListPtr params;
+ FunctionType* funcTy = FunctionType::get(
+ /*Result=*/IntegerType::get(32),
+ /*Params=*/args,
+ /*isVarArg=*/true);
+ Function* func_printf = Function::Create(
+ /*Type=*/funcTy,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/"printf", m_mod);
+ func_printf->setCallingConv(CallingConv::C);
+ func_printf->setAttributes(params);
+ return func_printf;
+}
+
+llvm::Function * Instructions::declareFunc(int label)
+{
+ PointerType *vecPtr = PointerType::getUnqual(m_floatVecType);
+ std::vector<const Type*> args;
+ args.push_back(vecPtr);
+ args.push_back(vecPtr);
+ args.push_back(vecPtr);
+ args.push_back(vecPtr);
+ AttrListPtr params;
+ FunctionType *funcType = FunctionType::get(
+ /*Result=*/Type::VoidTy,
+ /*Params=*/args,
+ /*isVarArg=*/false);
+ std::string name = createFuncName(label);
+ Function *func = Function::Create(
+ /*Type=*/funcType,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Name=*/name.c_str(), m_mod);
+ func->setCallingConv(CallingConv::C);
+ func->setAttributes(params);
+ return func;
+}
+
+llvm::Function * Instructions::findFunction(int label)
+{
+ llvm::Function *func = m_functions[label];
+ if (!func) {
+ func = declareFunc(label);
+ m_functions[label] = func;
+ }
+ return func;
+}
+
+std::vector<llvm::Value*> Instructions::extractVector(llvm::Value *vec)
+{
+ std::vector<llvm::Value*> elems(4);
+ elems[0] = m_builder.CreateExtractElement(vec, m_storage->constantInt(0),
+ name("x"));
+ elems[1] = m_builder.CreateExtractElement(vec, m_storage->constantInt(1),
+ name("y"));
+ elems[2] = m_builder.CreateExtractElement(vec, m_storage->constantInt(2),
+ name("z"));
+ elems[3] = m_builder.CreateExtractElement(vec, m_storage->constantInt(3),
+ name("w"));
+ return elems;
+}
+
+
+#endif //MESA_LLVM
+
+
diff --git a/src/gallium/auxiliary/gallivm/instructions.h b/src/gallium/auxiliary/gallivm/instructions.h
new file mode 100644
index 0000000000..e18571251e
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/instructions.h
@@ -0,0 +1,175 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Zack Rusin zack@tungstengraphics.com
+ */
+
+#ifndef INSTRUCTIONS_H
+#define INSTRUCTIONS_H
+
+#include <llvm/BasicBlock.h>
+#include <llvm/Module.h>
+#include <llvm/Value.h>
+#include <llvm/Support/IRBuilder.h>
+
+#include <map>
+#include <stack>
+
+namespace llvm {
+ class VectorType;
+ class Function;
+}
+
+class Storage;
+
+class Instructions
+{
+public:
+ Instructions(llvm::Module *mod, llvm::Function *func, llvm::BasicBlock *block,
+ Storage *storage);
+
+ llvm::BasicBlock *currentBlock() const;
+
+ llvm::Value *abs(llvm::Value *in1);
+ llvm::Value *add(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *arl(llvm::Value *in1);
+ void beginLoop();
+ void bgnSub(unsigned);
+ void brk();
+ void cal(int label, llvm::Value *input);
+ llvm::Value *ceil(llvm::Value *in);
+ llvm::Value *clamp(llvm::Value *in);
+ llvm::Value *cmp(llvm::Value *in1, llvm::Value *in2, llvm::Value *in3);
+ llvm::Value *cnd(llvm::Value *in1, llvm::Value *in2, llvm::Value *in3);
+ llvm::Value *cnd0(llvm::Value *in1, llvm::Value *in2, llvm::Value *in3);
+ llvm::Value *cos(llvm::Value *in);
+ llvm::Value *cross(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *ddx(llvm::Value *in);
+ llvm::Value *ddy(llvm::Value *in);
+ llvm::Value *div(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *dot2add(llvm::Value *in, llvm::Value *in2, llvm::Value *in3);
+ llvm::Value *dp2(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *dp3(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *dp4(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *dph(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *dst(llvm::Value *in1, llvm::Value *in2);
+ void elseop();
+ void endif();
+ void endLoop();
+ void end();
+ void endSub();
+ llvm::Value *exp(llvm::Value *in);
+ llvm::Value *ex2(llvm::Value *in);
+ llvm::Value *floor(llvm::Value *in);
+ llvm::Value *frc(llvm::Value *in);
+ void ifop(llvm::Value *in);
+ llvm::Value *kil(llvm::Value *in);
+ llvm::Value *lerp(llvm::Value *in1, llvm::Value *in2,
+ llvm::Value *in3);
+ llvm::Value *lg2(llvm::Value *in);
+ llvm::Value *lit(llvm::Value *in);
+ llvm::Value *log(llvm::Value *in);
+ llvm::Value *madd(llvm::Value *in1, llvm::Value *in2,
+ llvm::Value *in3);
+ llvm::Value *max(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *min(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *mul(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *neg(llvm::Value *in);
+ llvm::Value *nrm(llvm::Value *in);
+ llvm::Value *pow(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *rcp(llvm::Value *in);
+ llvm::Value *rsq(llvm::Value *in);
+ llvm::Value *scs(llvm::Value *in);
+ llvm::Value *seq(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *sfl(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *sge(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *sgt(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *sin(llvm::Value *in);
+ llvm::Value *sle(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *slt(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *sne(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *str(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *sub(llvm::Value *in1, llvm::Value *in2);
+ llvm::Value *trunc(llvm::Value *in);
+ llvm::Value *x2d(llvm::Value *in1, llvm::Value *in2, llvm::Value *in3);
+
+ void printVector(llvm::Value *val);
+private:
+ const char *name(const char *prefix);
+
+ llvm::Value *callCeil(llvm::Value *val);
+ llvm::Value *callFAbs(llvm::Value *val);
+ llvm::Value *callFExp(llvm::Value *val);
+ llvm::Value *callFLog(llvm::Value *val);
+ llvm::Value *callFloor(llvm::Value *val);
+ llvm::Value *callFSqrt(llvm::Value *val);
+ llvm::Value *callPow(llvm::Value *val1, llvm::Value *val2);
+
+ llvm::Value *vectorFromVals(llvm::Value *x, llvm::Value *y,
+ llvm::Value *z, llvm::Value *w=0);
+
+ llvm::Value *constVector(float x, float y, float z, float w);
+
+ llvm::Function *declarePrintf();
+ llvm::Function *declareFunc(int label);
+
+ llvm::Function *findFunction(int label);
+
+ std::vector<llvm::Value*> extractVector(llvm::Value *vec);
+private:
+ llvm::Module *m_mod;
+ llvm::Function *m_func;
+ char m_name[32];
+ llvm::IRBuilder<> m_builder;
+ int m_idx;
+
+ llvm::VectorType *m_floatVecType;
+
+ llvm::Function *m_llvmCeil;
+ llvm::Function *m_llvmFSqrt;
+ llvm::Function *m_llvmFAbs;
+ llvm::Function *m_llvmPow;
+ llvm::Function *m_llvmFloor;
+ llvm::Function *m_llvmFlog;
+ llvm::Function *m_llvmFexp;
+ llvm::Function *m_llvmLit;
+
+ llvm::Constant *m_fmtPtr;
+
+ std::stack<llvm::BasicBlock*> m_ifStack;
+ struct Loop {
+ llvm::BasicBlock *begin;
+ llvm::BasicBlock *end;
+ };
+ std::stack<Loop> m_loopStack;
+ std::map<int, llvm::Function*> m_functions;
+ Storage *m_storage;
+};
+
+#endif
diff --git a/src/gallium/auxiliary/gallivm/instructionssoa.cpp b/src/gallium/auxiliary/gallivm/instructionssoa.cpp
new file mode 100644
index 0000000000..721b7d2d83
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/instructionssoa.cpp
@@ -0,0 +1,525 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <cstdio>
+#include "instructionssoa.h"
+
+#include "storagesoa.h"
+
+#include "pipe/p_shader_tokens.h"
+#include "util/u_memory.h"
+
+#include <llvm/CallingConv.h>
+#include <llvm/Constants.h>
+#include <llvm/Module.h>
+#include <llvm/Function.h>
+#include <llvm/Instructions.h>
+#include <llvm/Transforms/Utils/Cloning.h>
+#include <llvm/Attributes.h>
+#include <llvm/Support/MemoryBuffer.h>
+#include <llvm/Bitcode/ReaderWriter.h>
+
+
+#include <iostream>
+
+
+/* disable some warnings; the included gallivmsoabuiltins.cpp below is autogenerated */
+#if defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#endif
+using namespace llvm;
+#include "gallivmsoabuiltins.cpp"
+#if defined(__GNUC__)
+#pragma GCC diagnostic warning "-Wunused-variable"
+#endif
+
+InstructionsSoa::InstructionsSoa(llvm::Module *mod, llvm::Function *func,
+ llvm::BasicBlock *block, StorageSoa *storage)
+ : m_builder(block),
+ m_storage(storage),
+ m_idx(0)
+{
+ createFunctionMap();
+ createBuiltins();
+}
+
+const char * InstructionsSoa::name(const char *prefix) const
+{
+ ++m_idx;
+ snprintf(m_name, 32, "%s%d", prefix, m_idx);
+ return m_name;
+}
+
+llvm::Value * InstructionsSoa::vectorFromVals(llvm::Value *x, llvm::Value *y,
+ llvm::Value *z, llvm::Value *w)
+{
+ VectorType *vectorType = VectorType::get(Type::FloatTy, 4);
+ Constant *constVector = Constant::getNullValue(vectorType);
+ Value *res = m_builder.CreateInsertElement(constVector, x,
+ m_storage->constantInt(0),
+ name("vecx"));
+ res = m_builder.CreateInsertElement(res, y, m_storage->constantInt(1),
+ name("vecxy"));
+ res = m_builder.CreateInsertElement(res, z, m_storage->constantInt(2),
+ name("vecxyz"));
+ if (w)
+ res = m_builder.CreateInsertElement(res, w, m_storage->constantInt(3),
+ name("vecxyzw"));
+ return res;
+}
+
+void InstructionsSoa::end()
+{
+ m_builder.CreateRetVoid();
+}
+
+std::vector<llvm::Value*> InstructionsSoa::extractVector(llvm::Value *vector)
+{
+ std::vector<llvm::Value*> res(4);
+ res[0] = m_builder.CreateExtractElement(vector,
+ m_storage->constantInt(0),
+ name("extract1X"));
+ res[1] = m_builder.CreateExtractElement(vector,
+ m_storage->constantInt(1),
+ name("extract2X"));
+ res[2] = m_builder.CreateExtractElement(vector,
+ m_storage->constantInt(2),
+ name("extract3X"));
+ res[3] = m_builder.CreateExtractElement(vector,
+ m_storage->constantInt(3),
+ name("extract4X"));
+
+ return res;
+}
+
+llvm::IRBuilder<>* InstructionsSoa::getIRBuilder()
+{
+ return &m_builder;
+}
+
+void InstructionsSoa::createFunctionMap()
+{
+ m_functionsMap[TGSI_OPCODE_ABS] = "abs";
+ m_functionsMap[TGSI_OPCODE_DP3] = "dp3";
+ m_functionsMap[TGSI_OPCODE_DP4] = "dp4";
+ m_functionsMap[TGSI_OPCODE_MIN] = "min";
+ m_functionsMap[TGSI_OPCODE_MAX] = "max";
+ m_functionsMap[TGSI_OPCODE_POW] = "pow";
+ m_functionsMap[TGSI_OPCODE_LIT] = "lit";
+ m_functionsMap[TGSI_OPCODE_RSQ] = "rsq";
+ m_functionsMap[TGSI_OPCODE_SLT] = "slt";
+}
+
+void InstructionsSoa::createDependencies()
+{
+ {
+ std::vector<std::string> powDeps(2);
+ powDeps[0] = "powf";
+ powDeps[1] = "powvec";
+ m_builtinDependencies["pow"] = powDeps;
+ }
+ {
+ std::vector<std::string> absDeps(2);
+ absDeps[0] = "fabsf";
+ absDeps[1] = "absvec";
+ m_builtinDependencies["abs"] = absDeps;
+ }
+ {
+ std::vector<std::string> maxDeps(1);
+ maxDeps[0] = "maxvec";
+ m_builtinDependencies["max"] = maxDeps;
+ }
+ {
+ std::vector<std::string> minDeps(1);
+ minDeps[0] = "minvec";
+ m_builtinDependencies["min"] = minDeps;
+ }
+ {
+ std::vector<std::string> litDeps(4);
+ litDeps[0] = "minvec";
+ litDeps[1] = "maxvec";
+ litDeps[2] = "powf";
+ litDeps[3] = "powvec";
+ m_builtinDependencies["lit"] = litDeps;
+ }
+ {
+ std::vector<std::string> rsqDeps(4);
+ rsqDeps[0] = "sqrtf";
+ rsqDeps[1] = "sqrtvec";
+ rsqDeps[2] = "fabsf";
+ rsqDeps[3] = "absvec";
+ m_builtinDependencies["rsq"] = rsqDeps;
+ }
+}
+
+llvm::Function * InstructionsSoa::function(int op)
+{
+ if (m_functions.find(op) != m_functions.end())
+ return m_functions[op];
+
+ std::string name = m_functionsMap[op];
+
+ std::cout <<"For op = "<<op<<", func is '"<<name<<"'"<<std::endl;
+
+ std::vector<std::string> deps = m_builtinDependencies[name];
+ for (unsigned int i = 0; i < deps.size(); ++i) {
+ llvm::Function *func = m_builtins->getFunction(deps[i]);
+ std::cout <<"\tinjecting dep = '"<<func->getName()<<"'"<<std::endl;
+ injectFunction(func);
+ }
+
+ llvm::Function *originalFunc = m_builtins->getFunction(name);
+ injectFunction(originalFunc, op);
+ return m_functions[op];
+}
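+
+/*
+ * The SoA builtins live in a separate module parsed from the autogenerated
+ * gallivmsoabuiltins.cpp data (see createBuiltins below); function() lazily
+ * brings the requested builtin, plus any helpers listed in
+ * m_builtinDependencies, into the current module (presumably by cloning, via
+ * injectFunction()) and caches the result in m_functions.
+ */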
+
+llvm::Module * InstructionsSoa::currentModule() const
+{
+ BasicBlock *block = m_builder.GetInsertBlock();
+ if (!block || !block->getParent())
+ return 0;
+
+ return block->getParent()->getParent();
+}
+
+void InstructionsSoa::createBuiltins()
+{
+ std::string ErrMsg;
+ MemoryBuffer *buffer = MemoryBuffer::getMemBuffer(
+ (const char*)&soabuiltins_data[0],
+ (const char*)&soabuiltins_data[Elements(soabuiltins_data) - 1]);
+ m_builtins = ParseBitcodeFile(buffer, &ErrMsg);
+ std::cout<<"Builtins created at "<<m_builtins<<" ("<<ErrMsg<<")"<<std::endl;
+ assert(m_builtins);
+ createDependencies();
+}
+
+
+std::vector<llvm::Value*> InstructionsSoa::abs(const std::vector<llvm::Value*> in1)
+{
+ llvm::Function *func = function(TGSI_OPCODE_ABS);
+ return callBuiltin(func, in1);
+}
+
+std::vector<llvm::Value*> InstructionsSoa::add(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2)
+{
+ std::vector<llvm::Value*> res(4);
+
+ res[0] = m_builder.CreateAdd(in1[0], in2[0], name("addx"));
+ res[1] = m_builder.CreateAdd(in1[1], in2[1], name("addy"));
+ res[2] = m_builder.CreateAdd(in1[2], in2[2], name("addz"));
+ res[3] = m_builder.CreateAdd(in1[3], in2[3], name("addw"));
+
+ return res;
+}
+
+std::vector<llvm::Value*> InstructionsSoa::arl(const std::vector<llvm::Value*> in)
+{
+ std::vector<llvm::Value*> res(4);
+
+ //Extract x's
+ llvm::Value *x1 = m_builder.CreateExtractElement(in[0],
+ m_storage->constantInt(0),
+ name("extractX"));
+ //cast it to an unsigned int
+ x1 = m_builder.CreateFPToUI(x1, IntegerType::get(32), name("x1IntCast"));
+
+ res[0] = x1;//vectorFromVals(x1, x2, x3, x4);
+ //only x is valid. the others shouldn't be necessary
+ /*
+ res[1] = Constant::getNullValue(m_floatVecType);
+ res[2] = Constant::getNullValue(m_floatVecType);
+ res[3] = Constant::getNullValue(m_floatVecType);
+ */
+
+ return res;
+}
+
+std::vector<llvm::Value*> InstructionsSoa::dp3(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2)
+{
+ llvm::Function *func = function(TGSI_OPCODE_DP3);
+ return callBuiltin(func, in1, in2);
+}
+
+std::vector<llvm::Value*> InstructionsSoa::lit(const std::vector<llvm::Value*> in)
+{
+ llvm::Function *func = function(TGSI_OPCODE_LIT);
+ return callBuiltin(func, in);
+}
+
+std::vector<llvm::Value*> InstructionsSoa::madd(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2,
+ const std::vector<llvm::Value*> in3)
+{
+ std::vector<llvm::Value*> res = mul(in1, in2);
+ return add(res, in3);
+}
+
+std::vector<llvm::Value*> InstructionsSoa::max(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2)
+{
+ llvm::Function *func = function(TGSI_OPCODE_MAX);
+ return callBuiltin(func, in1, in2);
+}
+
+std::vector<llvm::Value*> InstructionsSoa::min(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2)
+{
+ llvm::Function *func = function(TGSI_OPCODE_MIN);
+ return callBuiltin(func, in1, in2);
+}
+
+std::vector<llvm::Value*> InstructionsSoa::mul(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2)
+{
+ std::vector<llvm::Value*> res(4);
+
+ res[0] = m_builder.CreateMul(in1[0], in2[0], name("mulx"));
+ res[1] = m_builder.CreateMul(in1[1], in2[1], name("muly"));
+ res[2] = m_builder.CreateMul(in1[2], in2[2], name("mulz"));
+ res[3] = m_builder.CreateMul(in1[3], in2[3], name("mulw"));
+
+ return res;
+}
+
+std::vector<llvm::Value*> InstructionsSoa::pow(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2)
+{
+ llvm::Function *func = function(TGSI_OPCODE_POW);
+ return callBuiltin(func, in1, in2);
+}
+
+std::vector<llvm::Value*> InstructionsSoa::rsq(const std::vector<llvm::Value*> in)
+{
+ llvm::Function *func = function(TGSI_OPCODE_RSQ);
+ return callBuiltin(func, in);
+}
+
+std::vector<llvm::Value*> InstructionsSoa::slt(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2)
+{
+ llvm::Function *func = function(TGSI_OPCODE_SLT);
+ return callBuiltin(func, in1, in2);
+}
+
+std::vector<llvm::Value*> InstructionsSoa::sub(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2)
+{
+ std::vector<llvm::Value*> res(4);
+
+ res[0] = m_builder.CreateSub(in1[0], in2[0], name("subx"));
+ res[1] = m_builder.CreateSub(in1[1], in2[1], name("suby"));
+ res[2] = m_builder.CreateSub(in1[2], in2[2], name("subz"));
+ res[3] = m_builder.CreateSub(in1[3], in2[3], name("subw"));
+
+ return res;
+}
+
+void checkFunction(Function *func)
+{
+ for (Function::const_iterator BI = func->begin(), BE = func->end();
+ BI != BE; ++BI) {
+ const BasicBlock &BB = *BI;
+ for (BasicBlock::const_iterator II = BB.begin(), IE = BB.end();
+ II != IE; ++II) {
+ const Instruction &I = *II;
+ std::cout<< "Instr = "<<I;
+ for (unsigned op = 0, E = I.getNumOperands(); op != E; ++op) {
+ const Value *Op = I.getOperand(op);
+ std::cout<< "\top = "<<Op<<"("<<op<<")"<<std::endl;
+ //I->setOperand(op, V);
+ }
+ }
+ }
+}
+
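+/* Allocates a [4 x <4 x float>] stack slot for a builtin's result and
+ * returns a pointer to its first channel vector. */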
+llvm::Value * InstructionsSoa::allocaTemp()
+{
+ VectorType *vector = VectorType::get(Type::FloatTy, 4);
+ ArrayType *vecArray = ArrayType::get(vector, 4);
+ AllocaInst *alloca = new AllocaInst(vecArray, name("tmpRes"),
+ m_builder.GetInsertBlock());
+
+ std::vector<Value*> indices;
+ indices.push_back(m_storage->constantInt(0));
+ indices.push_back(m_storage->constantInt(0));
+ GetElementPtrInst *getElem = GetElementPtrInst::Create(alloca,
+ indices.begin(),
+ indices.end(),
+ name("allocaPtr"),
+ m_builder.GetInsertBlock());
+ return getElem;
+}
+
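+/* Loads the four channel vectors back out of the result slot that a
+ * builtin call has just written. */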
+std::vector<llvm::Value*> InstructionsSoa::allocaToResult(llvm::Value *allocaPtr)
+{
+ GetElementPtrInst *xElemPtr = GetElementPtrInst::Create(allocaPtr,
+ m_storage->constantInt(0),
+ name("xPtr"),
+ m_builder.GetInsertBlock());
+ GetElementPtrInst *yElemPtr = GetElementPtrInst::Create(allocaPtr,
+ m_storage->constantInt(1),
+ name("yPtr"),
+ m_builder.GetInsertBlock());
+ GetElementPtrInst *zElemPtr = GetElementPtrInst::Create(allocaPtr,
+ m_storage->constantInt(2),
+ name("zPtr"),
+ m_builder.GetInsertBlock());
+ GetElementPtrInst *wElemPtr = GetElementPtrInst::Create(allocaPtr,
+ m_storage->constantInt(3),
+ name("wPtr"),
+ m_builder.GetInsertBlock());
+
+ std::vector<llvm::Value*> res(4);
+ res[0] = new LoadInst(xElemPtr, name("xRes"), false, m_builder.GetInsertBlock());
+ res[1] = new LoadInst(yElemPtr, name("yRes"), false, m_builder.GetInsertBlock());
+ res[2] = new LoadInst(zElemPtr, name("zRes"), false, m_builder.GetInsertBlock());
+ res[3] = new LoadInst(wElemPtr, name("wRes"), false, m_builder.GetInsertBlock());
+
+ return res;
+}
+
+std::vector<llvm::Value*> InstructionsSoa::dp4(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2)
+{
+ llvm::Function *func = function(TGSI_OPCODE_DP4);
+ return callBuiltin(func, in1, in2);
+}
+
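+/* The SoA builtins all use the same calling convention: the first argument
+ * is a pointer to the result slot, followed by the four channel vectors of
+ * each operand (see the builtins in soabuiltins.c). */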
+std::vector<Value*> InstructionsSoa::callBuiltin(llvm::Function *func, const std::vector<llvm::Value*> in1)
+{
+ std::vector<Value*> params;
+
+ llvm::Value *allocaPtr = allocaTemp();
+ params.push_back(allocaPtr);
+ params.push_back(in1[0]);
+ params.push_back(in1[1]);
+ params.push_back(in1[2]);
+ params.push_back(in1[3]);
+ CallInst *call = m_builder.CreateCall(func, params.begin(), params.end());
+ call->setCallingConv(CallingConv::C);
+ call->setTailCall(false);
+
+ return allocaToResult(allocaPtr);
+}
+
+std::vector<Value*> InstructionsSoa::callBuiltin(llvm::Function *func, const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2)
+{
+ std::vector<Value*> params;
+
+ llvm::Value *allocaPtr = allocaTemp();
+ params.push_back(allocaPtr);
+ params.push_back(in1[0]);
+ params.push_back(in1[1]);
+ params.push_back(in1[2]);
+ params.push_back(in1[3]);
+ params.push_back(in2[0]);
+ params.push_back(in2[1]);
+ params.push_back(in2[2]);
+ params.push_back(in2[3]);
+ CallInst *call = m_builder.CreateCall(func, params.begin(), params.end());
+ call->setCallingConv(CallingConv::C);
+ call->setTailCall(false);
+
+ return allocaToResult(allocaPtr);
+}
+
+std::vector<Value*> InstructionsSoa::callBuiltin(llvm::Function *func, const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2,
+ const std::vector<llvm::Value*> in3)
+{
+ std::vector<Value*> params;
+
+ llvm::Value *allocaPtr = allocaTemp();
+ params.push_back(allocaPtr);
+ params.push_back(in1[0]);
+ params.push_back(in1[1]);
+ params.push_back(in1[2]);
+ params.push_back(in1[3]);
+ params.push_back(in2[0]);
+ params.push_back(in2[1]);
+ params.push_back(in2[2]);
+ params.push_back(in2[3]);
+ params.push_back(in3[0]);
+ params.push_back(in3[1]);
+ params.push_back(in3[2]);
+ params.push_back(in3[3]);
+ CallInst *call = m_builder.CreateCall(func, params.begin(), params.end());
+ call->setCallingConv(CallingConv::C);
+ call->setTailCall(false);
+
+ return allocaToResult(allocaPtr);
+}
+
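+/* Copies a builtin from the builtins module into the module currently being
+ * built: declarations are re-declared here, while definitions are cloned
+ * with their references to fabsf/powf/sqrtf remapped to this module's
+ * declarations. */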
+void InstructionsSoa::injectFunction(llvm::Function *originalFunc, int op)
+{
+ assert(originalFunc);
+ std::cout << "injecting function originalFunc " <<originalFunc->getName() <<std::endl;
+ if (op != TGSI_OPCODE_LAST) {
+      /* The function may already have been injected into the current
+       * module as part of another builtin's dependency chain; if so,
+       * just reuse it. */
+ llvm::Function *func = currentModule()->getFunction(originalFunc->getName());
+ if (func) {
+ m_functions[op] = func;
+ return;
+ }
+ }
+ llvm::Function *func = 0;
+ if (originalFunc->isDeclaration()) {
+ func = Function::Create(originalFunc->getFunctionType(), GlobalValue::ExternalLinkage,
+ originalFunc->getName(), currentModule());
+ func->setCallingConv(CallingConv::C);
+ const AttrListPtr pal;
+ func->setAttributes(pal);
+ currentModule()->dump();
+ } else {
+ DenseMap<const Value*, Value *> val;
+ val[m_builtins->getFunction("fabsf")] = currentModule()->getFunction("fabsf");
+ val[m_builtins->getFunction("powf")] = currentModule()->getFunction("powf");
+ val[m_builtins->getFunction("sqrtf")] = currentModule()->getFunction("sqrtf");
+ func = CloneFunction(originalFunc, val);
+#if 0
+ std::cout <<" replacing "<<m_builtins->getFunction("powf")
+ <<", with " <<currentModule()->getFunction("powf")<<std::endl;
+ std::cout<<"1111-------------------------------"<<std::endl;
+ checkFunction(originalFunc);
+ std::cout<<"2222-------------------------------"<<std::endl;
+ checkFunction(func);
+ std::cout <<"XXXX = " <<val[m_builtins->getFunction("powf")]<<std::endl;
+#endif
+ currentModule()->getFunctionList().push_back(func);
+ }
+ if (op != TGSI_OPCODE_LAST) {
+ m_functions[op] = func;
+ }
+}
+
+
diff --git a/src/gallium/auxiliary/gallivm/instructionssoa.h b/src/gallium/auxiliary/gallivm/instructionssoa.h
new file mode 100644
index 0000000000..d6831e0a6b
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/instructionssoa.h
@@ -0,0 +1,116 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INSTRUCTIONSSOA_H
+#define INSTRUCTIONSSOA_H
+
+#include <pipe/p_shader_tokens.h>
+#include <llvm/Support/IRBuilder.h>
+
+#include <map>
+#include <vector>
+
+namespace llvm {
+ class Module;
+ class Function;
+ class BasicBlock;
+ class Value;
+}
+class StorageSoa;
+
+class InstructionsSoa
+{
+public:
+ InstructionsSoa(llvm::Module *mod, llvm::Function *func,
+ llvm::BasicBlock *block, StorageSoa *storage);
+
+ std::vector<llvm::Value*> abs(const std::vector<llvm::Value*> in1);
+ std::vector<llvm::Value*> arl(const std::vector<llvm::Value*> in);
+ std::vector<llvm::Value*> add(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2);
+ std::vector<llvm::Value*> dp3(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2);
+ std::vector<llvm::Value*> dp4(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2);
+ std::vector<llvm::Value*> lit(const std::vector<llvm::Value*> in);
+ std::vector<llvm::Value*> madd(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2,
+ const std::vector<llvm::Value*> in3);
+ std::vector<llvm::Value*> max(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2);
+ std::vector<llvm::Value*> min(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2);
+ std::vector<llvm::Value*> mul(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2);
+ std::vector<llvm::Value*> pow(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2);
+ std::vector<llvm::Value*> rsq(const std::vector<llvm::Value*> in1);
+ std::vector<llvm::Value*> slt(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2);
+ std::vector<llvm::Value*> sub(const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2);
+ void end();
+
+ std::vector<llvm::Value*> extractVector(llvm::Value *vector);
+ llvm::IRBuilder<>* getIRBuilder();
+private:
+ const char * name(const char *prefix) const;
+ llvm::Value *vectorFromVals(llvm::Value *x, llvm::Value *y,
+ llvm::Value *z, llvm::Value *w);
+ void createFunctionMap();
+ void createBuiltins();
+ void createDependencies();
+ llvm::Function *function(int);
+ llvm::Module *currentModule() const;
+ llvm::Value *allocaTemp();
+ std::vector<llvm::Value*> allocaToResult(llvm::Value *allocaPtr);
+ std::vector<llvm::Value*> callBuiltin(llvm::Function *func,
+ const std::vector<llvm::Value*> in1);
+ std::vector<llvm::Value*> callBuiltin(llvm::Function *func,
+ const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2);
+ std::vector<llvm::Value*> callBuiltin(llvm::Function *func,
+ const std::vector<llvm::Value*> in1,
+ const std::vector<llvm::Value*> in2,
+ const std::vector<llvm::Value*> in3);
+ void injectFunction(llvm::Function *originalFunc, int op = TGSI_OPCODE_LAST);
+private:
+ llvm::IRBuilder<> m_builder;
+ StorageSoa *m_storage;
+
+ std::map<int, std::string> m_functionsMap;
+ std::map<int, llvm::Function*> m_functions;
+ llvm::Module *m_builtins;
+ std::map<std::string, std::vector<std::string> > m_builtinDependencies;
+
+private:
+ mutable int m_idx;
+ mutable char m_name[32];
+};
+
+
+#endif
diff --git a/src/gallium/auxiliary/gallivm/llvm_builtins.c b/src/gallium/auxiliary/gallivm/llvm_builtins.c
new file mode 100644
index 0000000000..d5a003a48b
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/llvm_builtins.c
@@ -0,0 +1,114 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Zack Rusin zack@tungstengraphics.com
+ */
+typedef __attribute__(( ext_vector_type(4) )) float float4;
+
+extern float powf(float a, float b);
+
+inline float approx(float a, float b)
+{
+ if (b < -128.0f) b = -128.0f;
+ if (b > 128.0f) b = 128.0f;
+ if (a < 0) a = 0;
+ return powf(a, b);
+}
+
+inline float4 lit(float4 tmp)
+{
+ float4 result;
+ result.x = 1.0;
+ result.w = 1.0;
+ if (tmp.x > 0) {
+ result.y = tmp.x;
+ result.z = approx(tmp.y, tmp.w);
+ } else {
+ result.y = 0;
+ result.z = 0;
+ }
+ return result;
+}
+
+inline float4 cmp(float4 tmp0, float4 tmp1, float4 tmp2)
+{
+ float4 result;
+
+ result.x = (tmp0.x < 0.0) ? tmp1.x : tmp2.x;
+ result.y = (tmp0.y < 0.0) ? tmp1.y : tmp2.y;
+ result.z = (tmp0.z < 0.0) ? tmp1.z : tmp2.z;
+ result.w = (tmp0.w < 0.0) ? tmp1.w : tmp2.w;
+
+ return result;
+}
+
+extern float cosf(float val);
+extern float sinf(float val);
+/* vcos() below calls printf() for debugging, so declare it as well. */
+extern int printf(const char *format, ...);
+
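+/* COS takes its input from the x channel and replicates the result to all
+ * four channels, hence the repeated val.x below. */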
+inline float4 vcos(float4 val)
+{
+ float4 result;
+ printf("VEC IN is %f %f %f %f\n", val.x, val.y, val.z, val.w);
+ result.x = cosf(val.x);
+ result.y = cosf(val.x);
+ result.z = cosf(val.x);
+ result.w = cosf(val.x);
+ printf("VEC OUT is %f %f %f %f\n", result.x, result.y, result.z, result.w);
+ return result;
+}
+
+inline float4 scs(float4 val)
+{
+ float4 result;
+ float tmp = val.x;
+ result.x = cosf(tmp);
+ result.y = sinf(tmp);
+ return result;
+}
+
+
+inline float4 vsin(float4 val)
+{
+ float4 result;
+ float tmp = val.x;
+ float res = sinf(tmp);
+ result.x = res;
+ result.y = res;
+ result.z = res;
+ result.w = res;
+ return result;
+}
+
+inline int kil(float4 val)
+{
+ if (val.x < 0 || val.y < 0 || val.z < 0 || val.w < 0)
+ return 1;
+ else
+ return 0;
+}
diff --git a/src/gallium/auxiliary/gallivm/loweringpass.cpp b/src/gallium/auxiliary/gallivm/loweringpass.cpp
new file mode 100644
index 0000000000..556dbec366
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/loweringpass.cpp
@@ -0,0 +1,17 @@
+#include "loweringpass.h"
+
+using namespace llvm;
+
+char LoweringPass::ID = 0;
+RegisterPass<LoweringPass> X("lowering", "Lowering Pass");
+
+LoweringPass::LoweringPass()
+ : ModulePass((intptr_t)&ID)
+{
+}
+
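+/* Currently just logs the module identifier and reports that nothing was
+ * modified. */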
+bool LoweringPass::runOnModule(Module &m)
+{
+ llvm::cerr << "Hello: " << m.getModuleIdentifier() << "\n";
+ return false;
+}
diff --git a/src/gallium/auxiliary/gallivm/loweringpass.h b/src/gallium/auxiliary/gallivm/loweringpass.h
new file mode 100644
index 0000000000..f62dcf6ba7
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/loweringpass.h
@@ -0,0 +1,15 @@
+#ifndef LOWERINGPASS_H
+#define LOWERINGPASS_H
+
+#include "llvm/Pass.h"
+#include "llvm/Module.h"
+
+struct LoweringPass : public llvm::ModulePass
+{
+ static char ID;
+ LoweringPass();
+
+ virtual bool runOnModule(llvm::Module &m);
+};
+
+#endif
diff --git a/src/gallium/auxiliary/gallivm/soabuiltins.c b/src/gallium/auxiliary/gallivm/soabuiltins.c
new file mode 100644
index 0000000000..cb85e1734e
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/soabuiltins.c
@@ -0,0 +1,210 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+  * This file is compiled with clang into LLVM bitcode.
+ *
+ * Authors:
+ * Zack Rusin zack@tungstengraphics.com
+ */
+typedef __attribute__(( ext_vector_type(4) )) float float4;
+
+
+extern float fabsf(float val);
+
+/* helpers */
+
+float4 absvec(float4 vec)
+{
+ float4 res;
+ res.x = fabsf(vec.x);
+ res.y = fabsf(vec.y);
+ res.z = fabsf(vec.z);
+ res.w = fabsf(vec.w);
+
+ return res;
+}
+
+float4 maxvec(float4 a, float4 b)
+{
+ return (float4){(a.x > b.x) ? a.x : b.x,
+ (a.y > b.y) ? a.y : b.y,
+ (a.z > b.z) ? a.z : b.z,
+ (a.w > b.w) ? a.w : b.w};
+}
+
+float4 minvec(float4 a, float4 b)
+{
+ return (float4){(a.x < b.x) ? a.x : b.x,
+ (a.y < b.y) ? a.y : b.y,
+ (a.z < b.z) ? a.z : b.z,
+ (a.w < b.w) ? a.w : b.w};
+}
+
+extern float powf(float num, float p);
+extern float sqrtf(float x);
+
+float4 powvec(float4 vec, float4 q)
+{
+ float4 p;
+ p.x = powf(vec.x, q.x);
+ p.y = powf(vec.y, q.y);
+ p.z = powf(vec.z, q.z);
+ p.w = powf(vec.w, q.w);
+ return p;
+}
+
+float4 sqrtvec(float4 vec)
+{
+ float4 p;
+ p.x = sqrtf(vec.x);
+ p.y = sqrtf(vec.y);
+ p.z = sqrtf(vec.z);
+ p.w = sqrtf(vec.w);
+ return p;
+}
+
+float4 sltvec(float4 v1, float4 v2)
+{
+ float4 p;
+ p.x = (v1.x < v2.x) ? 1.0 : 0.0;
+ p.y = (v1.y < v2.y) ? 1.0 : 0.0;
+ p.z = (v1.z < v2.z) ? 1.0 : 0.0;
+ p.w = (v1.w < v2.w) ? 1.0 : 0.0;
+ return p;
+}
+
+
+/* instructions */
+
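+/* Each argument is one channel of a TGSI register in SoA form: a float4
+ * holding that channel for four elements processed in parallel. The res
+ * array receives the four destination channel vectors (x, y, z, w). */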
+void abs(float4 *res,
+ float4 tmp0x, float4 tmp0y, float4 tmp0z, float4 tmp0w)
+{
+ res[0] = absvec(tmp0x);
+ res[1] = absvec(tmp0y);
+ res[2] = absvec(tmp0z);
+ res[3] = absvec(tmp0w);
+}
+
+void dp3(float4 *res,
+ float4 tmp0x, float4 tmp0y, float4 tmp0z, float4 tmp0w,
+ float4 tmp1x, float4 tmp1y, float4 tmp1z, float4 tmp1w)
+{
+ float4 dot = (tmp0x * tmp1x) + (tmp0y * tmp1y) +
+ (tmp0z * tmp1z);
+
+ res[0] = dot;
+ res[1] = dot;
+ res[2] = dot;
+ res[3] = dot;
+}
+
+void dp4(float4 *res,
+ float4 tmp0x, float4 tmp0y, float4 tmp0z, float4 tmp0w,
+ float4 tmp1x, float4 tmp1y, float4 tmp1z, float4 tmp1w)
+{
+ float4 dot = (tmp0x * tmp1x) + (tmp0y * tmp1y) +
+ (tmp0z * tmp1z) + (tmp0w * tmp1w);
+
+ res[0] = dot;
+ res[1] = dot;
+ res[2] = dot;
+ res[3] = dot;
+}
+
+void lit(float4 *res,
+ float4 tmp0x, float4 tmp0y, float4 tmp0z, float4 tmp0w)
+{
+ const float4 zerovec = (float4) {0.0, 0.0, 0.0, 0.0};
+ const float4 min128 = (float4) {-128.f, -128.f, -128.f, -128.f};
+ const float4 plus128 = (float4) {128.f, 128.f, 128.f, 128.f};
+
+ res[0] = (float4){1.0, 1.0, 1.0, 1.0};
+ if (tmp0x.x > 0) {
+ float4 tmpy = maxvec(tmp0y, zerovec);
+ float4 tmpw = minvec(tmp0w, plus128);
+ tmpw = maxvec(tmpw, min128);
+ res[1] = tmp0x;
+ res[2] = powvec(tmpy, tmpw);
+ } else {
+ res[1] = zerovec;
+ res[2] = zerovec;
+ }
+ res[3] = (float4){1.0, 1.0, 1.0, 1.0};
+}
+
+void min(float4 *res,
+ float4 tmp0x, float4 tmp0y, float4 tmp0z, float4 tmp0w,
+ float4 tmp1x, float4 tmp1y, float4 tmp1z, float4 tmp1w)
+{
+ res[0] = minvec(tmp0x, tmp1x);
+ res[1] = minvec(tmp0y, tmp1y);
+ res[2] = minvec(tmp0z, tmp1z);
+ res[3] = minvec(tmp0w, tmp1w);
+}
+
+
+void max(float4 *res,
+ float4 tmp0x, float4 tmp0y, float4 tmp0z, float4 tmp0w,
+ float4 tmp1x, float4 tmp1y, float4 tmp1z, float4 tmp1w)
+{
+ res[0] = maxvec(tmp0x, tmp1x);
+ res[1] = maxvec(tmp0y, tmp1y);
+ res[2] = maxvec(tmp0z, tmp1z);
+ res[3] = maxvec(tmp0w, tmp1w);
+}
+
+void pow(float4 *res,
+ float4 tmp0x, float4 tmp0y, float4 tmp0z, float4 tmp0w,
+ float4 tmp1x, float4 tmp1y, float4 tmp1z, float4 tmp1w)
+{
+ res[0] = powvec(tmp0x, tmp1x);
+ res[1] = res[0];
+ res[2] = res[0];
+ res[3] = res[0];
+}
+
+void rsq(float4 *res,
+ float4 tmp0x, float4 tmp0y, float4 tmp0z, float4 tmp0w)
+{
+ const float4 onevec = (float4) {1., 1., 1., 1.};
+ res[0] = onevec/sqrtvec(absvec(tmp0x));
+ res[1] = onevec/sqrtvec(absvec(tmp0y));
+ res[2] = onevec/sqrtvec(absvec(tmp0z));
+ res[3] = onevec/sqrtvec(absvec(tmp0w));
+}
+
+void slt(float4 *res,
+ float4 tmp0x, float4 tmp0y, float4 tmp0z, float4 tmp0w,
+ float4 tmp1x, float4 tmp1y, float4 tmp1z, float4 tmp1w)
+{
+ res[0] = sltvec(tmp0x, tmp1x);
+ res[1] = sltvec(tmp0y, tmp1y);
+ res[2] = sltvec(tmp0z, tmp1z);
+ res[3] = sltvec(tmp0w, tmp1w);
+}
+
diff --git a/src/gallium/auxiliary/gallivm/storage.cpp b/src/gallium/auxiliary/gallivm/storage.cpp
new file mode 100644
index 0000000000..73df24c976
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/storage.cpp
@@ -0,0 +1,364 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Zack Rusin zack@tungstengraphics.com
+ */
+#ifdef MESA_LLVM
+
+#include "storage.h"
+
+#include "gallivm_p.h"
+
+#include "pipe/p_shader_tokens.h"
+#include <llvm/BasicBlock.h>
+#include <llvm/Module.h>
+#include <llvm/Value.h>
+
+#include <llvm/CallingConv.h>
+#include <llvm/Constants.h>
+#include <llvm/DerivedTypes.h>
+#include <llvm/InstrTypes.h>
+#include <llvm/Instructions.h>
+
+#include <cstdio>
+
+using namespace llvm;
+
+Storage::Storage(llvm::BasicBlock *block, llvm::Value *input)
+ : m_block(block),
+ m_INPUT(input),
+ m_addrs(32),
+ m_idx(0)
+{
+ m_floatVecType = VectorType::get(Type::FloatTy, 4);
+ m_intVecType = VectorType::get(IntegerType::get(32), 4);
+
+ m_undefFloatVec = UndefValue::get(m_floatVecType);
+ m_undefIntVec = UndefValue::get(m_intVecType);
+ m_extSwizzleVec = 0;
+
+ m_numConsts = 0;
+}
+
+// The decimal swizzle encoding only supports mask components in the [0, 9] range.
+llvm::Constant *Storage::shuffleMask(int vec)
+{
+ if (!m_extSwizzleVec) {
+ std::vector<Constant*> elems;
+ elems.push_back(ConstantFP::get(APFloat(0.f)));
+ elems.push_back(ConstantFP::get(APFloat(1.f)));
+ elems.push_back(ConstantFP::get(APFloat(0.f)));
+ elems.push_back(ConstantFP::get(APFloat(1.f)));
+ m_extSwizzleVec = ConstantVector::get(m_floatVecType, elems);
+ }
+
+ if (m_intVecs.find(vec) != m_intVecs.end()) {
+ return m_intVecs[vec];
+ }
+ int origVec = vec;
+ Constant* const_vec = 0;
+ if (origVec == 0) {
+ const_vec = Constant::getNullValue(m_intVecType);
+ } else {
+ int x = gallivm_x_swizzle(vec);
+ int y = gallivm_y_swizzle(vec);
+ int z = gallivm_z_swizzle(vec);
+ int w = gallivm_w_swizzle(vec);
+ std::vector<Constant*> elems;
+ elems.push_back(constantInt(x));
+ elems.push_back(constantInt(y));
+ elems.push_back(constantInt(z));
+ elems.push_back(constantInt(w));
+ const_vec = ConstantVector::get(m_intVecType, elems);
+ }
+
+ m_intVecs[origVec] = const_vec;
+ return const_vec;
+}
+
+llvm::ConstantInt *Storage::constantInt(int idx)
+{
+ if (m_constInts.find(idx) != m_constInts.end()) {
+ return m_constInts[idx];
+ }
+ ConstantInt *const_int = ConstantInt::get(APInt(32, idx));
+ m_constInts[idx] = const_int;
+ return const_int;
+}
+
+llvm::Value *Storage::inputElement(int idx, llvm::Value *indIdx)
+{
+ Value *val = element(InputsArg, idx, indIdx);
+ LoadInst *load = new LoadInst(val, name("input"), false, m_block);
+ load->setAlignment(8);
+
+ return load;
+}
+
+llvm::Value *Storage::constElement(int idx, llvm::Value *indIdx)
+{
+ m_numConsts = ((idx + 1) > m_numConsts) ? (idx + 1) : m_numConsts;
+
+ Value *elem = element(ConstsArg, idx, indIdx);
+ LoadInst *load = new LoadInst(elem, name("const"), false, m_block);
+ load->setAlignment(8);
+ return load;
+}
+
+llvm::Value *Storage::shuffleVector(llvm::Value *vec, int shuffle)
+{
+ Constant *mask = shuffleMask(shuffle);
+ ShuffleVectorInst *res =
+ new ShuffleVectorInst(vec, m_extSwizzleVec, mask,
+ name("shuffle"), m_block);
+ return res;
+}
+
+
+llvm::Value *Storage::tempElement(int idx, llvm::Value *indIdx)
+{
+ Value *elem = element(TempsArg, idx, indIdx);
+
+ LoadInst *load = new LoadInst(elem, name("temp"), false, m_block);
+ load->setAlignment(8);
+
+ return load;
+}
+
+void Storage::setTempElement(int idx, llvm::Value *val, int mask)
+{
+ if (mask != TGSI_WRITEMASK_XYZW) {
+ llvm::Value *templ = 0;
+ if (m_tempWriteMap[idx])
+ templ = tempElement(idx);
+ val = maskWrite(val, mask, templ);
+ }
+ Value *elem = element(TempsArg, idx);
+ StoreInst *st = new StoreInst(val, elem, false, m_block);
+ st->setAlignment(8);
+ m_tempWriteMap[idx] = true;
+}
+
+void Storage::setOutputElement(int dstIdx, llvm::Value *val, int mask)
+{
+ if (mask != TGSI_WRITEMASK_XYZW) {
+ llvm::Value *templ = 0;
+ if (m_destWriteMap[dstIdx])
+ templ = outputElement(dstIdx);
+ val = maskWrite(val, mask, templ);
+ }
+
+ Value *elem = element(DestsArg, dstIdx);
+ StoreInst *st = new StoreInst(val, elem, false, m_block);
+ st->setAlignment(8);
+ m_destWriteMap[dstIdx] = true;
+}
+
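+/* Merges 'src' into 'templ' (or a zero vector when templ is 0) according to
+ * the TGSI write mask, one extract/insert pair per enabled channel. */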
+llvm::Value *Storage::maskWrite(llvm::Value *src, int mask, llvm::Value *templ)
+{
+ llvm::Value *dst = templ;
+ if (!dst)
+ dst = Constant::getNullValue(m_floatVecType);
+ if ((mask & TGSI_WRITEMASK_X)) {
+ llvm::Value *x = new ExtractElementInst(src, unsigned(0),
+ name("x"), m_block);
+ dst = InsertElementInst::Create(dst, x, unsigned(0),
+ name("dstx"), m_block);
+ }
+ if ((mask & TGSI_WRITEMASK_Y)) {
+ llvm::Value *y = new ExtractElementInst(src, unsigned(1),
+ name("y"), m_block);
+ dst = InsertElementInst::Create(dst, y, unsigned(1),
+ name("dsty"), m_block);
+ }
+ if ((mask & TGSI_WRITEMASK_Z)) {
+ llvm::Value *z = new ExtractElementInst(src, unsigned(2),
+ name("z"), m_block);
+ dst = InsertElementInst::Create(dst, z, unsigned(2),
+ name("dstz"), m_block);
+ }
+ if ((mask & TGSI_WRITEMASK_W)) {
+ llvm::Value *w = new ExtractElementInst(src, unsigned(3),
+ name("w"), m_block);
+ dst = InsertElementInst::Create(dst, w, unsigned(3),
+ name("dstw"), m_block);
+ }
+ return dst;
+}
+
+const char * Storage::name(const char *prefix)
+{
+ ++m_idx;
+ snprintf(m_name, 32, "%s%d", prefix, m_idx);
+ return m_name;
+}
+
+int Storage::numConsts() const
+{
+ return m_numConsts;
+}
+
+llvm::Value * Storage::addrElement(int idx) const
+{
+ Value *ret = m_addrs[idx];
+ if (!ret)
+ return m_undefFloatVec;
+ return ret;
+}
+
+void Storage::setAddrElement(int idx, llvm::Value *val, int mask)
+{
+ if (mask != TGSI_WRITEMASK_XYZW) {
+ llvm::Value *templ = m_addrs[idx];
+ val = maskWrite(val, mask, templ);
+ }
+ m_addrs[idx] = val;
+}
+
+llvm::Value * Storage::extractIndex(llvm::Value *vec)
+{
+ llvm::Value *x = new ExtractElementInst(vec, unsigned(0),
+ name("x"), m_block);
+ return new FPToSIInst(x, IntegerType::get(32), name("intidx"), m_block);
+}
+
+void Storage::setCurrentBlock(llvm::BasicBlock *block)
+{
+ m_block = block;
+}
+
+llvm::Value * Storage::outputElement(int idx, llvm::Value *indIdx)
+{
+ Value *elem = element(DestsArg, idx, indIdx);
+ LoadInst *load = new LoadInst(elem, name("output"), false, m_block);
+ load->setAlignment(8);
+
+ return load;
+}
+
+llvm::Value * Storage::inputPtr() const
+{
+ return m_INPUT;
+}
+
+void Storage::pushArguments(llvm::Value *input)
+{
+ m_argStack.push(m_INPUT);
+
+ m_INPUT = input;
+}
+
+void Storage::popArguments()
+{
+ m_INPUT = m_argStack.top();
+ m_argStack.pop();
+}
+
+void Storage::pushTemps()
+{
+ m_extSwizzleVec = 0;
+}
+
+void Storage::popTemps()
+{
+}
+
+llvm::Value * Storage::immediateElement(int idx)
+{
+ return m_immediates[idx];
+}
+
+void Storage::addImmediate(float *val)
+{
+ std::vector<Constant*> vec(4);
+ vec[0] = ConstantFP::get(APFloat(val[0]));
+ vec[1] = ConstantFP::get(APFloat(val[1]));
+ vec[2] = ConstantFP::get(APFloat(val[2]));
+ vec[3] = ConstantFP::get(APFloat(val[3]));
+ m_immediates.push_back(ConstantVector::get(m_floatVecType, vec));
+}
+
+
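+/* Loads the pointer stored in the given slot of the packed argument
+ * structure that m_INPUT points to (dests, inputs, temps, consts, kil). */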
+llvm::Value * Storage::elemPtr(Args arg)
+{
+ std::vector<Value*> indices;
+ indices.push_back(constantInt(0));
+ indices.push_back(constantInt(static_cast<int>(arg)));
+ GetElementPtrInst *getElem = GetElementPtrInst::Create(m_INPUT,
+ indices.begin(),
+ indices.end(),
+ name("input_ptr"),
+ m_block);
+ return new LoadInst(getElem, name("input_field"), false, m_block);
+}
+
+llvm::Value * Storage::elemIdx(llvm::Value *ptr, int idx,
+ llvm::Value *indIdx )
+{
+ GetElementPtrInst *getElem = 0;
+
+ if (indIdx) {
+ getElem = GetElementPtrInst::Create(ptr,
+ BinaryOperator::Create(Instruction::Add,
+ indIdx,
+ constantInt(idx),
+ name("add"),
+ m_block),
+ name("field"),
+ m_block);
+ } else {
+ getElem = GetElementPtrInst::Create(ptr,
+ constantInt(idx),
+ name("field"),
+ m_block);
+ }
+ return getElem;
+}
+
+llvm::Value * Storage::element(Args arg, int idx, llvm::Value *indIdx )
+{
+ Value *val = elemPtr(arg);
+ return elemIdx(val, idx, indIdx);
+}
+
+void Storage::setKilElement(llvm::Value *val)
+{
+ std::vector<Value*> indices;
+ indices.push_back(constantInt(0));
+ indices.push_back(constantInt(static_cast<int>(KilArg)));
+ GetElementPtrInst *elem = GetElementPtrInst::Create(m_INPUT,
+ indices.begin(),
+ indices.end(),
+ name("kil_ptr"),
+ m_block);
+ StoreInst *st = new StoreInst(val, elem, false, m_block);
+ st->setAlignment(8);
+}
+
+#endif //MESA_LLVM
+
+
diff --git a/src/gallium/auxiliary/gallivm/storage.h b/src/gallium/auxiliary/gallivm/storage.h
new file mode 100644
index 0000000000..8574f7554e
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/storage.h
@@ -0,0 +1,133 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Zack Rusin zack@tungstengraphics.com
+ */
+
+#ifndef STORAGE_H
+#define STORAGE_H
+
+#include <map>
+#include <set>
+#include <stack>
+#include <vector>
+
+namespace llvm {
+ class BasicBlock;
+ class Constant;
+ class ConstantInt;
+ class LoadInst;
+ class Value;
+ class VectorType;
+}
+
+class Storage
+{
+public:
+ Storage(llvm::BasicBlock *block,
+ llvm::Value *input);
+
+ llvm::Value *inputPtr() const;
+
+ void setCurrentBlock(llvm::BasicBlock *block);
+
+ llvm::ConstantInt *constantInt(int);
+ llvm::Constant *shuffleMask(int vec);
+ llvm::Value *inputElement(int idx, llvm::Value *indIdx =0);
+ llvm::Value *constElement(int idx, llvm::Value *indIdx =0);
+ llvm::Value *outputElement(int idx, llvm::Value *indIdx =0);
+ llvm::Value *tempElement(int idx, llvm::Value *indIdx =0);
+ llvm::Value *immediateElement(int idx);
+
+ void setOutputElement(int dstIdx, llvm::Value *val, int mask);
+ void setTempElement(int idx, llvm::Value *val, int mask);
+
+ llvm::Value *addrElement(int idx) const;
+ void setAddrElement(int idx, llvm::Value *val, int mask);
+
+ void setKilElement(llvm::Value *val);
+
+ llvm::Value *shuffleVector(llvm::Value *vec, int shuffle);
+
+ llvm::Value *extractIndex(llvm::Value *vec);
+
+ int numConsts() const;
+
+ void pushArguments(llvm::Value *input);
+ void popArguments();
+ void pushTemps();
+ void popTemps();
+
+ void addImmediate(float *val);
+
+private:
+ llvm::Value *maskWrite(llvm::Value *src, int mask, llvm::Value *templ);
+ const char *name(const char *prefix);
+
+ enum Args {
+ DestsArg = 0,
+ InputsArg = 1,
+ TempsArg = 2,
+ ConstsArg = 3,
+ KilArg = 4
+ };
+ llvm::Value *elemPtr(Args arg);
+ llvm::Value *elemIdx(llvm::Value *ptr, int idx,
+ llvm::Value *indIdx = 0);
+ llvm::Value *element(Args arg, int idx, llvm::Value *indIdx = 0);
+
+private:
+ llvm::BasicBlock *m_block;
+ llvm::Value *m_INPUT;
+
+ std::map<int, llvm::ConstantInt*> m_constInts;
+ std::map<int, llvm::Constant*> m_intVecs;
+ std::vector<llvm::Value*> m_addrs;
+ std::vector<llvm::Constant*> m_immediates;
+
+ llvm::VectorType *m_floatVecType;
+ llvm::VectorType *m_intVecType;
+
+ char m_name[32];
+ int m_idx;
+
+ int m_numConsts;
+
+ std::map<int, bool > m_destWriteMap;
+ std::map<int, bool > m_tempWriteMap;
+
+ llvm::Value *m_undefFloatVec;
+ llvm::Value *m_undefIntVec;
+ llvm::Value *m_extSwizzleVec;
+
+ std::stack<llvm::Value*> m_argStack;
+ std::stack<std::vector<llvm::Value*> > m_tempStack;
+};
+
+#endif
diff --git a/src/gallium/auxiliary/gallivm/storagesoa.cpp b/src/gallium/auxiliary/gallivm/storagesoa.cpp
new file mode 100644
index 0000000000..4984ce985c
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/storagesoa.cpp
@@ -0,0 +1,438 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "storagesoa.h"
+
+#include "gallivm_p.h"
+
+#include "pipe/p_shader_tokens.h"
+#include "util/u_debug.h"
+
+#include <llvm/BasicBlock.h>
+#include <llvm/Module.h>
+#include <llvm/Value.h>
+
+#include <llvm/CallingConv.h>
+#include <llvm/Constants.h>
+#include <llvm/DerivedTypes.h>
+#include <llvm/InstrTypes.h>
+#include <llvm/Instructions.h>
+
+#include <cstdio>
+
+using namespace llvm;
+
+
+StorageSoa::StorageSoa(llvm::BasicBlock *block,
+ llvm::Value *input,
+ llvm::Value *output,
+ llvm::Value *consts)
+ : m_block(block),
+ m_input(input),
+ m_output(output),
+ m_consts(consts),
+ m_immediates(0),
+ m_idx(0)
+{
+}
+
+void StorageSoa::addImmediate(float *vec)
+{
+ std::vector<float> vals(4);
+ vals[0] = vec[0];
+ vals[1] = vec[1];
+ vals[2] = vec[2];
+ vals[3] = vec[3];
+ m_immediatesToFlush.push_back(vals);
+}
+
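+/* Flushes the queued immediates into a global constant array with one
+ * [4 x <4 x float>] entry per immediate, storing each channel splatted
+ * across a whole vector so it can be used directly as an SoA operand. */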
+void StorageSoa::declareImmediates()
+{
+ if (m_immediatesToFlush.empty())
+ return;
+
+ VectorType *vectorType = VectorType::get(Type::FloatTy, 4);
+ ArrayType *vectorChannels = ArrayType::get(vectorType, 4);
+ ArrayType *arrayType = ArrayType::get(vectorChannels, m_immediatesToFlush.size());
+
+ m_immediates = new GlobalVariable(
+ /*Type=*/arrayType,
+ /*isConstant=*/false,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Initializer=*/0, // has initializer, specified below
+ /*Name=*/name("immediates"),
+ currentModule());
+
+ std::vector<Constant*> arrayVals;
+ for (unsigned int i = 0; i < m_immediatesToFlush.size(); ++i) {
+ std::vector<float> vec = m_immediatesToFlush[i];
+ std::vector<float> vals(4);
+ std::vector<Constant*> channelArray;
+
+      /* Splat each channel across a whole vector, like y/z/w below. */
+      vals[0] = vec[0]; vals[1] = vec[0]; vals[2] = vec[0]; vals[3] = vec[0];
+      llvm::Constant *xChannel = createConstGlobalVector(vals);
+
+ vals[0] = vec[1]; vals[1] = vec[1]; vals[2] = vec[1]; vals[3] = vec[1];
+ llvm::Constant *yChannel = createConstGlobalVector(vals);
+
+ vals[0] = vec[2]; vals[1] = vec[2]; vals[2] = vec[2]; vals[3] = vec[2];
+ llvm::Constant *zChannel = createConstGlobalVector(vals);
+
+ vals[0] = vec[3]; vals[1] = vec[3]; vals[2] = vec[3]; vals[3] = vec[3];
+ llvm::Constant *wChannel = createConstGlobalVector(vals);
+ channelArray.push_back(xChannel);
+ channelArray.push_back(yChannel);
+ channelArray.push_back(zChannel);
+ channelArray.push_back(wChannel);
+ Constant *constChannels = ConstantArray::get(vectorChannels,
+ channelArray);
+ arrayVals.push_back(constChannels);
+ }
+ Constant *constArray = ConstantArray::get(arrayType, arrayVals);
+ m_immediates->setInitializer(constArray);
+
+ m_immediatesToFlush.clear();
+}
+
+llvm::Value *StorageSoa::addrElement(int idx) const
+{
+ std::map<int, llvm::Value*>::const_iterator itr = m_addresses.find(idx);
+ if (itr == m_addresses.end()) {
+ debug_printf("Trying to access invalid shader 'address'\n");
+ return 0;
+ }
+ llvm::Value * res = (*itr).second;
+
+ res = new LoadInst(res, name("addr"), false, m_block);
+
+ return res;
+}
+
+std::vector<llvm::Value*> StorageSoa::inputElement(llvm::Value *idx)
+{
+ std::vector<llvm::Value*> res(4);
+
+ res[0] = element(m_input, idx, 0);
+ res[1] = element(m_input, idx, 1);
+ res[2] = element(m_input, idx, 2);
+ res[3] = element(m_input, idx, 3);
+
+ return res;
+}
+
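+/* Constants are stored AoS (one float4 per constant), so pull out the
+ * requested channel and splat it across a vector to form the SoA operand. */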
+llvm::Value* StorageSoa::unpackConstElement(llvm::IRBuilder<>* m_builder, llvm::Value* vector, int cc)
+{
+ std::vector<llvm::Value*> x(4);
+ x[0] = m_builder->CreateExtractElement(vector,
+ constantInt(cc),
+ name("x"));
+
+ VectorType *vectorType = VectorType::get(Type::FloatTy, 4);
+ Constant *constVector = Constant::getNullValue(vectorType);
+ Value *res = m_builder->CreateInsertElement(constVector, x[0],
+ constantInt(0),
+ name("vecx"));
+ res = m_builder->CreateInsertElement(res, x[0], constantInt(1),
+ name("vecxx"));
+ res = m_builder->CreateInsertElement(res, x[0], constantInt(2),
+ name("vecxxx"));
+ res = m_builder->CreateInsertElement(res, x[0], constantInt(3),
+ name("vecxxxx"));
+ return res;
+}
+
+std::vector<llvm::Value*> StorageSoa::constElement(llvm::IRBuilder<>* m_builder, llvm::Value *idx)
+{
+ llvm::Value* res;
+ std::vector<llvm::Value*> res2(4);
+ llvm::Value *xChannel;
+
+ xChannel = elementPointer(m_consts, idx, 0);
+
+ res = alignedArrayLoad(xChannel);
+
+   res2[0] = unpackConstElement(m_builder, res, 0);
+   res2[1] = unpackConstElement(m_builder, res, 1);
+   res2[2] = unpackConstElement(m_builder, res, 2);
+   res2[3] = unpackConstElement(m_builder, res, 3);
+
+ return res2;
+}
+
+std::vector<llvm::Value*> StorageSoa::outputElement(llvm::Value *idx)
+{
+ std::vector<llvm::Value*> res(4);
+
+ res[0] = element(m_output, idx, 0);
+ res[1] = element(m_output, idx, 1);
+ res[2] = element(m_output, idx, 2);
+ res[3] = element(m_output, idx, 3);
+
+ return res;
+}
+
+std::vector<llvm::Value*> StorageSoa::tempElement(llvm::IRBuilder<>* m_builder, int idx)
+{
+ std::vector<llvm::Value*> res(4);
+ llvm::Value *temp = m_temps[idx];
+
+ res[0] = element(temp, constantInt(0), 0);
+ res[1] = element(temp, constantInt(0), 1);
+ res[2] = element(temp, constantInt(0), 2);
+ res[3] = element(temp, constantInt(0), 3);
+
+ return res;
+}
+
+std::vector<llvm::Value*> StorageSoa::immediateElement(llvm::Value *idx)
+{
+ std::vector<llvm::Value*> res(4);
+
+ res[0] = element(m_immediates, idx, 0);
+ res[1] = element(m_immediates, idx, 1);
+ res[2] = element(m_immediates, idx, 2);
+ res[3] = element(m_immediates, idx, 3);
+
+ return res;
+}
+
+llvm::Value * StorageSoa::elementPointer(llvm::Value *ptr, llvm::Value *index,
+ int channel) const
+{
+ std::vector<Value*> indices;
+ if (m_immediates == ptr)
+ indices.push_back(constantInt(0));
+ indices.push_back(index);
+ indices.push_back(constantInt(channel));
+
+ GetElementPtrInst *getElem = GetElementPtrInst::Create(ptr,
+ indices.begin(),
+ indices.end(),
+ name("ptr"),
+ m_block);
+ return getElem;
+}
+
+llvm::Value * StorageSoa::element(llvm::Value *ptr, llvm::Value *index,
+ int channel) const
+{
+ llvm::Value *res = elementPointer(ptr, index, channel);
+ LoadInst *load = new LoadInst(res, name("element"), false, m_block);
+ //load->setAlignment(8);
+ return load;
+}
+
+const char * StorageSoa::name(const char *prefix) const
+{
+ ++m_idx;
+ snprintf(m_name, 32, "%s%d", prefix, m_idx);
+ return m_name;
+}
+
+llvm::ConstantInt * StorageSoa::constantInt(int idx) const
+{
+ if (m_constInts.find(idx) != m_constInts.end()) {
+ return m_constInts[idx];
+ }
+ ConstantInt *constInt = ConstantInt::get(APInt(32, idx));
+ m_constInts[idx] = constInt;
+ return constInt;
+}
+
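+/* Bitcasts the given pointer to <4 x float>* and loads through it with
+ * 8-byte alignment. */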
+llvm::Value *StorageSoa::alignedArrayLoad(llvm::Value *val)
+{
+ VectorType *vectorType = VectorType::get(Type::FloatTy, 4);
+ PointerType *vectorPtr = PointerType::get(vectorType, 0);
+
+ CastInst *cast = new BitCastInst(val, vectorPtr, name("toVector"), m_block);
+ LoadInst *load = new LoadInst(cast, name("alignLoad"), false, m_block);
+ load->setAlignment(8);
+ return load;
+}
+
+llvm::Module * StorageSoa::currentModule() const
+{
+ if (!m_block || !m_block->getParent())
+ return 0;
+
+ return m_block->getParent()->getParent();
+}
+
+llvm::Constant * StorageSoa::createConstGlobalFloat(const float val)
+{
+ Constant*c = ConstantFP::get(APFloat(val));
+ return c;
+}
+
+llvm::Constant * StorageSoa::createConstGlobalVector(const std::vector<float> &vec)
+{
+ VectorType *vectorType = VectorType::get(Type::FloatTy, 4);
+ std::vector<Constant*> immValues;
+ ConstantFP *constx = ConstantFP::get(APFloat(vec[0]));
+ ConstantFP *consty = ConstantFP::get(APFloat(vec[1]));
+ ConstantFP *constz = ConstantFP::get(APFloat(vec[2]));
+ ConstantFP *constw = ConstantFP::get(APFloat(vec[3]));
+ immValues.push_back(constx);
+ immValues.push_back(consty);
+ immValues.push_back(constz);
+ immValues.push_back(constw);
+ Constant *constVector = ConstantVector::get(vectorType, immValues);
+
+ return constVector;
+}
+
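+/* Fetches the four SoA channel vectors of the given register, dispatching
+ * on the TGSI file type, and reorders them if a swizzle is present. */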
+std::vector<llvm::Value*> StorageSoa::load(enum tgsi_file_type type, int idx, int swizzle,
+ llvm::IRBuilder<>* m_builder,llvm::Value *indIdx)
+{
+ std::vector<llvm::Value*> val(4);
+
+   // If we have an indirect index, always use it;
+   // otherwise create one from the integer offset.
+ llvm::Value *realIndex = 0;
+ if (indIdx)
+ realIndex = indIdx;
+ else
+ realIndex = constantInt(idx);
+ debug_printf("XXXXXXXXX realIdx = %p, indIdx = %p\n", realIndex, indIdx);
+
+ switch(type) {
+ case TGSI_FILE_INPUT:
+ val = inputElement(realIndex);
+ break;
+ case TGSI_FILE_OUTPUT:
+ val = outputElement(realIndex);
+ break;
+ case TGSI_FILE_TEMPORARY:
+ val = tempElement(m_builder, idx);
+ break;
+ case TGSI_FILE_CONSTANT:
+ val = constElement(m_builder, realIndex);
+ break;
+ case TGSI_FILE_IMMEDIATE:
+ val = immediateElement(realIndex);
+ break;
+ case TGSI_FILE_ADDRESS:
+ debug_printf("Address not handled in the load phase!\n");
+ assert(0);
+ break;
+ default:
+ debug_printf("Unknown load!\n");
+ assert(0);
+ break;
+ }
+ if (!gallivm_is_swizzle(swizzle))
+ return val;
+
+ std::vector<llvm::Value*> res(4);
+
+ res[0] = val[gallivm_x_swizzle(swizzle)];
+ res[1] = val[gallivm_y_swizzle(swizzle)];
+ res[2] = val[gallivm_z_swizzle(swizzle)];
+ res[3] = val[gallivm_w_swizzle(swizzle)];
+ return res;
+}
+
+llvm::Value * StorageSoa::allocaTemp(llvm::IRBuilder<>* m_builder)
+{
+ VectorType *vector = VectorType::get(Type::FloatTy, 4);
+ ArrayType *vecArray = ArrayType::get(vector, 4);
+ AllocaInst *alloca = new AllocaInst(vecArray, "temp",
+ m_builder->GetInsertBlock());
+
+ return alloca;
+}
+
+
+void StorageSoa::store(enum tgsi_file_type type, int idx, const std::vector<llvm::Value*> &val,
+ int mask, llvm::IRBuilder<>* m_builder)
+{
+ llvm::Value *out = 0;
+ llvm::Value *realIndex = 0;
+ switch(type) {
+ case TGSI_FILE_OUTPUT:
+ out = m_output;
+ realIndex = constantInt(idx);
+ break;
+ case TGSI_FILE_TEMPORARY:
+ // if that temp doesn't already exist, alloca it
+ if (m_temps.find(idx) == m_temps.end())
+ m_temps[idx] = allocaTemp(m_builder);
+
+ out = m_temps[idx];
+
+ realIndex = constantInt(0);
+ break;
+ case TGSI_FILE_INPUT:
+ out = m_input;
+ realIndex = constantInt(idx);
+ break;
+ case TGSI_FILE_ADDRESS: {
+ llvm::Value *addr = m_addresses[idx];
+ if (!addr) {
+ addAddress(idx);
+ addr = m_addresses[idx];
+ assert(addr);
+ }
+ new StoreInst(val[0], addr, false, m_block);
+ return;
+ break;
+ }
+ default:
+ debug_printf("Can't save output of this type: %d !\n", type);
+ assert(0);
+ break;
+ }
+ if ((mask & TGSI_WRITEMASK_X)) {
+ llvm::Value *xChannel = elementPointer(out, realIndex, 0);
+ new StoreInst(val[0], xChannel, false, m_block);
+ }
+ if ((mask & TGSI_WRITEMASK_Y)) {
+ llvm::Value *yChannel = elementPointer(out, realIndex, 1);
+ new StoreInst(val[1], yChannel, false, m_block);
+ }
+ if ((mask & TGSI_WRITEMASK_Z)) {
+ llvm::Value *zChannel = elementPointer(out, realIndex, 2);
+ new StoreInst(val[2], zChannel, false, m_block);
+ }
+ if ((mask & TGSI_WRITEMASK_W)) {
+ llvm::Value *wChannel = elementPointer(out, realIndex, 3);
+ new StoreInst(val[3], wChannel, false, m_block);
+ }
+}
+
+void StorageSoa::addAddress(int idx)
+{
+ GlobalVariable *val = new GlobalVariable(
+ /*Type=*/IntegerType::get(32),
+ /*isConstant=*/false,
+ /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Initializer=*/0, // has initializer, specified below
+ /*Name=*/name("address"),
+ currentModule());
+ val->setInitializer(Constant::getNullValue(IntegerType::get(32)));
+
+ debug_printf("adding to %d\n", idx);
+ m_addresses[idx] = val;
+}
diff --git a/src/gallium/auxiliary/gallivm/storagesoa.h b/src/gallium/auxiliary/gallivm/storagesoa.h
new file mode 100644
index 0000000000..56886f85e7
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/storagesoa.h
@@ -0,0 +1,107 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef STORAGESOA_H
+#define STORAGESOA_H
+
+#include <pipe/p_shader_tokens.h>
+#include <llvm/Support/IRBuilder.h>
+
+#include <vector>
+#include <list>
+#include <map>
+
+namespace llvm {
+ class BasicBlock;
+ class Constant;
+ class ConstantInt;
+ class GlobalVariable;
+ class LoadInst;
+ class Value;
+ class VectorType;
+ class Module;
+}
+
+class StorageSoa
+{
+public:
+ StorageSoa(llvm::BasicBlock *block,
+ llvm::Value *input,
+ llvm::Value *output,
+ llvm::Value *consts);
+
+
+ std::vector<llvm::Value*> load(enum tgsi_file_type type, int idx, int swizzle,
+ llvm::IRBuilder<>* m_builder, llvm::Value *indIdx =0);
+ void store(enum tgsi_file_type type, int idx, const std::vector<llvm::Value*> &val,
+ int mask, llvm::IRBuilder<>* m_builder);
+
+ void addImmediate(float *vec);
+ void declareImmediates();
+
+ void addAddress(int idx);
+
+ llvm::Value * addrElement(int idx) const;
+
+ llvm::ConstantInt *constantInt(int) const;
+private:
+ llvm::Value *elementPointer(llvm::Value *ptr, llvm::Value *indIdx,
+ int channel) const;
+ llvm::Value *element(llvm::Value *ptr, llvm::Value *idx,
+ int channel) const;
+ const char *name(const char *prefix) const;
+ llvm::Value *alignedArrayLoad(llvm::Value *val);
+ llvm::Module *currentModule() const;
+ llvm::Constant *createConstGlobalFloat(const float val);
+ llvm::Constant *createConstGlobalVector(const std::vector<float> &vec);
+
+ std::vector<llvm::Value*> inputElement(llvm::Value *indIdx);
+ llvm::Value* unpackConstElement(llvm::IRBuilder<>* m_builder, llvm::Value *indIdx, int cc);
+ std::vector<llvm::Value*> constElement(llvm::IRBuilder<>* m_builder, llvm::Value *indIdx);
+ std::vector<llvm::Value*> outputElement(llvm::Value *indIdx);
+ std::vector<llvm::Value*> tempElement(llvm::IRBuilder<>* m_builder, int idx);
+ std::vector<llvm::Value*> immediateElement(llvm::Value *indIdx);
+private:
+ llvm::BasicBlock *m_block;
+
+ llvm::Value *m_input;
+ llvm::Value *m_output;
+ llvm::Value *m_consts;
+ std::map<int, llvm::Value*> m_temps;
+ llvm::GlobalVariable *m_immediates;
+
+ std::map<int, llvm::Value*> m_addresses;
+
+ std::vector<std::vector<float> > m_immediatesToFlush;
+ llvm::Value * allocaTemp(llvm::IRBuilder<>* m_builder);
+
+ mutable std::map<int, llvm::ConstantInt*> m_constInts;
+ mutable char m_name[32];
+ mutable int m_idx;
+};
+
+#endif
diff --git a/src/gallium/auxiliary/gallivm/tgsitollvm.cpp b/src/gallium/auxiliary/gallivm/tgsitollvm.cpp
new file mode 100644
index 0000000000..bf84401e11
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/tgsitollvm.cpp
@@ -0,0 +1,1136 @@
+#include "tgsitollvm.h"
+
+#include "gallivm.h"
+#include "gallivm_p.h"
+
+#include "storage.h"
+#include "instructions.h"
+#include "storagesoa.h"
+#include "instructionssoa.h"
+
+#include "pipe/p_shader_tokens.h"
+
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_exec.h"
+#include "tgsi/tgsi_util.h"
+#include "tgsi/tgsi_build.h"
+#include "tgsi/tgsi_dump.h"
+
+
+#include <llvm/Module.h>
+#include <llvm/CallingConv.h>
+#include <llvm/Constants.h>
+#include <llvm/DerivedTypes.h>
+#include <llvm/Instructions.h>
+#include <llvm/ModuleProvider.h>
+#include <llvm/Pass.h>
+#include <llvm/PassManager.h>
+#include <llvm/Attributes.h>
+#include <llvm/Support/PatternMatch.h>
+#include <llvm/ExecutionEngine/JIT.h>
+#include <llvm/ExecutionEngine/Interpreter.h>
+#include <llvm/ExecutionEngine/GenericValue.h>
+#include <llvm/Support/MemoryBuffer.h>
+#include <llvm/LinkAllPasses.h>
+#include <llvm/Analysis/Verifier.h>
+#include <llvm/Analysis/LoopPass.h>
+#include <llvm/Target/TargetData.h>
+#include <llvm/Bitcode/ReaderWriter.h>
+#include <llvm/Transforms/Utils/Cloning.h>
+
+
+#include <sstream>
+#include <fstream>
+#include <iostream>
+
+using namespace llvm;
+
+static inline FunctionType *vertexShaderFunctionType()
+{
+   // The function takes three arguments; the calling code has to make
+   // sure the types it passes are castable to the following:
+   //   [4 x <4 x float>] inputs,
+   //   [4 x <4 x float>] output,
+   //   [1 x [4 x float]] consts
+
+ std::vector<const Type*> funcArgs;
+ VectorType *vectorType = VectorType::get(Type::FloatTy, 4);
+ ArrayType *vectorArray = ArrayType::get(vectorType, 4);
+ PointerType *vectorArrayPtr = PointerType::get(vectorArray, 0);
+
+ ArrayType *floatArray = ArrayType::get(Type::FloatTy, 4);
+ ArrayType *constsArray = ArrayType::get(floatArray, 1);
+ PointerType *constsArrayPtr = PointerType::get(constsArray, 0);
+
+ funcArgs.push_back(vectorArrayPtr);//inputs
+ funcArgs.push_back(vectorArrayPtr);//output
+ funcArgs.push_back(constsArrayPtr);//consts
+
+ FunctionType *functionType = FunctionType::get(
+ /*Result=*/Type::VoidTy,
+ /*Params=*/funcArgs,
+ /*isVarArg=*/false);
+
+ return functionType;
+}
+
+static inline void
+add_interpolator(struct gallivm_ir *ir,
+ struct gallivm_interpolate *interp)
+{
+ ir->interpolators[ir->num_interp] = *interp;
+ ++ir->num_interp;
+}
+
+static void
+translate_declaration(struct gallivm_ir *prog,
+ llvm::Module *module,
+ Storage *storage,
+ struct tgsi_full_declaration *decl,
+ struct tgsi_full_declaration *fd)
+{
+ if (decl->Declaration.File == TGSI_FILE_INPUT) {
+ unsigned first, last, mask;
+ uint interp_method;
+
+ first = decl->DeclarationRange.First;
+ last = decl->DeclarationRange.Last;
+ mask = decl->Declaration.UsageMask;
+
+ /* Do not touch WPOS.xy */
+ if (first == 0) {
+ mask &= ~TGSI_WRITEMASK_XY;
+ if (mask == TGSI_WRITEMASK_NONE) {
+ first++;
+ if (first > last) {
+ return;
+ }
+ }
+ }
+
+ interp_method = decl->Declaration.Interpolate;
+
+ if (mask == TGSI_WRITEMASK_XYZW) {
+ unsigned i, j;
+
+ for (i = first; i <= last; i++) {
+ for (j = 0; j < NUM_CHANNELS; j++) {
+ //interp( mach, i, j );
+ struct gallivm_interpolate interp;
+ interp.type = interp_method;
+ interp.attrib = i;
+ interp.chan = j;
+ add_interpolator(prog, &interp);
+ }
+ }
+ } else {
+ unsigned i, j;
+ for( j = 0; j < NUM_CHANNELS; j++ ) {
+ if( mask & (1 << j) ) {
+ for( i = first; i <= last; i++ ) {
+ struct gallivm_interpolate interp;
+ interp.type = interp_method;
+ interp.attrib = i;
+ interp.chan = j;
+ add_interpolator(prog, &interp);
+ }
+ }
+ }
+ }
+ }
+}
+
+static void
+translate_declarationir(struct gallivm_ir *,
+ llvm::Module *,
+ StorageSoa *storage,
+ struct tgsi_full_declaration *decl,
+ struct tgsi_full_declaration *)
+{
+ if (decl->Declaration.File == TGSI_FILE_ADDRESS) {
+ int idx = decl->DeclarationRange.First;
+ storage->addAddress(idx);
+ }
+}
+
+static void
+translate_immediate(Storage *storage,
+ struct tgsi_full_immediate *imm)
+{
+ float vec[4];
+ int i;
+ assert( imm->Immediate.NrTokens <= 4 + 1 );
+ for (i = 0; i < imm->Immediate.NrTokens - 1; ++i) {
+ switch (imm->Immediate.DataType) {
+ case TGSI_IMM_FLOAT32:
+ vec[i] = imm->u[i].Float;
+ break;
+ default:
+ assert(0);
+ }
+ }
+ storage->addImmediate(vec);
+}
+
+
+static void
+translate_immediateir(StorageSoa *storage,
+ struct tgsi_full_immediate *imm)
+{
+ float vec[4];
+ int i;
+ assert( imm->Immediate.NrTokens <= 4 + 1 );
+ for (i = 0; i < imm->Immediate.NrTokens - 1; ++i) {
+ switch (imm->Immediate.DataType) {
+ case TGSI_IMM_FLOAT32:
+ vec[i] = imm->u[i].Float;
+ break;
+ default:
+ assert(0);
+ }
+ }
+ storage->addImmediate(vec);
+}
+
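+/* swizzleInt() packs the four TGSI channel selectors into a single decimal
+ * number, one digit per channel.  Assuming the plain selectors map X,Y,Z,W
+ * to 0..3, the identity swizzle .xyzw encodes as 0123 (i.e. 123) and .wzyx
+ * encodes as 3210. */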
+static inline int
+swizzleInt(struct tgsi_full_src_register *src)
+{
+ int swizzle = 0;
+ int start = 1000;
+
+ for (int k = 0; k < 4; ++k) {
+ swizzle += tgsi_util_get_full_src_register_extswizzle(src, k) * start;
+ start /= 10;
+ }
+ return swizzle;
+}
+
+static inline llvm::Value *
+swizzleVector(llvm::Value *val, struct tgsi_full_src_register *src,
+ Storage *storage)
+{
+ int swizzle = swizzleInt(src);
+
+ if (gallivm_is_swizzle(swizzle)) {
+ /*fprintf(stderr, "XXXXXXXX swizzle = %d\n", swizzle);*/
+ val = storage->shuffleVector(val, swizzle);
+ }
+ return val;
+}
+
+static void
+translate_instruction(llvm::Module *module,
+ Storage *storage,
+ Instructions *instr,
+ struct tgsi_full_instruction *inst,
+ struct tgsi_full_instruction *fi,
+ unsigned instno)
+{
+ llvm::Value *inputs[4];
+ inputs[0] = 0;
+ inputs[1] = 0;
+ inputs[2] = 0;
+ inputs[3] = 0;
+
+ for (int i = 0; i < inst->Instruction.NumSrcRegs; ++i) {
+ struct tgsi_full_src_register *src = &inst->FullSrcRegisters[i];
+ llvm::Value *val = 0;
+ llvm::Value *indIdx = 0;
+
+ if (src->SrcRegister.Indirect) {
+ indIdx = storage->addrElement(src->SrcRegisterInd.Index);
+ indIdx = storage->extractIndex(indIdx);
+ }
+ if (src->SrcRegister.File == TGSI_FILE_CONSTANT) {
+ val = storage->constElement(src->SrcRegister.Index, indIdx);
+ } else if (src->SrcRegister.File == TGSI_FILE_INPUT) {
+ val = storage->inputElement(src->SrcRegister.Index, indIdx);
+ } else if (src->SrcRegister.File == TGSI_FILE_TEMPORARY) {
+ val = storage->tempElement(src->SrcRegister.Index);
+ } else if (src->SrcRegister.File == TGSI_FILE_OUTPUT) {
+ val = storage->outputElement(src->SrcRegister.Index, indIdx);
+ } else if (src->SrcRegister.File == TGSI_FILE_IMMEDIATE) {
+ val = storage->immediateElement(src->SrcRegister.Index);
+ } else {
+ fprintf(stderr, "ERROR: not supported llvm source %d\n", src->SrcRegister.File);
+ return;
+ }
+
+ inputs[i] = swizzleVector(val, src, storage);
+ }
+
+ /*if (inputs[0])
+ instr->printVector(inputs[0]);
+ if (inputs[1])
+ instr->printVector(inputs[1]);*/
+ llvm::Value *out = 0;
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_ARL: {
+ out = instr->arl(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_MOV: {
+ out = inputs[0];
+ }
+ break;
+ case TGSI_OPCODE_LIT: {
+ out = instr->lit(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_RCP: {
+ out = instr->rcp(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_RSQ: {
+ out = instr->rsq(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_EXP: {
+ out = instr->exp(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_LOG: {
+ out = instr->log(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_MUL: {
+ out = instr->mul(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_ADD: {
+ out = instr->add(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_DP3: {
+ out = instr->dp3(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_DP4: {
+ out = instr->dp4(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_DST: {
+ out = instr->dst(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_MIN: {
+ out = instr->min(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_MAX: {
+ out = instr->max(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_SLT: {
+ out = instr->slt(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_SGE: {
+ out = instr->sge(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_MAD: {
+ out = instr->madd(inputs[0], inputs[1], inputs[2]);
+ }
+ break;
+ case TGSI_OPCODE_SUB: {
+ out = instr->sub(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_LRP: {
+ out = instr->lerp(inputs[0], inputs[1], inputs[2]);
+ }
+ break;
+ case TGSI_OPCODE_CND: {
+ out = instr->cnd(inputs[0], inputs[1], inputs[2]);
+ }
+ break;
+ case TGSI_OPCODE_CND0: {
+ out = instr->cnd0(inputs[0], inputs[1], inputs[2]);
+ }
+ break;
+ case TGSI_OPCODE_DP2A: {
+ out = instr->dot2add(inputs[0], inputs[1], inputs[2]);
+ }
+ break;
+ case TGSI_OPCODE_FRC: {
+ out = instr->frc(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_CLAMP: {
+ out = instr->clamp(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_FLR: {
+ out = instr->floor(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_ROUND:
+ break;
+ case TGSI_OPCODE_EX2: {
+ out = instr->ex2(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_LG2: {
+ out = instr->lg2(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_POW: {
+ out = instr->pow(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_XPD: {
+ out = instr->cross(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_ABS: {
+ out = instr->abs(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_RCC:
+ break;
+ case TGSI_OPCODE_DPH: {
+ out = instr->dph(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_COS: {
+ out = instr->cos(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_DDX: {
+ out = instr->ddx(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_DDY: {
+ out = instr->ddy(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_KILP:
+ break;
+ case TGSI_OPCODE_PK2H:
+ break;
+ case TGSI_OPCODE_PK2US:
+ break;
+ case TGSI_OPCODE_PK4B:
+ break;
+ case TGSI_OPCODE_PK4UB:
+ break;
+ case TGSI_OPCODE_RFL:
+ break;
+ case TGSI_OPCODE_SEQ: {
+ out = instr->seq(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_SFL: {
+ out = instr->sfl(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_SGT: {
+ out = instr->sgt(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_SIN: {
+ out = instr->sin(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_SLE: {
+ out = instr->sle(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_SNE: {
+ out = instr->sne(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_STR: {
+ out = instr->str(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_TEX:
+ break;
+ case TGSI_OPCODE_TXD:
+ break;
+ case TGSI_OPCODE_UP2H:
+ break;
+ case TGSI_OPCODE_UP2US:
+ break;
+ case TGSI_OPCODE_UP4B:
+ break;
+ case TGSI_OPCODE_UP4UB:
+ break;
+ case TGSI_OPCODE_X2D: {
+ out = instr->x2d(inputs[0], inputs[1], inputs[2]);
+ }
+ break;
+ case TGSI_OPCODE_ARA:
+ break;
+ case TGSI_OPCODE_ARR:
+ break;
+ case TGSI_OPCODE_BRA:
+ break;
+ case TGSI_OPCODE_CAL: {
+ instr->cal(inst->InstructionExtLabel.Label, storage->inputPtr());
+ return;
+ }
+ break;
+ case TGSI_OPCODE_RET: {
+ instr->end();
+ return;
+ }
+ break;
+ case TGSI_OPCODE_SSG:
+ break;
+ case TGSI_OPCODE_CMP: {
+ out = instr->cmp(inputs[0], inputs[1], inputs[2]);
+ }
+ break;
+ case TGSI_OPCODE_SCS: {
+ out = instr->scs(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_TXB:
+ break;
+ case TGSI_OPCODE_NRM4:
+ case TGSI_OPCODE_NRM: {
+ out = instr->nrm(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_DIV: {
+ out = instr->div(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_DP2: {
+ out = instr->dp2(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_TXL:
+ break;
+ case TGSI_OPCODE_BRK: {
+ instr->brk();
+ return;
+ }
+ break;
+ case TGSI_OPCODE_IF: {
+ instr->ifop(inputs[0]);
+ storage->setCurrentBlock(instr->currentBlock());
+ return; //just update the state
+ }
+ break;
+ case TGSI_OPCODE_BGNFOR:
+ break;
+ case TGSI_OPCODE_REP:
+ break;
+ case TGSI_OPCODE_ELSE: {
+ instr->elseop();
+ storage->setCurrentBlock(instr->currentBlock());
+ return; //only state update
+ }
+ break;
+ case TGSI_OPCODE_ENDIF: {
+ instr->endif();
+ storage->setCurrentBlock(instr->currentBlock());
+ return; //just update the state
+ }
+ break;
+ case TGSI_OPCODE_ENDFOR:
+ break;
+ case TGSI_OPCODE_ENDREP:
+ break;
+ case TGSI_OPCODE_PUSHA:
+ break;
+ case TGSI_OPCODE_POPA:
+ break;
+ case TGSI_OPCODE_CEIL:
+ break;
+ case TGSI_OPCODE_I2F:
+ break;
+ case TGSI_OPCODE_NOT:
+ break;
+ case TGSI_OPCODE_TRUNC: {
+ out = instr->trunc(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_SHL:
+ break;
+ case TGSI_OPCODE_SHR:
+ break;
+ case TGSI_OPCODE_AND:
+ break;
+ case TGSI_OPCODE_OR:
+ break;
+ case TGSI_OPCODE_MOD:
+ break;
+ case TGSI_OPCODE_XOR:
+ break;
+ case TGSI_OPCODE_SAD:
+ break;
+ case TGSI_OPCODE_TXF:
+ break;
+ case TGSI_OPCODE_TXQ:
+ break;
+ case TGSI_OPCODE_CONT:
+ break;
+ case TGSI_OPCODE_EMIT:
+ break;
+ case TGSI_OPCODE_ENDPRIM:
+ break;
+ case TGSI_OPCODE_BGNLOOP: {
+ instr->beginLoop();
+ storage->setCurrentBlock(instr->currentBlock());
+ return;
+ }
+ break;
+ case TGSI_OPCODE_BGNSUB: {
+ instr->bgnSub(instno);
+ storage->setCurrentBlock(instr->currentBlock());
+ storage->pushTemps();
+ return;
+ }
+ break;
+ case TGSI_OPCODE_ENDLOOP: {
+ instr->endLoop();
+ storage->setCurrentBlock(instr->currentBlock());
+ return;
+ }
+ break;
+ case TGSI_OPCODE_ENDSUB: {
+ instr->endSub();
+ storage->setCurrentBlock(instr->currentBlock());
+ storage->popArguments();
+ storage->popTemps();
+ return;
+ }
+ break;
+ case TGSI_OPCODE_NOISE1:
+ break;
+ case TGSI_OPCODE_NOISE2:
+ break;
+ case TGSI_OPCODE_NOISE3:
+ break;
+ case TGSI_OPCODE_NOISE4:
+ break;
+ case TGSI_OPCODE_NOP:
+ break;
+ case TGSI_OPCODE_CALLNZ:
+ break;
+ case TGSI_OPCODE_IFC:
+ break;
+ case TGSI_OPCODE_BREAKC:
+ break;
+ case TGSI_OPCODE_KIL: {
+ out = instr->kil(inputs[0]);
+ storage->setKilElement(out);
+ return;
+ }
+ break;
+ case TGSI_OPCODE_END:
+ instr->end();
+ return;
+ break;
+ default:
+ fprintf(stderr, "ERROR: Unknown opcode %d\n",
+ inst->Instruction.Opcode);
+ assert(0);
+ break;
+ }
+
+ if (!out) {
+ fprintf(stderr, "ERROR: unsupported opcode %d\n",
+ inst->Instruction.Opcode);
+ assert(!"Unsupported opcode");
+ }
+
+ /* Saturation is currently ignored here; it is not clear whether it needs handling. */
+ switch( inst->Instruction.Saturate ) {
+ case TGSI_SAT_NONE:
+ break;
+ case TGSI_SAT_ZERO_ONE:
+ /*TXT( "_SAT" );*/
+ break;
+ case TGSI_SAT_MINUS_PLUS_ONE:
+ /*TXT( "_SAT[-1,1]" );*/
+ break;
+ default:
+ assert( 0 );
+ }
+
+ /* store results */
+ for (int i = 0; i < inst->Instruction.NumDstRegs; ++i) {
+ struct tgsi_full_dst_register *dst = &inst->FullDstRegisters[i];
+
+ if (dst->DstRegister.File == TGSI_FILE_OUTPUT) {
+ storage->setOutputElement(dst->DstRegister.Index, out, dst->DstRegister.WriteMask);
+ } else if (dst->DstRegister.File == TGSI_FILE_TEMPORARY) {
+ storage->setTempElement(dst->DstRegister.Index, out, dst->DstRegister.WriteMask);
+ } else if (dst->DstRegister.File == TGSI_FILE_ADDRESS) {
+ storage->setAddrElement(dst->DstRegister.Index, out, dst->DstRegister.WriteMask);
+ } else {
+ fprintf(stderr, "ERROR: unsupported LLVM destination!");
+ assert(!"wrong destination");
+ }
+ }
+}
+
+
+static void
+translate_instructionir(llvm::Module *module,
+ StorageSoa *storage,
+ InstructionsSoa *instr,
+ struct tgsi_full_instruction *inst,
+ struct tgsi_full_instruction *fi,
+ unsigned instno)
+{
+ std::vector< std::vector<llvm::Value*> > inputs(inst->Instruction.NumSrcRegs);
+
+ for (int i = 0; i < inst->Instruction.NumSrcRegs; ++i) {
+ struct tgsi_full_src_register *src = &inst->FullSrcRegisters[i];
+ std::vector<llvm::Value*> val;
+ llvm::Value *indIdx = 0;
+ int swizzle = swizzleInt(src);
+
+ if (src->SrcRegister.Indirect) {
+ indIdx = storage->addrElement(src->SrcRegisterInd.Index);
+ }
+ val = storage->load((enum tgsi_file_type)src->SrcRegister.File,
+ src->SrcRegister.Index, swizzle, instr->getIRBuilder(), indIdx);
+
+ inputs[i] = val;
+ }
+
+ std::vector<llvm::Value*> out(4);
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_ARL: {
+ out = instr->arl(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_MOV: {
+ out = inputs[0];
+ }
+ break;
+ case TGSI_OPCODE_LIT: {
+ out = instr->lit(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_RCP: {
+ }
+ break;
+ case TGSI_OPCODE_RSQ: {
+ out = instr->rsq(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_EXP:
+ break;
+ case TGSI_OPCODE_LOG:
+ break;
+ case TGSI_OPCODE_MUL: {
+ out = instr->mul(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_ADD: {
+ out = instr->add(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_DP3: {
+ out = instr->dp3(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_DP4: {
+ out = instr->dp4(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_DST: {
+ }
+ break;
+ case TGSI_OPCODE_MIN: {
+ out = instr->min(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_MAX: {
+ out = instr->max(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_SLT: {
+ out = instr->slt(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_SGE: {
+ }
+ break;
+ case TGSI_OPCODE_MAD: {
+ out = instr->madd(inputs[0], inputs[1], inputs[2]);
+ }
+ break;
+ case TGSI_OPCODE_SUB: {
+ out = instr->sub(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_LRP: {
+ }
+ break;
+ case TGSI_OPCODE_CND:
+ break;
+ case TGSI_OPCODE_CND0:
+ break;
+ case TGSI_OPCODE_DP2A:
+ break;
+ case TGSI_OPCODE_FRC: {
+ }
+ break;
+ case TGSI_OPCODE_CLAMP:
+ break;
+ case TGSI_OPCODE_FLR: {
+ }
+ break;
+ case TGSI_OPCODE_ROUND:
+ break;
+ case TGSI_OPCODE_EX2: {
+ }
+ break;
+ case TGSI_OPCODE_LG2: {
+ }
+ break;
+ case TGSI_OPCODE_POW: {
+ out = instr->pow(inputs[0], inputs[1]);
+ }
+ break;
+ case TGSI_OPCODE_XPD: {
+ }
+ break;
+ case TGSI_OPCODE_ABS: {
+ out = instr->abs(inputs[0]);
+ }
+ break;
+ case TGSI_OPCODE_RCC:
+ break;
+ case TGSI_OPCODE_DPH: {
+ }
+ break;
+ case TGSI_OPCODE_COS: {
+ }
+ break;
+ case TGSI_OPCODE_DDX:
+ break;
+ case TGSI_OPCODE_DDY:
+ break;
+ case TGSI_OPCODE_KILP:
+ break;
+ case TGSI_OPCODE_PK2H:
+ break;
+ case TGSI_OPCODE_PK2US:
+ break;
+ case TGSI_OPCODE_PK4B:
+ break;
+ case TGSI_OPCODE_PK4UB:
+ break;
+ case TGSI_OPCODE_RFL:
+ break;
+ case TGSI_OPCODE_SEQ:
+ break;
+ case TGSI_OPCODE_SFL:
+ break;
+ case TGSI_OPCODE_SGT: {
+ }
+ break;
+ case TGSI_OPCODE_SIN: {
+ }
+ break;
+ case TGSI_OPCODE_SLE:
+ break;
+ case TGSI_OPCODE_SNE:
+ break;
+ case TGSI_OPCODE_STR:
+ break;
+ case TGSI_OPCODE_TEX:
+ break;
+ case TGSI_OPCODE_TXD:
+ break;
+ case TGSI_OPCODE_UP2H:
+ break;
+ case TGSI_OPCODE_UP2US:
+ break;
+ case TGSI_OPCODE_UP4B:
+ break;
+ case TGSI_OPCODE_UP4UB:
+ break;
+ case TGSI_OPCODE_X2D:
+ break;
+ case TGSI_OPCODE_ARA:
+ break;
+ case TGSI_OPCODE_ARR:
+ break;
+ case TGSI_OPCODE_BRA:
+ break;
+ case TGSI_OPCODE_CAL: {
+ }
+ break;
+ case TGSI_OPCODE_RET: {
+ }
+ break;
+ case TGSI_OPCODE_SSG:
+ break;
+ case TGSI_OPCODE_CMP: {
+ }
+ break;
+ case TGSI_OPCODE_SCS: {
+ }
+ break;
+ case TGSI_OPCODE_TXB:
+ break;
+ case TGSI_OPCODE_NRM:
+ break;
+ case TGSI_OPCODE_DIV:
+ break;
+ case TGSI_OPCODE_DP2:
+ break;
+ case TGSI_OPCODE_TXL:
+ break;
+ case TGSI_OPCODE_BRK: {
+ }
+ break;
+ case TGSI_OPCODE_IF: {
+ }
+ break;
+ case TGSI_OPCODE_BGNFOR:
+ break;
+ case TGSI_OPCODE_REP:
+ break;
+ case TGSI_OPCODE_ELSE: {
+ }
+ break;
+ case TGSI_OPCODE_ENDIF: {
+ }
+ break;
+ case TGSI_OPCODE_ENDFOR:
+ break;
+ case TGSI_OPCODE_ENDREP:
+ break;
+ case TGSI_OPCODE_PUSHA:
+ break;
+ case TGSI_OPCODE_POPA:
+ break;
+ case TGSI_OPCODE_CEIL:
+ break;
+ case TGSI_OPCODE_I2F:
+ break;
+ case TGSI_OPCODE_NOT:
+ break;
+ case TGSI_OPCODE_TRUNC: {
+ }
+ break;
+ case TGSI_OPCODE_SHL:
+ break;
+ case TGSI_OPCODE_SHR:
+ break;
+ case TGSI_OPCODE_AND:
+ break;
+ case TGSI_OPCODE_OR:
+ break;
+ case TGSI_OPCODE_MOD:
+ break;
+ case TGSI_OPCODE_XOR:
+ break;
+ case TGSI_OPCODE_SAD:
+ break;
+ case TGSI_OPCODE_TXF:
+ break;
+ case TGSI_OPCODE_TXQ:
+ break;
+ case TGSI_OPCODE_CONT:
+ break;
+ case TGSI_OPCODE_EMIT:
+ break;
+ case TGSI_OPCODE_ENDPRIM:
+ break;
+ case TGSI_OPCODE_BGNLOOP: {
+ }
+ break;
+ case TGSI_OPCODE_BGNSUB: {
+ }
+ break;
+ case TGSI_OPCODE_ENDLOOP: {
+ }
+ break;
+ case TGSI_OPCODE_ENDSUB: {
+ }
+ break;
+ case TGSI_OPCODE_NOISE1:
+ break;
+ case TGSI_OPCODE_NOISE2:
+ break;
+ case TGSI_OPCODE_NOISE3:
+ break;
+ case TGSI_OPCODE_NOISE4:
+ break;
+ case TGSI_OPCODE_NOP:
+ break;
+ case TGSI_OPCODE_NRM4:
+ break;
+ case TGSI_OPCODE_CALLNZ:
+ break;
+ case TGSI_OPCODE_IFC:
+ break;
+ case TGSI_OPCODE_BREAKC:
+ break;
+ case TGSI_OPCODE_KIL: {
+ }
+ break;
+ case TGSI_OPCODE_END:
+ instr->end();
+ return;
+ break;
+ default:
+ fprintf(stderr, "ERROR: Unknown opcode %d\n",
+ inst->Instruction.Opcode);
+ assert(0);
+ break;
+ }
+
+ if (!out[0]) {
+ fprintf(stderr, "ERROR: unsupported opcode %d\n",
+ inst->Instruction.Opcode);
+ assert(!"Unsupported opcode");
+ }
+
+ /* store results */
+ for (int i = 0; i < inst->Instruction.NumDstRegs; ++i) {
+ struct tgsi_full_dst_register *dst = &inst->FullDstRegisters[i];
+ storage->store((enum tgsi_file_type)dst->DstRegister.File,
+ dst->DstRegister.Index, out, dst->DstRegister.WriteMask,
+ instr->getIRBuilder() );
+ }
+}
+
+llvm::Module *
+tgsi_to_llvm(struct gallivm_ir *ir, const struct tgsi_token *tokens)
+{
+ llvm::Module *mod = new Module("shader");
+ struct tgsi_parse_context parse;
+ struct tgsi_full_instruction fi;
+ struct tgsi_full_declaration fd;
+ unsigned instno = 0;
+ Function* shader = mod->getFunction("execute_shader");
+ std::ostringstream stream;
+ if (ir->type == GALLIVM_VS) {
+ stream << "vs_shader";
+ } else {
+ stream << "fs_shader";
+ }
+ stream << ir->id;
+ std::string func_name = stream.str();
+ shader->setName(func_name.c_str());
+
+ Function::arg_iterator args = shader->arg_begin();
+ Value *ptr_INPUT = args++;
+ ptr_INPUT->setName("input");
+
+ BasicBlock *label_entry = BasicBlock::Create("entry", shader, 0);
+
+ tgsi_parse_init(&parse, tokens);
+
+ fi = tgsi_default_full_instruction();
+ fd = tgsi_default_full_declaration();
+ Storage storage(label_entry, ptr_INPUT);
+ Instructions instr(mod, shader, label_entry, &storage);
+ while(!tgsi_parse_end_of_tokens(&parse)) {
+ tgsi_parse_token(&parse);
+
+ switch (parse.FullToken.Token.Type) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ translate_declaration(ir, mod, &storage,
+ &parse.FullToken.FullDeclaration,
+ &fd);
+ break;
+
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ translate_immediate(&storage,
+ &parse.FullToken.FullImmediate);
+ break;
+
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ translate_instruction(mod, &storage, &instr,
+ &parse.FullToken.FullInstruction,
+ &fi, instno);
+ ++instno;
+ break;
+
+ default:
+ assert(0);
+ }
+ }
+
+ tgsi_parse_free(&parse);
+
+ ir->num_consts = storage.numConsts();
+ return mod;
+}
+
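+/* Same translation loop as tgsi_to_llvm() above, but this variant emits the
+ * structure-of-arrays flavour: it uses StorageSoa/InstructionsSoa and gives
+ * the shader the fixed signature built by vertexShaderFunctionType(). */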
+llvm::Module * tgsi_to_llvmir(struct gallivm_ir *ir,
+ const struct tgsi_token *tokens)
+{
+ llvm::Module *mod = new Module("shader");
+ struct tgsi_parse_context parse;
+ struct tgsi_full_instruction fi;
+ struct tgsi_full_declaration fd;
+ unsigned instno = 0;
+ std::ostringstream stream;
+ if (ir->type == GALLIVM_VS) {
+ stream << "vs_shader";
+ } else {
+ stream << "fs_shader";
+ }
+ //stream << ir->id;
+ std::string func_name = stream.str();
+ Function *shader = llvm::cast<Function>(mod->getOrInsertFunction(
+ func_name.c_str(),
+ vertexShaderFunctionType()));
+
+ Function::arg_iterator args = shader->arg_begin();
+ Value *input = args++;
+ input->setName("inputs");
+ Value *output = args++;
+ output->setName("outputs");
+ Value *consts = args++;
+ consts->setName("consts");
+
+ BasicBlock *label_entry = BasicBlock::Create("entry", shader, 0);
+
+ tgsi_parse_init(&parse, tokens);
+
+ fi = tgsi_default_full_instruction();
+ fd = tgsi_default_full_declaration();
+
+ StorageSoa storage(label_entry, input, output, consts);
+ InstructionsSoa instr(mod, shader, label_entry, &storage);
+
+ while(!tgsi_parse_end_of_tokens(&parse)) {
+ tgsi_parse_token(&parse);
+
+ switch (parse.FullToken.Token.Type) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ translate_declarationir(ir, mod, &storage,
+ &parse.FullToken.FullDeclaration,
+ &fd);
+ break;
+
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ translate_immediateir(&storage,
+ &parse.FullToken.FullImmediate);
+ break;
+
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ storage.declareImmediates();
+ translate_instructionir(mod, &storage, &instr,
+ &parse.FullToken.FullInstruction,
+ &fi, instno);
+ ++instno;
+ break;
+
+ default:
+ assert(0);
+ }
+ }
+
+ tgsi_parse_free(&parse);
+
+ return mod;
+}
diff --git a/src/gallium/auxiliary/gallivm/tgsitollvm.h b/src/gallium/auxiliary/gallivm/tgsitollvm.h
new file mode 100644
index 0000000000..7ada04d629
--- /dev/null
+++ b/src/gallium/auxiliary/gallivm/tgsitollvm.h
@@ -0,0 +1,20 @@
+#ifndef TGSITOLLVM_H
+#define TGSITOLLVM_H
+
+
+namespace llvm {
+ class Module;
+}
+
+struct gallivm_ir;
+struct tgsi_token;
+
+
+llvm::Module * tgsi_to_llvm(struct gallivm_ir *ir,
+ const struct tgsi_token *tokens);
+
+
+llvm::Module * tgsi_to_llvmir(struct gallivm_ir *ir,
+ const struct tgsi_token *tokens);
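+
+/* A minimal usage sketch, assuming the caller owns "ir" and "tokens" and
+ * sets up JIT execution elsewhere (none of that is shown here):
+ *
+ *   llvm::Module *mod = tgsi_to_llvmir(ir, tokens);
+ *   // hand "mod" over to an llvm::ExecutionEngine / ModuleProvider
+ */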
+
+#endif
diff --git a/src/gallium/auxiliary/indices/Makefile b/src/gallium/auxiliary/indices/Makefile
new file mode 100644
index 0000000000..f2ebc3f410
--- /dev/null
+++ b/src/gallium/auxiliary/indices/Makefile
@@ -0,0 +1,16 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = indices
+
+C_SOURCES = \
+ u_indices_gen.c \
+ u_unfilled_gen.c
+
+include ../../Makefile.template
+
+u_indices_gen.c: u_indices_gen.py
+ python $< > $@
+
+u_unfilled_gen.c: u_unfilled_gen.py
+ python $< > $@
diff --git a/src/gallium/auxiliary/indices/SConscript b/src/gallium/auxiliary/indices/SConscript
new file mode 100644
index 0000000000..712e215534
--- /dev/null
+++ b/src/gallium/auxiliary/indices/SConscript
@@ -0,0 +1,28 @@
+Import('*')
+
+from sys import executable as python_cmd
+
+env.CodeGenerate(
+ target = 'u_indices_gen.c',
+ script = 'u_indices_gen.py',
+ source = [],
+ command = python_cmd + ' $SCRIPT > $TARGET'
+)
+
+env.CodeGenerate(
+ target = 'u_unfilled_gen.c',
+ script = 'u_unfilled_gen.py',
+ source = [],
+ command = python_cmd + ' $SCRIPT > $TARGET'
+)
+
+indices = env.ConvenienceLibrary(
+ target = 'indices',
+ source = [
+# 'u_indices.c',
+# 'u_unfilled_indices.c',
+ 'u_indices_gen.c',
+ 'u_unfilled_gen.c',
+ ])
+
+auxiliaries.insert(0, indices)
diff --git a/src/gallium/auxiliary/indices/u_indices.c b/src/gallium/auxiliary/indices/u_indices.c
new file mode 100644
index 0000000000..03d7453f30
--- /dev/null
+++ b/src/gallium/auxiliary/indices/u_indices.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "u_indices.h"
+#include "u_indices_priv.h"
+
+static void translate_memcpy_ushort( const void *in,
+ unsigned nr,
+ void *out )
+{
+ memcpy(out, in, nr*sizeof(short));
+}
+
+static void translate_memcpy_uint( const void *in,
+ unsigned nr,
+ void *out )
+{
+ memcpy(out, in, nr*sizeof(int));
+}
+
+
+int u_index_translator( unsigned hw_mask,
+ unsigned prim,
+ unsigned in_index_size,
+ unsigned nr,
+ unsigned in_pv,
+ unsigned out_pv,
+ unsigned *out_prim,
+ unsigned *out_index_size,
+ unsigned *out_nr,
+ u_translate_func *out_translate )
+{
+ unsigned in_idx;
+ unsigned out_idx;
+ int ret = U_TRANSLATE_NORMAL;
+
+ u_index_init();
+
+ in_idx = in_size_idx(in_index_size);
+ *out_index_size = (in_index_size == 4) ? 4 : 2;
+ out_idx = out_size_idx(*out_index_size);
+
+ if ((hw_mask & (1<<prim)) &&
+ in_index_size == *out_index_size &&
+ in_pv == out_pv)
+ {
+ if (in_index_size == 4)
+ *out_translate = translate_memcpy_uint;
+ else
+ *out_translate = translate_memcpy_ushort;
+
+ *out_prim = prim;
+ *out_nr = nr;
+
+ return U_TRANSLATE_MEMCPY;
+ }
+ else {
+ switch (prim) {
+ case PIPE_PRIM_POINTS:
+ *out_translate = translate[in_idx][out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_POINTS;
+ *out_nr = nr;
+ break;
+
+ case PIPE_PRIM_LINES:
+ *out_translate = translate[in_idx][out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_LINES;
+ *out_nr = nr;
+ break;
+
+ case PIPE_PRIM_LINE_STRIP:
+ *out_translate = translate[in_idx][out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_LINES;
+ *out_nr = (nr - 1) * 2;
+ break;
+
+ case PIPE_PRIM_LINE_LOOP:
+ *out_translate = translate[in_idx][out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_LINES;
+ *out_nr = nr * 2;
+ break;
+
+ case PIPE_PRIM_TRIANGLES:
+ *out_translate = translate[in_idx][out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_TRIANGLES;
+ *out_nr = nr;
+ break;
+
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ *out_translate = translate[in_idx][out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_TRIANGLES;
+ *out_nr = (nr - 2) * 3;
+ break;
+
+ case PIPE_PRIM_TRIANGLE_FAN:
+ *out_translate = translate[in_idx][out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_TRIANGLES;
+ *out_nr = (nr - 2) * 3;
+ break;
+
+ case PIPE_PRIM_QUADS:
+ *out_translate = translate[in_idx][out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_TRIANGLES;
+ *out_nr = (nr / 4) * 6;
+ break;
+
+ case PIPE_PRIM_QUAD_STRIP:
+ *out_translate = translate[in_idx][out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_TRIANGLES;
+ *out_nr = (nr - 2) * 3;
+ break;
+
+ case PIPE_PRIM_POLYGON:
+ *out_translate = translate[in_idx][out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_TRIANGLES;
+ *out_nr = (nr - 2) * 3;
+ break;
+
+ default:
+ assert(0);
+ *out_translate = translate[in_idx][out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_POINTS;
+ *out_nr = nr;
+ return U_TRANSLATE_ERROR;
+ }
+ }
+
+ return ret;
+}
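+
+/* Example of the count conversions above: a 5-vertex triangle strip becomes
+ * (5 - 2) * 3 = 9 indices (3 triangles), and an 8-vertex quad list becomes
+ * (8 / 4) * 6 = 12 indices (4 triangles).
+ */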
+
+
+
+
+
+int u_index_generator( unsigned hw_mask,
+ unsigned prim,
+ unsigned start,
+ unsigned nr,
+ unsigned in_pv,
+ unsigned out_pv,
+ unsigned *out_prim,
+ unsigned *out_index_size,
+ unsigned *out_nr,
+ u_generate_func *out_generate )
+
+{
+ unsigned out_idx;
+
+ u_index_init();
+
+ *out_index_size = ((start + nr) > 0xfffe) ? 4 : 2;
+ out_idx = out_size_idx(*out_index_size);
+
+ if ((hw_mask & (1<<prim)) &&
+ (in_pv == out_pv)) {
+
+ *out_generate = generate[out_idx][in_pv][out_pv][PIPE_PRIM_POINTS];
+ *out_prim = prim;
+ *out_nr = nr;
+ return U_GENERATE_LINEAR;
+ }
+ else {
+ switch (prim) {
+ case PIPE_PRIM_POINTS:
+ *out_generate = generate[out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_POINTS;
+ *out_nr = nr;
+ return U_GENERATE_REUSABLE;
+
+ case PIPE_PRIM_LINES:
+ *out_generate = generate[out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_LINES;
+ *out_nr = nr;
+ return U_GENERATE_REUSABLE;
+
+ case PIPE_PRIM_LINE_STRIP:
+ *out_generate = generate[out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_LINES;
+ *out_nr = (nr - 1) * 2;
+ return U_GENERATE_REUSABLE;
+
+ case PIPE_PRIM_LINE_LOOP:
+ *out_generate = generate[out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_LINES;
+ *out_nr = nr * 2;
+ return U_GENERATE_ONE_OFF;
+
+ case PIPE_PRIM_TRIANGLES:
+ *out_generate = generate[out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_TRIANGLES;
+ *out_nr = nr;
+ return U_GENERATE_REUSABLE;
+
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ *out_generate = generate[out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_TRIANGLES;
+ *out_nr = (nr - 2) * 3;
+ return U_GENERATE_REUSABLE;
+
+ case PIPE_PRIM_TRIANGLE_FAN:
+ *out_generate = generate[out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_TRIANGLES;
+ *out_nr = (nr - 2) * 3;
+ return U_GENERATE_REUSABLE;
+
+ case PIPE_PRIM_QUADS:
+ *out_generate = generate[out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_TRIANGLES;
+ *out_nr = (nr / 4) * 6;
+ return U_GENERATE_REUSABLE;
+
+ case PIPE_PRIM_QUAD_STRIP:
+ *out_generate = generate[out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_TRIANGLES;
+ *out_nr = (nr - 2) * 3;
+ return U_GENERATE_REUSABLE;
+
+ case PIPE_PRIM_POLYGON:
+ *out_generate = generate[out_idx][in_pv][out_pv][prim];
+ *out_prim = PIPE_PRIM_TRIANGLES;
+ *out_nr = (nr - 2) * 3;
+ return U_GENERATE_REUSABLE;
+
+ default:
+ assert(0);
+ *out_generate = generate[out_idx][in_pv][out_pv][PIPE_PRIM_POINTS];
+ *out_prim = PIPE_PRIM_POINTS;
+ *out_nr = nr;
+ return U_TRANSLATE_ERROR;
+ }
+ }
+}
diff --git a/src/gallium/auxiliary/indices/u_indices.h b/src/gallium/auxiliary/indices/u_indices.h
new file mode 100644
index 0000000000..be522c6725
--- /dev/null
+++ b/src/gallium/auxiliary/indices/u_indices.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef U_INDICES_H
+#define U_INDICES_H
+
+#include "pipe/p_compiler.h"
+
+#define PV_FIRST 0
+#define PV_LAST 1
+#define PV_COUNT 2
+
+typedef void (*u_translate_func)( const void *in,
+ unsigned nr,
+ void *out );
+
+typedef void (*u_generate_func)( unsigned nr,
+ void *out );
+
+
+/* Return codes describe the translate/generate operation. Caller may
+ * be able to reuse translated indices under some circumstances.
+ */
+#define U_TRANSLATE_ERROR -1
+#define U_TRANSLATE_NORMAL 1
+#define U_TRANSLATE_MEMCPY 2
+#define U_GENERATE_LINEAR 3
+#define U_GENERATE_REUSABLE 4
+#define U_GENERATE_ONE_OFF 5
+
+
+void u_index_init( void );
+
+int u_index_translator( unsigned hw_mask,
+ unsigned prim,
+ unsigned in_index_size,
+ unsigned nr,
+ unsigned in_pv, /* API */
+ unsigned out_pv, /* hardware */
+ unsigned *out_prim,
+ unsigned *out_index_size,
+ unsigned *out_nr,
+ u_translate_func *out_translate );
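+
+/* A minimal usage sketch (the values are illustrative): a driver that can
+ * only draw triangle lists might translate a ubyte quad list like this:
+ *
+ *   u_translate_func trans;
+ *   unsigned out_prim, out_index_size, out_nr;
+ *   int ret = u_index_translator((1 << PIPE_PRIM_TRIANGLES),
+ *                                PIPE_PRIM_QUADS, 1, in_nr,
+ *                                PV_LAST, PV_LAST,
+ *                                &out_prim, &out_index_size, &out_nr,
+ *                                &trans);
+ *   // then allocate out_nr * out_index_size bytes and call
+ *   // trans(in_indices, out_nr, out_indices);
+ */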
+
+/* Note that even when generating, it is necessary to know the API's
+ * provoking vertex (PV), since the generated indices depend on whether
+ * it matches the hardware's and, for triangle strips, on whether it is
+ * first or last.
+ */
+int u_index_generator( unsigned hw_mask,
+ unsigned prim,
+ unsigned start,
+ unsigned nr,
+ unsigned in_pv, /* API */
+ unsigned out_pv, /* hardware */
+ unsigned *out_prim,
+ unsigned *out_index_size,
+ unsigned *out_nr,
+ u_generate_func *out_generate );
+
+
+void u_unfilled_init( void );
+
+int u_unfilled_translator( unsigned prim,
+ unsigned in_index_size,
+ unsigned nr,
+ unsigned unfilled_mode,
+ unsigned *out_prim,
+ unsigned *out_index_size,
+ unsigned *out_nr,
+ u_translate_func *out_translate );
+
+int u_unfilled_generator( unsigned prim,
+ unsigned start,
+ unsigned nr,
+ unsigned unfilled_mode,
+ unsigned *out_prim,
+ unsigned *out_index_size,
+ unsigned *out_nr,
+ u_generate_func *out_generate );
+
+
+
+
+#endif
diff --git a/src/gallium/auxiliary/indices/u_indices_gen.c b/src/gallium/auxiliary/indices/u_indices_gen.c
new file mode 100644
index 0000000000..3c981e5d7f
--- /dev/null
+++ b/src/gallium/auxiliary/indices/u_indices_gen.c
@@ -0,0 +1,5129 @@
+/* File automatically generated by indices.py */
+
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+
+/**
+ * @file
+ * Functions to translate and generate index lists
+ */
+
+#include "indices/u_indices.h"
+#include "indices/u_indices_priv.h"
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+
+
+static unsigned out_size_idx( unsigned index_size )
+{
+ switch (index_size) {
+ case 4: return OUT_UINT;
+ case 2: return OUT_USHORT;
+ default: assert(0); return OUT_USHORT;
+ }
+}
+
+static unsigned in_size_idx( unsigned index_size )
+{
+ switch (index_size) {
+ case 4: return IN_UINT;
+ case 2: return IN_USHORT;
+ case 1: return IN_UBYTE;
+ default: assert(0); return IN_UBYTE;
+ }
+}
+
+
+static u_translate_func translate[IN_COUNT][OUT_COUNT][PV_COUNT][PV_COUNT][PRIM_COUNT];
+static u_generate_func generate[OUT_COUNT][PV_COUNT][PV_COUNT][PRIM_COUNT];
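+
+/* The dispatch tables above are indexed as
+ *   translate[in index size][out index size][api pv][hw pv][prim]
+ *   generate [out index size][api pv][hw pv][prim];
+ * u_index_init() populates them with the specialized functions below. */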
+
+
+
+static void generate_points_ushort_first2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (ushort)(i);
+ }
+}
+static void generate_lines_ushort_first2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (ushort)(i);
+ (out+i)[1] = (ushort)(i+1);
+ }
+}
+static void generate_linestrip_ushort_first2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (ushort)(i);
+ (out+j)[1] = (ushort)(i+1);
+ }
+}
+static void generate_lineloop_ushort_first2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (ushort)(i);
+ (out+j)[1] = (ushort)(i+1);
+ }
+ (out+j)[0] = (ushort)(i);
+ (out+j)[1] = (ushort)(0);
+}
+static void generate_tris_ushort_first2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (ushort)(i);
+ (out+i)[1] = (ushort)(i+1);
+ (out+i)[2] = (ushort)(i+2);
+ }
+}
+static void generate_tristrip_ushort_first2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)(i);
+ (out+j)[1] = (ushort)(i+1+(i&1));
+ (out+j)[2] = (ushort)(i+2-(i&1));
+ }
+}
+static void generate_trifan_ushort_first2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)(0);
+ (out+j)[1] = (ushort)(i+1);
+ (out+j)[2] = (ushort)(i+2);
+ }
+}
+static void generate_quads_ushort_first2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (ushort)(i+0);
+ (out+j+0)[1] = (ushort)(i+1);
+ (out+j+0)[2] = (ushort)(i+3);
+ (out+j+3)[0] = (ushort)(i+1);
+ (out+j+3)[1] = (ushort)(i+2);
+ (out+j+3)[2] = (ushort)(i+3);
+ }
+}
+static void generate_quadstrip_ushort_first2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (ushort)(i+2);
+ (out+j+0)[1] = (ushort)(i+0);
+ (out+j+0)[2] = (ushort)(i+3);
+ (out+j+3)[0] = (ushort)(i+0);
+ (out+j+3)[1] = (ushort)(i+1);
+ (out+j+3)[2] = (ushort)(i+3);
+ }
+}
+static void generate_polygon_ushort_first2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)(0);
+ (out+j)[1] = (ushort)(i+1);
+ (out+j)[2] = (ushort)(i+2);
+ }
+}
+static void generate_points_ushort_first2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (ushort)(i);
+ }
+}
+static void generate_lines_ushort_first2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (ushort)(i+1);
+ (out+i)[1] = (ushort)(i);
+ }
+}
+static void generate_linestrip_ushort_first2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (ushort)(i+1);
+ (out+j)[1] = (ushort)(i);
+ }
+}
+static void generate_lineloop_ushort_first2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (ushort)(i+1);
+ (out+j)[1] = (ushort)(i);
+ }
+ (out+j)[0] = (ushort)(0);
+ (out+j)[1] = (ushort)(i);
+}
+static void generate_tris_ushort_first2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (ushort)(i+1);
+ (out+i)[1] = (ushort)(i+2);
+ (out+i)[2] = (ushort)(i);
+ }
+}
+static void generate_tristrip_ushort_first2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)(i+1+(i&1));
+ (out+j)[1] = (ushort)(i+2-(i&1));
+ (out+j)[2] = (ushort)(i);
+ }
+}
+static void generate_trifan_ushort_first2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)(i+1);
+ (out+j)[1] = (ushort)(i+2);
+ (out+j)[2] = (ushort)(0);
+ }
+}
+static void generate_quads_ushort_first2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (ushort)(i+1);
+ (out+j+0)[1] = (ushort)(i+3);
+ (out+j+0)[2] = (ushort)(i+0);
+ (out+j+3)[0] = (ushort)(i+2);
+ (out+j+3)[1] = (ushort)(i+3);
+ (out+j+3)[2] = (ushort)(i+1);
+ }
+}
+static void generate_quadstrip_ushort_first2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (ushort)(i+0);
+ (out+j+0)[1] = (ushort)(i+3);
+ (out+j+0)[2] = (ushort)(i+2);
+ (out+j+3)[0] = (ushort)(i+1);
+ (out+j+3)[1] = (ushort)(i+3);
+ (out+j+3)[2] = (ushort)(i+0);
+ }
+}
+static void generate_polygon_ushort_first2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)(i+1);
+ (out+j)[1] = (ushort)(i+2);
+ (out+j)[2] = (ushort)(0);
+ }
+}
+static void generate_points_ushort_last2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (ushort)(i);
+ }
+}
+static void generate_lines_ushort_last2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (ushort)(i+1);
+ (out+i)[1] = (ushort)(i);
+ }
+}
+static void generate_linestrip_ushort_last2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (ushort)(i+1);
+ (out+j)[1] = (ushort)(i);
+ }
+}
+static void generate_lineloop_ushort_last2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (ushort)(i+1);
+ (out+j)[1] = (ushort)(i);
+ }
+ (out+j)[0] = (ushort)(0);
+ (out+j)[1] = (ushort)(i);
+}
+static void generate_tris_ushort_last2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (ushort)(i+2);
+ (out+i)[1] = (ushort)(i);
+ (out+i)[2] = (ushort)(i+1);
+ }
+}
+static void generate_tristrip_ushort_last2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)(i+2);
+ (out+j)[1] = (ushort)(i+(i&1));
+ (out+j)[2] = (ushort)(i+1-(i&1));
+ }
+}
+static void generate_trifan_ushort_last2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)(i+2);
+ (out+j)[1] = (ushort)(0);
+ (out+j)[2] = (ushort)(i+1);
+ }
+}
+static void generate_quads_ushort_last2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (ushort)(i+3);
+ (out+j+0)[1] = (ushort)(i+0);
+ (out+j+0)[2] = (ushort)(i+1);
+ (out+j+3)[0] = (ushort)(i+3);
+ (out+j+3)[1] = (ushort)(i+1);
+ (out+j+3)[2] = (ushort)(i+2);
+ }
+}
+static void generate_quadstrip_ushort_last2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (ushort)(i+3);
+ (out+j+0)[1] = (ushort)(i+2);
+ (out+j+0)[2] = (ushort)(i+0);
+ (out+j+3)[0] = (ushort)(i+3);
+ (out+j+3)[1] = (ushort)(i+0);
+ (out+j+3)[2] = (ushort)(i+1);
+ }
+}
+static void generate_polygon_ushort_last2first(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)(0);
+ (out+j)[1] = (ushort)(i+1);
+ (out+j)[2] = (ushort)(i+2);
+ }
+}
+static void generate_points_ushort_last2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (ushort)(i);
+ }
+}
+static void generate_lines_ushort_last2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (ushort)(i);
+ (out+i)[1] = (ushort)(i+1);
+ }
+}
+static void generate_linestrip_ushort_last2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (ushort)(i);
+ (out+j)[1] = (ushort)(i+1);
+ }
+}
+static void generate_lineloop_ushort_last2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (ushort)(i);
+ (out+j)[1] = (ushort)(i+1);
+ }
+ (out+j)[0] = (ushort)(i);
+ (out+j)[1] = (ushort)(0);
+}
+static void generate_tris_ushort_last2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (ushort)(i);
+ (out+i)[1] = (ushort)(i+1);
+ (out+i)[2] = (ushort)(i+2);
+ }
+}
+static void generate_tristrip_ushort_last2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)(i+(i&1));
+ (out+j)[1] = (ushort)(i+1-(i&1));
+ (out+j)[2] = (ushort)(i+2);
+ }
+}
+static void generate_trifan_ushort_last2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)(0);
+ (out+j)[1] = (ushort)(i+1);
+ (out+j)[2] = (ushort)(i+2);
+ }
+}
+static void generate_quads_ushort_last2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (ushort)(i+0);
+ (out+j+0)[1] = (ushort)(i+1);
+ (out+j+0)[2] = (ushort)(i+3);
+ (out+j+3)[0] = (ushort)(i+1);
+ (out+j+3)[1] = (ushort)(i+2);
+ (out+j+3)[2] = (ushort)(i+3);
+ }
+}
+static void generate_quadstrip_ushort_last2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (ushort)(i+2);
+ (out+j+0)[1] = (ushort)(i+0);
+ (out+j+0)[2] = (ushort)(i+3);
+ (out+j+3)[0] = (ushort)(i+0);
+ (out+j+3)[1] = (ushort)(i+1);
+ (out+j+3)[2] = (ushort)(i+3);
+ }
+}
+static void generate_polygon_ushort_last2last(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)(i+1);
+ (out+j)[1] = (ushort)(i+2);
+ (out+j)[2] = (ushort)(0);
+ }
+}
+static void generate_points_uint_first2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (uint)(i);
+ }
+}
+static void generate_lines_uint_first2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (uint)(i);
+ (out+i)[1] = (uint)(i+1);
+ }
+}
+static void generate_linestrip_uint_first2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (uint)(i);
+ (out+j)[1] = (uint)(i+1);
+ }
+}
+static void generate_lineloop_uint_first2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (uint)(i);
+ (out+j)[1] = (uint)(i+1);
+ }
+ (out+j)[0] = (uint)(i);
+ (out+j)[1] = (uint)(0);
+}
+static void generate_tris_uint_first2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (uint)(i);
+ (out+i)[1] = (uint)(i+1);
+ (out+i)[2] = (uint)(i+2);
+ }
+}
+static void generate_tristrip_uint_first2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)(i);
+ (out+j)[1] = (uint)(i+1+(i&1));
+ (out+j)[2] = (uint)(i+2-(i&1));
+ }
+}
+static void generate_trifan_uint_first2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)(0);
+ (out+j)[1] = (uint)(i+1);
+ (out+j)[2] = (uint)(i+2);
+ }
+}
+static void generate_quads_uint_first2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (uint)(i+0);
+ (out+j+0)[1] = (uint)(i+1);
+ (out+j+0)[2] = (uint)(i+3);
+ (out+j+3)[0] = (uint)(i+1);
+ (out+j+3)[1] = (uint)(i+2);
+ (out+j+3)[2] = (uint)(i+3);
+ }
+}
+static void generate_quadstrip_uint_first2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (uint)(i+2);
+ (out+j+0)[1] = (uint)(i+0);
+ (out+j+0)[2] = (uint)(i+3);
+ (out+j+3)[0] = (uint)(i+0);
+ (out+j+3)[1] = (uint)(i+1);
+ (out+j+3)[2] = (uint)(i+3);
+ }
+}
+static void generate_polygon_uint_first2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)(0);
+ (out+j)[1] = (uint)(i+1);
+ (out+j)[2] = (uint)(i+2);
+ }
+}
+static void generate_points_uint_first2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (uint)(i);
+ }
+}
+static void generate_lines_uint_first2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (uint)(i+1);
+ (out+i)[1] = (uint)(i);
+ }
+}
+static void generate_linestrip_uint_first2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (uint)(i+1);
+ (out+j)[1] = (uint)(i);
+ }
+}
+static void generate_lineloop_uint_first2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (uint)(i+1);
+ (out+j)[1] = (uint)(i);
+ }
+ (out+j)[0] = (uint)(0);
+ (out+j)[1] = (uint)(i);
+}
+static void generate_tris_uint_first2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (uint)(i+1);
+ (out+i)[1] = (uint)(i+2);
+ (out+i)[2] = (uint)(i);
+ }
+}
+static void generate_tristrip_uint_first2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)(i+1+(i&1));
+ (out+j)[1] = (uint)(i+2-(i&1));
+ (out+j)[2] = (uint)(i);
+ }
+}
+static void generate_trifan_uint_first2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)(i+1);
+ (out+j)[1] = (uint)(i+2);
+ (out+j)[2] = (uint)(0);
+ }
+}
+static void generate_quads_uint_first2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (uint)(i+1);
+ (out+j+0)[1] = (uint)(i+3);
+ (out+j+0)[2] = (uint)(i+0);
+ (out+j+3)[0] = (uint)(i+2);
+ (out+j+3)[1] = (uint)(i+3);
+ (out+j+3)[2] = (uint)(i+1);
+ }
+}
+static void generate_quadstrip_uint_first2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (uint)(i+0);
+ (out+j+0)[1] = (uint)(i+3);
+ (out+j+0)[2] = (uint)(i+2);
+ (out+j+3)[0] = (uint)(i+1);
+ (out+j+3)[1] = (uint)(i+3);
+ (out+j+3)[2] = (uint)(i+0);
+ }
+}
+static void generate_polygon_uint_first2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)(i+1);
+ (out+j)[1] = (uint)(i+2);
+ (out+j)[2] = (uint)(0);
+ }
+}
+static void generate_points_uint_last2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (uint)(i);
+ }
+}
+static void generate_lines_uint_last2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (uint)(i+1);
+ (out+i)[1] = (uint)(i);
+ }
+}
+static void generate_linestrip_uint_last2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (uint)(i+1);
+ (out+j)[1] = (uint)(i);
+ }
+}
+static void generate_lineloop_uint_last2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (uint)(i+1);
+ (out+j)[1] = (uint)(i);
+ }
+ (out+j)[0] = (uint)(0);
+ (out+j)[1] = (uint)(i);
+}
+static void generate_tris_uint_last2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (uint)(i+2);
+ (out+i)[1] = (uint)(i);
+ (out+i)[2] = (uint)(i+1);
+ }
+}
+static void generate_tristrip_uint_last2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)(i+2);
+ (out+j)[1] = (uint)(i+(i&1));
+ (out+j)[2] = (uint)(i+1-(i&1));
+ }
+}
+static void generate_trifan_uint_last2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)(i+2);
+ (out+j)[1] = (uint)(0);
+ (out+j)[2] = (uint)(i+1);
+ }
+}
+static void generate_quads_uint_last2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (uint)(i+3);
+ (out+j+0)[1] = (uint)(i+0);
+ (out+j+0)[2] = (uint)(i+1);
+ (out+j+3)[0] = (uint)(i+3);
+ (out+j+3)[1] = (uint)(i+1);
+ (out+j+3)[2] = (uint)(i+2);
+ }
+}
+static void generate_quadstrip_uint_last2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (uint)(i+3);
+ (out+j+0)[1] = (uint)(i+2);
+ (out+j+0)[2] = (uint)(i+0);
+ (out+j+3)[0] = (uint)(i+3);
+ (out+j+3)[1] = (uint)(i+0);
+ (out+j+3)[2] = (uint)(i+1);
+ }
+}
+static void generate_polygon_uint_last2first(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)(0);
+ (out+j)[1] = (uint)(i+1);
+ (out+j)[2] = (uint)(i+2);
+ }
+}
+static void generate_points_uint_last2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (uint)(i);
+ }
+}
+static void generate_lines_uint_last2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (uint)(i);
+ (out+i)[1] = (uint)(i+1);
+ }
+}
+static void generate_linestrip_uint_last2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (uint)(i);
+ (out+j)[1] = (uint)(i+1);
+ }
+}
+static void generate_lineloop_uint_last2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (uint)(i);
+ (out+j)[1] = (uint)(i+1);
+ }
+ (out+j)[0] = (uint)(i);
+ (out+j)[1] = (uint)(0);
+}
+static void generate_tris_uint_last2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (uint)(i);
+ (out+i)[1] = (uint)(i+1);
+ (out+i)[2] = (uint)(i+2);
+ }
+}
+static void generate_tristrip_uint_last2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)(i+(i&1));
+ (out+j)[1] = (uint)(i+1-(i&1));
+ (out+j)[2] = (uint)(i+2);
+ }
+}
+static void generate_trifan_uint_last2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)(0);
+ (out+j)[1] = (uint)(i+1);
+ (out+j)[2] = (uint)(i+2);
+ }
+}
+static void generate_quads_uint_last2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (uint)(i+0);
+ (out+j+0)[1] = (uint)(i+1);
+ (out+j+0)[2] = (uint)(i+3);
+ (out+j+3)[0] = (uint)(i+1);
+ (out+j+3)[1] = (uint)(i+2);
+ (out+j+3)[2] = (uint)(i+3);
+ }
+}
+static void generate_quadstrip_uint_last2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (uint)(i+2);
+ (out+j+0)[1] = (uint)(i+0);
+ (out+j+0)[2] = (uint)(i+3);
+ (out+j+3)[0] = (uint)(i+0);
+ (out+j+3)[1] = (uint)(i+1);
+ (out+j+3)[2] = (uint)(i+3);
+ }
+}
+static void generate_polygon_uint_last2last(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)(i+1);
+ (out+j)[1] = (uint)(i+2);
+ (out+j)[2] = (uint)(0);
+ }
+}
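+/* The generate_* helpers above synthesize index lists from nothing but a
+ * vertex count.  The translate_* helpers that follow perform the same
+ * primitive decomposition but read an existing index buffer, widening the
+ * element type where needed.  Naming follows the pattern
+ *   translate_<prim>_<intype>2<outtype>_<inpv>2<outpv>
+ * where the trailing pair tracks the provoking-vertex convention (first or
+ * last vertex) of the source and destination primitives.  This first group
+ * converts ubyte indices to ushort.
+ */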
+static void translate_points_ubyte2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (ushort)in[i];
+ }
+}
+static void translate_lines_ubyte2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (ushort)in[i];
+ (out+i)[1] = (ushort)in[i+1];
+ }
+}
+static void translate_linestrip_ubyte2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1];
+ }
+}
+static void translate_lineloop_ubyte2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1];
+ }
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[0];
+}
+static void translate_tris_ubyte2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (ushort)in[i];
+ (out+i)[1] = (ushort)in[i+1];
+ (out+i)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_tristrip_ubyte2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1+(i&1)];
+ (out+j)[2] = (ushort)in[i+2-(i&1)];
+ }
+}
+static void translate_trifan_ubyte2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_quads_ubyte2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (ushort)in[i+0];
+ (out+j+0)[1] = (ushort)in[i+1];
+ (out+j+0)[2] = (ushort)in[i+3];
+ (out+j+3)[0] = (ushort)in[i+1];
+ (out+j+3)[1] = (ushort)in[i+2];
+ (out+j+3)[2] = (ushort)in[i+3];
+ }
+}
+static void translate_quadstrip_ubyte2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (ushort)in[i+2];
+ (out+j+0)[1] = (ushort)in[i+0];
+ (out+j+0)[2] = (ushort)in[i+3];
+ (out+j+3)[0] = (ushort)in[i+0];
+ (out+j+3)[1] = (ushort)in[i+1];
+ (out+j+3)[2] = (ushort)in[i+3];
+ }
+}
+static void translate_polygon_ubyte2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_points_ubyte2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (ushort)in[i];
+ }
+}
+static void translate_lines_ubyte2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (ushort)in[i+1];
+ (out+i)[1] = (ushort)in[i];
+ }
+}
+static void translate_linestrip_ubyte2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i];
+ }
+}
+static void translate_lineloop_ubyte2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i];
+ }
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i];
+}
+static void translate_tris_ubyte2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (ushort)in[i+1];
+ (out+i)[1] = (ushort)in[i+2];
+ (out+i)[2] = (ushort)in[i];
+ }
+}
+static void translate_tristrip_ubyte2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+1+(i&1)];
+ (out+j)[1] = (ushort)in[i+2-(i&1)];
+ (out+j)[2] = (ushort)in[i];
+ }
+}
+static void translate_trifan_ubyte2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i+2];
+ (out+j)[2] = (ushort)in[0];
+ }
+}
+static void translate_quads_ubyte2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (ushort)in[i+1];
+ (out+j+0)[1] = (ushort)in[i+3];
+ (out+j+0)[2] = (ushort)in[i+0];
+ (out+j+3)[0] = (ushort)in[i+2];
+ (out+j+3)[1] = (ushort)in[i+3];
+ (out+j+3)[2] = (ushort)in[i+1];
+ }
+}
+static void translate_quadstrip_ubyte2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (ushort)in[i+0];
+ (out+j+0)[1] = (ushort)in[i+3];
+ (out+j+0)[2] = (ushort)in[i+2];
+ (out+j+3)[0] = (ushort)in[i+1];
+ (out+j+3)[1] = (ushort)in[i+3];
+ (out+j+3)[2] = (ushort)in[i+0];
+ }
+}
+static void translate_polygon_ubyte2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i+2];
+ (out+j)[2] = (ushort)in[0];
+ }
+}
+static void translate_points_ubyte2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (ushort)in[i];
+ }
+}
+static void translate_lines_ubyte2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (ushort)in[i+1];
+ (out+i)[1] = (ushort)in[i];
+ }
+}
+static void translate_linestrip_ubyte2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i];
+ }
+}
+static void translate_lineloop_ubyte2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i];
+ }
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i];
+}
+static void translate_tris_ubyte2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (ushort)in[i+2];
+ (out+i)[1] = (ushort)in[i];
+ (out+i)[2] = (ushort)in[i+1];
+ }
+}
+static void translate_tristrip_ubyte2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+2];
+ (out+j)[1] = (ushort)in[i+(i&1)];
+ (out+j)[2] = (ushort)in[i+1-(i&1)];
+ }
+}
+static void translate_trifan_ubyte2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+2];
+ (out+j)[1] = (ushort)in[0];
+ (out+j)[2] = (ushort)in[i+1];
+ }
+}
+static void translate_quads_ubyte2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (ushort)in[i+3];
+ (out+j+0)[1] = (ushort)in[i+0];
+ (out+j+0)[2] = (ushort)in[i+1];
+ (out+j+3)[0] = (ushort)in[i+3];
+ (out+j+3)[1] = (ushort)in[i+1];
+ (out+j+3)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_quadstrip_ubyte2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (ushort)in[i+3];
+ (out+j+0)[1] = (ushort)in[i+2];
+ (out+j+0)[2] = (ushort)in[i+0];
+ (out+j+3)[0] = (ushort)in[i+3];
+ (out+j+3)[1] = (ushort)in[i+0];
+ (out+j+3)[2] = (ushort)in[i+1];
+ }
+}
+static void translate_polygon_ubyte2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_points_ubyte2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (ushort)in[i];
+ }
+}
+static void translate_lines_ubyte2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (ushort)in[i];
+ (out+i)[1] = (ushort)in[i+1];
+ }
+}
+static void translate_linestrip_ubyte2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1];
+ }
+}
+static void translate_lineloop_ubyte2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1];
+ }
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[0];
+}
+static void translate_tris_ubyte2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (ushort)in[i];
+ (out+i)[1] = (ushort)in[i+1];
+ (out+i)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_tristrip_ubyte2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+(i&1)];
+ (out+j)[1] = (ushort)in[i+1-(i&1)];
+ (out+j)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_trifan_ubyte2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_quads_ubyte2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (ushort)in[i+0];
+ (out+j+0)[1] = (ushort)in[i+1];
+ (out+j+0)[2] = (ushort)in[i+3];
+ (out+j+3)[0] = (ushort)in[i+1];
+ (out+j+3)[1] = (ushort)in[i+2];
+ (out+j+3)[2] = (ushort)in[i+3];
+ }
+}
+static void translate_quadstrip_ubyte2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (ushort)in[i+2];
+ (out+j+0)[1] = (ushort)in[i+0];
+ (out+j+0)[2] = (ushort)in[i+3];
+ (out+j+3)[0] = (ushort)in[i+0];
+ (out+j+3)[1] = (ushort)in[i+1];
+ (out+j+3)[2] = (ushort)in[i+3];
+ }
+}
+static void translate_polygon_ubyte2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i+2];
+ (out+j)[2] = (ushort)in[0];
+ }
+}
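+/* translate_* variants converting ubyte indices to uint. */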
+static void translate_points_ubyte2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (uint)in[i];
+ }
+}
+static void translate_lines_ubyte2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (uint)in[i];
+ (out+i)[1] = (uint)in[i+1];
+ }
+}
+static void translate_linestrip_ubyte2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1];
+ }
+}
+static void translate_lineloop_ubyte2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1];
+ }
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[0];
+}
+static void translate_tris_ubyte2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (uint)in[i];
+ (out+i)[1] = (uint)in[i+1];
+ (out+i)[2] = (uint)in[i+2];
+ }
+}
+static void translate_tristrip_ubyte2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1+(i&1)];
+ (out+j)[2] = (uint)in[i+2-(i&1)];
+ }
+}
+static void translate_trifan_ubyte2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j)[2] = (uint)in[i+2];
+ }
+}
+static void translate_quads_ubyte2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (uint)in[i+0];
+ (out+j+0)[1] = (uint)in[i+1];
+ (out+j+0)[2] = (uint)in[i+3];
+ (out+j+3)[0] = (uint)in[i+1];
+ (out+j+3)[1] = (uint)in[i+2];
+ (out+j+3)[2] = (uint)in[i+3];
+ }
+}
+static void translate_quadstrip_ubyte2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (uint)in[i+2];
+ (out+j+0)[1] = (uint)in[i+0];
+ (out+j+0)[2] = (uint)in[i+3];
+ (out+j+3)[0] = (uint)in[i+0];
+ (out+j+3)[1] = (uint)in[i+1];
+ (out+j+3)[2] = (uint)in[i+3];
+ }
+}
+static void translate_polygon_ubyte2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j)[2] = (uint)in[i+2];
+ }
+}
+static void translate_points_ubyte2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (uint)in[i];
+ }
+}
+static void translate_lines_ubyte2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (uint)in[i+1];
+ (out+i)[1] = (uint)in[i];
+ }
+}
+static void translate_linestrip_ubyte2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i];
+ }
+}
+static void translate_lineloop_ubyte2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i];
+ }
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i];
+}
+static void translate_tris_ubyte2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (uint)in[i+1];
+ (out+i)[1] = (uint)in[i+2];
+ (out+i)[2] = (uint)in[i];
+ }
+}
+static void translate_tristrip_ubyte2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+1+(i&1)];
+ (out+j)[1] = (uint)in[i+2-(i&1)];
+ (out+j)[2] = (uint)in[i];
+ }
+}
+static void translate_trifan_ubyte2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i+2];
+ (out+j)[2] = (uint)in[0];
+ }
+}
+static void translate_quads_ubyte2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (uint)in[i+1];
+ (out+j+0)[1] = (uint)in[i+3];
+ (out+j+0)[2] = (uint)in[i+0];
+ (out+j+3)[0] = (uint)in[i+2];
+ (out+j+3)[1] = (uint)in[i+3];
+ (out+j+3)[2] = (uint)in[i+1];
+ }
+}
+static void translate_quadstrip_ubyte2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (uint)in[i+0];
+ (out+j+0)[1] = (uint)in[i+3];
+ (out+j+0)[2] = (uint)in[i+2];
+ (out+j+3)[0] = (uint)in[i+1];
+ (out+j+3)[1] = (uint)in[i+3];
+ (out+j+3)[2] = (uint)in[i+0];
+ }
+}
+static void translate_polygon_ubyte2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i+2];
+ (out+j)[2] = (uint)in[0];
+ }
+}
+static void translate_points_ubyte2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (uint)in[i];
+ }
+}
+static void translate_lines_ubyte2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (uint)in[i+1];
+ (out+i)[1] = (uint)in[i];
+ }
+}
+static void translate_linestrip_ubyte2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i];
+ }
+}
+static void translate_lineloop_ubyte2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i];
+ }
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i];
+}
+static void translate_tris_ubyte2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (uint)in[i+2];
+ (out+i)[1] = (uint)in[i];
+ (out+i)[2] = (uint)in[i+1];
+ }
+}
+static void translate_tristrip_ubyte2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+2];
+ (out+j)[1] = (uint)in[i+(i&1)];
+ (out+j)[2] = (uint)in[i+1-(i&1)];
+ }
+}
+static void translate_trifan_ubyte2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+2];
+ (out+j)[1] = (uint)in[0];
+ (out+j)[2] = (uint)in[i+1];
+ }
+}
+static void translate_quads_ubyte2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (uint)in[i+3];
+ (out+j+0)[1] = (uint)in[i+0];
+ (out+j+0)[2] = (uint)in[i+1];
+ (out+j+3)[0] = (uint)in[i+3];
+ (out+j+3)[1] = (uint)in[i+1];
+ (out+j+3)[2] = (uint)in[i+2];
+ }
+}
+static void translate_quadstrip_ubyte2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (uint)in[i+3];
+ (out+j+0)[1] = (uint)in[i+2];
+ (out+j+0)[2] = (uint)in[i+0];
+ (out+j+3)[0] = (uint)in[i+3];
+ (out+j+3)[1] = (uint)in[i+0];
+ (out+j+3)[2] = (uint)in[i+1];
+ }
+}
+static void translate_polygon_ubyte2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j)[2] = (uint)in[i+2];
+ }
+}
+static void translate_points_ubyte2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (uint)in[i];
+ }
+}
+static void translate_lines_ubyte2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (uint)in[i];
+ (out+i)[1] = (uint)in[i+1];
+ }
+}
+static void translate_linestrip_ubyte2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1];
+ }
+}
+static void translate_lineloop_ubyte2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1];
+ }
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[0];
+}
+static void translate_tris_ubyte2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (uint)in[i];
+ (out+i)[1] = (uint)in[i+1];
+ (out+i)[2] = (uint)in[i+2];
+ }
+}
+static void translate_tristrip_ubyte2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+(i&1)];
+ (out+j)[1] = (uint)in[i+1-(i&1)];
+ (out+j)[2] = (uint)in[i+2];
+ }
+}
+static void translate_trifan_ubyte2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j)[2] = (uint)in[i+2];
+ }
+}
+static void translate_quads_ubyte2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (uint)in[i+0];
+ (out+j+0)[1] = (uint)in[i+1];
+ (out+j+0)[2] = (uint)in[i+3];
+ (out+j+3)[0] = (uint)in[i+1];
+ (out+j+3)[1] = (uint)in[i+2];
+ (out+j+3)[2] = (uint)in[i+3];
+ }
+}
+static void translate_quadstrip_ubyte2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (uint)in[i+2];
+ (out+j+0)[1] = (uint)in[i+0];
+ (out+j+0)[2] = (uint)in[i+3];
+ (out+j+3)[0] = (uint)in[i+0];
+ (out+j+3)[1] = (uint)in[i+1];
+ (out+j+3)[2] = (uint)in[i+3];
+ }
+}
+static void translate_polygon_ubyte2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i+2];
+ (out+j)[2] = (uint)in[0];
+ }
+}
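+/* translate_* variants copying ushort indices to ushort: no type widening,
+ * only primitive decomposition and vertex-order adjustment. */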
+static void translate_points_ushort2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (ushort)in[i];
+ }
+}
+static void translate_lines_ushort2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (ushort)in[i];
+ (out+i)[1] = (ushort)in[i+1];
+ }
+}
+static void translate_linestrip_ushort2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1];
+ }
+}
+static void translate_lineloop_ushort2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1];
+ }
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[0];
+}
+static void translate_tris_ushort2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (ushort)in[i];
+ (out+i)[1] = (ushort)in[i+1];
+ (out+i)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_tristrip_ushort2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1+(i&1)];
+ (out+j)[2] = (ushort)in[i+2-(i&1)];
+ }
+}
+static void translate_trifan_ushort2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_quads_ushort2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (ushort)in[i+0];
+ (out+j+0)[1] = (ushort)in[i+1];
+ (out+j+0)[2] = (ushort)in[i+3];
+ (out+j+3)[0] = (ushort)in[i+1];
+ (out+j+3)[1] = (ushort)in[i+2];
+ (out+j+3)[2] = (ushort)in[i+3];
+ }
+}
+static void translate_quadstrip_ushort2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (ushort)in[i+2];
+ (out+j+0)[1] = (ushort)in[i+0];
+ (out+j+0)[2] = (ushort)in[i+3];
+ (out+j+3)[0] = (ushort)in[i+0];
+ (out+j+3)[1] = (ushort)in[i+1];
+ (out+j+3)[2] = (ushort)in[i+3];
+ }
+}
+static void translate_polygon_ushort2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_points_ushort2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (ushort)in[i];
+ }
+}
+static void translate_lines_ushort2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (ushort)in[i+1];
+ (out+i)[1] = (ushort)in[i];
+ }
+}
+static void translate_linestrip_ushort2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i];
+ }
+}
+static void translate_lineloop_ushort2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i];
+ }
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i];
+}
+static void translate_tris_ushort2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (ushort)in[i+1];
+ (out+i)[1] = (ushort)in[i+2];
+ (out+i)[2] = (ushort)in[i];
+ }
+}
+static void translate_tristrip_ushort2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+1+(i&1)];
+ (out+j)[1] = (ushort)in[i+2-(i&1)];
+ (out+j)[2] = (ushort)in[i];
+ }
+}
+static void translate_trifan_ushort2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i+2];
+ (out+j)[2] = (ushort)in[0];
+ }
+}
+static void translate_quads_ushort2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (ushort)in[i+1];
+ (out+j+0)[1] = (ushort)in[i+3];
+ (out+j+0)[2] = (ushort)in[i+0];
+ (out+j+3)[0] = (ushort)in[i+2];
+ (out+j+3)[1] = (ushort)in[i+3];
+ (out+j+3)[2] = (ushort)in[i+1];
+ }
+}
+static void translate_quadstrip_ushort2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (ushort)in[i+0];
+ (out+j+0)[1] = (ushort)in[i+3];
+ (out+j+0)[2] = (ushort)in[i+2];
+ (out+j+3)[0] = (ushort)in[i+1];
+ (out+j+3)[1] = (ushort)in[i+3];
+ (out+j+3)[2] = (ushort)in[i+0];
+ }
+}
+static void translate_polygon_ushort2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i+2];
+ (out+j)[2] = (ushort)in[0];
+ }
+}
+static void translate_points_ushort2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (ushort)in[i];
+ }
+}
+static void translate_lines_ushort2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (ushort)in[i+1];
+ (out+i)[1] = (ushort)in[i];
+ }
+}
+static void translate_linestrip_ushort2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i];
+ }
+}
+static void translate_lineloop_ushort2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i];
+ }
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i];
+}
+static void translate_tris_ushort2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (ushort)in[i+2];
+ (out+i)[1] = (ushort)in[i];
+ (out+i)[2] = (ushort)in[i+1];
+ }
+}
+static void translate_tristrip_ushort2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+2];
+ (out+j)[1] = (ushort)in[i+(i&1)];
+ (out+j)[2] = (ushort)in[i+1-(i&1)];
+ }
+}
+static void translate_trifan_ushort2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+2];
+ (out+j)[1] = (ushort)in[0];
+ (out+j)[2] = (ushort)in[i+1];
+ }
+}
+static void translate_quads_ushort2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (ushort)in[i+3];
+ (out+j+0)[1] = (ushort)in[i+0];
+ (out+j+0)[2] = (ushort)in[i+1];
+ (out+j+3)[0] = (ushort)in[i+3];
+ (out+j+3)[1] = (ushort)in[i+1];
+ (out+j+3)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_quadstrip_ushort2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (ushort)in[i+3];
+ (out+j+0)[1] = (ushort)in[i+2];
+ (out+j+0)[2] = (ushort)in[i+0];
+ (out+j+3)[0] = (ushort)in[i+3];
+ (out+j+3)[1] = (ushort)in[i+0];
+ (out+j+3)[2] = (ushort)in[i+1];
+ }
+}
+static void translate_polygon_ushort2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_points_ushort2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (ushort)in[i];
+ }
+}
+static void translate_lines_ushort2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (ushort)in[i];
+ (out+i)[1] = (ushort)in[i+1];
+ }
+}
+static void translate_linestrip_ushort2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1];
+ }
+}
+static void translate_lineloop_ushort2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1];
+ }
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[0];
+}
+static void translate_tris_ushort2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (ushort)in[i];
+ (out+i)[1] = (ushort)in[i+1];
+ (out+i)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_tristrip_ushort2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+(i&1)];
+ (out+j)[1] = (ushort)in[i+1-(i&1)];
+ (out+j)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_trifan_ushort2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_quads_ushort2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (ushort)in[i+0];
+ (out+j+0)[1] = (ushort)in[i+1];
+ (out+j+0)[2] = (ushort)in[i+3];
+ (out+j+3)[0] = (ushort)in[i+1];
+ (out+j+3)[1] = (ushort)in[i+2];
+ (out+j+3)[2] = (ushort)in[i+3];
+ }
+}
+static void translate_quadstrip_ushort2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (ushort)in[i+2];
+ (out+j+0)[1] = (ushort)in[i+0];
+ (out+j+0)[2] = (ushort)in[i+3];
+ (out+j+3)[0] = (ushort)in[i+0];
+ (out+j+3)[1] = (ushort)in[i+1];
+ (out+j+3)[2] = (ushort)in[i+3];
+ }
+}
+static void translate_polygon_ushort2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i+2];
+ (out+j)[2] = (ushort)in[0];
+ }
+}
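+/* translate_* variants widening ushort indices to uint. */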
+static void translate_points_ushort2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (uint)in[i];
+ }
+}
+static void translate_lines_ushort2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (uint)in[i];
+ (out+i)[1] = (uint)in[i+1];
+ }
+}
+static void translate_linestrip_ushort2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1];
+ }
+}
+static void translate_lineloop_ushort2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1];
+ }
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[0];
+}
+static void translate_tris_ushort2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (uint)in[i];
+ (out+i)[1] = (uint)in[i+1];
+ (out+i)[2] = (uint)in[i+2];
+ }
+}
+static void translate_tristrip_ushort2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1+(i&1)];
+ (out+j)[2] = (uint)in[i+2-(i&1)];
+ }
+}
+static void translate_trifan_ushort2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j)[2] = (uint)in[i+2];
+ }
+}
+static void translate_quads_ushort2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (uint)in[i+0];
+ (out+j+0)[1] = (uint)in[i+1];
+ (out+j+0)[2] = (uint)in[i+3];
+ (out+j+3)[0] = (uint)in[i+1];
+ (out+j+3)[1] = (uint)in[i+2];
+ (out+j+3)[2] = (uint)in[i+3];
+ }
+}
+static void translate_quadstrip_ushort2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (uint)in[i+2];
+ (out+j+0)[1] = (uint)in[i+0];
+ (out+j+0)[2] = (uint)in[i+3];
+ (out+j+3)[0] = (uint)in[i+0];
+ (out+j+3)[1] = (uint)in[i+1];
+ (out+j+3)[2] = (uint)in[i+3];
+ }
+}
+static void translate_polygon_ushort2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j)[2] = (uint)in[i+2];
+ }
+}
+static void translate_points_ushort2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (uint)in[i];
+ }
+}
+static void translate_lines_ushort2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (uint)in[i+1];
+ (out+i)[1] = (uint)in[i];
+ }
+}
+static void translate_linestrip_ushort2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i];
+ }
+}
+static void translate_lineloop_ushort2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i];
+ }
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i];
+}
+static void translate_tris_ushort2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (uint)in[i+1];
+ (out+i)[1] = (uint)in[i+2];
+ (out+i)[2] = (uint)in[i];
+ }
+}
+static void translate_tristrip_ushort2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+1+(i&1)];
+ (out+j)[1] = (uint)in[i+2-(i&1)];
+ (out+j)[2] = (uint)in[i];
+ }
+}
+static void translate_trifan_ushort2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i+2];
+ (out+j)[2] = (uint)in[0];
+ }
+}
+static void translate_quads_ushort2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (uint)in[i+1];
+ (out+j+0)[1] = (uint)in[i+3];
+ (out+j+0)[2] = (uint)in[i+0];
+ (out+j+3)[0] = (uint)in[i+2];
+ (out+j+3)[1] = (uint)in[i+3];
+ (out+j+3)[2] = (uint)in[i+1];
+ }
+}
+static void translate_quadstrip_ushort2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (uint)in[i+0];
+ (out+j+0)[1] = (uint)in[i+3];
+ (out+j+0)[2] = (uint)in[i+2];
+ (out+j+3)[0] = (uint)in[i+1];
+ (out+j+3)[1] = (uint)in[i+3];
+ (out+j+3)[2] = (uint)in[i+0];
+ }
+}
+static void translate_polygon_ushort2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i+2];
+ (out+j)[2] = (uint)in[0];
+ }
+}
+static void translate_points_ushort2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (uint)in[i];
+ }
+}
+static void translate_lines_ushort2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (uint)in[i+1];
+ (out+i)[1] = (uint)in[i];
+ }
+}
+static void translate_linestrip_ushort2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i];
+ }
+}
+static void translate_lineloop_ushort2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i];
+ }
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i];
+}
+static void translate_tris_ushort2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (uint)in[i+2];
+ (out+i)[1] = (uint)in[i];
+ (out+i)[2] = (uint)in[i+1];
+ }
+}
+static void translate_tristrip_ushort2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+2];
+ (out+j)[1] = (uint)in[i+(i&1)];
+ (out+j)[2] = (uint)in[i+1-(i&1)];
+ }
+}
+static void translate_trifan_ushort2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+2];
+ (out+j)[1] = (uint)in[0];
+ (out+j)[2] = (uint)in[i+1];
+ }
+}
+static void translate_quads_ushort2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (uint)in[i+3];
+ (out+j+0)[1] = (uint)in[i+0];
+ (out+j+0)[2] = (uint)in[i+1];
+ (out+j+3)[0] = (uint)in[i+3];
+ (out+j+3)[1] = (uint)in[i+1];
+ (out+j+3)[2] = (uint)in[i+2];
+ }
+}
+static void translate_quadstrip_ushort2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (uint)in[i+3];
+ (out+j+0)[1] = (uint)in[i+2];
+ (out+j+0)[2] = (uint)in[i+0];
+ (out+j+3)[0] = (uint)in[i+3];
+ (out+j+3)[1] = (uint)in[i+0];
+ (out+j+3)[2] = (uint)in[i+1];
+ }
+}
+static void translate_polygon_ushort2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j)[2] = (uint)in[i+2];
+ }
+}
+static void translate_points_ushort2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (uint)in[i];
+ }
+}
+static void translate_lines_ushort2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (uint)in[i];
+ (out+i)[1] = (uint)in[i+1];
+ }
+}
+static void translate_linestrip_ushort2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1];
+ }
+}
+static void translate_lineloop_ushort2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1];
+ }
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[0];
+}
+static void translate_tris_ushort2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (uint)in[i];
+ (out+i)[1] = (uint)in[i+1];
+ (out+i)[2] = (uint)in[i+2];
+ }
+}
+static void translate_tristrip_ushort2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+(i&1)];
+ (out+j)[1] = (uint)in[i+1-(i&1)];
+ (out+j)[2] = (uint)in[i+2];
+ }
+}
+static void translate_trifan_ushort2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j)[2] = (uint)in[i+2];
+ }
+}
+static void translate_quads_ushort2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (uint)in[i+0];
+ (out+j+0)[1] = (uint)in[i+1];
+ (out+j+0)[2] = (uint)in[i+3];
+ (out+j+3)[0] = (uint)in[i+1];
+ (out+j+3)[1] = (uint)in[i+2];
+ (out+j+3)[2] = (uint)in[i+3];
+ }
+}
+static void translate_quadstrip_ushort2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (uint)in[i+2];
+ (out+j+0)[1] = (uint)in[i+0];
+ (out+j+0)[2] = (uint)in[i+3];
+ (out+j+3)[0] = (uint)in[i+0];
+ (out+j+3)[1] = (uint)in[i+1];
+ (out+j+3)[2] = (uint)in[i+3];
+ }
+}
+static void translate_polygon_ushort2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i+2];
+ (out+j)[2] = (uint)in[0];
+ }
+}
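+/* translate_* variants narrowing uint indices to ushort. */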
+static void translate_points_uint2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (ushort)in[i];
+ }
+}
+static void translate_lines_uint2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (ushort)in[i];
+ (out+i)[1] = (ushort)in[i+1];
+ }
+}
+static void translate_linestrip_uint2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1];
+ }
+}
+static void translate_lineloop_uint2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1];
+ }
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[0];
+}
+static void translate_tris_uint2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (ushort)in[i];
+ (out+i)[1] = (ushort)in[i+1];
+ (out+i)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_tristrip_uint2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1+(i&1)];
+ (out+j)[2] = (ushort)in[i+2-(i&1)];
+ }
+}
+static void translate_trifan_uint2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_quads_uint2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (ushort)in[i+0];
+ (out+j+0)[1] = (ushort)in[i+1];
+ (out+j+0)[2] = (ushort)in[i+3];
+ (out+j+3)[0] = (ushort)in[i+1];
+ (out+j+3)[1] = (ushort)in[i+2];
+ (out+j+3)[2] = (ushort)in[i+3];
+ }
+}
+static void translate_quadstrip_uint2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (ushort)in[i+2];
+ (out+j+0)[1] = (ushort)in[i+0];
+ (out+j+0)[2] = (ushort)in[i+3];
+ (out+j+3)[0] = (ushort)in[i+0];
+ (out+j+3)[1] = (ushort)in[i+1];
+ (out+j+3)[2] = (ushort)in[i+3];
+ }
+}
+static void translate_polygon_uint2ushort_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_points_uint2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (ushort)in[i];
+ }
+}
+static void translate_lines_uint2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (ushort)in[i+1];
+ (out+i)[1] = (ushort)in[i];
+ }
+}
+static void translate_linestrip_uint2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i];
+ }
+}
+static void translate_lineloop_uint2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i];
+ }
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i];
+}
+static void translate_tris_uint2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (ushort)in[i+1];
+ (out+i)[1] = (ushort)in[i+2];
+ (out+i)[2] = (ushort)in[i];
+ }
+}
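
The _first2last variants re-order each primitive so the provoking vertex moves from the first slot to the last: translate_tris_uint2ushort_first2last above writes in[i+1], in[i+2], in[i], i.e. triangle (a, b, c) becomes (b, c, a). A minimal standalone sketch of that rotation, assuming only standard C; the helper name tri_first_to_last and the sample indices are invented for illustration and are not part of the generated file:

#include <stdio.h>

/* Illustration only: rotate one triangle's indices so the provoking
 * vertex moves from the first position to the last, mirroring the
 * in[i+1], in[i+2], in[i] pattern of the _first2last translators. */
static void tri_first_to_last(const unsigned in[3], unsigned short out[3])
{
   out[0] = (unsigned short)in[1];
   out[1] = (unsigned short)in[2];
   out[2] = (unsigned short)in[0];
}

int main(void)
{
   const unsigned tri[3] = { 10, 11, 12 };   /* provoking vertex is 10 */
   unsigned short rotated[3];

   tri_first_to_last(tri, rotated);
   /* prints 11 12 10: same triangle, provoking vertex now last */
   printf("%u %u %u\n", rotated[0], rotated[1], rotated[2]);
   return 0;
}
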
+static void translate_tristrip_uint2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+1+(i&1)];
+ (out+j)[1] = (ushort)in[i+2-(i&1)];
+ (out+j)[2] = (ushort)in[i];
+ }
+}
+static void translate_trifan_uint2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i+2];
+ (out+j)[2] = (ushort)in[0];
+ }
+}
+static void translate_quads_uint2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (ushort)in[i+1];
+ (out+j+0)[1] = (ushort)in[i+3];
+ (out+j+0)[2] = (ushort)in[i+0];
+ (out+j+3)[0] = (ushort)in[i+2];
+ (out+j+3)[1] = (ushort)in[i+3];
+ (out+j+3)[2] = (ushort)in[i+1];
+ }
+}
+static void translate_quadstrip_uint2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (ushort)in[i+0];
+ (out+j+0)[1] = (ushort)in[i+3];
+ (out+j+0)[2] = (ushort)in[i+2];
+ (out+j+3)[0] = (ushort)in[i+1];
+ (out+j+3)[1] = (ushort)in[i+3];
+ (out+j+3)[2] = (ushort)in[i+0];
+ }
+}
+static void translate_polygon_uint2ushort_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i+2];
+ (out+j)[2] = (ushort)in[0];
+ }
+}
+static void translate_points_uint2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (ushort)in[i];
+ }
+}
+static void translate_lines_uint2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (ushort)in[i+1];
+ (out+i)[1] = (ushort)in[i];
+ }
+}
+static void translate_linestrip_uint2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i];
+ }
+}
+static void translate_lineloop_uint2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i];
+ }
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i];
+}
+static void translate_tris_uint2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (ushort)in[i+2];
+ (out+i)[1] = (ushort)in[i];
+ (out+i)[2] = (ushort)in[i+1];
+ }
+}
+static void translate_tristrip_uint2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+2];
+ (out+j)[1] = (ushort)in[i+(i&1)];
+ (out+j)[2] = (ushort)in[i+1-(i&1)];
+ }
+}
+static void translate_trifan_uint2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+2];
+ (out+j)[1] = (ushort)in[0];
+ (out+j)[2] = (ushort)in[i+1];
+ }
+}
+static void translate_quads_uint2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (ushort)in[i+3];
+ (out+j+0)[1] = (ushort)in[i+0];
+ (out+j+0)[2] = (ushort)in[i+1];
+ (out+j+3)[0] = (ushort)in[i+3];
+ (out+j+3)[1] = (ushort)in[i+1];
+ (out+j+3)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_quadstrip_uint2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (ushort)in[i+3];
+ (out+j+0)[1] = (ushort)in[i+2];
+ (out+j+0)[2] = (ushort)in[i+0];
+ (out+j+3)[0] = (ushort)in[i+3];
+ (out+j+3)[1] = (ushort)in[i+0];
+ (out+j+3)[2] = (ushort)in[i+1];
+ }
+}
+static void translate_polygon_uint2ushort_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_points_uint2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (ushort)in[i];
+ }
+}
+static void translate_lines_uint2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (ushort)in[i];
+ (out+i)[1] = (ushort)in[i+1];
+ }
+}
+static void translate_linestrip_uint2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1];
+ }
+}
+static void translate_lineloop_uint2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1];
+ }
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[0];
+}
+static void translate_tris_uint2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (ushort)in[i];
+ (out+i)[1] = (ushort)in[i+1];
+ (out+i)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_tristrip_uint2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+(i&1)];
+ (out+j)[1] = (ushort)in[i+1-(i&1)];
+ (out+j)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_trifan_uint2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j)[2] = (ushort)in[i+2];
+ }
+}
+static void translate_quads_uint2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (ushort)in[i+0];
+ (out+j+0)[1] = (ushort)in[i+1];
+ (out+j+0)[2] = (ushort)in[i+3];
+ (out+j+3)[0] = (ushort)in[i+1];
+ (out+j+3)[1] = (ushort)in[i+2];
+ (out+j+3)[2] = (ushort)in[i+3];
+ }
+}
+static void translate_quadstrip_uint2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (ushort)in[i+2];
+ (out+j+0)[1] = (ushort)in[i+0];
+ (out+j+0)[2] = (ushort)in[i+3];
+ (out+j+3)[0] = (ushort)in[i+0];
+ (out+j+3)[1] = (ushort)in[i+1];
+ (out+j+3)[2] = (ushort)in[i+3];
+ }
+}
+static void translate_polygon_uint2ushort_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (ushort)in[i+1];
+ (out+j)[1] = (ushort)in[i+2];
+ (out+j)[2] = (ushort)in[0];
+ }
+}
+static void translate_points_uint2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (uint)in[i];
+ }
+}
+static void translate_lines_uint2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (uint)in[i];
+ (out+i)[1] = (uint)in[i+1];
+ }
+}
+static void translate_linestrip_uint2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1];
+ }
+}
+static void translate_lineloop_uint2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1];
+ }
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[0];
+}
+static void translate_tris_uint2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (uint)in[i];
+ (out+i)[1] = (uint)in[i+1];
+ (out+i)[2] = (uint)in[i+2];
+ }
+}
+static void translate_tristrip_uint2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1+(i&1)];
+ (out+j)[2] = (uint)in[i+2-(i&1)];
+ }
+}
+static void translate_trifan_uint2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j)[2] = (uint)in[i+2];
+ }
+}
+static void translate_quads_uint2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (uint)in[i+0];
+ (out+j+0)[1] = (uint)in[i+1];
+ (out+j+0)[2] = (uint)in[i+3];
+ (out+j+3)[0] = (uint)in[i+1];
+ (out+j+3)[1] = (uint)in[i+2];
+ (out+j+3)[2] = (uint)in[i+3];
+ }
+}
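
Each quads translator emits six indices for every four it reads, splitting the quad along its v1-v3 diagonal; in the first2first case above the quad (v0, v1, v2, v3) becomes the triangles (v0, v1, v3) and (v1, v2, v3). A minimal standalone sketch of that expansion; quad_to_tris and the sample data are invented for illustration, not taken from this file:

#include <stdio.h>

/* Illustration only: expand one quad (v0, v1, v2, v3) into the two
 * triangles (v0, v1, v3) and (v1, v2, v3), the same split performed by
 * translate_quads_uint2uint_first2first above. */
static void quad_to_tris(const unsigned quad[4], unsigned tris[6])
{
   tris[0] = quad[0];
   tris[1] = quad[1];
   tris[2] = quad[3];
   tris[3] = quad[1];
   tris[4] = quad[2];
   tris[5] = quad[3];
}

int main(void)
{
   const unsigned quad[4] = { 4, 5, 6, 7 };
   unsigned tris[6];
   unsigned k;

   quad_to_tris(quad, tris);
   for (k = 0; k < 6; k++)          /* prints 4 5 7 5 6 7 */
      printf("%u ", tris[k]);
   printf("\n");
   return 0;
}
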
+static void translate_quadstrip_uint2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (uint)in[i+2];
+ (out+j+0)[1] = (uint)in[i+0];
+ (out+j+0)[2] = (uint)in[i+3];
+ (out+j+3)[0] = (uint)in[i+0];
+ (out+j+3)[1] = (uint)in[i+1];
+ (out+j+3)[2] = (uint)in[i+3];
+ }
+}
+static void translate_polygon_uint2uint_first2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j)[2] = (uint)in[i+2];
+ }
+}
+static void translate_points_uint2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (uint)in[i];
+ }
+}
+static void translate_lines_uint2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (uint)in[i+1];
+ (out+i)[1] = (uint)in[i];
+ }
+}
+static void translate_linestrip_uint2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i];
+ }
+}
+static void translate_lineloop_uint2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i];
+ }
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i];
+}
+static void translate_tris_uint2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (uint)in[i+1];
+ (out+i)[1] = (uint)in[i+2];
+ (out+i)[2] = (uint)in[i];
+ }
+}
+static void translate_tristrip_uint2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+1+(i&1)];
+ (out+j)[1] = (uint)in[i+2-(i&1)];
+ (out+j)[2] = (uint)in[i];
+ }
+}
+static void translate_trifan_uint2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i+2];
+ (out+j)[2] = (uint)in[0];
+ }
+}
+static void translate_quads_uint2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (uint)in[i+1];
+ (out+j+0)[1] = (uint)in[i+3];
+ (out+j+0)[2] = (uint)in[i+0];
+ (out+j+3)[0] = (uint)in[i+2];
+ (out+j+3)[1] = (uint)in[i+3];
+ (out+j+3)[2] = (uint)in[i+1];
+ }
+}
+static void translate_quadstrip_uint2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (uint)in[i+0];
+ (out+j+0)[1] = (uint)in[i+3];
+ (out+j+0)[2] = (uint)in[i+2];
+ (out+j+3)[0] = (uint)in[i+1];
+ (out+j+3)[1] = (uint)in[i+3];
+ (out+j+3)[2] = (uint)in[i+0];
+ }
+}
+static void translate_polygon_uint2uint_first2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i+2];
+ (out+j)[2] = (uint)in[0];
+ }
+}
+static void translate_points_uint2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (uint)in[i];
+ }
+}
+static void translate_lines_uint2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (uint)in[i+1];
+ (out+i)[1] = (uint)in[i];
+ }
+}
+static void translate_linestrip_uint2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i];
+ }
+}
+static void translate_lineloop_uint2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i];
+ }
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i];
+}
+static void translate_tris_uint2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (uint)in[i+2];
+ (out+i)[1] = (uint)in[i];
+ (out+i)[2] = (uint)in[i+1];
+ }
+}
+static void translate_tristrip_uint2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+2];
+ (out+j)[1] = (uint)in[i+(i&1)];
+ (out+j)[2] = (uint)in[i+1-(i&1)];
+ }
+}
+static void translate_trifan_uint2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+2];
+ (out+j)[1] = (uint)in[0];
+ (out+j)[2] = (uint)in[i+1];
+ }
+}
+static void translate_quads_uint2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (uint)in[i+3];
+ (out+j+0)[1] = (uint)in[i+0];
+ (out+j+0)[2] = (uint)in[i+1];
+ (out+j+3)[0] = (uint)in[i+3];
+ (out+j+3)[1] = (uint)in[i+1];
+ (out+j+3)[2] = (uint)in[i+2];
+ }
+}
+static void translate_quadstrip_uint2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (uint)in[i+3];
+ (out+j+0)[1] = (uint)in[i+2];
+ (out+j+0)[2] = (uint)in[i+0];
+ (out+j+3)[0] = (uint)in[i+3];
+ (out+j+3)[1] = (uint)in[i+0];
+ (out+j+3)[2] = (uint)in[i+1];
+ }
+}
+static void translate_polygon_uint2uint_last2first(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j)[2] = (uint)in[i+2];
+ }
+}
+static void translate_points_uint2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i++) {
+ (out+i)[0] = (uint)in[i];
+ }
+}
+static void translate_lines_uint2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=2) {
+ (out+i)[0] = (uint)in[i];
+ (out+i)[1] = (uint)in[i+1];
+ }
+}
+static void translate_linestrip_uint2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=2, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1];
+ }
+}
+static void translate_lineloop_uint2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr - 2; j+=2, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1];
+ }
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[0];
+}
+static void translate_tris_uint2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (i = 0; i < nr; i+=3) {
+ (out+i)[0] = (uint)in[i];
+ (out+i)[1] = (uint)in[i+1];
+ (out+i)[2] = (uint)in[i+2];
+ }
+}
+static void translate_tristrip_uint2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+(i&1)];
+ (out+j)[1] = (uint)in[i+1-(i&1)];
+ (out+j)[2] = (uint)in[i+2];
+ }
+}
+static void translate_trifan_uint2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j)[2] = (uint)in[i+2];
+ }
+}
+static void translate_quads_uint2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=4) {
+ (out+j+0)[0] = (uint)in[i+0];
+ (out+j+0)[1] = (uint)in[i+1];
+ (out+j+0)[2] = (uint)in[i+3];
+ (out+j+3)[0] = (uint)in[i+1];
+ (out+j+3)[1] = (uint)in[i+2];
+ (out+j+3)[2] = (uint)in[i+3];
+ }
+}
+static void translate_quadstrip_uint2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=2) {
+ (out+j+0)[0] = (uint)in[i+2];
+ (out+j+0)[1] = (uint)in[i+0];
+ (out+j+0)[2] = (uint)in[i+3];
+ (out+j+3)[0] = (uint)in[i+0];
+ (out+j+3)[1] = (uint)in[i+1];
+ (out+j+3)[2] = (uint)in[i+3];
+ }
+}
+static void translate_polygon_uint2uint_last2last(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=3, i++) {
+ (out+j)[0] = (uint)in[i+1];
+ (out+j)[1] = (uint)in[i+2];
+ (out+j)[2] = (uint)in[0];
+ }
+}
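
u_index_init() below populates the generate[][][][] and translate[][][][][] function-pointer tables exactly once, keyed by output index size, input and output provoking-vertex convention, and primitive type, so a caller can pick the right helper with a single table lookup. A rough, self-contained sketch of that dispatch pattern under the assumption of a much smaller table; every name in it is invented for the sketch and is not part of this file:

#include <stdio.h>

/* Illustration only: a miniature function-pointer dispatch in the same
 * spirit as the generate/translate tables filled by u_index_init(). */
typedef void (*xlate_func)(const unsigned *in, unsigned nr, unsigned *out);

enum { SKETCH_POINTS, SKETCH_LINES, SKETCH_PRIM_COUNT };

static void xlate_points(const unsigned *in, unsigned nr, unsigned *out)
{
   unsigned i;
   for (i = 0; i < nr; i++)
      out[i] = in[i];
}

static void xlate_lines(const unsigned *in, unsigned nr, unsigned *out)
{
   unsigned i;
   for (i = 0; i < nr; i += 2) {
      out[i] = in[i];
      out[i + 1] = in[i + 1];
   }
}

static xlate_func sketch_table[SKETCH_PRIM_COUNT];

static void sketch_init(void)
{
   /* one-time initialization, same guard style as u_index_init() */
   static int firsttime = 1;
   if (!firsttime)
      return;
   firsttime = 0;
   sketch_table[SKETCH_POINTS] = xlate_points;
   sketch_table[SKETCH_LINES] = xlate_lines;
}

int main(void)
{
   const unsigned in[4] = { 1, 2, 3, 4 };
   unsigned out[4];
   unsigned i;

   sketch_init();
   sketch_table[SKETCH_LINES](in, 4, out);
   for (i = 0; i < 4; i++)          /* prints 1 2 3 4 */
      printf("%u ", out[i]);
   printf("\n");
   return 0;
}
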
+void u_index_init( void )
+{
+ static int firsttime = 1;
+ if (!firsttime) return;
+ firsttime = 0;
+generate[OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_POINTS] = generate_points_ushort_first2first;
+generate[OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINES] = generate_lines_ushort_first2first;
+generate[OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINE_STRIP] = generate_linestrip_ushort_first2first;
+generate[OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINE_LOOP] = generate_lineloop_ushort_first2first;
+generate[OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLES] = generate_tris_ushort_first2first;
+generate[OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLE_FAN] = generate_trifan_ushort_first2first;
+generate[OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLE_STRIP] = generate_tristrip_ushort_first2first;
+generate[OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_QUADS] = generate_quads_ushort_first2first;
+generate[OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_QUAD_STRIP] = generate_quadstrip_ushort_first2first;
+generate[OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_POLYGON] = generate_polygon_ushort_first2first;
+generate[OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_POINTS] = generate_points_ushort_first2last;
+generate[OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_LINES] = generate_lines_ushort_first2last;
+generate[OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_LINE_STRIP] = generate_linestrip_ushort_first2last;
+generate[OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_LINE_LOOP] = generate_lineloop_ushort_first2last;
+generate[OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLES] = generate_tris_ushort_first2last;
+generate[OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLE_FAN] = generate_trifan_ushort_first2last;
+generate[OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLE_STRIP] = generate_tristrip_ushort_first2last;
+generate[OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_QUADS] = generate_quads_ushort_first2last;
+generate[OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_QUAD_STRIP] = generate_quadstrip_ushort_first2last;
+generate[OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_POLYGON] = generate_polygon_ushort_first2last;
+generate[OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_POINTS] = generate_points_ushort_last2first;
+generate[OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_LINES] = generate_lines_ushort_last2first;
+generate[OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_LINE_STRIP] = generate_linestrip_ushort_last2first;
+generate[OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_LINE_LOOP] = generate_lineloop_ushort_last2first;
+generate[OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLES] = generate_tris_ushort_last2first;
+generate[OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLE_FAN] = generate_trifan_ushort_last2first;
+generate[OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLE_STRIP] = generate_tristrip_ushort_last2first;
+generate[OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_QUADS] = generate_quads_ushort_last2first;
+generate[OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_QUAD_STRIP] = generate_quadstrip_ushort_last2first;
+generate[OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_POLYGON] = generate_polygon_ushort_last2first;
+generate[OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_POINTS] = generate_points_ushort_last2last;
+generate[OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_LINES] = generate_lines_ushort_last2last;
+generate[OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_LINE_STRIP] = generate_linestrip_ushort_last2last;
+generate[OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_LINE_LOOP] = generate_lineloop_ushort_last2last;
+generate[OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLES] = generate_tris_ushort_last2last;
+generate[OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLE_FAN] = generate_trifan_ushort_last2last;
+generate[OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLE_STRIP] = generate_tristrip_ushort_last2last;
+generate[OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_QUADS] = generate_quads_ushort_last2last;
+generate[OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_QUAD_STRIP] = generate_quadstrip_ushort_last2last;
+generate[OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_POLYGON] = generate_polygon_ushort_last2last;
+generate[OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_POINTS] = generate_points_uint_first2first;
+generate[OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINES] = generate_lines_uint_first2first;
+generate[OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINE_STRIP] = generate_linestrip_uint_first2first;
+generate[OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINE_LOOP] = generate_lineloop_uint_first2first;
+generate[OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLES] = generate_tris_uint_first2first;
+generate[OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLE_FAN] = generate_trifan_uint_first2first;
+generate[OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLE_STRIP] = generate_tristrip_uint_first2first;
+generate[OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_QUADS] = generate_quads_uint_first2first;
+generate[OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_QUAD_STRIP] = generate_quadstrip_uint_first2first;
+generate[OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_POLYGON] = generate_polygon_uint_first2first;
+generate[OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_POINTS] = generate_points_uint_first2last;
+generate[OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_LINES] = generate_lines_uint_first2last;
+generate[OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_LINE_STRIP] = generate_linestrip_uint_first2last;
+generate[OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_LINE_LOOP] = generate_lineloop_uint_first2last;
+generate[OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLES] = generate_tris_uint_first2last;
+generate[OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLE_FAN] = generate_trifan_uint_first2last;
+generate[OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLE_STRIP] = generate_tristrip_uint_first2last;
+generate[OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_QUADS] = generate_quads_uint_first2last;
+generate[OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_QUAD_STRIP] = generate_quadstrip_uint_first2last;
+generate[OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_POLYGON] = generate_polygon_uint_first2last;
+generate[OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_POINTS] = generate_points_uint_last2first;
+generate[OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_LINES] = generate_lines_uint_last2first;
+generate[OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_LINE_STRIP] = generate_linestrip_uint_last2first;
+generate[OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_LINE_LOOP] = generate_lineloop_uint_last2first;
+generate[OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLES] = generate_tris_uint_last2first;
+generate[OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLE_FAN] = generate_trifan_uint_last2first;
+generate[OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLE_STRIP] = generate_tristrip_uint_last2first;
+generate[OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_QUADS] = generate_quads_uint_last2first;
+generate[OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_QUAD_STRIP] = generate_quadstrip_uint_last2first;
+generate[OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_POLYGON] = generate_polygon_uint_last2first;
+generate[OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_POINTS] = generate_points_uint_last2last;
+generate[OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_LINES] = generate_lines_uint_last2last;
+generate[OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_LINE_STRIP] = generate_linestrip_uint_last2last;
+generate[OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_LINE_LOOP] = generate_lineloop_uint_last2last;
+generate[OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLES] = generate_tris_uint_last2last;
+generate[OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLE_FAN] = generate_trifan_uint_last2last;
+generate[OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLE_STRIP] = generate_tristrip_uint_last2last;
+generate[OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_QUADS] = generate_quads_uint_last2last;
+generate[OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_QUAD_STRIP] = generate_quadstrip_uint_last2last;
+generate[OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_POLYGON] = generate_polygon_uint_last2last;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_POINTS] = translate_points_ubyte2ushort_first2first;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINES] = translate_lines_ubyte2ushort_first2first;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_ubyte2ushort_first2first;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_ubyte2ushort_first2first;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLES] = translate_tris_ubyte2ushort_first2first;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ubyte2ushort_first2first;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ubyte2ushort_first2first;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_QUADS] = translate_quads_ubyte2ushort_first2first;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ubyte2ushort_first2first;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_POLYGON] = translate_polygon_ubyte2ushort_first2first;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_POINTS] = translate_points_ubyte2ushort_first2last;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_LINES] = translate_lines_ubyte2ushort_first2last;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_ubyte2ushort_first2last;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_ubyte2ushort_first2last;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLES] = translate_tris_ubyte2ushort_first2last;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ubyte2ushort_first2last;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ubyte2ushort_first2last;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_QUADS] = translate_quads_ubyte2ushort_first2last;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ubyte2ushort_first2last;
+translate[IN_UBYTE][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_POLYGON] = translate_polygon_ubyte2ushort_first2last;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_POINTS] = translate_points_ubyte2ushort_last2first;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_LINES] = translate_lines_ubyte2ushort_last2first;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_ubyte2ushort_last2first;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_ubyte2ushort_last2first;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLES] = translate_tris_ubyte2ushort_last2first;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ubyte2ushort_last2first;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ubyte2ushort_last2first;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_QUADS] = translate_quads_ubyte2ushort_last2first;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ubyte2ushort_last2first;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_POLYGON] = translate_polygon_ubyte2ushort_last2first;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_POINTS] = translate_points_ubyte2ushort_last2last;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_LINES] = translate_lines_ubyte2ushort_last2last;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_ubyte2ushort_last2last;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_ubyte2ushort_last2last;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLES] = translate_tris_ubyte2ushort_last2last;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ubyte2ushort_last2last;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ubyte2ushort_last2last;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_QUADS] = translate_quads_ubyte2ushort_last2last;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ubyte2ushort_last2last;
+translate[IN_UBYTE][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_POLYGON] = translate_polygon_ubyte2ushort_last2last;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_POINTS] = translate_points_ubyte2uint_first2first;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINES] = translate_lines_ubyte2uint_first2first;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_ubyte2uint_first2first;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_ubyte2uint_first2first;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLES] = translate_tris_ubyte2uint_first2first;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ubyte2uint_first2first;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ubyte2uint_first2first;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_QUADS] = translate_quads_ubyte2uint_first2first;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ubyte2uint_first2first;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_POLYGON] = translate_polygon_ubyte2uint_first2first;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_POINTS] = translate_points_ubyte2uint_first2last;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_LINES] = translate_lines_ubyte2uint_first2last;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_ubyte2uint_first2last;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_ubyte2uint_first2last;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLES] = translate_tris_ubyte2uint_first2last;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ubyte2uint_first2last;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ubyte2uint_first2last;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_QUADS] = translate_quads_ubyte2uint_first2last;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ubyte2uint_first2last;
+translate[IN_UBYTE][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_POLYGON] = translate_polygon_ubyte2uint_first2last;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_POINTS] = translate_points_ubyte2uint_last2first;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_LINES] = translate_lines_ubyte2uint_last2first;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_ubyte2uint_last2first;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_ubyte2uint_last2first;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLES] = translate_tris_ubyte2uint_last2first;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ubyte2uint_last2first;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ubyte2uint_last2first;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_QUADS] = translate_quads_ubyte2uint_last2first;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ubyte2uint_last2first;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_POLYGON] = translate_polygon_ubyte2uint_last2first;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_POINTS] = translate_points_ubyte2uint_last2last;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_LINES] = translate_lines_ubyte2uint_last2last;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_ubyte2uint_last2last;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_ubyte2uint_last2last;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLES] = translate_tris_ubyte2uint_last2last;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ubyte2uint_last2last;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ubyte2uint_last2last;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_QUADS] = translate_quads_ubyte2uint_last2last;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ubyte2uint_last2last;
+translate[IN_UBYTE][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_POLYGON] = translate_polygon_ubyte2uint_last2last;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_POINTS] = translate_points_ushort2ushort_first2first;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINES] = translate_lines_ushort2ushort_first2first;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_ushort2ushort_first2first;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_ushort2ushort_first2first;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLES] = translate_tris_ushort2ushort_first2first;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ushort2ushort_first2first;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ushort2ushort_first2first;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_QUADS] = translate_quads_ushort2ushort_first2first;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ushort2ushort_first2first;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_POLYGON] = translate_polygon_ushort2ushort_first2first;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_POINTS] = translate_points_ushort2ushort_first2last;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_LINES] = translate_lines_ushort2ushort_first2last;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_ushort2ushort_first2last;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_ushort2ushort_first2last;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLES] = translate_tris_ushort2ushort_first2last;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ushort2ushort_first2last;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ushort2ushort_first2last;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_QUADS] = translate_quads_ushort2ushort_first2last;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ushort2ushort_first2last;
+translate[IN_USHORT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_POLYGON] = translate_polygon_ushort2ushort_first2last;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_POINTS] = translate_points_ushort2ushort_last2first;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_LINES] = translate_lines_ushort2ushort_last2first;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_ushort2ushort_last2first;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_ushort2ushort_last2first;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLES] = translate_tris_ushort2ushort_last2first;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ushort2ushort_last2first;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ushort2ushort_last2first;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_QUADS] = translate_quads_ushort2ushort_last2first;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ushort2ushort_last2first;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_POLYGON] = translate_polygon_ushort2ushort_last2first;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_POINTS] = translate_points_ushort2ushort_last2last;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_LINES] = translate_lines_ushort2ushort_last2last;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_ushort2ushort_last2last;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_ushort2ushort_last2last;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLES] = translate_tris_ushort2ushort_last2last;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ushort2ushort_last2last;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ushort2ushort_last2last;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_QUADS] = translate_quads_ushort2ushort_last2last;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ushort2ushort_last2last;
+translate[IN_USHORT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_POLYGON] = translate_polygon_ushort2ushort_last2last;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_POINTS] = translate_points_ushort2uint_first2first;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINES] = translate_lines_ushort2uint_first2first;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_ushort2uint_first2first;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_ushort2uint_first2first;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLES] = translate_tris_ushort2uint_first2first;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ushort2uint_first2first;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ushort2uint_first2first;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_QUADS] = translate_quads_ushort2uint_first2first;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ushort2uint_first2first;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_POLYGON] = translate_polygon_ushort2uint_first2first;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_POINTS] = translate_points_ushort2uint_first2last;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_LINES] = translate_lines_ushort2uint_first2last;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_ushort2uint_first2last;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_ushort2uint_first2last;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLES] = translate_tris_ushort2uint_first2last;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ushort2uint_first2last;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ushort2uint_first2last;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_QUADS] = translate_quads_ushort2uint_first2last;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ushort2uint_first2last;
+translate[IN_USHORT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_POLYGON] = translate_polygon_ushort2uint_first2last;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_POINTS] = translate_points_ushort2uint_last2first;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_LINES] = translate_lines_ushort2uint_last2first;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_ushort2uint_last2first;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_ushort2uint_last2first;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLES] = translate_tris_ushort2uint_last2first;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ushort2uint_last2first;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ushort2uint_last2first;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_QUADS] = translate_quads_ushort2uint_last2first;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ushort2uint_last2first;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_POLYGON] = translate_polygon_ushort2uint_last2first;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_POINTS] = translate_points_ushort2uint_last2last;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_LINES] = translate_lines_ushort2uint_last2last;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_ushort2uint_last2last;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_ushort2uint_last2last;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLES] = translate_tris_ushort2uint_last2last;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ushort2uint_last2last;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ushort2uint_last2last;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_QUADS] = translate_quads_ushort2uint_last2last;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ushort2uint_last2last;
+translate[IN_USHORT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_POLYGON] = translate_polygon_ushort2uint_last2last;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_POINTS] = translate_points_uint2ushort_first2first;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINES] = translate_lines_uint2ushort_first2first;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_uint2ushort_first2first;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_uint2ushort_first2first;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLES] = translate_tris_uint2ushort_first2first;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_uint2ushort_first2first;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_uint2ushort_first2first;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_QUADS] = translate_quads_uint2ushort_first2first;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_uint2ushort_first2first;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_FIRST][PIPE_PRIM_POLYGON] = translate_polygon_uint2ushort_first2first;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_POINTS] = translate_points_uint2ushort_first2last;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_LINES] = translate_lines_uint2ushort_first2last;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_uint2ushort_first2last;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_uint2ushort_first2last;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLES] = translate_tris_uint2ushort_first2last;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_uint2ushort_first2last;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_uint2ushort_first2last;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_QUADS] = translate_quads_uint2ushort_first2last;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_uint2ushort_first2last;
+translate[IN_UINT][OUT_USHORT][PV_FIRST][PV_LAST][PIPE_PRIM_POLYGON] = translate_polygon_uint2ushort_first2last;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_POINTS] = translate_points_uint2ushort_last2first;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_LINES] = translate_lines_uint2ushort_last2first;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_uint2ushort_last2first;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_uint2ushort_last2first;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLES] = translate_tris_uint2ushort_last2first;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_uint2ushort_last2first;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_uint2ushort_last2first;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_QUADS] = translate_quads_uint2ushort_last2first;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_uint2ushort_last2first;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_FIRST][PIPE_PRIM_POLYGON] = translate_polygon_uint2ushort_last2first;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_POINTS] = translate_points_uint2ushort_last2last;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_LINES] = translate_lines_uint2ushort_last2last;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_uint2ushort_last2last;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_uint2ushort_last2last;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLES] = translate_tris_uint2ushort_last2last;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_uint2ushort_last2last;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_uint2ushort_last2last;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_QUADS] = translate_quads_uint2ushort_last2last;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_uint2ushort_last2last;
+translate[IN_UINT][OUT_USHORT][PV_LAST][PV_LAST][PIPE_PRIM_POLYGON] = translate_polygon_uint2ushort_last2last;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_POINTS] = translate_points_uint2uint_first2first;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINES] = translate_lines_uint2uint_first2first;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_uint2uint_first2first;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_uint2uint_first2first;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLES] = translate_tris_uint2uint_first2first;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_uint2uint_first2first;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_uint2uint_first2first;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_QUADS] = translate_quads_uint2uint_first2first;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_uint2uint_first2first;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_FIRST][PIPE_PRIM_POLYGON] = translate_polygon_uint2uint_first2first;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_POINTS] = translate_points_uint2uint_first2last;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_LINES] = translate_lines_uint2uint_first2last;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_uint2uint_first2last;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_uint2uint_first2last;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLES] = translate_tris_uint2uint_first2last;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_uint2uint_first2last;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_uint2uint_first2last;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_QUADS] = translate_quads_uint2uint_first2last;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_uint2uint_first2last;
+translate[IN_UINT][OUT_UINT][PV_FIRST][PV_LAST][PIPE_PRIM_POLYGON] = translate_polygon_uint2uint_first2last;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_POINTS] = translate_points_uint2uint_last2first;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_LINES] = translate_lines_uint2uint_last2first;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_uint2uint_last2first;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_uint2uint_last2first;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLES] = translate_tris_uint2uint_last2first;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_uint2uint_last2first;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_uint2uint_last2first;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_QUADS] = translate_quads_uint2uint_last2first;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_uint2uint_last2first;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_FIRST][PIPE_PRIM_POLYGON] = translate_polygon_uint2uint_last2first;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_POINTS] = translate_points_uint2uint_last2last;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_LINES] = translate_lines_uint2uint_last2last;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_LINE_STRIP] = translate_linestrip_uint2uint_last2last;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_LINE_LOOP] = translate_lineloop_uint2uint_last2last;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLES] = translate_tris_uint2uint_last2last;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_uint2uint_last2last;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_uint2uint_last2last;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_QUADS] = translate_quads_uint2uint_last2last;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_uint2uint_last2last;
+translate[IN_UINT][OUT_UINT][PV_LAST][PV_LAST][PIPE_PRIM_POLYGON] = translate_polygon_uint2uint_last2last;
+}
+#include "indices/u_indices.c"
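The init function above is the payoff of the generated file: every translator is registered once in a function-pointer table indexed by input index type, output index type, input and output provoking-vertex convention, and primitive, so callers pick the right routine with plain array indexing. A minimal, self-contained sketch of that dispatch pattern (not part of the patch; all demo_* names are hypothetical):

    #include <stdio.h>

    /* Miniature of the translate[][][][][] table above: a function-pointer
     * type, a table indexed by small enums, one registration, one indexed
     * call. */
    typedef void (*demo_translate_func)(const void *in, unsigned nr, void *out);

    enum { DEMO_IN_USHORT, DEMO_IN_UINT, DEMO_IN_COUNT };
    enum { DEMO_OUT_USHORT, DEMO_OUT_UINT, DEMO_OUT_COUNT };

    static void demo_ushort2uint(const void *in, unsigned nr, void *out)
    {
       const unsigned short *src = in;
       unsigned *dst = out;
       unsigned i;
       for (i = 0; i < nr; i++)
          dst[i] = src[i];               /* widen each index */
    }

    static demo_translate_func demo_table[DEMO_IN_COUNT][DEMO_OUT_COUNT];

    int main(void)
    {
       unsigned short in[3] = { 10, 11, 12 };
       unsigned out[3];

       demo_table[DEMO_IN_USHORT][DEMO_OUT_UINT] = demo_ushort2uint;  /* init   */
       demo_table[DEMO_IN_USHORT][DEMO_OUT_UINT](in, 3, out);         /* lookup */

       printf("%u %u %u\n", out[0], out[1], out[2]);
       return 0;
    }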
diff --git a/src/gallium/auxiliary/indices/u_indices_gen.py b/src/gallium/auxiliary/indices/u_indices_gen.py
new file mode 100644
index 0000000000..af63d09930
--- /dev/null
+++ b/src/gallium/auxiliary/indices/u_indices_gen.py
@@ -0,0 +1,319 @@
+#!/usr/bin/env python
+copyright = '''
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+'''
+
+GENERATE, UBYTE, USHORT, UINT = 'generate', 'ubyte', 'ushort', 'uint'
+FIRST, LAST = 'first', 'last'
+
+INTYPES = (GENERATE, UBYTE, USHORT, UINT)
+OUTTYPES = (USHORT, UINT)
+PVS=(FIRST, LAST)
+PRIMS=('points',
+ 'lines',
+ 'linestrip',
+ 'lineloop',
+ 'tris',
+ 'trifan',
+ 'tristrip',
+ 'quads',
+ 'quadstrip',
+ 'polygon')
+
+LONGPRIMS=('PIPE_PRIM_POINTS',
+ 'PIPE_PRIM_LINES',
+ 'PIPE_PRIM_LINE_STRIP',
+ 'PIPE_PRIM_LINE_LOOP',
+ 'PIPE_PRIM_TRIANGLES',
+ 'PIPE_PRIM_TRIANGLE_FAN',
+ 'PIPE_PRIM_TRIANGLE_STRIP',
+ 'PIPE_PRIM_QUADS',
+ 'PIPE_PRIM_QUAD_STRIP',
+ 'PIPE_PRIM_POLYGON')
+
+longprim = dict(zip(PRIMS, LONGPRIMS))
+intype_idx = dict(ubyte='IN_UBYTE', ushort='IN_USHORT', uint='IN_UINT')
+outtype_idx = dict(ushort='OUT_USHORT', uint='OUT_UINT')
+pv_idx = dict(first='PV_FIRST', last='PV_LAST')
+
+
+def prolog():
+    print '''/* File automatically generated by u_indices_gen.py */'''
+ print copyright
+ print r'''
+
+/**
+ * @file
+ * Functions to translate and generate index lists
+ */
+
+#include "indices/u_indices.h"
+#include "indices/u_indices_priv.h"
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+
+
+static unsigned out_size_idx( unsigned index_size )
+{
+ switch (index_size) {
+ case 4: return OUT_UINT;
+ case 2: return OUT_USHORT;
+ default: assert(0); return OUT_USHORT;
+ }
+}
+
+static unsigned in_size_idx( unsigned index_size )
+{
+ switch (index_size) {
+ case 4: return IN_UINT;
+ case 2: return IN_USHORT;
+ case 1: return IN_UBYTE;
+ default: assert(0); return IN_UBYTE;
+ }
+}
+
+
+static u_translate_func translate[IN_COUNT][OUT_COUNT][PV_COUNT][PV_COUNT][PRIM_COUNT];
+static u_generate_func generate[OUT_COUNT][PV_COUNT][PV_COUNT][PRIM_COUNT];
+
+
+'''
+
+def vert( intype, outtype, v0 ):
+ if intype == GENERATE:
+ return '(' + outtype + ')(' + v0 + ')'
+ else:
+ return '(' + outtype + ')in[' + v0 + ']'
+
+def point( intype, outtype, ptr, v0 ):
+ print ' (' + ptr + ')[0] = ' + vert( intype, outtype, v0 ) + ';'
+
+def line( intype, outtype, ptr, v0, v1 ):
+ print ' (' + ptr + ')[0] = ' + vert( intype, outtype, v0 ) + ';'
+ print ' (' + ptr + ')[1] = ' + vert( intype, outtype, v1 ) + ';'
+
+def tri( intype, outtype, ptr, v0, v1, v2 ):
+ print ' (' + ptr + ')[0] = ' + vert( intype, outtype, v0 ) + ';'
+ print ' (' + ptr + ')[1] = ' + vert( intype, outtype, v1 ) + ';'
+ print ' (' + ptr + ')[2] = ' + vert( intype, outtype, v2 ) + ';'
+
+def do_point( intype, outtype, ptr, v0 ):
+ point( intype, outtype, ptr, v0 )
+
+def do_line( intype, outtype, ptr, v0, v1, inpv, outpv ):
+ if inpv == outpv:
+ line( intype, outtype, ptr, v0, v1 )
+ else:
+ line( intype, outtype, ptr, v1, v0 )
+
+def do_tri( intype, outtype, ptr, v0, v1, v2, inpv, outpv ):
+ if inpv == outpv:
+ tri( intype, outtype, ptr, v0, v1, v2 )
+ else:
+ if inpv == FIRST:
+ tri( intype, outtype, ptr, v1, v2, v0 )
+ else:
+ tri( intype, outtype, ptr, v2, v0, v1 )
+
+def do_quad( intype, outtype, ptr, v0, v1, v2, v3, inpv, outpv ):
+ do_tri( intype, outtype, ptr+'+0', v0, v1, v3, inpv, outpv );
+ do_tri( intype, outtype, ptr+'+3', v1, v2, v3, inpv, outpv );
+
+def name(intype, outtype, inpv, outpv, prim):
+ if intype == GENERATE:
+ return 'generate_' + prim + '_' + outtype + '_' + inpv + '2' + outpv
+ else:
+ return 'translate_' + prim + '_' + intype + '2' + outtype + '_' + inpv + '2' + outpv
+
+def preamble(intype, outtype, inpv, outpv, prim):
+ print 'static void ' + name( intype, outtype, inpv, outpv, prim ) + '('
+ if intype != GENERATE:
+ print ' const void * _in,'
+ print ' unsigned nr,'
+ print ' void *_out )'
+ print '{'
+ if intype != GENERATE:
+ print ' const ' + intype + '*in = (const ' + intype + '*)_in;'
+ print ' ' + outtype + ' *out = (' + outtype + '*)_out;'
+ print ' unsigned i, j;'
+ print ' (void)j;'
+
+def postamble():
+ print '}'
+
+
+def points(intype, outtype, inpv, outpv):
+ preamble(intype, outtype, inpv, outpv, prim='points')
+ print ' for (i = 0; i < nr; i++) { '
+ do_point( intype, outtype, 'out+i', 'i' );
+ print ' }'
+ postamble()
+
+def lines(intype, outtype, inpv, outpv):
+ preamble(intype, outtype, inpv, outpv, prim='lines')
+ print ' for (i = 0; i < nr; i+=2) { '
+ do_line( intype, outtype, 'out+i', 'i', 'i+1', inpv, outpv );
+ print ' }'
+ postamble()
+
+def linestrip(intype, outtype, inpv, outpv):
+ preamble(intype, outtype, inpv, outpv, prim='linestrip')
+ print ' for (j = i = 0; j < nr; j+=2, i++) { '
+ do_line( intype, outtype, 'out+j', 'i', 'i+1', inpv, outpv );
+ print ' }'
+ postamble()
+
+def lineloop(intype, outtype, inpv, outpv):
+ preamble(intype, outtype, inpv, outpv, prim='lineloop')
+ print ' for (j = i = 0; j < nr - 2; j+=2, i++) { '
+ do_line( intype, outtype, 'out+j', 'i', 'i+1', inpv, outpv );
+ print ' }'
+ do_line( intype, outtype, 'out+j', 'i', '0', inpv, outpv );
+ postamble()
+
+def tris(intype, outtype, inpv, outpv):
+ preamble(intype, outtype, inpv, outpv, prim='tris')
+ print ' for (i = 0; i < nr; i+=3) { '
+ do_tri( intype, outtype, 'out+i', 'i', 'i+1', 'i+2', inpv, outpv );
+ print ' }'
+ postamble()
+
+
+def tristrip(intype, outtype, inpv, outpv):
+ preamble(intype, outtype, inpv, outpv, prim='tristrip')
+ print ' for (j = i = 0; j < nr; j+=3, i++) { '
+ if inpv == FIRST:
+ do_tri( intype, outtype, 'out+j', 'i', 'i+1+(i&1)', 'i+2-(i&1)', inpv, outpv );
+ else:
+ do_tri( intype, outtype, 'out+j', 'i+(i&1)', 'i+1-(i&1)', 'i+2', inpv, outpv );
+ print ' }'
+ postamble()
+
+
+def trifan(intype, outtype, inpv, outpv):
+ preamble(intype, outtype, inpv, outpv, prim='trifan')
+ print ' for (j = i = 0; j < nr; j+=3, i++) { '
+ do_tri( intype, outtype, 'out+j', '0', 'i+1', 'i+2', inpv, outpv );
+ print ' }'
+ postamble()
+
+
+
+def polygon(intype, outtype, inpv, outpv):
+ preamble(intype, outtype, inpv, outpv, prim='polygon')
+ print ' for (j = i = 0; j < nr; j+=3, i++) { '
+ if inpv == FIRST:
+ do_tri( intype, outtype, 'out+j', '0', 'i+1', 'i+2', inpv, outpv );
+ else:
+ do_tri( intype, outtype, 'out+j', 'i+1', 'i+2', '0', inpv, outpv );
+ print ' }'
+ postamble()
+
+
+def quads(intype, outtype, inpv, outpv):
+ preamble(intype, outtype, inpv, outpv, prim='quads')
+ print ' for (j = i = 0; j < nr; j+=6, i+=4) { '
+ do_quad( intype, outtype, 'out+j', 'i+0', 'i+1', 'i+2', 'i+3', inpv, outpv );
+ print ' }'
+ postamble()
+
+
+def quadstrip(intype, outtype, inpv, outpv):
+ preamble(intype, outtype, inpv, outpv, prim='quadstrip')
+ print ' for (j = i = 0; j < nr; j+=6, i+=2) { '
+ do_quad( intype, outtype, 'out+j', 'i+2', 'i+0', 'i+1', 'i+3', inpv, outpv );
+ print ' }'
+ postamble()
+
+
+def emit_funcs():
+ for intype in INTYPES:
+ for outtype in OUTTYPES:
+ for inpv in (FIRST, LAST):
+ for outpv in (FIRST, LAST):
+ points(intype, outtype, inpv, outpv)
+ lines(intype, outtype, inpv, outpv)
+ linestrip(intype, outtype, inpv, outpv)
+ lineloop(intype, outtype, inpv, outpv)
+ tris(intype, outtype, inpv, outpv)
+ tristrip(intype, outtype, inpv, outpv)
+ trifan(intype, outtype, inpv, outpv)
+ quads(intype, outtype, inpv, outpv)
+ quadstrip(intype, outtype, inpv, outpv)
+ polygon(intype, outtype, inpv, outpv)
+
+def init(intype, outtype, inpv, outpv, prim):
+ if intype == GENERATE:
+ print ('generate[' +
+ outtype_idx[outtype] +
+ '][' + pv_idx[inpv] +
+ '][' + pv_idx[outpv] +
+ '][' + longprim[prim] +
+ '] = ' + name( intype, outtype, inpv, outpv, prim ) + ';')
+ else:
+ print ('translate[' +
+ intype_idx[intype] +
+ '][' + outtype_idx[outtype] +
+ '][' + pv_idx[inpv] +
+ '][' + pv_idx[outpv] +
+ '][' + longprim[prim] +
+ '] = ' + name( intype, outtype, inpv, outpv, prim ) + ';')
+
+
+def emit_all_inits():
+ for intype in INTYPES:
+ for outtype in OUTTYPES:
+ for inpv in PVS:
+ for outpv in PVS:
+ for prim in PRIMS:
+ init(intype, outtype, inpv, outpv, prim)
+
+def emit_init():
+ print 'void u_index_init( void )'
+ print '{'
+ print ' static int firsttime = 1;'
+ print ' if (!firsttime) return;'
+ print ' firsttime = 0;'
+ emit_all_inits()
+ print '}'
+
+
+
+
+def epilog():
+ print '#include "indices/u_indices.c"'
+
+
+def main():
+ prolog()
+ emit_funcs()
+ emit_init()
+ epilog()
+
+
+if __name__ == '__main__':
+ main()
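To see the shape of the code the script emits, take tris() with intype 'ubyte', outtype 'ushort', inpv 'first' and outpv 'last': preamble() writes the casts and the loop header, and do_tri() rotates the vertex order because the two provoking-vertex conventions differ. The output is roughly the following C, reconstructed here from the generator logic above rather than copied from the checked-in file (the ubyte/ushort typedefs normally come from pipe/p_compiler.h):

    typedef unsigned char ubyte;    /* stand-ins for the p_compiler.h types */
    typedef unsigned short ushort;

    static void translate_tris_ubyte2ushort_first2last(
        const void * _in,
        unsigned nr,
        void *_out )
    {
       const ubyte *in = (const ubyte*)_in;
       ushort *out = (ushort*)_out;
       unsigned i, j;
       (void)j;
       for (i = 0; i < nr; i += 3) {
          /* inpv == first, outpv == last: emit (v1, v2, v0) so the old first
           * vertex becomes the new last (provoking) vertex. */
          (out+i)[0] = (ushort)in[i+1];
          (out+i)[1] = (ushort)in[i+2];
          (out+i)[2] = (ushort)in[i];
       }
    }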
diff --git a/src/gallium/auxiliary/indices/u_indices_priv.h b/src/gallium/auxiliary/indices/u_indices_priv.h
new file mode 100644
index 0000000000..9c3298c24d
--- /dev/null
+++ b/src/gallium/auxiliary/indices/u_indices_priv.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef U_INDICES_PRIV_H
+#define U_INDICES_PRIV_H
+
+#include "pipe/p_compiler.h"
+#include "u_indices.h"
+
+#define IN_UBYTE 0
+#define IN_USHORT 1
+#define IN_UINT 2
+#define IN_COUNT 3
+
+#define OUT_USHORT 0
+#define OUT_UINT 1
+#define OUT_COUNT 2
+
+
+#define PRIM_COUNT (PIPE_PRIM_POLYGON + 1)
+
+#endif
diff --git a/src/gallium/auxiliary/indices/u_unfilled_gen.c b/src/gallium/auxiliary/indices/u_unfilled_gen.c
new file mode 100644
index 0000000000..93897c98de
--- /dev/null
+++ b/src/gallium/auxiliary/indices/u_unfilled_gen.c
@@ -0,0 +1,992 @@
+/* File automatically generated by u_unfilled_gen.py */
+
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+
+/**
+ * @file
+ * Functions to translate and generate index lists
+ */
+
+#include "indices/u_indices.h"
+#include "indices/u_indices_priv.h"
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+
+
+static unsigned out_size_idx( unsigned index_size )
+{
+ switch (index_size) {
+ case 4: return OUT_UINT;
+ case 2: return OUT_USHORT;
+ default: assert(0); return OUT_USHORT;
+ }
+}
+
+static unsigned in_size_idx( unsigned index_size )
+{
+ switch (index_size) {
+ case 4: return IN_UINT;
+ case 2: return IN_USHORT;
+ case 1: return IN_UBYTE;
+ default: assert(0); return IN_UBYTE;
+ }
+}
+
+
+static u_generate_func generate_line[OUT_COUNT][PRIM_COUNT];
+static u_translate_func translate_line[IN_COUNT][OUT_COUNT][PRIM_COUNT];
+
+
+static void generate_tris_ushort(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=3) {
+ (out+j)[0] = (ushort)(i);
+ (out+j)[1] = (ushort)(i+1);
+ (out+j+2)[0] = (ushort)(i+1);
+ (out+j+2)[1] = (ushort)(i+2);
+ (out+j+4)[0] = (ushort)(i+2);
+ (out+j+4)[1] = (ushort)(i);
+ }
+}
+static void generate_tristrip_ushort(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (ushort)(i);
+ (out+j)[1] = (ushort)(i+1/*+(i&1)*/);
+ (out+j+2)[0] = (ushort)(i+1/*+(i&1)*/);
+ (out+j+2)[1] = (ushort)(i+2/*-(i&1)*/);
+ (out+j+4)[0] = (ushort)(i+2/*-(i&1)*/);
+ (out+j+4)[1] = (ushort)(i);
+ }
+}
+static void generate_trifan_ushort(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (ushort)(0);
+ (out+j)[1] = (ushort)(i+1);
+ (out+j+2)[0] = (ushort)(i+1);
+ (out+j+2)[1] = (ushort)(i+2);
+ (out+j+4)[0] = (ushort)(i+2);
+ (out+j+4)[1] = (ushort)(0);
+ }
+}
+static void generate_quads_ushort(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=8, i+=4) {
+ (out+j)[0] = (ushort)(i+0);
+ (out+j)[1] = (ushort)(i+1);
+ (out+j+2)[0] = (ushort)(i+1);
+ (out+j+2)[1] = (ushort)(i+2);
+ (out+j+4)[0] = (ushort)(i+2);
+ (out+j+4)[1] = (ushort)(i+3);
+ (out+j+6)[0] = (ushort)(i+3);
+ (out+j+6)[1] = (ushort)(i+0);
+ }
+}
+static void generate_quadstrip_ushort(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=8, i+=2) {
+ (out+j)[0] = (ushort)(i+2);
+ (out+j)[1] = (ushort)(i+0);
+ (out+j+2)[0] = (ushort)(i+0);
+ (out+j+2)[1] = (ushort)(i+1);
+ (out+j+4)[0] = (ushort)(i+1);
+ (out+j+4)[1] = (ushort)(i+3);
+ (out+j+6)[0] = (ushort)(i+3);
+ (out+j+6)[1] = (ushort)(i+2);
+ }
+}
+static void generate_polygon_ushort(
+ unsigned nr,
+ void *_out )
+{
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (ushort)(0);
+ (out+j)[1] = (ushort)(i+1);
+ (out+j+2)[0] = (ushort)(i+1);
+ (out+j+2)[1] = (ushort)(i+2);
+ (out+j+4)[0] = (ushort)(i+2);
+ (out+j+4)[1] = (ushort)(0);
+ }
+}
+static void generate_tris_uint(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=3) {
+ (out+j)[0] = (uint)(i);
+ (out+j)[1] = (uint)(i+1);
+ (out+j+2)[0] = (uint)(i+1);
+ (out+j+2)[1] = (uint)(i+2);
+ (out+j+4)[0] = (uint)(i+2);
+ (out+j+4)[1] = (uint)(i);
+ }
+}
+static void generate_tristrip_uint(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (uint)(i);
+ (out+j)[1] = (uint)(i+1/*+(i&1)*/);
+ (out+j+2)[0] = (uint)(i+1/*+(i&1)*/);
+ (out+j+2)[1] = (uint)(i+2/*-(i&1)*/);
+ (out+j+4)[0] = (uint)(i+2/*-(i&1)*/);
+ (out+j+4)[1] = (uint)(i);
+ }
+}
+static void generate_trifan_uint(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (uint)(0);
+ (out+j)[1] = (uint)(i+1);
+ (out+j+2)[0] = (uint)(i+1);
+ (out+j+2)[1] = (uint)(i+2);
+ (out+j+4)[0] = (uint)(i+2);
+ (out+j+4)[1] = (uint)(0);
+ }
+}
+static void generate_quads_uint(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=8, i+=4) {
+ (out+j)[0] = (uint)(i+0);
+ (out+j)[1] = (uint)(i+1);
+ (out+j+2)[0] = (uint)(i+1);
+ (out+j+2)[1] = (uint)(i+2);
+ (out+j+4)[0] = (uint)(i+2);
+ (out+j+4)[1] = (uint)(i+3);
+ (out+j+6)[0] = (uint)(i+3);
+ (out+j+6)[1] = (uint)(i+0);
+ }
+}
+static void generate_quadstrip_uint(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=8, i+=2) {
+ (out+j)[0] = (uint)(i+2);
+ (out+j)[1] = (uint)(i+0);
+ (out+j+2)[0] = (uint)(i+0);
+ (out+j+2)[1] = (uint)(i+1);
+ (out+j+4)[0] = (uint)(i+1);
+ (out+j+4)[1] = (uint)(i+3);
+ (out+j+6)[0] = (uint)(i+3);
+ (out+j+6)[1] = (uint)(i+2);
+ }
+}
+static void generate_polygon_uint(
+ unsigned nr,
+ void *_out )
+{
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (uint)(0);
+ (out+j)[1] = (uint)(i+1);
+ (out+j+2)[0] = (uint)(i+1);
+ (out+j+2)[1] = (uint)(i+2);
+ (out+j+4)[0] = (uint)(i+2);
+ (out+j+4)[1] = (uint)(0);
+ }
+}
+static void translate_tris_ubyte2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=3) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j+2)[0] = (ushort)in[i+1];
+ (out+j+2)[1] = (ushort)in[i+2];
+ (out+j+4)[0] = (ushort)in[i+2];
+ (out+j+4)[1] = (ushort)in[i];
+ }
+}
+static void translate_tristrip_ubyte2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1/*+(i&1)*/];
+ (out+j+2)[0] = (ushort)in[i+1/*+(i&1)*/];
+ (out+j+2)[1] = (ushort)in[i+2/*-(i&1)*/];
+ (out+j+4)[0] = (ushort)in[i+2/*-(i&1)*/];
+ (out+j+4)[1] = (ushort)in[i];
+ }
+}
+static void translate_trifan_ubyte2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j+2)[0] = (ushort)in[i+1];
+ (out+j+2)[1] = (ushort)in[i+2];
+ (out+j+4)[0] = (ushort)in[i+2];
+ (out+j+4)[1] = (ushort)in[0];
+ }
+}
+static void translate_quads_ubyte2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=8, i+=4) {
+ (out+j)[0] = (ushort)in[i+0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j+2)[0] = (ushort)in[i+1];
+ (out+j+2)[1] = (ushort)in[i+2];
+ (out+j+4)[0] = (ushort)in[i+2];
+ (out+j+4)[1] = (ushort)in[i+3];
+ (out+j+6)[0] = (ushort)in[i+3];
+ (out+j+6)[1] = (ushort)in[i+0];
+ }
+}
+static void translate_quadstrip_ubyte2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=8, i+=2) {
+ (out+j)[0] = (ushort)in[i+2];
+ (out+j)[1] = (ushort)in[i+0];
+ (out+j+2)[0] = (ushort)in[i+0];
+ (out+j+2)[1] = (ushort)in[i+1];
+ (out+j+4)[0] = (ushort)in[i+1];
+ (out+j+4)[1] = (ushort)in[i+3];
+ (out+j+6)[0] = (ushort)in[i+3];
+ (out+j+6)[1] = (ushort)in[i+2];
+ }
+}
+static void translate_polygon_ubyte2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j+2)[0] = (ushort)in[i+1];
+ (out+j+2)[1] = (ushort)in[i+2];
+ (out+j+4)[0] = (ushort)in[i+2];
+ (out+j+4)[1] = (ushort)in[0];
+ }
+}
+static void translate_tris_ubyte2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=3) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j+2)[0] = (uint)in[i+1];
+ (out+j+2)[1] = (uint)in[i+2];
+ (out+j+4)[0] = (uint)in[i+2];
+ (out+j+4)[1] = (uint)in[i];
+ }
+}
+static void translate_tristrip_ubyte2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1/*+(i&1)*/];
+ (out+j+2)[0] = (uint)in[i+1/*+(i&1)*/];
+ (out+j+2)[1] = (uint)in[i+2/*-(i&1)*/];
+ (out+j+4)[0] = (uint)in[i+2/*-(i&1)*/];
+ (out+j+4)[1] = (uint)in[i];
+ }
+}
+static void translate_trifan_ubyte2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j+2)[0] = (uint)in[i+1];
+ (out+j+2)[1] = (uint)in[i+2];
+ (out+j+4)[0] = (uint)in[i+2];
+ (out+j+4)[1] = (uint)in[0];
+ }
+}
+static void translate_quads_ubyte2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=8, i+=4) {
+ (out+j)[0] = (uint)in[i+0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j+2)[0] = (uint)in[i+1];
+ (out+j+2)[1] = (uint)in[i+2];
+ (out+j+4)[0] = (uint)in[i+2];
+ (out+j+4)[1] = (uint)in[i+3];
+ (out+j+6)[0] = (uint)in[i+3];
+ (out+j+6)[1] = (uint)in[i+0];
+ }
+}
+static void translate_quadstrip_ubyte2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=8, i+=2) {
+ (out+j)[0] = (uint)in[i+2];
+ (out+j)[1] = (uint)in[i+0];
+ (out+j+2)[0] = (uint)in[i+0];
+ (out+j+2)[1] = (uint)in[i+1];
+ (out+j+4)[0] = (uint)in[i+1];
+ (out+j+4)[1] = (uint)in[i+3];
+ (out+j+6)[0] = (uint)in[i+3];
+ (out+j+6)[1] = (uint)in[i+2];
+ }
+}
+static void translate_polygon_ubyte2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ubyte*in = (const ubyte*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j+2)[0] = (uint)in[i+1];
+ (out+j+2)[1] = (uint)in[i+2];
+ (out+j+4)[0] = (uint)in[i+2];
+ (out+j+4)[1] = (uint)in[0];
+ }
+}
+static void translate_tris_ushort2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=3) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j+2)[0] = (ushort)in[i+1];
+ (out+j+2)[1] = (ushort)in[i+2];
+ (out+j+4)[0] = (ushort)in[i+2];
+ (out+j+4)[1] = (ushort)in[i];
+ }
+}
+static void translate_tristrip_ushort2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1/*+(i&1)*/];
+ (out+j+2)[0] = (ushort)in[i+1/*+(i&1)*/];
+ (out+j+2)[1] = (ushort)in[i+2/*-(i&1)*/];
+ (out+j+4)[0] = (ushort)in[i+2/*-(i&1)*/];
+ (out+j+4)[1] = (ushort)in[i];
+ }
+}
+static void translate_trifan_ushort2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j+2)[0] = (ushort)in[i+1];
+ (out+j+2)[1] = (ushort)in[i+2];
+ (out+j+4)[0] = (ushort)in[i+2];
+ (out+j+4)[1] = (ushort)in[0];
+ }
+}
+static void translate_quads_ushort2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=8, i+=4) {
+ (out+j)[0] = (ushort)in[i+0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j+2)[0] = (ushort)in[i+1];
+ (out+j+2)[1] = (ushort)in[i+2];
+ (out+j+4)[0] = (ushort)in[i+2];
+ (out+j+4)[1] = (ushort)in[i+3];
+ (out+j+6)[0] = (ushort)in[i+3];
+ (out+j+6)[1] = (ushort)in[i+0];
+ }
+}
+static void translate_quadstrip_ushort2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=8, i+=2) {
+ (out+j)[0] = (ushort)in[i+2];
+ (out+j)[1] = (ushort)in[i+0];
+ (out+j+2)[0] = (ushort)in[i+0];
+ (out+j+2)[1] = (ushort)in[i+1];
+ (out+j+4)[0] = (ushort)in[i+1];
+ (out+j+4)[1] = (ushort)in[i+3];
+ (out+j+6)[0] = (ushort)in[i+3];
+ (out+j+6)[1] = (ushort)in[i+2];
+ }
+}
+static void translate_polygon_ushort2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j+2)[0] = (ushort)in[i+1];
+ (out+j+2)[1] = (ushort)in[i+2];
+ (out+j+4)[0] = (ushort)in[i+2];
+ (out+j+4)[1] = (ushort)in[0];
+ }
+}
+static void translate_tris_ushort2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=3) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j+2)[0] = (uint)in[i+1];
+ (out+j+2)[1] = (uint)in[i+2];
+ (out+j+4)[0] = (uint)in[i+2];
+ (out+j+4)[1] = (uint)in[i];
+ }
+}
+static void translate_tristrip_ushort2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1/*+(i&1)*/];
+ (out+j+2)[0] = (uint)in[i+1/*+(i&1)*/];
+ (out+j+2)[1] = (uint)in[i+2/*-(i&1)*/];
+ (out+j+4)[0] = (uint)in[i+2/*-(i&1)*/];
+ (out+j+4)[1] = (uint)in[i];
+ }
+}
+static void translate_trifan_ushort2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j+2)[0] = (uint)in[i+1];
+ (out+j+2)[1] = (uint)in[i+2];
+ (out+j+4)[0] = (uint)in[i+2];
+ (out+j+4)[1] = (uint)in[0];
+ }
+}
+static void translate_quads_ushort2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=8, i+=4) {
+ (out+j)[0] = (uint)in[i+0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j+2)[0] = (uint)in[i+1];
+ (out+j+2)[1] = (uint)in[i+2];
+ (out+j+4)[0] = (uint)in[i+2];
+ (out+j+4)[1] = (uint)in[i+3];
+ (out+j+6)[0] = (uint)in[i+3];
+ (out+j+6)[1] = (uint)in[i+0];
+ }
+}
+static void translate_quadstrip_ushort2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=8, i+=2) {
+ (out+j)[0] = (uint)in[i+2];
+ (out+j)[1] = (uint)in[i+0];
+ (out+j+2)[0] = (uint)in[i+0];
+ (out+j+2)[1] = (uint)in[i+1];
+ (out+j+4)[0] = (uint)in[i+1];
+ (out+j+4)[1] = (uint)in[i+3];
+ (out+j+6)[0] = (uint)in[i+3];
+ (out+j+6)[1] = (uint)in[i+2];
+ }
+}
+static void translate_polygon_ushort2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const ushort*in = (const ushort*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j+2)[0] = (uint)in[i+1];
+ (out+j+2)[1] = (uint)in[i+2];
+ (out+j+4)[0] = (uint)in[i+2];
+ (out+j+4)[1] = (uint)in[0];
+ }
+}
+static void translate_tris_uint2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=3) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j+2)[0] = (ushort)in[i+1];
+ (out+j+2)[1] = (ushort)in[i+2];
+ (out+j+4)[0] = (ushort)in[i+2];
+ (out+j+4)[1] = (ushort)in[i];
+ }
+}
+static void translate_tristrip_uint2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (ushort)in[i];
+ (out+j)[1] = (ushort)in[i+1/*+(i&1)*/];
+ (out+j+2)[0] = (ushort)in[i+1/*+(i&1)*/];
+ (out+j+2)[1] = (ushort)in[i+2/*-(i&1)*/];
+ (out+j+4)[0] = (ushort)in[i+2/*-(i&1)*/];
+ (out+j+4)[1] = (ushort)in[i];
+ }
+}
+static void translate_trifan_uint2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j+2)[0] = (ushort)in[i+1];
+ (out+j+2)[1] = (ushort)in[i+2];
+ (out+j+4)[0] = (ushort)in[i+2];
+ (out+j+4)[1] = (ushort)in[0];
+ }
+}
+static void translate_quads_uint2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=8, i+=4) {
+ (out+j)[0] = (ushort)in[i+0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j+2)[0] = (ushort)in[i+1];
+ (out+j+2)[1] = (ushort)in[i+2];
+ (out+j+4)[0] = (ushort)in[i+2];
+ (out+j+4)[1] = (ushort)in[i+3];
+ (out+j+6)[0] = (ushort)in[i+3];
+ (out+j+6)[1] = (ushort)in[i+0];
+ }
+}
+static void translate_quadstrip_uint2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=8, i+=2) {
+ (out+j)[0] = (ushort)in[i+2];
+ (out+j)[1] = (ushort)in[i+0];
+ (out+j+2)[0] = (ushort)in[i+0];
+ (out+j+2)[1] = (ushort)in[i+1];
+ (out+j+4)[0] = (ushort)in[i+1];
+ (out+j+4)[1] = (ushort)in[i+3];
+ (out+j+6)[0] = (ushort)in[i+3];
+ (out+j+6)[1] = (ushort)in[i+2];
+ }
+}
+static void translate_polygon_uint2ushort(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ ushort *out = (ushort*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (ushort)in[0];
+ (out+j)[1] = (ushort)in[i+1];
+ (out+j+2)[0] = (ushort)in[i+1];
+ (out+j+2)[1] = (ushort)in[i+2];
+ (out+j+4)[0] = (ushort)in[i+2];
+ (out+j+4)[1] = (ushort)in[0];
+ }
+}
+static void translate_tris_uint2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i+=3) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j+2)[0] = (uint)in[i+1];
+ (out+j+2)[1] = (uint)in[i+2];
+ (out+j+4)[0] = (uint)in[i+2];
+ (out+j+4)[1] = (uint)in[i];
+ }
+}
+static void translate_tristrip_uint2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (uint)in[i];
+ (out+j)[1] = (uint)in[i+1/*+(i&1)*/];
+ (out+j+2)[0] = (uint)in[i+1/*+(i&1)*/];
+ (out+j+2)[1] = (uint)in[i+2/*-(i&1)*/];
+ (out+j+4)[0] = (uint)in[i+2/*-(i&1)*/];
+ (out+j+4)[1] = (uint)in[i];
+ }
+}
+static void translate_trifan_uint2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j+2)[0] = (uint)in[i+1];
+ (out+j+2)[1] = (uint)in[i+2];
+ (out+j+4)[0] = (uint)in[i+2];
+ (out+j+4)[1] = (uint)in[0];
+ }
+}
+static void translate_quads_uint2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=8, i+=4) {
+ (out+j)[0] = (uint)in[i+0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j+2)[0] = (uint)in[i+1];
+ (out+j+2)[1] = (uint)in[i+2];
+ (out+j+4)[0] = (uint)in[i+2];
+ (out+j+4)[1] = (uint)in[i+3];
+ (out+j+6)[0] = (uint)in[i+3];
+ (out+j+6)[1] = (uint)in[i+0];
+ }
+}
+static void translate_quadstrip_uint2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=8, i+=2) {
+ (out+j)[0] = (uint)in[i+2];
+ (out+j)[1] = (uint)in[i+0];
+ (out+j+2)[0] = (uint)in[i+0];
+ (out+j+2)[1] = (uint)in[i+1];
+ (out+j+4)[0] = (uint)in[i+1];
+ (out+j+4)[1] = (uint)in[i+3];
+ (out+j+6)[0] = (uint)in[i+3];
+ (out+j+6)[1] = (uint)in[i+2];
+ }
+}
+static void translate_polygon_uint2uint(
+ const void * _in,
+ unsigned nr,
+ void *_out )
+{
+ const uint*in = (const uint*)_in;
+ uint *out = (uint*)_out;
+ unsigned i, j;
+ (void)j;
+ for (j = i = 0; j < nr; j+=6, i++) {
+ (out+j)[0] = (uint)in[0];
+ (out+j)[1] = (uint)in[i+1];
+ (out+j+2)[0] = (uint)in[i+1];
+ (out+j+2)[1] = (uint)in[i+2];
+ (out+j+4)[0] = (uint)in[i+2];
+ (out+j+4)[1] = (uint)in[0];
+ }
+}
+void u_unfilled_init( void )
+{
+ static int firsttime = 1;
+ if (!firsttime) return;
+ firsttime = 0;
+generate_line[OUT_USHORT][PIPE_PRIM_TRIANGLES] = generate_tris_ushort;
+generate_line[OUT_USHORT][PIPE_PRIM_TRIANGLE_FAN] = generate_trifan_ushort;
+generate_line[OUT_USHORT][PIPE_PRIM_TRIANGLE_STRIP] = generate_tristrip_ushort;
+generate_line[OUT_USHORT][PIPE_PRIM_QUADS] = generate_quads_ushort;
+generate_line[OUT_USHORT][PIPE_PRIM_QUAD_STRIP] = generate_quadstrip_ushort;
+generate_line[OUT_USHORT][PIPE_PRIM_POLYGON] = generate_polygon_ushort;
+generate_line[OUT_UINT][PIPE_PRIM_TRIANGLES] = generate_tris_uint;
+generate_line[OUT_UINT][PIPE_PRIM_TRIANGLE_FAN] = generate_trifan_uint;
+generate_line[OUT_UINT][PIPE_PRIM_TRIANGLE_STRIP] = generate_tristrip_uint;
+generate_line[OUT_UINT][PIPE_PRIM_QUADS] = generate_quads_uint;
+generate_line[OUT_UINT][PIPE_PRIM_QUAD_STRIP] = generate_quadstrip_uint;
+generate_line[OUT_UINT][PIPE_PRIM_POLYGON] = generate_polygon_uint;
+translate_line[IN_UBYTE][OUT_USHORT][PIPE_PRIM_TRIANGLES] = translate_tris_ubyte2ushort;
+translate_line[IN_UBYTE][OUT_USHORT][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ubyte2ushort;
+translate_line[IN_UBYTE][OUT_USHORT][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ubyte2ushort;
+translate_line[IN_UBYTE][OUT_USHORT][PIPE_PRIM_QUADS] = translate_quads_ubyte2ushort;
+translate_line[IN_UBYTE][OUT_USHORT][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ubyte2ushort;
+translate_line[IN_UBYTE][OUT_USHORT][PIPE_PRIM_POLYGON] = translate_polygon_ubyte2ushort;
+translate_line[IN_UBYTE][OUT_UINT][PIPE_PRIM_TRIANGLES] = translate_tris_ubyte2uint;
+translate_line[IN_UBYTE][OUT_UINT][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ubyte2uint;
+translate_line[IN_UBYTE][OUT_UINT][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ubyte2uint;
+translate_line[IN_UBYTE][OUT_UINT][PIPE_PRIM_QUADS] = translate_quads_ubyte2uint;
+translate_line[IN_UBYTE][OUT_UINT][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ubyte2uint;
+translate_line[IN_UBYTE][OUT_UINT][PIPE_PRIM_POLYGON] = translate_polygon_ubyte2uint;
+translate_line[IN_USHORT][OUT_USHORT][PIPE_PRIM_TRIANGLES] = translate_tris_ushort2ushort;
+translate_line[IN_USHORT][OUT_USHORT][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ushort2ushort;
+translate_line[IN_USHORT][OUT_USHORT][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ushort2ushort;
+translate_line[IN_USHORT][OUT_USHORT][PIPE_PRIM_QUADS] = translate_quads_ushort2ushort;
+translate_line[IN_USHORT][OUT_USHORT][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ushort2ushort;
+translate_line[IN_USHORT][OUT_USHORT][PIPE_PRIM_POLYGON] = translate_polygon_ushort2ushort;
+translate_line[IN_USHORT][OUT_UINT][PIPE_PRIM_TRIANGLES] = translate_tris_ushort2uint;
+translate_line[IN_USHORT][OUT_UINT][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_ushort2uint;
+translate_line[IN_USHORT][OUT_UINT][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_ushort2uint;
+translate_line[IN_USHORT][OUT_UINT][PIPE_PRIM_QUADS] = translate_quads_ushort2uint;
+translate_line[IN_USHORT][OUT_UINT][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_ushort2uint;
+translate_line[IN_USHORT][OUT_UINT][PIPE_PRIM_POLYGON] = translate_polygon_ushort2uint;
+translate_line[IN_UINT][OUT_USHORT][PIPE_PRIM_TRIANGLES] = translate_tris_uint2ushort;
+translate_line[IN_UINT][OUT_USHORT][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_uint2ushort;
+translate_line[IN_UINT][OUT_USHORT][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_uint2ushort;
+translate_line[IN_UINT][OUT_USHORT][PIPE_PRIM_QUADS] = translate_quads_uint2ushort;
+translate_line[IN_UINT][OUT_USHORT][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_uint2ushort;
+translate_line[IN_UINT][OUT_USHORT][PIPE_PRIM_POLYGON] = translate_polygon_uint2ushort;
+translate_line[IN_UINT][OUT_UINT][PIPE_PRIM_TRIANGLES] = translate_tris_uint2uint;
+translate_line[IN_UINT][OUT_UINT][PIPE_PRIM_TRIANGLE_FAN] = translate_trifan_uint2uint;
+translate_line[IN_UINT][OUT_UINT][PIPE_PRIM_TRIANGLE_STRIP] = translate_tristrip_uint2uint;
+translate_line[IN_UINT][OUT_UINT][PIPE_PRIM_QUADS] = translate_quads_uint2uint;
+translate_line[IN_UINT][OUT_UINT][PIPE_PRIM_QUAD_STRIP] = translate_quadstrip_uint2uint;
+translate_line[IN_UINT][OUT_UINT][PIPE_PRIM_POLYGON] = translate_polygon_uint2uint;
+}
+#include "indices/u_unfilled_indices.c"
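The routines above implement unfilled (wireframe) rendering by expanding each filled primitive into its outline: a triangle becomes three lines (6 indices), a quad four lines (8 indices). Note that nr in these generated functions counts output indices; the loops step j through nr by 6 or 8, and nr_lines() in u_unfilled_indices.c below supplies that count. A standalone sketch (not part of the patch) of the expansion for a single triangle, mirroring generate_tris_ushort() above:

    #include <stdio.h>

    int main(void)
    {
       /* One triangle (vertices 0,1,2) expands to the edges (0,1) (1,2) (2,0),
        * i.e. nr == 6 output indices, exactly as in generate_tris_ushort(). */
       unsigned short out[6];
       unsigned i, j, nr = 6;

       for (j = i = 0; j < nr; j += 6, i += 3) {
          out[j + 0] = (unsigned short)(i);
          out[j + 1] = (unsigned short)(i + 1);
          out[j + 2] = (unsigned short)(i + 1);
          out[j + 3] = (unsigned short)(i + 2);
          out[j + 4] = (unsigned short)(i + 2);
          out[j + 5] = (unsigned short)(i);
       }

       for (j = 0; j < nr; j += 2)
          printf("line (%u, %u)\n", out[j], out[j + 1]);
       return 0;
    }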
diff --git a/src/gallium/auxiliary/indices/u_unfilled_gen.py b/src/gallium/auxiliary/indices/u_unfilled_gen.py
new file mode 100644
index 0000000000..36896ce605
--- /dev/null
+++ b/src/gallium/auxiliary/indices/u_unfilled_gen.py
@@ -0,0 +1,246 @@
+#!/usr/bin/env python
+copyright = '''
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+'''
+
+GENERATE, UBYTE, USHORT, UINT = 'generate', 'ubyte', 'ushort', 'uint'
+FIRST, LAST = 'first', 'last'
+
+INTYPES = (GENERATE, UBYTE, USHORT, UINT)
+OUTTYPES = (USHORT, UINT)
+PRIMS=('tris',
+ 'trifan',
+ 'tristrip',
+ 'quads',
+ 'quadstrip',
+ 'polygon')
+
+LONGPRIMS=('PIPE_PRIM_TRIANGLES',
+ 'PIPE_PRIM_TRIANGLE_FAN',
+ 'PIPE_PRIM_TRIANGLE_STRIP',
+ 'PIPE_PRIM_QUADS',
+ 'PIPE_PRIM_QUAD_STRIP',
+ 'PIPE_PRIM_POLYGON')
+
+longprim = dict(zip(PRIMS, LONGPRIMS))
+intype_idx = dict(ubyte='IN_UBYTE', ushort='IN_USHORT', uint='IN_UINT')
+outtype_idx = dict(ushort='OUT_USHORT', uint='OUT_UINT')
+
+
+def prolog():
+ print '''/* File automatically generated by u_unfilled_gen.py */'''
+ print copyright
+ print r'''
+
+/**
+ * @file
+ * Functions to translate and generate index lists
+ */
+
+#include "indices/u_indices.h"
+#include "indices/u_indices_priv.h"
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+
+
+static unsigned out_size_idx( unsigned index_size )
+{
+ switch (index_size) {
+ case 4: return OUT_UINT;
+ case 2: return OUT_USHORT;
+ default: assert(0); return OUT_USHORT;
+ }
+}
+
+static unsigned in_size_idx( unsigned index_size )
+{
+ switch (index_size) {
+ case 4: return IN_UINT;
+ case 2: return IN_USHORT;
+ case 1: return IN_UBYTE;
+ default: assert(0); return IN_UBYTE;
+ }
+}
+
+
+static u_generate_func generate_line[OUT_COUNT][PRIM_COUNT];
+static u_translate_func translate_line[IN_COUNT][OUT_COUNT][PRIM_COUNT];
+
+'''
+
+def vert( intype, outtype, v0 ):
+ if intype == GENERATE:
+ return '(' + outtype + ')(' + v0 + ')'
+ else:
+ return '(' + outtype + ')in[' + v0 + ']'
+
+def line( intype, outtype, ptr, v0, v1 ):
+ print ' (' + ptr + ')[0] = ' + vert( intype, outtype, v0 ) + ';'
+ print ' (' + ptr + ')[1] = ' + vert( intype, outtype, v1 ) + ';'
+
+# XXX: there is an opportunity here to avoid over-drawing shared lines in
+# tristrips, fans, etc., by integrating this into the calling functions
+# and emitting each line at most once.
+#
+def do_tri( intype, outtype, ptr, v0, v1, v2 ):
+ line( intype, outtype, ptr, v0, v1 )
+ line( intype, outtype, ptr + '+2', v1, v2 )
+ line( intype, outtype, ptr + '+4', v2, v0 )
+
+def do_quad( intype, outtype, ptr, v0, v1, v2, v3 ):
+ line( intype, outtype, ptr, v0, v1 )
+ line( intype, outtype, ptr + '+2', v1, v2 )
+ line( intype, outtype, ptr + '+4', v2, v3 )
+ line( intype, outtype, ptr + '+6', v3, v0 )
+
+def name(intype, outtype, prim):
+ if intype == GENERATE:
+ return 'generate_' + prim + '_' + outtype
+ else:
+ return 'translate_' + prim + '_' + intype + '2' + outtype
+
+def preamble(intype, outtype, prim):
+ print 'static void ' + name( intype, outtype, prim ) + '('
+ if intype != GENERATE:
+ print ' const void * _in,'
+ print ' unsigned nr,'
+ print ' void *_out )'
+ print '{'
+ if intype != GENERATE:
+ print ' const ' + intype + '*in = (const ' + intype + '*)_in;'
+ print ' ' + outtype + ' *out = (' + outtype + '*)_out;'
+ print ' unsigned i, j;'
+ print ' (void)j;'
+
+def postamble():
+ print '}'
+
+
+def tris(intype, outtype):
+ preamble(intype, outtype, prim='tris')
+ print ' for (j = i = 0; j < nr; j+=6, i+=3) { '
+ do_tri( intype, outtype, 'out+j', 'i', 'i+1', 'i+2' );
+ print ' }'
+ postamble()
+
+
+def tristrip(intype, outtype):
+ preamble(intype, outtype, prim='tristrip')
+ print ' for (j = i = 0; j < nr; j+=6, i++) { '
+ do_tri( intype, outtype, 'out+j', 'i', 'i+1/*+(i&1)*/', 'i+2/*-(i&1)*/' );
+ print ' }'
+ postamble()
+
+
+def trifan(intype, outtype):
+ preamble(intype, outtype, prim='trifan')
+ print ' for (j = i = 0; j < nr; j+=6, i++) { '
+ do_tri( intype, outtype, 'out+j', '0', 'i+1', 'i+2' );
+ print ' }'
+ postamble()
+
+
+
+def polygon(intype, outtype):
+ preamble(intype, outtype, prim='polygon')
+ print ' for (j = i = 0; j < nr; j+=6, i++) { '
+ do_tri( intype, outtype, 'out+j', '0', 'i+1', 'i+2' );
+ print ' }'
+ postamble()
+
+
+def quads(intype, outtype):
+ preamble(intype, outtype, prim='quads')
+ print ' for (j = i = 0; j < nr; j+=8, i+=4) { '
+ do_quad( intype, outtype, 'out+j', 'i+0', 'i+1', 'i+2', 'i+3' );
+ print ' }'
+ postamble()
+
+
+def quadstrip(intype, outtype):
+ preamble(intype, outtype, prim='quadstrip')
+ print ' for (j = i = 0; j < nr; j+=8, i+=2) { '
+ do_quad( intype, outtype, 'out+j', 'i+2', 'i+0', 'i+1', 'i+3' );
+ print ' }'
+ postamble()
+
+
+def emit_funcs():
+ for intype in INTYPES:
+ for outtype in OUTTYPES:
+ tris(intype, outtype)
+ tristrip(intype, outtype)
+ trifan(intype, outtype)
+ quads(intype, outtype)
+ quadstrip(intype, outtype)
+ polygon(intype, outtype)
+
+def init(intype, outtype, prim):
+ if intype == GENERATE:
+ print ('generate_line[' +
+ outtype_idx[outtype] +
+ '][' + longprim[prim] +
+ '] = ' + name( intype, outtype, prim ) + ';')
+ else:
+ print ('translate_line[' +
+ intype_idx[intype] +
+ '][' + outtype_idx[outtype] +
+ '][' + longprim[prim] +
+ '] = ' + name( intype, outtype, prim ) + ';')
+
+
+def emit_all_inits():
+ for intype in INTYPES:
+ for outtype in OUTTYPES:
+ for prim in PRIMS:
+ init(intype, outtype, prim)
+
+def emit_init():
+ print 'void u_unfilled_init( void )'
+ print '{'
+ print ' static int firsttime = 1;'
+ print ' if (!firsttime) return;'
+ print ' firsttime = 0;'
+ emit_all_inits()
+ print '}'
+
+
+
+
+def epilog():
+ print '#include "indices/u_unfilled_indices.c"'
+
+
+def main():
+ prolog()
+ emit_funcs()
+ emit_init()
+ epilog()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/src/gallium/auxiliary/indices/u_unfilled_indices.c b/src/gallium/auxiliary/indices/u_unfilled_indices.c
new file mode 100644
index 0000000000..26c5d4d4c7
--- /dev/null
+++ b/src/gallium/auxiliary/indices/u_unfilled_indices.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "u_indices.h"
+#include "u_indices_priv.h"
+
+
+static void translate_ubyte_ushort( const void *in,
+ unsigned nr,
+ void *out )
+{
+ const ubyte *in_ub = (const ubyte *)in;
+ ushort *out_us = (ushort *)out;
+ unsigned i;
+ for (i = 0; i < nr; i++)
+ out_us[i] = (ushort) in_ub[i];
+}
+
+static void translate_memcpy_ushort( const void *in,
+ unsigned nr,
+ void *out )
+{
+ memcpy(out, in, nr*sizeof(short));
+}
+
+static void translate_memcpy_uint( const void *in,
+ unsigned nr,
+ void *out )
+{
+ memcpy(out, in, nr*sizeof(int));
+}
+
+
+static void generate_linear_ushort( unsigned nr,
+ void *out )
+{
+ ushort *out_us = (ushort *)out;
+ unsigned i;
+ for (i = 0; i < nr; i++)
+ out_us[i] = (ushort) i;
+}
+
+static void generate_linear_uint( unsigned nr,
+ void *out )
+{
+ unsigned *out_ui = (unsigned *)out;
+ unsigned i;
+ for (i = 0; i < nr; i++)
+ out_ui[i] = i;
+}
+
+
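+/* Number of line-list indices needed to outline 'nr' vertices of the given
+ * filled primitive: every triangle contributes 3 edges (6 indices), every
+ * quad 4 edges (8 indices).  Edges shared between adjacent triangles or
+ * quads in strips, fans and polygons are counted (and later drawn) twice.
+ */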
+static unsigned nr_lines( unsigned prim,
+ unsigned nr )
+{
+ switch (prim) {
+ case PIPE_PRIM_TRIANGLES:
+ return (nr / 3) * 6;
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ return (nr - 2) * 6;
+ case PIPE_PRIM_TRIANGLE_FAN:
+ return (nr - 2) * 6;
+ case PIPE_PRIM_QUADS:
+ return (nr / 4) * 8;
+ case PIPE_PRIM_QUAD_STRIP:
+ return (nr - 2) / 2 * 8;
+ case PIPE_PRIM_POLYGON:
+ return (nr - 2) * 6;
+ default:
+ assert(0);
+ return 0;
+ }
+}
+
+
+
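+/* Choose a translation function that turns an indexed, filled primitive into
+ * an indexed line list (PIPE_POLYGON_MODE_LINE) or into points
+ * (PIPE_POLYGON_MODE_POINT).  Returns one of the U_TRANSLATE_* codes and
+ * reports the output primitive, index size and index count.
+ */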
+int u_unfilled_translator( unsigned prim,
+ unsigned in_index_size,
+ unsigned nr,
+ unsigned unfilled_mode,
+ unsigned *out_prim,
+ unsigned *out_index_size,
+ unsigned *out_nr,
+ u_translate_func *out_translate )
+{
+ unsigned in_idx;
+ unsigned out_idx;
+
+ u_unfilled_init();
+
+ in_idx = in_size_idx(in_index_size);
+ *out_index_size = (in_index_size == 4) ? 4 : 2;
+ out_idx = out_size_idx(*out_index_size);
+
+ if (unfilled_mode == PIPE_POLYGON_MODE_POINT)
+ {
+ *out_prim = PIPE_PRIM_POINTS;
+ *out_nr = nr;
+
+ switch (in_index_size)
+ {
+ case 1:
+ *out_translate = translate_ubyte_ushort;
+ return U_TRANSLATE_NORMAL;
+ case 2:
+         *out_translate = translate_memcpy_ushort;
+ return U_TRANSLATE_MEMCPY;
+ case 4:
+         *out_translate = translate_memcpy_uint;
+ return U_TRANSLATE_MEMCPY;
+ default:
+ *out_translate = translate_memcpy_uint;
+ *out_nr = 0;
+ assert(0);
+ return U_TRANSLATE_ERROR;
+ }
+ }
+ else {
+ assert(unfilled_mode == PIPE_POLYGON_MODE_LINE);
+ *out_prim = PIPE_PRIM_LINES;
+ *out_translate = translate_line[in_idx][out_idx][prim];
+ *out_nr = nr_lines( prim, nr );
+ return U_TRANSLATE_NORMAL;
+ }
+}
+
+
+
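+/* Same as above for non-indexed (array) draws: picks a generator function
+ * instead of a translator.  'start' is only used to decide whether ushort
+ * output indices still suffice.
+ */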
+int u_unfilled_generator( unsigned prim,
+ unsigned start,
+ unsigned nr,
+ unsigned unfilled_mode,
+ unsigned *out_prim,
+ unsigned *out_index_size,
+ unsigned *out_nr,
+ u_generate_func *out_generate )
+{
+ unsigned out_idx;
+
+ u_unfilled_init();
+
+ *out_index_size = ((start + nr) > 0xfffe) ? 4 : 2;
+ out_idx = out_size_idx(*out_index_size);
+
+ if (unfilled_mode == PIPE_POLYGON_MODE_POINT) {
+
+ if (*out_index_size == 4)
+ *out_generate = generate_linear_uint;
+ else
+ *out_generate = generate_linear_ushort;
+
+ *out_prim = PIPE_PRIM_POINTS;
+ *out_nr = nr;
+ return U_GENERATE_LINEAR;
+ }
+ else {
+ assert(unfilled_mode == PIPE_POLYGON_MODE_LINE);
+ *out_prim = PIPE_PRIM_LINES;
+ *out_generate = generate_line[out_idx][prim];
+ *out_nr = nr_lines( prim, nr );
+
+ return U_GENERATE_REUSABLE;
+ }
+}
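+
+/*
+ * Illustrative usage sketch (not part of the API above; 'in_indices',
+ * 'out_indices' and 'nr_indices' are hypothetical):
+ */
+#if 0
+   unsigned out_prim, out_index_size, out_nr;
+   u_translate_func trans;
+
+   /* Turn an existing ushort triangle index buffer into a line index
+    * buffer for wireframe (PIPE_POLYGON_MODE_LINE) rendering.
+    */
+   u_unfilled_translator(PIPE_PRIM_TRIANGLES, 2, nr_indices,
+                         PIPE_POLYGON_MODE_LINE,
+                         &out_prim, &out_index_size, &out_nr, &trans);
+   trans(in_indices, nr_indices, out_indices);
+   /* ...then draw 'out_nr' indices of 'out_prim' (PIPE_PRIM_LINES). */
+#endif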
+
diff --git a/src/gallium/auxiliary/pipebuffer/Makefile b/src/gallium/auxiliary/pipebuffer/Makefile
new file mode 100644
index 0000000000..1c00ba8d98
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/Makefile
@@ -0,0 +1,19 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = pipebuffer
+
+C_SOURCES = \
+ pb_buffer_fenced.c \
+ pb_buffer_malloc.c \
+ pb_bufmgr_alt.c \
+ pb_bufmgr_cache.c \
+ pb_bufmgr_debug.c \
+ pb_bufmgr_fenced.c \
+ pb_bufmgr_mm.c \
+ pb_bufmgr_ondemand.c \
+ pb_bufmgr_pool.c \
+ pb_bufmgr_slab.c \
+ pb_validate.c
+
+include ../../Makefile.template
diff --git a/src/gallium/auxiliary/pipebuffer/SConscript b/src/gallium/auxiliary/pipebuffer/SConscript
new file mode 100644
index 0000000000..8e9f06abe4
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/SConscript
@@ -0,0 +1,19 @@
+Import('*')
+
+pipebuffer = env.ConvenienceLibrary(
+ target = 'pipebuffer',
+ source = [
+ 'pb_buffer_fenced.c',
+ 'pb_buffer_malloc.c',
+ 'pb_bufmgr_alt.c',
+ 'pb_bufmgr_cache.c',
+ 'pb_bufmgr_debug.c',
+ 'pb_bufmgr_fenced.c',
+ 'pb_bufmgr_mm.c',
+ 'pb_bufmgr_ondemand.c',
+ 'pb_bufmgr_pool.c',
+ 'pb_bufmgr_slab.c',
+ 'pb_validate.c',
+ ])
+
+auxiliaries.insert(0, pipebuffer)
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer.h b/src/gallium/auxiliary/pipebuffer/pb_buffer.h
new file mode 100644
index 0000000000..2590546cb4
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer.h
@@ -0,0 +1,286 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Generic code for buffers.
+ *
+ * Behind a pipe buffer handle there can be DMA buffers, client (or user)
+ * buffers, regular malloc'ed buffers, etc. This file provides an abstract base
+ * buffer handle that allows the driver to cope with all those kinds of buffers
+ * in a more flexible way.
+ *
+ * A winsys driver is under no obligation to use this library, and a pipe
+ * driver should be completely agnostic about it.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef PB_BUFFER_H_
+#define PB_BUFFER_H_
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+#include "pipe/p_error.h"
+#include "pipe/p_state.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct pb_vtbl;
+struct pb_validate;
+
+
+/**
+ * Buffer description.
+ *
+ * Used when allocating the buffer.
+ */
+struct pb_desc
+{
+ unsigned alignment;
+ unsigned usage;
+};
+
+
+/**
+ * Size. Regular (32bit) unsigned for now.
+ */
+typedef unsigned pb_size;
+
+
+/**
+ * Base class for all pb_* buffers.
+ */
+struct pb_buffer
+{
+ struct pipe_buffer base;
+
+ /**
+ * Pointer to the virtual function table.
+ *
+ * Avoid accessing this table directly. Use the inline functions below
+ * instead to avoid mistakes.
+ */
+ const struct pb_vtbl *vtbl;
+};
+
+
+/**
+ * Virtual function table for the buffer storage operations.
+ *
+ * Note that creation is not done through this table.
+ */
+struct pb_vtbl
+{
+ void (*destroy)( struct pb_buffer *buf );
+
+ /**
+    * Map the entire data store of a buffer object into the client's address
+    * space. flags is a bitmask of PIPE_BUFFER_USAGE_CPU_READ/WRITE.
+ */
+ void *(*map)( struct pb_buffer *buf,
+ unsigned flags );
+
+ void (*unmap)( struct pb_buffer *buf );
+
+ enum pipe_error (*validate)( struct pb_buffer *buf,
+ struct pb_validate *vl,
+ unsigned flags );
+
+ void (*fence)( struct pb_buffer *buf,
+ struct pipe_fence_handle *fence );
+
+ /**
+ * Get the base buffer and the offset.
+ *
+    * A buffer can be subdivided into smaller buffers. This method should
+    * return the underlying buffer and the relative offset.
+    *
+    * Buffers without an underlying base buffer should return themselves, with
+ * a zero offset.
+ *
+ * Note that this will increase the reference count of the base buffer.
+ */
+ void (*get_base_buffer)( struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ pb_size *offset );
+
+};
+
+
+static INLINE struct pipe_buffer *
+pb_pipe_buffer( struct pb_buffer *pbuf )
+{
+ assert(pbuf);
+ return &pbuf->base;
+}
+
+
+static INLINE struct pb_buffer *
+pb_buffer( struct pipe_buffer *buf )
+{
+ assert(buf);
+ /* Could add a magic cookie check on debug builds.
+ */
+ return (struct pb_buffer *)buf;
+}
+
+
+/* Accessor functions for pb->vtbl:
+ */
+static INLINE void *
+pb_map(struct pb_buffer *buf,
+ unsigned flags)
+{
+ assert(buf);
+ if(!buf)
+ return NULL;
+ assert(pipe_is_referenced(&buf->base.reference));
+ return buf->vtbl->map(buf, flags);
+}
+
+
+static INLINE void
+pb_unmap(struct pb_buffer *buf)
+{
+ assert(buf);
+ if(!buf)
+ return;
+ assert(pipe_is_referenced(&buf->base.reference));
+ buf->vtbl->unmap(buf);
+}
+
+
+static INLINE void
+pb_get_base_buffer( struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ pb_size *offset )
+{
+ assert(buf);
+ if(!buf) {
+      *base_buf = NULL;
+      *offset = 0;
+ return;
+ }
+ assert(pipe_is_referenced(&buf->base.reference));
+ assert(buf->vtbl->get_base_buffer);
+ buf->vtbl->get_base_buffer(buf, base_buf, offset);
+ assert(*base_buf);
+ assert(*offset < (*base_buf)->base.size);
+}
+
+
+static INLINE enum pipe_error
+pb_validate(struct pb_buffer *buf, struct pb_validate *vl, unsigned flags)
+{
+ assert(buf);
+ if(!buf)
+ return PIPE_ERROR;
+ assert(buf->vtbl->validate);
+ return buf->vtbl->validate(buf, vl, flags);
+}
+
+
+static INLINE void
+pb_fence(struct pb_buffer *buf, struct pipe_fence_handle *fence)
+{
+ assert(buf);
+ if(!buf)
+ return;
+ assert(buf->vtbl->fence);
+ buf->vtbl->fence(buf, fence);
+}
+
+
+static INLINE void
+pb_destroy(struct pb_buffer *buf)
+{
+ assert(buf);
+ if(!buf)
+ return;
+ assert(!pipe_is_referenced(&buf->base.reference));
+ buf->vtbl->destroy(buf);
+}
+
+static INLINE void
+pb_reference(struct pb_buffer **dst,
+ struct pb_buffer *src)
+{
+ struct pb_buffer *old = *dst;
+
+ if (pipe_reference((struct pipe_reference**)dst, &src->base.reference))
+ pb_destroy( old );
+}
+
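+/*
+ * Illustrative usage sketch of the helpers above ('buf', 'data' and 'size'
+ * are hypothetical):
+ *
+ *    void *map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE);
+ *    if (map) {
+ *       memcpy(map, data, size);
+ *       pb_unmap(buf);
+ *    }
+ *    pb_reference(&buf, NULL);
+ */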
+
+/**
+ * Utility function to check whether the provided alignment is consistent
+ * with the requested one.
+ */
+static INLINE boolean
+pb_check_alignment(pb_size requested, pb_size provided)
+{
+ if(!requested)
+ return TRUE;
+ if(requested > provided)
+ return FALSE;
+ if(provided % requested != 0)
+ return FALSE;
+ return TRUE;
+}
+
+
+/**
+ * Utility function to check whether the provided usage flags are consistent
+ * with the requested ones.
+ */
+static INLINE boolean
+pb_check_usage(unsigned requested, unsigned provided)
+{
+ return (requested & provided) == requested ? TRUE : FALSE;
+}
+
+
+/**
+ * Malloc-based buffer to store data that can't be used by the graphics
+ * hardware.
+ */
+struct pb_buffer *
+pb_malloc_buffer_create(pb_size size,
+ const struct pb_desc *desc);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*PB_BUFFER_H_*/
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
new file mode 100644
index 0000000000..109ac7c9d6
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c
@@ -0,0 +1,597 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Implementation of fenced buffers.
+ *
+ * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
+ * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+
+#include "pipe/p_config.h"
+
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
+#include <unistd.h>
+#include <sched.h>
+#endif
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_error.h"
+#include "util/u_debug.h"
+#include "pipe/p_thread.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+
+#include "pb_buffer.h"
+#include "pb_buffer_fenced.h"
+
+
+
+/**
+ * Convenience macro (type safe).
+ */
+#define SUPER(__derived) (&(__derived)->base)
+
+
+struct fenced_buffer_list
+{
+ pipe_mutex mutex;
+
+ struct pb_fence_ops *ops;
+
+ pb_size numDelayed;
+ struct list_head delayed;
+
+#ifdef DEBUG
+ pb_size numUnfenced;
+ struct list_head unfenced;
+#endif
+};
+
+
+/**
+ * Wrapper around a pipe buffer which adds fencing and reference counting.
+ */
+struct fenced_buffer
+{
+ struct pb_buffer base;
+
+ struct pb_buffer *buffer;
+
+ /* FIXME: protect access with mutex */
+
+ /**
+ * A bitmask of PIPE_BUFFER_USAGE_CPU/GPU_READ/WRITE describing the current
+ * buffer usage.
+ */
+ unsigned flags;
+
+ unsigned mapcount;
+ struct pb_validate *vl;
+ unsigned validation_flags;
+ struct pipe_fence_handle *fence;
+
+ struct list_head head;
+ struct fenced_buffer_list *list;
+};
+
+
+static INLINE struct fenced_buffer *
+fenced_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ return (struct fenced_buffer *)buf;
+}
+
+
+static INLINE void
+_fenced_buffer_add(struct fenced_buffer *fenced_buf)
+{
+ struct fenced_buffer_list *fenced_list = fenced_buf->list;
+
+ assert(pipe_is_referenced(&fenced_buf->base.base.reference));
+ assert(fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
+ assert(fenced_buf->fence);
+
+#ifdef DEBUG
+ LIST_DEL(&fenced_buf->head);
+ assert(fenced_list->numUnfenced);
+ --fenced_list->numUnfenced;
+#endif
+ LIST_ADDTAIL(&fenced_buf->head, &fenced_list->delayed);
+ ++fenced_list->numDelayed;
+}
+
+
+/**
+ * Actually destroy the buffer.
+ */
+static INLINE void
+_fenced_buffer_destroy(struct fenced_buffer *fenced_buf)
+{
+ struct fenced_buffer_list *fenced_list = fenced_buf->list;
+
+ assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
+ assert(!fenced_buf->fence);
+#ifdef DEBUG
+ assert(fenced_buf->head.prev);
+ assert(fenced_buf->head.next);
+ LIST_DEL(&fenced_buf->head);
+ assert(fenced_list->numUnfenced);
+ --fenced_list->numUnfenced;
+#else
+ (void)fenced_list;
+#endif
+ pb_reference(&fenced_buf->buffer, NULL);
+ FREE(fenced_buf);
+}
+
+
+static INLINE void
+_fenced_buffer_remove(struct fenced_buffer_list *fenced_list,
+ struct fenced_buffer *fenced_buf)
+{
+ struct pb_fence_ops *ops = fenced_list->ops;
+
+ assert(fenced_buf->fence);
+ assert(fenced_buf->list == fenced_list);
+
+ ops->fence_reference(ops, &fenced_buf->fence, NULL);
+ fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+
+ assert(fenced_buf->head.prev);
+ assert(fenced_buf->head.next);
+
+ LIST_DEL(&fenced_buf->head);
+ assert(fenced_list->numDelayed);
+ --fenced_list->numDelayed;
+
+#ifdef DEBUG
+ LIST_ADDTAIL(&fenced_buf->head, &fenced_list->unfenced);
+ ++fenced_list->numUnfenced;
+#endif
+
+ /**
+ * FIXME!!!
+ */
+
+ if(!pipe_is_referenced(&fenced_buf->base.base.reference))
+ _fenced_buffer_destroy(fenced_buf);
+}
+
+
+static INLINE enum pipe_error
+_fenced_buffer_finish(struct fenced_buffer *fenced_buf)
+{
+ struct fenced_buffer_list *fenced_list = fenced_buf->list;
+ struct pb_fence_ops *ops = fenced_list->ops;
+
+#if 0
+ debug_warning("waiting for GPU");
+#endif
+
+ assert(fenced_buf->fence);
+ if(fenced_buf->fence) {
+ if(ops->fence_finish(ops, fenced_buf->fence, 0) != 0) {
+ return PIPE_ERROR;
+ }
+ /* Remove from the fenced list */
+ /* TODO: remove consequents */
+ _fenced_buffer_remove(fenced_list, fenced_buf);
+ }
+
+ fenced_buf->flags &= ~PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+ return PIPE_OK;
+}
+
+
+/**
+ * Free as many fenced buffers from the list head as possible.
+ */
+static void
+_fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
+ int wait)
+{
+ struct pb_fence_ops *ops = fenced_list->ops;
+ struct list_head *curr, *next;
+ struct fenced_buffer *fenced_buf;
+ struct pipe_fence_handle *prev_fence = NULL;
+
+ curr = fenced_list->delayed.next;
+ next = curr->next;
+ while(curr != &fenced_list->delayed) {
+ fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+
+ if(fenced_buf->fence != prev_fence) {
+ int signaled;
+ if (wait)
+ signaled = ops->fence_finish(ops, fenced_buf->fence, 0);
+ else
+ signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
+ if (signaled != 0)
+ break;
+ prev_fence = fenced_buf->fence;
+ }
+ else {
+ assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
+ }
+
+ _fenced_buffer_remove(fenced_list, fenced_buf);
+
+ curr = next;
+ next = curr->next;
+ }
+}
+
+
+static void
+fenced_buffer_destroy(struct pb_buffer *buf)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ struct fenced_buffer_list *fenced_list = fenced_buf->list;
+
+ pipe_mutex_lock(fenced_list->mutex);
+ assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
+ if (fenced_buf->fence) {
+ struct pb_fence_ops *ops = fenced_list->ops;
+ if(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0) {
+ struct list_head *curr, *prev;
+ curr = &fenced_buf->head;
+ prev = curr->prev;
+ do {
+ fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+ assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
+ _fenced_buffer_remove(fenced_list, fenced_buf);
+ curr = prev;
+ prev = curr->prev;
+ } while (curr != &fenced_list->delayed);
+ }
+ else {
+ /* delay destruction */
+ }
+ }
+ else {
+ _fenced_buffer_destroy(fenced_buf);
+ }
+ pipe_mutex_unlock(fenced_list->mutex);
+}
+
+
+static void *
+fenced_buffer_map(struct pb_buffer *buf,
+ unsigned flags)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ struct fenced_buffer_list *fenced_list = fenced_buf->list;
+ struct pb_fence_ops *ops = fenced_list->ops;
+ void *map;
+
+ assert(!(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE));
+
+ /* Serialize writes */
+ if((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_WRITE) ||
+ ((fenced_buf->flags & PIPE_BUFFER_USAGE_GPU_READ) && (flags & PIPE_BUFFER_USAGE_CPU_WRITE))) {
+ if(flags & PIPE_BUFFER_USAGE_DONTBLOCK) {
+ /* Don't wait for the GPU to finish writing */
+ if(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0)
+ _fenced_buffer_remove(fenced_list, fenced_buf);
+ else
+ return NULL;
+ }
+ else {
+ /* Wait for the GPU to finish writing */
+ _fenced_buffer_finish(fenced_buf);
+ }
+ }
+
+#if 0
+ /* Check for CPU write access (read is OK) */
+ if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
+ /* this is legal -- just for debugging */
+ debug_warning("concurrent CPU writes");
+ }
+#endif
+
+ map = pb_map(fenced_buf->buffer, flags);
+ if(map) {
+ ++fenced_buf->mapcount;
+ fenced_buf->flags |= flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+ }
+
+ return map;
+}
+
+
+static void
+fenced_buffer_unmap(struct pb_buffer *buf)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ assert(fenced_buf->mapcount);
+ if(fenced_buf->mapcount) {
+ pb_unmap(fenced_buf->buffer);
+ --fenced_buf->mapcount;
+ if(!fenced_buf->mapcount)
+ fenced_buf->flags &= ~PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+ }
+}
+
+
+static enum pipe_error
+fenced_buffer_validate(struct pb_buffer *buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ enum pipe_error ret;
+
+ if(!vl) {
+ /* invalidate */
+ fenced_buf->vl = NULL;
+ fenced_buf->validation_flags = 0;
+ return PIPE_OK;
+ }
+
+ assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
+ assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
+ flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+
+ /* Buffer cannot be validated in two different lists */
+ if(fenced_buf->vl && fenced_buf->vl != vl)
+ return PIPE_ERROR_RETRY;
+
+#if 0
+ /* Do not validate if buffer is still mapped */
+ if(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE) {
+ /* TODO: wait for the thread that mapped the buffer to unmap it */
+ return PIPE_ERROR_RETRY;
+ }
+ /* Final sanity checking */
+ assert(!(fenced_buf->flags & PIPE_BUFFER_USAGE_CPU_READ_WRITE));
+ assert(!fenced_buf->mapcount);
+#endif
+
+ if(fenced_buf->vl == vl &&
+ (fenced_buf->validation_flags & flags) == flags) {
+ /* Nothing to do -- buffer already validated */
+ return PIPE_OK;
+ }
+
+ ret = pb_validate(fenced_buf->buffer, vl, flags);
+ if (ret != PIPE_OK)
+ return ret;
+
+ fenced_buf->vl = vl;
+ fenced_buf->validation_flags |= flags;
+
+ return PIPE_OK;
+}
+
+
+static void
+fenced_buffer_fence(struct pb_buffer *buf,
+ struct pipe_fence_handle *fence)
+{
+ struct fenced_buffer *fenced_buf;
+ struct fenced_buffer_list *fenced_list;
+ struct pb_fence_ops *ops;
+
+ fenced_buf = fenced_buffer(buf);
+ fenced_list = fenced_buf->list;
+ ops = fenced_list->ops;
+
+ if(fence == fenced_buf->fence) {
+ /* Nothing to do */
+ return;
+ }
+
+ assert(fenced_buf->vl);
+ assert(fenced_buf->validation_flags);
+
+ pipe_mutex_lock(fenced_list->mutex);
+ if (fenced_buf->fence)
+ _fenced_buffer_remove(fenced_list, fenced_buf);
+ if (fence) {
+ ops->fence_reference(ops, &fenced_buf->fence, fence);
+ fenced_buf->flags |= fenced_buf->validation_flags;
+ _fenced_buffer_add(fenced_buf);
+ }
+ pipe_mutex_unlock(fenced_list->mutex);
+
+ pb_fence(fenced_buf->buffer, fence);
+
+ fenced_buf->vl = NULL;
+ fenced_buf->validation_flags = 0;
+}
+
+
+static void
+fenced_buffer_get_base_buffer(struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ pb_size *offset)
+{
+ struct fenced_buffer *fenced_buf = fenced_buffer(buf);
+ pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
+}
+
+
+static const struct pb_vtbl
+fenced_buffer_vtbl = {
+ fenced_buffer_destroy,
+ fenced_buffer_map,
+ fenced_buffer_unmap,
+ fenced_buffer_validate,
+ fenced_buffer_fence,
+ fenced_buffer_get_base_buffer
+};
+
+
+struct pb_buffer *
+fenced_buffer_create(struct fenced_buffer_list *fenced_list,
+ struct pb_buffer *buffer)
+{
+ struct fenced_buffer *buf;
+
+ if(!buffer)
+ return NULL;
+
+ buf = CALLOC_STRUCT(fenced_buffer);
+ if(!buf) {
+ pb_reference(&buffer, NULL);
+ return NULL;
+ }
+
+ pipe_reference_init(&buf->base.base.reference, 1);
+ buf->base.base.alignment = buffer->base.alignment;
+ buf->base.base.usage = buffer->base.usage;
+ buf->base.base.size = buffer->base.size;
+
+ buf->base.vtbl = &fenced_buffer_vtbl;
+ buf->buffer = buffer;
+ buf->list = fenced_list;
+
+#ifdef DEBUG
+ pipe_mutex_lock(fenced_list->mutex);
+ LIST_ADDTAIL(&buf->head, &fenced_list->unfenced);
+ ++fenced_list->numUnfenced;
+ pipe_mutex_unlock(fenced_list->mutex);
+#endif
+
+ return &buf->base;
+}
+
+
+struct fenced_buffer_list *
+fenced_buffer_list_create(struct pb_fence_ops *ops)
+{
+ struct fenced_buffer_list *fenced_list;
+
+ fenced_list = CALLOC_STRUCT(fenced_buffer_list);
+ if (!fenced_list)
+ return NULL;
+
+ fenced_list->ops = ops;
+
+ LIST_INITHEAD(&fenced_list->delayed);
+ fenced_list->numDelayed = 0;
+
+#ifdef DEBUG
+ LIST_INITHEAD(&fenced_list->unfenced);
+ fenced_list->numUnfenced = 0;
+#endif
+
+ pipe_mutex_init(fenced_list->mutex);
+
+ return fenced_list;
+}
+
+
+void
+fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
+ int wait)
+{
+ pipe_mutex_lock(fenced_list->mutex);
+ _fenced_buffer_list_check_free(fenced_list, wait);
+ pipe_mutex_unlock(fenced_list->mutex);
+}
+
+
+#ifdef DEBUG
+void
+fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list)
+{
+ struct pb_fence_ops *ops = fenced_list->ops;
+ struct list_head *curr, *next;
+ struct fenced_buffer *fenced_buf;
+
+ pipe_mutex_lock(fenced_list->mutex);
+
+ debug_printf("%10s %7s %7s %10s %s\n",
+ "buffer", "size", "refcount", "fence", "signalled");
+
+ curr = fenced_list->unfenced.next;
+ next = curr->next;
+ while(curr != &fenced_list->unfenced) {
+ fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+ assert(!fenced_buf->fence);
+ debug_printf("%10p %7u %7u\n",
+ fenced_buf,
+ fenced_buf->base.base.size,
+ fenced_buf->base.base.reference.count);
+ curr = next;
+ next = curr->next;
+ }
+
+ curr = fenced_list->delayed.next;
+ next = curr->next;
+ while(curr != &fenced_list->delayed) {
+ int signaled;
+ fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
+ signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
+ debug_printf("%10p %7u %7u %10p %s\n",
+ fenced_buf,
+ fenced_buf->base.base.size,
+ fenced_buf->base.base.reference.count,
+ fenced_buf->fence,
+ signaled == 0 ? "y" : "n");
+ curr = next;
+ next = curr->next;
+ }
+
+ pipe_mutex_unlock(fenced_list->mutex);
+}
+#endif
+
+
+void
+fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list)
+{
+ pipe_mutex_lock(fenced_list->mutex);
+
+ /* Wait on outstanding fences */
+ while (fenced_list->numDelayed) {
+ pipe_mutex_unlock(fenced_list->mutex);
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
+ sched_yield();
+#endif
+ _fenced_buffer_list_check_free(fenced_list, 1);
+ pipe_mutex_lock(fenced_list->mutex);
+ }
+
+#ifdef DEBUG
+ //assert(!fenced_list->numUnfenced);
+#endif
+
+ pipe_mutex_unlock(fenced_list->mutex);
+
+ fenced_list->ops->destroy(fenced_list->ops);
+
+ FREE(fenced_list);
+}
+
+
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h
new file mode 100644
index 0000000000..034ca1e024
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.h
@@ -0,0 +1,142 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Buffer fencing.
+ *
+ * "Fenced buffers" is actually a misnomer. They should be referred to as
+ * "fenceable buffers", i.e., buffers that can be fenced, but I couldn't find
+ * the word "fenceable" in the dictionary.
+ *
+ * A "fenced buffer" is a decorator around a normal buffer, which adds two
+ * special properties:
+ * - the ability for the destruction to be delayed by a fence;
+ * - reference counting.
+ *
+ * Usually DMA buffers have a lifetime that extends beyond the lifetime of
+ * their handles: the end of life is dictated by the fence signalling.
+ *
+ * Between the handle's destruction and the fence signalling, the buffer is
+ * stored in a fenced buffer list.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef PB_BUFFER_FENCED_H_
+#define PB_BUFFER_FENCED_H_
+
+
+#include "util/u_debug.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct pipe_buffer;
+struct pipe_fence_handle;
+
+
+/**
+ * List of buffers which are awaiting fence signalling.
+ */
+struct fenced_buffer_list;
+
+
+struct pb_fence_ops
+{
+ void (*destroy)( struct pb_fence_ops *ops );
+
+ /** Set ptr = fence, with reference counting */
+ void (*fence_reference)( struct pb_fence_ops *ops,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence );
+
+ /**
+ * Checks whether the fence has been signalled.
+    * \param flag driver-specific meaning
+ * \return zero on success.
+ */
+ int (*fence_signalled)( struct pb_fence_ops *ops,
+ struct pipe_fence_handle *fence,
+ unsigned flag );
+
+ /**
+ * Wait for the fence to finish.
+    * \param flag driver-specific meaning
+ * \return zero on success.
+ */
+ int (*fence_finish)( struct pb_fence_ops *ops,
+ struct pipe_fence_handle *fence,
+ unsigned flag );
+};
+
+
+/**
+ * Create a fenced buffer list.
+ *
+ * See also fenced_bufmgr_create for a more convenient way to use this.
+ */
+struct fenced_buffer_list *
+fenced_buffer_list_create(struct pb_fence_ops *ops);
+
+
+/**
+ * Walk the fenced buffer list to check and free signalled buffers.
+ */
+void
+fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,
+ int wait);
+
+
+#ifdef DEBUG
+void
+fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list);
+#endif
+
+
+void
+fenced_buffer_list_destroy(struct fenced_buffer_list *fenced_list);
+
+
+/**
+ * Wrap a buffer in a fenced buffer.
+ *
+ * NOTE: this will not increase the buffer reference count.
+ */
+struct pb_buffer *
+fenced_buffer_create(struct fenced_buffer_list *fenced,
+ struct pb_buffer *buffer);
+
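+/*
+ * Illustrative usage sketch ('ops' and 'winsys_buf' are hypothetical):
+ *
+ *    struct fenced_buffer_list *list = fenced_buffer_list_create(ops);
+ *    struct pb_buffer *buf = fenced_buffer_create(list, winsys_buf);
+ *
+ *    Validate and fence the buffer around command submission, then drop the
+ *    reference; actual destruction is delayed while the buffer is fenced:
+ *
+ *    pb_reference(&buf, NULL);
+ *    fenced_buffer_list_check_free(list, 0);
+ *    fenced_buffer_list_destroy(list);
+ */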
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*PB_BUFFER_FENCED_H_*/
diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_malloc.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_malloc.c
new file mode 100644
index 0000000000..6bdce5fcb0
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_malloc.c
@@ -0,0 +1,188 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Implementation of malloc-based buffers to store data that can't be processed
+ * by the hardware.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+struct malloc_buffer
+{
+ struct pb_buffer base;
+ void *data;
+};
+
+
+extern const struct pb_vtbl malloc_buffer_vtbl;
+
+static INLINE struct malloc_buffer *
+malloc_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ if (!buf)
+ return NULL;
+ assert(buf->vtbl == &malloc_buffer_vtbl);
+ return (struct malloc_buffer *)buf;
+}
+
+
+static void
+malloc_buffer_destroy(struct pb_buffer *buf)
+{
+ align_free(malloc_buffer(buf)->data);
+ FREE(buf);
+}
+
+
+static void *
+malloc_buffer_map(struct pb_buffer *buf,
+ unsigned flags)
+{
+ return malloc_buffer(buf)->data;
+}
+
+
+static void
+malloc_buffer_unmap(struct pb_buffer *buf)
+{
+ /* No-op */
+}
+
+
+static enum pipe_error
+malloc_buffer_validate(struct pb_buffer *buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ assert(0);
+ return PIPE_ERROR;
+}
+
+
+static void
+malloc_buffer_fence(struct pb_buffer *buf,
+ struct pipe_fence_handle *fence)
+{
+ assert(0);
+}
+
+
+static void
+malloc_buffer_get_base_buffer(struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ pb_size *offset)
+{
+ *base_buf = buf;
+ *offset = 0;
+}
+
+
+const struct pb_vtbl
+malloc_buffer_vtbl = {
+ malloc_buffer_destroy,
+ malloc_buffer_map,
+ malloc_buffer_unmap,
+ malloc_buffer_validate,
+ malloc_buffer_fence,
+ malloc_buffer_get_base_buffer
+};
+
+
+struct pb_buffer *
+pb_malloc_buffer_create(pb_size size,
+ const struct pb_desc *desc)
+{
+ struct malloc_buffer *buf;
+
+ /* TODO: do a single allocation */
+
+ buf = CALLOC_STRUCT(malloc_buffer);
+ if(!buf)
+ return NULL;
+
+ pipe_reference_init(&buf->base.base.reference, 1);
+ buf->base.base.alignment = desc->alignment;
+ buf->base.base.usage = desc->usage;
+ buf->base.base.size = size;
+ buf->base.vtbl = &malloc_buffer_vtbl;
+
+ buf->data = align_malloc(size, desc->alignment < sizeof(void*) ? sizeof(void*) : desc->alignment);
+ if(!buf->data) {
+ FREE(buf);
+ return NULL;
+ }
+
+ return &buf->base;
+}
+
+
+static struct pb_buffer *
+pb_malloc_bufmgr_create_buffer(struct pb_manager *mgr,
+ pb_size size,
+ const struct pb_desc *desc)
+{
+ return pb_malloc_buffer_create(size, desc);
+}
+
+
+static void
+pb_malloc_bufmgr_flush(struct pb_manager *mgr)
+{
+ /* No-op */
+}
+
+
+static void
+pb_malloc_bufmgr_destroy(struct pb_manager *mgr)
+{
+ /* No-op */
+}
+
+
+static struct pb_manager
+pb_malloc_bufmgr = {
+ pb_malloc_bufmgr_destroy,
+ pb_malloc_bufmgr_create_buffer,
+ pb_malloc_bufmgr_flush
+};
+
+
+struct pb_manager *
+pb_malloc_bufmgr_create(void)
+{
+ return &pb_malloc_bufmgr;
+}
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h b/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
new file mode 100644
index 0000000000..39ab8e722c
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
@@ -0,0 +1,214 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Buffer management.
+ *
+ * A buffer manager does only one basic thing: it creates buffers. Actually,
+ * "buffer factory" would probably be a more accurate description.
+ *
+ * You can chain buffer managers to get finer-grained memory management and
+ * pooling.
+ *
+ * For example, for a simple batch buffer manager you would chain:
+ * - the native buffer manager, which provides DMA memory from the graphics
+ * memory space;
+ * - the pool buffer manager, which keeps around a pool of equally sized
+ * buffers to avoid the latency associated with the native buffer manager;
+ * - the fenced buffer manager, which delays buffer destruction until the
+ * moment the card finishes processing it.
+ * (See the illustrative sketch after fenced_bufmgr_create() below.)
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef PB_BUFMGR_H_
+#define PB_BUFMGR_H_
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_error.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct pb_desc;
+struct pipe_buffer;
+
+
+/**
+ * Abstract base class for all buffer managers.
+ */
+struct pb_manager
+{
+ void
+ (*destroy)( struct pb_manager *mgr );
+
+ struct pb_buffer *
+ (*create_buffer)( struct pb_manager *mgr,
+ pb_size size,
+ const struct pb_desc *desc);
+
+ /**
+    * Flush all temporarily-held buffers.
+    *
+    * Used mostly to aid debugging memory issues or to clean up resources when
+    * the drivers are long-lived.
+ */
+ void
+ (*flush)( struct pb_manager *mgr );
+};
+
+
+/**
+ * Malloc buffer provider.
+ *
+ * Simple wrapper around pb_malloc_buffer_create for convenience.
+ */
+struct pb_manager *
+pb_malloc_bufmgr_create(void);
+
+
+/**
+ * Static buffer pool sub-allocator.
+ *
+ * Manages the allocation of equally sized buffers. It does so by allocating
+ * a single big buffer and dividing it into equally sized sub-buffers.
+ *
+ * It is meant to manage the allocation of batch buffer pools.
+ */
+struct pb_manager *
+pool_bufmgr_create(struct pb_manager *provider,
+ pb_size n, pb_size size,
+ const struct pb_desc *desc);
+
+
+/**
+ * Static sub-allocator based on the old memory manager.
+ *
+ * It manages buffers of different sizes. It does so by allocating a buffer
+ * with the size of the heap, and then using the old mm memory manager to manage
+ * that heap.
+ */
+struct pb_manager *
+mm_bufmgr_create(struct pb_manager *provider,
+ pb_size size, pb_size align2);
+
+/**
+ * Same as mm_bufmgr_create.
+ *
+ * The buffer will be released when the manager is destroyed.
+ */
+struct pb_manager *
+mm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
+ pb_size size, pb_size align2);
+
+
+/**
+ * Slab sub-allocator.
+ */
+struct pb_manager *
+pb_slab_manager_create(struct pb_manager *provider,
+ pb_size bufSize,
+ pb_size slabSize,
+ const struct pb_desc *desc);
+
+/**
+ * Allow a range of buffer sizes by aggregating multiple slab sub-allocators
+ * with different bucket sizes.
+ */
+struct pb_manager *
+pb_slab_range_manager_create(struct pb_manager *provider,
+ pb_size minBufSize,
+ pb_size maxBufSize,
+ pb_size slabSize,
+ const struct pb_desc *desc);
+
+
+/**
+ * Time-based buffer cache.
+ *
+ * This manager keeps a cache of destroyed buffers during a time interval.
+ */
+struct pb_manager *
+pb_cache_manager_create(struct pb_manager *provider,
+ unsigned usecs);
+
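+/*
+ * For example (illustrative; 'provider' is a hypothetical manager), to keep
+ * freed buffers around for one second before really destroying them:
+ *
+ *    struct pb_manager *mgr = pb_cache_manager_create(provider, 1000000);
+ */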
+
+struct pb_fence_ops;
+
+/**
+ * Fenced buffer manager.
+ *
+ * This manager is just meant for convenience. It wraps the buffers returned
+ * by another manager in fenced buffers, so that their destruction can be
+ * delayed until the fences associated with them are signalled.
+ *
+ * NOTE: the buffer manager that provides the buffers will be destroyed
+ * at the same time.
+ */
+struct pb_manager *
+fenced_bufmgr_create(struct pb_manager *provider,
+ struct pb_fence_ops *ops);
+
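+/*
+ * Illustrative sketch of the batch buffer chain described at the top of this
+ * file ('winsys_mgr', 'ops' and 'desc' are hypothetical, sizes arbitrary):
+ *
+ *    struct pb_manager *pool, *fenced;
+ *
+ *    pool   = pool_bufmgr_create(winsys_mgr, 16, 256 * 1024, &desc);
+ *    fenced = fenced_bufmgr_create(pool, ops);
+ *
+ *    Buffers created from 'fenced' are sub-allocated from the pool and have
+ *    their destruction delayed until the card is done with them.
+ */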
+
+struct pb_manager *
+pb_alt_manager_create(struct pb_manager *provider1,
+ struct pb_manager *provider2);
+
+
+/**
+ * Ondemand buffer manager.
+ *
+ * Buffers are created in malloc'ed memory (fast and cached), and their
+ * contents are transferred to a buffer from the provider (typically in slow
+ * uncached memory) when there is an attempt to validate the buffer.
+ *
+ * Ideal for situations where one does not know beforehand whether a given
+ * buffer will actually be used by the hardware or not.
+ */
+struct pb_manager *
+pb_ondemand_manager_create(struct pb_manager *provider);
+
+
+/**
+ * Debug buffer manager to detect buffer under- and overflows.
+ *
+ * Under/overflow sizes should be a multiple of the largest alignment
+ */
+struct pb_manager *
+pb_debug_manager_create(struct pb_manager *provider,
+ pb_size underflow_size, pb_size overflow_size);
+
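+/*
+ * For example (illustrative; 'provider' is a hypothetical manager), to pad
+ * every buffer with 4096-byte guard areas on each side:
+ *
+ *    struct pb_manager *mgr = pb_debug_manager_create(provider, 4096, 4096);
+ */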
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*PB_BUFMGR_H_*/
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_alt.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_alt.c
new file mode 100644
index 0000000000..f60c836f18
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_alt.c
@@ -0,0 +1,120 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Allocate buffers from two alternative buffer providers.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+struct pb_alt_manager
+{
+ struct pb_manager base;
+
+ struct pb_manager *provider1;
+ struct pb_manager *provider2;
+};
+
+
+static INLINE struct pb_alt_manager *
+pb_alt_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pb_alt_manager *)mgr;
+}
+
+
+static struct pb_buffer *
+pb_alt_manager_create_buffer(struct pb_manager *_mgr,
+ pb_size size,
+ const struct pb_desc *desc)
+{
+ struct pb_alt_manager *mgr = pb_alt_manager(_mgr);
+ struct pb_buffer *buf;
+
+ buf = mgr->provider1->create_buffer(mgr->provider1, size, desc);
+ if(buf)
+ return buf;
+
+ buf = mgr->provider2->create_buffer(mgr->provider2, size, desc);
+ return buf;
+}
+
+
+static void
+pb_alt_manager_flush(struct pb_manager *_mgr)
+{
+ struct pb_alt_manager *mgr = pb_alt_manager(_mgr);
+
+ assert(mgr->provider1->flush);
+ if(mgr->provider1->flush)
+ mgr->provider1->flush(mgr->provider1);
+
+ assert(mgr->provider2->flush);
+ if(mgr->provider2->flush)
+ mgr->provider2->flush(mgr->provider2);
+}
+
+
+static void
+pb_alt_manager_destroy(struct pb_manager *mgr)
+{
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pb_alt_manager_create(struct pb_manager *provider1,
+ struct pb_manager *provider2)
+{
+ struct pb_alt_manager *mgr;
+
+ if(!provider1 || !provider2)
+ return NULL;
+
+ mgr = CALLOC_STRUCT(pb_alt_manager);
+ if (!mgr)
+ return NULL;
+
+ mgr->base.destroy = pb_alt_manager_destroy;
+ mgr->base.create_buffer = pb_alt_manager_create_buffer;
+ mgr->base.flush = pb_alt_manager_flush;
+ mgr->provider1 = provider1;
+ mgr->provider2 = provider2;
+
+ return &mgr->base;
+}
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
new file mode 100644
index 0000000000..57d1ede45a
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
@@ -0,0 +1,385 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Buffer cache.
+ *
+ * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
+ * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+#include "pipe/p_thread.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+#include "util/u_time.h"
+
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+/**
+ * Convenience macro (type safe).
+ */
+#define SUPER(__derived) (&(__derived)->base)
+
+
+struct pb_cache_manager;
+
+
+/**
+ * Wrapper around a pipe buffer which adds delayed destruction.
+ */
+struct pb_cache_buffer
+{
+ struct pb_buffer base;
+
+ struct pb_buffer *buffer;
+ struct pb_cache_manager *mgr;
+
+ /** Caching time interval */
+ struct util_time start, end;
+
+ struct list_head head;
+};
+
+
+struct pb_cache_manager
+{
+ struct pb_manager base;
+
+ struct pb_manager *provider;
+ unsigned usecs;
+
+ pipe_mutex mutex;
+
+ struct list_head delayed;
+ pb_size numDelayed;
+};
+
+
+static INLINE struct pb_cache_buffer *
+pb_cache_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ return (struct pb_cache_buffer *)buf;
+}
+
+
+static INLINE struct pb_cache_manager *
+pb_cache_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pb_cache_manager *)mgr;
+}
+
+
+/**
+ * Actually destroy the buffer.
+ */
+static INLINE void
+_pb_cache_buffer_destroy(struct pb_cache_buffer *buf)
+{
+ struct pb_cache_manager *mgr = buf->mgr;
+
+ LIST_DEL(&buf->head);
+ assert(mgr->numDelayed);
+ --mgr->numDelayed;
+ assert(!pipe_is_referenced(&buf->base.base.reference));
+ pb_reference(&buf->buffer, NULL);
+ FREE(buf);
+}
+
+
+/**
+ * Free as many cache buffers from the list head as possible.
+ */
+static void
+_pb_cache_buffer_list_check_free(struct pb_cache_manager *mgr)
+{
+ struct list_head *curr, *next;
+ struct pb_cache_buffer *buf;
+ struct util_time now;
+
+ util_time_get(&now);
+
+ curr = mgr->delayed.next;
+ next = curr->next;
+ while(curr != &mgr->delayed) {
+ buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
+
+ if(!util_time_timeout(&buf->start, &buf->end, &now))
+ break;
+
+ _pb_cache_buffer_destroy(buf);
+
+ curr = next;
+ next = curr->next;
+ }
+}
+
+
+static void
+pb_cache_buffer_destroy(struct pb_buffer *_buf)
+{
+ struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
+ struct pb_cache_manager *mgr = buf->mgr;
+
+ pipe_mutex_lock(mgr->mutex);
+ assert(!pipe_is_referenced(&buf->base.base.reference));
+
+ _pb_cache_buffer_list_check_free(mgr);
+
+ util_time_get(&buf->start);
+ util_time_add(&buf->start, mgr->usecs, &buf->end);
+ LIST_ADDTAIL(&buf->head, &mgr->delayed);
+ ++mgr->numDelayed;
+ pipe_mutex_unlock(mgr->mutex);
+}
+
+
+static void *
+pb_cache_buffer_map(struct pb_buffer *_buf,
+ unsigned flags)
+{
+ struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
+ return pb_map(buf->buffer, flags);
+}
+
+
+static void
+pb_cache_buffer_unmap(struct pb_buffer *_buf)
+{
+ struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
+ pb_unmap(buf->buffer);
+}
+
+
+static enum pipe_error
+pb_cache_buffer_validate(struct pb_buffer *_buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
+ return pb_validate(buf->buffer, vl, flags);
+}
+
+
+static void
+pb_cache_buffer_fence(struct pb_buffer *_buf,
+ struct pipe_fence_handle *fence)
+{
+ struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
+ pb_fence(buf->buffer, fence);
+}
+
+
+static void
+pb_cache_buffer_get_base_buffer(struct pb_buffer *_buf,
+ struct pb_buffer **base_buf,
+ pb_size *offset)
+{
+ struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
+ pb_get_base_buffer(buf->buffer, base_buf, offset);
+}
+
+
+const struct pb_vtbl
+pb_cache_buffer_vtbl = {
+ pb_cache_buffer_destroy,
+ pb_cache_buffer_map,
+ pb_cache_buffer_unmap,
+ pb_cache_buffer_validate,
+ pb_cache_buffer_fence,
+ pb_cache_buffer_get_base_buffer
+};
+
+
+static INLINE boolean
+pb_cache_is_buffer_compat(struct pb_cache_buffer *buf,
+ pb_size size,
+ const struct pb_desc *desc)
+{
+ if(buf->base.base.size < size)
+ return FALSE;
+
+   /* Be lenient with size, but do not reuse buffers more than twice the
+    * requested size, to limit wasted memory.
+    */
+ if(buf->base.base.size >= 2*size)
+ return FALSE;
+
+ if(!pb_check_alignment(desc->alignment, buf->base.base.alignment))
+ return FALSE;
+
+ if(!pb_check_usage(desc->usage, buf->base.base.usage))
+ return FALSE;
+
+ return TRUE;
+}
+
+
+static struct pb_buffer *
+pb_cache_manager_create_buffer(struct pb_manager *_mgr,
+ pb_size size,
+ const struct pb_desc *desc)
+{
+ struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
+ struct pb_cache_buffer *buf;
+ struct pb_cache_buffer *curr_buf;
+ struct list_head *curr, *next;
+ struct util_time now;
+
+ pipe_mutex_lock(mgr->mutex);
+
+ buf = NULL;
+ curr = mgr->delayed.next;
+ next = curr->next;
+
+ /* search in the expired buffers, freeing them in the process */
+ util_time_get(&now);
+ while(curr != &mgr->delayed) {
+ curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
+ if(!buf && pb_cache_is_buffer_compat(curr_buf, size, desc))
+ buf = curr_buf;
+ else if(util_time_timeout(&curr_buf->start, &curr_buf->end, &now))
+ _pb_cache_buffer_destroy(curr_buf);
+ else
+         /* This buffer (and all those after it) is still hot in cache */
+ break;
+ curr = next;
+ next = curr->next;
+ }
+
+ /* keep searching in the hot buffers */
+ if(!buf) {
+ while(curr != &mgr->delayed) {
+ curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
+ if(pb_cache_is_buffer_compat(curr_buf, size, desc)) {
+ buf = curr_buf;
+ break;
+ }
+ /* no need to check the timeout here */
+ curr = next;
+ next = curr->next;
+ }
+ }
+
+ if(buf) {
+ LIST_DEL(&buf->head);
+ pipe_mutex_unlock(mgr->mutex);
+ /* Increase refcount */
+ pb_reference((struct pb_buffer**)&buf, &buf->base);
+ return &buf->base;
+ }
+
+ pipe_mutex_unlock(mgr->mutex);
+
+ buf = CALLOC_STRUCT(pb_cache_buffer);
+ if(!buf)
+ return NULL;
+
+ buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
+ if(!buf->buffer) {
+ FREE(buf);
+ return NULL;
+ }
+
+ assert(pipe_is_referenced(&buf->buffer->base.reference));
+ assert(pb_check_alignment(desc->alignment, buf->buffer->base.alignment));
+ assert(pb_check_usage(desc->usage, buf->buffer->base.usage));
+ assert(buf->buffer->base.size >= size);
+
+ pipe_reference_init(&buf->base.base.reference, 1);
+ buf->base.base.alignment = buf->buffer->base.alignment;
+ buf->base.base.usage = buf->buffer->base.usage;
+ buf->base.base.size = buf->buffer->base.size;
+
+ buf->base.vtbl = &pb_cache_buffer_vtbl;
+ buf->mgr = mgr;
+
+ return &buf->base;
+}
+
+
+static void
+pb_cache_manager_flush(struct pb_manager *_mgr)
+{
+ struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
+ struct list_head *curr, *next;
+ struct pb_cache_buffer *buf;
+
+ pipe_mutex_lock(mgr->mutex);
+ curr = mgr->delayed.next;
+ next = curr->next;
+ while(curr != &mgr->delayed) {
+ buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
+ _pb_cache_buffer_destroy(buf);
+ curr = next;
+ next = curr->next;
+ }
+ pipe_mutex_unlock(mgr->mutex);
+
+ assert(mgr->provider->flush);
+ if(mgr->provider->flush)
+ mgr->provider->flush(mgr->provider);
+}
+
+
+static void
+pb_cache_manager_destroy(struct pb_manager *mgr)
+{
+ pb_cache_manager_flush(mgr);
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pb_cache_manager_create(struct pb_manager *provider,
+ unsigned usecs)
+{
+ struct pb_cache_manager *mgr;
+
+ if(!provider)
+ return NULL;
+
+ mgr = CALLOC_STRUCT(pb_cache_manager);
+ if (!mgr)
+ return NULL;
+
+ mgr->base.destroy = pb_cache_manager_destroy;
+ mgr->base.create_buffer = pb_cache_manager_create_buffer;
+ mgr->base.flush = pb_cache_manager_flush;
+ mgr->provider = provider;
+ mgr->usecs = usecs;
+ LIST_INITHEAD(&mgr->delayed);
+ mgr->numDelayed = 0;
+ pipe_mutex_init(mgr->mutex);
+
+ return &mgr->base;
+}
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
new file mode 100644
index 0000000000..1b4df28c70
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c
@@ -0,0 +1,494 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Debug buffer manager to detect buffer under- and overflows.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+#include "pipe/p_thread.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+#include "util/u_time.h"
+#include "util/u_debug_stack.h"
+
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+#ifdef DEBUG
+
+
+#define PB_DEBUG_CREATE_BACKTRACE 8
+#define PB_DEBUG_MAP_BACKTRACE 8
+
+
+/**
+ * Convenience macro (type safe).
+ */
+#define SUPER(__derived) (&(__derived)->base)
+
+
+struct pb_debug_manager;
+
+
+/**
+ * Wrapper around a pipe buffer which adds under- and overflow detection.
+ */
+struct pb_debug_buffer
+{
+ struct pb_buffer base;
+
+ struct pb_buffer *buffer;
+ struct pb_debug_manager *mgr;
+
+ pb_size underflow_size;
+ pb_size overflow_size;
+
+ struct debug_stack_frame create_backtrace[PB_DEBUG_CREATE_BACKTRACE];
+
+ pipe_mutex mutex;
+ unsigned map_count;
+ struct debug_stack_frame map_backtrace[PB_DEBUG_MAP_BACKTRACE];
+
+ struct list_head head;
+};
+
+
+struct pb_debug_manager
+{
+ struct pb_manager base;
+
+ struct pb_manager *provider;
+
+ pb_size underflow_size;
+ pb_size overflow_size;
+
+ pipe_mutex mutex;
+ struct list_head list;
+};
+
+
+static INLINE struct pb_debug_buffer *
+pb_debug_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ return (struct pb_debug_buffer *)buf;
+}
+
+
+static INLINE struct pb_debug_manager *
+pb_debug_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pb_debug_manager *)mgr;
+}
+
+
+static const uint8_t random_pattern[32] = {
+ 0xaf, 0xcf, 0xa5, 0xa2, 0xc2, 0x63, 0x15, 0x1a,
+ 0x7e, 0xe2, 0x7e, 0x84, 0x15, 0x49, 0xa2, 0x1e,
+ 0x49, 0x63, 0xf5, 0x52, 0x74, 0x66, 0x9e, 0xc4,
+ 0x6d, 0xcf, 0x2c, 0x4a, 0x74, 0xe6, 0xfd, 0x94
+};
+
+
+static INLINE void
+fill_random_pattern(uint8_t *dst, pb_size size)
+{
+ pb_size i = 0;
+ while(size--) {
+ *dst++ = random_pattern[i++];
+ i &= sizeof(random_pattern) - 1;
+ }
+}
+
+
+static INLINE boolean
+check_random_pattern(const uint8_t *dst, pb_size size,
+ pb_size *min_ofs, pb_size *max_ofs)
+{
+ boolean result = TRUE;
+ pb_size i;
+ *min_ofs = size;
+ *max_ofs = 0;
+ for(i = 0; i < size; ++i) {
+ if(*dst++ != random_pattern[i % sizeof(random_pattern)]) {
+ *min_ofs = MIN2(*min_ofs, i);
+ *max_ofs = MAX2(*max_ofs, i);
+ result = FALSE;
+ }
+ }
+ return result;
+}
+
+
+static void
+pb_debug_buffer_fill(struct pb_debug_buffer *buf)
+{
+ uint8_t *map;
+
+ map = pb_map(buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
+ assert(map);
+ if(map) {
+ fill_random_pattern(map, buf->underflow_size);
+ fill_random_pattern(map + buf->underflow_size + buf->base.base.size,
+ buf->overflow_size);
+ pb_unmap(buf->buffer);
+ }
+}
+
+
+/**
+ * Check for under/overflows.
+ *
+ * Should be called with the buffer unmapped.
+ */
+static void
+pb_debug_buffer_check(struct pb_debug_buffer *buf)
+{
+ uint8_t *map;
+
+ map = pb_map(buf->buffer, PIPE_BUFFER_USAGE_CPU_READ);
+ assert(map);
+ if(map) {
+ boolean underflow, overflow;
+ pb_size min_ofs, max_ofs;
+
+ underflow = !check_random_pattern(map, buf->underflow_size,
+ &min_ofs, &max_ofs);
+ if(underflow) {
+ debug_printf("buffer underflow (offset -%u%s to -%u bytes) detected\n",
+ buf->underflow_size - min_ofs,
+ min_ofs == 0 ? "+" : "",
+ buf->underflow_size - max_ofs);
+ }
+
+ overflow = !check_random_pattern(map + buf->underflow_size + buf->base.base.size,
+ buf->overflow_size,
+ &min_ofs, &max_ofs);
+ if(overflow) {
+ debug_printf("buffer overflow (size %u plus offset %u to %u%s bytes) detected\n",
+ buf->base.base.size,
+ min_ofs,
+ max_ofs,
+ max_ofs == buf->overflow_size - 1 ? "+" : "");
+ }
+
+ if(underflow || overflow)
+ debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);
+
+ debug_assert(!underflow && !overflow);
+
+ /* re-fill if not aborted */
+ if(underflow)
+ fill_random_pattern(map, buf->underflow_size);
+ if(overflow)
+ fill_random_pattern(map + buf->underflow_size + buf->base.base.size,
+ buf->overflow_size);
+
+ pb_unmap(buf->buffer);
+ }
+}
+
+
+static void
+pb_debug_buffer_destroy(struct pb_buffer *_buf)
+{
+ struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
+ struct pb_debug_manager *mgr = buf->mgr;
+
+ assert(!pipe_is_referenced(&buf->base.base.reference));
+
+ pb_debug_buffer_check(buf);
+
+ pipe_mutex_lock(mgr->mutex);
+ LIST_DEL(&buf->head);
+ pipe_mutex_unlock(mgr->mutex);
+
+ pipe_mutex_destroy(buf->mutex);
+
+ pb_reference(&buf->buffer, NULL);
+ FREE(buf);
+}
+
+
+static void *
+pb_debug_buffer_map(struct pb_buffer *_buf,
+ unsigned flags)
+{
+ struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
+ void *map;
+
+ pb_debug_buffer_check(buf);
+
+ map = pb_map(buf->buffer, flags);
+ if(!map)
+ return NULL;
+
+   pipe_mutex_lock(buf->mutex);
+   ++buf->map_count;
+   debug_backtrace_capture(buf->map_backtrace, 1, PB_DEBUG_MAP_BACKTRACE);
+   pipe_mutex_unlock(buf->mutex);
+
+ return (uint8_t *)map + buf->underflow_size;
+}
+
+
+static void
+pb_debug_buffer_unmap(struct pb_buffer *_buf)
+{
+ struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
+
+ pipe_mutex_lock(buf->mutex);
+ assert(buf->map_count);
+ if(buf->map_count)
+ --buf->map_count;
+ pipe_mutex_unlock(buf->mutex);
+
+ pb_unmap(buf->buffer);
+
+ pb_debug_buffer_check(buf);
+}
+
+
+static void
+pb_debug_buffer_get_base_buffer(struct pb_buffer *_buf,
+ struct pb_buffer **base_buf,
+ pb_size *offset)
+{
+ struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
+ pb_get_base_buffer(buf->buffer, base_buf, offset);
+ *offset += buf->underflow_size;
+}
+
+
+static enum pipe_error
+pb_debug_buffer_validate(struct pb_buffer *_buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
+
+ pipe_mutex_lock(buf->mutex);
+ if(buf->map_count) {
+ debug_printf("%s: attempting to validate a mapped buffer\n", __FUNCTION__);
+ debug_printf("last map backtrace is\n");
+ debug_backtrace_dump(buf->map_backtrace, PB_DEBUG_MAP_BACKTRACE);
+ }
+ pipe_mutex_unlock(buf->mutex);
+
+ pb_debug_buffer_check(buf);
+
+ return pb_validate(buf->buffer, vl, flags);
+}
+
+
+static void
+pb_debug_buffer_fence(struct pb_buffer *_buf,
+ struct pipe_fence_handle *fence)
+{
+ struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
+ pb_fence(buf->buffer, fence);
+}
+
+
+const struct pb_vtbl
+pb_debug_buffer_vtbl = {
+ pb_debug_buffer_destroy,
+ pb_debug_buffer_map,
+ pb_debug_buffer_unmap,
+ pb_debug_buffer_validate,
+ pb_debug_buffer_fence,
+ pb_debug_buffer_get_base_buffer
+};
+
+
+static void
+pb_debug_manager_dump(struct pb_debug_manager *mgr)
+{
+ struct list_head *curr, *next;
+ struct pb_debug_buffer *buf;
+
+ pipe_mutex_lock(mgr->mutex);
+
+ curr = mgr->list.next;
+ next = curr->next;
+ while(curr != &mgr->list) {
+ buf = LIST_ENTRY(struct pb_debug_buffer, curr, head);
+
+ debug_printf("buffer = %p\n", buf);
+ debug_printf(" .size = %p\n", buf->base.base.size);
+ debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);
+
+ curr = next;
+ next = curr->next;
+ }
+
+ pipe_mutex_unlock(mgr->mutex);
+}
+
+
+static struct pb_buffer *
+pb_debug_manager_create_buffer(struct pb_manager *_mgr,
+ pb_size size,
+ const struct pb_desc *desc)
+{
+ struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
+ struct pb_debug_buffer *buf;
+ struct pb_desc real_desc;
+ pb_size real_size;
+
+ buf = CALLOC_STRUCT(pb_debug_buffer);
+ if(!buf)
+ return NULL;
+
+ real_size = mgr->underflow_size + size + mgr->overflow_size;
+ real_desc = *desc;
+ real_desc.usage |= PIPE_BUFFER_USAGE_CPU_WRITE;
+ real_desc.usage |= PIPE_BUFFER_USAGE_CPU_READ;
+
+ buf->buffer = mgr->provider->create_buffer(mgr->provider,
+ real_size,
+ &real_desc);
+ if(!buf->buffer) {
+ FREE(buf);
+#if 0
+ pipe_mutex_lock(mgr->mutex);
+ debug_printf("%s: failed to create buffer\n", __FUNCTION__);
+ if(!LIST_IS_EMPTY(&mgr->list))
+ pb_debug_manager_dump(mgr);
+ pipe_mutex_unlock(mgr->mutex);
+#endif
+ return NULL;
+ }
+
+ assert(pipe_is_referenced(&buf->buffer->base.reference));
+ assert(pb_check_alignment(real_desc.alignment, buf->buffer->base.alignment));
+ assert(pb_check_usage(real_desc.usage, buf->buffer->base.usage));
+ assert(buf->buffer->base.size >= real_size);
+
+ pipe_reference_init(&buf->base.base.reference, 1);
+ buf->base.base.alignment = desc->alignment;
+ buf->base.base.usage = desc->usage;
+ buf->base.base.size = size;
+
+ buf->base.vtbl = &pb_debug_buffer_vtbl;
+ buf->mgr = mgr;
+
+ buf->underflow_size = mgr->underflow_size;
+ buf->overflow_size = buf->buffer->base.size - buf->underflow_size - size;
+
+ debug_backtrace_capture(buf->create_backtrace, 1, PB_DEBUG_CREATE_BACKTRACE);
+
+ pb_debug_buffer_fill(buf);
+
+ pipe_mutex_init(buf->mutex);
+
+ pipe_mutex_lock(mgr->mutex);
+ LIST_ADDTAIL(&buf->head, &mgr->list);
+ pipe_mutex_unlock(mgr->mutex);
+
+ return &buf->base;
+}
+
+
+static void
+pb_debug_manager_flush(struct pb_manager *_mgr)
+{
+ struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
+ assert(mgr->provider->flush);
+ if(mgr->provider->flush)
+ mgr->provider->flush(mgr->provider);
+}
+
+
+static void
+pb_debug_manager_destroy(struct pb_manager *_mgr)
+{
+ struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
+
+ pipe_mutex_lock(mgr->mutex);
+ if(!LIST_IS_EMPTY(&mgr->list)) {
+ debug_printf("%s: unfreed buffers\n", __FUNCTION__);
+ pb_debug_manager_dump(mgr);
+ }
+ pipe_mutex_unlock(mgr->mutex);
+
+ pipe_mutex_destroy(mgr->mutex);
+ mgr->provider->destroy(mgr->provider);
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pb_debug_manager_create(struct pb_manager *provider,
+ pb_size underflow_size, pb_size overflow_size)
+{
+ struct pb_debug_manager *mgr;
+
+ if(!provider)
+ return NULL;
+
+ mgr = CALLOC_STRUCT(pb_debug_manager);
+ if (!mgr)
+ return NULL;
+
+ mgr->base.destroy = pb_debug_manager_destroy;
+ mgr->base.create_buffer = pb_debug_manager_create_buffer;
+ mgr->base.flush = pb_debug_manager_flush;
+ mgr->provider = provider;
+ mgr->underflow_size = underflow_size;
+ mgr->overflow_size = overflow_size;
+
+ pipe_mutex_init(mgr->mutex);
+ LIST_INITHEAD(&mgr->list);
+
+ return &mgr->base;
+}
+
+
+#else /* !DEBUG */
+
+
+struct pb_manager *
+pb_debug_manager_create(struct pb_manager *provider,
+ pb_size underflow_size, pb_size overflow_size)
+{
+ return provider;
+}
+
+
+#endif /* !DEBUG */
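+
+
+/* A minimal usage sketch (not part of the original patch): how a winsys
+ * could wrap its real buffer manager with the debug manager so that every
+ * allocation gets guard bands that are checked on map/unmap/destroy.  The
+ * "real_provider" variable and the 64-byte band sizes are assumptions made
+ * for the example only.
+ */
+#if 0
+static struct pb_manager *
+example_wrap_with_debug(struct pb_manager *real_provider)
+{
+   /* 64 bytes of random pattern before and after every buffer */
+   struct pb_manager *mgr = pb_debug_manager_create(real_provider, 64, 64);
+   if(!mgr)
+      return real_provider;   /* fall back to the plain provider */
+   return mgr;
+}
+#endif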
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c
new file mode 100644
index 0000000000..97dd1427fd
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_fenced.c
@@ -0,0 +1,152 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * A buffer manager that wraps buffers in fenced buffers.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.dot.com>
+ */
+
+
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+
+#include "pb_buffer.h"
+#include "pb_buffer_fenced.h"
+#include "pb_bufmgr.h"
+
+
+struct fenced_pb_manager
+{
+ struct pb_manager base;
+
+ struct pb_manager *provider;
+
+ struct fenced_buffer_list *fenced_list;
+};
+
+
+static INLINE struct fenced_pb_manager *
+fenced_pb_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct fenced_pb_manager *)mgr;
+}
+
+
+static struct pb_buffer *
+fenced_bufmgr_create_buffer(struct pb_manager *mgr,
+ pb_size size,
+ const struct pb_desc *desc)
+{
+ struct fenced_pb_manager *fenced_mgr = fenced_pb_manager(mgr);
+ struct pb_buffer *buf;
+ struct pb_buffer *fenced_buf;
+
+ /* check for free buffers before allocating new ones */
+ fenced_buffer_list_check_free(fenced_mgr->fenced_list, 0);
+
+ buf = fenced_mgr->provider->create_buffer(fenced_mgr->provider, size, desc);
+ if(!buf) {
+ /* try harder to get a buffer */
+ fenced_buffer_list_check_free(fenced_mgr->fenced_list, 1);
+
+ buf = fenced_mgr->provider->create_buffer(fenced_mgr->provider, size, desc);
+ if(!buf) {
+#if 0
+ fenced_buffer_list_dump(fenced_mgr->fenced_list);
+#endif
+
+ /* give up */
+ return NULL;
+ }
+ }
+
+ fenced_buf = fenced_buffer_create(fenced_mgr->fenced_list, buf);
+ if(!fenced_buf) {
+ pb_reference(&buf, NULL);
+ }
+
+ return fenced_buf;
+}
+
+
+static void
+fenced_bufmgr_flush(struct pb_manager *mgr)
+{
+ struct fenced_pb_manager *fenced_mgr = fenced_pb_manager(mgr);
+
+ fenced_buffer_list_check_free(fenced_mgr->fenced_list, TRUE);
+
+ assert(fenced_mgr->provider->flush);
+ if(fenced_mgr->provider->flush)
+ fenced_mgr->provider->flush(fenced_mgr->provider);
+}
+
+
+static void
+fenced_bufmgr_destroy(struct pb_manager *mgr)
+{
+ struct fenced_pb_manager *fenced_mgr = fenced_pb_manager(mgr);
+
+ fenced_buffer_list_destroy(fenced_mgr->fenced_list);
+
+ if(fenced_mgr->provider)
+ fenced_mgr->provider->destroy(fenced_mgr->provider);
+
+ FREE(fenced_mgr);
+}
+
+
+struct pb_manager *
+fenced_bufmgr_create(struct pb_manager *provider,
+ struct pb_fence_ops *ops)
+{
+ struct fenced_pb_manager *fenced_mgr;
+
+ if(!provider)
+ return NULL;
+
+ fenced_mgr = CALLOC_STRUCT(fenced_pb_manager);
+ if (!fenced_mgr)
+ return NULL;
+
+ fenced_mgr->base.destroy = fenced_bufmgr_destroy;
+ fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
+ fenced_mgr->base.flush = fenced_bufmgr_flush;
+
+ fenced_mgr->provider = provider;
+ fenced_mgr->fenced_list = fenced_buffer_list_create(ops);
+ if(!fenced_mgr->fenced_list) {
+ FREE(fenced_mgr);
+ return NULL;
+ }
+
+ return &fenced_mgr->base;
+}
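+
+
+/* A minimal usage sketch (not part of the original patch): typical
+ * construction order in a winsys.  "hw_mgr" and "winsys_fence_ops" are
+ * assumptions; the real provider and fence ops come from the driver's
+ * winsys layer.
+ */
+#if 0
+static struct pb_manager *
+example_create_fenced(struct pb_manager *hw_mgr,
+                      struct pb_fence_ops *winsys_fence_ops)
+{
+   /* Buffers handed out by the returned manager are wrapped in fenced
+    * buffers, so their storage is only released once the GPU is done
+    * with them. */
+   return fenced_bufmgr_create(hw_mgr, winsys_fence_ops);
+}
+#endif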
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
new file mode 100644
index 0000000000..6400fc5b0a
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_mm.c
@@ -0,0 +1,318 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Buffer manager using the old texture memory manager.
+ *
+ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_defines.h"
+#include "util/u_debug.h"
+#include "pipe/p_thread.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+#include "util/u_mm.h"
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+/**
+ * Convenience macro (type safe).
+ */
+#define SUPER(__derived) (&(__derived)->base)
+
+
+struct mm_pb_manager
+{
+ struct pb_manager base;
+
+ pipe_mutex mutex;
+
+ pb_size size;
+ struct mem_block *heap;
+
+ pb_size align2;
+
+ struct pb_buffer *buffer;
+ void *map;
+};
+
+
+static INLINE struct mm_pb_manager *
+mm_pb_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct mm_pb_manager *)mgr;
+}
+
+
+struct mm_buffer
+{
+ struct pb_buffer base;
+
+ struct mm_pb_manager *mgr;
+
+ struct mem_block *block;
+};
+
+
+static INLINE struct mm_buffer *
+mm_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ return (struct mm_buffer *)buf;
+}
+
+
+static void
+mm_buffer_destroy(struct pb_buffer *buf)
+{
+ struct mm_buffer *mm_buf = mm_buffer(buf);
+ struct mm_pb_manager *mm = mm_buf->mgr;
+
+ assert(!pipe_is_referenced(&mm_buf->base.base.reference));
+
+ pipe_mutex_lock(mm->mutex);
+ u_mmFreeMem(mm_buf->block);
+ FREE(mm_buf);
+ pipe_mutex_unlock(mm->mutex);
+}
+
+
+static void *
+mm_buffer_map(struct pb_buffer *buf,
+ unsigned flags)
+{
+ struct mm_buffer *mm_buf = mm_buffer(buf);
+ struct mm_pb_manager *mm = mm_buf->mgr;
+
+ return (unsigned char *) mm->map + mm_buf->block->ofs;
+}
+
+
+static void
+mm_buffer_unmap(struct pb_buffer *buf)
+{
+ /* No-op */
+}
+
+
+static enum pipe_error
+mm_buffer_validate(struct pb_buffer *buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ struct mm_buffer *mm_buf = mm_buffer(buf);
+ struct mm_pb_manager *mm = mm_buf->mgr;
+ return pb_validate(mm->buffer, vl, flags);
+}
+
+
+static void
+mm_buffer_fence(struct pb_buffer *buf,
+ struct pipe_fence_handle *fence)
+{
+ struct mm_buffer *mm_buf = mm_buffer(buf);
+ struct mm_pb_manager *mm = mm_buf->mgr;
+ pb_fence(mm->buffer, fence);
+}
+
+
+static void
+mm_buffer_get_base_buffer(struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ pb_size *offset)
+{
+ struct mm_buffer *mm_buf = mm_buffer(buf);
+ struct mm_pb_manager *mm = mm_buf->mgr;
+ pb_get_base_buffer(mm->buffer, base_buf, offset);
+ *offset += mm_buf->block->ofs;
+}
+
+
+static const struct pb_vtbl
+mm_buffer_vtbl = {
+ mm_buffer_destroy,
+ mm_buffer_map,
+ mm_buffer_unmap,
+ mm_buffer_validate,
+ mm_buffer_fence,
+ mm_buffer_get_base_buffer
+};
+
+
+static struct pb_buffer *
+mm_bufmgr_create_buffer(struct pb_manager *mgr,
+ pb_size size,
+ const struct pb_desc *desc)
+{
+ struct mm_pb_manager *mm = mm_pb_manager(mgr);
+ struct mm_buffer *mm_buf;
+
+   /* We don't handle alignments larger than the one initially set up */
+ assert(pb_check_alignment(desc->alignment, (pb_size)1 << mm->align2));
+ if(!pb_check_alignment(desc->alignment, (pb_size)1 << mm->align2))
+ return NULL;
+
+ pipe_mutex_lock(mm->mutex);
+
+ mm_buf = CALLOC_STRUCT(mm_buffer);
+ if (!mm_buf) {
+ pipe_mutex_unlock(mm->mutex);
+ return NULL;
+ }
+
+ pipe_reference_init(&mm_buf->base.base.reference, 1);
+ mm_buf->base.base.alignment = desc->alignment;
+ mm_buf->base.base.usage = desc->usage;
+ mm_buf->base.base.size = size;
+
+ mm_buf->base.vtbl = &mm_buffer_vtbl;
+
+ mm_buf->mgr = mm;
+
+ mm_buf->block = u_mmAllocMem(mm->heap, (int)size, (int)mm->align2, 0);
+ if(!mm_buf->block) {
+#if 0
+ debug_printf("warning: heap full\n");
+ mmDumpMemInfo(mm->heap);
+#endif
+ FREE(mm_buf);
+ pipe_mutex_unlock(mm->mutex);
+ return NULL;
+ }
+
+ /* Some sanity checks */
+ assert(0 <= (pb_size)mm_buf->block->ofs && (pb_size)mm_buf->block->ofs < mm->size);
+ assert(size <= (pb_size)mm_buf->block->size && (pb_size)mm_buf->block->ofs + (pb_size)mm_buf->block->size <= mm->size);
+
+ pipe_mutex_unlock(mm->mutex);
+ return SUPER(mm_buf);
+}
+
+
+static void
+mm_bufmgr_flush(struct pb_manager *mgr)
+{
+ /* No-op */
+}
+
+
+static void
+mm_bufmgr_destroy(struct pb_manager *mgr)
+{
+ struct mm_pb_manager *mm = mm_pb_manager(mgr);
+
+ pipe_mutex_lock(mm->mutex);
+
+ u_mmDestroy(mm->heap);
+
+ pb_unmap(mm->buffer);
+ pb_reference(&mm->buffer, NULL);
+
+ pipe_mutex_unlock(mm->mutex);
+
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+mm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
+ pb_size size, pb_size align2)
+{
+ struct mm_pb_manager *mm;
+
+ if(!buffer)
+ return NULL;
+
+ mm = CALLOC_STRUCT(mm_pb_manager);
+ if (!mm)
+ return NULL;
+
+ mm->base.destroy = mm_bufmgr_destroy;
+ mm->base.create_buffer = mm_bufmgr_create_buffer;
+ mm->base.flush = mm_bufmgr_flush;
+
+ mm->size = size;
+   mm->align2 = align2;      /* buffers are aligned to (1 << align2) bytes */
+
+ pipe_mutex_init(mm->mutex);
+
+ mm->buffer = buffer;
+
+ mm->map = pb_map(mm->buffer,
+ PIPE_BUFFER_USAGE_CPU_READ |
+ PIPE_BUFFER_USAGE_CPU_WRITE);
+ if(!mm->map)
+ goto failure;
+
+ mm->heap = u_mmInit(0, (int)size);
+ if (!mm->heap)
+ goto failure;
+
+ return SUPER(mm);
+
+failure:
+   if(mm->heap)
+      u_mmDestroy(mm->heap);
+   if(mm->map)
+      pb_unmap(mm->buffer);
+   FREE(mm);
+   return NULL;
+}
+
+
+struct pb_manager *
+mm_bufmgr_create(struct pb_manager *provider,
+ pb_size size, pb_size align2)
+{
+ struct pb_buffer *buffer;
+ struct pb_manager *mgr;
+ struct pb_desc desc;
+
+ if(!provider)
+ return NULL;
+
+ memset(&desc, 0, sizeof(desc));
+ desc.alignment = 1 << align2;
+
+ buffer = provider->create_buffer(provider, size, &desc);
+ if (!buffer)
+ return NULL;
+
+ mgr = mm_bufmgr_create_from_buffer(buffer, size, align2);
+ if (!mgr) {
+ pb_reference(&buffer, NULL);
+ return NULL;
+ }
+
+ return mgr;
+}
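+
+
+/* A minimal usage sketch (not part of the original patch): sub-allocating a
+ * 1 MiB region with 2^6 = 64 byte alignment.  "provider" is assumed to be
+ * the winsys' real buffer manager.
+ */
+#if 0
+static struct pb_manager *
+example_create_mm(struct pb_manager *provider)
+{
+   /* Note that the third argument is the log2 of the alignment, not the
+    * alignment itself: 6 means 1 << 6 = 64 bytes. */
+   return mm_bufmgr_create(provider, 1024 * 1024, 6);
+}
+#endif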
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_ondemand.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_ondemand.c
new file mode 100644
index 0000000000..cb32d25136
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_ondemand.c
@@ -0,0 +1,305 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * A variation of malloc buffers which get transferred to real graphics memory
+ * when there is an attempt to validate them.
+ *
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+struct pb_ondemand_manager;
+
+
+struct pb_ondemand_buffer
+{
+ struct pb_buffer base;
+
+ struct pb_ondemand_manager *mgr;
+
+ /** Regular malloc'ed memory */
+ void *data;
+ unsigned mapcount;
+
+ /** Real buffer */
+ struct pb_buffer *buffer;
+ pb_size size;
+ struct pb_desc desc;
+};
+
+
+struct pb_ondemand_manager
+{
+ struct pb_manager base;
+
+ struct pb_manager *provider;
+};
+
+
+extern const struct pb_vtbl pb_ondemand_buffer_vtbl;
+
+static INLINE struct pb_ondemand_buffer *
+pb_ondemand_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ if (!buf)
+ return NULL;
+ assert(buf->vtbl == &pb_ondemand_buffer_vtbl);
+ return (struct pb_ondemand_buffer *)buf;
+}
+
+static INLINE struct pb_ondemand_manager *
+pb_ondemand_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pb_ondemand_manager *)mgr;
+}
+
+
+static void
+pb_ondemand_buffer_destroy(struct pb_buffer *_buf)
+{
+ struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
+
+ pb_reference(&buf->buffer, NULL);
+
+ align_free(buf->data);
+
+ FREE(buf);
+}
+
+
+static void *
+pb_ondemand_buffer_map(struct pb_buffer *_buf,
+ unsigned flags)
+{
+ struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
+
+ if(buf->buffer) {
+ assert(!buf->data);
+ return pb_map(buf->buffer, flags);
+ }
+ else {
+ assert(buf->data);
+ ++buf->mapcount;
+ return buf->data;
+ }
+}
+
+
+static void
+pb_ondemand_buffer_unmap(struct pb_buffer *_buf)
+{
+ struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
+
+ if(buf->buffer) {
+ assert(!buf->data);
+ pb_unmap(buf->buffer);
+ }
+ else {
+ assert(buf->data);
+ assert(buf->mapcount);
+ if(buf->mapcount)
+ --buf->mapcount;
+ }
+}
+
+
+static enum pipe_error
+pb_ondemand_buffer_instantiate(struct pb_ondemand_buffer *buf)
+{
+ if(!buf->buffer) {
+ struct pb_manager *provider = buf->mgr->provider;
+ uint8_t *map;
+
+ assert(!buf->mapcount);
+
+ buf->buffer = provider->create_buffer(provider, buf->size, &buf->desc);
+ if(!buf->buffer)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+      map = pb_map(buf->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
+ if(!map) {
+ pb_reference(&buf->buffer, NULL);
+ return PIPE_ERROR;
+ }
+
+ memcpy(map, buf->data, buf->size);
+
+ pb_unmap(buf->buffer);
+
+ if(!buf->mapcount) {
+         align_free(buf->data);
+ buf->data = NULL;
+ }
+ }
+
+ return PIPE_OK;
+}
+
+static enum pipe_error
+pb_ondemand_buffer_validate(struct pb_buffer *_buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
+ enum pipe_error ret;
+
+ assert(!buf->mapcount);
+ if(buf->mapcount)
+ return PIPE_ERROR;
+
+ ret = pb_ondemand_buffer_instantiate(buf);
+ if(ret != PIPE_OK)
+ return ret;
+
+ return pb_validate(buf->buffer, vl, flags);
+}
+
+
+static void
+pb_ondemand_buffer_fence(struct pb_buffer *_buf,
+ struct pipe_fence_handle *fence)
+{
+ struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
+
+ assert(buf->buffer);
+ if(!buf->buffer)
+ return;
+
+ pb_fence(buf->buffer, fence);
+}
+
+
+static void
+pb_ondemand_buffer_get_base_buffer(struct pb_buffer *_buf,
+ struct pb_buffer **base_buf,
+ pb_size *offset)
+{
+ struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
+
+ if(pb_ondemand_buffer_instantiate(buf) != PIPE_OK) {
+ assert(0);
+ *base_buf = &buf->base;
+ *offset = 0;
+ return;
+ }
+
+ pb_get_base_buffer(buf->buffer, base_buf, offset);
+}
+
+
+const struct pb_vtbl
+pb_ondemand_buffer_vtbl = {
+ pb_ondemand_buffer_destroy,
+ pb_ondemand_buffer_map,
+ pb_ondemand_buffer_unmap,
+ pb_ondemand_buffer_validate,
+ pb_ondemand_buffer_fence,
+ pb_ondemand_buffer_get_base_buffer
+};
+
+
+static struct pb_buffer *
+pb_ondemand_manager_create_buffer(struct pb_manager *_mgr,
+ pb_size size,
+ const struct pb_desc *desc)
+{
+ struct pb_ondemand_manager *mgr = pb_ondemand_manager(_mgr);
+ struct pb_ondemand_buffer *buf;
+
+ buf = CALLOC_STRUCT(pb_ondemand_buffer);
+ if(!buf)
+ return NULL;
+
+ pipe_reference_init(&buf->base.base.reference, 1);
+ buf->base.base.alignment = desc->alignment;
+ buf->base.base.usage = desc->usage;
+ buf->base.base.size = size;
+ buf->base.vtbl = &pb_ondemand_buffer_vtbl;
+
+ buf->mgr = mgr;
+
+ buf->data = align_malloc(size, desc->alignment < sizeof(void*) ? sizeof(void*) : desc->alignment);
+ if(!buf->data) {
+ FREE(buf);
+ return NULL;
+ }
+
+ buf->size = size;
+ buf->desc = *desc;
+
+ return &buf->base;
+}
+
+
+static void
+pb_ondemand_manager_flush(struct pb_manager *_mgr)
+{
+ struct pb_ondemand_manager *mgr = pb_ondemand_manager(_mgr);
+
+ mgr->provider->flush(mgr->provider);
+}
+
+
+static void
+pb_ondemand_manager_destroy(struct pb_manager *_mgr)
+{
+ struct pb_ondemand_manager *mgr = pb_ondemand_manager(_mgr);
+
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pb_ondemand_manager_create(struct pb_manager *provider)
+{
+ struct pb_ondemand_manager *mgr;
+
+ if(!provider)
+ return NULL;
+
+ mgr = CALLOC_STRUCT(pb_ondemand_manager);
+ if(!mgr)
+ return NULL;
+
+ mgr->base.destroy = pb_ondemand_manager_destroy;
+ mgr->base.create_buffer = pb_ondemand_manager_create_buffer;
+ mgr->base.flush = pb_ondemand_manager_flush;
+
+ mgr->provider = provider;
+
+ return &mgr->base;
+}
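+
+
+/* A minimal usage sketch (not part of the original patch): buffers from the
+ * on-demand manager live in malloc'ed memory until they are validated, at
+ * which point their contents are copied into a buffer from the provider.
+ * "gpu_mgr" and the descriptor values are assumptions for the example.
+ */
+#if 0
+static void
+example_ondemand_usage(struct pb_manager *gpu_mgr)
+{
+   struct pb_manager *mgr = pb_ondemand_manager_create(gpu_mgr);
+   struct pb_desc desc;
+   struct pb_buffer *buf;
+   void *map;
+
+   memset(&desc, 0, sizeof(desc));
+   desc.alignment = 64;
+   desc.usage = PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_GPU_READ;
+
+   /* Allocation and mapping only touch system memory... */
+   buf = mgr->create_buffer(mgr, 4096, &desc);
+   map = pb_map(buf, PIPE_BUFFER_USAGE_CPU_WRITE);
+   memset(map, 0, 4096);
+   pb_unmap(buf);
+
+   /* ...the copy into a provider buffer only happens when the buffer is
+    * first validated or its base buffer is queried. */
+   pb_reference(&buf, NULL);
+   mgr->destroy(mgr);
+}
+#endif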
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
new file mode 100644
index 0000000000..7fd65ed226
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_pool.c
@@ -0,0 +1,321 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Batch buffer pool management.
+ *
+ * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
+ * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+#include "pipe/p_thread.h"
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+/**
+ * Convenience macro (type safe).
+ */
+#define SUPER(__derived) (&(__derived)->base)
+
+
+struct pool_pb_manager
+{
+ struct pb_manager base;
+
+ pipe_mutex mutex;
+
+ pb_size bufSize;
+ pb_size bufAlign;
+
+ pb_size numFree;
+ pb_size numTot;
+
+ struct list_head free;
+
+ struct pb_buffer *buffer;
+ void *map;
+
+ struct pool_buffer *bufs;
+};
+
+
+static INLINE struct pool_pb_manager *
+pool_pb_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pool_pb_manager *)mgr;
+}
+
+
+struct pool_buffer
+{
+ struct pb_buffer base;
+
+ struct pool_pb_manager *mgr;
+
+ struct list_head head;
+
+ pb_size start;
+};
+
+
+static INLINE struct pool_buffer *
+pool_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ return (struct pool_buffer *)buf;
+}
+
+
+
+static void
+pool_buffer_destroy(struct pb_buffer *buf)
+{
+ struct pool_buffer *pool_buf = pool_buffer(buf);
+ struct pool_pb_manager *pool = pool_buf->mgr;
+
+ assert(!pipe_is_referenced(&pool_buf->base.base.reference));
+
+ pipe_mutex_lock(pool->mutex);
+ LIST_ADD(&pool_buf->head, &pool->free);
+ pool->numFree++;
+ pipe_mutex_unlock(pool->mutex);
+}
+
+
+static void *
+pool_buffer_map(struct pb_buffer *buf, unsigned flags)
+{
+ struct pool_buffer *pool_buf = pool_buffer(buf);
+ struct pool_pb_manager *pool = pool_buf->mgr;
+ void *map;
+
+ pipe_mutex_lock(pool->mutex);
+ map = (unsigned char *) pool->map + pool_buf->start;
+ pipe_mutex_unlock(pool->mutex);
+ return map;
+}
+
+
+static void
+pool_buffer_unmap(struct pb_buffer *buf)
+{
+ /* No-op */
+}
+
+
+static enum pipe_error
+pool_buffer_validate(struct pb_buffer *buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ struct pool_buffer *pool_buf = pool_buffer(buf);
+ struct pool_pb_manager *pool = pool_buf->mgr;
+ return pb_validate(pool->buffer, vl, flags);
+}
+
+
+static void
+pool_buffer_fence(struct pb_buffer *buf,
+ struct pipe_fence_handle *fence)
+{
+ struct pool_buffer *pool_buf = pool_buffer(buf);
+ struct pool_pb_manager *pool = pool_buf->mgr;
+ pb_fence(pool->buffer, fence);
+}
+
+
+static void
+pool_buffer_get_base_buffer(struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ pb_size *offset)
+{
+ struct pool_buffer *pool_buf = pool_buffer(buf);
+ struct pool_pb_manager *pool = pool_buf->mgr;
+ pb_get_base_buffer(pool->buffer, base_buf, offset);
+ *offset += pool_buf->start;
+}
+
+
+static const struct pb_vtbl
+pool_buffer_vtbl = {
+ pool_buffer_destroy,
+ pool_buffer_map,
+ pool_buffer_unmap,
+ pool_buffer_validate,
+ pool_buffer_fence,
+ pool_buffer_get_base_buffer
+};
+
+
+static struct pb_buffer *
+pool_bufmgr_create_buffer(struct pb_manager *mgr,
+ pb_size size,
+ const struct pb_desc *desc)
+{
+ struct pool_pb_manager *pool = pool_pb_manager(mgr);
+ struct pool_buffer *pool_buf;
+ struct list_head *item;
+
+ assert(size == pool->bufSize);
+ assert(pool->bufAlign % desc->alignment == 0);
+
+ pipe_mutex_lock(pool->mutex);
+
+ if (pool->numFree == 0) {
+ pipe_mutex_unlock(pool->mutex);
+ debug_printf("warning: out of fixed size buffer objects\n");
+ return NULL;
+ }
+
+ item = pool->free.next;
+
+ if (item == &pool->free) {
+ pipe_mutex_unlock(pool->mutex);
+ debug_printf("error: fixed size buffer pool corruption\n");
+ return NULL;
+ }
+
+ LIST_DEL(item);
+ --pool->numFree;
+
+ pipe_mutex_unlock(pool->mutex);
+
+ pool_buf = LIST_ENTRY(struct pool_buffer, item, head);
+ assert(!pipe_is_referenced(&pool_buf->base.base.reference));
+ pipe_reference_init(&pool_buf->base.base.reference, 1);
+ pool_buf->base.base.alignment = desc->alignment;
+ pool_buf->base.base.usage = desc->usage;
+
+ return SUPER(pool_buf);
+}
+
+
+static void
+pool_bufmgr_flush(struct pb_manager *mgr)
+{
+ /* No-op */
+}
+
+
+static void
+pool_bufmgr_destroy(struct pb_manager *mgr)
+{
+ struct pool_pb_manager *pool = pool_pb_manager(mgr);
+ pipe_mutex_lock(pool->mutex);
+
+ FREE(pool->bufs);
+
+ pb_unmap(pool->buffer);
+ pb_reference(&pool->buffer, NULL);
+
+ pipe_mutex_unlock(pool->mutex);
+
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pool_bufmgr_create(struct pb_manager *provider,
+ pb_size numBufs,
+ pb_size bufSize,
+ const struct pb_desc *desc)
+{
+ struct pool_pb_manager *pool;
+ struct pool_buffer *pool_buf;
+ pb_size i;
+
+ if(!provider)
+ return NULL;
+
+ pool = CALLOC_STRUCT(pool_pb_manager);
+ if (!pool)
+ return NULL;
+
+ pool->base.destroy = pool_bufmgr_destroy;
+ pool->base.create_buffer = pool_bufmgr_create_buffer;
+ pool->base.flush = pool_bufmgr_flush;
+
+ LIST_INITHEAD(&pool->free);
+
+ pool->numTot = numBufs;
+ pool->numFree = numBufs;
+ pool->bufSize = bufSize;
+ pool->bufAlign = desc->alignment;
+
+ pipe_mutex_init(pool->mutex);
+
+ pool->buffer = provider->create_buffer(provider, numBufs*bufSize, desc);
+ if (!pool->buffer)
+ goto failure;
+
+ pool->map = pb_map(pool->buffer,
+ PIPE_BUFFER_USAGE_CPU_READ |
+ PIPE_BUFFER_USAGE_CPU_WRITE);
+ if(!pool->map)
+ goto failure;
+
+ pool->bufs = (struct pool_buffer *)CALLOC(numBufs, sizeof(*pool->bufs));
+ if (!pool->bufs)
+ goto failure;
+
+ pool_buf = pool->bufs;
+ for (i = 0; i < numBufs; ++i) {
+ pipe_reference_init(&pool_buf->base.base.reference, 0);
+ pool_buf->base.base.alignment = 0;
+ pool_buf->base.base.usage = 0;
+ pool_buf->base.base.size = bufSize;
+ pool_buf->base.vtbl = &pool_buffer_vtbl;
+ pool_buf->mgr = pool;
+ pool_buf->start = i * bufSize;
+ LIST_ADDTAIL(&pool_buf->head, &pool->free);
+ pool_buf++;
+ }
+
+ return SUPER(pool);
+
+failure:
+ if(pool->bufs)
+ FREE(pool->bufs);
+ if(pool->map)
+ pb_unmap(pool->buffer);
+ if(pool->buffer)
+ pb_reference(&pool->buffer, NULL);
+   FREE(pool);
+ return NULL;
+}
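+
+
+/* A minimal usage sketch (not part of the original patch): a pool of 32
+ * fixed-size 64 KiB batch buffers.  Every buffer requested from this manager
+ * must be exactly bufSize bytes.  "provider" and the descriptor values are
+ * assumptions for the example.
+ */
+#if 0
+static struct pb_manager *
+example_create_batch_pool(struct pb_manager *provider)
+{
+   struct pb_desc desc;
+
+   memset(&desc, 0, sizeof(desc));
+   desc.alignment = 4096;
+   desc.usage = PIPE_BUFFER_USAGE_GPU_READ;
+
+   /* One backing buffer of 32 * 64 KiB is allocated up front and carved
+    * into fixed-size pieces handed out by create_buffer(). */
+   return pool_bufmgr_create(provider, 32, 64 * 1024, &desc);
+}
+#endif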
diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
new file mode 100644
index 0000000000..e7352e90db
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
@@ -0,0 +1,584 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, FREE of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * S-lab pool implementation.
+ *
+ * @sa http://en.wikipedia.org/wiki/Slab_allocation
+ *
+ * @author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_error.h"
+#include "util/u_debug.h"
+#include "pipe/p_thread.h"
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+#include "util/u_time.h"
+
+#include "pb_buffer.h"
+#include "pb_bufmgr.h"
+
+
+struct pb_slab;
+
+
+/**
+ * Buffer in a slab.
+ *
+ * Sub-allocation of a contiguous buffer.
+ */
+struct pb_slab_buffer
+{
+ struct pb_buffer base;
+
+ struct pb_slab *slab;
+
+ struct list_head head;
+
+ unsigned mapCount;
+
+ /** Offset relative to the start of the slab buffer. */
+ pb_size start;
+
+ /** Use when validating, to signal that all mappings are finished */
+ /* TODO: Actually validation does not reach this stage yet */
+ pipe_condvar event;
+};
+
+
+/**
+ * Slab -- a contiguous piece of memory.
+ */
+struct pb_slab
+{
+ struct list_head head;
+ struct list_head freeBuffers;
+ pb_size numBuffers;
+ pb_size numFree;
+
+ struct pb_slab_buffer *buffers;
+ struct pb_slab_manager *mgr;
+
+ /** Buffer from the provider */
+ struct pb_buffer *bo;
+
+ void *virtual;
+};
+
+
+/**
+ * Slab manager.
+ *
+ * Adds and removes slabs as needed to satisfy the allocation and destruction
+ * of individual buffers.
+ */
+struct pb_slab_manager
+{
+ struct pb_manager base;
+
+ /** From where we get our buffers */
+ struct pb_manager *provider;
+
+ /** Size of the buffers we hand on downstream */
+ pb_size bufSize;
+
+ /** Size of the buffers we request upstream */
+ pb_size slabSize;
+
+ /**
+ * Alignment, usage to be used to allocate the slab buffers.
+ *
+ * We can only provide buffers which are consistent (in alignment, usage)
+ * with this description.
+ */
+ struct pb_desc desc;
+
+ /**
+ * Partial slabs
+ *
+ * Full slabs are not stored in any list. Empty slabs are destroyed
+    * immediately.
+ */
+ struct list_head slabs;
+
+ pipe_mutex mutex;
+};
+
+
+/**
+ * Wrapper around several slabs, therefore capable of handling buffers of
+ * multiple sizes.
+ *
+ * This buffer manager just dispatches buffer allocations to the appropriate slab
+ * manager, according to the requested buffer size, or bypasses the slab
+ * managers altogether for even larger sizes.
+ *
+ * The data of this structure remains constant after
+ * initialization and thus needs no mutex protection.
+ */
+struct pb_slab_range_manager
+{
+ struct pb_manager base;
+
+ struct pb_manager *provider;
+
+ pb_size minBufSize;
+ pb_size maxBufSize;
+
+ /** @sa pb_slab_manager::desc */
+ struct pb_desc desc;
+
+ unsigned numBuckets;
+ pb_size *bucketSizes;
+
+ /** Array of pb_slab_manager, one for each bucket size */
+ struct pb_manager **buckets;
+};
+
+
+static INLINE struct pb_slab_buffer *
+pb_slab_buffer(struct pb_buffer *buf)
+{
+ assert(buf);
+ return (struct pb_slab_buffer *)buf;
+}
+
+
+static INLINE struct pb_slab_manager *
+pb_slab_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pb_slab_manager *)mgr;
+}
+
+
+static INLINE struct pb_slab_range_manager *
+pb_slab_range_manager(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct pb_slab_range_manager *)mgr;
+}
+
+
+/**
+ * Delete a buffer from the slab delayed list and put
+ * it on the slab FREE list.
+ */
+static void
+pb_slab_buffer_destroy(struct pb_buffer *_buf)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+ struct pb_slab *slab = buf->slab;
+ struct pb_slab_manager *mgr = slab->mgr;
+ struct list_head *list = &buf->head;
+
+ pipe_mutex_lock(mgr->mutex);
+
+ assert(!pipe_is_referenced(&buf->base.base.reference));
+
+ buf->mapCount = 0;
+
+ LIST_DEL(list);
+ LIST_ADDTAIL(list, &slab->freeBuffers);
+ slab->numFree++;
+
+ if (slab->head.next == &slab->head)
+ LIST_ADDTAIL(&slab->head, &mgr->slabs);
+
+ /* If the slab becomes totally empty, free it */
+ if (slab->numFree == slab->numBuffers) {
+ list = &slab->head;
+ LIST_DELINIT(list);
+ pb_reference(&slab->bo, NULL);
+ FREE(slab->buffers);
+ FREE(slab);
+ }
+
+ pipe_mutex_unlock(mgr->mutex);
+}
+
+
+static void *
+pb_slab_buffer_map(struct pb_buffer *_buf,
+ unsigned flags)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+
+ ++buf->mapCount;
+ return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
+}
+
+
+static void
+pb_slab_buffer_unmap(struct pb_buffer *_buf)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+
+ --buf->mapCount;
+ if (buf->mapCount == 0)
+ pipe_condvar_broadcast(buf->event);
+}
+
+
+static enum pipe_error
+pb_slab_buffer_validate(struct pb_buffer *_buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+ return pb_validate(buf->slab->bo, vl, flags);
+}
+
+
+static void
+pb_slab_buffer_fence(struct pb_buffer *_buf,
+ struct pipe_fence_handle *fence)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+ pb_fence(buf->slab->bo, fence);
+}
+
+
+static void
+pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
+ struct pb_buffer **base_buf,
+ pb_size *offset)
+{
+ struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
+ pb_get_base_buffer(buf->slab->bo, base_buf, offset);
+ *offset += buf->start;
+}
+
+
+static const struct pb_vtbl
+pb_slab_buffer_vtbl = {
+ pb_slab_buffer_destroy,
+ pb_slab_buffer_map,
+ pb_slab_buffer_unmap,
+ pb_slab_buffer_validate,
+ pb_slab_buffer_fence,
+ pb_slab_buffer_get_base_buffer
+};
+
+
+/**
+ * Create a new slab.
+ *
+ * Called when we ran out of free slabs.
+ */
+static enum pipe_error
+pb_slab_create(struct pb_slab_manager *mgr)
+{
+ struct pb_slab *slab;
+ struct pb_slab_buffer *buf;
+ unsigned numBuffers;
+ unsigned i;
+ enum pipe_error ret;
+
+ slab = CALLOC_STRUCT(pb_slab);
+ if (!slab)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
+ if(!slab->bo) {
+ ret = PIPE_ERROR_OUT_OF_MEMORY;
+ goto out_err0;
+ }
+
+ /* Note down the slab virtual address. All mappings are accessed directly
+ * through this address so it is required that the buffer is pinned. */
+ slab->virtual = pb_map(slab->bo,
+ PIPE_BUFFER_USAGE_CPU_READ |
+ PIPE_BUFFER_USAGE_CPU_WRITE);
+ if(!slab->virtual) {
+ ret = PIPE_ERROR_OUT_OF_MEMORY;
+ goto out_err1;
+ }
+ pb_unmap(slab->bo);
+
+ numBuffers = slab->bo->base.size / mgr->bufSize;
+
+ slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
+ if (!slab->buffers) {
+ ret = PIPE_ERROR_OUT_OF_MEMORY;
+ goto out_err1;
+ }
+
+ LIST_INITHEAD(&slab->head);
+ LIST_INITHEAD(&slab->freeBuffers);
+ slab->numBuffers = numBuffers;
+ slab->numFree = 0;
+ slab->mgr = mgr;
+
+ buf = slab->buffers;
+ for (i=0; i < numBuffers; ++i) {
+ pipe_reference_init(&buf->base.base.reference, 0);
+ buf->base.base.size = mgr->bufSize;
+ buf->base.base.alignment = 0;
+ buf->base.base.usage = 0;
+ buf->base.vtbl = &pb_slab_buffer_vtbl;
+ buf->slab = slab;
+      buf->start = i * mgr->bufSize;
+ buf->mapCount = 0;
+ pipe_condvar_init(buf->event);
+ LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
+ slab->numFree++;
+ buf++;
+ }
+
+ /* Add this slab to the list of partial slabs */
+ LIST_ADDTAIL(&slab->head, &mgr->slabs);
+
+ return PIPE_OK;
+
+out_err1:
+ pb_reference(&slab->bo, NULL);
+out_err0:
+ FREE(slab);
+ return ret;
+}
+
+
+static struct pb_buffer *
+pb_slab_manager_create_buffer(struct pb_manager *_mgr,
+ pb_size size,
+ const struct pb_desc *desc)
+{
+ struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
+   struct pb_slab_buffer *buf;
+ struct pb_slab *slab;
+ struct list_head *list;
+
+ /* check size */
+ assert(size <= mgr->bufSize);
+ if(size > mgr->bufSize)
+ return NULL;
+
+ /* check if we can provide the requested alignment */
+ assert(pb_check_alignment(desc->alignment, mgr->desc.alignment));
+ if(!pb_check_alignment(desc->alignment, mgr->desc.alignment))
+ return NULL;
+ assert(pb_check_alignment(desc->alignment, mgr->bufSize));
+ if(!pb_check_alignment(desc->alignment, mgr->bufSize))
+ return NULL;
+
+ assert(pb_check_usage(desc->usage, mgr->desc.usage));
+ if(!pb_check_usage(desc->usage, mgr->desc.usage))
+ return NULL;
+
+ pipe_mutex_lock(mgr->mutex);
+
+ /* Create a new slab, if we run out of partial slabs */
+ if (mgr->slabs.next == &mgr->slabs) {
+ (void) pb_slab_create(mgr);
+ if (mgr->slabs.next == &mgr->slabs) {
+ pipe_mutex_unlock(mgr->mutex);
+ return NULL;
+ }
+ }
+
+ /* Allocate the buffer from a partial (or just created) slab */
+ list = mgr->slabs.next;
+ slab = LIST_ENTRY(struct pb_slab, list, head);
+
+ /* If totally full remove from the partial slab list */
+ if (--slab->numFree == 0)
+ LIST_DELINIT(list);
+
+ list = slab->freeBuffers.next;
+ LIST_DELINIT(list);
+
+ pipe_mutex_unlock(mgr->mutex);
+ buf = LIST_ENTRY(struct pb_slab_buffer, list, head);
+
+ pipe_reference_init(&buf->base.base.reference, 1);
+ buf->base.base.alignment = desc->alignment;
+ buf->base.base.usage = desc->usage;
+
+ return &buf->base;
+}
+
+
+static void
+pb_slab_manager_flush(struct pb_manager *_mgr)
+{
+ struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
+
+ assert(mgr->provider->flush);
+ if(mgr->provider->flush)
+ mgr->provider->flush(mgr->provider);
+}
+
+
+static void
+pb_slab_manager_destroy(struct pb_manager *_mgr)
+{
+ struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
+
+ /* TODO: cleanup all allocated buffers */
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pb_slab_manager_create(struct pb_manager *provider,
+ pb_size bufSize,
+ pb_size slabSize,
+ const struct pb_desc *desc)
+{
+ struct pb_slab_manager *mgr;
+
+ mgr = CALLOC_STRUCT(pb_slab_manager);
+ if (!mgr)
+ return NULL;
+
+ mgr->base.destroy = pb_slab_manager_destroy;
+ mgr->base.create_buffer = pb_slab_manager_create_buffer;
+ mgr->base.flush = pb_slab_manager_flush;
+
+ mgr->provider = provider;
+ mgr->bufSize = bufSize;
+ mgr->slabSize = slabSize;
+ mgr->desc = *desc;
+
+ LIST_INITHEAD(&mgr->slabs);
+
+ pipe_mutex_init(mgr->mutex);
+
+ return &mgr->base;
+}
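+
+
+/* A minimal usage sketch (not part of the original patch): a slab manager
+ * that sub-allocates 4 KiB buffers out of 256 KiB slabs requested from the
+ * provider, i.e. 64 buffers per slab.  The descriptor values are assumptions
+ * for the example.
+ */
+#if 0
+static struct pb_manager *
+example_create_slab_mgr(struct pb_manager *provider)
+{
+   struct pb_desc desc;
+
+   memset(&desc, 0, sizeof(desc));
+   desc.alignment = 4096;
+   desc.usage = PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+
+   return pb_slab_manager_create(provider, 4 * 1024, 256 * 1024, &desc);
+}
+#endif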
+
+
+static struct pb_buffer *
+pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
+ pb_size size,
+ const struct pb_desc *desc)
+{
+ struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
+ pb_size bufSize;
+ unsigned i;
+
+ bufSize = mgr->minBufSize;
+ for (i = 0; i < mgr->numBuckets; ++i) {
+ if(bufSize >= size)
+ return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
+ bufSize *= 2;
+ }
+
+ /* Fall back to allocate a buffer object directly from the provider. */
+ return mgr->provider->create_buffer(mgr->provider, size, desc);
+}
+
+
+static void
+pb_slab_range_manager_flush(struct pb_manager *_mgr)
+{
+ struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
+
+   /* Individual slabs don't hold any temporary buffers, so there is no need
+    * to flush them. */
+
+ assert(mgr->provider->flush);
+ if(mgr->provider->flush)
+ mgr->provider->flush(mgr->provider);
+}
+
+
+static void
+pb_slab_range_manager_destroy(struct pb_manager *_mgr)
+{
+ struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
+ unsigned i;
+
+ for (i = 0; i < mgr->numBuckets; ++i)
+ mgr->buckets[i]->destroy(mgr->buckets[i]);
+ FREE(mgr->buckets);
+ FREE(mgr->bucketSizes);
+ FREE(mgr);
+}
+
+
+struct pb_manager *
+pb_slab_range_manager_create(struct pb_manager *provider,
+ pb_size minBufSize,
+ pb_size maxBufSize,
+ pb_size slabSize,
+ const struct pb_desc *desc)
+{
+ struct pb_slab_range_manager *mgr;
+ pb_size bufSize;
+ unsigned i;
+
+ if(!provider)
+ return NULL;
+
+ mgr = CALLOC_STRUCT(pb_slab_range_manager);
+ if (!mgr)
+ goto out_err0;
+
+ mgr->base.destroy = pb_slab_range_manager_destroy;
+ mgr->base.create_buffer = pb_slab_range_manager_create_buffer;
+ mgr->base.flush = pb_slab_range_manager_flush;
+
+ mgr->provider = provider;
+ mgr->minBufSize = minBufSize;
+ mgr->maxBufSize = maxBufSize;
+
+ mgr->numBuckets = 1;
+ bufSize = minBufSize;
+ while(bufSize < maxBufSize) {
+ bufSize *= 2;
+ ++mgr->numBuckets;
+ }
+
+ mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets));
+ if (!mgr->buckets)
+ goto out_err1;
+
+ bufSize = minBufSize;
+ for (i = 0; i < mgr->numBuckets; ++i) {
+ mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc);
+ if(!mgr->buckets[i])
+ goto out_err2;
+ bufSize *= 2;
+ }
+
+ return &mgr->base;
+
+out_err2:
+ for (i = 0; i < mgr->numBuckets; ++i)
+ if(mgr->buckets[i])
+ mgr->buckets[i]->destroy(mgr->buckets[i]);
+ FREE(mgr->buckets);
+out_err1:
+ FREE(mgr);
+out_err0:
+ return NULL;
+}
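+
+
+/* A minimal usage sketch (not part of the original patch): a range manager
+ * covering requests from 4 KiB to 64 KiB.  With the doubling rule above this
+ * creates buckets of 4, 8, 16, 32 and 64 KiB; larger requests fall through
+ * to the provider.  The descriptor values are assumptions for the example.
+ */
+#if 0
+static struct pb_manager *
+example_create_slab_range(struct pb_manager *provider)
+{
+   struct pb_desc desc;
+
+   memset(&desc, 0, sizeof(desc));
+   desc.alignment = 4096;
+   desc.usage = PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+
+   return pb_slab_range_manager_create(provider,
+                                       4 * 1024,     /* minBufSize */
+                                       64 * 1024,    /* maxBufSize */
+                                       1024 * 1024,  /* slabSize */
+                                       &desc);
+}
+#endif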
diff --git a/src/gallium/auxiliary/pipebuffer/pb_validate.c b/src/gallium/auxiliary/pipebuffer/pb_validate.c
new file mode 100644
index 0000000000..150fd50618
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_validate.c
@@ -0,0 +1,193 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Buffer validation.
+ *
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_error.h"
+#include "util/u_memory.h"
+#include "util/u_debug.h"
+
+#include "pb_buffer.h"
+#include "pb_buffer_fenced.h"
+#include "pb_validate.h"
+
+
+#define PB_VALIDATE_INITIAL_SIZE 1 /* 512 */
+
+
+struct pb_validate_entry
+{
+ struct pb_buffer *buf;
+ unsigned flags;
+};
+
+
+struct pb_validate
+{
+ struct pb_validate_entry *entries;
+ unsigned used;
+ unsigned size;
+};
+
+
+enum pipe_error
+pb_validate_add_buffer(struct pb_validate *vl,
+ struct pb_buffer *buf,
+ unsigned flags)
+{
+ assert(buf);
+ if(!buf)
+ return PIPE_ERROR;
+
+ assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
+ assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
+ flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;
+
+ /* We only need to store one reference for each buffer, so avoid storing
+ * consecutive references for the same buffer. It might not be the most
+ * common pattern, but it is easy to implement.
+ */
+ if(vl->used && vl->entries[vl->used - 1].buf == buf) {
+ vl->entries[vl->used - 1].flags |= flags;
+ return PIPE_OK;
+ }
+
+ /* Grow the table */
+ if(vl->used == vl->size) {
+ unsigned new_size;
+ struct pb_validate_entry *new_entries;
+
+ new_size = vl->size * 2;
+ if(!new_size)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ new_entries = (struct pb_validate_entry *)REALLOC(vl->entries,
+ vl->size*sizeof(struct pb_validate_entry),
+ new_size*sizeof(struct pb_validate_entry));
+ if(!new_entries)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ memset(new_entries + vl->size, 0, (new_size - vl->size)*sizeof(struct pb_validate_entry));
+
+ vl->size = new_size;
+ vl->entries = new_entries;
+ }
+
+ assert(!vl->entries[vl->used].buf);
+ pb_reference(&vl->entries[vl->used].buf, buf);
+ vl->entries[vl->used].flags = flags;
+ ++vl->used;
+
+ return PIPE_OK;
+}
+
+
+enum pipe_error
+pb_validate_foreach(struct pb_validate *vl,
+ enum pipe_error (*callback)(struct pb_buffer *buf, void *data),
+ void *data)
+{
+ unsigned i;
+ for(i = 0; i < vl->used; ++i) {
+ enum pipe_error ret;
+ ret = callback(vl->entries[i].buf, data);
+ if(ret != PIPE_OK)
+ return ret;
+ }
+ return PIPE_OK;
+}
+
+
+enum pipe_error
+pb_validate_validate(struct pb_validate *vl)
+{
+ unsigned i;
+
+ for(i = 0; i < vl->used; ++i) {
+ enum pipe_error ret;
+ ret = pb_validate(vl->entries[i].buf, vl, vl->entries[i].flags);
+ if(ret != PIPE_OK) {
+ while(i--)
+ pb_validate(vl->entries[i].buf, NULL, 0);
+ return ret;
+ }
+ }
+
+ return PIPE_OK;
+}
+
+
+void
+pb_validate_fence(struct pb_validate *vl,
+ struct pipe_fence_handle *fence)
+{
+ unsigned i;
+ for(i = 0; i < vl->used; ++i) {
+ pb_fence(vl->entries[i].buf, fence);
+ pb_reference(&vl->entries[i].buf, NULL);
+ }
+ vl->used = 0;
+}
+
+
+void
+pb_validate_destroy(struct pb_validate *vl)
+{
+ unsigned i;
+ for(i = 0; i < vl->used; ++i)
+ pb_reference(&vl->entries[i].buf, NULL);
+ FREE(vl->entries);
+ FREE(vl);
+}
+
+
+struct pb_validate *
+pb_validate_create(void)
+{
+ struct pb_validate *vl;
+
+ vl = CALLOC_STRUCT(pb_validate);
+ if(!vl)
+ return NULL;
+
+ vl->size = PB_VALIDATE_INITIAL_SIZE;
+ vl->entries = (struct pb_validate_entry *)CALLOC(vl->size, sizeof(struct pb_validate_entry));
+ if(!vl->entries) {
+ FREE(vl);
+ return NULL;
+ }
+
+ return vl;
+}
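+
+
+/* A minimal usage sketch (not part of the original patch): the expected call
+ * sequence around a command submission.  "vertex_buf" and the surrounding
+ * driver code that emits and submits the batch are assumptions used only to
+ * show where pb_validate_validate() and pb_validate_fence() fit.
+ */
+#if 0
+static enum pipe_error
+example_submit(struct pb_validate *vl,
+               struct pb_buffer *vertex_buf,
+               struct pipe_fence_handle *fence)
+{
+   enum pipe_error ret;
+
+   /* 1. Accumulate every buffer referenced by the command batch. */
+   ret = pb_validate_add_buffer(vl, vertex_buf, PIPE_BUFFER_USAGE_GPU_READ);
+   if(ret != PIPE_OK)
+      return ret;
+
+   /* 2. Validate right before issuing commands to the hardware. */
+   ret = pb_validate_validate(vl);
+   if(ret != PIPE_OK)
+      return ret;
+
+   /* ...emit and submit the command batch here... */
+
+   /* 3. Fence right after submission; this also clears the list. */
+   pb_validate_fence(vl, fence);
+
+   return PIPE_OK;
+}
+#endif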
+
diff --git a/src/gallium/auxiliary/pipebuffer/pb_validate.h b/src/gallium/auxiliary/pipebuffer/pb_validate.h
new file mode 100644
index 0000000000..dfb84df1ce
--- /dev/null
+++ b/src/gallium/auxiliary/pipebuffer/pb_validate.h
@@ -0,0 +1,97 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Buffer validation.
+ *
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef PB_VALIDATE_H_
+#define PB_VALIDATE_H_
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_error.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct pb_buffer;
+struct pipe_fence_handle;
+
+
+/**
+ * Buffer validation list.
+ *
+ * It holds a list of buffers to be validated and fenced when flushing.
+ */
+struct pb_validate;
+
+
+enum pipe_error
+pb_validate_add_buffer(struct pb_validate *vl,
+ struct pb_buffer *buf,
+ unsigned flags);
+
+enum pipe_error
+pb_validate_foreach(struct pb_validate *vl,
+ enum pipe_error (*callback)(struct pb_buffer *buf, void *data),
+ void *data);
+
+/**
+ * Validate all buffers for hardware access.
+ *
+ * Should be called right before issuing commands to the hardware.
+ */
+enum pipe_error
+pb_validate_validate(struct pb_validate *vl);
+
+/**
+ * Fence all buffers and clear the list.
+ *
+ * Should be called right after issuing commands to the hardware.
+ */
+void
+pb_validate_fence(struct pb_validate *vl,
+ struct pipe_fence_handle *fence);
+
+struct pb_validate *
+pb_validate_create(void);
+
+void
+pb_validate_destroy(struct pb_validate *vl);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*PB_VALIDATE_H_*/
diff --git a/src/gallium/auxiliary/rbug/Makefile b/src/gallium/auxiliary/rbug/Makefile
new file mode 100644
index 0000000000..cd12e8468f
--- /dev/null
+++ b/src/gallium/auxiliary/rbug/Makefile
@@ -0,0 +1,14 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = rbug
+
+C_SOURCES = \
+ rbug_connection.c \
+ rbug_core.c \
+ rbug_texture.c \
+ rbug_context.c \
+ rbug_shader.c \
+ rbug_demarshal.c
+
+include ../../Makefile.template
diff --git a/src/gallium/auxiliary/rbug/README b/src/gallium/auxiliary/rbug/README
new file mode 100644
index 0000000000..33d76371de
--- /dev/null
+++ b/src/gallium/auxiliary/rbug/README
@@ -0,0 +1,21 @@
+ GALLIUM REMOTE DEBUGGING COMMON CODE
+
+= About =
+
+This directory contains the common code for the Gallium 3D remote debugging
+driver and clients. The code has two parts: the connection management code
+and the (de)marshaller.
+
+The code currently uses TCP over IPv4 for connections.
+
+Information about driver integration can be found in:
+
+src/gallium/drivers/trace/README
+
+For information about applications, look in:
+
+progs/rbug/README
+
+
+--
+Jakob Bornecrantz <jakob@vmware.com>
diff --git a/src/gallium/auxiliary/rbug/SConscript b/src/gallium/auxiliary/rbug/SConscript
new file mode 100644
index 0000000000..4a9afb45d3
--- /dev/null
+++ b/src/gallium/auxiliary/rbug/SConscript
@@ -0,0 +1,14 @@
+Import('*')
+
+rbug = env.ConvenienceLibrary(
+ target = 'rbug',
+ source = [
+ 'rbug_core.c',
+ 'rbug_shader.c',
+ 'rbug_context.c',
+ 'rbug_texture.c',
+ 'rbug_demarshal.c',
+ 'rbug_connection.c',
+ ])
+
+auxiliaries.insert(0, rbug)
diff --git a/src/gallium/auxiliary/rbug/rbug.h b/src/gallium/auxiliary/rbug/rbug.h
new file mode 100644
index 0000000000..259bfc6c79
--- /dev/null
+++ b/src/gallium/auxiliary/rbug/rbug.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Include all of the remote debugger protocol code for users.
+ */
+
+#include "rbug/rbug_core.h"
+#include "rbug/rbug_shader.h"
+#include "rbug/rbug_context.h"
+#include "rbug/rbug_texture.h"
+#include "rbug/rbug_connection.h"
diff --git a/src/gallium/auxiliary/rbug/rbug_connection.c b/src/gallium/auxiliary/rbug/rbug_connection.c
new file mode 100644
index 0000000000..52acb700af
--- /dev/null
+++ b/src/gallium/auxiliary/rbug/rbug_connection.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "rbug/rbug.h"
+#include "rbug/rbug_internal.h"
+
+#include "util/u_network.h"
+
+struct rbug_connection
+{
+ int socket;
+ uint32_t send_serial;
+ uint32_t recv_serial;
+ enum rbug_opcode opcode;
+};
+
+/**
+ * Create a rbug connection from a socket created with u_socket.
+ *
+ * Result:
+ *    A newly allocated connection using the socket as its communication path.
+ */
+struct rbug_connection *
+rbug_from_socket(int socket)
+{
+ struct rbug_connection *c = CALLOC_STRUCT(rbug_connection);
+ c->socket = socket;
+ return c;
+}
+
+/**
+ * Free a connection, also closes socket.
+ */
+void
+rbug_disconnect(struct rbug_connection *c)
+{
+ u_socket_close(c->socket);
+ FREE(c);
+}
+
+/**
+ * Waits for a message to be fully received.
+ * Also returns the serial for the message; the serial is not touched for replies.
+ *
+ * Result:
+ * demarshaled message on success, NULL on connection error
+ */
+struct rbug_header *
+rbug_get_message(struct rbug_connection *c, uint32_t *serial)
+{
+ struct rbug_proto_header header;
+ struct rbug_header *out;
+ struct rbug_proto_header *data;
+ size_t length = 0;
+ size_t read = 0;
+ int ret;
+
+
+ ret = u_socket_peek(c->socket, &header, sizeof(header));
+ if (ret <= 0) {
+ return NULL;
+ }
+
+ length = (size_t)header.length * 4;
+ data = MALLOC(length);
+ if (!data) {
+ return NULL;
+ }
+
+ do {
+ uint8_t *ptr = ((uint8_t*)data) + read;
+ ret = u_socket_recv(c->socket, ptr, length - read);
+
+ if (ret <= 0) {
+ FREE(data);
+ return NULL;
+ }
+
+ read += ret;
+ } while(read < length);
+
+ out = rbug_demarshal(data);
+ if (!out)
+ FREE(data);
+ else if (serial)
+ *serial = c->recv_serial++;
+ else
+ c->recv_serial++;
+
+ return out;
+}
+
+/**
+ * Frees a message and associated data.
+ */
+void
+rbug_free_header(struct rbug_header *header)
+{
+ if (!header)
+ return;
+
+ FREE(header->__message);
+ FREE(header);
+}
+
+/**
+ * Internal function used by rbug_send_* functions.
+ *
+ * Start sending a message.
+ */
+int
+rbug_connection_send_start(struct rbug_connection *c, enum rbug_opcode opcode, uint32_t length)
+{
+ c->opcode = opcode;
+ return 0;
+}
+
+/**
+ * Internal function used by rbug_send_* functions.
+ *
+ * Write data to the socket.
+ */
+int
+rbug_connection_write(struct rbug_connection *c, void *to, uint32_t size)
+{
+ int ret = u_socket_send(c->socket, to, size);
+ return ret;
+}
+
+/**
+ * Internal function used by rbug_send_* functions.
+ *
+ * Finish writing data to the socket.
+ * Increments the send_serial and sets the serial argument if supplied.
+ */
+int rbug_connection_send_finish(struct rbug_connection *c, uint32_t *serial)
+{
+ if (c->opcode < 0)
+ return 0;
+ else if (serial)
+ *serial = c->send_serial++;
+ else
+ c->send_serial++;
+
+ return 0;
+}
diff --git a/src/gallium/auxiliary/rbug/rbug_connection.h b/src/gallium/auxiliary/rbug/rbug_connection.h
new file mode 100644
index 0000000000..1f2c9ff347
--- /dev/null
+++ b/src/gallium/auxiliary/rbug/rbug_connection.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file contains the function declarations for the connection code; see
+ * the C file for more comments covering function use.
+ */
+
+#ifndef _RBUG_CONNECTION_H_
+#define _RBUG_CONNECTION_H_
+
+#include "rbug/rbug_proto.h"
+
+struct rbug_connection * rbug_from_socket(int socket);
+
+void rbug_disconnect(struct rbug_connection *c);
+
+struct rbug_header * rbug_get_message(struct rbug_connection *c, uint32_t *serial);
+
+void rbug_free_header(struct rbug_header *header);
+
+struct rbug_header * rbug_demarshal(struct rbug_proto_header *header);
+
+#endif
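
For orientation, a minimal receive loop a client might run over an already connected socket. It uses only the calls declared in this header plus the rbug_header definition from rbug_proto.h; how the socket is obtained (the u_network helpers) is deliberately left out.

#include "rbug/rbug.h"

/* Sketch: pump messages from a connected socket until the peer goes away. */
static void
pump_messages(int sock)
{
   struct rbug_connection *con = rbug_from_socket(sock);
   struct rbug_header *msg;
   uint32_t serial;

   while ((msg = rbug_get_message(con, &serial)) != NULL) {
      /* Dispatch on msg->opcode here; `serial` identifies the request
       * when sending a matching *_reply back. */
      rbug_free_header(msg);
   }

   rbug_disconnect(con);
}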
diff --git a/src/gallium/auxiliary/rbug/rbug_context.c b/src/gallium/auxiliary/rbug/rbug_context.c
new file mode 100644
index 0000000000..1832425658
--- /dev/null
+++ b/src/gallium/auxiliary/rbug/rbug_context.c
@@ -0,0 +1,759 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file holds the function implementation for one of the rbug extensions.
+ * Prototypes and declarations of functions and structs are in the header
+ * file matching this file's name, in the same folder.
+ *
+ * The functions starting with rbug_send_* encode a call into the wire format
+ * and send it over the supplied connection, while functions starting with
+ * rbug_demarshal_* decode data from the wire protocol.
+ *
+ * Functions ending with _reply are replies to requests.
+ */
+
+#include "rbug_internal.h"
+#include "rbug/rbug_context.h"
+
+int rbug_send_context_list(struct rbug_connection *__con,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_CONTEXT_LIST));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_CONTEXT_LIST, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_context_info(struct rbug_connection *__con,
+ rbug_context_t context,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(8); /* context */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_CONTEXT_INFO));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(8, rbug_context_t, context); /* context */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_CONTEXT_INFO, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_context_draw_block(struct rbug_connection *__con,
+ rbug_context_t context,
+ rbug_block_t block,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(8); /* context */
+ LEN(4); /* block */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_CONTEXT_DRAW_BLOCK));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(8, rbug_context_t, context); /* context */
+ WRITE(4, rbug_block_t, block); /* block */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_CONTEXT_DRAW_BLOCK, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_context_draw_step(struct rbug_connection *__con,
+ rbug_context_t context,
+ rbug_block_t step,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(8); /* context */
+ LEN(4); /* step */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_CONTEXT_DRAW_STEP));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(8, rbug_context_t, context); /* context */
+ WRITE(4, rbug_block_t, step); /* step */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_CONTEXT_DRAW_STEP, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_context_draw_unblock(struct rbug_connection *__con,
+ rbug_context_t context,
+ rbug_block_t unblock,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(8); /* context */
+ LEN(4); /* unblock */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_CONTEXT_DRAW_UNBLOCK));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(8, rbug_context_t, context); /* context */
+ WRITE(4, rbug_block_t, unblock); /* unblock */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_CONTEXT_DRAW_UNBLOCK, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_context_draw_rule(struct rbug_connection *__con,
+ rbug_context_t context,
+ rbug_shader_t vertex,
+ rbug_shader_t fragment,
+ rbug_texture_t texture,
+ rbug_texture_t surface,
+ rbug_block_t block,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(8); /* context */
+ LEN(8); /* vertex */
+ LEN(8); /* fragment */
+ LEN(8); /* texture */
+ LEN(8); /* surface */
+ LEN(4); /* block */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_CONTEXT_DRAW_RULE));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(8, rbug_context_t, context); /* context */
+ WRITE(8, rbug_shader_t, vertex); /* vertex */
+ WRITE(8, rbug_shader_t, fragment); /* fragment */
+ WRITE(8, rbug_texture_t, texture); /* texture */
+ WRITE(8, rbug_texture_t, surface); /* surface */
+ WRITE(4, rbug_block_t, block); /* block */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_CONTEXT_DRAW_RULE, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_context_flush(struct rbug_connection *__con,
+ rbug_context_t context,
+ int32_t flags,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(8); /* context */
+ LEN(4); /* flags */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_CONTEXT_FLUSH));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(8, rbug_context_t, context); /* context */
+ WRITE(4, int32_t, flags); /* flags */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_CONTEXT_FLUSH, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_context_list_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ rbug_context_t *contexts,
+ uint32_t contexts_len,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(4); /* serial */
+ LEN_ARRAY(8, contexts); /* contexts */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_CONTEXT_LIST_REPLY));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(4, uint32_t, serial); /* serial */
+ WRITE_ARRAY(8, rbug_context_t, contexts); /* contexts */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_CONTEXT_LIST_REPLY, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_context_info_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ rbug_shader_t vertex,
+ rbug_shader_t fragment,
+ rbug_texture_t *texs,
+ uint32_t texs_len,
+ rbug_texture_t *cbufs,
+ uint32_t cbufs_len,
+ rbug_texture_t zsbuf,
+ rbug_block_t blocker,
+ rbug_block_t blocked,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(4); /* serial */
+ LEN(8); /* vertex */
+ LEN(8); /* fragment */
+ LEN_ARRAY(8, texs); /* texs */
+ LEN_ARRAY(8, cbufs); /* cbufs */
+ LEN(8); /* zsbuf */
+ LEN(4); /* blocker */
+ LEN(4); /* blocked */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_CONTEXT_INFO_REPLY));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(4, uint32_t, serial); /* serial */
+ WRITE(8, rbug_shader_t, vertex); /* vertex */
+ WRITE(8, rbug_shader_t, fragment); /* fragment */
+ WRITE_ARRAY(8, rbug_texture_t, texs); /* texs */
+ WRITE_ARRAY(8, rbug_texture_t, cbufs); /* cbufs */
+ WRITE(8, rbug_texture_t, zsbuf); /* zsbuf */
+ WRITE(4, rbug_block_t, blocker); /* blocker */
+ WRITE(4, rbug_block_t, blocked); /* blocked */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_CONTEXT_INFO_REPLY, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_context_draw_blocked(struct rbug_connection *__con,
+ rbug_context_t context,
+ rbug_block_t block,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(8); /* context */
+ LEN(4); /* block */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_CONTEXT_DRAW_BLOCKED));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(8, rbug_context_t, context); /* context */
+ WRITE(4, rbug_block_t, block); /* block */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_CONTEXT_DRAW_BLOCKED, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+struct rbug_proto_context_list * rbug_demarshal_context_list(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_context_list *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_CONTEXT_LIST)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+
+ return ret;
+}
+
+struct rbug_proto_context_info * rbug_demarshal_context_info(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_context_info *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_CONTEXT_INFO)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(8, rbug_context_t, context); /* context */
+
+ return ret;
+}
+
+struct rbug_proto_context_draw_block * rbug_demarshal_context_draw_block(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_context_draw_block *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_CONTEXT_DRAW_BLOCK)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(8, rbug_context_t, context); /* context */
+ READ(4, rbug_block_t, block); /* block */
+
+ return ret;
+}
+
+struct rbug_proto_context_draw_step * rbug_demarshal_context_draw_step(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_context_draw_step *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_CONTEXT_DRAW_STEP)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(8, rbug_context_t, context); /* context */
+ READ(4, rbug_block_t, step); /* step */
+
+ return ret;
+}
+
+struct rbug_proto_context_draw_unblock * rbug_demarshal_context_draw_unblock(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_context_draw_unblock *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_CONTEXT_DRAW_UNBLOCK)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(8, rbug_context_t, context); /* context */
+ READ(4, rbug_block_t, unblock); /* unblock */
+
+ return ret;
+}
+
+struct rbug_proto_context_draw_rule * rbug_demarshal_context_draw_rule(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_context_draw_rule *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_CONTEXT_DRAW_RULE)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(8, rbug_context_t, context); /* context */
+ READ(8, rbug_shader_t, vertex); /* vertex */
+ READ(8, rbug_shader_t, fragment); /* fragment */
+ READ(8, rbug_texture_t, texture); /* texture */
+ READ(8, rbug_texture_t, surface); /* surface */
+ READ(4, rbug_block_t, block); /* block */
+
+ return ret;
+}
+
+struct rbug_proto_context_flush * rbug_demarshal_context_flush(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_context_flush *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_CONTEXT_FLUSH)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(8, rbug_context_t, context); /* context */
+ READ(4, int32_t, flags); /* flags */
+
+ return ret;
+}
+
+struct rbug_proto_context_list_reply * rbug_demarshal_context_list_reply(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_context_list_reply *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_CONTEXT_LIST_REPLY)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(4, uint32_t, serial); /* serial */
+ READ_ARRAY(8, rbug_context_t, contexts); /* contexts */
+
+ return ret;
+}
+
+struct rbug_proto_context_info_reply * rbug_demarshal_context_info_reply(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_context_info_reply *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_CONTEXT_INFO_REPLY)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(4, uint32_t, serial); /* serial */
+ READ(8, rbug_shader_t, vertex); /* vertex */
+ READ(8, rbug_shader_t, fragment); /* fragment */
+ READ_ARRAY(8, rbug_texture_t, texs); /* texs */
+ READ_ARRAY(8, rbug_texture_t, cbufs); /* cbufs */
+ READ(8, rbug_texture_t, zsbuf); /* zsbuf */
+ READ(4, rbug_block_t, blocker); /* blocker */
+ READ(4, rbug_block_t, blocked); /* blocked */
+
+ return ret;
+}
+
+struct rbug_proto_context_draw_blocked * rbug_demarshal_context_draw_blocked(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_context_draw_blocked *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_CONTEXT_DRAW_BLOCKED)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(8, rbug_context_t, context); /* context */
+ READ(4, rbug_block_t, block); /* block */
+
+ return ret;
+}
diff --git a/src/gallium/auxiliary/rbug/rbug_context.h b/src/gallium/auxiliary/rbug/rbug_context.h
new file mode 100644
index 0000000000..da61c2365b
--- /dev/null
+++ b/src/gallium/auxiliary/rbug/rbug_context.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file holds struct declarations and function prototypes for one of
+ * the rbug extensions. The implementation of the functions is in the C file
+ * matching this file's name, in the same folder.
+ *
+ * The structs are what the demarshal functions return. The functions
+ * starting with rbug_send_* encode a call into the wire format and send it
+ * over the supplied connection, while functions starting with
+ * rbug_demarshal_* decode data from the wire protocol.
+ *
+ * Structs and functions ending with _reply are replies to requests.
+ */
+
+#ifndef _RBUG_PROTO_CONTEXT_H_
+#define _RBUG_PROTO_CONTEXT_H_
+
+#include "rbug/rbug_proto.h"
+#include "rbug/rbug_core.h"
+
+typedef enum
+{
+ RBUG_BLOCK_BEFORE = 1,
+ RBUG_BLOCK_AFTER = 2,
+ RBUG_BLOCK_RULE = 4,
+ RBUG_BLOCK_MASK = 7,
+} rbug_block_t;
+
+struct rbug_proto_context_list
+{
+ struct rbug_header header;
+};
+
+struct rbug_proto_context_info
+{
+ struct rbug_header header;
+ rbug_context_t context;
+};
+
+struct rbug_proto_context_draw_block
+{
+ struct rbug_header header;
+ rbug_context_t context;
+ rbug_block_t block;
+};
+
+struct rbug_proto_context_draw_step
+{
+ struct rbug_header header;
+ rbug_context_t context;
+ rbug_block_t step;
+};
+
+struct rbug_proto_context_draw_unblock
+{
+ struct rbug_header header;
+ rbug_context_t context;
+ rbug_block_t unblock;
+};
+
+struct rbug_proto_context_draw_rule
+{
+ struct rbug_header header;
+ rbug_context_t context;
+ rbug_shader_t vertex;
+ rbug_shader_t fragment;
+ rbug_texture_t texture;
+ rbug_texture_t surface;
+ rbug_block_t block;
+};
+
+struct rbug_proto_context_flush
+{
+ struct rbug_header header;
+ rbug_context_t context;
+ int32_t flags;
+};
+
+struct rbug_proto_context_list_reply
+{
+ struct rbug_header header;
+ uint32_t serial;
+ rbug_context_t *contexts;
+ uint32_t contexts_len;
+};
+
+struct rbug_proto_context_info_reply
+{
+ struct rbug_header header;
+ uint32_t serial;
+ rbug_shader_t vertex;
+ rbug_shader_t fragment;
+ rbug_texture_t *texs;
+ uint32_t texs_len;
+ rbug_texture_t *cbufs;
+ uint32_t cbufs_len;
+ rbug_texture_t zsbuf;
+ rbug_block_t blocker;
+ rbug_block_t blocked;
+};
+
+struct rbug_proto_context_draw_blocked
+{
+ struct rbug_header header;
+ rbug_context_t context;
+ rbug_block_t block;
+};
+
+int rbug_send_context_list(struct rbug_connection *__con,
+ uint32_t *__serial);
+
+int rbug_send_context_info(struct rbug_connection *__con,
+ rbug_context_t context,
+ uint32_t *__serial);
+
+int rbug_send_context_draw_block(struct rbug_connection *__con,
+ rbug_context_t context,
+ rbug_block_t block,
+ uint32_t *__serial);
+
+int rbug_send_context_draw_step(struct rbug_connection *__con,
+ rbug_context_t context,
+ rbug_block_t step,
+ uint32_t *__serial);
+
+int rbug_send_context_draw_unblock(struct rbug_connection *__con,
+ rbug_context_t context,
+ rbug_block_t unblock,
+ uint32_t *__serial);
+
+int rbug_send_context_draw_rule(struct rbug_connection *__con,
+ rbug_context_t context,
+ rbug_shader_t vertex,
+ rbug_shader_t fragment,
+ rbug_texture_t texture,
+ rbug_texture_t surface,
+ rbug_block_t block,
+ uint32_t *__serial);
+
+int rbug_send_context_flush(struct rbug_connection *__con,
+ rbug_context_t context,
+ int32_t flags,
+ uint32_t *__serial);
+
+int rbug_send_context_list_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ rbug_context_t *contexts,
+ uint32_t contexts_len,
+ uint32_t *__serial);
+
+int rbug_send_context_info_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ rbug_shader_t vertex,
+ rbug_shader_t fragment,
+ rbug_texture_t *texs,
+ uint32_t texs_len,
+ rbug_texture_t *cbufs,
+ uint32_t cbufs_len,
+ rbug_texture_t zsbuf,
+ rbug_block_t blocker,
+ rbug_block_t blocked,
+ uint32_t *__serial);
+
+int rbug_send_context_draw_blocked(struct rbug_connection *__con,
+ rbug_context_t context,
+ rbug_block_t block,
+ uint32_t *__serial);
+
+struct rbug_proto_context_list * rbug_demarshal_context_list(struct rbug_proto_header *header);
+
+struct rbug_proto_context_info * rbug_demarshal_context_info(struct rbug_proto_header *header);
+
+struct rbug_proto_context_draw_block * rbug_demarshal_context_draw_block(struct rbug_proto_header *header);
+
+struct rbug_proto_context_draw_step * rbug_demarshal_context_draw_step(struct rbug_proto_header *header);
+
+struct rbug_proto_context_draw_unblock * rbug_demarshal_context_draw_unblock(struct rbug_proto_header *header);
+
+struct rbug_proto_context_draw_rule * rbug_demarshal_context_draw_rule(struct rbug_proto_header *header);
+
+struct rbug_proto_context_flush * rbug_demarshal_context_flush(struct rbug_proto_header *header);
+
+struct rbug_proto_context_list_reply * rbug_demarshal_context_list_reply(struct rbug_proto_header *header);
+
+struct rbug_proto_context_info_reply * rbug_demarshal_context_info_reply(struct rbug_proto_header *header);
+
+struct rbug_proto_context_draw_blocked * rbug_demarshal_context_draw_blocked(struct rbug_proto_header *header);
+
+#endif
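
A hedged sketch of how a debugger client could use the calls above to halt a context before its next draw call and wait for the notification. The receive helpers come from rbug_connection.h, and the cast relies on rbug_header being the first member of every rbug_proto_* struct.

/* Block `ctx` before each draw and wait for the CONTEXT_DRAW_BLOCKED event. */
static int
block_and_wait(struct rbug_connection *con, rbug_context_t ctx)
{
   struct rbug_header *msg;
   int ret;

   ret = rbug_send_context_draw_block(con, ctx, RBUG_BLOCK_BEFORE, NULL);
   if (ret)
      return ret;

   while ((msg = rbug_get_message(con, NULL)) != NULL) {
      if (msg->opcode == RBUG_OP_CONTEXT_DRAW_BLOCKED) {
         struct rbug_proto_context_draw_blocked *ev =
            (struct rbug_proto_context_draw_blocked *)msg;
         /* ev->context and ev->block tell which context stopped and where. */
         rbug_free_header(msg);
         return 0;
      }
      rbug_free_header(msg);
   }

   return -1; /* connection closed */
}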
diff --git a/src/gallium/auxiliary/rbug/rbug_core.c b/src/gallium/auxiliary/rbug/rbug_core.c
new file mode 100644
index 0000000000..876ae5a0ce
--- /dev/null
+++ b/src/gallium/auxiliary/rbug/rbug_core.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file holds the function implementation for one of the rbug extensions.
+ * Prototypes and declarations of functions and structs are in the header
+ * file matching this file's name, in the same folder.
+ *
+ * The functions starting with rbug_send_* encode a call into the wire format
+ * and send it over the supplied connection, while functions starting with
+ * rbug_demarshal_* decode data from the wire protocol.
+ *
+ * Functions ending with _reply are replies to requests.
+ */
+
+#include "rbug_internal.h"
+#include "rbug/rbug_core.h"
+
+int rbug_send_noop(struct rbug_connection *__con,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_NOOP));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_NOOP, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_ping(struct rbug_connection *__con,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_PING));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_PING, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_error(struct rbug_connection *__con,
+ uint32_t error,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(4); /* error */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_ERROR));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(4, uint32_t, error); /* error */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_ERROR, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_ping_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(4); /* serial */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_PING_REPLY));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(4, uint32_t, serial); /* serial */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_PING_REPLY, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_error_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ uint32_t error,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(4); /* serial */
+ LEN(4); /* error */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_ERROR_REPLY));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(4, uint32_t, serial); /* serial */
+ WRITE(4, uint32_t, error); /* error */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_ERROR_REPLY, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+struct rbug_proto_noop * rbug_demarshal_noop(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_noop *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_NOOP)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+
+ return ret;
+}
+
+struct rbug_proto_ping * rbug_demarshal_ping(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_ping *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_PING)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+
+ return ret;
+}
+
+struct rbug_proto_error * rbug_demarshal_error(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_error *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_ERROR)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(4, uint32_t, error); /* error */
+
+ return ret;
+}
+
+struct rbug_proto_ping_reply * rbug_demarshal_ping_reply(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_ping_reply *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_PING_REPLY)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(4, uint32_t, serial); /* serial */
+
+ return ret;
+}
+
+struct rbug_proto_error_reply * rbug_demarshal_error_reply(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_error_reply *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_ERROR_REPLY)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(4, uint32_t, serial); /* serial */
+ READ(4, uint32_t, error); /* error */
+
+ return ret;
+}
diff --git a/src/gallium/auxiliary/rbug/rbug_core.h b/src/gallium/auxiliary/rbug/rbug_core.h
new file mode 100644
index 0000000000..99a36a0163
--- /dev/null
+++ b/src/gallium/auxiliary/rbug/rbug_core.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file holds struct declarations and function prototypes for one of
+ * the rbug extensions. The implementation of the functions is in the C file
+ * matching this file's name, in the same folder.
+ *
+ * The structs are what the demarshal functions return. The functions
+ * starting with rbug_send_* encode a call into the wire format and send it
+ * over the supplied connection, while functions starting with
+ * rbug_demarshal_* decode data from the wire protocol.
+ *
+ * Structs and functions ending with _reply are replies to requests.
+ */
+
+#ifndef _RBUG_PROTO_CORE_H_
+#define _RBUG_PROTO_CORE_H_
+
+#include "rbug/rbug_proto.h"
+
+typedef uint64_t rbug_shader_t;
+typedef uint64_t rbug_context_t;
+typedef uint64_t rbug_texture_t;
+
+struct rbug_proto_noop
+{
+ struct rbug_header header;
+};
+
+struct rbug_proto_ping
+{
+ struct rbug_header header;
+};
+
+struct rbug_proto_error
+{
+ struct rbug_header header;
+ uint32_t error;
+};
+
+struct rbug_proto_ping_reply
+{
+ struct rbug_header header;
+ uint32_t serial;
+};
+
+struct rbug_proto_error_reply
+{
+ struct rbug_header header;
+ uint32_t serial;
+ uint32_t error;
+};
+
+int rbug_send_noop(struct rbug_connection *__con,
+ uint32_t *__serial);
+
+int rbug_send_ping(struct rbug_connection *__con,
+ uint32_t *__serial);
+
+int rbug_send_error(struct rbug_connection *__con,
+ uint32_t error,
+ uint32_t *__serial);
+
+int rbug_send_ping_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ uint32_t *__serial);
+
+int rbug_send_error_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ uint32_t error,
+ uint32_t *__serial);
+
+struct rbug_proto_noop * rbug_demarshal_noop(struct rbug_proto_header *header);
+
+struct rbug_proto_ping * rbug_demarshal_ping(struct rbug_proto_header *header);
+
+struct rbug_proto_error * rbug_demarshal_error(struct rbug_proto_header *header);
+
+struct rbug_proto_ping_reply * rbug_demarshal_ping_reply(struct rbug_proto_header *header);
+
+struct rbug_proto_error_reply * rbug_demarshal_error_reply(struct rbug_proto_header *header);
+
+#endif
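
To show how the serial numbers tie replies to requests: the serial that rbug_get_message() reports for an incoming request is what the responder passes back as the first serial argument of the matching *_reply. A minimal server-side dispatch might look like this (the error code 1 is an arbitrary placeholder).

/* Answer an incoming request; `serial` came from rbug_get_message(). */
static void
answer_request(struct rbug_connection *con,
               struct rbug_header *msg, uint32_t serial)
{
   switch (msg->opcode) {
   case RBUG_OP_PING:
      rbug_send_ping_reply(con, serial, NULL);
      break;
   default:
      /* Unsupported request: report an error against its serial. */
      rbug_send_error_reply(con, serial, 1, NULL);
      break;
   }
}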
diff --git a/src/gallium/auxiliary/rbug/rbug_demarshal.c b/src/gallium/auxiliary/rbug/rbug_demarshal.c
new file mode 100644
index 0000000000..47390fbcee
--- /dev/null
+++ b/src/gallium/auxiliary/rbug/rbug_demarshal.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "rbug.h"
+
+/**
+ * Small function that looks at the proto_header, selects the correct
+ * demarshal function and returns the result.
+ */
+struct rbug_header * rbug_demarshal(struct rbug_proto_header *header)
+{
+ switch(header->opcode) {
+ case RBUG_OP_NOOP:
+ return (struct rbug_header *)rbug_demarshal_noop(header);
+ case RBUG_OP_PING:
+ return (struct rbug_header *)rbug_demarshal_ping(header);
+ case RBUG_OP_ERROR:
+ return (struct rbug_header *)rbug_demarshal_error(header);
+ case RBUG_OP_PING_REPLY:
+ return (struct rbug_header *)rbug_demarshal_ping_reply(header);
+ case RBUG_OP_ERROR_REPLY:
+ return (struct rbug_header *)rbug_demarshal_error_reply(header);
+ case RBUG_OP_TEXTURE_LIST:
+ return (struct rbug_header *)rbug_demarshal_texture_list(header);
+ case RBUG_OP_TEXTURE_INFO:
+ return (struct rbug_header *)rbug_demarshal_texture_info(header);
+ case RBUG_OP_TEXTURE_WRITE:
+ return (struct rbug_header *)rbug_demarshal_texture_write(header);
+ case RBUG_OP_TEXTURE_READ:
+ return (struct rbug_header *)rbug_demarshal_texture_read(header);
+ case RBUG_OP_TEXTURE_LIST_REPLY:
+ return (struct rbug_header *)rbug_demarshal_texture_list_reply(header);
+ case RBUG_OP_TEXTURE_INFO_REPLY:
+ return (struct rbug_header *)rbug_demarshal_texture_info_reply(header);
+ case RBUG_OP_TEXTURE_READ_REPLY:
+ return (struct rbug_header *)rbug_demarshal_texture_read_reply(header);
+ case RBUG_OP_CONTEXT_LIST:
+ return (struct rbug_header *)rbug_demarshal_context_list(header);
+ case RBUG_OP_CONTEXT_INFO:
+ return (struct rbug_header *)rbug_demarshal_context_info(header);
+ case RBUG_OP_CONTEXT_DRAW_BLOCK:
+ return (struct rbug_header *)rbug_demarshal_context_draw_block(header);
+ case RBUG_OP_CONTEXT_DRAW_STEP:
+ return (struct rbug_header *)rbug_demarshal_context_draw_step(header);
+ case RBUG_OP_CONTEXT_DRAW_UNBLOCK:
+ return (struct rbug_header *)rbug_demarshal_context_draw_unblock(header);
+ case RBUG_OP_CONTEXT_DRAW_RULE:
+ return (struct rbug_header *)rbug_demarshal_context_draw_rule(header);
+ case RBUG_OP_CONTEXT_FLUSH:
+ return (struct rbug_header *)rbug_demarshal_context_flush(header);
+ case RBUG_OP_CONTEXT_LIST_REPLY:
+ return (struct rbug_header *)rbug_demarshal_context_list_reply(header);
+ case RBUG_OP_CONTEXT_INFO_REPLY:
+ return (struct rbug_header *)rbug_demarshal_context_info_reply(header);
+ case RBUG_OP_CONTEXT_DRAW_BLOCKED:
+ return (struct rbug_header *)rbug_demarshal_context_draw_blocked(header);
+ case RBUG_OP_SHADER_LIST:
+ return (struct rbug_header *)rbug_demarshal_shader_list(header);
+ case RBUG_OP_SHADER_INFO:
+ return (struct rbug_header *)rbug_demarshal_shader_info(header);
+ case RBUG_OP_SHADER_DISABLE:
+ return (struct rbug_header *)rbug_demarshal_shader_disable(header);
+ case RBUG_OP_SHADER_REPLACE:
+ return (struct rbug_header *)rbug_demarshal_shader_replace(header);
+ case RBUG_OP_SHADER_LIST_REPLY:
+ return (struct rbug_header *)rbug_demarshal_shader_list_reply(header);
+ case RBUG_OP_SHADER_INFO_REPLY:
+ return (struct rbug_header *)rbug_demarshal_shader_info_reply(header);
+ default:
+ return NULL;
+ }
+}
diff --git a/src/gallium/auxiliary/rbug/rbug_internal.h b/src/gallium/auxiliary/rbug/rbug_internal.h
new file mode 100644
index 0000000000..4aba1a810f
--- /dev/null
+++ b/src/gallium/auxiliary/rbug/rbug_internal.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file is internal to the rbug protocol code and contains assorted
+ * helpers needed by that code.
+ */
+
+#ifndef _RBUG_INTERNAL_H_
+#define _RBUG_INTERNAL_H_
+
+#include "rbug/rbug_proto.h"
+
+#include "util/u_memory.h"
+#include "util/u_debug.h"
+#include <errno.h>
+
+int rbug_connection_send_start(struct rbug_connection *con, enum rbug_opcode opcode, uint32_t length);
+int rbug_connection_write(struct rbug_connection *con, void *data, uint32_t size);
+int rbug_connection_send_finish(struct rbug_connection *con, uint32_t *c);
+
+/**
+ * Only works when the alignment is a power of two.
+ */
+#define PAD(from, to) \
+do { \
+ from = (from + to - 1) & ~(to - 1); \
+} while(0)
+
+#define LEN(size) \
+do { \
+ PAD(__len, size); \
+ __len += size; \
+} while(0)
+
+#define LEN_ARRAY(size, name) \
+do { \
+ LEN(4); \
+ PAD(__len, size); \
+ __len += size * name##_len; \
+} while(0)
+
+#define WRITE(size, type, name) \
+do { \
+ PAD(__pos, size); \
+ *((type *)(&__data[__pos])) = name; \
+ __pos += size; \
+} while(0)
+
+#define WRITE_ARRAY(size, type, name) \
+do { \
+ WRITE(4, uint32_t, name##_len); \
+ PAD(__pos, size); \
+ memcpy(&__data[__pos], name, size * name##_len); \
+ __pos += size * name##_len; \
+} while(0)
+
+#define READ(size, type, name) \
+do { \
+ PAD(pos, size); \
+ pos += size; \
+ if (pos > len) \
+ break; \
+ ret->name = *((type *)(&data[pos - size])); \
+} while(0)
+
+#define READ_ARRAY(size, type, name) \
+do { \
+ READ(4, uint32_t, name##_len); \
+ if (pos > len) \
+ break; \
+ PAD(pos, size); \
+ pos += size * ret->name##_len; \
+ if (pos > len) \
+ break; \
+ ret->name = (type *)&data[pos - size * ret->name##_len]; \
+} while(0)
+
+#endif
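
To make the macro machinery above concrete, here is the layout it produces for a CONTEXT_INFO message (compare with rbug_send_context_info() in rbug_context.c); this is purely descriptive of the code above.

/*
 * LEN(8);   header   -> __len = 8
 * LEN(8);   context  -> __len = 16
 * PAD(__len, 8);     -> __len stays 16 (already aligned)
 *
 * WRITE(4, int32_t,  RBUG_OP_CONTEXT_INFO)  -> bytes  0..3
 * WRITE(4, uint32_t, __len / 4)             -> bytes  4..7   (value 4)
 * WRITE(8, rbug_context_t, context)         -> bytes  8..15
 *
 * The wire message is therefore 16 bytes long and the receiver sees
 * header->length == 4, matching the `length * 4` in rbug_get_message().
 */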
diff --git a/src/gallium/auxiliary/rbug/rbug_proto.h b/src/gallium/auxiliary/rbug/rbug_proto.h
new file mode 100644
index 0000000000..d273be0166
--- /dev/null
+++ b/src/gallium/auxiliary/rbug/rbug_proto.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file holds common definitions of the gallium remote debugging protocol.
+ */
+
+#ifndef _RBUG_PROTO_H_
+#define _RBUG_PROTO_H_
+
+#include "pipe/p_compiler.h"
+
+/**
+ * Unique identifier for each command.
+ *
+ * Replies are designated by negative opcodes.
+ */
+enum rbug_opcode
+{
+ RBUG_OP_NOOP = 0,
+ RBUG_OP_PING = 1,
+ RBUG_OP_ERROR = 2,
+ RBUG_OP_PING_REPLY = -1,
+ RBUG_OP_ERROR_REPLY = -2,
+ RBUG_OP_TEXTURE_LIST = 256,
+ RBUG_OP_TEXTURE_INFO = 257,
+ RBUG_OP_TEXTURE_WRITE = 258,
+ RBUG_OP_TEXTURE_READ = 259,
+ RBUG_OP_TEXTURE_LIST_REPLY = -256,
+ RBUG_OP_TEXTURE_INFO_REPLY = -257,
+ RBUG_OP_TEXTURE_READ_REPLY = -259,
+ RBUG_OP_CONTEXT_LIST = 512,
+ RBUG_OP_CONTEXT_INFO = 513,
+ RBUG_OP_CONTEXT_DRAW_BLOCK = 514,
+ RBUG_OP_CONTEXT_DRAW_STEP = 515,
+ RBUG_OP_CONTEXT_DRAW_UNBLOCK = 516,
+ RBUG_OP_CONTEXT_DRAW_RULE = 518,
+ RBUG_OP_CONTEXT_FLUSH = 519,
+ RBUG_OP_CONTEXT_LIST_REPLY = -512,
+ RBUG_OP_CONTEXT_INFO_REPLY = -513,
+ RBUG_OP_CONTEXT_DRAW_BLOCKED = 517,
+ RBUG_OP_SHADER_LIST = 768,
+ RBUG_OP_SHADER_INFO = 769,
+ RBUG_OP_SHADER_DISABLE = 770,
+ RBUG_OP_SHADER_REPLACE = 771,
+ RBUG_OP_SHADER_LIST_REPLY = -768,
+ RBUG_OP_SHADER_INFO_REPLY = -769,
+};
+
+/**
+ * Header for demarshaled message.
+ */
+struct rbug_header
+{
+ enum rbug_opcode opcode;
+ void *__message;
+};
+
+/**
+ * Header for a message in wire format.
+ */
+struct rbug_proto_header
+{
+ int32_t opcode;
+ uint32_t length;
+};
+
+/**
+ * Forward declare connection here, as this file is included by all users.
+ */
+struct rbug_connection;
+
+#endif
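
Two illustrative helpers (not part of the header above) that capture the conventions it documents: replies use the negated request opcode, and lengths are counted in 32-bit words.

/* Illustrative only; e.g. RBUG_OP_PING_REPLY == -RBUG_OP_PING. */
static int
rbug_opcode_is_reply(int32_t opcode)
{
   return opcode < 0;
}

/* header->length is stored in 32-bit words, so the byte size is: */
static uint32_t
rbug_wire_size(const struct rbug_proto_header *header)
{
   return header->length * 4;
}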
diff --git a/src/gallium/auxiliary/rbug/rbug_shader.c b/src/gallium/auxiliary/rbug/rbug_shader.c
new file mode 100644
index 0000000000..fccd2f55ef
--- /dev/null
+++ b/src/gallium/auxiliary/rbug/rbug_shader.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file holds the function implementation for one of the rbug extensions.
+ * Prototypes and declarations of functions and structs are in the header
+ * file matching this file's name, in the same folder.
+ *
+ * The functions starting with rbug_send_* encode a call into the wire format
+ * and send it over the supplied connection, while functions starting with
+ * rbug_demarshal_* decode data from the wire protocol.
+ *
+ * Functions ending with _reply are replies to requests.
+ */
+
+#include "rbug_internal.h"
+#include "rbug/rbug_shader.h"
+
+int rbug_send_shader_list(struct rbug_connection *__con,
+ rbug_context_t context,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(8); /* context */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_SHADER_LIST));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(8, rbug_context_t, context); /* context */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_SHADER_LIST, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
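/* Editor's note: every rbug_send_* function repeats this pattern: add up the
 * field sizes with LEN()/LEN_ARRAY(), pad the total to 8 bytes, allocate a
 * buffer, serialize with WRITE()/WRITE_ARRAY(), and hand the buffer to the
 * connection. The macros live in rbug_internal.h, which is not part of this
 * hunk; the expansions below are only a guess at their shape, for
 * illustration, and may differ from the real definitions. */

#define LEN(size)              do { __len += (size); } while (0)
#define LEN_ARRAY(size, arr)   do { __len += 4 /* element count */ \
                                          + (size) * arr##_len; } while (0)
#define PAD(offset, to)        do { (offset) = ((offset) + (to) - 1) & ~((to) - 1); } while (0)
#define WRITE(size, type, val) do { *(type *)(&__data[__pos]) = (val); \
                                    __pos += (size); } while (0)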
+
+int rbug_send_shader_info(struct rbug_connection *__con,
+ rbug_context_t context,
+ rbug_shader_t shader,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(8); /* context */
+ LEN(8); /* shader */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_SHADER_INFO));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(8, rbug_context_t, context); /* context */
+ WRITE(8, rbug_shader_t, shader); /* shader */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_SHADER_INFO, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_shader_disable(struct rbug_connection *__con,
+ rbug_context_t context,
+ rbug_shader_t shader,
+ uint8_t disable,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(8); /* context */
+ LEN(8); /* shader */
+ LEN(1); /* disable */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_SHADER_DISABLE));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(8, rbug_context_t, context); /* context */
+ WRITE(8, rbug_shader_t, shader); /* shader */
+ WRITE(1, uint8_t, disable); /* disable */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_SHADER_DISABLE, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_shader_replace(struct rbug_connection *__con,
+ rbug_context_t context,
+ rbug_shader_t shader,
+ uint32_t *tokens,
+ uint32_t tokens_len,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(8); /* context */
+ LEN(8); /* shader */
+ LEN_ARRAY(4, tokens); /* tokens */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_SHADER_REPLACE));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(8, rbug_context_t, context); /* context */
+ WRITE(8, rbug_shader_t, shader); /* shader */
+ WRITE_ARRAY(4, uint32_t, tokens); /* tokens */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_SHADER_REPLACE, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_shader_list_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ rbug_shader_t *shaders,
+ uint32_t shaders_len,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(4); /* serial */
+ LEN_ARRAY(8, shaders); /* shaders */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_SHADER_LIST_REPLY));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(4, uint32_t, serial); /* serial */
+ WRITE_ARRAY(8, rbug_shader_t, shaders); /* shaders */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_SHADER_LIST_REPLY, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_shader_info_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ uint32_t *original,
+ uint32_t original_len,
+ uint32_t *replaced,
+ uint32_t replaced_len,
+ uint8_t disabled,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(4); /* serial */
+ LEN_ARRAY(4, original); /* original */
+ LEN_ARRAY(4, replaced); /* replaced */
+ LEN(1); /* disabled */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_SHADER_INFO_REPLY));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(4, uint32_t, serial); /* serial */
+ WRITE_ARRAY(4, uint32_t, original); /* original */
+ WRITE_ARRAY(4, uint32_t, replaced); /* replaced */
+ WRITE(1, uint8_t, disabled); /* disabled */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_SHADER_INFO_REPLY, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+struct rbug_proto_shader_list * rbug_demarshal_shader_list(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_shader_list *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_SHADER_LIST)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(8, rbug_context_t, context); /* context */
+
+ return ret;
+}
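/* Editor's note: the demarshal side mirrors the send side. Each function
 * checks the opcode, points `data` just past the wire header, and pulls the
 * fields into the newly allocated proto struct with READ()/READ_ARRAY().
 * The sketch below assumes READ() assigns into the same-named field of
 * `ret`; the real macros in rbug_internal.h may handle arrays differently. */

#define READ(size, type, field) \
   do { ret->field = *(type *)(&data[pos]); pos += (size); } while (0)
#define READ_ARRAY(size, type, field) \
   do { READ(4, uint32_t, field##_len); \
        ret->field = (type *)&data[pos]; /* points into the message */ \
        pos += (size) * ret->field##_len; } while (0)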
+
+struct rbug_proto_shader_info * rbug_demarshal_shader_info(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_shader_info *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_SHADER_INFO)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(8, rbug_context_t, context); /* context */
+ READ(8, rbug_shader_t, shader); /* shader */
+
+ return ret;
+}
+
+struct rbug_proto_shader_disable * rbug_demarshal_shader_disable(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_shader_disable *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_SHADER_DISABLE)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(8, rbug_context_t, context); /* context */
+ READ(8, rbug_shader_t, shader); /* shader */
+ READ(1, uint8_t, disable); /* disable */
+
+ return ret;
+}
+
+struct rbug_proto_shader_replace * rbug_demarshal_shader_replace(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_shader_replace *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_SHADER_REPLACE)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(8, rbug_context_t, context); /* context */
+ READ(8, rbug_shader_t, shader); /* shader */
+ READ_ARRAY(4, uint32_t, tokens); /* tokens */
+
+ return ret;
+}
+
+struct rbug_proto_shader_list_reply * rbug_demarshal_shader_list_reply(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_shader_list_reply *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_SHADER_LIST_REPLY)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(4, uint32_t, serial); /* serial */
+ READ_ARRAY(8, rbug_shader_t, shaders); /* shaders */
+
+ return ret;
+}
+
+struct rbug_proto_shader_info_reply * rbug_demarshal_shader_info_reply(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_shader_info_reply *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_SHADER_INFO_REPLY)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(4, uint32_t, serial); /* serial */
+ READ_ARRAY(4, uint32_t, original); /* original */
+ READ_ARRAY(4, uint32_t, replaced); /* replaced */
+ READ(1, uint8_t, disabled); /* disabled */
+
+ return ret;
+}
diff --git a/src/gallium/auxiliary/rbug/rbug_shader.h b/src/gallium/auxiliary/rbug/rbug_shader.h
new file mode 100644
index 0000000000..b5d886781d
--- /dev/null
+++ b/src/gallium/auxiliary/rbug/rbug_shader.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file holds struct declarations and function prototypes for one of
+ * the rbug extensions. Implementation of the functions is in the same folder
+ * in the c file matching this file's name.
+ *
+ * The structs are what is returned from the demarshal functions. The functions
+ * starting with rbug_send_* encode a call into the wire format and send that to
+ * the supplied connection, while functions starting with rbug_demarshal_*
+ * demarshal data from the wire protocol.
+ *
+ * Structs and functions ending with _reply are replies to requests.
+ */
+
+#ifndef _RBUG_PROTO_SHADER_H_
+#define _RBUG_PROTO_SHADER_H_
+
+#include "rbug/rbug_proto.h"
+#include "rbug/rbug_core.h"
+
+struct rbug_proto_shader_list
+{
+ struct rbug_header header;
+ rbug_context_t context;
+};
+
+struct rbug_proto_shader_info
+{
+ struct rbug_header header;
+ rbug_context_t context;
+ rbug_shader_t shader;
+};
+
+struct rbug_proto_shader_disable
+{
+ struct rbug_header header;
+ rbug_context_t context;
+ rbug_shader_t shader;
+ uint8_t disable;
+};
+
+struct rbug_proto_shader_replace
+{
+ struct rbug_header header;
+ rbug_context_t context;
+ rbug_shader_t shader;
+ uint32_t *tokens;
+ uint32_t tokens_len;
+};
+
+struct rbug_proto_shader_list_reply
+{
+ struct rbug_header header;
+ uint32_t serial;
+ rbug_shader_t *shaders;
+ uint32_t shaders_len;
+};
+
+struct rbug_proto_shader_info_reply
+{
+ struct rbug_header header;
+ uint32_t serial;
+ uint32_t *original;
+ uint32_t original_len;
+ uint32_t *replaced;
+ uint32_t replaced_len;
+ uint8_t disabled;
+};
+
+int rbug_send_shader_list(struct rbug_connection *__con,
+ rbug_context_t context,
+ uint32_t *__serial);
+
+int rbug_send_shader_info(struct rbug_connection *__con,
+ rbug_context_t context,
+ rbug_shader_t shader,
+ uint32_t *__serial);
+
+int rbug_send_shader_disable(struct rbug_connection *__con,
+ rbug_context_t context,
+ rbug_shader_t shader,
+ uint8_t disable,
+ uint32_t *__serial);
+
+int rbug_send_shader_replace(struct rbug_connection *__con,
+ rbug_context_t context,
+ rbug_shader_t shader,
+ uint32_t *tokens,
+ uint32_t tokens_len,
+ uint32_t *__serial);
+
+int rbug_send_shader_list_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ rbug_shader_t *shaders,
+ uint32_t shaders_len,
+ uint32_t *__serial);
+
+int rbug_send_shader_info_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ uint32_t *original,
+ uint32_t original_len,
+ uint32_t *replaced,
+ uint32_t replaced_len,
+ uint8_t disabled,
+ uint32_t *__serial);
+
+struct rbug_proto_shader_list * rbug_demarshal_shader_list(struct rbug_proto_header *header);
+
+struct rbug_proto_shader_info * rbug_demarshal_shader_info(struct rbug_proto_header *header);
+
+struct rbug_proto_shader_disable * rbug_demarshal_shader_disable(struct rbug_proto_header *header);
+
+struct rbug_proto_shader_replace * rbug_demarshal_shader_replace(struct rbug_proto_header *header);
+
+struct rbug_proto_shader_list_reply * rbug_demarshal_shader_list_reply(struct rbug_proto_header *header);
+
+struct rbug_proto_shader_info_reply * rbug_demarshal_shader_info_reply(struct rbug_proto_header *header);
+
+#endif
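/* Editor's note: a hedged usage sketch tying the shader calls together. A
 * front-end sends a request, keeps the serial it got back, and matches it
 * against the serial in the eventual reply. rbug_get_message() and
 * rbug_free_header() are made-up names standing in for the connection-side
 * helpers, which are not part of this file. */

static void
dump_shader_count(struct rbug_connection *con, rbug_context_t ctx)
{
   uint32_t serial = 0;
   struct rbug_proto_header *hdr;
   struct rbug_proto_shader_list_reply *reply;

   if (rbug_send_shader_list(con, ctx, &serial) != 0)
      return;

   hdr = rbug_get_message(con, serial);            /* assumed helper */
   reply = rbug_demarshal_shader_list_reply(hdr);
   if (reply && reply->serial == serial)
      debug_printf("context has %u shaders\n", reply->shaders_len);

   FREE(reply);
   rbug_free_header(hdr);                          /* assumed helper */
}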
diff --git a/src/gallium/auxiliary/rbug/rbug_texture.c b/src/gallium/auxiliary/rbug/rbug_texture.c
new file mode 100644
index 0000000000..5a918fe6bc
--- /dev/null
+++ b/src/gallium/auxiliary/rbug/rbug_texture.c
@@ -0,0 +1,631 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file holds the function implementation for one of the rbug extensions.
+ * Prototypes and declarations of functions and structs are in the same folder
+ * in the header file matching this file's name.
+ *
+ * The functions starting with rbug_send_* encode a call into the wire format
+ * and send that to the supplied connection, while functions starting with
+ * rbug_demarshal_* demarshal data from the wire protocol.
+ *
+ * Functions ending with _reply are replies to requests.
+ */
+
+#include "rbug_internal.h"
+#include "rbug/rbug_texture.h"
+
+int rbug_send_texture_list(struct rbug_connection *__con,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_TEXTURE_LIST));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_TEXTURE_LIST, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_texture_info(struct rbug_connection *__con,
+ rbug_texture_t texture,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(8); /* texture */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_TEXTURE_INFO));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(8, rbug_texture_t, texture); /* texture */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_TEXTURE_INFO, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_texture_write(struct rbug_connection *__con,
+ rbug_texture_t texture,
+ uint32_t face,
+ uint32_t level,
+ uint32_t zslice,
+ uint32_t x,
+ uint32_t y,
+ uint32_t w,
+ uint32_t h,
+ uint8_t *data,
+ uint32_t data_len,
+ uint32_t stride,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(8); /* texture */
+ LEN(4); /* face */
+ LEN(4); /* level */
+ LEN(4); /* zslice */
+ LEN(4); /* x */
+ LEN(4); /* y */
+ LEN(4); /* w */
+ LEN(4); /* h */
+ LEN_ARRAY(1, data); /* data */
+ LEN(4); /* stride */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_TEXTURE_WRITE));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(8, rbug_texture_t, texture); /* texture */
+ WRITE(4, uint32_t, face); /* face */
+ WRITE(4, uint32_t, level); /* level */
+ WRITE(4, uint32_t, zslice); /* zslice */
+ WRITE(4, uint32_t, x); /* x */
+ WRITE(4, uint32_t, y); /* y */
+ WRITE(4, uint32_t, w); /* w */
+ WRITE(4, uint32_t, h); /* h */
+ WRITE_ARRAY(1, uint8_t, data); /* data */
+ WRITE(4, uint32_t, stride); /* stride */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_TEXTURE_WRITE, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_texture_read(struct rbug_connection *__con,
+ rbug_texture_t texture,
+ uint32_t face,
+ uint32_t level,
+ uint32_t zslice,
+ uint32_t x,
+ uint32_t y,
+ uint32_t w,
+ uint32_t h,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(8); /* texture */
+ LEN(4); /* face */
+ LEN(4); /* level */
+ LEN(4); /* zslice */
+ LEN(4); /* x */
+ LEN(4); /* y */
+ LEN(4); /* w */
+ LEN(4); /* h */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_TEXTURE_READ));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(8, rbug_texture_t, texture); /* texture */
+ WRITE(4, uint32_t, face); /* face */
+ WRITE(4, uint32_t, level); /* level */
+ WRITE(4, uint32_t, zslice); /* zslice */
+ WRITE(4, uint32_t, x); /* x */
+ WRITE(4, uint32_t, y); /* y */
+ WRITE(4, uint32_t, w); /* w */
+ WRITE(4, uint32_t, h); /* h */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_TEXTURE_READ, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_texture_list_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ rbug_texture_t *textures,
+ uint32_t textures_len,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(4); /* serial */
+ LEN_ARRAY(8, textures); /* textures */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_TEXTURE_LIST_REPLY));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(4, uint32_t, serial); /* serial */
+ WRITE_ARRAY(8, rbug_texture_t, textures); /* textures */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_TEXTURE_LIST_REPLY, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_texture_info_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ uint32_t target,
+ uint32_t format,
+ uint32_t *width,
+ uint32_t width_len,
+ uint32_t *height,
+ uint32_t height_len,
+ uint32_t *depth,
+ uint32_t depth_len,
+ uint32_t blockw,
+ uint32_t blockh,
+ uint32_t blocksize,
+ uint32_t last_level,
+ uint32_t nr_samples,
+ uint32_t tex_usage,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(4); /* serial */
+ LEN(4); /* target */
+ LEN(4); /* format */
+ LEN_ARRAY(4, width); /* width */
+ LEN_ARRAY(4, height); /* height */
+ LEN_ARRAY(4, depth); /* depth */
+ LEN(4); /* blockw */
+ LEN(4); /* blockh */
+ LEN(4); /* blocksize */
+ LEN(4); /* last_level */
+ LEN(4); /* nr_samples */
+ LEN(4); /* tex_usage */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_TEXTURE_INFO_REPLY));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(4, uint32_t, serial); /* serial */
+ WRITE(4, uint32_t, target); /* target */
+ WRITE(4, uint32_t, format); /* format */
+ WRITE_ARRAY(4, uint32_t, width); /* width */
+ WRITE_ARRAY(4, uint32_t, height); /* height */
+ WRITE_ARRAY(4, uint32_t, depth); /* depth */
+ WRITE(4, uint32_t, blockw); /* blockw */
+ WRITE(4, uint32_t, blockh); /* blockh */
+ WRITE(4, uint32_t, blocksize); /* blocksize */
+ WRITE(4, uint32_t, last_level); /* last_level */
+ WRITE(4, uint32_t, nr_samples); /* nr_samples */
+ WRITE(4, uint32_t, tex_usage); /* tex_usage */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_TEXTURE_INFO_REPLY, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+int rbug_send_texture_read_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ uint32_t format,
+ uint32_t blockw,
+ uint32_t blockh,
+ uint32_t blocksize,
+ uint8_t *data,
+ uint32_t data_len,
+ uint32_t stride,
+ uint32_t *__serial)
+{
+ uint32_t __len = 0;
+ uint32_t __pos = 0;
+ uint8_t *__data = NULL;
+ int __ret = 0;
+
+ LEN(8); /* header */
+ LEN(4); /* serial */
+ LEN(4); /* format */
+ LEN(4); /* blockw */
+ LEN(4); /* blockh */
+ LEN(4); /* blocksize */
+ LEN_ARRAY(1, data); /* data */
+ LEN(4); /* stride */
+
+ /* align */
+ PAD(__len, 8);
+
+ __data = (uint8_t*)MALLOC(__len);
+ if (!__data)
+ return -ENOMEM;
+
+ WRITE(4, int32_t, ((int32_t)RBUG_OP_TEXTURE_READ_REPLY));
+ WRITE(4, uint32_t, ((uint32_t)(__len / 4)));
+ WRITE(4, uint32_t, serial); /* serial */
+ WRITE(4, uint32_t, format); /* format */
+ WRITE(4, uint32_t, blockw); /* blockw */
+ WRITE(4, uint32_t, blockh); /* blockh */
+ WRITE(4, uint32_t, blocksize); /* blocksize */
+ WRITE_ARRAY(1, uint8_t, data); /* data */
+ WRITE(4, uint32_t, stride); /* stride */
+
+ /* final pad */
+ PAD(__pos, 8);
+
+ if (__pos != __len) {
+ __ret = -EINVAL;
+ } else {
+ rbug_connection_send_start(__con, RBUG_OP_TEXTURE_READ_REPLY, __len);
+ rbug_connection_write(__con, __data, __len);
+ __ret = rbug_connection_send_finish(__con, __serial);
+ }
+
+ FREE(__data);
+ return __ret;
+}
+
+struct rbug_proto_texture_list * rbug_demarshal_texture_list(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_texture_list *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_TEXTURE_LIST)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+
+ return ret;
+}
+
+struct rbug_proto_texture_info * rbug_demarshal_texture_info(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_texture_info *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_TEXTURE_INFO)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(8, rbug_texture_t, texture); /* texture */
+
+ return ret;
+}
+
+struct rbug_proto_texture_write * rbug_demarshal_texture_write(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_texture_write *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_TEXTURE_WRITE)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(8, rbug_texture_t, texture); /* texture */
+ READ(4, uint32_t, face); /* face */
+ READ(4, uint32_t, level); /* level */
+ READ(4, uint32_t, zslice); /* zslice */
+ READ(4, uint32_t, x); /* x */
+ READ(4, uint32_t, y); /* y */
+ READ(4, uint32_t, w); /* w */
+ READ(4, uint32_t, h); /* h */
+ READ_ARRAY(1, uint8_t, data); /* data */
+ READ(4, uint32_t, stride); /* stride */
+
+ return ret;
+}
+
+struct rbug_proto_texture_read * rbug_demarshal_texture_read(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_texture_read *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_TEXTURE_READ)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(8, rbug_texture_t, texture); /* texture */
+ READ(4, uint32_t, face); /* face */
+ READ(4, uint32_t, level); /* level */
+ READ(4, uint32_t, zslice); /* zslice */
+ READ(4, uint32_t, x); /* x */
+ READ(4, uint32_t, y); /* y */
+ READ(4, uint32_t, w); /* w */
+ READ(4, uint32_t, h); /* h */
+
+ return ret;
+}
+
+struct rbug_proto_texture_list_reply * rbug_demarshal_texture_list_reply(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_texture_list_reply *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_TEXTURE_LIST_REPLY)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(4, uint32_t, serial); /* serial */
+ READ_ARRAY(8, rbug_texture_t, textures); /* textures */
+
+ return ret;
+}
+
+struct rbug_proto_texture_info_reply * rbug_demarshal_texture_info_reply(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_texture_info_reply *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_TEXTURE_INFO_REPLY)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(4, uint32_t, serial); /* serial */
+ READ(4, uint32_t, target); /* target */
+ READ(4, uint32_t, format); /* format */
+ READ_ARRAY(4, uint32_t, width); /* width */
+ READ_ARRAY(4, uint32_t, height); /* height */
+ READ_ARRAY(4, uint32_t, depth); /* depth */
+ READ(4, uint32_t, blockw); /* blockw */
+ READ(4, uint32_t, blockh); /* blockh */
+ READ(4, uint32_t, blocksize); /* blocksize */
+ READ(4, uint32_t, last_level); /* last_level */
+ READ(4, uint32_t, nr_samples); /* nr_samples */
+ READ(4, uint32_t, tex_usage); /* tex_usage */
+
+ return ret;
+}
+
+struct rbug_proto_texture_read_reply * rbug_demarshal_texture_read_reply(struct rbug_proto_header *header)
+{
+ uint32_t len = 0;
+ uint32_t pos = 0;
+ uint8_t *data = NULL;
+ struct rbug_proto_texture_read_reply *ret;
+
+ if (!header)
+ return NULL;
+ if (header->opcode != (int16_t)RBUG_OP_TEXTURE_READ_REPLY)
+ return NULL;
+
+ pos = 0;
+ len = header->length * 4;
+ data = (uint8_t*)&header[1];
+ ret = MALLOC(sizeof(*ret));
+ if (!ret)
+ return NULL;
+
+ ret->header.__message = header;
+ ret->header.opcode = header->opcode;
+
+ READ(4, uint32_t, serial); /* serial */
+ READ(4, uint32_t, format); /* format */
+ READ(4, uint32_t, blockw); /* blockw */
+ READ(4, uint32_t, blockh); /* blockh */
+ READ(4, uint32_t, blocksize); /* blocksize */
+ READ_ARRAY(1, uint8_t, data); /* data */
+ READ(4, uint32_t, stride); /* stride */
+
+ return ret;
+}
diff --git a/src/gallium/auxiliary/rbug/rbug_texture.h b/src/gallium/auxiliary/rbug/rbug_texture.h
new file mode 100644
index 0000000000..fbb247e1d4
--- /dev/null
+++ b/src/gallium/auxiliary/rbug/rbug_texture.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This file holds struct declarations and function prototypes for one of
+ * the rbug extensions. Implementation of the functions is in the same folder
+ * in the c file matching this file's name.
+ *
+ * The structs are what is returned from the demarshal functions. The functions
+ * starting with rbug_send_* encode a call into the wire format and send that to
+ * the supplied connection, while functions starting with rbug_demarshal_*
+ * demarshal data from the wire protocol.
+ *
+ * Structs and functions ending with _reply are replies to requests.
+ */
+
+#ifndef _RBUG_PROTO_TEXTURE_H_
+#define _RBUG_PROTO_TEXTURE_H_
+
+#include "rbug/rbug_proto.h"
+#include "rbug/rbug_core.h"
+
+struct rbug_proto_texture_list
+{
+ struct rbug_header header;
+};
+
+struct rbug_proto_texture_info
+{
+ struct rbug_header header;
+ rbug_texture_t texture;
+};
+
+struct rbug_proto_texture_write
+{
+ struct rbug_header header;
+ rbug_texture_t texture;
+ uint32_t face;
+ uint32_t level;
+ uint32_t zslice;
+ uint32_t x;
+ uint32_t y;
+ uint32_t w;
+ uint32_t h;
+ uint8_t *data;
+ uint32_t data_len;
+ uint32_t stride;
+};
+
+struct rbug_proto_texture_read
+{
+ struct rbug_header header;
+ rbug_texture_t texture;
+ uint32_t face;
+ uint32_t level;
+ uint32_t zslice;
+ uint32_t x;
+ uint32_t y;
+ uint32_t w;
+ uint32_t h;
+};
+
+struct rbug_proto_texture_list_reply
+{
+ struct rbug_header header;
+ uint32_t serial;
+ rbug_texture_t *textures;
+ uint32_t textures_len;
+};
+
+struct rbug_proto_texture_info_reply
+{
+ struct rbug_header header;
+ uint32_t serial;
+ uint32_t target;
+ uint32_t format;
+ uint32_t *width;
+ uint32_t width_len;
+ uint32_t *height;
+ uint32_t height_len;
+ uint32_t *depth;
+ uint32_t depth_len;
+ uint32_t blockw;
+ uint32_t blockh;
+ uint32_t blocksize;
+ uint32_t last_level;
+ uint32_t nr_samples;
+ uint32_t tex_usage;
+};
+
+struct rbug_proto_texture_read_reply
+{
+ struct rbug_header header;
+ uint32_t serial;
+ uint32_t format;
+ uint32_t blockw;
+ uint32_t blockh;
+ uint32_t blocksize;
+ uint8_t *data;
+ uint32_t data_len;
+ uint32_t stride;
+};
+
+int rbug_send_texture_list(struct rbug_connection *__con,
+ uint32_t *__serial);
+
+int rbug_send_texture_info(struct rbug_connection *__con,
+ rbug_texture_t texture,
+ uint32_t *__serial);
+
+int rbug_send_texture_write(struct rbug_connection *__con,
+ rbug_texture_t texture,
+ uint32_t face,
+ uint32_t level,
+ uint32_t zslice,
+ uint32_t x,
+ uint32_t y,
+ uint32_t w,
+ uint32_t h,
+ uint8_t *data,
+ uint32_t data_len,
+ uint32_t stride,
+ uint32_t *__serial);
+
+int rbug_send_texture_read(struct rbug_connection *__con,
+ rbug_texture_t texture,
+ uint32_t face,
+ uint32_t level,
+ uint32_t zslice,
+ uint32_t x,
+ uint32_t y,
+ uint32_t w,
+ uint32_t h,
+ uint32_t *__serial);
+
+int rbug_send_texture_list_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ rbug_texture_t *textures,
+ uint32_t textures_len,
+ uint32_t *__serial);
+
+int rbug_send_texture_info_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ uint32_t target,
+ uint32_t format,
+ uint32_t *width,
+ uint32_t width_len,
+ uint32_t *height,
+ uint32_t height_len,
+ uint32_t *depth,
+ uint32_t depth_len,
+ uint32_t blockw,
+ uint32_t blockh,
+ uint32_t blocksize,
+ uint32_t last_level,
+ uint32_t nr_samples,
+ uint32_t tex_usage,
+ uint32_t *__serial);
+
+int rbug_send_texture_read_reply(struct rbug_connection *__con,
+ uint32_t serial,
+ uint32_t format,
+ uint32_t blockw,
+ uint32_t blockh,
+ uint32_t blocksize,
+ uint8_t *data,
+ uint32_t data_len,
+ uint32_t stride,
+ uint32_t *__serial);
+
+struct rbug_proto_texture_list * rbug_demarshal_texture_list(struct rbug_proto_header *header);
+
+struct rbug_proto_texture_info * rbug_demarshal_texture_info(struct rbug_proto_header *header);
+
+struct rbug_proto_texture_write * rbug_demarshal_texture_write(struct rbug_proto_header *header);
+
+struct rbug_proto_texture_read * rbug_demarshal_texture_read(struct rbug_proto_header *header);
+
+struct rbug_proto_texture_list_reply * rbug_demarshal_texture_list_reply(struct rbug_proto_header *header);
+
+struct rbug_proto_texture_info_reply * rbug_demarshal_texture_info_reply(struct rbug_proto_header *header);
+
+struct rbug_proto_texture_read_reply * rbug_demarshal_texture_read_reply(struct rbug_proto_header *header);
+
+#endif
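/* Editor's note: in a texture_read reply, blockw/blockh give the pixel block
 * size, blocksize the bytes per block, and stride the bytes per block row of
 * `data`. A hedged sketch of how a client could sanity-check the buffer
 * against the w x h rectangle it requested; the real rbug client may do this
 * differently. */

static boolean
texture_read_reply_fits(const struct rbug_proto_texture_read_reply *r,
                        uint32_t w, uint32_t h)
{
   uint32_t blocks_x = (w + r->blockw - 1) / r->blockw;
   uint32_t blocks_y = (h + r->blockh - 1) / r->blockh;

   return r->stride >= blocks_x * r->blocksize &&
          r->data_len >= blocks_y * r->stride;
}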
diff --git a/src/gallium/auxiliary/rtasm/Makefile b/src/gallium/auxiliary/rtasm/Makefile
new file mode 100644
index 0000000000..ab8ea464c6
--- /dev/null
+++ b/src/gallium/auxiliary/rtasm/Makefile
@@ -0,0 +1,13 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = rtasm
+
+C_SOURCES = \
+ rtasm_cpu.c \
+ rtasm_execmem.c \
+ rtasm_x86sse.c \
+ rtasm_ppc.c \
+ rtasm_ppc_spe.c
+
+include ../../Makefile.template
diff --git a/src/gallium/auxiliary/rtasm/SConscript b/src/gallium/auxiliary/rtasm/SConscript
new file mode 100644
index 0000000000..eb48368acc
--- /dev/null
+++ b/src/gallium/auxiliary/rtasm/SConscript
@@ -0,0 +1,13 @@
+Import('*')
+
+rtasm = env.ConvenienceLibrary(
+ target = 'rtasm',
+ source = [
+ 'rtasm_cpu.c',
+ 'rtasm_execmem.c',
+ 'rtasm_x86sse.c',
+ 'rtasm_ppc.c',
+ 'rtasm_ppc_spe.c',
+ ])
+
+auxiliaries.insert(0, rtasm)
diff --git a/src/gallium/auxiliary/rtasm/rtasm_cpu.c b/src/gallium/auxiliary/rtasm/rtasm_cpu.c
new file mode 100644
index 0000000000..2e15751e50
--- /dev/null
+++ b/src/gallium/auxiliary/rtasm/rtasm_cpu.c
@@ -0,0 +1,67 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "util/u_debug.h"
+#include "rtasm_cpu.h"
+
+
+#if defined(PIPE_ARCH_X86)
+static boolean rtasm_sse_enabled(void)
+{
+ static boolean firsttime = 1;
+ static boolean enabled;
+
+ /* This gets called quite often at the moment:
+ */
+ if (firsttime) {
+ enabled = !debug_get_bool_option("GALLIUM_NOSSE", FALSE);
+ firsttime = FALSE;
+ }
+ return enabled;
+}
+#endif
+
+int rtasm_cpu_has_sse(void)
+{
+ /* FIXME: actually detect this at run-time */
+#if defined(PIPE_ARCH_X86)
+ return rtasm_sse_enabled();
+#else
+ return 0;
+#endif
+}
+
+int rtasm_cpu_has_sse2(void)
+{
+ /* FIXME: actually detect this at run-time */
+#if defined(PIPE_ARCH_X86)
+ return rtasm_sse_enabled();
+#else
+ return 0;
+#endif
+}
diff --git a/src/gallium/auxiliary/rtasm/rtasm_cpu.h b/src/gallium/auxiliary/rtasm/rtasm_cpu.h
new file mode 100644
index 0000000000..ebc71634fd
--- /dev/null
+++ b/src/gallium/auxiliary/rtasm/rtasm_cpu.h
@@ -0,0 +1,42 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Runtime detection of CPU capabilities.
+ */
+
+#ifndef _RTASM_CPU_H_
+#define _RTASM_CPU_H_
+
+
+int rtasm_cpu_has_sse(void);
+
+int rtasm_cpu_has_sse2(void);
+
+
+#endif /* _RTASM_CPU_H_ */
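/* Editor's note: these queries are meant to gate SSE code paths in the
 * run-time assemblers; as the FIXMEs in rtasm_cpu.c note, they currently
 * only honour the GALLIUM_NOSSE override on x86 rather than doing real
 * run-time detection. A small usage sketch with made-up function names: */

void *
pick_shader_backend(void)
{
   if (rtasm_cpu_has_sse2())
      return create_sse_backend();      /* hypothetical SSE2 code generator */
   return create_generic_backend();     /* hypothetical portable fallback */
}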
diff --git a/src/gallium/auxiliary/rtasm/rtasm_execmem.c b/src/gallium/auxiliary/rtasm/rtasm_execmem.c
new file mode 100644
index 0000000000..01811d5011
--- /dev/null
+++ b/src/gallium/auxiliary/rtasm/rtasm_execmem.c
@@ -0,0 +1,141 @@
+/**************************************************************************
+ *
+ * Copyright (C) 1999-2005 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * \file rtasm_execmem.c
+ * Functions for allocating executable memory.
+ *
+ * \author Keith Whitwell
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+#include "pipe/p_thread.h"
+#include "util/u_memory.h"
+
+#include "rtasm_execmem.h"
+
+#if defined(PIPE_OS_BSD)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
+
+
+/*
+ * Allocate a large block of memory which can hold code, then dole it out
+ * in pieces by means of the generic memory manager code.
+ */
+
+#include <unistd.h>
+#include <sys/mman.h>
+#include "pipe/p_thread.h"
+#include "util/u_mm.h"
+
+#define EXEC_HEAP_SIZE (10*1024*1024)
+
+pipe_static_mutex(exec_mutex);
+
+static struct mem_block *exec_heap = NULL;
+static unsigned char *exec_mem = NULL;
+
+
+static void
+init_heap(void)
+{
+ if (!exec_heap)
+ exec_heap = u_mmInit( 0, EXEC_HEAP_SIZE );
+
+ if (!exec_mem)
+ exec_mem = (unsigned char *) mmap(0, EXEC_HEAP_SIZE,
+ PROT_EXEC | PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+}
+
+
+void *
+rtasm_exec_malloc(size_t size)
+{
+ struct mem_block *block = NULL;
+ void *addr = NULL;
+
+ pipe_mutex_lock(exec_mutex);
+
+ init_heap();
+
+ if (exec_heap) {
+ size = (size + 31) & ~31; /* next multiple of 32 bytes */
+ block = u_mmAllocMem( exec_heap, size, 5, 0 ); /* 5 -> 32-byte alignment */
+ }
+
+ if (block)
+ addr = exec_mem + block->ofs;
+ else
+ debug_printf("rtasm_exec_malloc failed\n");
+
+ pipe_mutex_unlock(exec_mutex);
+
+ return addr;
+}
+
+
+void
+rtasm_exec_free(void *addr)
+{
+ pipe_mutex_lock(exec_mutex);
+
+ if (exec_heap) {
+ struct mem_block *block = u_mmFindBlock(exec_heap, (unsigned char *)addr - exec_mem);
+
+ if (block)
+ u_mmFreeMem(block);
+ }
+
+ pipe_mutex_unlock(exec_mutex);
+}
+
+
+#else /* PIPE_OS_LINUX || PIPE_OS_BSD || PIPE_OS_SOLARIS */
+
+/*
+ * Just use regular memory.
+ */
+
+void *
+rtasm_exec_malloc(size_t size)
+{
+ return MALLOC( size );
+}
+
+
+void
+rtasm_exec_free(void *addr)
+{
+ FREE(addr);
+}
+
+
+#endif /* PIPE_OS_LINUX || PIPE_OS_BSD || PIPE_OS_SOLARIS */
diff --git a/src/gallium/auxiliary/rtasm/rtasm_execmem.h b/src/gallium/auxiliary/rtasm/rtasm_execmem.h
new file mode 100644
index 0000000000..155c6d34e0
--- /dev/null
+++ b/src/gallium/auxiliary/rtasm/rtasm_execmem.h
@@ -0,0 +1,45 @@
+/**************************************************************************
+ *
+ * Copyright (C) 1999-2005 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file rtasm_execmem.h
+ * Functions for allocating executable memory.
+ *
+ * \author Keith Whitwell
+ */
+
+#ifndef _RTASM_EXECMEM_H_
+#define _RTASM_EXECMEM_H_
+
+#include "pipe/p_compiler.h"
+
+
+extern void *
+rtasm_exec_malloc( size_t size );
+
+
+extern void
+rtasm_exec_free( void *addr );
+
+
+#endif
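/* Editor's note: rtasm_exec_malloc() exists so generated machine code lands
 * in pages mapped PROT_EXEC (on Linux/BSD/Solaris it sub-allocates from one
 * mmap'd heap; elsewhere it falls back to plain MALLOC). A hedged sketch of
 * the intended call pattern; the code bytes themselves would come from one
 * of the emitters in this directory. */

#include <string.h>

typedef void (*generated_func)(void);

static generated_func
install_code(const void *code, size_t size)
{
   void *buf = rtasm_exec_malloc(size);
   if (!buf)
      return NULL;
   memcpy(buf, code, size);   /* copy pre-generated instructions */
   return (generated_func)buf;
}

/* Usage: generated_func f = install_code(code, size); if (f) f();
 * and rtasm_exec_free(f) once the code is no longer needed. */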
diff --git a/src/gallium/auxiliary/rtasm/rtasm_ppc.c b/src/gallium/auxiliary/rtasm/rtasm_ppc.c
new file mode 100644
index 0000000000..ef4b306cb6
--- /dev/null
+++ b/src/gallium/auxiliary/rtasm/rtasm_ppc.c
@@ -0,0 +1,1077 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2008 Tungsten Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * PPC code generation.
+ * For reference, see http://www.power.org/resources/reading/PowerISA_V2.05.pdf
+ * ABI info: http://www.cs.utsa.edu/~whaley/teach/cs6463FHPO/LEC/lec12_ho.pdf
+ *
+ * Other PPC refs:
+ * http://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/852569B20050FF778525699600719DF2
+ * http://www.ibm.com/developerworks/eserver/library/es-archguide-v2.html
+ * http://www.freescale.com/files/product/doc/MPCFPE32B.pdf
+ *
+ * \author Brian Paul
+ */
+
+
+#include <stdio.h>
+#include "util/u_memory.h"
+#include "util/u_debug.h"
+#include "rtasm_execmem.h"
+#include "rtasm_ppc.h"
+
+
+void
+ppc_init_func(struct ppc_function *p)
+{
+ uint i;
+
+ memset(p, 0, sizeof(*p));
+
+ p->num_inst = 0;
+ p->max_inst = 100; /* first guess at buffer size */
+ p->store = rtasm_exec_malloc(p->max_inst * PPC_INST_SIZE);
+ p->reg_used = 0x0;
+ p->fp_used = 0x0;
+ p->vec_used = 0x0;
+
+ p->print = FALSE;
+ p->indent = 0;
+
+   /* only allow using gp registers 3..11 for now */
+ for (i = 0; i < 3; i++)
+ ppc_reserve_register(p, i);
+ for (i = 12; i < PPC_NUM_REGS; i++)
+ ppc_reserve_register(p, i);
+}
+
+
+void
+ppc_release_func(struct ppc_function *p)
+{
+ assert(p->num_inst <= p->max_inst);
+ if (p->store != NULL) {
+ rtasm_exec_free(p->store);
+ }
+ p->store = NULL;
+}
+
+
+uint
+ppc_num_instructions(const struct ppc_function *p)
+{
+ return p->num_inst;
+}
+
+
+void (*ppc_get_func(struct ppc_function *p))(void)
+{
+#if 0
+ DUMP_END();
+ if (DISASSEM && p->store)
+ debug_printf("disassemble %p %p\n", p->store, p->csr);
+
+ if (p->store == p->error_overflow)
+ return (void (*)(void)) NULL;
+ else
+#endif
+ return (void (*)(void)) p->store;
+}
+
+
+void
+ppc_dump_func(const struct ppc_function *p)
+{
+ uint i;
+ for (i = 0; i < p->num_inst; i++) {
+ debug_printf("%3u: 0x%08x\n", i, p->store[i]);
+ }
+}
+
+
+void
+ppc_print_code(struct ppc_function *p, boolean enable)
+{
+ p->print = enable;
+}
+
+
+void
+ppc_indent(struct ppc_function *p, int spaces)
+{
+ p->indent += spaces;
+}
+
+
+static void
+indent(const struct ppc_function *p)
+{
+ int i;
+ for (i = 0; i < p->indent; i++) {
+ putchar(' ');
+ }
+}
+
+
+void
+ppc_comment(struct ppc_function *p, int rel_indent, const char *s)
+{
+ if (p->print) {
+ p->indent += rel_indent;
+ indent(p);
+ p->indent -= rel_indent;
+ printf("# %s\n", s);
+ }
+}
+
+
+/**
+ * Mark a register as being unavailable.
+ */
+int
+ppc_reserve_register(struct ppc_function *p, int reg)
+{
+ assert(reg < PPC_NUM_REGS);
+ p->reg_used |= (1 << reg);
+ return reg;
+}
+
+
+/**
+ * Allocate a general purpose register.
+ * \return register index or -1 if none left.
+ */
+int
+ppc_allocate_register(struct ppc_function *p)
+{
+ unsigned i;
+ for (i = 0; i < PPC_NUM_REGS; i++) {
+ const uint32_t mask = 1 << i;
+ if ((p->reg_used & mask) == 0) {
+ p->reg_used |= mask;
+ return i;
+ }
+ }
+ printf("OUT OF PPC registers!\n");
+ return -1;
+}
+
+
+/**
+ * Mark the given general purpose register as "unallocated".
+ */
+void
+ppc_release_register(struct ppc_function *p, int reg)
+{
+ assert(reg < PPC_NUM_REGS);
+ assert(p->reg_used & (1 << reg));
+ p->reg_used &= ~(1 << reg);
+}
+
+
+/**
+ * Allocate a floating point register.
+ * \return register index or -1 if none left.
+ */
+int
+ppc_allocate_fp_register(struct ppc_function *p)
+{
+ unsigned i;
+ for (i = 0; i < PPC_NUM_FP_REGS; i++) {
+ const uint32_t mask = 1 << i;
+ if ((p->fp_used & mask) == 0) {
+ p->fp_used |= mask;
+ return i;
+ }
+ }
+ printf("OUT OF PPC FP registers!\n");
+ return -1;
+}
+
+
+/**
+ * Mark the given floating point register as "unallocated".
+ */
+void
+ppc_release_fp_register(struct ppc_function *p, int reg)
+{
+ assert(reg < PPC_NUM_FP_REGS);
+ assert(p->fp_used & (1 << reg));
+ p->fp_used &= ~(1 << reg);
+}
+
+
+/**
+ * Allocate a vector register.
+ * \return register index or -1 if none left.
+ */
+int
+ppc_allocate_vec_register(struct ppc_function *p)
+{
+ unsigned i;
+ for (i = 0; i < PPC_NUM_VEC_REGS; i++) {
+ const uint32_t mask = 1 << i;
+ if ((p->vec_used & mask) == 0) {
+ p->vec_used |= mask;
+ return i;
+ }
+ }
+ printf("OUT OF PPC VEC registers!\n");
+ return -1;
+}
+
+
+/**
+ * Mark the given vector register as "unallocated".
+ */
+void
+ppc_release_vec_register(struct ppc_function *p, int reg)
+{
+ assert(reg < PPC_NUM_VEC_REGS);
+ assert(p->vec_used & (1 << reg));
+ p->vec_used &= ~(1 << reg);
+}
+
+
+/**
+ * Append instruction to instruction buffer. Grow buffer if out of room.
+ */
+static void
+emit_instruction(struct ppc_function *p, uint32_t inst_bits)
+{
+ if (!p->store)
+ return; /* out of memory, drop the instruction */
+
+ if (p->num_inst == p->max_inst) {
+ /* allocate larger buffer */
+ uint32_t *newbuf;
+ p->max_inst *= 2; /* 2x larger */
+ newbuf = rtasm_exec_malloc(p->max_inst * PPC_INST_SIZE);
+ if (newbuf) {
+ memcpy(newbuf, p->store, p->num_inst * PPC_INST_SIZE);
+ }
+ rtasm_exec_free(p->store);
+ p->store = newbuf;
+ if (!p->store) {
+ /* out of memory */
+ p->num_inst = 0;
+ return;
+ }
+ }
+
+ p->store[p->num_inst++] = inst_bits;
+}
+
+
+union vx_inst {
+ uint32_t bits;
+ struct {
+ unsigned op:6;
+ unsigned vD:5;
+ unsigned vA:5;
+ unsigned vB:5;
+ unsigned op2:11;
+ } inst;
+};
+
+static INLINE void
+emit_vx(struct ppc_function *p, uint op2, uint vD, uint vA, uint vB,
+ const char *format, boolean transpose)
+{
+ union vx_inst inst;
+ inst.inst.op = 4;
+ inst.inst.vD = vD;
+ inst.inst.vA = vA;
+ inst.inst.vB = vB;
+ inst.inst.op2 = op2;
+ emit_instruction(p, inst.bits);
+ if (p->print) {
+ indent(p);
+ if (transpose)
+ printf(format, vD, vB, vA);
+ else
+ printf(format, vD, vA, vB);
+ }
+}
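/* Editor's note: the instruction-format unions list their fields MSB-first,
 * which matches how big-endian PPC compilers typically allocate bitfields,
 * so `bits` comes out as the encoded instruction word. For the VX form used
 * by vaddfp and friends, the same word can be built with explicit shifts,
 * which is easier to cross-check against the ISA manual: */

static uint32_t
encode_vx(unsigned op2, unsigned vD, unsigned vA, unsigned vB)
{
   /* primary opcode 4 in the top 6 bits, then vD, vA, vB (5 bits each),
    * then the 11-bit extended opcode in the low bits */
   return (4u << 26) | (vD << 21) | (vA << 16) | (vB << 11) | (op2 & 0x7ff);
}
/* e.g. encode_vx(10, vD, vA, vB) should match what ppc_vaddfp() emits. */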
+
+
+union vxr_inst {
+ uint32_t bits;
+ struct {
+ unsigned op:6;
+ unsigned vD:5;
+ unsigned vA:5;
+ unsigned vB:5;
+ unsigned rC:1;
+ unsigned op2:10;
+ } inst;
+};
+
+static INLINE void
+emit_vxr(struct ppc_function *p, uint op2, uint vD, uint vA, uint vB,
+ const char *format)
+{
+ union vxr_inst inst;
+ inst.inst.op = 4;
+ inst.inst.vD = vD;
+ inst.inst.vA = vA;
+ inst.inst.vB = vB;
+ inst.inst.rC = 0;
+ inst.inst.op2 = op2;
+ emit_instruction(p, inst.bits);
+ if (p->print) {
+ indent(p);
+ printf(format, vD, vA, vB);
+ }
+}
+
+
+union va_inst {
+ uint32_t bits;
+ struct {
+ unsigned op:6;
+ unsigned vD:5;
+ unsigned vA:5;
+ unsigned vB:5;
+ unsigned vC:5;
+ unsigned op2:6;
+ } inst;
+};
+
+static INLINE void
+emit_va(struct ppc_function *p, uint op2, uint vD, uint vA, uint vB, uint vC,
+ const char *format)
+{
+ union va_inst inst;
+ inst.inst.op = 4;
+ inst.inst.vD = vD;
+ inst.inst.vA = vA;
+ inst.inst.vB = vB;
+ inst.inst.vC = vC;
+ inst.inst.op2 = op2;
+ emit_instruction(p, inst.bits);
+ if (p->print) {
+ indent(p);
+ printf(format, vD, vA, vB, vC);
+ }
+}
+
+
+union i_inst {
+ uint32_t bits;
+ struct {
+ unsigned op:6;
+ unsigned li:24;
+ unsigned aa:1;
+ unsigned lk:1;
+ } inst;
+};
+
+static INLINE void
+emit_i(struct ppc_function *p, uint op, uint li, uint aa, uint lk)
+{
+ union i_inst inst;
+ inst.inst.op = op;
+ inst.inst.li = li;
+ inst.inst.aa = aa;
+ inst.inst.lk = lk;
+ emit_instruction(p, inst.bits);
+}
+
+
+union xl_inst {
+ uint32_t bits;
+ struct {
+ unsigned op:6;
+ unsigned bo:5;
+ unsigned bi:5;
+ unsigned unused:3;
+ unsigned bh:2;
+ unsigned op2:10;
+ unsigned lk:1;
+ } inst;
+};
+
+static INLINE void
+emit_xl(struct ppc_function *p, uint op, uint bo, uint bi, uint bh,
+ uint op2, uint lk)
+{
+ union xl_inst inst;
+ inst.inst.op = op;
+ inst.inst.bo = bo;
+ inst.inst.bi = bi;
+ inst.inst.unused = 0x0;
+ inst.inst.bh = bh;
+ inst.inst.op2 = op2;
+ inst.inst.lk = lk;
+ emit_instruction(p, inst.bits);
+}
+
+static INLINE void
+dump_xl(const char *name, uint inst)
+{
+ union xl_inst i;
+
+ i.bits = inst;
+ debug_printf("%s = 0x%08x\n", name, inst);
+ debug_printf(" op: %d 0x%x\n", i.inst.op, i.inst.op);
+ debug_printf(" bo: %d 0x%x\n", i.inst.bo, i.inst.bo);
+ debug_printf(" bi: %d 0x%x\n", i.inst.bi, i.inst.bi);
+ debug_printf(" unused: %d 0x%x\n", i.inst.unused, i.inst.unused);
+ debug_printf(" bh: %d 0x%x\n", i.inst.bh, i.inst.bh);
+ debug_printf(" op2: %d 0x%x\n", i.inst.op2, i.inst.op2);
+ debug_printf(" lk: %d 0x%x\n", i.inst.lk, i.inst.lk);
+}
+
+
+union x_inst {
+ uint32_t bits;
+ struct {
+ unsigned op:6;
+ unsigned vrs:5;
+ unsigned ra:5;
+ unsigned rb:5;
+ unsigned op2:10;
+ unsigned unused:1;
+ } inst;
+};
+
+static INLINE void
+emit_x(struct ppc_function *p, uint op, uint vrs, uint ra, uint rb, uint op2,
+ const char *format)
+{
+ union x_inst inst;
+ inst.inst.op = op;
+ inst.inst.vrs = vrs;
+ inst.inst.ra = ra;
+ inst.inst.rb = rb;
+ inst.inst.op2 = op2;
+ inst.inst.unused = 0x0;
+ emit_instruction(p, inst.bits);
+ if (p->print) {
+ indent(p);
+ printf(format, vrs, ra, rb);
+ }
+}
+
+
+union d_inst {
+ uint32_t bits;
+ struct {
+ unsigned op:6;
+ unsigned rt:5;
+ unsigned ra:5;
+ unsigned si:16;
+ } inst;
+};
+
+static INLINE void
+emit_d(struct ppc_function *p, uint op, uint rt, uint ra, int si,
+ const char *format, boolean transpose)
+{
+ union d_inst inst;
+ assert(si >= -32768);
+ assert(si <= 32767);
+ inst.inst.op = op;
+ inst.inst.rt = rt;
+ inst.inst.ra = ra;
+ inst.inst.si = (unsigned) (si & 0xffff);
+ emit_instruction(p, inst.bits);
+ if (p->print) {
+ indent(p);
+ if (transpose)
+ printf(format, rt, si, ra);
+ else
+ printf(format, rt, ra, si);
+ }
+}
+
+
+union a_inst {
+ uint32_t bits;
+ struct {
+ unsigned op:6;
+ unsigned frt:5;
+ unsigned fra:5;
+ unsigned frb:5;
+ unsigned unused:5;
+ unsigned op2:5;
+ unsigned rc:1;
+ } inst;
+};
+
+static INLINE void
+emit_a(struct ppc_function *p, uint op, uint frt, uint fra, uint frb, uint op2,
+ uint rc, const char *format)
+{
+ union a_inst inst;
+ inst.inst.op = op;
+ inst.inst.frt = frt;
+ inst.inst.fra = fra;
+ inst.inst.frb = frb;
+ inst.inst.unused = 0x0;
+ inst.inst.op2 = op2;
+ inst.inst.rc = rc;
+ emit_instruction(p, inst.bits);
+ if (p->print) {
+ indent(p);
+ printf(format, frt, fra, frb);
+ }
+}
+
+
+union xo_inst {
+ uint32_t bits;
+ struct {
+ unsigned op:6;
+ unsigned rt:5;
+ unsigned ra:5;
+ unsigned rb:5;
+ unsigned oe:1;
+ unsigned op2:9;
+ unsigned rc:1;
+ } inst;
+};
+
+static INLINE void
+emit_xo(struct ppc_function *p, uint op, uint rt, uint ra, uint rb, uint oe,
+ uint op2, uint rc, const char *format)
+{
+ union xo_inst inst;
+ inst.inst.op = op;
+ inst.inst.rt = rt;
+ inst.inst.ra = ra;
+ inst.inst.rb = rb;
+ inst.inst.oe = oe;
+ inst.inst.op2 = op2;
+ inst.inst.rc = rc;
+ emit_instruction(p, inst.bits);
+ if (p->print) {
+ indent(p);
+ printf(format, rt, ra, rb);
+ }
+}
+
+
+
+
+
+/**
+ ** float vector arithmetic
+ **/
+
+/** vector float add */
+void
+ppc_vaddfp(struct ppc_function *p, uint vD, uint vA, uint vB)
+{
+   emit_vx(p, 10, vD, vA, vB, "vaddfp\tv%u, v%u, v%u\n", FALSE);
+}
+
+/** vector float subtract */
+void
+ppc_vsubfp(struct ppc_function *p, uint vD, uint vA, uint vB)
+{
+ emit_vx(p, 74, vD, vA, vB, "vsubfp\tv%u, v%u, v%u\n", FALSE);
+}
+
+/** vector float min */
+void
+ppc_vminfp(struct ppc_function *p, uint vD, uint vA, uint vB)
+{
+ emit_vx(p, 1098, vD, vA, vB, "vminfp\tv%u, v%u, v%u\n", FALSE);
+}
+
+/** vector float max */
+void
+ppc_vmaxfp(struct ppc_function *p, uint vD, uint vA, uint vB)
+{
+ emit_vx(p, 1034, vD, vA, vB, "vmaxfp\tv%u, v%u, v%u\n", FALSE);
+}
+
+/** vector float mult add: vD = vA * vB + vC */
+void
+ppc_vmaddfp(struct ppc_function *p, uint vD, uint vA, uint vB, uint vC)
+{
+ /* note arg order */
+ emit_va(p, 46, vD, vA, vC, vB, "vmaddfp\tv%u, v%u, v%u, v%u\n");
+}
+
+/** vector float negative mult subtract: vD = vA - vB * vC */
+void
+ppc_vnmsubfp(struct ppc_function *p, uint vD, uint vA, uint vB, uint vC)
+{
+ /* note arg order */
+ emit_va(p, 47, vD, vB, vA, vC, "vnmsubfp\tv%u, v%u, v%u, v%u\n");
+}
+
+/** vector float compare greater than */
+void
+ppc_vcmpgtfpx(struct ppc_function *p, uint vD, uint vA, uint vB)
+{
+   emit_vxr(p, 710, vD, vA, vB, "vcmpgtfpx\tv%u, v%u, v%u\n");
+}
+
+/** vector float compare greater than or equal to */
+void
+ppc_vcmpgefpx(struct ppc_function *p, uint vD, uint vA, uint vB)
+{
+   emit_vxr(p, 454, vD, vA, vB, "vcmpgefpx\tv%u, v%u, v%u\n");
+}
+
+/** vector float compare equal */
+void
+ppc_vcmpeqfpx(struct ppc_function *p, uint vD, uint vA, uint vB)
+{
+   emit_vxr(p, 198, vD, vA, vB, "vcmpeqfpx\tv%u, v%u, v%u\n");
+}
+
+/** vector float 2^x */
+void
+ppc_vexptefp(struct ppc_function *p, uint vD, uint vB)
+{
+ emit_vx(p, 394, vD, 0, vB, "vexptefp\tv%u, 0%u, v%u\n", FALSE);
+}
+
+/** vector float log2(x) */
+void
+ppc_vlogefp(struct ppc_function *p, uint vD, uint vB)
+{
+ emit_vx(p, 458, vD, 0, vB, "vlogefp\tv%u, 0%u, v%u\n", FALSE);
+}
+
+/** vector float reciprocal estimate */
+void
+ppc_vrefp(struct ppc_function *p, uint vD, uint vB)
+{
+ emit_vx(p, 266, vD, 0, vB, "vrefp\tv%u, 0%u, v%u\n", FALSE);
+}
+
+/** vector float reciprocal sqrt estimate */
+void
+ppc_vrsqrtefp(struct ppc_function *p, uint vD, uint vB)
+{
+ emit_vx(p, 330, vD, 0, vB, "vrsqrtefp\tv%u, 0%u, v%u\n", FALSE);
+}
+
+/** vector float round to negative infinity */
+void
+ppc_vrfim(struct ppc_function *p, uint vD, uint vB)
+{
+ emit_vx(p, 714, vD, 0, vB, "vrfim\tv%u, 0%u, v%u\n", FALSE);
+}
+
+/** vector float round to positive infinity */
+void
+ppc_vrfip(struct ppc_function *p, uint vD, uint vB)
+{
+ emit_vx(p, 650, vD, 0, vB, "vrfip\tv%u, 0%u, v%u\n", FALSE);
+}
+
+/** vector float round to nearest int */
+void
+ppc_vrfin(struct ppc_function *p, uint vD, uint vB)
+{
+ emit_vx(p, 522, vD, 0, vB, "vrfin\tv%u, 0%u, v%u\n", FALSE);
+}
+
+/** vector float round to int toward zero */
+void
+ppc_vrfiz(struct ppc_function *p, uint vD, uint vB)
+{
+ emit_vx(p, 586, vD, 0, vB, "vrfiz\tv%u, 0%u, v%u\n", FALSE);
+}
+
+/** vector store: store vR at mem[rA+rB] */
+void
+ppc_stvx(struct ppc_function *p, uint vR, uint rA, uint rB)
+{
+ emit_x(p, 31, vR, rA, rB, 231, "stvx\tv%u, r%u, r%u\n");
+}
+
+/** vector load: vR = mem[rA+rB] */
+void
+ppc_lvx(struct ppc_function *p, uint vR, uint rA, uint rB)
+{
+ emit_x(p, 31, vR, rA, rB, 103, "lvx\tv%u, r%u, r%u\n");
+}
+
+/** load vector element word: vR = mem_word[ra+rb] */
+void
+ppc_lvewx(struct ppc_function *p, uint vR, uint rA, uint rB)
+{
+ emit_x(p, 31, vR, rA, rB, 71, "lvewx\tv%u, r%u, r%u\n");
+}
+
+
+
+
+/**
+ ** vector bitwise operations
+ **/
+
+/** vector and */
+void
+ppc_vand(struct ppc_function *p, uint vD, uint vA, uint vB)
+{
+ emit_vx(p, 1028, vD, vA, vB, "vand\tv%u, v%u, v%u\n", FALSE);
+}
+
+/** vector and complement */
+void
+ppc_vandc(struct ppc_function *p, uint vD, uint vA, uint vB)
+{
+ emit_vx(p, 1092, vD, vA, vB, "vandc\tv%u, v%u, v%u\n", FALSE);
+}
+
+/** vector or */
+void
+ppc_vor(struct ppc_function *p, uint vD, uint vA, uint vB)
+{
+ emit_vx(p, 1156, vD, vA, vB, "vor\tv%u, v%u, v%u\n", FALSE);
+}
+
+/** vector nor */
+void
+ppc_vnor(struct ppc_function *p, uint vD, uint vA, uint vB)
+{
+ emit_vx(p, 1284, vD, vA, vB, "vnor\tv%u, v%u, v%u\n", FALSE);
+}
+
+/** vector xor */
+void
+ppc_vxor(struct ppc_function *p, uint vD, uint vA, uint vB)
+{
+ emit_vx(p, 1220, vD, vA, vB, "vxor\tv%u, v%u, v%u\n", FALSE);
+}
+
+/** Pseudo-instruction: vector move */
+void
+ppc_vmove(struct ppc_function *p, uint vD, uint vA)
+{
+ boolean print = p->print;
+ p->print = FALSE;
+ ppc_vor(p, vD, vA, vA);
+ if (print) {
+ indent(p);
+ printf("vor\tv%u, v%u, v%u \t# v%u = v%u\n", vD, vA, vA, vD, vA);
+ }
+ p->print = print;
+}
+
+/** Set vector register to {0,0,0,0} */
+void
+ppc_vzero(struct ppc_function *p, uint vr)
+{
+ boolean print = p->print;
+ p->print = FALSE;
+ ppc_vxor(p, vr, vr, vr);
+ if (print) {
+ indent(p);
+ printf("vxor\tv%u, v%u, v%u \t# v%u = {0,0,0,0}\n", vr, vr, vr, vr);
+ }
+ p->print = print;
+}
+
+
+
+
+/**
+ ** Vector shuffle / select / splat / etc
+ **/
+
+/** vector permute */
+void
+ppc_vperm(struct ppc_function *p, uint vD, uint vA, uint vB, uint vC)
+{
+   emit_va(p, 43, vD, vA, vB, vC, "vperm\tv%u, v%u, v%u, v%u\n");
+}
+
+/** vector select */
+void
+ppc_vsel(struct ppc_function *p, uint vD, uint vA, uint vB, uint vC)
+{
+   emit_va(p, 42, vD, vA, vB, vC, "vsel\tv%u, v%u, v%u, v%u\n");
+}
+
+/** vector splat byte */
+void
+ppc_vspltb(struct ppc_function *p, uint vD, uint vB, uint imm)
+{
+   emit_vx(p, 524, vD, imm, vB, "vspltb\tv%u, v%u, %u\n", TRUE);
+}
+
+/** vector splat half word */
+void
+ppc_vsplthw(struct ppc_function *p, uint vD, uint vB, uint imm)
+{
+ emit_vx(p, 588, vD, imm, vB, "vsplthw\tv%u, v%u, %u\n", TRUE);
+}
+
+/** vector splat word */
+void
+ppc_vspltw(struct ppc_function *p, uint vD, uint vB, uint imm)
+{
+ emit_vx(p, 652, vD, imm, vB, "vspltw\tv%u, v%u, %u\n", TRUE);
+}
+
+/** vector splat signed immediate word */
+void
+ppc_vspltisw(struct ppc_function *p, uint vD, int imm)
+{
+ assert(imm >= -16);
+   assert(imm <= 15);
+ emit_vx(p, 908, vD, imm, 0, "vspltisw\tv%u, %d, %u\n", FALSE);
+}
+
+/** vector shift left word: vD[word] = vA[word] << (vB[word] & 0x1f) */
+void
+ppc_vslw(struct ppc_function *p, uint vD, uint vA, uint vB)
+{
+ emit_vx(p, 388, vD, vA, vB, "vslw\tv%u, v%u, v%u\n", FALSE);
+}
+
+
+
+
+/**
+ ** integer arithmetic
+ **/
+
+/** rt = ra + imm */
+void
+ppc_addi(struct ppc_function *p, uint rt, uint ra, int imm)
+{
+ emit_d(p, 14, rt, ra, imm, "addi\tr%u, r%u, %d\n", FALSE);
+}
+
+/** rt = ra + (imm << 16) */
+void
+ppc_addis(struct ppc_function *p, uint rt, uint ra, int imm)
+{
+ emit_d(p, 15, rt, ra, imm, "addis\tr%u, r%u, %d\n", FALSE);
+}
+
+/** rt = ra + rb */
+void
+ppc_add(struct ppc_function *p, uint rt, uint ra, uint rb)
+{
+ emit_xo(p, 31, rt, ra, rb, 0, 266, 0, "add\tr%u, r%u, r%u\n");
+}
+
+/** rt = ra AND rb */
+void
+ppc_and(struct ppc_function *p, uint rt, uint ra, uint rb)
+{
+ emit_x(p, 31, ra, rt, rb, 28, "and\tr%u, r%u, r%u\n"); /* note argument order */
+}
+
+/** rt = ra AND imm */
+void
+ppc_andi(struct ppc_function *p, uint rt, uint ra, int imm)
+{
+ /* note argument order */
+ emit_d(p, 28, ra, rt, imm, "andi\tr%u, r%u, %d\n", FALSE);
+}
+
+/** rt = ra OR rb */
+void
+ppc_or(struct ppc_function *p, uint rt, uint ra, uint rb)
+{
+ emit_x(p, 31, ra, rt, rb, 444, "or\tr%u, r%u, r%u\n"); /* note argument order */
+}
+
+/** rt = ra OR imm */
+void
+ppc_ori(struct ppc_function *p, uint rt, uint ra, int imm)
+{
+ /* note argument order */
+ emit_d(p, 24, ra, rt, imm, "ori\tr%u, r%u, %d\n", FALSE);
+}
+
+/** rt = ra XOR rb */
+void
+ppc_xor(struct ppc_function *p, uint rt, uint ra, uint rb)
+{
+ emit_x(p, 31, ra, rt, rb, 316, "xor\tr%u, r%u, r%u\n"); /* note argument order */
+}
+
+/** rt = ra XOR imm */
+void
+ppc_xori(struct ppc_function *p, uint rt, uint ra, int imm)
+{
+ /* note argument order */
+ emit_d(p, 26, ra, rt, imm, "xori\tr%u, r%u, %d\n", FALSE);
+}
+
+/** pseudo instruction: move: rt = ra */
+void
+ppc_mr(struct ppc_function *p, uint rt, uint ra)
+{
+ ppc_or(p, rt, ra, ra);
+}
+
+/** pseudo instruction: load immediate: rt = imm */
+void
+ppc_li(struct ppc_function *p, uint rt, int imm)
+{
+ boolean print = p->print;
+ p->print = FALSE;
+ ppc_addi(p, rt, 0, imm);
+ if (print) {
+ indent(p);
+ printf("addi\tr%u, r0, %d \t# r%u = %d\n", rt, imm, rt, imm);
+ }
+ p->print = print;
+}
+
+/** rt = imm << 16 */
+void
+ppc_lis(struct ppc_function *p, uint rt, int imm)
+{
+ ppc_addis(p, rt, 0, imm);
+}
+
+/** rt = imm */
+void
+ppc_load_int(struct ppc_function *p, uint rt, int imm)
+{
+   ppc_lis(p, rt, (imm >> 16));    /* rt = (imm >> 16) << 16 */
+ ppc_ori(p, rt, rt, (imm & 0xffff)); /* rt = rt | (imm & 0xffff) */
+}
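+
+/*
+ * Example (illustrative): ppc_load_int(p, 3, 0x12345678) expands to
+ *
+ *    lis  r3, 0x1234        (r3 = 0x12340000)
+ *    ori  r3, r3, 0x5678    (r3 = 0x12345678)
+ *
+ * where lis itself is the pseudo instruction addis r3, r0, 0x1234.
+ */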
+
+
+
+
+/**
+ ** integer load/store
+ **/
+
+/** store rs at memory[(ra)+d],
+ * then update ra = (ra)+d
+ */
+void
+ppc_stwu(struct ppc_function *p, uint rs, uint ra, int d)
+{
+ emit_d(p, 37, rs, ra, d, "stwu\tr%u, %d(r%u)\n", TRUE);
+}
+
+/** store rs at memory[(ra)+d] */
+void
+ppc_stw(struct ppc_function *p, uint rs, uint ra, int d)
+{
+ emit_d(p, 36, rs, ra, d, "stw\tr%u, %d(r%u)\n", TRUE);
+}
+
+/** Load rt = mem[(ra)+d]; then set the high 32 bits to zero. */
+void
+ppc_lwz(struct ppc_function *p, uint rt, uint ra, int d)
+{
+ emit_d(p, 32, rt, ra, d, "lwz\tr%u, %d(r%u)\n", TRUE);
+}
+
+
+
+/**
+ ** Float (non-vector) arithmetic
+ **/
+
+/** add: frt = fra + frb */
+void
+ppc_fadd(struct ppc_function *p, uint frt, uint fra, uint frb)
+{
+ emit_a(p, 63, frt, fra, frb, 21, 0, "fadd\tf%u, f%u, f%u\n");
+}
+
+/** sub: frt = fra - frb */
+void
+ppc_fsub(struct ppc_function *p, uint frt, uint fra, uint frb)
+{
+ emit_a(p, 63, frt, fra, frb, 20, 0, "fsub\tf%u, f%u, f%u\n");
+}
+
+/** convert to int: rt = (int) ra */
+void
+ppc_fctiwz(struct ppc_function *p, uint rt, uint fra)
+{
+ emit_x(p, 63, rt, 0, fra, 15, "fctiwz\tr%u, r%u, r%u\n");
+}
+
+/** store frs at mem[(ra)+offset] */
+void
+ppc_stfs(struct ppc_function *p, uint frs, uint ra, int offset)
+{
+ emit_d(p, 52, frs, ra, offset, "stfs\tr%u, %d(r%u)\n", TRUE);
+}
+
+/** store frs at mem[(ra)+(rb)] */
+void
+ppc_stfiwx(struct ppc_function *p, uint frs, uint ra, uint rb)
+{
+ emit_x(p, 31, frs, ra, rb, 983, "stfiwx\tr%u, r%u, r%u\n");
+}
+
+/** load frt = mem[(ra)+offset] */
+void
+ppc_lfs(struct ppc_function *p, uint frt, uint ra, int offset)
+{
+   emit_d(p, 48, frt, ra, offset, "lfs\tr%u, %d(r%u)\n", TRUE);
+}
+
+
+
+
+
+/**
+ ** branch instructions
+ **/
+
+/** BLR: Branch to link register (p. 35) */
+void
+ppc_blr(struct ppc_function *p)
+{
+   /* blr is bclr with BO=0x14 ("branch always"), BI=0, LK=0: XL-form, XO 16 */
+   emit_xl(p, 19, BRANCH_COND_ALWAYS, 0, 0, 16, 0);
+ if (p->print) {
+ indent(p);
+ printf("blr\n");
+ }
+}
+
+/** Branch Conditional to Link Register (p. 36) */
+void
+ppc_bclr(struct ppc_function *p, uint condOp, uint branchHint, uint condReg)
+{
+ emit_xl(p, 19, condOp, condReg, branchHint, 16, 0);
+ if (p->print) {
+ indent(p);
+ printf("bclr\t%u %u %u\n", condOp, branchHint, condReg);
+ }
+}
+
+/** Pseudo instruction: return from subroutine */
+void
+ppc_return(struct ppc_function *p)
+{
+ ppc_bclr(p, BRANCH_COND_ALWAYS, BRANCH_HINT_SUB_RETURN, 0);
+}
diff --git a/src/gallium/auxiliary/rtasm/rtasm_ppc.h b/src/gallium/auxiliary/rtasm/rtasm_ppc.h
new file mode 100644
index 0000000000..93e5f5187d
--- /dev/null
+++ b/src/gallium/auxiliary/rtasm/rtasm_ppc.h
@@ -0,0 +1,342 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2008 Tungsten Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * PPC code generation.
+ * \author Brian Paul
+ */
+
+
+#ifndef RTASM_PPC_H
+#define RTASM_PPC_H
+
+
+#include "pipe/p_compiler.h"
+
+
+#define PPC_INST_SIZE 4 /**< 4 bytes / instruction */
+
+#define PPC_NUM_REGS 32
+#define PPC_NUM_FP_REGS 32
+#define PPC_NUM_VEC_REGS 32
+
+/** Stack pointer register */
+#define PPC_REG_SP 1
+
+/** Branch conditions */
+#define BRANCH_COND_ALWAYS 0x14 /* binary 1z1zz (z=ignored) */
+
+/** Branch hints */
+#define BRANCH_HINT_SUB_RETURN 0x0 /* binary 00 */
+
+
+struct ppc_function
+{
+ uint32_t *store; /**< instruction buffer */
+ uint num_inst;
+ uint max_inst;
+   uint32_t reg_used;   /**< used/free general-purpose registers bitmask */
+   uint32_t fp_used;    /**< used/free floating point registers bitmask */
+   uint32_t vec_used;   /**< used/free vector registers bitmask */
+ int indent;
+ boolean print;
+};
+
+
+
+extern void ppc_init_func(struct ppc_function *p);
+extern void ppc_release_func(struct ppc_function *p);
+extern uint ppc_num_instructions(const struct ppc_function *p);
+extern void (*ppc_get_func( struct ppc_function *p ))( void );
+extern void ppc_dump_func(const struct ppc_function *p);
+
+extern void ppc_print_code(struct ppc_function *p, boolean enable);
+extern void ppc_indent(struct ppc_function *p, int spaces);
+extern void ppc_comment(struct ppc_function *p, int rel_indent, const char *s);
+
+extern int ppc_reserve_register(struct ppc_function *p, int reg);
+extern int ppc_allocate_register(struct ppc_function *p);
+extern void ppc_release_register(struct ppc_function *p, int reg);
+extern int ppc_allocate_fp_register(struct ppc_function *p);
+extern void ppc_release_fp_register(struct ppc_function *p, int reg);
+extern int ppc_allocate_vec_register(struct ppc_function *p);
+extern void ppc_release_vec_register(struct ppc_function *p, int reg);
+
+
+
+/**
+ ** float vector arithmetic
+ **/
+
+/** vector float add */
+extern void
+ppc_vaddfp(struct ppc_function *p, uint vD, uint vA, uint vB);
+
+/** vector float subtract */
+extern void
+ppc_vsubfp(struct ppc_function *p, uint vD, uint vA, uint vB);
+
+/** vector float min */
+extern void
+ppc_vminfp(struct ppc_function *p, uint vD, uint vA, uint vB);
+
+/** vector float max */
+extern void
+ppc_vmaxfp(struct ppc_function *p, uint vD, uint vA, uint vB);
+
+/** vector float mult add: vD = vA * vB + vC */
+extern void
+ppc_vmaddfp(struct ppc_function *p, uint vD, uint vA, uint vB, uint vC);
+
+/** vector float negative mult subtract: vD = vA - vB * vC */
+extern void
+ppc_vnmsubfp(struct ppc_function *p, uint vD, uint vA, uint vB, uint vC);
+
+/** vector float compare greater than */
+extern void
+ppc_vcmpgtfpx(struct ppc_function *p, uint vD, uint vA, uint vB);
+
+/** vector float compare greater than or equal to */
+extern void
+ppc_vcmpgefpx(struct ppc_function *p, uint vD, uint vA, uint vB);
+
+/** vector float compare equal */
+extern void
+ppc_vcmpeqfpx(struct ppc_function *p, uint vD, uint vA, uint vB);
+
+/** vector float 2^x */
+extern void
+ppc_vexptefp(struct ppc_function *p, uint vD, uint vB);
+
+/** vector float log2(x) */
+extern void
+ppc_vlogefp(struct ppc_function *p, uint vD, uint vB);
+
+/** vector float reciprocal estimate */
+extern void
+ppc_vrefp(struct ppc_function *p, uint vD, uint vB);
+
+/** vector float reciprocal sqrt estimate */
+extern void
+ppc_vrsqrtefp(struct ppc_function *p, uint vD, uint vB);
+
+/** vector float round to negative infinity */
+extern void
+ppc_vrfim(struct ppc_function *p, uint vD, uint vB);
+
+/** vector float round to positive infinity */
+extern void
+ppc_vrfip(struct ppc_function *p, uint vD, uint vB);
+
+/** vector float round to nearest int */
+extern void
+ppc_vrfin(struct ppc_function *p, uint vD, uint vB);
+
+/** vector float round to int toward zero */
+extern void
+ppc_vrfiz(struct ppc_function *p, uint vD, uint vB);
+
+
+/** vector store: store vR at mem[rA+rB] */
+extern void
+ppc_stvx(struct ppc_function *p, uint vR, uint rA, uint rB);
+
+/** vector load: vR = mem[rA+rB] */
+extern void
+ppc_lvx(struct ppc_function *p, uint vR, uint rA, uint rB);
+
+/** load vector element word: vR = mem_word[rA+rB] */
+extern void
+ppc_lvewx(struct ppc_function *p, uint vR, uint rA, uint rB);
+
+
+
+/**
+ ** vector bitwise operations
+ **/
+
+
+/** vector and */
+extern void
+ppc_vand(struct ppc_function *p, uint vD, uint vA, uint vB);
+
+/** vector and complement */
+extern void
+ppc_vandc(struct ppc_function *p, uint vD, uint vA, uint vB);
+
+/** vector or */
+extern void
+ppc_vor(struct ppc_function *p, uint vD, uint vA, uint vB);
+
+/** vector nor */
+extern void
+ppc_vnor(struct ppc_function *p, uint vD, uint vA, uint vB);
+
+/** vector xor */
+extern void
+ppc_vxor(struct ppc_function *p, uint vD, uint vA, uint vB);
+
+/** Pseudo-instruction: vector move */
+extern void
+ppc_vmove(struct ppc_function *p, uint vD, uint vA);
+
+/** Set vector register to {0,0,0,0} */
+extern void
+ppc_vzero(struct ppc_function *p, uint vr);
+
+
+
+/**
+ ** Vector shuffle / select / splat / etc
+ **/
+
+/** vector permute */
+extern void
+ppc_vperm(struct ppc_function *p, uint vD, uint vA, uint vB, uint vC);
+
+/** vector select */
+extern void
+ppc_vsel(struct ppc_function *p, uint vD, uint vA, uint vB, uint vC);
+
+/** vector splat byte */
+extern void
+ppc_vspltb(struct ppc_function *p, uint vD, uint vB, uint imm);
+
+/** vector splat half word */
+extern void
+ppc_vsplthw(struct ppc_function *p, uint vD, uint vB, uint imm);
+
+/** vector splat word */
+extern void
+ppc_vspltw(struct ppc_function *p, uint vD, uint vB, uint imm);
+
+/** vector splat signed immediate word */
+extern void
+ppc_vspltisw(struct ppc_function *p, uint vD, int imm);
+
+/** vector shift left word: vD[word] = vA[word] << (vB[word] & 0x1f) */
+extern void
+ppc_vslw(struct ppc_function *p, uint vD, uint vA, uint vB);
+
+
+
+/**
+ ** scalar arithmetic
+ **/
+
+extern void
+ppc_add(struct ppc_function *p, uint rt, uint ra, uint rb);
+
+extern void
+ppc_addi(struct ppc_function *p, uint rt, uint ra, int imm);
+
+extern void
+ppc_addis(struct ppc_function *p, uint rt, uint ra, int imm);
+
+extern void
+ppc_and(struct ppc_function *p, uint rt, uint ra, uint rb);
+
+extern void
+ppc_andi(struct ppc_function *p, uint rt, uint ra, int imm);
+
+extern void
+ppc_or(struct ppc_function *p, uint rt, uint ra, uint rb);
+
+extern void
+ppc_ori(struct ppc_function *p, uint rt, uint ra, int imm);
+
+extern void
+ppc_xor(struct ppc_function *p, uint rt, uint ra, uint rb);
+
+extern void
+ppc_xori(struct ppc_function *p, uint rt, uint ra, int imm);
+
+extern void
+ppc_mr(struct ppc_function *p, uint rt, uint ra);
+
+extern void
+ppc_li(struct ppc_function *p, uint rt, int imm);
+
+extern void
+ppc_lis(struct ppc_function *p, uint rt, int imm);
+
+extern void
+ppc_load_int(struct ppc_function *p, uint rt, int imm);
+
+
+
+/**
+ ** scalar load/store
+ **/
+
+extern void
+ppc_stwu(struct ppc_function *p, uint rs, uint ra, int d);
+
+extern void
+ppc_stw(struct ppc_function *p, uint rs, uint ra, int d);
+
+extern void
+ppc_lwz(struct ppc_function *p, uint rs, uint ra, int d);
+
+
+
+/**
+ ** Float (non-vector) arithmetic
+ **/
+
+extern void
+ppc_fadd(struct ppc_function *p, uint frt, uint fra, uint frb);
+
+extern void
+ppc_fsub(struct ppc_function *p, uint frt, uint fra, uint frb);
+
+extern void
+ppc_fctiwz(struct ppc_function *p, uint rt, uint ra);
+
+extern void
+ppc_stfs(struct ppc_function *p, uint frs, uint ra, int offset);
+
+extern void
+ppc_stfiwx(struct ppc_function *p, uint frs, uint ra, uint rb);
+
+extern void
+ppc_lfs(struct ppc_function *p, uint frt, uint ra, int offset);
+
+
+
+/**
+ ** branch instructions
+ **/
+
+extern void
+ppc_blr(struct ppc_function *p);
+
+extern void
+ppc_bclr(struct ppc_function *p, uint condOp, uint branchHint, uint condReg);
+
+extern void
+ppc_return(struct ppc_function *p);
+
+
+#endif /* RTASM_PPC_H */
diff --git a/src/gallium/auxiliary/rtasm/rtasm_ppc_spe.c b/src/gallium/auxiliary/rtasm/rtasm_ppc_spe.c
new file mode 100644
index 0000000000..53a0e722cf
--- /dev/null
+++ b/src/gallium/auxiliary/rtasm/rtasm_ppc_spe.c
@@ -0,0 +1,1067 @@
+/*
+ * (C) Copyright IBM Corporation 2008
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file
+ * Real-time assembly generation interface for Cell B.E. SPEs.
+ *
+ * \author Ian Romanick <idr@us.ibm.com>
+ * \author Brian Paul
+ */
+
+
+#include <stdio.h>
+#include "pipe/p_compiler.h"
+#include "util/u_memory.h"
+#include "rtasm_ppc_spe.h"
+
+
+#ifdef GALLIUM_CELL
+/**
+ * SPE instruction types
+ *
+ * There are 6 primary instruction encodings used on the Cell's SPEs. Each of
+ * the following unions encodes one type.
+ *
+ * \bug
+ * If, at some point, we start generating SPE code from a little-endian host
+ * these unions will not work.
+ */
+/*@{*/
+/**
+ * Encode one output register with two input registers
+ */
+union spe_inst_RR {
+ uint32_t bits;
+ struct {
+ unsigned op:11;
+ unsigned rB:7;
+ unsigned rA:7;
+ unsigned rT:7;
+ } inst;
+};
+
+
+/**
+ * Encode one output register with three input registers
+ */
+union spe_inst_RRR {
+ uint32_t bits;
+ struct {
+ unsigned op:4;
+ unsigned rT:7;
+ unsigned rB:7;
+ unsigned rA:7;
+ unsigned rC:7;
+ } inst;
+};
+
+
+/**
+ * Encode one output register with one input reg. and a 7-bit signed immed
+ */
+union spe_inst_RI7 {
+ uint32_t bits;
+ struct {
+ unsigned op:11;
+ unsigned i7:7;
+ unsigned rA:7;
+ unsigned rT:7;
+ } inst;
+};
+
+
+/**
+ * Encode one output register with one input reg. and an 8-bit signed immed
+ */
+union spe_inst_RI8 {
+ uint32_t bits;
+ struct {
+ unsigned op:10;
+ unsigned i8:8;
+ unsigned rA:7;
+ unsigned rT:7;
+ } inst;
+};
+
+
+/**
+ * Encode one output register with one input reg. and a 10-bit signed immed
+ */
+union spe_inst_RI10 {
+ uint32_t bits;
+ struct {
+ unsigned op:8;
+ unsigned i10:10;
+ unsigned rA:7;
+ unsigned rT:7;
+ } inst;
+};
+
+
+/**
+ * Encode one output register with a 16-bit signed immediate
+ */
+union spe_inst_RI16 {
+ uint32_t bits;
+ struct {
+ unsigned op:9;
+ unsigned i16:16;
+ unsigned rT:7;
+ } inst;
+};
+
+
+/**
+ * Encode one output register with an 18-bit signed immediate
+ */
+union spe_inst_RI18 {
+ uint32_t bits;
+ struct {
+ unsigned op:7;
+ unsigned i18:18;
+ unsigned rT:7;
+ } inst;
+};
+/*@}*/
+
+
+static void
+indent(const struct spe_function *p)
+{
+ int i;
+ for (i = 0; i < p->indent; i++) {
+ putchar(' ');
+ }
+}
+
+
+static const char *
+rem_prefix(const char *longname)
+{
+ return longname + 4;
+}
+
+
+static const char *
+reg_name(int reg)
+{
+ switch (reg) {
+ case SPE_REG_SP:
+ return "$sp";
+ case SPE_REG_RA:
+ return "$lr";
+ default:
+ {
+ /* cycle through four buffers to handle multiple calls per printf */
+ static char buf[4][10];
+ static int b = 0;
+ b = (b + 1) % 4;
+ sprintf(buf[b], "$%d", reg);
+ return buf[b];
+ }
+ }
+}
+
+
+static void
+emit_instruction(struct spe_function *p, uint32_t inst_bits)
+{
+ if (!p->store)
+ return; /* out of memory, drop the instruction */
+
+ if (p->num_inst == p->max_inst) {
+ /* allocate larger buffer */
+ uint32_t *newbuf;
+ p->max_inst *= 2; /* 2x larger */
+ newbuf = align_malloc(p->max_inst * SPE_INST_SIZE, 16);
+ if (newbuf) {
+ memcpy(newbuf, p->store, p->num_inst * SPE_INST_SIZE);
+ }
+ align_free(p->store);
+ p->store = newbuf;
+ if (!p->store) {
+ /* out of memory */
+ p->num_inst = 0;
+ return;
+ }
+ }
+
+ p->store[p->num_inst++] = inst_bits;
+}
+
+
+
+static void emit_RR(struct spe_function *p, unsigned op, int rT,
+ int rA, int rB, const char *name)
+{
+ union spe_inst_RR inst;
+ inst.inst.op = op;
+ inst.inst.rB = rB;
+ inst.inst.rA = rA;
+ inst.inst.rT = rT;
+ emit_instruction(p, inst.bits);
+ if (p->print) {
+ indent(p);
+ printf("%s\t%s, %s, %s\n",
+ rem_prefix(name), reg_name(rT), reg_name(rA), reg_name(rB));
+ }
+}
+
+
+static void emit_RRR(struct spe_function *p, unsigned op, int rT,
+ int rA, int rB, int rC, const char *name)
+{
+ union spe_inst_RRR inst;
+ inst.inst.op = op;
+ inst.inst.rT = rT;
+ inst.inst.rB = rB;
+ inst.inst.rA = rA;
+ inst.inst.rC = rC;
+ emit_instruction(p, inst.bits);
+ if (p->print) {
+ indent(p);
+ printf("%s\t%s, %s, %s, %s\n", rem_prefix(name), reg_name(rT),
+ reg_name(rA), reg_name(rB), reg_name(rC));
+ }
+}
+
+
+static void emit_RI7(struct spe_function *p, unsigned op, int rT,
+ int rA, int imm, const char *name)
+{
+ union spe_inst_RI7 inst;
+ inst.inst.op = op;
+ inst.inst.i7 = imm;
+ inst.inst.rA = rA;
+ inst.inst.rT = rT;
+ emit_instruction(p, inst.bits);
+ if (p->print) {
+ indent(p);
+ printf("%s\t%s, %s, 0x%x\n",
+ rem_prefix(name), reg_name(rT), reg_name(rA), imm);
+ }
+}
+
+
+
+static void emit_RI8(struct spe_function *p, unsigned op, int rT,
+ int rA, int imm, const char *name)
+{
+ union spe_inst_RI8 inst;
+ inst.inst.op = op;
+ inst.inst.i8 = imm;
+ inst.inst.rA = rA;
+ inst.inst.rT = rT;
+ emit_instruction(p, inst.bits);
+ if (p->print) {
+ indent(p);
+ printf("%s\t%s, %s, 0x%x\n",
+ rem_prefix(name), reg_name(rT), reg_name(rA), imm);
+ }
+}
+
+
+
+static void emit_RI10(struct spe_function *p, unsigned op, int rT,
+ int rA, int imm, const char *name)
+{
+ union spe_inst_RI10 inst;
+ inst.inst.op = op;
+ inst.inst.i10 = imm;
+ inst.inst.rA = rA;
+ inst.inst.rT = rT;
+ emit_instruction(p, inst.bits);
+ if (p->print) {
+ indent(p);
+ printf("%s\t%s, %s, 0x%x\n",
+ rem_prefix(name), reg_name(rT), reg_name(rA), imm);
+ }
+}
+
+
+/** As above, but do range checking on signed immediate value */
+static void emit_RI10s(struct spe_function *p, unsigned op, int rT,
+ int rA, int imm, const char *name)
+{
+ assert(imm <= 511);
+ assert(imm >= -512);
+ emit_RI10(p, op, rT, rA, imm, name);
+}
+
+
+static void emit_RI16(struct spe_function *p, unsigned op, int rT,
+ int imm, const char *name)
+{
+ union spe_inst_RI16 inst;
+ inst.inst.op = op;
+ inst.inst.i16 = imm;
+ inst.inst.rT = rT;
+ emit_instruction(p, inst.bits);
+ if (p->print) {
+ indent(p);
+ printf("%s\t%s, 0x%x\n", rem_prefix(name), reg_name(rT), imm);
+ }
+}
+
+
+static void emit_RI18(struct spe_function *p, unsigned op, int rT,
+ int imm, const char *name)
+{
+ union spe_inst_RI18 inst;
+ inst.inst.op = op;
+ inst.inst.i18 = imm;
+ inst.inst.rT = rT;
+ emit_instruction(p, inst.bits);
+ if (p->print) {
+ indent(p);
+ printf("%s\t%s, 0x%x\n", rem_prefix(name), reg_name(rT), imm);
+ }
+}
+
+
+#define EMIT(_name, _op) \
+void _name (struct spe_function *p) \
+{ \
+ emit_RR(p, _op, 0, 0, 0, __FUNCTION__); \
+}
+
+#define EMIT_(_name, _op) \
+void _name (struct spe_function *p, int rT) \
+{ \
+ emit_RR(p, _op, rT, 0, 0, __FUNCTION__); \
+}
+
+#define EMIT_R(_name, _op) \
+void _name (struct spe_function *p, int rT, int rA) \
+{ \
+ emit_RR(p, _op, rT, rA, 0, __FUNCTION__); \
+}
+
+#define EMIT_RR(_name, _op) \
+void _name (struct spe_function *p, int rT, int rA, int rB) \
+{ \
+ emit_RR(p, _op, rT, rA, rB, __FUNCTION__); \
+}
+
+#define EMIT_RRR(_name, _op) \
+void _name (struct spe_function *p, int rT, int rA, int rB, int rC) \
+{ \
+ emit_RRR(p, _op, rT, rA, rB, rC, __FUNCTION__); \
+}
+
+#define EMIT_RI7(_name, _op) \
+void _name (struct spe_function *p, int rT, int rA, int imm) \
+{ \
+ emit_RI7(p, _op, rT, rA, imm, __FUNCTION__); \
+}
+
+#define EMIT_RI8(_name, _op, bias) \
+void _name (struct spe_function *p, int rT, int rA, int imm) \
+{ \
+ emit_RI8(p, _op, rT, rA, bias - imm, __FUNCTION__); \
+}
+
+#define EMIT_RI10(_name, _op) \
+void _name (struct spe_function *p, int rT, int rA, int imm) \
+{ \
+ emit_RI10(p, _op, rT, rA, imm, __FUNCTION__); \
+}
+
+#define EMIT_RI10s(_name, _op) \
+void _name (struct spe_function *p, int rT, int rA, int imm) \
+{ \
+ emit_RI10s(p, _op, rT, rA, imm, __FUNCTION__); \
+}
+
+#define EMIT_RI16(_name, _op) \
+void _name (struct spe_function *p, int rT, int imm) \
+{ \
+ emit_RI16(p, _op, rT, imm, __FUNCTION__); \
+}
+
+#define EMIT_RI18(_name, _op) \
+void _name (struct spe_function *p, int rT, int imm) \
+{ \
+ emit_RI18(p, _op, rT, imm, __FUNCTION__); \
+}
+
+#define EMIT_I16(_name, _op) \
+void _name (struct spe_function *p, int imm) \
+{ \
+ emit_RI16(p, _op, 0, imm, __FUNCTION__); \
+}
+
+#include "rtasm_ppc_spe.h"
+
+
+
+/**
+ * Initialize an spe_function.
+ * \param code_size initial size of instruction buffer to allocate, in bytes.
+ * If zero, use a default.
+ */
+void spe_init_func(struct spe_function *p, unsigned code_size)
+{
+ uint i;
+
+ if (!code_size)
+ code_size = 64;
+
+ p->num_inst = 0;
+ p->max_inst = code_size / SPE_INST_SIZE;
+ p->store = align_malloc(code_size, 16);
+
+ p->set_count = 0;
+ memset(p->regs, 0, SPE_NUM_REGS * sizeof(p->regs[0]));
+
+ /* Conservatively treat R0 - R2 and R80 - R127 as non-volatile.
+ */
+ p->regs[0] = p->regs[1] = p->regs[2] = 1;
+ for (i = 80; i <= 127; i++) {
+ p->regs[i] = 1;
+ }
+
+ p->print = FALSE;
+ p->indent = 0;
+}
+
+
+void spe_release_func(struct spe_function *p)
+{
+ assert(p->num_inst <= p->max_inst);
+ if (p->store != NULL) {
+ align_free(p->store);
+ }
+ p->store = NULL;
+}
+
+
+/** Return current code size in bytes. */
+unsigned spe_code_size(const struct spe_function *p)
+{
+ return p->num_inst * SPE_INST_SIZE;
+}
+
+
+/**
+ * Allocate a SPE register.
+ * \return register index or -1 if none left.
+ */
+int spe_allocate_available_register(struct spe_function *p)
+{
+ unsigned i;
+ for (i = 0; i < SPE_NUM_REGS; i++) {
+ if (p->regs[i] == 0) {
+ p->regs[i] = 1;
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+
+/**
+ * Mark the given SPE register as "allocated".
+ */
+int spe_allocate_register(struct spe_function *p, int reg)
+{
+ assert(reg < SPE_NUM_REGS);
+ assert(p->regs[reg] == 0);
+ p->regs[reg] = 1;
+ return reg;
+}
+
+
+/**
+ * Mark the given SPE register as "unallocated". Note that this should
+ * only be used on registers allocated in the current register set; an
+ * assertion will fail if an attempt is made to deallocate a register
+ * allocated in an earlier register set.
+ */
+void spe_release_register(struct spe_function *p, int reg)
+{
+ assert(reg >= 0);
+ assert(reg < SPE_NUM_REGS);
+ assert(p->regs[reg] == 1);
+
+ p->regs[reg] = 0;
+}
+
+/**
+ * Start a new set of registers. This can be called if
+ * it will be difficult later to determine exactly what
+ * registers were actually allocated during a code generation
+ * sequence, and you really just want to deallocate all of them.
+ */
+void spe_allocate_register_set(struct spe_function *p)
+{
+ uint i;
+
+ /* Keep track of the set count. If it ever wraps around to 0,
+ * we're in trouble.
+ */
+ p->set_count++;
+ assert(p->set_count > 0);
+
+ /* Increment the allocation count of all registers currently
+ * allocated. Then any registers that are allocated in this set
+ * will be the only ones with a count of 1; they'll all be released
+ * when the register set is released.
+ */
+ for (i = 0; i < SPE_NUM_REGS; i++) {
+ if (p->regs[i] > 0)
+ p->regs[i]++;
+ }
+}
+
+void spe_release_register_set(struct spe_function *p)
+{
+ uint i;
+
+ /* If the set count drops below zero, we're in trouble. */
+ assert(p->set_count > 0);
+ p->set_count--;
+
+ /* Drop the allocation level of all registers. Any allocated
+ * during this register set will drop to 0 and then become
+ * available.
+ */
+ for (i = 0; i < SPE_NUM_REGS; i++) {
+ if (p->regs[i] > 0)
+ p->regs[i]--;
+ }
+}
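+
+/*
+ * Illustrative use of register sets (the tmp names are hypothetical):
+ *
+ *    spe_allocate_register_set(p);
+ *    tmp1 = spe_allocate_available_register(p);
+ *    tmp2 = spe_allocate_available_register(p);
+ *       ... emit code that uses tmp1 and tmp2 ...
+ *    spe_release_register_set(p);   releases tmp1, tmp2 and any other
+ *                                   registers allocated since the matching
+ *                                   spe_allocate_register_set() call
+ */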
+
+
+unsigned
+spe_get_registers_used(const struct spe_function *p, ubyte used[])
+{
+ unsigned i, num = 0;
+ /* only count registers in the range available to callers */
+ for (i = 2; i < 80; i++) {
+ if (p->regs[i]) {
+ used[num++] = i;
+ }
+ }
+ return num;
+}
+
+
+void
+spe_print_code(struct spe_function *p, boolean enable)
+{
+ p->print = enable;
+}
+
+
+void
+spe_indent(struct spe_function *p, int spaces)
+{
+ p->indent += spaces;
+}
+
+
+void
+spe_comment(struct spe_function *p, int rel_indent, const char *s)
+{
+ if (p->print) {
+ p->indent += rel_indent;
+ indent(p);
+ p->indent -= rel_indent;
+ printf("# %s\n", s);
+ }
+}
+
+
+/**
+ * Load quad word.
+ * NOTE: offset is in bytes and the least significant 4 bits must be zero!
+ */
+void spe_lqd(struct spe_function *p, int rT, int rA, int offset)
+{
+ const boolean pSave = p->print;
+
+ /* offset must be a multiple of 16 */
+ assert(offset % 16 == 0);
+ /* offset must fit in 10-bit signed int field, after shifting */
+ assert((offset >> 4) <= 511);
+ assert((offset >> 4) >= -512);
+
+ p->print = FALSE;
+ emit_RI10(p, 0x034, rT, rA, offset >> 4, "spe_lqd");
+ p->print = pSave;
+
+ if (p->print) {
+ indent(p);
+ printf("lqd\t%s, %d(%s)\n", reg_name(rT), offset, reg_name(rA));
+ }
+}
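+
+/*
+ * Example (illustrative): spe_lqd(p, rT, SPE_REG_SP, 32) loads the 16-byte
+ * quadword at $sp + 32; the byte offset is encoded in the instruction as
+ * the 10-bit immediate 32 >> 4 = 2.
+ */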
+
+
+/**
+ * Store quad word.
+ * NOTE: offset is in bytes and the least significant 4 bits must be zero!
+ */
+void spe_stqd(struct spe_function *p, int rT, int rA, int offset)
+{
+ const boolean pSave = p->print;
+
+ /* offset must be a multiple of 16 */
+ assert(offset % 16 == 0);
+ /* offset must fit in 10-bit signed int field, after shifting */
+ assert((offset >> 4) <= 511);
+ assert((offset >> 4) >= -512);
+
+ p->print = FALSE;
+ emit_RI10(p, 0x024, rT, rA, offset >> 4, "spe_stqd");
+ p->print = pSave;
+
+ if (p->print) {
+ indent(p);
+ printf("stqd\t%s, %d(%s)\n", reg_name(rT), offset, reg_name(rA));
+ }
+}
+
+
+/**
+ * For branch instructions:
+ * \param d if 1, disable interrupts if branch is taken
+ * \param e if 1, enable interrupts if branch is taken
+ * If d and e are both zero, the interrupt status is left unchanged (right?)
+ */
+
+/** Branch Indirect to address in rA */
+void spe_bi(struct spe_function *p, int rA, int d, int e)
+{
+ emit_RI7(p, 0x1a8, 0, rA, (d << 5) | (e << 4), __FUNCTION__);
+}
+
+/** Interrupt Return */
+void spe_iret(struct spe_function *p, int rA, int d, int e)
+{
+ emit_RI7(p, 0x1aa, 0, rA, (d << 5) | (e << 4), __FUNCTION__);
+}
+
+/** Branch indirect and set link on external data */
+void spe_bisled(struct spe_function *p, int rT, int rA, int d,
+ int e)
+{
+ emit_RI7(p, 0x1ab, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
+}
+
+/** Branch indirect and set link. Save PC in rT, jump to rA. */
+void spe_bisl(struct spe_function *p, int rT, int rA, int d,
+ int e)
+{
+ emit_RI7(p, 0x1a9, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
+}
+
+/** Branch indirect if zero word. If rT.word[0]==0, jump to rA. */
+void spe_biz(struct spe_function *p, int rT, int rA, int d, int e)
+{
+ emit_RI7(p, 0x128, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
+}
+
+/** Branch indirect if non-zero word. If rT.word[0]!=0, jump to rA. */
+void spe_binz(struct spe_function *p, int rT, int rA, int d, int e)
+{
+ emit_RI7(p, 0x129, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
+}
+
+/** Branch indirect if zero halfword. If rT.halfword[1]==0, jump to rA. */
+void spe_bihz(struct spe_function *p, int rT, int rA, int d, int e)
+{
+ emit_RI7(p, 0x12a, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
+}
+
+/** Branch indirect if non-zero halfword. If rT.halfword[1]!=0, jump to rA. */
+void spe_bihnz(struct spe_function *p, int rT, int rA, int d, int e)
+{
+ emit_RI7(p, 0x12b, rT, rA, (d << 5) | (e << 4), __FUNCTION__);
+}
+
+
+/* Hint-for-branch instructions
+ */
+#if 0
+hbr;
+hbra;
+hbrr;
+#endif
+
+
+/* Control instructions
+ */
+#if 0
+stop;
+EMIT_RR (spe_stopd, 0x140);
+EMIT_ (spe_nop, 0x201);
+sync;
+EMIT_ (spe_dsync, 0x003);
+EMIT_R (spe_mfspr, 0x00c);
+EMIT_R (spe_mtspr, 0x10c);
+#endif
+
+
+/**
+ ** Helper / "macro" instructions.
+ ** Use somewhat verbose names as a reminder that these aren't native
+ ** SPE instructions.
+ **/
+
+
+void
+spe_load_float(struct spe_function *p, int rT, float x)
+{
+ if (x == 0.0f) {
+ spe_il(p, rT, 0x0);
+ }
+ else if (x == 0.5f) {
+ spe_ilhu(p, rT, 0x3f00);
+ }
+ else if (x == 1.0f) {
+ spe_ilhu(p, rT, 0x3f80);
+ }
+ else if (x == -1.0f) {
+ spe_ilhu(p, rT, 0xbf80);
+ }
+ else {
+ union {
+ float f;
+ unsigned u;
+ } bits;
+ bits.f = x;
+ spe_ilhu(p, rT, bits.u >> 16);
+ spe_iohl(p, rT, bits.u & 0xffff);
+ }
+}
+
+
+void
+spe_load_int(struct spe_function *p, int rT, int i)
+{
+ if (-32768 <= i && i <= 32767) {
+ spe_il(p, rT, i);
+ }
+ else {
+ spe_ilhu(p, rT, i >> 16);
+ if (i & 0xffff)
+ spe_iohl(p, rT, i & 0xffff);
+ }
+}
+
+void spe_load_uint(struct spe_function *p, int rT, uint ui)
+{
+ /* If the whole value is in the lower 18 bits, use ila, which
+ * doesn't sign-extend. Otherwise, if the two halfwords of
+ * the constant are identical, use ilh. Otherwise, if every byte of
+ * the desired value is 0x00 or 0xff, we can use Form Select Mask for
+ * Bytes Immediate (fsmbi) to load the value in a single instruction.
+ * Otherwise, in the general case, we have to use ilhu followed by iohl.
+ */
+ if ((ui & 0x0003ffff) == ui) {
+ spe_ila(p, rT, ui);
+ }
+ else if ((ui >> 16) == (ui & 0xffff)) {
+ spe_ilh(p, rT, ui & 0xffff);
+ }
+ else if (
+ ((ui & 0x000000ff) == 0 || (ui & 0x000000ff) == 0x000000ff) &&
+ ((ui & 0x0000ff00) == 0 || (ui & 0x0000ff00) == 0x0000ff00) &&
+ ((ui & 0x00ff0000) == 0 || (ui & 0x00ff0000) == 0x00ff0000) &&
+ ((ui & 0xff000000) == 0 || (ui & 0xff000000) == 0xff000000)
+ ) {
+ uint mask = 0;
+ /* fsmbi duplicates each bit in the given mask eight times,
+ * using a 16-bit value to initialize a 16-byte quadword.
+ * Each 4-bit nybble of the mask corresponds to a full word
+ * of the result; look at the value and figure out the mask
+ * (replicated for each word in the quadword), and then
+ * form the "select mask" to get the value.
+ */
+ if ((ui & 0x000000ff) == 0x000000ff) mask |= 0x1111;
+ if ((ui & 0x0000ff00) == 0x0000ff00) mask |= 0x2222;
+ if ((ui & 0x00ff0000) == 0x00ff0000) mask |= 0x4444;
+ if ((ui & 0xff000000) == 0xff000000) mask |= 0x8888;
+ spe_fsmbi(p, rT, mask);
+ }
+ else {
+ /* The general case: this usually uses two instructions, but
+ * may use only one if the low-order 16 bits of each word are 0.
+ */
+ spe_ilhu(p, rT, ui >> 16);
+ if (ui & 0xffff)
+ spe_iohl(p, rT, ui & 0xffff);
+ }
+}
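+
+/*
+ * Worked example (illustrative): spe_load_uint(p, rT, 0xff0000ff) takes the
+ * fsmbi path above.  Bytes 0 and 3 of each word are 0xff, so the mask
+ * becomes 0x1111 | 0x8888 = 0x9999, and fsmbi expands each mask bit to a
+ * full byte of 0xff or 0x00, yielding 0xff0000ff in every word of rT with
+ * a single instruction.
+ */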
+
+/**
+ * This function is constructed identically to spe_xor_uint() below.
+ * Changes to one should be made in the other.
+ */
+void
+spe_and_uint(struct spe_function *p, int rT, int rA, uint ui)
+{
+ /* If we can, emit a single instruction, either And Byte Immediate
+ * (which uses the same constant across each byte), And Halfword Immediate
+ * (which sign-extends a 10-bit immediate to 16 bits and uses that
+ * across each halfword), or And Word Immediate (which sign-extends
+ * a 10-bit immediate to 32 bits).
+ *
+ * Otherwise, we'll need to use a temporary register.
+ */
+ uint tmp;
+
+ /* If the upper 23 bits are all 0s or all 1s, sign extension
+ * will work and we can use And Word Immediate
+ */
+ tmp = ui & 0xfffffe00;
+ if (tmp == 0xfffffe00 || tmp == 0) {
+ spe_andi(p, rT, rA, ui & 0x000003ff);
+ return;
+ }
+
+ /* If the ui field is symmetric along halfword boundaries and
+ * the upper 7 bits of each halfword are all 0s or 1s, we
+ * can use And Halfword Immediate
+ */
+ tmp = ui & 0xfe00fe00;
+ if ((tmp == 0xfe00fe00 || tmp == 0) && ((ui >> 16) == (ui & 0x0000ffff))) {
+ spe_andhi(p, rT, rA, ui & 0x000003ff);
+ return;
+ }
+
+ /* If the ui field is symmetric in each byte, then we can use
+ * the And Byte Immediate instruction.
+ */
+ tmp = ui & 0x000000ff;
+ if ((ui >> 24) == tmp && ((ui >> 16) & 0xff) == tmp && ((ui >> 8) & 0xff) == tmp) {
+ spe_andbi(p, rT, rA, tmp);
+ return;
+ }
+
+ /* Otherwise, we'll have to use a temporary register. */
+ int tmp_reg = spe_allocate_available_register(p);
+ spe_load_uint(p, tmp_reg, ui);
+ spe_and(p, rT, rA, tmp_reg);
+ spe_release_register(p, tmp_reg);
+}
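+
+/*
+ * Example (illustrative): spe_and_uint(p, rT, rA, 0x00ff00ff) matches the
+ * halfword case above (both halfwords are 0x00ff, upper 7 bits clear), so a
+ * single "andhi" with immediate 0xff is emitted.  A value such as
+ * 0x12345678 matches none of the immediate forms and falls back to
+ * spe_load_uint() into a scratch register followed by spe_and().
+ */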
+
+
+/**
+ * This function is constructed identically to spe_and_uint() above.
+ * Changes to one should be made in the other.
+ */
+void
+spe_xor_uint(struct spe_function *p, int rT, int rA, uint ui)
+{
+ /* If we can, emit a single instruction, either Exclusive Or Byte
+ * Immediate (which uses the same constant across each byte), Exclusive
+ * Or Halfword Immediate (which sign-extends a 10-bit immediate to
+ * 16 bits and uses that across each halfword), or Exclusive Or Word
+ * Immediate (which sign-extends a 10-bit immediate to 32 bits).
+ *
+ * Otherwise, we'll need to use a temporary register.
+ */
+ uint tmp;
+
+ /* If the upper 23 bits are all 0s or all 1s, sign extension
+ * will work and we can use Exclusive Or Word Immediate
+ */
+ tmp = ui & 0xfffffe00;
+ if (tmp == 0xfffffe00 || tmp == 0) {
+ spe_xori(p, rT, rA, ui & 0x000003ff);
+ return;
+ }
+
+ /* If the ui field is symmetric along halfword boundaries and
+ * the upper 7 bits of each halfword are all 0s or 1s, we
+ * can use Exclusive Or Halfword Immediate
+ */
+ tmp = ui & 0xfe00fe00;
+ if ((tmp == 0xfe00fe00 || tmp == 0) && ((ui >> 16) == (ui & 0x0000ffff))) {
+ spe_xorhi(p, rT, rA, ui & 0x000003ff);
+ return;
+ }
+
+ /* If the ui field is symmetric in each byte, then we can use
+ * the Exclusive Or Byte Immediate instruction.
+ */
+ tmp = ui & 0x000000ff;
+ if ((ui >> 24) == tmp && ((ui >> 16) & 0xff) == tmp && ((ui >> 8) & 0xff) == tmp) {
+ spe_xorbi(p, rT, rA, tmp);
+ return;
+ }
+
+ /* Otherwise, we'll have to use a temporary register. */
+ int tmp_reg = spe_allocate_available_register(p);
+ spe_load_uint(p, tmp_reg, ui);
+ spe_xor(p, rT, rA, tmp_reg);
+ spe_release_register(p, tmp_reg);
+}
+
+void
+spe_compare_equal_uint(struct spe_function *p, int rT, int rA, uint ui)
+{
+ /* If the comparison value is 9 bits or less, it fits inside a
+ * Compare Equal Word Immediate instruction.
+ */
+ if ((ui & 0x000001ff) == ui) {
+ spe_ceqi(p, rT, rA, ui);
+ }
+ /* Otherwise, we're going to have to load a word first. */
+ else {
+ int tmp_reg = spe_allocate_available_register(p);
+ spe_load_uint(p, tmp_reg, ui);
+ spe_ceq(p, rT, rA, tmp_reg);
+ spe_release_register(p, tmp_reg);
+ }
+}
+
+void
+spe_compare_greater_uint(struct spe_function *p, int rT, int rA, uint ui)
+{
+ /* If the comparison value is 10 bits or less, it fits inside a
+ * Compare Logical Greater Than Word Immediate instruction.
+ */
+ if ((ui & 0x000003ff) == ui) {
+ spe_clgti(p, rT, rA, ui);
+ }
+ /* Otherwise, we're going to have to load a word first. */
+ else {
+ int tmp_reg = spe_allocate_available_register(p);
+ spe_load_uint(p, tmp_reg, ui);
+ spe_clgt(p, rT, rA, tmp_reg);
+ spe_release_register(p, tmp_reg);
+ }
+}
+
+void
+spe_splat(struct spe_function *p, int rT, int rA)
+{
+ /* Use a temporary, just in case rT == rA */
+ int tmp_reg = spe_allocate_available_register(p);
+ /* Duplicate bytes 0, 1, 2, and 3 across the whole register */
+ spe_ila(p, tmp_reg, 0x00010203);
+ spe_shufb(p, rT, rA, rA, tmp_reg);
+ spe_release_register(p, tmp_reg);
+}
+
+
+void
+spe_complement(struct spe_function *p, int rT, int rA)
+{
+ spe_nor(p, rT, rA, rA);
+}
+
+
+void
+spe_move(struct spe_function *p, int rT, int rA)
+{
+ /* Use different instructions depending on the instruction address
+ * to take advantage of the dual pipelines.
+ */
+ if (p->num_inst & 1)
+ spe_shlqbyi(p, rT, rA, 0); /* odd pipe */
+ else
+ spe_ori(p, rT, rA, 0); /* even pipe */
+}
+
+
+void
+spe_zero(struct spe_function *p, int rT)
+{
+ spe_xor(p, rT, rT, rT);
+}
+
+
+void
+spe_splat_word(struct spe_function *p, int rT, int rA, int word)
+{
+ assert(word >= 0);
+ assert(word <= 3);
+
+ if (word == 0) {
+ int tmp1 = rT;
+      spe_ila(p, tmp1, 66051);   /* 66051 == 0x00010203 */
+ spe_shufb(p, rT, rA, rA, tmp1);
+ }
+ else {
+ /* XXX review this, we may not need the rotqbyi instruction */
+ int tmp1 = rT;
+ int tmp2 = spe_allocate_available_register(p);
+
+      spe_ila(p, tmp1, 66051);   /* 66051 == 0x00010203 */
+ spe_rotqbyi(p, tmp2, rA, 4 * word);
+ spe_shufb(p, rT, tmp2, tmp2, tmp1);
+
+ spe_release_register(p, tmp2);
+ }
+}
+
+/**
+ * For each 32-bit float element of rA and rB, choose the smaller of the
+ * two, compositing them into the rT register.
+ *
+ * The Float Compare Greater Than (fcgt) instruction will put 1s into
+ * compare_reg where rA > rB, and 0s where rA <= rB.
+ *
+ * Then the Select Bits (selb) instruction will take bits from rA where
+ * compare_reg is 0, and from rB where compare_reg is 1; i.e., from rA
+ * where rA <= rB and from rB where rB > rA, which is exactly the
+ * "min" operation.
+ *
+ * The compare_reg could in many cases be the same as rT, unless
+ * rT == rA || rT == rB. But since this is common in constructions
+ * like "x = min(x, a)", we always allocate a new register to be safe.
+ */
+void
+spe_float_min(struct spe_function *p, int rT, int rA, int rB)
+{
+ int compare_reg = spe_allocate_available_register(p);
+ spe_fcgt(p, compare_reg, rA, rB);
+ spe_selb(p, rT, rA, rB, compare_reg);
+ spe_release_register(p, compare_reg);
+}
+
+/**
+ * For each 32-bit float element of rA and rB, choose the greater of the
+ * two, compositing them into the rT register.
+ *
+ * The logic is similar to that of spe_float_min() above; the only
+ * difference is that the registers on spe_selb() have been reversed,
+ * so that the larger of the two is selected instead of the smaller.
+ */
+void
+spe_float_max(struct spe_function *p, int rT, int rA, int rB)
+{
+ int compare_reg = spe_allocate_available_register(p);
+ spe_fcgt(p, compare_reg, rA, rB);
+ spe_selb(p, rT, rB, rA, compare_reg);
+ spe_release_register(p, compare_reg);
+}
+
+#endif /* GALLIUM_CELL */
diff --git a/src/gallium/auxiliary/rtasm/rtasm_ppc_spe.h b/src/gallium/auxiliary/rtasm/rtasm_ppc_spe.h
new file mode 100644
index 0000000000..65d9c77415
--- /dev/null
+++ b/src/gallium/auxiliary/rtasm/rtasm_ppc_spe.h
@@ -0,0 +1,433 @@
+/*
+ * (C) Copyright IBM Corporation 2008
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file
+ * Real-time assembly generation interface for Cell B.E. SPEs.
+ * For details, see /opt/cell/sdk/docs/arch/SPU_ISA_v1.2_27Jan2007_pub.pdf
+ *
+ * \author Ian Romanick <idr@us.ibm.com>
+ * \author Brian Paul
+ */
+
+#ifndef RTASM_PPC_SPE_H
+#define RTASM_PPC_SPE_H
+
+/** 4 bytes per instruction */
+#define SPE_INST_SIZE 4
+
+/** number of general-purpose SIMD registers */
+#define SPE_NUM_REGS 128
+
+/** Return Address register (aka $lr / Link Register) */
+#define SPE_REG_RA 0
+
+/** Stack Pointer register (aka $sp) */
+#define SPE_REG_SP 1
+
+
+struct spe_function
+{
+ uint32_t *store; /**< instruction buffer */
+ uint num_inst;
+ uint max_inst;
+
+ /**
+ * The "set count" reflects the number of nested register sets
+ * are allowed. In the unlikely case that we exceed the set count,
+ * register allocation will start to be confused, which is critical
+ * enough that we check for it.
+ */
+ unsigned char set_count;
+
+ /**
+ * Flags for used and unused registers. Each byte corresponds to a
+ * register; a 0 in that byte means that the register is available.
+ * A value of 1 means that the register was allocated in the current
+ * register set. Any other value N means that the register was allocated
+ * N register sets ago.
+ *
+ * \sa
+ * spe_allocate_register, spe_allocate_available_register,
+    * spe_allocate_register_set, spe_release_register_set, spe_release_register
+ */
+ unsigned char regs[SPE_NUM_REGS];
+
+ boolean print; /**< print/dump instructions as they're emitted? */
+ int indent; /**< number of spaces to indent */
+};
+
+
+extern void spe_init_func(struct spe_function *p, uint code_size);
+extern void spe_release_func(struct spe_function *p);
+extern uint spe_code_size(const struct spe_function *p);
+
+extern int spe_allocate_available_register(struct spe_function *p);
+extern int spe_allocate_register(struct spe_function *p, int reg);
+extern void spe_release_register(struct spe_function *p, int reg);
+extern void spe_allocate_register_set(struct spe_function *p);
+extern void spe_release_register_set(struct spe_function *p);
+
+extern uint spe_get_registers_used(const struct spe_function *p, ubyte used[]);
+
+extern void spe_print_code(struct spe_function *p, boolean enable);
+extern void spe_indent(struct spe_function *p, int spaces);
+extern void spe_comment(struct spe_function *p, int rel_indent, const char *s);
+
+
+#endif /* RTASM_PPC_SPE_H */
+
+#ifndef EMIT
+#define EMIT(_name, _op) \
+ extern void _name (struct spe_function *p);
+#define EMIT_(_name, _op) \
+ extern void _name (struct spe_function *p, int rT);
+#define EMIT_R(_name, _op) \
+ extern void _name (struct spe_function *p, int rT, int rA);
+#define EMIT_RR(_name, _op) \
+ extern void _name (struct spe_function *p, int rT, int rA, int rB);
+#define EMIT_RRR(_name, _op) \
+ extern void _name (struct spe_function *p, int rT, int rA, int rB, int rC);
+#define EMIT_RI7(_name, _op) \
+ extern void _name (struct spe_function *p, int rT, int rA, int imm);
+#define EMIT_RI8(_name, _op, bias) \
+ extern void _name (struct spe_function *p, int rT, int rA, int imm);
+#define EMIT_RI10(_name, _op) \
+ extern void _name (struct spe_function *p, int rT, int rA, int imm);
+#define EMIT_RI10s(_name, _op) \
+ extern void _name (struct spe_function *p, int rT, int rA, int imm);
+#define EMIT_RI16(_name, _op) \
+ extern void _name (struct spe_function *p, int rT, int imm);
+#define EMIT_RI18(_name, _op) \
+ extern void _name (struct spe_function *p, int rT, int imm);
+#define EMIT_I16(_name, _op) \
+ extern void _name (struct spe_function *p, int imm);
+#define UNDEF_EMIT_MACROS
+#endif /* EMIT */
+
+
+/* Memory load / store instructions
+ */
+EMIT_RR (spe_lqx, 0x1c4)
+EMIT_RI16(spe_lqa, 0x061)
+EMIT_RI16(spe_lqr, 0x067)
+EMIT_RR (spe_stqx, 0x144)
+EMIT_RI16(spe_stqa, 0x041)
+EMIT_RI16(spe_stqr, 0x047)
+EMIT_RI7 (spe_cbd, 0x1f4)
+EMIT_RR (spe_cbx, 0x1d4)
+EMIT_RI7 (spe_chd, 0x1f5)
+EMIT_RI7 (spe_chx, 0x1d5)
+EMIT_RI7 (spe_cwd, 0x1f6)
+EMIT_RI7 (spe_cwx, 0x1d6)
+EMIT_RI7 (spe_cdd, 0x1f7)
+EMIT_RI7 (spe_cdx, 0x1d7)
+
+
+/* Constant formation instructions
+ */
+EMIT_RI16(spe_ilh, 0x083)
+EMIT_RI16(spe_ilhu, 0x082)
+EMIT_RI16(spe_il, 0x081)
+EMIT_RI18(spe_ila, 0x021)
+EMIT_RI16(spe_iohl, 0x0c1)
+EMIT_RI16(spe_fsmbi, 0x065)
+
+
+
+/* Integer and logical instructions
+ */
+EMIT_RR (spe_ah, 0x0c8)
+EMIT_RI10(spe_ahi, 0x01d)
+EMIT_RR (spe_a, 0x0c0)
+EMIT_RI10s(spe_ai, 0x01c)
+EMIT_RR (spe_sfh, 0x048)
+EMIT_RI10(spe_sfhi, 0x00d)
+EMIT_RR (spe_sf, 0x040)
+EMIT_RI10(spe_sfi, 0x00c)
+EMIT_RR (spe_addx, 0x340)
+EMIT_RR (spe_cg, 0x0c2)
+EMIT_RR (spe_cgx, 0x342)
+EMIT_RR (spe_sfx, 0x341)
+EMIT_RR (spe_bg, 0x042)
+EMIT_RR (spe_bgx, 0x343)
+EMIT_RR (spe_mpy, 0x3c4)
+EMIT_RR (spe_mpyu, 0x3cc)
+EMIT_RI10(spe_mpyi, 0x074)
+EMIT_RI10(spe_mpyui, 0x075)
+EMIT_RRR (spe_mpya, 0x00c)
+EMIT_RR (spe_mpyh, 0x3c5)
+EMIT_RR (spe_mpys, 0x3c7)
+EMIT_RR (spe_mpyhh, 0x3c6)
+EMIT_RR (spe_mpyhha, 0x346)
+EMIT_RR (spe_mpyhhu, 0x3ce)
+EMIT_RR (spe_mpyhhau, 0x34e)
+EMIT_R (spe_clz, 0x2a5)
+EMIT_R (spe_cntb, 0x2b4)
+EMIT_R (spe_fsmb, 0x1b6)
+EMIT_R (spe_fsmh, 0x1b5)
+EMIT_R (spe_fsm, 0x1b4)
+EMIT_R (spe_gbb, 0x1b2)
+EMIT_R (spe_gbh, 0x1b1)
+EMIT_R (spe_gb, 0x1b0)
+EMIT_RR (spe_avgb, 0x0d3)
+EMIT_RR (spe_absdb, 0x053)
+EMIT_RR (spe_sumb, 0x253)
+EMIT_R (spe_xsbh, 0x2b6)
+EMIT_R (spe_xshw, 0x2ae)
+EMIT_R (spe_xswd, 0x2a6)
+EMIT_RR (spe_and, 0x0c1)
+EMIT_RR (spe_andc, 0x2c1)
+EMIT_RI10s(spe_andbi, 0x016)
+EMIT_RI10s(spe_andhi, 0x015)
+EMIT_RI10s(spe_andi, 0x014)
+EMIT_RR (spe_or, 0x041)
+EMIT_RR (spe_orc, 0x2c9)
+EMIT_RI10s(spe_orbi, 0x006)
+EMIT_RI10s(spe_orhi, 0x005)
+EMIT_RI10s(spe_ori, 0x004)
+EMIT_R (spe_orx, 0x1f0)
+EMIT_RR (spe_xor, 0x241)
+EMIT_RI10s(spe_xorbi, 0x046)
+EMIT_RI10s(spe_xorhi, 0x045)
+EMIT_RI10s(spe_xori, 0x044)
+EMIT_RR (spe_nand, 0x0c9)
+EMIT_RR (spe_nor, 0x049)
+EMIT_RR (spe_eqv, 0x249)
+EMIT_RRR (spe_selb, 0x008)
+EMIT_RRR (spe_shufb, 0x00b)
+
+
+/* Shift and rotate instructions
+ */
+EMIT_RR (spe_shlh, 0x05f)
+EMIT_RI7 (spe_shlhi, 0x07f)
+EMIT_RR (spe_shl, 0x05b)
+EMIT_RI7 (spe_shli, 0x07b)
+EMIT_RR (spe_shlqbi, 0x1db)
+EMIT_RI7 (spe_shlqbii, 0x1fb)
+EMIT_RR (spe_shlqby, 0x1df)
+EMIT_RI7 (spe_shlqbyi, 0x1ff)
+EMIT_RR (spe_shlqbybi, 0x1cf)
+EMIT_RR (spe_roth, 0x05c)
+EMIT_RI7 (spe_rothi, 0x07c)
+EMIT_RR (spe_rot, 0x058)
+EMIT_RI7 (spe_roti, 0x078)
+EMIT_RR (spe_rotqby, 0x1dc)
+EMIT_RI7 (spe_rotqbyi, 0x1fc)
+EMIT_RR (spe_rotqbybi, 0x1cc)
+EMIT_RR (spe_rotqbi, 0x1d8)
+EMIT_RI7 (spe_rotqbii, 0x1f8)
+EMIT_RR (spe_rothm, 0x05d)
+EMIT_RI7 (spe_rothmi, 0x07d)
+EMIT_RR (spe_rotm, 0x059)
+EMIT_RI7 (spe_rotmi, 0x079)
+EMIT_RR (spe_rotqmby, 0x1dd)
+EMIT_RI7 (spe_rotqmbyi, 0x1fd)
+EMIT_RR (spe_rotqmbybi, 0x1cd)
+EMIT_RR (spe_rotqmbi, 0x1c9)
+EMIT_RI7 (spe_rotqmbii, 0x1f9)
+EMIT_RR (spe_rotmah, 0x05e)
+EMIT_RI7 (spe_rotmahi, 0x07e)
+EMIT_RR (spe_rotma, 0x05a)
+EMIT_RI7 (spe_rotmai, 0x07a)
+
+
+/* Compare, branch, and halt instructions
+ */
+EMIT_RR (spe_heq, 0x3d8)
+EMIT_RI10(spe_heqi, 0x07f)
+EMIT_RR (spe_hgt, 0x258)
+EMIT_RI10(spe_hgti, 0x04f)
+EMIT_RR (spe_hlgt, 0x2d8)
+EMIT_RI10(spe_hlgti, 0x05f)
+EMIT_RR (spe_ceqb, 0x3d0)
+EMIT_RI10(spe_ceqbi, 0x07e)
+EMIT_RR (spe_ceqh, 0x3c8)
+EMIT_RI10(spe_ceqhi, 0x07d)
+EMIT_RR (spe_ceq, 0x3c0)
+EMIT_RI10(spe_ceqi, 0x07c)
+EMIT_RR (spe_cgtb, 0x250)
+EMIT_RI10(spe_cgtbi, 0x04e)
+EMIT_RR (spe_cgth, 0x248)
+EMIT_RI10(spe_cgthi, 0x04d)
+EMIT_RR (spe_cgt, 0x240)
+EMIT_RI10(spe_cgti, 0x04c)
+EMIT_RR (spe_clgtb, 0x2d0)
+EMIT_RI10(spe_clgtbi, 0x05e)
+EMIT_RR (spe_clgth, 0x2c8)
+EMIT_RI10(spe_clgthi, 0x05d)
+EMIT_RR (spe_clgt, 0x2c0)
+EMIT_RI10(spe_clgti, 0x05c)
+EMIT_I16 (spe_br, 0x064)
+EMIT_I16 (spe_bra, 0x060)
+EMIT_RI16(spe_brsl, 0x066)
+EMIT_RI16(spe_brasl, 0x062)
+EMIT_RI16(spe_brnz, 0x042)
+EMIT_RI16(spe_brz, 0x040)
+EMIT_RI16(spe_brhnz, 0x046)
+EMIT_RI16(spe_brhz, 0x044)
+
+/* Control instructions
+ */
+EMIT (spe_lnop, 0x001)
+
+extern void
+spe_lqd(struct spe_function *p, int rT, int rA, int offset);
+
+extern void
+spe_stqd(struct spe_function *p, int rT, int rA, int offset);
+
+extern void spe_bi(struct spe_function *p, int rA, int d, int e);
+extern void spe_iret(struct spe_function *p, int rA, int d, int e);
+extern void spe_bisled(struct spe_function *p, int rT, int rA,
+ int d, int e);
+extern void spe_bisl(struct spe_function *p, int rT, int rA,
+ int d, int e);
+extern void spe_biz(struct spe_function *p, int rT, int rA,
+ int d, int e);
+extern void spe_binz(struct spe_function *p, int rT, int rA,
+ int d, int e);
+extern void spe_bihz(struct spe_function *p, int rT, int rA,
+ int d, int e);
+extern void spe_bihnz(struct spe_function *p, int rT, int rA,
+ int d, int e);
+
+
+/** Load/splat immediate float into rT. */
+extern void
+spe_load_float(struct spe_function *p, int rT, float x);
+
+/** Load/splat immediate int into rT. */
+extern void
+spe_load_int(struct spe_function *p, int rT, int i);
+
+/** Load/splat immediate unsigned int into rT. */
+extern void
+spe_load_uint(struct spe_function *p, int rT, uint ui);
+
+/** And immediate value into rT. */
+extern void
+spe_and_uint(struct spe_function *p, int rT, int rA, uint ui);
+
+/** Xor immediate value into rT. */
+extern void
+spe_xor_uint(struct spe_function *p, int rT, int rA, uint ui);
+
+/** Compare equal with immediate value. */
+extern void
+spe_compare_equal_uint(struct spe_function *p, int rT, int rA, uint ui);
+
+/** Compare greater with immediate value. */
+extern void
+spe_compare_greater_uint(struct spe_function *p, int rT, int rA, uint ui);
+
+/** Replicate word 0 of rA across rT. */
+extern void
+spe_splat(struct spe_function *p, int rT, int rA);
+
+/** rT = complement_all_bits(rA). */
+extern void
+spe_complement(struct spe_function *p, int rT, int rA);
+
+/** rT = rA. */
+extern void
+spe_move(struct spe_function *p, int rT, int rA);
+
+/** rT = {0,0,0,0}. */
+extern void
+spe_zero(struct spe_function *p, int rT);
+
+/** rT = splat(rA, word) */
+extern void
+spe_splat_word(struct spe_function *p, int rT, int rA, int word);
+
+/** rT = float min(rA, rB) */
+extern void
+spe_float_min(struct spe_function *p, int rT, int rA, int rB);
+
+/** rT = float max(rA, rB) */
+extern void
+spe_float_max(struct spe_function *p, int rT, int rA, int rB);
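+
+/* Illustrative sketch (not part of the original interface comments):
+ * the helpers above compose into short sequences.  For example,
+ * clamping register rIn to [0.0, 1.0] with the result in rOut, where
+ * rOut, rIn, rZero and rOne are hypothetical caller-chosen register
+ * numbers:
+ *
+ *    spe_zero(p, rZero);                    rZero = {0,0,0,0}
+ *    spe_load_float(p, rOne, 1.0f);         splat 1.0f into rOne
+ *    spe_float_max(p, rOut, rIn, rZero);    rOut = max(rIn, 0.0)
+ *    spe_float_min(p, rOut, rOut, rOne);    rOut = min(rOut, 1.0)
+ */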
+
+
+/* Floating-point instructions
+ */
+EMIT_RR (spe_fa, 0x2c4)
+EMIT_RR (spe_dfa, 0x2cc)
+EMIT_RR (spe_fs, 0x2c5)
+EMIT_RR (spe_dfs, 0x2cd)
+EMIT_RR (spe_fm, 0x2c6)
+EMIT_RR (spe_dfm, 0x2ce)
+EMIT_RRR (spe_fma, 0x00e)
+EMIT_RR (spe_dfma, 0x35c)
+EMIT_RRR (spe_fnms, 0x00d)
+EMIT_RR (spe_dfnms, 0x35e)
+EMIT_RRR (spe_fms, 0x00f)
+EMIT_RR (spe_dfms, 0x35d)
+EMIT_RR (spe_dfnma, 0x35f)
+EMIT_R (spe_frest, 0x1b8)
+EMIT_R (spe_frsqest, 0x1b9)
+EMIT_RR (spe_fi, 0x3d4)
+EMIT_RI8 (spe_csflt, 0x1da, 155)
+EMIT_RI8 (spe_cflts, 0x1d8, 173)
+EMIT_RI8 (spe_cuflt, 0x1db, 155)
+EMIT_RI8 (spe_cfltu, 0x1d9, 173)
+EMIT_R (spe_frds, 0x3b9)
+EMIT_R (spe_fesd, 0x3b8)
+EMIT_RR (spe_dfceq, 0x3c3)
+EMIT_RR (spe_dfcmeq, 0x3cb)
+EMIT_RR (spe_dfcgt, 0x2c3)
+EMIT_RR (spe_dfcmgt, 0x2cb)
+EMIT_RI7 (spe_dftsv, 0x3bf)
+EMIT_RR (spe_fceq, 0x3c2)
+EMIT_RR (spe_fcmeq, 0x3ca)
+EMIT_RR (spe_fcgt, 0x2c2)
+EMIT_RR (spe_fcmgt, 0x2ca)
+EMIT_R (spe_fscrwr, 0x3ba)
+EMIT_ (spe_fscrrd, 0x398)
+
+
+/* Channel instructions
+ */
+EMIT_R (spe_rdch, 0x00d)
+EMIT_R (spe_rdchcnt, 0x00f)
+EMIT_R (spe_wrch, 0x10d)
+
+
+#ifdef UNDEF_EMIT_MACROS
+#undef EMIT
+#undef EMIT_
+#undef EMIT_R
+#undef EMIT_RR
+#undef EMIT_RRR
+#undef EMIT_RI7
+#undef EMIT_RI8
+#undef EMIT_RI10
+#undef EMIT_RI10s
+#undef EMIT_RI16
+#undef EMIT_RI18
+#undef EMIT_I16
+#undef UNDEF_EMIT_MACROS
+#endif /* UNDEF_EMIT_MACROS */
diff --git a/src/gallium/auxiliary/rtasm/rtasm_x86sse.c b/src/gallium/auxiliary/rtasm/rtasm_x86sse.c
new file mode 100644
index 0000000000..1acf3c373e
--- /dev/null
+++ b/src/gallium/auxiliary/rtasm/rtasm_x86sse.c
@@ -0,0 +1,1757 @@
+/**************************************************************************
+ *
+ * Copyright (C) 1999-2005 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "pipe/p_config.h"
+
+#if defined(PIPE_ARCH_X86)
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+#include "util/u_pointer.h"
+
+#include "rtasm_execmem.h"
+#include "rtasm_x86sse.h"
+
+#define DISASSEM 0
+#define X86_TWOB 0x0f
+
+
+#define DUMP_SSE 0
+
+
+void x86_print_reg( struct x86_reg reg )
+{
+ if (reg.mod != mod_REG)
+ debug_printf( "[" );
+
+ switch( reg.file ) {
+ case file_REG32:
+ switch( reg.idx ) {
+ case reg_AX: debug_printf( "EAX" ); break;
+ case reg_CX: debug_printf( "ECX" ); break;
+ case reg_DX: debug_printf( "EDX" ); break;
+ case reg_BX: debug_printf( "EBX" ); break;
+ case reg_SP: debug_printf( "ESP" ); break;
+ case reg_BP: debug_printf( "EBP" ); break;
+ case reg_SI: debug_printf( "ESI" ); break;
+ case reg_DI: debug_printf( "EDI" ); break;
+ }
+ break;
+ case file_MMX:
+ debug_printf( "MMX%u", reg.idx );
+ break;
+ case file_XMM:
+ debug_printf( "XMM%u", reg.idx );
+ break;
+ case file_x87:
+ debug_printf( "fp%u", reg.idx );
+ break;
+ }
+
+ if (reg.mod == mod_DISP8 ||
+ reg.mod == mod_DISP32)
+ debug_printf("+%d", reg.disp);
+
+ if (reg.mod != mod_REG)
+ debug_printf( "]" );
+}
+
+#if DUMP_SSE
+
+#define DUMP_START() debug_printf( "\n" )
+#define DUMP_END() debug_printf( "\n" )
+
+#define DUMP() do { \
+ const char *foo = __FUNCTION__; \
+ while (*foo && *foo != '_') \
+ foo++; \
+ if (*foo) \
+ foo++; \
+ debug_printf( "\n% 4x% 15s ", p->csr - p->store, foo ); \
+} while (0)
+
+#define DUMP_I( I ) do { \
+ DUMP(); \
+ debug_printf( "%u", I ); \
+} while( 0 )
+
+#define DUMP_R( R0 ) do { \
+ DUMP(); \
+ x86_print_reg( R0 ); \
+} while( 0 )
+
+#define DUMP_RR( R0, R1 ) do { \
+ DUMP(); \
+ x86_print_reg( R0 ); \
+ debug_printf( ", " ); \
+ x86_print_reg( R1 ); \
+} while( 0 )
+
+#define DUMP_RI( R0, I ) do { \
+ DUMP(); \
+ x86_print_reg( R0 ); \
+ debug_printf( ", %u", I ); \
+} while( 0 )
+
+#define DUMP_RRI( R0, R1, I ) do { \
+ DUMP(); \
+ x86_print_reg( R0 ); \
+ debug_printf( ", " ); \
+ x86_print_reg( R1 ); \
+ debug_printf( ", %u", I ); \
+} while( 0 )
+
+#else
+
+#define DUMP_START()
+#define DUMP_END()
+#define DUMP( )
+#define DUMP_I( I )
+#define DUMP_R( R0 )
+#define DUMP_RR( R0, R1 )
+#define DUMP_RI( R0, I )
+#define DUMP_RRI( R0, R1, I )
+
+#endif
+
+
+static void do_realloc( struct x86_function *p )
+{
+ if (p->store == p->error_overflow) {
+ p->csr = p->store;
+ }
+ else if (p->size == 0) {
+ p->size = 1024;
+ p->store = rtasm_exec_malloc(p->size);
+ p->csr = p->store;
+ }
+ else {
+ uintptr_t used = pointer_to_uintptr( p->csr ) - pointer_to_uintptr( p->store );
+ unsigned char *tmp = p->store;
+ p->size *= 2;
+ p->store = rtasm_exec_malloc(p->size);
+
+ if (p->store) {
+ memcpy(p->store, tmp, used);
+ p->csr = p->store + used;
+ }
+ else {
+ p->csr = p->store;
+ }
+
+ rtasm_exec_free(tmp);
+ }
+
+ if (p->store == NULL) {
+ p->store = p->csr = p->error_overflow;
+ p->size = sizeof(p->error_overflow);
+ }
+}
+
+/* Emit bytes to the instruction stream:
+ */
+static unsigned char *reserve( struct x86_function *p, int bytes )
+{
+ if (p->csr + bytes - p->store > (int) p->size)
+ do_realloc(p);
+
+ {
+ unsigned char *csr = p->csr;
+ p->csr += bytes;
+ return csr;
+ }
+}
+
+
+
+static void emit_1b( struct x86_function *p, char b0 )
+{
+ char *csr = (char *)reserve(p, 1);
+ *csr = b0;
+}
+
+static void emit_1i( struct x86_function *p, int i0 )
+{
+ int *icsr = (int *)reserve(p, sizeof(i0));
+ *icsr = i0;
+}
+
+static void emit_1ub( struct x86_function *p, unsigned char b0 )
+{
+ unsigned char *csr = reserve(p, 1);
+ *csr++ = b0;
+}
+
+static void emit_2ub( struct x86_function *p, unsigned char b0, unsigned char b1 )
+{
+ unsigned char *csr = reserve(p, 2);
+ *csr++ = b0;
+ *csr++ = b1;
+}
+
+static void emit_3ub( struct x86_function *p, unsigned char b0, unsigned char b1, unsigned char b2 )
+{
+ unsigned char *csr = reserve(p, 3);
+ *csr++ = b0;
+ *csr++ = b1;
+ *csr++ = b2;
+}
+
+
+/* Build a modRM byte plus a possible displacement.  There is no
+ * general treatment of SIB indexing (only the degenerate ESP case
+ * below), and no way to encode an absolute address.
+ *
+ * This is the "/r" field in the x86 manuals...
+ */
+static void emit_modrm( struct x86_function *p,
+ struct x86_reg reg,
+ struct x86_reg regmem )
+{
+ unsigned char val = 0;
+
+ assert(reg.mod == mod_REG);
+
+ val |= regmem.mod << 6; /* mod field */
+ val |= reg.idx << 3; /* reg field */
+ val |= regmem.idx; /* r/m field */
+
+ emit_1ub(p, val);
+
+   /* An ESP-based memory operand also needs a SIB byte.
+    */
+   if (regmem.file == file_REG32 &&
+       regmem.idx == reg_SP &&
+       regmem.mod != mod_REG) {
+      emit_1ub(p, 0x24);    /* SIB: scale=1, no index, base=ESP */
+ }
+
+ switch (regmem.mod) {
+ case mod_REG:
+ case mod_INDIRECT:
+ break;
+ case mod_DISP8:
+ emit_1b(p, (char) regmem.disp);
+ break;
+ case mod_DISP32:
+ emit_1i(p, regmem.disp);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
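+
+/* Worked example (illustrative, not part of the original patch): for
+ * "mov eax, [ebx+8]" the caller emits opcode 0x8b and then
+ * emit_modrm(p, EAX, [EBX+8]).  EAX supplies reg=000, EBX supplies
+ * r/m=011, and the displacement of 8 fits in a signed byte so
+ * x86_make_disp() picked mod_DISP8 (mod=01); the modRM byte is
+ * 01 000 011 = 0x43, followed by the disp8 0x08:
+ *
+ *    x86_mov(p, x86_make_reg(file_REG32, reg_AX),
+ *            x86_make_disp(x86_make_reg(file_REG32, reg_BX), 8));
+ *
+ * emits the byte sequence 8B 43 08.
+ */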
+
+/* Emits the "/0".."/7" specialized versions of the modrm ("/r") bytes.
+ */
+static void emit_modrm_noreg( struct x86_function *p,
+ unsigned op,
+ struct x86_reg regmem )
+{
+ struct x86_reg dummy = x86_make_reg(file_REG32, op);
+ emit_modrm(p, dummy, regmem);
+}
+
+/* Many x86 instructions have two opcodes to cope with the situations
+ * where the destination is a register or memory reference
+ * respectively. This function selects the correct opcode based on
+ * the arguments presented.
+ */
+static void emit_op_modrm( struct x86_function *p,
+ unsigned char op_dst_is_reg,
+ unsigned char op_dst_is_mem,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ switch (dst.mod) {
+ case mod_REG:
+ emit_1ub(p, op_dst_is_reg);
+ emit_modrm(p, dst, src);
+ break;
+ case mod_INDIRECT:
+ case mod_DISP32:
+ case mod_DISP8:
+ assert(src.mod == mod_REG);
+ emit_1ub(p, op_dst_is_mem);
+ emit_modrm(p, src, dst);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
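+
+/* Illustrative sketch (not part of the original patch): x86_mov()
+ * below passes 0x8b/0x89 here, so one call site covers both
+ * directions, with eax/ebx standing for
+ * x86_make_reg(file_REG32, reg_AX/reg_BX):
+ *
+ *    x86_mov(p, eax, x86_make_disp(ebx, 8));    mov eax, [ebx+8]  = 8B 43 08
+ *    x86_mov(p, x86_make_disp(ebx, 8), eax);    mov [ebx+8], eax  = 89 43 08
+ */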
+
+
+
+
+
+
+
+/* Create and manipulate registers and regmem values:
+ */
+struct x86_reg x86_make_reg( enum x86_reg_file file,
+ enum x86_reg_name idx )
+{
+ struct x86_reg reg;
+
+ reg.file = file;
+ reg.idx = idx;
+ reg.mod = mod_REG;
+ reg.disp = 0;
+
+ return reg;
+}
+
+struct x86_reg x86_make_disp( struct x86_reg reg,
+ int disp )
+{
+ assert(reg.file == file_REG32);
+
+ if (reg.mod == mod_REG)
+ reg.disp = disp;
+ else
+ reg.disp += disp;
+
+ if (reg.disp == 0 && reg.idx != reg_BP)
+ reg.mod = mod_INDIRECT;
+ else if (reg.disp <= 127 && reg.disp >= -128)
+ reg.mod = mod_DISP8;
+ else
+ reg.mod = mod_DISP32;
+
+ return reg;
+}
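+
+/* Addressing-mode selection, illustrated (not part of the original
+ * patch): a zero displacement normally collapses to mod_INDIRECT,
+ * except for EBP, whose indirect encoding is used by the ISA for an
+ * absolute disp32 operand and which therefore keeps an explicit
+ * disp8 of 0:
+ *
+ *    x86_make_disp(ebx, 0)      ->  mod_INDIRECT  [ebx]
+ *    x86_make_disp(ebp, 0)      ->  mod_DISP8     [ebp+0]
+ *    x86_make_disp(ebx, 16)     ->  mod_DISP8     [ebx+16]
+ *    x86_make_disp(ebx, 400)    ->  mod_DISP32    [ebx+400]
+ */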
+
+struct x86_reg x86_deref( struct x86_reg reg )
+{
+ return x86_make_disp(reg, 0);
+}
+
+struct x86_reg x86_get_base_reg( struct x86_reg reg )
+{
+ return x86_make_reg( reg.file, reg.idx );
+}
+
+int x86_get_label( struct x86_function *p )
+{
+ return p->csr - p->store;
+}
+
+
+
+/***********************************************************************
+ * x86 instructions
+ */
+
+
+void x86_jcc( struct x86_function *p,
+ enum x86_cc cc,
+ int label )
+{
+ int offset = label - (x86_get_label(p) + 2);
+ DUMP_I(cc);
+
+ if (offset < 0) {
+ /*assert(p->csr - p->store > -offset);*/
+ if (p->csr - p->store <= -offset) {
+ /* probably out of memory (using the error_overflow buffer) */
+ return;
+ }
+ }
+
+ if (offset <= 127 && offset >= -128) {
+ emit_1ub(p, 0x70 + cc);
+ emit_1b(p, (char) offset);
+ }
+ else {
+ offset = label - (x86_get_label(p) + 6);
+ emit_2ub(p, 0x0f, 0x80 + cc);
+ emit_1i(p, offset);
+ }
+}
+
+/* Always use a 32bit offset for forward jumps:
+ */
+int x86_jcc_forward( struct x86_function *p,
+ enum x86_cc cc )
+{
+ DUMP_I(cc);
+ emit_2ub(p, 0x0f, 0x80 + cc);
+ emit_1i(p, 0);
+ return x86_get_label(p);
+}
+
+int x86_jmp_forward( struct x86_function *p)
+{
+ DUMP();
+ emit_1ub(p, 0xe9);
+ emit_1i(p, 0);
+ return x86_get_label(p);
+}
+
+int x86_call_forward( struct x86_function *p)
+{
+ DUMP();
+
+ emit_1ub(p, 0xe8);
+ emit_1i(p, 0);
+ return x86_get_label(p);
+}
+
+/* Fixup offset from forward jump:
+ */
+void x86_fixup_fwd_jump( struct x86_function *p,
+ int fixup )
+{
+ *(int *)(p->store + fixup - 4) = x86_get_label(p) - fixup;
+}
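+
+/* Typical forward-branch pattern (illustrative sketch, not from the
+ * original patch): emit the branch with a placeholder offset, keep the
+ * returned label, then patch it once the target is reached:
+ *
+ *    int fixup = x86_jcc_forward(p, cc_E);    placeholder 32-bit offset
+ *    ... emit the code that is skipped when the condition holds ...
+ *    x86_fixup_fwd_jump(p, fixup);            patch the offset to "here"
+ */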
+
+void x86_jmp( struct x86_function *p, int label)
+{
+ DUMP_I( label );
+ emit_1ub(p, 0xe9);
+ emit_1i(p, label - x86_get_label(p) - 4);
+}
+
+void x86_call( struct x86_function *p, struct x86_reg reg)
+{
+ DUMP_R( reg );
+ emit_1ub(p, 0xff);
+ emit_modrm_noreg(p, 2, reg);
+}
+
+
+void x86_mov_reg_imm( struct x86_function *p, struct x86_reg dst, int imm )
+{
+ DUMP_RI( dst, imm );
+ assert(dst.file == file_REG32);
+ assert(dst.mod == mod_REG);
+ emit_1ub(p, 0xb8 + dst.idx);
+ emit_1i(p, imm);
+}
+
+/**
+ * Immediate group 1 instructions.
+ */
+static INLINE void
+x86_group1_imm( struct x86_function *p,
+ unsigned op, struct x86_reg dst, int imm )
+{
+ assert(dst.file == file_REG32);
+ assert(dst.mod == mod_REG);
+ if(-0x80 <= imm && imm < 0x80) {
+ emit_1ub(p, 0x83);
+ emit_modrm_noreg(p, op, dst);
+ emit_1b(p, (char)imm);
+ }
+ else {
+ emit_1ub(p, 0x81);
+ emit_modrm_noreg(p, op, dst);
+ emit_1i(p, imm);
+ }
+}
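+
+/* Encoding example (illustrative, not from the original patch): small
+ * immediates take the sign-extended imm8 form, larger ones the imm32
+ * form, e.g. for "add eax, imm" with eax = x86_make_reg(file_REG32,
+ * reg_AX):
+ *
+ *    x86_add_imm(p, eax, 4);       emits 83 C0 04
+ *    x86_add_imm(p, eax, 1000);    emits 81 C0 E8 03 00 00
+ */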
+
+void x86_add_imm( struct x86_function *p, struct x86_reg dst, int imm )
+{
+ DUMP_RI( dst, imm );
+ x86_group1_imm(p, 0, dst, imm);
+}
+
+void x86_or_imm( struct x86_function *p, struct x86_reg dst, int imm )
+{
+ DUMP_RI( dst, imm );
+ x86_group1_imm(p, 1, dst, imm);
+}
+
+void x86_and_imm( struct x86_function *p, struct x86_reg dst, int imm )
+{
+ DUMP_RI( dst, imm );
+ x86_group1_imm(p, 4, dst, imm);
+}
+
+void x86_sub_imm( struct x86_function *p, struct x86_reg dst, int imm )
+{
+ DUMP_RI( dst, imm );
+ x86_group1_imm(p, 5, dst, imm);
+}
+
+void x86_xor_imm( struct x86_function *p, struct x86_reg dst, int imm )
+{
+ DUMP_RI( dst, imm );
+ x86_group1_imm(p, 6, dst, imm);
+}
+
+void x86_cmp_imm( struct x86_function *p, struct x86_reg dst, int imm )
+{
+ DUMP_RI( dst, imm );
+ x86_group1_imm(p, 7, dst, imm);
+}
+
+
+void x86_push( struct x86_function *p,
+ struct x86_reg reg )
+{
+ DUMP_R( reg );
+ if (reg.mod == mod_REG)
+ emit_1ub(p, 0x50 + reg.idx);
+ else
+ {
+ emit_1ub(p, 0xff);
+ emit_modrm_noreg(p, 6, reg);
+ }
+
+
+ p->stack_offset += 4;
+}
+
+void x86_push_imm32( struct x86_function *p,
+ int imm32 )
+{
+ DUMP_I( imm32 );
+ emit_1ub(p, 0x68);
+ emit_1i(p, imm32);
+
+ p->stack_offset += 4;
+}
+
+
+void x86_pop( struct x86_function *p,
+ struct x86_reg reg )
+{
+ DUMP_R( reg );
+ assert(reg.mod == mod_REG);
+ emit_1ub(p, 0x58 + reg.idx);
+ p->stack_offset -= 4;
+}
+
+void x86_inc( struct x86_function *p,
+ struct x86_reg reg )
+{
+ DUMP_R( reg );
+ assert(reg.mod == mod_REG);
+ emit_1ub(p, 0x40 + reg.idx);
+}
+
+void x86_dec( struct x86_function *p,
+ struct x86_reg reg )
+{
+ DUMP_R( reg );
+ assert(reg.mod == mod_REG);
+ emit_1ub(p, 0x48 + reg.idx);
+}
+
+void x86_ret( struct x86_function *p )
+{
+ DUMP();
+ assert(p->stack_offset == 0);
+ emit_1ub(p, 0xc3);
+}
+
+void x86_retw( struct x86_function *p, unsigned short imm )
+{
+ DUMP();
+ emit_3ub(p, 0xc2, imm & 0xff, (imm >> 8) & 0xff);
+}
+
+void x86_sahf( struct x86_function *p )
+{
+ DUMP();
+ emit_1ub(p, 0x9e);
+}
+
+void x86_mov( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_op_modrm( p, 0x8b, 0x89, dst, src );
+}
+
+void x86_xor( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_op_modrm( p, 0x33, 0x31, dst, src );
+}
+
+void x86_cmp( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_op_modrm( p, 0x3b, 0x39, dst, src );
+}
+
+void x86_lea( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_1ub(p, 0x8d);
+ emit_modrm( p, dst, src );
+}
+
+void x86_test( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_1ub(p, 0x85);
+ emit_modrm( p, dst, src );
+}
+
+void x86_add( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_op_modrm(p, 0x03, 0x01, dst, src );
+}
+
+/* Calculate EAX * src, results in EDX:EAX.
+ */
+void x86_mul( struct x86_function *p,
+ struct x86_reg src )
+{
+ DUMP_R( src );
+ emit_1ub(p, 0xf7);
+ emit_modrm_noreg(p, 4, src );
+}
+
+
+void x86_imul( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0xAF);
+ emit_modrm(p, dst, src);
+}
+
+
+void x86_sub( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_op_modrm(p, 0x2b, 0x29, dst, src );
+}
+
+void x86_or( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_op_modrm( p, 0x0b, 0x09, dst, src );
+}
+
+void x86_and( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_op_modrm( p, 0x23, 0x21, dst, src );
+}
+
+
+
+/***********************************************************************
+ * SSE instructions
+ */
+
+void sse_prefetchnta( struct x86_function *p, struct x86_reg ptr)
+{
+ DUMP_R( ptr );
+ assert(ptr.mod != mod_REG);
+ emit_2ub(p, 0x0f, 0x18);
+ emit_modrm_noreg(p, 0, ptr);
+}
+
+void sse_prefetch0( struct x86_function *p, struct x86_reg ptr)
+{
+ DUMP_R( ptr );
+ assert(ptr.mod != mod_REG);
+ emit_2ub(p, 0x0f, 0x18);
+ emit_modrm_noreg(p, 1, ptr);
+}
+
+void sse_prefetch1( struct x86_function *p, struct x86_reg ptr)
+{
+ DUMP_R( ptr );
+ assert(ptr.mod != mod_REG);
+ emit_2ub(p, 0x0f, 0x18);
+ emit_modrm_noreg(p, 2, ptr);
+}
+
+void sse_movntps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src)
+{
+ DUMP_RR( dst, src );
+
+ assert(dst.mod != mod_REG);
+ assert(src.mod == mod_REG);
+ emit_2ub(p, 0x0f, 0x2b);
+ emit_modrm(p, src, dst);
+}
+
+
+
+
+void sse_movss( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, 0xF3, X86_TWOB);
+ emit_op_modrm( p, 0x10, 0x11, dst, src );
+}
+
+void sse_movaps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_1ub(p, X86_TWOB);
+ emit_op_modrm( p, 0x28, 0x29, dst, src );
+}
+
+void sse_movups( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_1ub(p, X86_TWOB);
+ emit_op_modrm( p, 0x10, 0x11, dst, src );
+}
+
+void sse_movhps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ assert(dst.mod != mod_REG || src.mod != mod_REG);
+ emit_1ub(p, X86_TWOB);
+ emit_op_modrm( p, 0x16, 0x17, dst, src ); /* cf movlhps */
+}
+
+void sse_movlps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ assert(dst.mod != mod_REG || src.mod != mod_REG);
+ emit_1ub(p, X86_TWOB);
+ emit_op_modrm( p, 0x12, 0x13, dst, src ); /* cf movhlps */
+}
+
+void sse_maxps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x5F);
+ emit_modrm( p, dst, src );
+}
+
+void sse_maxss( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0xF3, X86_TWOB, 0x5F);
+ emit_modrm( p, dst, src );
+}
+
+void sse_divss( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0xF3, X86_TWOB, 0x5E);
+ emit_modrm( p, dst, src );
+}
+
+void sse_minps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x5D);
+ emit_modrm( p, dst, src );
+}
+
+void sse_subps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x5C);
+ emit_modrm( p, dst, src );
+}
+
+void sse_mulps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x59);
+ emit_modrm( p, dst, src );
+}
+
+void sse_mulss( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0xF3, X86_TWOB, 0x59);
+ emit_modrm( p, dst, src );
+}
+
+void sse_addps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x58);
+ emit_modrm( p, dst, src );
+}
+
+void sse_addss( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0xF3, X86_TWOB, 0x58);
+ emit_modrm( p, dst, src );
+}
+
+void sse_andnps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x55);
+ emit_modrm( p, dst, src );
+}
+
+void sse_andps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x54);
+ emit_modrm( p, dst, src );
+}
+
+void sse_rsqrtps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x52);
+ emit_modrm( p, dst, src );
+}
+
+void sse_rsqrtss( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0xF3, X86_TWOB, 0x52);
+ emit_modrm( p, dst, src );
+}
+
+void sse_movhlps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ assert(dst.mod == mod_REG && src.mod == mod_REG);
+ emit_2ub(p, X86_TWOB, 0x12);
+ emit_modrm( p, dst, src );
+}
+
+void sse_movlhps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ assert(dst.mod == mod_REG && src.mod == mod_REG);
+ emit_2ub(p, X86_TWOB, 0x16);
+ emit_modrm( p, dst, src );
+}
+
+void sse_orps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x56);
+ emit_modrm( p, dst, src );
+}
+
+void sse_xorps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x57);
+ emit_modrm( p, dst, src );
+}
+
+void sse_cvtps2pi( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ assert(dst.file == file_MMX &&
+ (src.file == file_XMM || src.mod != mod_REG));
+
+ p->need_emms = 1;
+
+ emit_2ub(p, X86_TWOB, 0x2d);
+ emit_modrm( p, dst, src );
+}
+
+void sse2_cvtdq2ps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x5b);
+ emit_modrm( p, dst, src );
+}
+
+
+/* Shufps can also be used to implement a reduced swizzle when dest ==
+ * arg0.
+ */
+void sse_shufps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src,
+ unsigned char shuf)
+{
+ DUMP_RRI( dst, src, shuf );
+ emit_2ub(p, X86_TWOB, 0xC6);
+ emit_modrm(p, dst, src);
+ emit_1ub(p, shuf);
+}
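+
+/* Usage sketch (illustrative, not from the original patch): with the
+ * SHUF() helper from rtasm_x86sse.h the immediate names the source
+ * lane for each destination lane, so
+ *
+ *    sse_shufps(p, dst, dst, SHUF(0, 0, 0, 0));    splat lane x
+ *    sse_shufps(p, dst, dst, SHUF(0, 1, 2, 3));    0xE4, identity
+ *
+ * give the "reduced swizzle" mentioned above for the dest == arg0 case.
+ */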
+
+void sse_unpckhps( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub( p, X86_TWOB, 0x15 );
+ emit_modrm( p, dst, src );
+}
+
+void sse_unpcklps( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub( p, X86_TWOB, 0x14 );
+ emit_modrm( p, dst, src );
+}
+
+void sse_cmpps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src,
+ enum sse_cc cc)
+{
+ DUMP_RRI( dst, src, cc );
+ emit_2ub(p, X86_TWOB, 0xC2);
+ emit_modrm(p, dst, src);
+ emit_1ub(p, cc);
+}
+
+void sse_pmovmskb( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src)
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0x66, X86_TWOB, 0xD7);
+ emit_modrm(p, dst, src);
+}
+
+void sse_movmskps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src)
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x50);
+ emit_modrm(p, dst, src);
+}
+
+/***********************************************************************
+ * SSE2 instructions
+ */
+
+/**
+ * Perform a reduced swizzle:
+ */
+void sse2_pshufd( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src,
+ unsigned char shuf)
+{
+ DUMP_RRI( dst, src, shuf );
+ emit_3ub(p, 0x66, X86_TWOB, 0x70);
+ emit_modrm(p, dst, src);
+ emit_1ub(p, shuf);
+}
+
+void sse2_cvttps2dq( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub( p, 0xF3, X86_TWOB, 0x5B );
+ emit_modrm( p, dst, src );
+}
+
+void sse2_cvtps2dq( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0x66, X86_TWOB, 0x5B);
+ emit_modrm( p, dst, src );
+}
+
+void sse2_packssdw( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0x66, X86_TWOB, 0x6B);
+ emit_modrm( p, dst, src );
+}
+
+void sse2_packsswb( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0x66, X86_TWOB, 0x63);
+ emit_modrm( p, dst, src );
+}
+
+void sse2_packuswb( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0x66, X86_TWOB, 0x67);
+ emit_modrm( p, dst, src );
+}
+
+void sse2_punpcklbw( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0x66, X86_TWOB, 0x60);
+ emit_modrm( p, dst, src );
+}
+
+
+void sse2_rcpps( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, X86_TWOB, 0x53);
+ emit_modrm( p, dst, src );
+}
+
+void sse2_rcpss( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_3ub(p, 0xF3, X86_TWOB, 0x53);
+ emit_modrm( p, dst, src );
+}
+
+void sse2_movd( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ emit_2ub(p, 0x66, X86_TWOB);
+ emit_op_modrm( p, 0x6e, 0x7e, dst, src );
+}
+
+
+
+
+/***********************************************************************
+ * x87 instructions
+ */
+static void note_x87_pop( struct x86_function *p )
+{
+ p->x87_stack--;
+ assert(p->x87_stack >= 0);
+}
+
+static void note_x87_push( struct x86_function *p )
+{
+ p->x87_stack++;
+ assert(p->x87_stack <= 7);
+}
+
+void x87_assert_stack_empty( struct x86_function *p )
+{
+ assert (p->x87_stack == 0);
+}
+
+
+void x87_fist( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ emit_1ub(p, 0xdb);
+ emit_modrm_noreg(p, 2, dst);
+}
+
+void x87_fistp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ emit_1ub(p, 0xdb);
+ emit_modrm_noreg(p, 3, dst);
+ note_x87_pop(p);
+}
+
+void x87_fild( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ emit_1ub(p, 0xdf);
+ emit_modrm_noreg(p, 0, arg);
+ note_x87_push(p);
+}
+
+void x87_fldz( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xee);
+ note_x87_push(p);
+}
+
+
+void x87_fldcw( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_REG32);
+ assert(arg.mod != mod_REG);
+ emit_1ub(p, 0xd9);
+ emit_modrm_noreg(p, 5, arg);
+}
+
+void x87_fld1( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xe8);
+ note_x87_push(p);
+}
+
+void x87_fldl2e( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xea);
+ note_x87_push(p);
+}
+
+void x87_fldln2( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xed);
+ note_x87_push(p);
+}
+
+void x87_fwait( struct x86_function *p )
+{
+ DUMP();
+ emit_1ub(p, 0x9b);
+}
+
+void x87_fnclex( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xdb, 0xe2);
+}
+
+void x87_fclex( struct x86_function *p )
+{
+ x87_fwait(p);
+ x87_fnclex(p);
+}
+
+void x87_fcmovb( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xda, 0xc0+arg.idx);
+}
+
+void x87_fcmove( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xda, 0xc8+arg.idx);
+}
+
+void x87_fcmovbe( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xda, 0xd0+arg.idx);
+}
+
+void x87_fcmovnb( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xdb, 0xc0+arg.idx);
+}
+
+void x87_fcmovne( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xdb, 0xc8+arg.idx);
+}
+
+void x87_fcmovnbe( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xdb, 0xd0+arg.idx);
+}
+
+
+
+static void x87_arith_op( struct x86_function *p, struct x86_reg dst, struct x86_reg arg,
+ unsigned char dst0ub0,
+ unsigned char dst0ub1,
+ unsigned char arg0ub0,
+ unsigned char arg0ub1,
+ unsigned char argmem_noreg)
+{
+ assert(dst.file == file_x87);
+
+ if (arg.file == file_x87) {
+ if (dst.idx == 0)
+ emit_2ub(p, dst0ub0, dst0ub1+arg.idx);
+ else if (arg.idx == 0)
+         emit_2ub(p, arg0ub0, arg0ub1+dst.idx);   /* dst is st(i), arg is st(0) */
+ else
+ assert(0);
+ }
+ else if (dst.idx == 0) {
+ assert(arg.file == file_REG32);
+ emit_1ub(p, 0xd8);
+ emit_modrm_noreg(p, argmem_noreg, arg);
+ }
+ else
+ assert(0);
+}
+
+void x87_fmul( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ x87_arith_op(p, dst, src,
+ 0xd8, 0xc8,
+ 0xdc, 0xc8,
+ 4);
+}
+
+void x87_fsub( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ x87_arith_op(p, dst, src,
+ 0xd8, 0xe0,
+ 0xdc, 0xe8,
+ 4);
+}
+
+void x87_fsubr( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ x87_arith_op(p, dst, src,
+ 0xd8, 0xe8,
+ 0xdc, 0xe0,
+ 5);
+}
+
+void x87_fadd( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ x87_arith_op(p, dst, src,
+ 0xd8, 0xc0,
+ 0xdc, 0xc0,
+ 0);
+}
+
+void x87_fdiv( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ x87_arith_op(p, dst, src,
+ 0xd8, 0xf0,
+ 0xdc, 0xf8,
+ 6);
+}
+
+void x87_fdivr( struct x86_function *p, struct x86_reg dst, struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ x87_arith_op(p, dst, src,
+ 0xd8, 0xf8,
+ 0xdc, 0xf0,
+ 7);
+}
+
+void x87_fmulp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ assert(dst.file == file_x87);
+ assert(dst.idx >= 1);
+ emit_2ub(p, 0xde, 0xc8+dst.idx);
+ note_x87_pop(p);
+}
+
+void x87_fsubp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ assert(dst.file == file_x87);
+ assert(dst.idx >= 1);
+ emit_2ub(p, 0xde, 0xe8+dst.idx);
+ note_x87_pop(p);
+}
+
+void x87_fsubrp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ assert(dst.file == file_x87);
+ assert(dst.idx >= 1);
+ emit_2ub(p, 0xde, 0xe0+dst.idx);
+ note_x87_pop(p);
+}
+
+void x87_faddp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ assert(dst.file == file_x87);
+ assert(dst.idx >= 1);
+ emit_2ub(p, 0xde, 0xc0+dst.idx);
+ note_x87_pop(p);
+}
+
+void x87_fdivp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ assert(dst.file == file_x87);
+ assert(dst.idx >= 1);
+ emit_2ub(p, 0xde, 0xf8+dst.idx);
+ note_x87_pop(p);
+}
+
+void x87_fdivrp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ assert(dst.file == file_x87);
+ assert(dst.idx >= 1);
+ emit_2ub(p, 0xde, 0xf0+dst.idx);
+ note_x87_pop(p);
+}
+
+void x87_ftst( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xe4);
+}
+
+void x87_fucom( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xdd, 0xe0+arg.idx);
+}
+
+void x87_fucomp( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xdd, 0xe8+arg.idx);
+ note_x87_pop(p);
+}
+
+void x87_fucompp( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xda, 0xe9);
+ note_x87_pop(p); /* pop twice */
+ note_x87_pop(p); /* pop twice */
+}
+
+void x87_fxch( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ assert(arg.file == file_x87);
+ emit_2ub(p, 0xd9, 0xc8+arg.idx);
+}
+
+void x87_fabs( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xe1);
+}
+
+void x87_fchs( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xe0);
+}
+
+void x87_fcos( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xff);
+}
+
+
+void x87_fprndint( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xfc);
+}
+
+void x87_fscale( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xfd);
+}
+
+void x87_fsin( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xfe);
+}
+
+void x87_fsincos( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xfb);
+}
+
+void x87_fsqrt( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xfa);
+}
+
+void x87_fxtract( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xf4);
+}
+
+/* st0 = (2^st0)-1
+ *
+ * Restrictions: -1.0 <= st0 <= 1.0
+ */
+void x87_f2xm1( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xf0);
+}
+
+/* st1 = st1 * log2(st0);
+ * pop_stack;
+ */
+void x87_fyl2x( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xf1);
+ note_x87_pop(p);
+}
+
+/* st1 = st1 * log2(st0 + 1.0);
+ * pop_stack;
+ *
+ * A fast operation, with restrictions: -.29 < st0 < .29
+ */
+void x87_fyl2xp1( struct x86_function *p )
+{
+ DUMP();
+ emit_2ub(p, 0xd9, 0xf9);
+ note_x87_pop(p);
+}
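+
+/* Combined example (illustrative, not from the original patch):
+ * fld1 followed by fyl2x computes a plain base-2 logarithm, since
+ * st1 = 1.0 * log2(st0).  With src a caller-provided memory operand:
+ *
+ *    x87_fld1(p);          push 1.0
+ *    x87_fld(p, src);      push x       (st0 = x, st1 = 1.0)
+ *    x87_fyl2x(p);         st0 = log2(x), one stack slot popped
+ */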
+
+
+void x87_fld( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ if (arg.file == file_x87)
+ emit_2ub(p, 0xd9, 0xc0 + arg.idx);
+ else {
+ emit_1ub(p, 0xd9);
+ emit_modrm_noreg(p, 0, arg);
+ }
+ note_x87_push(p);
+}
+
+void x87_fst( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ if (dst.file == file_x87)
+ emit_2ub(p, 0xdd, 0xd0 + dst.idx);
+ else {
+ emit_1ub(p, 0xd9);
+ emit_modrm_noreg(p, 2, dst);
+ }
+}
+
+void x87_fstp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ if (dst.file == file_x87)
+ emit_2ub(p, 0xdd, 0xd8 + dst.idx);
+ else {
+ emit_1ub(p, 0xd9);
+ emit_modrm_noreg(p, 3, dst);
+ }
+ note_x87_pop(p);
+}
+
+void x87_fpop( struct x86_function *p )
+{
+ x87_fstp( p, x86_make_reg( file_x87, 0 ));
+}
+
+
+void x87_fcom( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ if (dst.file == file_x87)
+ emit_2ub(p, 0xd8, 0xd0 + dst.idx);
+ else {
+ emit_1ub(p, 0xd8);
+ emit_modrm_noreg(p, 2, dst);
+ }
+}
+
+
+void x87_fcomp( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ if (dst.file == file_x87)
+ emit_2ub(p, 0xd8, 0xd8 + dst.idx);
+ else {
+ emit_1ub(p, 0xd8);
+ emit_modrm_noreg(p, 3, dst);
+ }
+ note_x87_pop(p);
+}
+
+void x87_fcomi( struct x86_function *p, struct x86_reg arg )
+{
+ DUMP_R( arg );
+ emit_2ub(p, 0xdb, 0xf0+arg.idx);
+}
+
+void x87_fcomip( struct x86_function *p, struct x86_reg arg )
+{
+   DUMP_R( arg );
+   emit_2ub(p, 0xdf, 0xf0+arg.idx);    /* fcomip is DF F0+i; DB F0+i is fcomi */
+   note_x87_pop(p);
+}
+
+
+void x87_fnstsw( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ assert(dst.file == file_REG32);
+
+ if (dst.idx == reg_AX &&
+ dst.mod == mod_REG)
+ emit_2ub(p, 0xdf, 0xe0);
+ else {
+ emit_1ub(p, 0xdd);
+ emit_modrm_noreg(p, 7, dst);
+ }
+}
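+
+/* Branch-on-x87-compare sketch (illustrative, not from the original
+ * patch): without fcomi-style instructions the classic sequence copies
+ * the status word into AX, moves C3/C2/C0 into ZF/PF/CF via sahf, and
+ * then branches on the unsigned condition codes (label here is a
+ * previously recorded x86_get_label() value):
+ *
+ *    x87_fucomp(p, x86_make_reg(file_x87, 1));          compare st0, st1; pop
+ *    x87_fnstsw(p, x86_make_reg(file_REG32, reg_AX));   status word -> AX
+ *    x86_sahf(p);                                       AH -> EFLAGS
+ *    x86_jcc(p, cc_AE, label);                          taken when st0 >= st1
+ */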
+
+
+void x87_fnstcw( struct x86_function *p, struct x86_reg dst )
+{
+ DUMP_R( dst );
+ assert(dst.file == file_REG32);
+
+   emit_1ub(p, 0x9b);        /* WAIT prefix: 9B D9 /7 encodes FSTCW rather than FNSTCW */
+ emit_1ub(p, 0xd9);
+ emit_modrm_noreg(p, 7, dst);
+}
+
+
+
+
+/***********************************************************************
+ * MMX instructions
+ */
+
+void mmx_emms( struct x86_function *p )
+{
+ DUMP();
+ assert(p->need_emms);
+ emit_2ub(p, 0x0f, 0x77);
+ p->need_emms = 0;
+}
+
+void mmx_packssdw( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ assert(dst.file == file_MMX &&
+ (src.file == file_MMX || src.mod != mod_REG));
+
+ p->need_emms = 1;
+
+ emit_2ub(p, X86_TWOB, 0x6b);
+ emit_modrm( p, dst, src );
+}
+
+void mmx_packuswb( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ assert(dst.file == file_MMX &&
+ (src.file == file_MMX || src.mod != mod_REG));
+
+ p->need_emms = 1;
+
+ emit_2ub(p, X86_TWOB, 0x67);
+ emit_modrm( p, dst, src );
+}
+
+void mmx_movd( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ p->need_emms = 1;
+ emit_1ub(p, X86_TWOB);
+ emit_op_modrm( p, 0x6e, 0x7e, dst, src );
+}
+
+void mmx_movq( struct x86_function *p,
+ struct x86_reg dst,
+ struct x86_reg src )
+{
+ DUMP_RR( dst, src );
+ p->need_emms = 1;
+ emit_1ub(p, X86_TWOB);
+ emit_op_modrm( p, 0x6f, 0x7f, dst, src );
+}
+
+
+/***********************************************************************
+ * Helper functions
+ */
+
+
+void x86_cdecl_caller_push_regs( struct x86_function *p )
+{
+ x86_push(p, x86_make_reg(file_REG32, reg_AX));
+ x86_push(p, x86_make_reg(file_REG32, reg_CX));
+ x86_push(p, x86_make_reg(file_REG32, reg_DX));
+}
+
+void x86_cdecl_caller_pop_regs( struct x86_function *p )
+{
+ x86_pop(p, x86_make_reg(file_REG32, reg_DX));
+ x86_pop(p, x86_make_reg(file_REG32, reg_CX));
+ x86_pop(p, x86_make_reg(file_REG32, reg_AX));
+}
+
+
+/* Retrieve a reference to one of the function arguments, taking into
+ * account any push/pop activity:
+ */
+struct x86_reg x86_fn_arg( struct x86_function *p,
+ unsigned arg )
+{
+ return x86_make_disp(x86_make_reg(file_REG32, reg_SP),
+                        p->stack_offset + arg * 4);   /* arg 1 = first argument; slot 0 holds the return address */
+}
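+
+/* Usage sketch (illustrative, not from the original patch): loading
+ * the first cdecl argument of the generated function into ECX:
+ *
+ *    x86_mov(p, x86_make_reg(file_REG32, reg_CX), x86_fn_arg(p, 1));
+ */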
+
+
+void x86_init_func( struct x86_function *p )
+{
+ p->size = 0;
+ p->store = NULL;
+ p->csr = p->store;
+ DUMP_START();
+}
+
+void x86_init_func_size( struct x86_function *p, unsigned code_size )
+{
+ p->size = code_size;
+ p->store = rtasm_exec_malloc(code_size);
+ if (p->store == NULL) {
+ p->store = p->error_overflow;
+ }
+ p->csr = p->store;
+ DUMP_START();
+}
+
+void x86_release_func( struct x86_function *p )
+{
+ if (p->store && p->store != p->error_overflow)
+ rtasm_exec_free(p->store);
+
+ p->store = NULL;
+ p->csr = NULL;
+ p->size = 0;
+}
+
+
+void (*x86_get_func( struct x86_function *p ))(void)
+{
+ DUMP_END();
+ if (DISASSEM && p->store)
+ debug_printf("disassemble %p %p\n", p->store, p->csr);
+
+ if (p->store == p->error_overflow)
+ return (void (*)(void)) NULL;
+ else
+ return (void (*)(void)) p->store;
+}
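+
+/* End-to-end sketch (illustrative, not from the original patch) of the
+ * build / run / release cycle this module provides:
+ *
+ *    struct x86_function f;
+ *    void (*fn)(void);
+ *
+ *    x86_init_func(&f);
+ *    x86_mov_reg_imm(&f, x86_make_reg(file_REG32, reg_AX), 42);
+ *    x86_ret(&f);
+ *
+ *    fn = x86_get_func(&f);
+ *    if (fn)
+ *       fn();                   run the generated code
+ *    x86_release_func(&f);
+ */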
+
+#else
+
+void x86sse_dummy( void )
+{
+}
+
+#endif
diff --git a/src/gallium/auxiliary/rtasm/rtasm_x86sse.h b/src/gallium/auxiliary/rtasm/rtasm_x86sse.h
new file mode 100644
index 0000000000..731a651796
--- /dev/null
+++ b/src/gallium/auxiliary/rtasm/rtasm_x86sse.h
@@ -0,0 +1,320 @@
+/**************************************************************************
+ *
+ * Copyright (C) 1999-2005 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _RTASM_X86SSE_H_
+#define _RTASM_X86SSE_H_
+
+#include "pipe/p_config.h"
+
+#if defined(PIPE_ARCH_X86)
+
+/* It is up to the caller to ensure that instructions issued are
+ * suitable for the host cpu. There are no checks made in this module
+ * for mmx/sse/sse2 support on the cpu.
+ */
+struct x86_reg {
+ unsigned file:3;
+ unsigned idx:3;
+ unsigned mod:2; /* mod_REG if this is just a register */
+ int disp:24; /* only +/- 23bits of offset - should be enough... */
+};
+
+struct x86_function {
+ unsigned size;
+ unsigned char *store;
+ unsigned char *csr;
+
+ unsigned stack_offset:16;
+ unsigned need_emms:8;
+ int x87_stack:8;
+
+ unsigned char error_overflow[4];
+};
+
+enum x86_reg_file {
+ file_REG32,
+ file_MMX,
+ file_XMM,
+ file_x87
+};
+
+/* Values for mod field of modr/m byte
+ */
+enum x86_reg_mod {
+ mod_INDIRECT,
+ mod_DISP8,
+ mod_DISP32,
+ mod_REG
+};
+
+enum x86_reg_name {
+ reg_AX,
+ reg_CX,
+ reg_DX,
+ reg_BX,
+ reg_SP,
+ reg_BP,
+ reg_SI,
+ reg_DI
+};
+
+
+enum x86_cc {
+ cc_O, /* overflow */
+ cc_NO, /* not overflow */
+ cc_NAE, /* not above or equal / carry */
+ cc_AE, /* above or equal / not carry */
+ cc_E, /* equal / zero */
+ cc_NE /* not equal / not zero */
+};
+
+enum sse_cc {
+ cc_Equal,
+ cc_LessThan,
+ cc_LessThanEqual,
+ cc_Unordered,
+ cc_NotEqual,
+ cc_NotLessThan,
+ cc_NotLessThanEqual,
+ cc_Ordered
+};
+
+#define cc_Z cc_E
+#define cc_NZ cc_NE
+
+/* Begin/end/retrieve function creation:
+ */
+
+
+void x86_init_func( struct x86_function *p );
+void x86_init_func_size( struct x86_function *p, unsigned code_size );
+void x86_release_func( struct x86_function *p );
+void (*x86_get_func( struct x86_function *p ))( void );
+
+/* Debugging:
+ */
+void x86_print_reg( struct x86_reg reg );
+
+
+/* Create and manipulate registers and regmem values:
+ */
+struct x86_reg x86_make_reg( enum x86_reg_file file,
+ enum x86_reg_name idx );
+
+struct x86_reg x86_make_disp( struct x86_reg reg,
+ int disp );
+
+struct x86_reg x86_deref( struct x86_reg reg );
+
+struct x86_reg x86_get_base_reg( struct x86_reg reg );
+
+
+/* Labels, jumps and fixup:
+ */
+int x86_get_label( struct x86_function *p );
+
+void x86_jcc( struct x86_function *p,
+ enum x86_cc cc,
+ int label );
+
+int x86_jcc_forward( struct x86_function *p,
+ enum x86_cc cc );
+
+int x86_jmp_forward( struct x86_function *p);
+
+int x86_call_forward( struct x86_function *p);
+
+void x86_fixup_fwd_jump( struct x86_function *p,
+ int fixup );
+
+void x86_jmp( struct x86_function *p, int label );
+
+/* void x86_call( struct x86_function *p, void (*label)() ); */
+void x86_call( struct x86_function *p, struct x86_reg reg);
+
+void x86_mov_reg_imm( struct x86_function *p, struct x86_reg dst, int imm );
+void x86_add_imm( struct x86_function *p, struct x86_reg dst, int imm );
+void x86_or_imm( struct x86_function *p, struct x86_reg dst, int imm );
+void x86_and_imm( struct x86_function *p, struct x86_reg dst, int imm );
+void x86_sub_imm( struct x86_function *p, struct x86_reg dst, int imm );
+void x86_xor_imm( struct x86_function *p, struct x86_reg dst, int imm );
+void x86_cmp_imm( struct x86_function *p, struct x86_reg dst, int imm );
+
+
+/* Macro for sse_shufps() and sse2_pshufd():
+ */
+#define SHUF(_x,_y,_z,_w) (((_x)<<0) | ((_y)<<2) | ((_z)<<4) | ((_w)<<6))
+#define SHUF_NOOP SHUF(0,1,2,3)
+#define GET_SHUF(swz, idx) (((swz) >> ((idx)*2)) & 0x3)
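+
+/* For reference (illustrative note, not in the original header):
+ * SHUF(0,1,2,3) == 0xE4 is the identity shuffle, SHUF(0,0,0,0)
+ * replicates lane x, and GET_SHUF() recovers the source lane chosen
+ * for a given destination lane, e.g. GET_SHUF(SHUF(3,2,1,0), 0) == 3.
+ */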
+
+void mmx_emms( struct x86_function *p );
+void mmx_movd( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void mmx_movq( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void mmx_packssdw( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void mmx_packuswb( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+
+void sse2_cvtps2dq( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse2_cvttps2dq( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse2_cvtdq2ps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse2_movd( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse2_packssdw( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse2_packsswb( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse2_packuswb( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse2_pshufd( struct x86_function *p, struct x86_reg dest, struct x86_reg arg0,
+ unsigned char shuf );
+void sse2_rcpps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse2_rcpss( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+
+
+void sse_prefetchnta( struct x86_function *p, struct x86_reg ptr);
+void sse_prefetch0( struct x86_function *p, struct x86_reg ptr);
+void sse_prefetch1( struct x86_function *p, struct x86_reg ptr);
+
+void sse_movntps( struct x86_function *p, struct x86_reg dst, struct x86_reg src);
+
+void sse_addps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_addss( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_cvtps2pi( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_divss( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_andnps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_andps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_cmpps( struct x86_function *p, struct x86_reg dst, struct x86_reg src,
+ enum sse_cc cc );
+void sse_maxps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_maxss( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_minps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_movaps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_movhlps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_movhps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_movlhps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_movlps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_movss( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_movups( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_mulps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_mulss( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_orps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_xorps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_subps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_rsqrtps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_rsqrtss( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_shufps( struct x86_function *p, struct x86_reg dest, struct x86_reg arg0,
+ unsigned char shuf );
+void sse_unpckhps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_unpcklps( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_pmovmskb( struct x86_function *p, struct x86_reg dest, struct x86_reg src );
+void sse2_punpcklbw( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void sse_movmskps( struct x86_function *p, struct x86_reg dst, struct x86_reg src);
+
+void x86_add( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void x86_and( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void x86_cmp( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void x86_dec( struct x86_function *p, struct x86_reg reg );
+void x86_inc( struct x86_function *p, struct x86_reg reg );
+void x86_lea( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void x86_mov( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void x86_mul( struct x86_function *p, struct x86_reg src );
+void x86_imul( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void x86_or( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void x86_pop( struct x86_function *p, struct x86_reg reg );
+void x86_push( struct x86_function *p, struct x86_reg reg );
+void x86_push_imm32( struct x86_function *p, int imm );
+void x86_ret( struct x86_function *p );
+void x86_retw( struct x86_function *p, unsigned short imm );
+void x86_sub( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void x86_test( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void x86_xor( struct x86_function *p, struct x86_reg dst, struct x86_reg src );
+void x86_sahf( struct x86_function *p );
+
+
+void x86_cdecl_caller_push_regs( struct x86_function *p );
+void x86_cdecl_caller_pop_regs( struct x86_function *p );
+
+void x87_assert_stack_empty( struct x86_function *p );
+
+void x87_f2xm1( struct x86_function *p );
+void x87_fabs( struct x86_function *p );
+void x87_fadd( struct x86_function *p, struct x86_reg dst, struct x86_reg arg );
+void x87_faddp( struct x86_function *p, struct x86_reg dst );
+void x87_fchs( struct x86_function *p );
+void x87_fclex( struct x86_function *p );
+void x87_fcmovb( struct x86_function *p, struct x86_reg src );
+void x87_fcmovbe( struct x86_function *p, struct x86_reg src );
+void x87_fcmove( struct x86_function *p, struct x86_reg src );
+void x87_fcmovnb( struct x86_function *p, struct x86_reg src );
+void x87_fcmovnbe( struct x86_function *p, struct x86_reg src );
+void x87_fcmovne( struct x86_function *p, struct x86_reg src );
+void x87_fcom( struct x86_function *p, struct x86_reg dst );
+void x87_fcomi( struct x86_function *p, struct x86_reg dst );
+void x87_fcomip( struct x86_function *p, struct x86_reg dst );
+void x87_fcomp( struct x86_function *p, struct x86_reg dst );
+void x87_fcos( struct x86_function *p );
+void x87_fdiv( struct x86_function *p, struct x86_reg dst, struct x86_reg arg );
+void x87_fdivp( struct x86_function *p, struct x86_reg dst );
+void x87_fdivr( struct x86_function *p, struct x86_reg dst, struct x86_reg arg );
+void x87_fdivrp( struct x86_function *p, struct x86_reg dst );
+void x87_fild( struct x86_function *p, struct x86_reg arg );
+void x87_fist( struct x86_function *p, struct x86_reg dst );
+void x87_fistp( struct x86_function *p, struct x86_reg dst );
+void x87_fld( struct x86_function *p, struct x86_reg arg );
+void x87_fld1( struct x86_function *p );
+void x87_fldcw( struct x86_function *p, struct x86_reg arg );
+void x87_fldl2e( struct x86_function *p );
+void x87_fldln2( struct x86_function *p );
+void x87_fldz( struct x86_function *p );
+void x87_fmul( struct x86_function *p, struct x86_reg dst, struct x86_reg arg );
+void x87_fmulp( struct x86_function *p, struct x86_reg dst );
+void x87_fnclex( struct x86_function *p );
+void x87_fprndint( struct x86_function *p );
+void x87_fpop( struct x86_function *p );
+void x87_fscale( struct x86_function *p );
+void x87_fsin( struct x86_function *p );
+void x87_fsincos( struct x86_function *p );
+void x87_fsqrt( struct x86_function *p );
+void x87_fst( struct x86_function *p, struct x86_reg dst );
+void x87_fstp( struct x86_function *p, struct x86_reg dst );
+void x87_fsub( struct x86_function *p, struct x86_reg dst, struct x86_reg arg );
+void x87_fsubp( struct x86_function *p, struct x86_reg dst );
+void x87_fsubr( struct x86_function *p, struct x86_reg dst, struct x86_reg arg );
+void x87_fsubrp( struct x86_function *p, struct x86_reg dst );
+void x87_ftst( struct x86_function *p );
+void x87_fxch( struct x86_function *p, struct x86_reg dst );
+void x87_fxtract( struct x86_function *p );
+void x87_fyl2x( struct x86_function *p );
+void x87_fyl2xp1( struct x86_function *p );
+void x87_fwait( struct x86_function *p );
+void x87_fnstcw( struct x86_function *p, struct x86_reg dst );
+void x87_fnstsw( struct x86_function *p, struct x86_reg dst );
+void x87_fucompp( struct x86_function *p );
+void x87_fucomp( struct x86_function *p, struct x86_reg arg );
+void x87_fucom( struct x86_function *p, struct x86_reg arg );
+
+
+
+/* Retrieve a reference to one of the function arguments, taking into
+ * account any push/pop activity.  Note - doesn't track explicit
+ * manipulation of ESP by other instructions.
+ */
+struct x86_reg x86_fn_arg( struct x86_function *p, unsigned arg );
+
+#endif
+#endif
diff --git a/src/gallium/auxiliary/sct/Makefile b/src/gallium/auxiliary/sct/Makefile
new file mode 100644
index 0000000000..a7d111b689
--- /dev/null
+++ b/src/gallium/auxiliary/sct/Makefile
@@ -0,0 +1,9 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = sct
+
+C_SOURCES = \
+ sct.c
+
+include ../../Makefile.template
diff --git a/src/gallium/auxiliary/sct/SConscript b/src/gallium/auxiliary/sct/SConscript
new file mode 100644
index 0000000000..76927d973f
--- /dev/null
+++ b/src/gallium/auxiliary/sct/SConscript
@@ -0,0 +1,9 @@
+Import('*')
+
+sct = env.ConvenienceLibrary(
+ target = 'sct',
+ source = [
+ 'sct.c'
+ ])
+
+auxiliaries.insert(0, sct)
diff --git a/src/gallium/auxiliary/sct/sct.c b/src/gallium/auxiliary/sct/sct.c
new file mode 100644
index 0000000000..722d2b7e66
--- /dev/null
+++ b/src/gallium/auxiliary/sct/sct.c
@@ -0,0 +1,453 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "util/u_memory.h"
+#include "pipe/p_state.h"
+#include "sct.h"
+
+
+struct texture_list
+{
+ struct pipe_texture *texture;
+ struct texture_list *next;
+};
+
+
+
+#define MAX_SURFACES ((PIPE_MAX_COLOR_BUFS) + 1)
+
+struct sct_context
+{
+ const struct pipe_context *context;
+
+ /** surfaces the context is drawing into */
+ struct pipe_surface *surfaces[MAX_SURFACES];
+
+ /** currently bound textures */
+ struct pipe_texture *textures[PIPE_MAX_SAMPLERS];
+
+ /** previously bound textures, used but not flushed */
+ struct texture_list *textures_used;
+
+ boolean needs_flush;
+
+ struct sct_context *next;
+};
+
+
+
+struct sct_surface
+{
+ const struct pipe_surface *surface;
+
+ /** list of contexts drawing to this surface */
+ struct sct_context_list *contexts;
+
+ struct sct_surface *next;
+};
+
+
+
+/**
+ * Find the surface_info for the given pipe_surface
+ */
+static struct sct_surface *
+find_surface_info(struct surface_context_tracker *sct,
+ const struct pipe_surface *surface)
+{
+ struct sct_surface *si;
+ for (si = sct->surfaces; si; si = si->next)
+ if (si->surface == surface)
+ return si;
+ return NULL;
+}
+
+
+/**
+ * As above, but create new surface_info if surface is new.
+ */
+static struct sct_surface *
+find_create_surface_info(struct surface_context_tracker *sct,
+ const struct pipe_surface *surface)
+{
+ struct sct_surface *si = find_surface_info(sct, surface);
+ if (si)
+ return si;
+
+ /* alloc new */
+ si = CALLOC_STRUCT(sct_surface);
+ if (si) {
+ si->surface = surface;
+
+ /* insert at head */
+ si->next = sct->surfaces;
+ sct->surfaces = si;
+ }
+
+ return si;
+}
+
+
+/**
+ * Find a context_info for the given context.
+ */
+static struct sct_context *
+find_context_info(struct surface_context_tracker *sct,
+ const struct pipe_context *context)
+{
+ struct sct_context *ci;
+ for (ci = sct->contexts; ci; ci = ci->next)
+ if (ci->context == context)
+ return ci;
+ return NULL;
+}
+
+
+/**
+ * As above, but create new context_info if context is new.
+ */
+static struct sct_context *
+find_create_context_info(struct surface_context_tracker *sct,
+ const struct pipe_context *context)
+{
+ struct sct_context *ci = find_context_info(sct, context);
+ if (ci)
+ return ci;
+
+ /* alloc new */
+ ci = CALLOC_STRUCT(sct_context);
+ if (ci) {
+ ci->context = context;
+
+ /* insert at head */
+ ci->next = sct->contexts;
+ sct->contexts = ci;
+ }
+
+ return ci;
+}
+
+
+/**
+ * Is the context already bound to the surface?
+ */
+static boolean
+find_surface_context(const struct sct_surface *si,
+ const struct pipe_context *context)
+{
+ const struct sct_context_list *cl;
+ for (cl = si->contexts; cl; cl = cl->next) {
+ if (cl->context == context) {
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+
+/**
+ * Add a context to the list of contexts associated with a surface.
+ */
+static void
+add_context_to_surface(struct sct_surface *si,
+ const struct pipe_context *context)
+{
+ struct sct_context_list *cl = CALLOC_STRUCT(sct_context_list);
+ if (cl) {
+ cl->context = context;
+ /* insert at head of list of contexts */
+ cl->next = si->contexts;
+ si->contexts = cl;
+ }
+}
+
+
+/**
+ * Remove a context from the list of contexts associated with a surface.
+ */
+static void
+remove_context_from_surface(struct sct_surface *si,
+ const struct pipe_context *context)
+{
+ struct sct_context_list *prev = NULL, *curr, *next;
+
+ for (curr = si->contexts; curr; curr = next) {
+ if (curr->context == context) {
+ /* remove */
+ if (prev)
+ prev->next = curr->next;
+ else
+ si->contexts = curr->next;
+ next = curr->next;
+ FREE(curr);
+ }
+ else {
+ prev = curr;
+ next = curr->next;
+ }
+ }
+}
+
+
+/**
+ * Unbind context from surface.
+ */
+static void
+unbind_context_surface(struct surface_context_tracker *sct,
+ struct pipe_context *context,
+ struct pipe_surface *surface)
+{
+ struct sct_surface *si = find_surface_info(sct, surface);
+ if (si) {
+ remove_context_from_surface(si, context);
+ }
+}
+
+
+/**
+ * Bind context to a set of surfaces (color + Z).
+ * Like MakeCurrent().
+ */
+void
+sct_bind_surfaces(struct surface_context_tracker *sct,
+ struct pipe_context *context,
+ uint num_surf,
+ struct pipe_surface **surfaces)
+{
+ struct sct_context *ci = find_create_context_info(sct, context);
+ uint i;
+
+ if (!ci) {
+ return; /* out of memory */
+ }
+
+ /* unbind currently bound surfaces */
+ for (i = 0; i < MAX_SURFACES; i++) {
+ if (ci->surfaces[i]) {
+ unbind_context_surface(sct, context, ci->surfaces[i]);
+ }
+ }
+
+ /* bind new surfaces */
+ for (i = 0; i < num_surf; i++) {
+ struct sct_surface *si = find_create_surface_info(sct, surfaces[i]);
+ if (!find_surface_context(si, context)) {
+ add_context_to_surface(si, context);
+ }
+ }
+}
+
+
+/**
+ * Return list of contexts bound to a surface.
+ */
+const struct sct_context_list *
+sct_get_surface_contexts(struct surface_context_tracker *sct,
+ const struct pipe_surface *surface)
+{
+ const struct sct_surface *si = find_surface_info(sct, surface);
+   return si ? si->contexts : NULL;
+}
+
+
+
+static boolean
+find_texture(const struct sct_context *ci,
+ const struct pipe_texture *texture)
+{
+ const struct texture_list *tl;
+
+ for (tl = ci->textures_used; tl; tl = tl->next) {
+ if (tl->texture == texture) {
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+
+/**
+ * Add the given texture to the context's list of used textures.
+ */
+static void
+add_texture_used(struct sct_context *ci,
+ struct pipe_texture *texture)
+{
+ if (!find_texture(ci, texture)) {
+ /* add to list */
+ struct texture_list *tl = CALLOC_STRUCT(texture_list);
+ if (tl) {
+ pipe_texture_reference(&tl->texture, texture);
+ /* insert at head */
+ tl->next = ci->textures_used;
+ ci->textures_used = tl;
+ }
+ }
+}
+
+
+/**
+ * Bind a texture to a rendering context.
+ */
+void
+sct_bind_texture(struct surface_context_tracker *sct,
+ struct pipe_context *context,
+ uint unit,
+ struct pipe_texture *tex)
+{
+ struct sct_context *ci = find_context_info(sct, context);
+
+   if (!ci)
+      return;
+
+ if (ci->textures[unit] != tex) {
+ /* put texture on the 'used' list */
+ add_texture_used(ci, tex);
+ /* bind new */
+ pipe_texture_reference(&ci->textures[unit], tex);
+ }
+}
+
+
+/**
+ * Check if the given texture has been used by the rendering context
+ * since the last call to sct_flush_textures().
+ */
+boolean
+sct_is_texture_used(struct surface_context_tracker *sct,
+ const struct pipe_context *context,
+ const struct pipe_texture *texture)
+{
+ const struct sct_context *ci = find_context_info(sct, context);
+   return ci ? find_texture(ci, texture) : FALSE;
+}
+
+
+/**
+ * To be called when the image contents of a texture are changed, such
+ * as for gl[Copy]TexSubImage().
+ * XXX this may not be needed
+ */
+void
+sct_update_texture(struct pipe_texture *tex)
+{
+   /* no-op for now; as noted above, this hook may not be needed */
+   (void) tex;
+}
+
+
+/**
+ * When a scene is flushed/rendered we can release the list of
+ * used textures.
+ */
+void
+sct_flush_textures(struct surface_context_tracker *sct,
+ struct pipe_context *context)
+{
+ struct sct_context *ci = find_context_info(sct, context);
+ struct texture_list *tl, *next;
+ uint i;
+
+   if (!ci)
+      return;
+
+ for (tl = ci->textures_used; tl; tl = next) {
+ next = tl->next;
+ pipe_texture_reference(&tl->texture, NULL);
+ FREE(tl);
+ }
+ ci->textures_used = NULL;
+
+ /* put the currently bound textures on the 'used' list */
+   for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
+      if (ci->textures[i]) {
+         add_texture_used(ci, ci->textures[i]);
+      }
+   }
+}
+
+
+
+void
+sct_destroy_context(struct surface_context_tracker *sct,
+ struct pipe_context *context)
+{
+ /* XXX should we require an unbinding first? */
+ {
+ struct sct_surface *si;
+ for (si = sct->surfaces; si; si = si->next) {
+ remove_context_from_surface(si, context);
+ }
+ }
+
+ /* remove context from context_info list */
+ {
+ struct sct_context *ci, *next, *prev = NULL;
+ for (ci = sct->contexts; ci; ci = next) {
+ next = ci->next;
+ if (ci->context == context) {
+ if (prev)
+ prev->next = ci->next;
+ else
+ sct->contexts = ci->next;
+ FREE(ci);
+ }
+ else {
+ prev = ci;
+ }
+ }
+ }
+
+}
+
+
+void
+sct_destroy_surface(struct surface_context_tracker *sct,
+ struct pipe_surface *surface)
+{
+ if (1) {
+ /* debug/sanity: no context should be bound to surface */
+ struct sct_context *ci;
+ uint i;
+ for (ci = sct->contexts; ci; ci = ci->next) {
+ for (i = 0; i < MAX_SURFACES; i++) {
+ assert(ci->surfaces[i] != surface);
+ }
+ }
+ }
+
+ /* remove surface from sct_surface list */
+ {
+ struct sct_surface *si, *next, *prev = NULL;
+ for (si = sct->surfaces; si; si = next) {
+ next = si->next;
+ if (si->surface == surface) {
+ /* unlink */
+ if (prev)
+ prev->next = si->next;
+ else
+ sct->surfaces = si->next;
+ FREE(si);
+ }
+ else {
+ prev = si;
+ }
+ }
+ }
+}
diff --git a/src/gallium/auxiliary/sct/sct.h b/src/gallium/auxiliary/sct/sct.h
new file mode 100644
index 0000000000..cf7c4d3bdf
--- /dev/null
+++ b/src/gallium/auxiliary/sct/sct.h
@@ -0,0 +1,123 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Surface/Context Tracking
+ *
+ * For some drivers, we need to monitor the binding between contexts and
+ * surfaces/textures.
+ * This code may evolve quite a bit...
+ */
+
+
+#ifndef SCT_H
+#define SCT_H
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct pipe_context;
+struct pipe_surface;
+
+struct sct_context;
+struct sct_surface;
+
+
+/**
+ * Per-device info, basically
+ */
+struct surface_context_tracker
+{
+ struct sct_context *contexts;
+ struct sct_surface *surfaces;
+};
+
+
+
+/**
+ * Simple linked list of contexts
+ */
+struct sct_context_list
+{
+ const struct pipe_context *context;
+ struct sct_context_list *next;
+};
+
+
+
+extern void
+sct_bind_surfaces(struct surface_context_tracker *sct,
+ struct pipe_context *context,
+ uint num_surf,
+ struct pipe_surface **surfaces);
+
+
+extern void
+sct_bind_texture(struct surface_context_tracker *sct,
+ struct pipe_context *context,
+ uint unit,
+ struct pipe_texture *texture);
+
+
+extern void
+sct_update_texture(struct pipe_texture *tex);
+
+
+extern boolean
+sct_is_texture_used(struct surface_context_tracker *sct,
+ const struct pipe_context *context,
+ const struct pipe_texture *texture);
+
+extern void
+sct_flush_textures(struct surface_context_tracker *sct,
+ struct pipe_context *context);
+
+
+extern const struct sct_context_list *
+sct_get_surface_contexts(struct surface_context_tracker *sct,
+ const struct pipe_surface *surf);
+
+
+extern void
+sct_destroy_context(struct surface_context_tracker *sct,
+ struct pipe_context *context);
+
+
+extern void
+sct_destroy_surface(struct surface_context_tracker *sct,
+ struct pipe_surface *surface);
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SCT_H */
diff --git a/src/gallium/auxiliary/sct/usage.c b/src/gallium/auxiliary/sct/usage.c
new file mode 100644
index 0000000000..6227f19962
--- /dev/null
+++ b/src/gallium/auxiliary/sct/usage.c
@@ -0,0 +1,61 @@
+/* surface / context tracking */
+
+
+/*
+
+context A:
+ render to texture T
+
+context B:
+ texture from T
+
+-----------------------
+
+flush surface:
+ which contexts are bound to the surface?
+
+-----------------------
+
+glTexSubImage():
+ which contexts need to be flushed?
+
+ */
+
+
+/*
+
+in MakeCurrent():
+
+ call sct_bind_surfaces(context, list of surfaces) to update the
+ dependencies between context and surfaces
+
+
+in SurfaceFlush(), or whatever it is in D3D:
+
+ call sct_get_surface_contexts(surface) to get a list of contexts
+ which are currently bound to the surface.
+
+
+
+in BindTexture():
+
+ call sct_bind_texture(context, texture) to indicate that the texture
+ is used in the scene.
+
+
+in glTexSubImage() or RenderToTexture():
+
+  call sct_is_texture_used(context, texture) to determine whether the texture
+  has been used in the scene while the scene has not been flushed yet.  If TRUE
+  is returned, the scene has to be rendered/flushed before the contents
+  of the texture can be changed.
+
+
+in psb_scene_flush/terminate():
+
+ call sct_flush_textures(context) to tell the SCT that the textures which
+ were used in the scene can be released.
+
+
+
+*/
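+
+
+/*
+
+Sketch of the above in C-like pseudo-code (illustrative only; the
+surface_context_tracker instance and the context/surface/texture handles
+are assumed to come from the driver):
+
+   // MakeCurrent
+   sct_bind_surfaces(sct, ctx, num_surf, surfaces);
+
+   // BindTexture
+   sct_bind_texture(sct, ctx, unit, tex);
+
+   // glTexSubImage / render-to-texture
+   if (sct_is_texture_used(sct, ctx, tex)) {
+      // render/flush the pending scene before modifying tex
+   }
+
+   // scene flush / terminate
+   sct_flush_textures(sct, ctx);
+
+   // surface flush
+   const struct sct_context_list *cl = sct_get_surface_contexts(sct, surf);
+   // walk the cl->next chain and flush each context bound to surf
+
+*/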
diff --git a/src/gallium/auxiliary/tgsi/Makefile b/src/gallium/auxiliary/tgsi/Makefile
new file mode 100644
index 0000000000..5f0a580b09
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/Makefile
@@ -0,0 +1,22 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = tgsi
+
+C_SOURCES = \
+ tgsi_sanity.c \
+ tgsi_build.c \
+ tgsi_dump.c \
+ tgsi_exec.c \
+ tgsi_info.c \
+ tgsi_iterate.c \
+ tgsi_parse.c \
+ tgsi_ppc.c \
+ tgsi_scan.c \
+ tgsi_sse2.c \
+ tgsi_text.c \
+ tgsi_transform.c \
+ tgsi_ureg.c \
+ tgsi_util.c
+
+include ../../Makefile.template
diff --git a/src/gallium/auxiliary/tgsi/SConscript b/src/gallium/auxiliary/tgsi/SConscript
new file mode 100644
index 0000000000..b6bc2924f0
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/SConscript
@@ -0,0 +1,23 @@
+Import('*')
+
+tgsi = env.ConvenienceLibrary(
+ target = 'tgsi',
+ source = [
+ 'tgsi_build.c',
+ 'tgsi_dump.c',
+ 'tgsi_dump_c.c',
+ 'tgsi_exec.c',
+ 'tgsi_info.c',
+ 'tgsi_iterate.c',
+ 'tgsi_parse.c',
+ 'tgsi_sanity.c',
+ 'tgsi_scan.c',
+ 'tgsi_ppc.c',
+ 'tgsi_sse2.c',
+ 'tgsi_text.c',
+ 'tgsi_transform.c',
+ 'tgsi_ureg.c',
+ 'tgsi_util.c',
+ ])
+
+auxiliaries.insert(0, tgsi)
diff --git a/src/gallium/auxiliary/tgsi/tgsi-instruction-set.txt b/src/gallium/auxiliary/tgsi/tgsi-instruction-set.txt
new file mode 100644
index 0000000000..eb492076b7
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi-instruction-set.txt
@@ -0,0 +1,1131 @@
+TGSI Instruction Specification
+==============================
+
+
+1 Instruction Set Operations
+=============================
+
+
+1.1 GL_NV_vertex_program
+-------------------------
+
+
+1.1.1 ARL - Address Register Load
+
+ dst.x = floor(src.x)
+ dst.y = floor(src.y)
+ dst.z = floor(src.z)
+ dst.w = floor(src.w)
+
+
+1.1.2 MOV - Move
+
+ dst.x = src.x
+ dst.y = src.y
+ dst.z = src.z
+ dst.w = src.w
+
+
+1.1.3 LIT - Light Coefficients
+
+ dst.x = 1.0
+ dst.y = max(src.x, 0.0)
+ dst.z = (src.x > 0.0) ? pow(max(src.y, 0.0), clamp(src.w, -128.0, 128.0)) : 0.0
+ dst.w = 1.0
+
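+  Worked example (illustrative): for src = (2.0, 3.0, -5.0, 4.0),
+  dst = (1.0, 2.0, pow(3.0, 4.0) = 81.0, 1.0).
+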
+
+1.1.4 RCP - Reciprocal
+
+ dst.x = 1.0 / src.x
+ dst.y = 1.0 / src.x
+ dst.z = 1.0 / src.x
+ dst.w = 1.0 / src.x
+
+
+1.1.5 RSQ - Reciprocal Square Root
+
+ dst.x = 1.0 / sqrt(abs(src.x))
+ dst.y = 1.0 / sqrt(abs(src.x))
+ dst.z = 1.0 / sqrt(abs(src.x))
+ dst.w = 1.0 / sqrt(abs(src.x))
+
+
+1.1.6 EXP - Approximate Exponential Base 2
+
+ dst.x = pow(2.0, floor(src.x))
+ dst.y = src.x - floor(src.x)
+ dst.z = pow(2.0, src.x)
+ dst.w = 1.0
+
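+  Worked example (illustrative): for src.x = 2.5,
+  dst = (4.0, 0.5, pow(2.0, 2.5) ~= 5.657, 1.0).
+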
+
+1.1.7 LOG - Approximate Logarithm Base 2
+
+ dst.x = floor(lg2(abs(src.x)))
+ dst.y = abs(src.x) / pow(2.0, floor(lg2(abs(src.x))))
+ dst.z = lg2(abs(src.x))
+ dst.w = 1.0
+
+
+1.1.8 MUL - Multiply
+
+ dst.x = src0.x * src1.x
+ dst.y = src0.y * src1.y
+ dst.z = src0.z * src1.z
+ dst.w = src0.w * src1.w
+
+
+1.1.9 ADD - Add
+
+ dst.x = src0.x + src1.x
+ dst.y = src0.y + src1.y
+ dst.z = src0.z + src1.z
+ dst.w = src0.w + src1.w
+
+
+1.1.10 DP3 - 3-component Dot Product
+
+ dst.x = src0.x * src1.x + src0.y * src1.y + src0.z * src1.z
+ dst.y = src0.x * src1.x + src0.y * src1.y + src0.z * src1.z
+ dst.z = src0.x * src1.x + src0.y * src1.y + src0.z * src1.z
+ dst.w = src0.x * src1.x + src0.y * src1.y + src0.z * src1.z
+
+
+1.1.11 DP4 - 4-component Dot Product
+
+ dst.x = src0.x * src1.x + src0.y * src1.y + src0.z * src1.z + src0.w * src1.w
+ dst.y = src0.x * src1.x + src0.y * src1.y + src0.z * src1.z + src0.w * src1.w
+ dst.z = src0.x * src1.x + src0.y * src1.y + src0.z * src1.z + src0.w * src1.w
+ dst.w = src0.x * src1.x + src0.y * src1.y + src0.z * src1.z + src0.w * src1.w
+
+
+1.1.12 DST - Distance Vector
+
+ dst.x = 1.0
+ dst.y = src0.y * src1.y
+ dst.z = src0.z
+ dst.w = src1.w
+
+
+1.1.13 MIN - Minimum
+
+ dst.x = min(src0.x, src1.x)
+ dst.y = min(src0.y, src1.y)
+ dst.z = min(src0.z, src1.z)
+ dst.w = min(src0.w, src1.w)
+
+
+1.1.14 MAX - Maximum
+
+ dst.x = max(src0.x, src1.x)
+ dst.y = max(src0.y, src1.y)
+ dst.z = max(src0.z, src1.z)
+ dst.w = max(src0.w, src1.w)
+
+
+1.1.15 SLT - Set On Less Than
+
+ dst.x = (src0.x < src1.x) ? 1.0 : 0.0
+ dst.y = (src0.y < src1.y) ? 1.0 : 0.0
+ dst.z = (src0.z < src1.z) ? 1.0 : 0.0
+ dst.w = (src0.w < src1.w) ? 1.0 : 0.0
+
+
+1.1.16 SGE - Set On Greater Equal Than
+
+ dst.x = (src0.x >= src1.x) ? 1.0 : 0.0
+ dst.y = (src0.y >= src1.y) ? 1.0 : 0.0
+ dst.z = (src0.z >= src1.z) ? 1.0 : 0.0
+ dst.w = (src0.w >= src1.w) ? 1.0 : 0.0
+
+
+1.1.17 MAD - Multiply And Add
+
+ dst.x = src0.x * src1.x + src2.x
+ dst.y = src0.y * src1.y + src2.y
+ dst.z = src0.z * src1.z + src2.z
+ dst.w = src0.w * src1.w + src2.w
+
+
+1.2 GL_ATI_fragment_shader
+---------------------------
+
+
+1.2.1 SUB - Subtract
+
+ dst.x = src0.x - src1.x
+ dst.y = src0.y - src1.y
+ dst.z = src0.z - src1.z
+ dst.w = src0.w - src1.w
+
+
+1.2.2 DOT3 - 3-component Dot Product
+
+ Alias for DP3.
+
+
+1.2.3 DOT4 - 4-component Dot Product
+
+ Alias for DP4.
+
+
+1.2.4 LERP - Linear Interpolate
+
+ dst.x = src0.x * (src1.x - src2.x) + src2.x
+ dst.y = src0.y * (src1.y - src2.y) + src2.y
+ dst.z = src0.z * (src1.z - src2.z) + src2.z
+ dst.w = src0.w * (src1.w - src2.w) + src2.w
+
+
+1.2.5 CND - Condition
+
+ dst.x = (src2.x > 0.5) ? src0.x : src1.x
+ dst.y = (src2.y > 0.5) ? src0.y : src1.y
+ dst.z = (src2.z > 0.5) ? src0.z : src1.z
+ dst.w = (src2.w > 0.5) ? src0.w : src1.w
+
+
+1.2.6 CND0 - Condition Zero
+
+ Removed. Use (CMP src2, src1, src0) instead.
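+
+  (CND0, as defined by GL_ATI_fragment_shader, selected src0 where src2 >= 0.0
+  and src1 otherwise; CMP with src0 and src2 swapped yields the same result.)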
+
+1.2.7 DOT2ADD - 2-component Dot Product And Add
+
+ dst.x = src0.x * src1.x + src0.y * src1.y + src2.x
+ dst.y = src0.x * src1.x + src0.y * src1.y + src2.x
+ dst.z = src0.x * src1.x + src0.y * src1.y + src2.x
+ dst.w = src0.x * src1.x + src0.y * src1.y + src2.x
+
+
+1.3 GL_EXT_vertex_shader
+-------------------------
+
+
+1.3.1 INDEX - Array Lookup
+
+ Considered for removal from language.
+
+
+1.3.2 NEGATE - Negate
+
+ Considered for removal from language.
+
+
+1.3.3 MADD - Multiply And Add
+
+ Alias for MAD.
+
+
+1.3.4 FRAC - Fraction
+
+ dst.x = src.x - floor(src.x)
+ dst.y = src.y - floor(src.y)
+ dst.z = src.z - floor(src.z)
+ dst.w = src.w - floor(src.w)
+
+
+1.3.5 SETGE - Set On Greater Equal
+
+ Alias for SGE.
+
+
+1.3.6 SETLT - Set On Less Than
+
+ Alias for SLT.
+
+
+1.3.7 CLAMP - Clamp
+
+ dst.x = clamp(src0.x, src1.x, src2.x)
+ dst.y = clamp(src0.y, src1.y, src2.y)
+ dst.z = clamp(src0.z, src1.z, src2.z)
+ dst.w = clamp(src0.w, src1.w, src2.w)
+
+
+1.3.8 FLOOR - Floor
+
+ dst.x = floor(src.x)
+ dst.y = floor(src.y)
+ dst.z = floor(src.z)
+ dst.w = floor(src.w)
+
+
+1.3.9 ROUND - Round
+
+ dst.x = round(src.x)
+ dst.y = round(src.y)
+ dst.z = round(src.z)
+ dst.w = round(src.w)
+
+
+1.3.10 EXPBASE2 - Exponential Base 2
+
+ dst.x = pow(2.0, src.x)
+ dst.y = pow(2.0, src.x)
+ dst.z = pow(2.0, src.x)
+ dst.w = pow(2.0, src.x)
+
+
+1.3.11 LOGBASE2 - Logarithm Base 2
+
+ dst.x = lg2(src.x)
+ dst.y = lg2(src.x)
+ dst.z = lg2(src.x)
+ dst.w = lg2(src.x)
+
+
+1.3.12 POWER - Power
+
+ dst.x = pow(src0.x, src1.x)
+ dst.y = pow(src0.x, src1.x)
+ dst.z = pow(src0.x, src1.x)
+ dst.w = pow(src0.x, src1.x)
+
+
+1.3.13 RECIP - Reciprocal
+
+ Alias for RCP.
+
+
+1.3.14 RECIPSQRT - Reciprocal Square Root
+
+ Alias for RSQ.
+
+
+1.3.15 CROSSPRODUCT - Cross Product
+
+ dst.x = src0.y * src1.z - src1.y * src0.z
+ dst.y = src0.z * src1.x - src1.z * src0.x
+ dst.z = src0.x * src1.y - src1.x * src0.y
+ dst.w = 1.0
+
+
+1.3.16 MULTIPLYMATRIX - Multiply Matrix
+
+ Considered for removal from language.
+
+
+1.4 GL_NV_vertex_program1_1
+----------------------------
+
+
+1.4.1 ABS - Absolute
+
+ dst.x = abs(src.x)
+ dst.y = abs(src.y)
+ dst.z = abs(src.z)
+ dst.w = abs(src.w)
+
+
+1.4.2 RCC - Reciprocal Clamped
+
+ dst.x = (1.0 / src.x) > 0.0 ? clamp(1.0 / src.x, 5.42101e-020, 1.884467e+019) : clamp(1.0 / src.x, -1.884467e+019, -5.42101e-020)
+ dst.y = (1.0 / src.x) > 0.0 ? clamp(1.0 / src.x, 5.42101e-020, 1.884467e+019) : clamp(1.0 / src.x, -1.884467e+019, -5.42101e-020)
+ dst.z = (1.0 / src.x) > 0.0 ? clamp(1.0 / src.x, 5.42101e-020, 1.884467e+019) : clamp(1.0 / src.x, -1.884467e+019, -5.42101e-020)
+ dst.w = (1.0 / src.x) > 0.0 ? clamp(1.0 / src.x, 5.42101e-020, 1.884467e+019) : clamp(1.0 / src.x, -1.884467e+019, -5.42101e-020)
+
+
+1.4.3 DPH - Homogeneous Dot Product
+
+ dst.x = src0.x * src1.x + src0.y * src1.y + src0.z * src1.z + src1.w
+ dst.y = src0.x * src1.x + src0.y * src1.y + src0.z * src1.z + src1.w
+ dst.z = src0.x * src1.x + src0.y * src1.y + src0.z * src1.z + src1.w
+ dst.w = src0.x * src1.x + src0.y * src1.y + src0.z * src1.z + src1.w
+
+
+1.5 GL_NV_fragment_program
+---------------------------
+
+
+1.5.1 COS - Cosine
+
+ dst.x = cos(src.x)
+ dst.y = cos(src.x)
+ dst.z = cos(src.x)
+  dst.w = cos(src.x)
+
+
+1.5.2 DDX - Derivative Relative To X
+
+ dst.x = partialx(src.x)
+ dst.y = partialx(src.y)
+ dst.z = partialx(src.z)
+ dst.w = partialx(src.w)
+
+
+1.5.3 DDY - Derivative Relative To Y
+
+ dst.x = partialy(src.x)
+ dst.y = partialy(src.y)
+ dst.z = partialy(src.z)
+ dst.w = partialy(src.w)
+
+
+1.5.4 EX2 - Exponential Base 2
+
+ Alias for EXPBASE2.
+
+
+1.5.5 FLR - Floor
+
+ Alias for FLOOR.
+
+
+1.5.6 FRC - Fraction
+
+ Alias for FRAC.
+
+
+1.5.7 KILP - Predicated Discard
+
+ discard
+
+
+1.5.8 LG2 - Logarithm Base 2
+
+ Alias for LOGBASE2.
+
+
+1.5.9 LRP - Linear Interpolate
+
+ Alias for LERP.
+
+
+1.5.10 PK2H - Pack Two 16-bit Floats
+
+ TBD
+
+
+1.5.11 PK2US - Pack Two Unsigned 16-bit Scalars
+
+ TBD
+
+
+1.5.12 PK4B - Pack Four Signed 8-bit Scalars
+
+ TBD
+
+
+1.5.13 PK4UB - Pack Four Unsigned 8-bit Scalars
+
+ TBD
+
+
+1.5.14 POW - Power
+
+ Alias for POWER.
+
+
+1.5.15 RFL - Reflection Vector
+
+ dst.x = 2.0 * (src0.x * src1.x + src0.y * src1.y + src0.z * src1.z) / (src0.x * src0.x + src0.y * src0.y + src0.z * src0.z) * src0.x - src1.x
+ dst.y = 2.0 * (src0.x * src1.x + src0.y * src1.y + src0.z * src1.z) / (src0.x * src0.x + src0.y * src0.y + src0.z * src0.z) * src0.y - src1.y
+ dst.z = 2.0 * (src0.x * src1.x + src0.y * src1.y + src0.z * src1.z) / (src0.x * src0.x + src0.y * src0.y + src0.z * src0.z) * src0.z - src1.z
+ dst.w = 1.0
+
+
+1.5.16 SEQ - Set On Equal
+
+ dst.x = (src0.x == src1.x) ? 1.0 : 0.0
+ dst.y = (src0.y == src1.y) ? 1.0 : 0.0
+ dst.z = (src0.z == src1.z) ? 1.0 : 0.0
+ dst.w = (src0.w == src1.w) ? 1.0 : 0.0
+
+
+1.5.17 SFL - Set On False
+
+ dst.x = 0.0
+ dst.y = 0.0
+ dst.z = 0.0
+ dst.w = 0.0
+
+
+1.5.18 SGT - Set On Greater Than
+
+ dst.x = (src0.x > src1.x) ? 1.0 : 0.0
+ dst.y = (src0.y > src1.y) ? 1.0 : 0.0
+ dst.z = (src0.z > src1.z) ? 1.0 : 0.0
+ dst.w = (src0.w > src1.w) ? 1.0 : 0.0
+
+
+1.5.19 SIN - Sine
+
+ dst.x = sin(src.x)
+ dst.y = sin(src.x)
+ dst.z = sin(src.x)
+  dst.w = sin(src.x)
+
+
+1.5.20 SLE - Set On Less Equal Than
+
+ dst.x = (src0.x <= src1.x) ? 1.0 : 0.0
+ dst.y = (src0.y <= src1.y) ? 1.0 : 0.0
+ dst.z = (src0.z <= src1.z) ? 1.0 : 0.0
+ dst.w = (src0.w <= src1.w) ? 1.0 : 0.0
+
+
+1.5.21 SNE - Set On Not Equal
+
+ dst.x = (src0.x != src1.x) ? 1.0 : 0.0
+ dst.y = (src0.y != src1.y) ? 1.0 : 0.0
+ dst.z = (src0.z != src1.z) ? 1.0 : 0.0
+ dst.w = (src0.w != src1.w) ? 1.0 : 0.0
+
+
+1.5.22 STR - Set On True
+
+ dst.x = 1.0
+ dst.y = 1.0
+ dst.z = 1.0
+ dst.w = 1.0
+
+
+1.5.23 TEX - Texture Lookup
+
+ TBD
+
+
+1.5.24 TXD - Texture Lookup with Derivatives
+
+ TBD
+
+
+1.5.25 TXP - Projective Texture Lookup
+
+ TBD
+
+
+1.5.26 UP2H - Unpack Two 16-Bit Floats
+
+ TBD
+
+
+1.5.27 UP2US - Unpack Two Unsigned 16-Bit Scalars
+
+ TBD
+
+
+1.5.28 UP4B - Unpack Four Signed 8-Bit Values
+
+ TBD
+
+
+1.5.29 UP4UB - Unpack Four Unsigned 8-Bit Scalars
+
+ TBD
+
+
+1.5.30 X2D - 2D Coordinate Transformation
+
+ dst.x = src0.x + src1.x * src2.x + src1.y * src2.y
+ dst.y = src0.y + src1.x * src2.z + src1.y * src2.w
+ dst.z = src0.x + src1.x * src2.x + src1.y * src2.y
+ dst.w = src0.y + src1.x * src2.z + src1.y * src2.w
+
+
+1.6 GL_NV_vertex_program2
+--------------------------
+
+
+1.6.1 ARA - Address Register Add
+
+ TBD
+
+
+1.6.2 ARR - Address Register Load With Round
+
+ dst.x = round(src.x)
+ dst.y = round(src.y)
+ dst.z = round(src.z)
+ dst.w = round(src.w)
+
+
+1.6.3 BRA - Branch
+
+ pc = target
+
+
+1.6.4 CAL - Subroutine Call
+
+ push(pc)
+ pc = target
+
+
+1.6.5 RET - Subroutine Call Return
+
+ pc = pop()
+
+
+1.6.6 SSG - Set Sign
+
+ dst.x = (src.x > 0.0) ? 1.0 : (src.x < 0.0) ? -1.0 : 0.0
+ dst.y = (src.y > 0.0) ? 1.0 : (src.y < 0.0) ? -1.0 : 0.0
+ dst.z = (src.z > 0.0) ? 1.0 : (src.z < 0.0) ? -1.0 : 0.0
+ dst.w = (src.w > 0.0) ? 1.0 : (src.w < 0.0) ? -1.0 : 0.0
+
+
+1.7 GL_ARB_vertex_program
+--------------------------
+
+
+1.7.1 SWZ - Extended Swizzle
+
+ dst.x = src.x
+ dst.y = src.y
+ dst.z = src.z
+ dst.w = src.w
+
+
+1.7.2 XPD - Cross Product
+
+ Alias for CROSSPRODUCT.
+
+
+1.8 GL_ARB_fragment_program
+----------------------------
+
+
+1.8.1 CMP - Compare
+
+ dst.x = (src0.x < 0.0) ? src1.x : src2.x
+ dst.y = (src0.y < 0.0) ? src1.y : src2.y
+ dst.z = (src0.z < 0.0) ? src1.z : src2.z
+ dst.w = (src0.w < 0.0) ? src1.w : src2.w
+
+
+1.8.2 KIL - Conditional Discard
+
+ if (src.x < 0.0 || src.y < 0.0 || src.z < 0.0 || src.w < 0.0)
+ discard
+ endif
+
+
+1.8.3 SCS - Sine Cosine
+
+ dst.x = cos(src.x)
+ dst.y = sin(src.x)
+ dst.z = 0.0
+  dst.w = 1.0
+
+
+1.8.4 TXB - Texture Lookup With Bias
+
+ TBD
+
+
+1.9 GL_NV_fragment_program2
+----------------------------
+
+
+1.9.1 NRM - 3-component Vector Normalise
+
+  dst.x = src.x / sqrt(src.x * src.x + src.y * src.y + src.z * src.z)
+  dst.y = src.y / sqrt(src.x * src.x + src.y * src.y + src.z * src.z)
+  dst.z = src.z / sqrt(src.x * src.x + src.y * src.y + src.z * src.z)
+ dst.w = 1.0
+
+
+1.9.2 DIV - Divide
+
+ dst.x = src0.x / src1.x
+ dst.y = src0.y / src1.y
+ dst.z = src0.z / src1.z
+ dst.w = src0.w / src1.w
+
+
+1.9.3 DP2 - 2-component Dot Product
+
+ dst.x = src0.x * src1.x + src0.y * src1.y
+ dst.y = src0.x * src1.x + src0.y * src1.y
+ dst.z = src0.x * src1.x + src0.y * src1.y
+ dst.w = src0.x * src1.x + src0.y * src1.y
+
+
+1.9.4 DP2A - 2-component Dot Product And Add
+
+ Alias for DOT2ADD.
+
+
+1.9.5 TXL - Texture Lookup With LOD
+
+ TBD
+
+
+1.9.6 BRK - Break
+
+ TBD
+
+
+1.9.7 IF - If
+
+ TBD
+
+
+1.9.8 BGNFOR - Begin a For-Loop
+
+ dst.x = floor(src.x)
+ dst.y = floor(src.y)
+ dst.z = floor(src.z)
+
+ if (dst.y <= 0)
+ pc = [matching ENDFOR] + 1
+ endif
+
+ Note: The destination must be a loop register.
+ The source must be a constant register.
+
+
+1.9.9 REP - Repeat
+
+ TBD
+
+
+1.9.10 ELSE - Else
+
+ TBD
+
+
+1.9.11 ENDIF - End If
+
+ TBD
+
+
+1.9.12 ENDFOR - End a For-Loop
+
+ dst.x = dst.x + dst.z
+ dst.y = dst.y - 1.0
+
+ if (dst.y > 0)
+ pc = [matching BGNFOR instruction] + 1
+ endif
+
+ Note: The destination must be a loop register.
+
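+  Taken together with BGNFOR (1.9.8), the loop register behaves roughly like
+  the following C-style sketch (illustrative only; src is the constant
+  register given to BGNFOR):
+
+    loop.x = floor(src.x)            // current value
+    loop.y = floor(src.y)            // remaining iteration count
+    loop.z = floor(src.z)            // per-iteration increment
+    while (loop.y > 0) {
+       ... loop body ...
+       loop.x = loop.x + loop.z
+       loop.y = loop.y - 1.0
+    }
+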
+
+1.9.13 ENDREP - End Repeat
+
+ TBD
+
+
+1.10 GL_NV_vertex_program3
+---------------------------
+
+
+1.10.1 PUSHA - Push Address Register On Stack
+
+ push(src.x)
+ push(src.y)
+ push(src.z)
+ push(src.w)
+
+
+1.10.2 POPA - Pop Address Register From Stack
+
+ dst.w = pop()
+ dst.z = pop()
+ dst.y = pop()
+ dst.x = pop()
+
+
+1.11 GL_NV_gpu_program4
+------------------------
+
+
+1.11.1 CEIL - Ceiling
+
+ dst.x = ceil(src.x)
+ dst.y = ceil(src.y)
+ dst.z = ceil(src.z)
+ dst.w = ceil(src.w)
+
+
+1.11.2 I2F - Integer To Float
+
+ dst.x = (float) src.x
+ dst.y = (float) src.y
+ dst.z = (float) src.z
+ dst.w = (float) src.w
+
+
+1.11.3 NOT - Bitwise Not
+
+ dst.x = ~src.x
+ dst.y = ~src.y
+ dst.z = ~src.z
+ dst.w = ~src.w
+
+
+1.11.4 TRUNC - Truncate
+
+ dst.x = trunc(src.x)
+ dst.y = trunc(src.y)
+ dst.z = trunc(src.z)
+ dst.w = trunc(src.w)
+
+
+1.11.5 SHL - Shift Left
+
+ dst.x = src0.x << src1.x
+ dst.y = src0.y << src1.x
+ dst.z = src0.z << src1.x
+ dst.w = src0.w << src1.x
+
+
+1.11.6 SHR - Shift Right
+
+ dst.x = src0.x >> src1.x
+ dst.y = src0.y >> src1.x
+ dst.z = src0.z >> src1.x
+ dst.w = src0.w >> src1.x
+
+
+1.11.7 AND - Bitwise And
+
+ dst.x = src0.x & src1.x
+ dst.y = src0.y & src1.y
+ dst.z = src0.z & src1.z
+ dst.w = src0.w & src1.w
+
+
+1.11.8 OR - Bitwise Or
+
+ dst.x = src0.x | src1.x
+ dst.y = src0.y | src1.y
+ dst.z = src0.z | src1.z
+ dst.w = src0.w | src1.w
+
+
+1.11.9 MOD - Modulus
+
+ dst.x = src0.x % src1.x
+ dst.y = src0.y % src1.y
+ dst.z = src0.z % src1.z
+ dst.w = src0.w % src1.w
+
+
+1.11.10 XOR - Bitwise Xor
+
+ dst.x = src0.x ^ src1.x
+ dst.y = src0.y ^ src1.y
+ dst.z = src0.z ^ src1.z
+ dst.w = src0.w ^ src1.w
+
+
+1.11.11 SAD - Sum Of Absolute Differences
+
+ dst.x = abs(src0.x - src1.x) + src2.x
+ dst.y = abs(src0.y - src1.y) + src2.y
+ dst.z = abs(src0.z - src1.z) + src2.z
+ dst.w = abs(src0.w - src1.w) + src2.w
+
+
+1.11.12 TXF - Texel Fetch
+
+ TBD
+
+
+1.11.13 TXQ - Texture Size Query
+
+ TBD
+
+
+1.11.14 CONT - Continue
+
+ TBD
+
+
+1.12 GL_NV_geometry_program4
+-----------------------------
+
+
+1.12.1 EMIT - Emit
+
+ TBD
+
+
+1.12.2 ENDPRIM - End Primitive
+
+ TBD
+
+
+1.13 GLSL
+----------
+
+
+1.13.1 BGNLOOP - Begin a Loop
+
+ TBD
+
+
+1.13.2 BGNSUB - Begin Subroutine
+
+ TBD
+
+
+1.13.3 ENDLOOP - End a Loop
+
+ TBD
+
+
+1.13.4 ENDSUB - End Subroutine
+
+ TBD
+
+
+1.13.5 INT - Truncate
+
+ Alias for TRUNC.
+
+
+1.13.6 NOISE1 - 1D Noise
+
+ TBD
+
+
+1.13.7 NOISE2 - 2D Noise
+
+ TBD
+
+
+1.13.8 NOISE3 - 3D Noise
+
+ TBD
+
+
+1.13.9 NOISE4 - 4D Noise
+
+ TBD
+
+
+1.13.10 NOP - No Operation
+
+ Do nothing.
+
+
+1.14 ps_1_1
+------------
+
+
+1.14.1 TEXKILL - Conditional Discard
+
+ Alias for KIL.
+
+
+1.15 ps_1_4
+------------
+
+
+1.15.1 TEXLD - Texture Lookup
+
+ Alias for TEX.
+
+
+1.16 ps_2_0
+------------
+
+
+1.16.1 M4X4 - Multiply Matrix
+
+ Alias for MULTIPLYMATRIX.
+
+
+1.16.2 M4X3 - Multiply Matrix
+
+ Considered for removal from language.
+
+
+1.16.3 M3X4 - Multiply Matrix
+
+ Considered for removal from language.
+
+
+1.16.4 M3X3 - Multiply Matrix
+
+ Considered for removal from language.
+
+
+1.16.5 M3X2 - Multiply Matrix
+
+ Considered for removal from language.
+
+
+1.16.6 CRS - Cross Product
+
+ Alias for XPD.
+
+
+1.16.7 NRM4 - 4-component Vector Normalise
+
+  dst.x = src.x / sqrt(src.x * src.x + src.y * src.y + src.z * src.z + src.w * src.w)
+  dst.y = src.y / sqrt(src.x * src.x + src.y * src.y + src.z * src.z + src.w * src.w)
+  dst.z = src.z / sqrt(src.x * src.x + src.y * src.y + src.z * src.z + src.w * src.w)
+  dst.w = src.w / sqrt(src.x * src.x + src.y * src.y + src.z * src.z + src.w * src.w)
+
+
+1.16.8 SINCOS - Sine Cosine
+
+ Alias for SCS.
+
+
+1.16.9 TEXLDB - Texture Lookup With Bias
+
+ Alias for TXB.
+
+
+1.16.10 DP2ADD - 2-component Dot Product And Add
+
+ Alias for DP2A.
+
+
+1.17 ps_2_x
+------------
+
+
+1.17.1 CALL - Subroutine Call
+
+ Alias for CAL.
+
+
+1.17.2 CALLNZ - Subroutine Call If Not Zero
+
+ TBD
+
+
+1.17.3 IFC - If
+
+ TBD
+
+
+1.17.4 BREAK - Break
+
+ Alias for BRK.
+
+
+1.17.5 BREAKC - Break Conditional
+
+ TBD
+
+
+1.17.6 DSX - Derivative Relative To X
+
+ Alias for DDX.
+
+
+1.17.7 DSY - Derivative Relative To Y
+
+ Alias for DDY.
+
+
+1.17.8 TEXLDD - Texture Lookup with Derivatives
+
+ Alias for TXD.
+
+
+1.18 vs_1_1
+------------
+
+
+1.18.1 EXPP - Approximate Exponential Base 2
+
+ Use EXP. See also 1.19.3.
+
+
+1.18.2 LOGP - Logarithm Base 2
+
+ Use LOG. See also 1.19.4.
+
+
+1.19 vs_2_0
+------------
+
+
+1.19.1 SGN - Set Sign
+
+ Alias for SSG.
+
+
+1.19.2 MOVA - Move Address Register
+
+ Alias for ARR.
+
+
+1.19.3 EXPP - Approximate Exponential Base 2
+
+ Use EX2.
+
+
+1.19.4 LOGP - Logarithm Base 2
+
+ Use LG2.
+
+
+2 Explanation of symbols used
+==============================
+
+
+2.1 Functions
+--------------
+
+
+ abs(x) Absolute value of x.
+ |x|
+ (x < 0.0) ? -x : x
+
+ ceil(x) Ceiling of x.
+
+ clamp(x,y,z) Clamp x between y and z.
+ (x < y) ? y : (x > z) ? z : x
+
+ cos(x) Cosine of x.
+
+ floor(x) Floor of x.
+
+ lg2(x) Logarithm base 2 of x.
+
+ max(x,y) Maximum of x and y.
+ (x > y) ? x : y
+
+ min(x,y) Minimum of x and y.
+ (x < y) ? x : y
+
+ partialx(x) Derivative of x relative to fragment's X.
+
+ partialy(x) Derivative of x relative to fragment's Y.
+
+ pop() Pop from stack.
+
+ pow(x,y) Raise x to power of y.
+
+ push(x) Push x on stack.
+
+ round(x) Round x.
+
+ sin(x) Sine of x.
+
+ sqrt(x) Square root of x.
+
+ trunc(x) Truncate x.
+
+
+2.2 Keywords
+-------------
+
+
+ discard Discard fragment.
+
+ dst First destination register.
+
+ dst0 First destination register.
+
+ pc Program counter.
+
+ src First source register.
+
+ src0 First source register.
+
+ src1 Second source register.
+
+ src2 Third source register.
+
+ target Label of target instruction.
+
diff --git a/src/gallium/auxiliary/tgsi/tgsi_build.c b/src/gallium/auxiliary/tgsi/tgsi_build.c
new file mode 100644
index 0000000000..e0cfc54420
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_build.c
@@ -0,0 +1,1368 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_debug.h"
+#include "pipe/p_shader_tokens.h"
+#include "tgsi_build.h"
+#include "tgsi_parse.h"
+
+/*
+ * version
+ */
+
+struct tgsi_version
+tgsi_build_version( void )
+{
+ struct tgsi_version version;
+
+ version.MajorVersion = 1;
+ version.MinorVersion = 1;
+ version.Padding = 0;
+
+ return version;
+}
+
+/*
+ * header
+ */
+
+struct tgsi_header
+tgsi_build_header( void )
+{
+ struct tgsi_header header;
+
+ header.HeaderSize = 1;
+ header.BodySize = 0;
+
+ return header;
+}
+
+static void
+header_headersize_grow( struct tgsi_header *header )
+{
+ assert( header->HeaderSize < 0xFF );
+ assert( header->BodySize == 0 );
+
+ header->HeaderSize++;
+}
+
+static void
+header_bodysize_grow( struct tgsi_header *header )
+{
+ assert( header->BodySize < 0xFFFFFF );
+
+ header->BodySize++;
+}
+
+struct tgsi_processor
+tgsi_default_processor( void )
+{
+ struct tgsi_processor processor;
+
+ processor.Processor = TGSI_PROCESSOR_FRAGMENT;
+ processor.Padding = 0;
+
+ return processor;
+}
+
+struct tgsi_processor
+tgsi_build_processor(
+ unsigned type,
+ struct tgsi_header *header )
+{
+ struct tgsi_processor processor;
+
+ processor = tgsi_default_processor();
+ processor.Processor = type;
+
+ header_headersize_grow( header );
+
+ return processor;
+}
+
+/*
+ * declaration
+ */
+
+struct tgsi_declaration
+tgsi_default_declaration( void )
+{
+ struct tgsi_declaration declaration;
+
+ declaration.Type = TGSI_TOKEN_TYPE_DECLARATION;
+ declaration.NrTokens = 1;
+ declaration.File = TGSI_FILE_NULL;
+ declaration.UsageMask = TGSI_WRITEMASK_XYZW;
+ declaration.Interpolate = TGSI_INTERPOLATE_CONSTANT;
+ declaration.Semantic = 0;
+ declaration.Centroid = 0;
+ declaration.Invariant = 0;
+ declaration.Padding = 0;
+ declaration.Extended = 0;
+
+ return declaration;
+}
+
+struct tgsi_declaration
+tgsi_build_declaration(
+ unsigned file,
+ unsigned usage_mask,
+ unsigned interpolate,
+ unsigned semantic,
+ unsigned centroid,
+ unsigned invariant,
+ struct tgsi_header *header )
+{
+ struct tgsi_declaration declaration;
+
+ assert( file < TGSI_FILE_COUNT );
+ assert( interpolate < TGSI_INTERPOLATE_COUNT );
+
+ declaration = tgsi_default_declaration();
+ declaration.File = file;
+ declaration.UsageMask = usage_mask;
+ declaration.Interpolate = interpolate;
+ declaration.Semantic = semantic;
+ declaration.Centroid = centroid;
+ declaration.Invariant = invariant;
+
+ header_bodysize_grow( header );
+
+ return declaration;
+}
+
+static void
+declaration_grow(
+ struct tgsi_declaration *declaration,
+ struct tgsi_header *header )
+{
+ assert( declaration->NrTokens < 0xFF );
+
+ declaration->NrTokens++;
+
+ header_bodysize_grow( header );
+}
+
+struct tgsi_full_declaration
+tgsi_default_full_declaration( void )
+{
+ struct tgsi_full_declaration full_declaration;
+
+ full_declaration.Declaration = tgsi_default_declaration();
+ full_declaration.DeclarationRange = tgsi_default_declaration_range();
+ full_declaration.Semantic = tgsi_default_declaration_semantic();
+
+ return full_declaration;
+}
+
+unsigned
+tgsi_build_full_declaration(
+ const struct tgsi_full_declaration *full_decl,
+ struct tgsi_token *tokens,
+ struct tgsi_header *header,
+ unsigned maxsize )
+{
+ unsigned size = 0;
+ struct tgsi_declaration *declaration;
+ struct tgsi_declaration_range *dr;
+
+ if( maxsize <= size )
+ return 0;
+ declaration = (struct tgsi_declaration *) &tokens[size];
+ size++;
+
+ *declaration = tgsi_build_declaration(
+ full_decl->Declaration.File,
+ full_decl->Declaration.UsageMask,
+ full_decl->Declaration.Interpolate,
+ full_decl->Declaration.Semantic,
+ full_decl->Declaration.Centroid,
+ full_decl->Declaration.Invariant,
+ header );
+
+ if (maxsize <= size)
+ return 0;
+ dr = (struct tgsi_declaration_range *) &tokens[size];
+ size++;
+
+ *dr = tgsi_build_declaration_range(
+ full_decl->DeclarationRange.First,
+ full_decl->DeclarationRange.Last,
+ declaration,
+ header );
+
+ if( full_decl->Declaration.Semantic ) {
+ struct tgsi_declaration_semantic *ds;
+
+ if( maxsize <= size )
+ return 0;
+ ds = (struct tgsi_declaration_semantic *) &tokens[size];
+ size++;
+
+ *ds = tgsi_build_declaration_semantic(
+ full_decl->Semantic.SemanticName,
+ full_decl->Semantic.SemanticIndex,
+ declaration,
+ header );
+ }
+
+ return size;
+}
+
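+/* Usage sketch (illustrative only; the array size and the TGSI_FILE_INPUT /
+ * TGSI_PROCESSOR_VERTEX enum values are assumptions made for the example):
+ *
+ *    struct tgsi_token tokens[64];
+ *    struct tgsi_header *header;
+ *    struct tgsi_full_declaration decl;
+ *    unsigned used;
+ *
+ *    *(struct tgsi_version *) &tokens[0] = tgsi_build_version();
+ *    header = (struct tgsi_header *) &tokens[1];
+ *    *header = tgsi_build_header();
+ *    *(struct tgsi_processor *) &tokens[2] =
+ *       tgsi_build_processor( TGSI_PROCESSOR_VERTEX, header );
+ *    used = 3;
+ *
+ *    decl = tgsi_default_full_declaration();
+ *    decl.Declaration.File = TGSI_FILE_INPUT;
+ *    decl.DeclarationRange.First = 0;
+ *    decl.DeclarationRange.Last = 0;
+ *    used += tgsi_build_full_declaration( &decl, &tokens[used],
+ *                                         header, 64 - used );
+ */
+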
+struct tgsi_declaration_range
+tgsi_default_declaration_range( void )
+{
+ struct tgsi_declaration_range dr;
+
+ dr.First = 0;
+ dr.Last = 0;
+
+ return dr;
+}
+
+struct tgsi_declaration_range
+tgsi_build_declaration_range(
+ unsigned first,
+ unsigned last,
+ struct tgsi_declaration *declaration,
+ struct tgsi_header *header )
+{
+ struct tgsi_declaration_range declaration_range;
+
+ assert( last >= first );
+ assert( last <= 0xFFFF );
+
+ declaration_range = tgsi_default_declaration_range();
+ declaration_range.First = first;
+ declaration_range.Last = last;
+
+ declaration_grow( declaration, header );
+
+ return declaration_range;
+}
+
+struct tgsi_declaration_semantic
+tgsi_default_declaration_semantic( void )
+{
+ struct tgsi_declaration_semantic ds;
+
+ ds.SemanticName = TGSI_SEMANTIC_POSITION;
+ ds.SemanticIndex = 0;
+ ds.Padding = 0;
+
+ return ds;
+}
+
+struct tgsi_declaration_semantic
+tgsi_build_declaration_semantic(
+ unsigned semantic_name,
+ unsigned semantic_index,
+ struct tgsi_declaration *declaration,
+ struct tgsi_header *header )
+{
+ struct tgsi_declaration_semantic ds;
+
+   assert( semantic_name < TGSI_SEMANTIC_COUNT );
+ assert( semantic_index <= 0xFFFF );
+
+ ds = tgsi_default_declaration_semantic();
+ ds.SemanticName = semantic_name;
+ ds.SemanticIndex = semantic_index;
+
+ declaration_grow( declaration, header );
+
+ return ds;
+}
+
+/*
+ * immediate
+ */
+
+struct tgsi_immediate
+tgsi_default_immediate( void )
+{
+ struct tgsi_immediate immediate;
+
+ immediate.Type = TGSI_TOKEN_TYPE_IMMEDIATE;
+ immediate.NrTokens = 1;
+ immediate.DataType = TGSI_IMM_FLOAT32;
+ immediate.Padding = 0;
+ immediate.Extended = 0;
+
+ return immediate;
+}
+
+struct tgsi_immediate
+tgsi_build_immediate(
+ struct tgsi_header *header )
+{
+ struct tgsi_immediate immediate;
+
+ immediate = tgsi_default_immediate();
+
+ header_bodysize_grow( header );
+
+ return immediate;
+}
+
+struct tgsi_full_immediate
+tgsi_default_full_immediate( void )
+{
+ struct tgsi_full_immediate fullimm;
+
+ fullimm.Immediate = tgsi_default_immediate();
+ fullimm.u[0].Float = 0.0f;
+ fullimm.u[1].Float = 0.0f;
+ fullimm.u[2].Float = 0.0f;
+ fullimm.u[3].Float = 0.0f;
+
+ return fullimm;
+}
+
+static void
+immediate_grow(
+ struct tgsi_immediate *immediate,
+ struct tgsi_header *header )
+{
+ assert( immediate->NrTokens < 0xFF );
+
+ immediate->NrTokens++;
+
+ header_bodysize_grow( header );
+}
+
+union tgsi_immediate_data
+tgsi_build_immediate_float32(
+ float value,
+ struct tgsi_immediate *immediate,
+ struct tgsi_header *header )
+{
+ union tgsi_immediate_data immediate_data;
+
+ immediate_data.Float = value;
+
+ immediate_grow( immediate, header );
+
+ return immediate_data;
+}
+
+unsigned
+tgsi_build_full_immediate(
+ const struct tgsi_full_immediate *full_imm,
+ struct tgsi_token *tokens,
+ struct tgsi_header *header,
+ unsigned maxsize )
+{
+ unsigned size = 0, i;
+ struct tgsi_immediate *immediate;
+
+ if( maxsize <= size )
+ return 0;
+ immediate = (struct tgsi_immediate *) &tokens[size];
+ size++;
+
+ *immediate = tgsi_build_immediate( header );
+
+ assert( full_imm->Immediate.NrTokens <= 4 + 1 );
+
+ for( i = 0; i < full_imm->Immediate.NrTokens - 1; i++ ) {
+ union tgsi_immediate_data *data;
+
+ if( maxsize <= size )
+ return 0;
+ data = (union tgsi_immediate_data *) &tokens[size];
+ size++;
+
+ *data = tgsi_build_immediate_float32(
+ full_imm->u[i].Float,
+ immediate,
+ header );
+ }
+
+ return size;
+}
+
+/*
+ * instruction
+ */
+
+struct tgsi_instruction
+tgsi_default_instruction( void )
+{
+ struct tgsi_instruction instruction;
+
+ instruction.Type = TGSI_TOKEN_TYPE_INSTRUCTION;
+ instruction.NrTokens = 1;
+ instruction.Opcode = TGSI_OPCODE_MOV;
+ instruction.Saturate = TGSI_SAT_NONE;
+ instruction.NumDstRegs = 1;
+ instruction.NumSrcRegs = 1;
+ instruction.Padding = 0;
+ instruction.Extended = 0;
+
+ return instruction;
+}
+
+struct tgsi_instruction
+tgsi_build_instruction(
+ unsigned opcode,
+ unsigned saturate,
+ unsigned num_dst_regs,
+ unsigned num_src_regs,
+ struct tgsi_header *header )
+{
+ struct tgsi_instruction instruction;
+
+ assert (opcode <= TGSI_OPCODE_LAST);
+ assert (saturate <= TGSI_SAT_MINUS_PLUS_ONE);
+ assert (num_dst_regs <= 3);
+ assert (num_src_regs <= 15);
+
+ instruction = tgsi_default_instruction();
+ instruction.Opcode = opcode;
+ instruction.Saturate = saturate;
+ instruction.NumDstRegs = num_dst_regs;
+ instruction.NumSrcRegs = num_src_regs;
+
+ header_bodysize_grow( header );
+
+ return instruction;
+}
+
+static void
+instruction_grow(
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header )
+{
+ assert (instruction->NrTokens < 0xFF);
+
+ instruction->NrTokens++;
+
+ header_bodysize_grow( header );
+}
+
+struct tgsi_full_instruction
+tgsi_default_full_instruction( void )
+{
+ struct tgsi_full_instruction full_instruction;
+ unsigned i;
+
+ full_instruction.Instruction = tgsi_default_instruction();
+ full_instruction.InstructionExtNv = tgsi_default_instruction_ext_nv();
+ full_instruction.InstructionExtLabel = tgsi_default_instruction_ext_label();
+ full_instruction.InstructionExtTexture = tgsi_default_instruction_ext_texture();
+ for( i = 0; i < TGSI_FULL_MAX_DST_REGISTERS; i++ ) {
+ full_instruction.FullDstRegisters[i] = tgsi_default_full_dst_register();
+ }
+ for( i = 0; i < TGSI_FULL_MAX_SRC_REGISTERS; i++ ) {
+ full_instruction.FullSrcRegisters[i] = tgsi_default_full_src_register();
+ }
+
+ full_instruction.Flags = 0x0;
+
+ return full_instruction;
+}
+
+unsigned
+tgsi_build_full_instruction(
+ const struct tgsi_full_instruction *full_inst,
+ struct tgsi_token *tokens,
+ struct tgsi_header *header,
+ unsigned maxsize )
+{
+ unsigned size = 0;
+ unsigned i;
+ struct tgsi_instruction *instruction;
+ struct tgsi_token *prev_token;
+
+ if( maxsize <= size )
+ return 0;
+ instruction = (struct tgsi_instruction *) &tokens[size];
+ size++;
+
+ *instruction = tgsi_build_instruction(
+ full_inst->Instruction.Opcode,
+ full_inst->Instruction.Saturate,
+ full_inst->Instruction.NumDstRegs,
+ full_inst->Instruction.NumSrcRegs,
+ header );
+ prev_token = (struct tgsi_token *) instruction;
+
+ if( tgsi_compare_instruction_ext_nv(
+ full_inst->InstructionExtNv,
+ tgsi_default_instruction_ext_nv() ) ) {
+ struct tgsi_instruction_ext_nv *instruction_ext_nv;
+
+ if( maxsize <= size )
+ return 0;
+ instruction_ext_nv =
+ (struct tgsi_instruction_ext_nv *) &tokens[size];
+ size++;
+
+ *instruction_ext_nv = tgsi_build_instruction_ext_nv(
+ full_inst->InstructionExtNv.Precision,
+ full_inst->InstructionExtNv.CondDstIndex,
+ full_inst->InstructionExtNv.CondFlowIndex,
+ full_inst->InstructionExtNv.CondMask,
+ full_inst->InstructionExtNv.CondSwizzleX,
+ full_inst->InstructionExtNv.CondSwizzleY,
+ full_inst->InstructionExtNv.CondSwizzleZ,
+ full_inst->InstructionExtNv.CondSwizzleW,
+ full_inst->InstructionExtNv.CondDstUpdate,
+ full_inst->InstructionExtNv.CondFlowEnable,
+ prev_token,
+ instruction,
+ header );
+ prev_token = (struct tgsi_token *) instruction_ext_nv;
+ }
+
+ if( tgsi_compare_instruction_ext_label(
+ full_inst->InstructionExtLabel,
+ tgsi_default_instruction_ext_label() ) ) {
+ struct tgsi_instruction_ext_label *instruction_ext_label;
+
+ if( maxsize <= size )
+ return 0;
+ instruction_ext_label =
+ (struct tgsi_instruction_ext_label *) &tokens[size];
+ size++;
+
+ *instruction_ext_label = tgsi_build_instruction_ext_label(
+ full_inst->InstructionExtLabel.Label,
+ prev_token,
+ instruction,
+ header );
+ prev_token = (struct tgsi_token *) instruction_ext_label;
+ }
+
+ if( tgsi_compare_instruction_ext_texture(
+ full_inst->InstructionExtTexture,
+ tgsi_default_instruction_ext_texture() ) ) {
+ struct tgsi_instruction_ext_texture *instruction_ext_texture;
+
+ if( maxsize <= size )
+ return 0;
+ instruction_ext_texture =
+ (struct tgsi_instruction_ext_texture *) &tokens[size];
+ size++;
+
+ *instruction_ext_texture = tgsi_build_instruction_ext_texture(
+ full_inst->InstructionExtTexture.Texture,
+ prev_token,
+ instruction,
+ header );
+ prev_token = (struct tgsi_token *) instruction_ext_texture;
+ }
+
+ for( i = 0; i < full_inst->Instruction.NumDstRegs; i++ ) {
+ const struct tgsi_full_dst_register *reg = &full_inst->FullDstRegisters[i];
+ struct tgsi_dst_register *dst_register;
+ struct tgsi_token *prev_token;
+
+ if( maxsize <= size )
+ return 0;
+ dst_register = (struct tgsi_dst_register *) &tokens[size];
+ size++;
+
+ *dst_register = tgsi_build_dst_register(
+ reg->DstRegister.File,
+ reg->DstRegister.WriteMask,
+ reg->DstRegister.Indirect,
+ reg->DstRegister.Index,
+ instruction,
+ header );
+ prev_token = (struct tgsi_token *) dst_register;
+
+ if( tgsi_compare_dst_register_ext_concode(
+ reg->DstRegisterExtConcode,
+ tgsi_default_dst_register_ext_concode() ) ) {
+ struct tgsi_dst_register_ext_concode *dst_register_ext_concode;
+
+ if( maxsize <= size )
+ return 0;
+ dst_register_ext_concode =
+ (struct tgsi_dst_register_ext_concode *) &tokens[size];
+ size++;
+
+ *dst_register_ext_concode = tgsi_build_dst_register_ext_concode(
+ reg->DstRegisterExtConcode.CondMask,
+ reg->DstRegisterExtConcode.CondSwizzleX,
+ reg->DstRegisterExtConcode.CondSwizzleY,
+ reg->DstRegisterExtConcode.CondSwizzleZ,
+ reg->DstRegisterExtConcode.CondSwizzleW,
+ reg->DstRegisterExtConcode.CondSrcIndex,
+ prev_token,
+ instruction,
+ header );
+ prev_token = (struct tgsi_token *) dst_register_ext_concode;
+ }
+
+ if( tgsi_compare_dst_register_ext_modulate(
+ reg->DstRegisterExtModulate,
+ tgsi_default_dst_register_ext_modulate() ) ) {
+ struct tgsi_dst_register_ext_modulate *dst_register_ext_modulate;
+
+ if( maxsize <= size )
+ return 0;
+ dst_register_ext_modulate =
+ (struct tgsi_dst_register_ext_modulate *) &tokens[size];
+ size++;
+
+ *dst_register_ext_modulate = tgsi_build_dst_register_ext_modulate(
+ reg->DstRegisterExtModulate.Modulate,
+ prev_token,
+ instruction,
+ header );
+ prev_token = (struct tgsi_token *) dst_register_ext_modulate;
+ }
+
+ if( reg->DstRegister.Indirect ) {
+ struct tgsi_src_register *ind;
+
+ if( maxsize <= size )
+ return 0;
+ ind = (struct tgsi_src_register *) &tokens[size];
+ size++;
+
+ *ind = tgsi_build_src_register(
+ reg->DstRegisterInd.File,
+ reg->DstRegisterInd.SwizzleX,
+ reg->DstRegisterInd.SwizzleY,
+ reg->DstRegisterInd.SwizzleZ,
+ reg->DstRegisterInd.SwizzleW,
+ reg->DstRegisterInd.Negate,
+ reg->DstRegisterInd.Indirect,
+ reg->DstRegisterInd.Dimension,
+ reg->DstRegisterInd.Index,
+ instruction,
+ header );
+ }
+ }
+
+ for( i = 0; i < full_inst->Instruction.NumSrcRegs; i++ ) {
+ const struct tgsi_full_src_register *reg = &full_inst->FullSrcRegisters[i];
+ struct tgsi_src_register *src_register;
+ struct tgsi_token *prev_token;
+
+ if( maxsize <= size )
+ return 0;
+ src_register = (struct tgsi_src_register *) &tokens[size];
+ size++;
+
+ *src_register = tgsi_build_src_register(
+ reg->SrcRegister.File,
+ reg->SrcRegister.SwizzleX,
+ reg->SrcRegister.SwizzleY,
+ reg->SrcRegister.SwizzleZ,
+ reg->SrcRegister.SwizzleW,
+ reg->SrcRegister.Negate,
+ reg->SrcRegister.Indirect,
+ reg->SrcRegister.Dimension,
+ reg->SrcRegister.Index,
+ instruction,
+ header );
+ prev_token = (struct tgsi_token *) src_register;
+
+ if( tgsi_compare_src_register_ext_swz(
+ reg->SrcRegisterExtSwz,
+ tgsi_default_src_register_ext_swz() ) ) {
+ struct tgsi_src_register_ext_swz *src_register_ext_swz;
+
+ /* Use of the extended swizzle requires the simple swizzle to be identity.
+ */
+ assert( reg->SrcRegister.SwizzleX == TGSI_SWIZZLE_X );
+ assert( reg->SrcRegister.SwizzleY == TGSI_SWIZZLE_Y );
+ assert( reg->SrcRegister.SwizzleZ == TGSI_SWIZZLE_Z );
+ assert( reg->SrcRegister.SwizzleW == TGSI_SWIZZLE_W );
+ assert( reg->SrcRegister.Negate == FALSE );
+
+ if( maxsize <= size )
+ return 0;
+ src_register_ext_swz =
+ (struct tgsi_src_register_ext_swz *) &tokens[size];
+ size++;
+
+ *src_register_ext_swz = tgsi_build_src_register_ext_swz(
+ reg->SrcRegisterExtSwz.ExtSwizzleX,
+ reg->SrcRegisterExtSwz.ExtSwizzleY,
+ reg->SrcRegisterExtSwz.ExtSwizzleZ,
+ reg->SrcRegisterExtSwz.ExtSwizzleW,
+ reg->SrcRegisterExtSwz.NegateX,
+ reg->SrcRegisterExtSwz.NegateY,
+ reg->SrcRegisterExtSwz.NegateZ,
+ reg->SrcRegisterExtSwz.NegateW,
+ prev_token,
+ instruction,
+ header );
+ prev_token = (struct tgsi_token *) src_register_ext_swz;
+ }
+
+ if( tgsi_compare_src_register_ext_mod(
+ reg->SrcRegisterExtMod,
+ tgsi_default_src_register_ext_mod() ) ) {
+ struct tgsi_src_register_ext_mod *src_register_ext_mod;
+
+ if( maxsize <= size )
+ return 0;
+ src_register_ext_mod =
+ (struct tgsi_src_register_ext_mod *) &tokens[size];
+ size++;
+
+ *src_register_ext_mod = tgsi_build_src_register_ext_mod(
+ reg->SrcRegisterExtMod.Complement,
+ reg->SrcRegisterExtMod.Bias,
+ reg->SrcRegisterExtMod.Scale2X,
+ reg->SrcRegisterExtMod.Absolute,
+ reg->SrcRegisterExtMod.Negate,
+ prev_token,
+ instruction,
+ header );
+ prev_token = (struct tgsi_token *) src_register_ext_mod;
+ }
+
+ if( reg->SrcRegister.Indirect ) {
+ struct tgsi_src_register *ind;
+
+ if( maxsize <= size )
+ return 0;
+ ind = (struct tgsi_src_register *) &tokens[size];
+ size++;
+
+ *ind = tgsi_build_src_register(
+ reg->SrcRegisterInd.File,
+ reg->SrcRegisterInd.SwizzleX,
+ reg->SrcRegisterInd.SwizzleY,
+ reg->SrcRegisterInd.SwizzleZ,
+ reg->SrcRegisterInd.SwizzleW,
+ reg->SrcRegisterInd.Negate,
+ reg->SrcRegisterInd.Indirect,
+ reg->SrcRegisterInd.Dimension,
+ reg->SrcRegisterInd.Index,
+ instruction,
+ header );
+ }
+
+ if( reg->SrcRegister.Dimension ) {
+ struct tgsi_dimension *dim;
+
+ assert( !reg->SrcRegisterDim.Dimension );
+
+ if( maxsize <= size )
+ return 0;
+ dim = (struct tgsi_dimension *) &tokens[size];
+ size++;
+
+ *dim = tgsi_build_dimension(
+ reg->SrcRegisterDim.Indirect,
+ reg->SrcRegisterDim.Index,
+ instruction,
+ header );
+
+ if( reg->SrcRegisterDim.Indirect ) {
+ struct tgsi_src_register *ind;
+
+ if( maxsize <= size )
+ return 0;
+ ind = (struct tgsi_src_register *) &tokens[size];
+ size++;
+
+ *ind = tgsi_build_src_register(
+ reg->SrcRegisterDimInd.File,
+ reg->SrcRegisterDimInd.SwizzleX,
+ reg->SrcRegisterDimInd.SwizzleY,
+ reg->SrcRegisterDimInd.SwizzleZ,
+ reg->SrcRegisterDimInd.SwizzleW,
+ reg->SrcRegisterDimInd.Negate,
+ reg->SrcRegisterDimInd.Indirect,
+ reg->SrcRegisterDimInd.Dimension,
+ reg->SrcRegisterDimInd.Index,
+ instruction,
+ header );
+ }
+ }
+ }
+
+ return size;
+}
+
+struct tgsi_instruction_ext_nv
+tgsi_default_instruction_ext_nv( void )
+{
+ struct tgsi_instruction_ext_nv instruction_ext_nv;
+
+ instruction_ext_nv.Type = TGSI_INSTRUCTION_EXT_TYPE_NV;
+ instruction_ext_nv.Precision = TGSI_PRECISION_DEFAULT;
+ instruction_ext_nv.CondDstIndex = 0;
+ instruction_ext_nv.CondFlowIndex = 0;
+ instruction_ext_nv.CondMask = TGSI_CC_TR;
+ instruction_ext_nv.CondSwizzleX = TGSI_SWIZZLE_X;
+ instruction_ext_nv.CondSwizzleY = TGSI_SWIZZLE_Y;
+ instruction_ext_nv.CondSwizzleZ = TGSI_SWIZZLE_Z;
+ instruction_ext_nv.CondSwizzleW = TGSI_SWIZZLE_W;
+ instruction_ext_nv.CondDstUpdate = 0;
+ instruction_ext_nv.CondFlowEnable = 0;
+ instruction_ext_nv.Padding = 0;
+ instruction_ext_nv.Extended = 0;
+
+ return instruction_ext_nv;
+}
+
+
+/** test for inequality of 32-bit values pointed to by a and b */
+static INLINE boolean
+compare32(const void *a, const void *b)
+{
+ return *((uint32_t *) a) != *((uint32_t *) b);
+}
+
+
+unsigned
+tgsi_compare_instruction_ext_nv(
+ struct tgsi_instruction_ext_nv a,
+ struct tgsi_instruction_ext_nv b )
+{
+ a.Padding = b.Padding = 0;
+ a.Extended = b.Extended = 0;
+ return compare32(&a, &b);
+}
+
+struct tgsi_instruction_ext_nv
+tgsi_build_instruction_ext_nv(
+ unsigned precision,
+ unsigned cond_dst_index,
+ unsigned cond_flow_index,
+ unsigned cond_mask,
+ unsigned cond_swizzle_x,
+ unsigned cond_swizzle_y,
+ unsigned cond_swizzle_z,
+ unsigned cond_swizzle_w,
+ unsigned cond_dst_update,
+ unsigned cond_flow_enable,
+ struct tgsi_token *prev_token,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header )
+{
+ struct tgsi_instruction_ext_nv instruction_ext_nv;
+
+ instruction_ext_nv = tgsi_default_instruction_ext_nv();
+ instruction_ext_nv.Precision = precision;
+ instruction_ext_nv.CondDstIndex = cond_dst_index;
+ instruction_ext_nv.CondFlowIndex = cond_flow_index;
+ instruction_ext_nv.CondMask = cond_mask;
+ instruction_ext_nv.CondSwizzleX = cond_swizzle_x;
+ instruction_ext_nv.CondSwizzleY = cond_swizzle_y;
+ instruction_ext_nv.CondSwizzleZ = cond_swizzle_z;
+ instruction_ext_nv.CondSwizzleW = cond_swizzle_w;
+ instruction_ext_nv.CondDstUpdate = cond_dst_update;
+ instruction_ext_nv.CondFlowEnable = cond_flow_enable;
+
+ prev_token->Extended = 1;
+ instruction_grow( instruction, header );
+
+ return instruction_ext_nv;
+}
+
+struct tgsi_instruction_ext_label
+tgsi_default_instruction_ext_label( void )
+{
+ struct tgsi_instruction_ext_label instruction_ext_label;
+
+ instruction_ext_label.Type = TGSI_INSTRUCTION_EXT_TYPE_LABEL;
+ instruction_ext_label.Label = 0;
+ instruction_ext_label.Padding = 0;
+ instruction_ext_label.Extended = 0;
+
+ return instruction_ext_label;
+}
+
+unsigned
+tgsi_compare_instruction_ext_label(
+ struct tgsi_instruction_ext_label a,
+ struct tgsi_instruction_ext_label b )
+{
+ a.Padding = b.Padding = 0;
+ a.Extended = b.Extended = 0;
+ return compare32(&a, &b);
+}
+
+struct tgsi_instruction_ext_label
+tgsi_build_instruction_ext_label(
+ unsigned label,
+ struct tgsi_token *prev_token,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header )
+{
+ struct tgsi_instruction_ext_label instruction_ext_label;
+
+ instruction_ext_label = tgsi_default_instruction_ext_label();
+ instruction_ext_label.Label = label;
+
+ prev_token->Extended = 1;
+ instruction_grow( instruction, header );
+
+ return instruction_ext_label;
+}
+
+struct tgsi_instruction_ext_texture
+tgsi_default_instruction_ext_texture( void )
+{
+ struct tgsi_instruction_ext_texture instruction_ext_texture;
+
+ instruction_ext_texture.Type = TGSI_INSTRUCTION_EXT_TYPE_TEXTURE;
+ instruction_ext_texture.Texture = TGSI_TEXTURE_UNKNOWN;
+ instruction_ext_texture.Padding = 0;
+ instruction_ext_texture.Extended = 0;
+
+ return instruction_ext_texture;
+}
+
+unsigned
+tgsi_compare_instruction_ext_texture(
+ struct tgsi_instruction_ext_texture a,
+ struct tgsi_instruction_ext_texture b )
+{
+ a.Padding = b.Padding = 0;
+ a.Extended = b.Extended = 0;
+ return compare32(&a, &b);
+}
+
+struct tgsi_instruction_ext_texture
+tgsi_build_instruction_ext_texture(
+ unsigned texture,
+ struct tgsi_token *prev_token,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header )
+{
+ struct tgsi_instruction_ext_texture instruction_ext_texture;
+
+ instruction_ext_texture = tgsi_default_instruction_ext_texture();
+ instruction_ext_texture.Texture = texture;
+
+ prev_token->Extended = 1;
+ instruction_grow( instruction, header );
+
+ return instruction_ext_texture;
+}
+
+struct tgsi_src_register
+tgsi_default_src_register( void )
+{
+ struct tgsi_src_register src_register;
+
+ src_register.File = TGSI_FILE_NULL;
+ src_register.SwizzleX = TGSI_SWIZZLE_X;
+ src_register.SwizzleY = TGSI_SWIZZLE_Y;
+ src_register.SwizzleZ = TGSI_SWIZZLE_Z;
+ src_register.SwizzleW = TGSI_SWIZZLE_W;
+ src_register.Negate = 0;
+ src_register.Indirect = 0;
+ src_register.Dimension = 0;
+ src_register.Index = 0;
+ src_register.Extended = 0;
+
+ return src_register;
+}
+
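+/* Build a source register word.  Arguments are range-checked with
+ * asserts (Index must fit the signed 16-bit field, -0x8000..0x7FFF) and
+ * the enclosing instruction is grown by one token.
+ */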
+struct tgsi_src_register
+tgsi_build_src_register(
+ unsigned file,
+ unsigned swizzle_x,
+ unsigned swizzle_y,
+ unsigned swizzle_z,
+ unsigned swizzle_w,
+ unsigned negate,
+ unsigned indirect,
+ unsigned dimension,
+ int index,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header )
+{
+ struct tgsi_src_register src_register;
+
+ assert( file < TGSI_FILE_COUNT );
+ assert( swizzle_x <= TGSI_SWIZZLE_W );
+ assert( swizzle_y <= TGSI_SWIZZLE_W );
+ assert( swizzle_z <= TGSI_SWIZZLE_W );
+ assert( swizzle_w <= TGSI_SWIZZLE_W );
+ assert( negate <= 1 );
+ assert( index >= -0x8000 && index <= 0x7FFF );
+
+ src_register = tgsi_default_src_register();
+ src_register.File = file;
+ src_register.SwizzleX = swizzle_x;
+ src_register.SwizzleY = swizzle_y;
+ src_register.SwizzleZ = swizzle_z;
+ src_register.SwizzleW = swizzle_w;
+ src_register.Negate = negate;
+ src_register.Indirect = indirect;
+ src_register.Dimension = dimension;
+ src_register.Index = index;
+
+ instruction_grow( instruction, header );
+
+ return src_register;
+}
+
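+/* A full source register bundles the base register with its optional
+ * extended-swizzle, modifier, indirect-address and dimension words, all
+ * initialized to their defaults here.
+ */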
+struct tgsi_full_src_register
+tgsi_default_full_src_register( void )
+{
+ struct tgsi_full_src_register full_src_register;
+
+ full_src_register.SrcRegister = tgsi_default_src_register();
+ full_src_register.SrcRegisterExtSwz = tgsi_default_src_register_ext_swz();
+ full_src_register.SrcRegisterExtMod = tgsi_default_src_register_ext_mod();
+ full_src_register.SrcRegisterInd = tgsi_default_src_register();
+ full_src_register.SrcRegisterDim = tgsi_default_dimension();
+ full_src_register.SrcRegisterDimInd = tgsi_default_src_register();
+
+ return full_src_register;
+}
+
+struct tgsi_src_register_ext_swz
+tgsi_default_src_register_ext_swz( void )
+{
+ struct tgsi_src_register_ext_swz src_register_ext_swz;
+
+ src_register_ext_swz.Type = TGSI_SRC_REGISTER_EXT_TYPE_SWZ;
+ src_register_ext_swz.ExtSwizzleX = TGSI_EXTSWIZZLE_X;
+ src_register_ext_swz.ExtSwizzleY = TGSI_EXTSWIZZLE_Y;
+ src_register_ext_swz.ExtSwizzleZ = TGSI_EXTSWIZZLE_Z;
+ src_register_ext_swz.ExtSwizzleW = TGSI_EXTSWIZZLE_W;
+ src_register_ext_swz.NegateX = 0;
+ src_register_ext_swz.NegateY = 0;
+ src_register_ext_swz.NegateZ = 0;
+ src_register_ext_swz.NegateW = 0;
+ src_register_ext_swz.Padding = 0;
+ src_register_ext_swz.Extended = 0;
+
+ return src_register_ext_swz;
+}
+
+unsigned
+tgsi_compare_src_register_ext_swz(
+ struct tgsi_src_register_ext_swz a,
+ struct tgsi_src_register_ext_swz b )
+{
+ a.Padding = b.Padding = 0;
+ a.Extended = b.Extended = 0;
+ return compare32(&a, &b);
+}
+
+struct tgsi_src_register_ext_swz
+tgsi_build_src_register_ext_swz(
+ unsigned ext_swizzle_x,
+ unsigned ext_swizzle_y,
+ unsigned ext_swizzle_z,
+ unsigned ext_swizzle_w,
+ unsigned negate_x,
+ unsigned negate_y,
+ unsigned negate_z,
+ unsigned negate_w,
+ struct tgsi_token *prev_token,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header )
+{
+ struct tgsi_src_register_ext_swz src_register_ext_swz;
+
+ assert( ext_swizzle_x <= TGSI_EXTSWIZZLE_ONE );
+ assert( ext_swizzle_y <= TGSI_EXTSWIZZLE_ONE );
+ assert( ext_swizzle_z <= TGSI_EXTSWIZZLE_ONE );
+ assert( ext_swizzle_w <= TGSI_EXTSWIZZLE_ONE );
+ assert( negate_x <= 1 );
+ assert( negate_y <= 1 );
+ assert( negate_z <= 1 );
+ assert( negate_w <= 1 );
+
+ src_register_ext_swz = tgsi_default_src_register_ext_swz();
+ src_register_ext_swz.ExtSwizzleX = ext_swizzle_x;
+ src_register_ext_swz.ExtSwizzleY = ext_swizzle_y;
+ src_register_ext_swz.ExtSwizzleZ = ext_swizzle_z;
+ src_register_ext_swz.ExtSwizzleW = ext_swizzle_w;
+ src_register_ext_swz.NegateX = negate_x;
+ src_register_ext_swz.NegateY = negate_y;
+ src_register_ext_swz.NegateZ = negate_z;
+ src_register_ext_swz.NegateW = negate_w;
+
+ prev_token->Extended = 1;
+ instruction_grow( instruction, header );
+
+ return src_register_ext_swz;
+}
+
+struct tgsi_src_register_ext_mod
+tgsi_default_src_register_ext_mod( void )
+{
+ struct tgsi_src_register_ext_mod src_register_ext_mod;
+
+ src_register_ext_mod.Type = TGSI_SRC_REGISTER_EXT_TYPE_MOD;
+ src_register_ext_mod.Complement = 0;
+ src_register_ext_mod.Bias = 0;
+ src_register_ext_mod.Scale2X = 0;
+ src_register_ext_mod.Absolute = 0;
+ src_register_ext_mod.Negate = 0;
+ src_register_ext_mod.Padding = 0;
+ src_register_ext_mod.Extended = 0;
+
+ return src_register_ext_mod;
+}
+
+unsigned
+tgsi_compare_src_register_ext_mod(
+ struct tgsi_src_register_ext_mod a,
+ struct tgsi_src_register_ext_mod b )
+{
+ a.Padding = b.Padding = 0;
+ a.Extended = b.Extended = 0;
+ return compare32(&a, &b);
+}
+
+struct tgsi_src_register_ext_mod
+tgsi_build_src_register_ext_mod(
+ unsigned complement,
+ unsigned bias,
+ unsigned scale_2x,
+ unsigned absolute,
+ unsigned negate,
+ struct tgsi_token *prev_token,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header )
+{
+ struct tgsi_src_register_ext_mod src_register_ext_mod;
+
+ assert( complement <= 1 );
+ assert( bias <= 1 );
+ assert( scale_2x <= 1 );
+ assert( absolute <= 1 );
+ assert( negate <= 1 );
+
+ src_register_ext_mod = tgsi_default_src_register_ext_mod();
+ src_register_ext_mod.Complement = complement;
+ src_register_ext_mod.Bias = bias;
+ src_register_ext_mod.Scale2X = scale_2x;
+ src_register_ext_mod.Absolute = absolute;
+ src_register_ext_mod.Negate = negate;
+
+ prev_token->Extended = 1;
+ instruction_grow( instruction, header );
+
+ return src_register_ext_mod;
+}
+
+struct tgsi_dimension
+tgsi_default_dimension( void )
+{
+ struct tgsi_dimension dimension;
+
+ dimension.Indirect = 0;
+ dimension.Dimension = 0;
+ dimension.Padding = 0;
+ dimension.Index = 0;
+ dimension.Extended = 0;
+
+ return dimension;
+}
+
+struct tgsi_dimension
+tgsi_build_dimension(
+ unsigned indirect,
+ unsigned index,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header )
+{
+ struct tgsi_dimension dimension;
+
+ dimension = tgsi_default_dimension();
+ dimension.Indirect = indirect;
+ dimension.Index = index;
+
+ instruction_grow( instruction, header );
+
+ return dimension;
+}
+
+struct tgsi_dst_register
+tgsi_default_dst_register( void )
+{
+ struct tgsi_dst_register dst_register;
+
+ dst_register.File = TGSI_FILE_NULL;
+ dst_register.WriteMask = TGSI_WRITEMASK_XYZW;
+ dst_register.Indirect = 0;
+ dst_register.Dimension = 0;
+ dst_register.Index = 0;
+ dst_register.Padding = 0;
+ dst_register.Extended = 0;
+
+ return dst_register;
+}
+
+struct tgsi_dst_register
+tgsi_build_dst_register(
+ unsigned file,
+ unsigned mask,
+ unsigned indirect,
+ int index,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header )
+{
+ struct tgsi_dst_register dst_register;
+
+ assert( file < TGSI_FILE_COUNT );
+ assert( mask <= TGSI_WRITEMASK_XYZW );
+ assert( index >= -32768 && index <= 32767 );
+
+ dst_register = tgsi_default_dst_register();
+ dst_register.File = file;
+ dst_register.WriteMask = mask;
+ dst_register.Index = index;
+ dst_register.Indirect = indirect;
+
+ instruction_grow( instruction, header );
+
+ return dst_register;
+}
+
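+/* A full destination register groups the base register with its
+ * indirect-address word and the condition-code and modulate extensions.
+ */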
+struct tgsi_full_dst_register
+tgsi_default_full_dst_register( void )
+{
+ struct tgsi_full_dst_register full_dst_register;
+
+ full_dst_register.DstRegister = tgsi_default_dst_register();
+ full_dst_register.DstRegisterInd = tgsi_default_src_register();
+ full_dst_register.DstRegisterExtConcode =
+ tgsi_default_dst_register_ext_concode();
+ full_dst_register.DstRegisterExtModulate =
+ tgsi_default_dst_register_ext_modulate();
+
+ return full_dst_register;
+}
+
+struct tgsi_dst_register_ext_concode
+tgsi_default_dst_register_ext_concode( void )
+{
+ struct tgsi_dst_register_ext_concode dst_register_ext_concode;
+
+ dst_register_ext_concode.Type = TGSI_DST_REGISTER_EXT_TYPE_CONDCODE;
+ dst_register_ext_concode.CondMask = TGSI_CC_TR;
+ dst_register_ext_concode.CondSwizzleX = TGSI_SWIZZLE_X;
+ dst_register_ext_concode.CondSwizzleY = TGSI_SWIZZLE_Y;
+ dst_register_ext_concode.CondSwizzleZ = TGSI_SWIZZLE_Z;
+ dst_register_ext_concode.CondSwizzleW = TGSI_SWIZZLE_W;
+ dst_register_ext_concode.CondSrcIndex = 0;
+ dst_register_ext_concode.Padding = 0;
+ dst_register_ext_concode.Extended = 0;
+
+ return dst_register_ext_concode;
+}
+
+unsigned
+tgsi_compare_dst_register_ext_concode(
+ struct tgsi_dst_register_ext_concode a,
+ struct tgsi_dst_register_ext_concode b )
+{
+ a.Padding = b.Padding = 0;
+ a.Extended = b.Extended = 0;
+ return compare32(&a, &b);
+}
+
+struct tgsi_dst_register_ext_concode
+tgsi_build_dst_register_ext_concode(
+ unsigned cc,
+ unsigned swizzle_x,
+ unsigned swizzle_y,
+ unsigned swizzle_z,
+ unsigned swizzle_w,
+ int index,
+ struct tgsi_token *prev_token,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header )
+{
+ struct tgsi_dst_register_ext_concode dst_register_ext_concode;
+
+ assert( cc <= TGSI_CC_FL );
+ assert( swizzle_x <= TGSI_SWIZZLE_W );
+ assert( swizzle_y <= TGSI_SWIZZLE_W );
+ assert( swizzle_z <= TGSI_SWIZZLE_W );
+ assert( swizzle_w <= TGSI_SWIZZLE_W );
+ assert( index >= -32768 && index <= 32767 );
+
+ dst_register_ext_concode = tgsi_default_dst_register_ext_concode();
+ dst_register_ext_concode.CondMask = cc;
+ dst_register_ext_concode.CondSwizzleX = swizzle_x;
+ dst_register_ext_concode.CondSwizzleY = swizzle_y;
+ dst_register_ext_concode.CondSwizzleZ = swizzle_z;
+ dst_register_ext_concode.CondSwizzleW = swizzle_w;
+ dst_register_ext_concode.CondSrcIndex = index;
+
+ prev_token->Extended = 1;
+ instruction_grow( instruction, header );
+
+ return dst_register_ext_concode;
+}
+
+struct tgsi_dst_register_ext_modulate
+tgsi_default_dst_register_ext_modulate( void )
+{
+ struct tgsi_dst_register_ext_modulate dst_register_ext_modulate;
+
+ dst_register_ext_modulate.Type = TGSI_DST_REGISTER_EXT_TYPE_MODULATE;
+ dst_register_ext_modulate.Modulate = TGSI_MODULATE_1X;
+ dst_register_ext_modulate.Padding = 0;
+ dst_register_ext_modulate.Extended = 0;
+
+ return dst_register_ext_modulate;
+}
+
+unsigned
+tgsi_compare_dst_register_ext_modulate(
+ struct tgsi_dst_register_ext_modulate a,
+ struct tgsi_dst_register_ext_modulate b )
+{
+ a.Padding = b.Padding = 0;
+ a.Extended = b.Extended = 0;
+ return compare32(&a, &b);
+}
+
+struct tgsi_dst_register_ext_modulate
+tgsi_build_dst_register_ext_modulate(
+ unsigned modulate,
+ struct tgsi_token *prev_token,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header )
+{
+ struct tgsi_dst_register_ext_modulate dst_register_ext_modulate;
+
+ assert( modulate <= TGSI_MODULATE_EIGHTH );
+
+ dst_register_ext_modulate = tgsi_default_dst_register_ext_modulate();
+ dst_register_ext_modulate.Modulate = modulate;
+
+ prev_token->Extended = 1;
+ instruction_grow( instruction, header );
+
+ return dst_register_ext_modulate;
+}
diff --git a/src/gallium/auxiliary/tgsi/tgsi_build.h b/src/gallium/auxiliary/tgsi/tgsi_build.h
new file mode 100644
index 0000000000..17d977b059
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_build.h
@@ -0,0 +1,339 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TGSI_BUILD_H
+#define TGSI_BUILD_H
+
+
+struct tgsi_token;
+
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+/*
+ * version
+ */
+
+struct tgsi_version
+tgsi_build_version( void );
+
+/*
+ * header
+ */
+
+struct tgsi_header
+tgsi_build_header( void );
+
+struct tgsi_processor
+tgsi_default_processor( void );
+
+struct tgsi_processor
+tgsi_build_processor(
+ unsigned processor,
+ struct tgsi_header *header );
+
+/*
+ * declaration
+ */
+
+struct tgsi_declaration
+tgsi_default_declaration( void );
+
+struct tgsi_declaration
+tgsi_build_declaration(
+ unsigned file,
+ unsigned usage_mask,
+ unsigned interpolate,
+ unsigned semantic,
+ unsigned centroid,
+ unsigned invariant,
+ struct tgsi_header *header );
+
+struct tgsi_full_declaration
+tgsi_default_full_declaration( void );
+
+unsigned
+tgsi_build_full_declaration(
+ const struct tgsi_full_declaration *full_decl,
+ struct tgsi_token *tokens,
+ struct tgsi_header *header,
+ unsigned maxsize );
+
+struct tgsi_declaration_range
+tgsi_default_declaration_range( void );
+
+struct tgsi_declaration_range
+tgsi_build_declaration_range(
+ unsigned first,
+ unsigned last,
+ struct tgsi_declaration *declaration,
+ struct tgsi_header *header );
+
+struct tgsi_declaration_semantic
+tgsi_default_declaration_semantic( void );
+
+struct tgsi_declaration_semantic
+tgsi_build_declaration_semantic(
+ unsigned semantic_name,
+ unsigned semantic_index,
+ struct tgsi_declaration *declaration,
+ struct tgsi_header *header );
+
+/*
+ * immediate
+ */
+
+struct tgsi_immediate
+tgsi_default_immediate( void );
+
+struct tgsi_immediate
+tgsi_build_immediate(
+ struct tgsi_header *header );
+
+struct tgsi_full_immediate
+tgsi_default_full_immediate( void );
+
+union tgsi_immediate_data
+tgsi_build_immediate_float32(
+ float value,
+ struct tgsi_immediate *immediate,
+ struct tgsi_header *header );
+
+unsigned
+tgsi_build_full_immediate(
+ const struct tgsi_full_immediate *full_imm,
+ struct tgsi_token *tokens,
+ struct tgsi_header *header,
+ unsigned maxsize );
+
+/*
+ * instruction
+ */
+
+struct tgsi_instruction
+tgsi_default_instruction( void );
+
+struct tgsi_instruction
+tgsi_build_instruction(
+ unsigned opcode,
+ unsigned saturate,
+ unsigned num_dst_regs,
+ unsigned num_src_regs,
+ struct tgsi_header *header );
+
+struct tgsi_full_instruction
+tgsi_default_full_instruction( void );
+
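+/* Typical usage (sketch): start from tgsi_default_full_instruction(),
+ * fill in the opcode, register counts and registers, then serialize it
+ * with tgsi_build_full_instruction().  The return value is the number
+ * of tokens written, or 0 if maxsize was too small.
+ */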
+unsigned
+tgsi_build_full_instruction(
+ const struct tgsi_full_instruction *full_inst,
+ struct tgsi_token *tokens,
+ struct tgsi_header *header,
+ unsigned maxsize );
+
+struct tgsi_instruction_ext_nv
+tgsi_default_instruction_ext_nv( void );
+
+unsigned
+tgsi_compare_instruction_ext_nv(
+ struct tgsi_instruction_ext_nv a,
+ struct tgsi_instruction_ext_nv b );
+
+struct tgsi_instruction_ext_nv
+tgsi_build_instruction_ext_nv(
+ unsigned precision,
+ unsigned cond_dst_index,
+ unsigned cond_flow_index,
+ unsigned cond_mask,
+ unsigned cond_swizzle_x,
+ unsigned cond_swizzle_y,
+ unsigned cond_swizzle_z,
+ unsigned cond_swizzle_w,
+ unsigned cond_dst_update,
+ unsigned cond_flow_enable,
+ struct tgsi_token *prev_token,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header );
+
+struct tgsi_instruction_ext_label
+tgsi_default_instruction_ext_label( void );
+
+unsigned
+tgsi_compare_instruction_ext_label(
+ struct tgsi_instruction_ext_label a,
+ struct tgsi_instruction_ext_label b );
+
+struct tgsi_instruction_ext_label
+tgsi_build_instruction_ext_label(
+ unsigned label,
+ struct tgsi_token *prev_token,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header );
+
+struct tgsi_instruction_ext_texture
+tgsi_default_instruction_ext_texture( void );
+
+unsigned
+tgsi_compare_instruction_ext_texture(
+ struct tgsi_instruction_ext_texture a,
+ struct tgsi_instruction_ext_texture b );
+
+struct tgsi_instruction_ext_texture
+tgsi_build_instruction_ext_texture(
+ unsigned texture,
+ struct tgsi_token *prev_token,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header );
+
+struct tgsi_src_register
+tgsi_default_src_register( void );
+
+struct tgsi_src_register
+tgsi_build_src_register(
+ unsigned file,
+ unsigned swizzle_x,
+ unsigned swizzle_y,
+ unsigned swizzle_z,
+ unsigned swizzle_w,
+ unsigned negate,
+ unsigned indirect,
+ unsigned dimension,
+ int index,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header );
+
+struct tgsi_full_src_register
+tgsi_default_full_src_register( void );
+
+struct tgsi_src_register_ext_swz
+tgsi_default_src_register_ext_swz( void );
+
+unsigned
+tgsi_compare_src_register_ext_swz(
+ struct tgsi_src_register_ext_swz a,
+ struct tgsi_src_register_ext_swz b );
+
+struct tgsi_src_register_ext_swz
+tgsi_build_src_register_ext_swz(
+ unsigned ext_swizzle_x,
+ unsigned ext_swizzle_y,
+ unsigned ext_swizzle_z,
+ unsigned ext_swizzle_w,
+ unsigned negate_x,
+ unsigned negate_y,
+ unsigned negate_z,
+ unsigned negate_w,
+ struct tgsi_token *prev_token,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header );
+
+struct tgsi_src_register_ext_mod
+tgsi_default_src_register_ext_mod( void );
+
+unsigned
+tgsi_compare_src_register_ext_mod(
+ struct tgsi_src_register_ext_mod a,
+ struct tgsi_src_register_ext_mod b );
+
+struct tgsi_src_register_ext_mod
+tgsi_build_src_register_ext_mod(
+ unsigned complement,
+ unsigned bias,
+ unsigned scale_2x,
+ unsigned absolute,
+ unsigned negate,
+ struct tgsi_token *prev_token,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header );
+
+struct tgsi_dimension
+tgsi_default_dimension( void );
+
+struct tgsi_dimension
+tgsi_build_dimension(
+ unsigned indirect,
+ unsigned index,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header );
+
+struct tgsi_dst_register
+tgsi_default_dst_register( void );
+
+struct tgsi_dst_register
+tgsi_build_dst_register(
+ unsigned file,
+ unsigned mask,
+ unsigned indirect,
+ int index,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header );
+
+struct tgsi_full_dst_register
+tgsi_default_full_dst_register( void );
+
+struct tgsi_dst_register_ext_concode
+tgsi_default_dst_register_ext_concode( void );
+
+unsigned
+tgsi_compare_dst_register_ext_concode(
+ struct tgsi_dst_register_ext_concode a,
+ struct tgsi_dst_register_ext_concode b );
+
+struct tgsi_dst_register_ext_concode
+tgsi_build_dst_register_ext_concode(
+ unsigned cc,
+ unsigned swizzle_x,
+ unsigned swizzle_y,
+ unsigned swizzle_z,
+ unsigned swizzle_w,
+ int index,
+ struct tgsi_token *prev_token,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header );
+
+struct tgsi_dst_register_ext_modulate
+tgsi_default_dst_register_ext_modulate( void );
+
+unsigned
+tgsi_compare_dst_register_ext_modulate(
+ struct tgsi_dst_register_ext_modulate a,
+ struct tgsi_dst_register_ext_modulate b );
+
+struct tgsi_dst_register_ext_modulate
+tgsi_build_dst_register_ext_modulate(
+ unsigned modulate,
+ struct tgsi_token *prev_token,
+ struct tgsi_instruction *instruction,
+ struct tgsi_header *header );
+
+#if defined __cplusplus
+}
+#endif
+
+#endif /* TGSI_BUILD_H */
diff --git a/src/gallium/auxiliary/tgsi/tgsi_dump.c b/src/gallium/auxiliary/tgsi/tgsi_dump.c
new file mode 100644
index 0000000000..111d95b666
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_dump.c
@@ -0,0 +1,617 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_debug.h"
+#include "util/u_string.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "tgsi_dump.h"
+#include "tgsi_info.h"
+#include "tgsi_iterate.h"
+
+
+/** Number of spaces to indent for IF/LOOP/etc */
+static const int indent_spaces = 3;
+
+
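+/* Dump context: wraps a tgsi_iterate_context and routes all output
+ * through the printf hook, so the same iteration code can print to the
+ * debug log or into a caller-supplied string (see str_dump_ctx below).
+ */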
+struct dump_ctx
+{
+ struct tgsi_iterate_context iter;
+
+ uint instno;
+ int indent;
+
+ uint indentation;
+
+ void (*printf)(struct dump_ctx *ctx, const char *format, ...);
+};
+
+static void
+dump_ctx_printf(struct dump_ctx *ctx, const char *format, ...)
+{
+ va_list ap;
+ (void)ctx;
+ va_start(ap, format);
+ debug_vprintf(format, ap);
+ va_end(ap);
+}
+
+static void
+dump_enum(
+ struct dump_ctx *ctx,
+ uint e,
+ const char **enums,
+ uint enum_count )
+{
+ if (e >= enum_count)
+ ctx->printf( ctx, "%u", e );
+ else
+ ctx->printf( ctx, "%s", enums[e] );
+}
+
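+/* Small printing helpers.  ENM() prints the symbolic name of an enum
+ * value when it is in range and falls back to the raw number otherwise.
+ */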
+#define EOL() ctx->printf( ctx, "\n" )
+#define TXT(S) ctx->printf( ctx, "%s", S )
+#define CHR(C) ctx->printf( ctx, "%c", C )
+#define UIX(I) ctx->printf( ctx, "0x%x", I )
+#define UID(I) ctx->printf( ctx, "%u", I )
+#define INSTID(I) ctx->printf( ctx, "% 3u", I )
+#define SID(I) ctx->printf( ctx, "%d", I )
+#define FLT(F) ctx->printf( ctx, "%10.4f", F )
+#define ENM(E,ENUMS) dump_enum( ctx, E, ENUMS, sizeof( ENUMS ) / sizeof( *ENUMS ) )
+
+static const char *processor_type_names[] =
+{
+ "FRAG",
+ "VERT",
+ "GEOM"
+};
+
+static const char *file_names[TGSI_FILE_COUNT] =
+{
+ "NULL",
+ "CONST",
+ "IN",
+ "OUT",
+ "TEMP",
+ "SAMP",
+ "ADDR",
+ "IMM",
+ "LOOP"
+};
+
+static const char *interpolate_names[] =
+{
+ "CONSTANT",
+ "LINEAR",
+ "PERSPECTIVE"
+};
+
+static const char *semantic_names[] =
+{
+ "POSITION",
+ "COLOR",
+ "BCOLOR",
+ "FOG",
+ "PSIZE",
+ "GENERIC",
+ "NORMAL",
+ "FACE"
+};
+
+static const char *immediate_type_names[] =
+{
+ "FLT32"
+};
+
+static const char *swizzle_names[] =
+{
+ "x",
+ "y",
+ "z",
+ "w"
+};
+
+static const char *texture_names[] =
+{
+ "UNKNOWN",
+ "1D",
+ "2D",
+ "3D",
+ "CUBE",
+ "RECT",
+ "SHADOW1D",
+ "SHADOW2D",
+ "SHADOWRECT"
+};
+
+static const char *extswizzle_names[] =
+{
+ "x",
+ "y",
+ "z",
+ "w",
+ "0",
+ "1"
+};
+
+static const char *modulate_names[TGSI_MODULATE_COUNT] =
+{
+ "",
+ "_2X",
+ "_4X",
+ "_8X",
+ "_D2",
+ "_D4",
+ "_D8"
+};
+
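+/* Print a register reference as FILE[first] or FILE[first..last]. */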
+static void
+_dump_register(
+ struct dump_ctx *ctx,
+ uint file,
+ int first,
+ int last )
+{
+ ENM( file, file_names );
+ CHR( '[' );
+ SID( first );
+ if (first != last) {
+ TXT( ".." );
+ SID( last );
+ }
+ CHR( ']' );
+}
+
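+/* Print an indirectly addressed register, e.g. TEMP[ADDR[0].x+2]. */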
+static void
+_dump_register_ind(
+ struct dump_ctx *ctx,
+ uint file,
+ int index,
+ uint ind_file,
+ int ind_index,
+ uint ind_swizzle )
+{
+ ENM( file, file_names );
+ CHR( '[' );
+ ENM( ind_file, file_names );
+ CHR( '[' );
+ SID( ind_index );
+ TXT( "]." );
+ ENM( ind_swizzle, swizzle_names );
+ if (index != 0) {
+ if (index > 0)
+ CHR( '+' );
+ SID( index );
+ }
+ CHR( ']' );
+}
+
+static void
+_dump_writemask(
+ struct dump_ctx *ctx,
+ uint writemask )
+{
+ if (writemask != TGSI_WRITEMASK_XYZW) {
+ CHR( '.' );
+ if (writemask & TGSI_WRITEMASK_X)
+ CHR( 'x' );
+ if (writemask & TGSI_WRITEMASK_Y)
+ CHR( 'y' );
+ if (writemask & TGSI_WRITEMASK_Z)
+ CHR( 'z' );
+ if (writemask & TGSI_WRITEMASK_W)
+ CHR( 'w' );
+ }
+}
+
+static boolean
+iter_declaration(
+ struct tgsi_iterate_context *iter,
+ struct tgsi_full_declaration *decl )
+{
+ struct dump_ctx *ctx = (struct dump_ctx *)iter;
+
+ assert(Elements(semantic_names) == TGSI_SEMANTIC_COUNT);
+ assert(Elements(interpolate_names) == TGSI_INTERPOLATE_COUNT);
+
+ TXT( "DCL " );
+
+ _dump_register(
+ ctx,
+ decl->Declaration.File,
+ decl->DeclarationRange.First,
+ decl->DeclarationRange.Last );
+ _dump_writemask(
+ ctx,
+ decl->Declaration.UsageMask );
+
+ if (decl->Declaration.Semantic) {
+ TXT( ", " );
+ ENM( decl->Semantic.SemanticName, semantic_names );
+ if (decl->Semantic.SemanticIndex != 0 ||
+ decl->Semantic.SemanticName == TGSI_SEMANTIC_GENERIC) {
+ CHR( '[' );
+ UID( decl->Semantic.SemanticIndex );
+ CHR( ']' );
+ }
+ }
+
+ if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT &&
+ decl->Declaration.File == TGSI_FILE_INPUT)
+ {
+ TXT( ", " );
+ ENM( decl->Declaration.Interpolate, interpolate_names );
+ }
+
+ if (decl->Declaration.Centroid) {
+ TXT( ", CENTROID" );
+ }
+
+ if (decl->Declaration.Invariant) {
+ TXT( ", INVARIANT" );
+ }
+
+ EOL();
+
+ return TRUE;
+}
+
+void
+tgsi_dump_declaration(
+ const struct tgsi_full_declaration *decl )
+{
+ struct dump_ctx ctx;
+
+ ctx.printf = dump_ctx_printf;
+
+ iter_declaration( &ctx.iter, (struct tgsi_full_declaration *)decl );
+}
+
+static boolean
+iter_immediate(
+ struct tgsi_iterate_context *iter,
+ struct tgsi_full_immediate *imm )
+{
+ struct dump_ctx *ctx = (struct dump_ctx *) iter;
+
+ uint i;
+
+ TXT( "IMM " );
+ ENM( imm->Immediate.DataType, immediate_type_names );
+
+ TXT( " { " );
+
+ assert( imm->Immediate.NrTokens <= 4 + 1 );
+ for (i = 0; i < imm->Immediate.NrTokens - 1; i++) {
+ switch (imm->Immediate.DataType) {
+ case TGSI_IMM_FLOAT32:
+ FLT( imm->u[i].Float );
+ break;
+ default:
+ assert( 0 );
+ }
+
+ if (i < imm->Immediate.NrTokens - 2)
+ TXT( ", " );
+ }
+ TXT( " }" );
+
+ EOL();
+
+ return TRUE;
+}
+
+void
+tgsi_dump_immediate(
+ const struct tgsi_full_immediate *imm )
+{
+ struct dump_ctx ctx;
+
+ ctx.printf = dump_ctx_printf;
+
+ iter_immediate( &ctx.iter, (struct tgsi_full_immediate *)imm );
+}
+
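+/* Print one instruction: instruction number, mnemonic with an optional
+ * saturate suffix, destination registers with write masks, source
+ * registers with swizzles and modifiers, then any texture target or
+ * branch label.
+ */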
+static boolean
+iter_instruction(
+ struct tgsi_iterate_context *iter,
+ struct tgsi_full_instruction *inst )
+{
+ struct dump_ctx *ctx = (struct dump_ctx *) iter;
+ uint instno = ctx->instno++;
+ const struct tgsi_opcode_info *info = tgsi_get_opcode_info( inst->Instruction.Opcode );
+ uint i;
+ boolean first_reg = TRUE;
+
+ INSTID( instno );
+ TXT( ": " );
+
+ ctx->indent -= info->pre_dedent;
+ for(i = 0; (int)i < ctx->indent; ++i)
+ TXT( " " );
+ ctx->indent += info->post_indent;
+
+ TXT( info->mnemonic );
+
+ switch (inst->Instruction.Saturate) {
+ case TGSI_SAT_NONE:
+ break;
+ case TGSI_SAT_ZERO_ONE:
+ TXT( "_SAT" );
+ break;
+ case TGSI_SAT_MINUS_PLUS_ONE:
+ TXT( "_SATNV" );
+ break;
+ default:
+ assert( 0 );
+ }
+
+ for (i = 0; i < inst->Instruction.NumDstRegs; i++) {
+ const struct tgsi_full_dst_register *dst = &inst->FullDstRegisters[i];
+
+ if (!first_reg)
+ CHR( ',' );
+ CHR( ' ' );
+
+ if (dst->DstRegister.Indirect) {
+ _dump_register_ind(
+ ctx,
+ dst->DstRegister.File,
+ dst->DstRegister.Index,
+ dst->DstRegisterInd.File,
+ dst->DstRegisterInd.Index,
+ dst->DstRegisterInd.SwizzleX );
+ }
+ else {
+ _dump_register(
+ ctx,
+ dst->DstRegister.File,
+ dst->DstRegister.Index,
+ dst->DstRegister.Index );
+ }
+ ENM( dst->DstRegisterExtModulate.Modulate, modulate_names );
+ _dump_writemask( ctx, dst->DstRegister.WriteMask );
+
+ first_reg = FALSE;
+ }
+
+ for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
+ const struct tgsi_full_src_register *src = &inst->FullSrcRegisters[i];
+
+ if (!first_reg)
+ CHR( ',' );
+ CHR( ' ' );
+
+ if (src->SrcRegisterExtMod.Negate)
+ TXT( "-(" );
+ if (src->SrcRegisterExtMod.Absolute)
+ CHR( '|' );
+ if (src->SrcRegisterExtMod.Scale2X)
+ TXT( "2*(" );
+ if (src->SrcRegisterExtMod.Bias)
+ CHR( '(' );
+ if (src->SrcRegisterExtMod.Complement)
+ TXT( "1-(" );
+ if (src->SrcRegister.Negate)
+ CHR( '-' );
+
+ if (src->SrcRegister.Indirect) {
+ _dump_register_ind(
+ ctx,
+ src->SrcRegister.File,
+ src->SrcRegister.Index,
+ src->SrcRegisterInd.File,
+ src->SrcRegisterInd.Index,
+ src->SrcRegisterInd.SwizzleX );
+ }
+ else {
+ _dump_register(
+ ctx,
+ src->SrcRegister.File,
+ src->SrcRegister.Index,
+ src->SrcRegister.Index );
+ }
+
+ if (src->SrcRegister.SwizzleX != TGSI_SWIZZLE_X ||
+ src->SrcRegister.SwizzleY != TGSI_SWIZZLE_Y ||
+ src->SrcRegister.SwizzleZ != TGSI_SWIZZLE_Z ||
+ src->SrcRegister.SwizzleW != TGSI_SWIZZLE_W) {
+ CHR( '.' );
+ ENM( src->SrcRegister.SwizzleX, swizzle_names );
+ ENM( src->SrcRegister.SwizzleY, swizzle_names );
+ ENM( src->SrcRegister.SwizzleZ, swizzle_names );
+ ENM( src->SrcRegister.SwizzleW, swizzle_names );
+ }
+ if (src->SrcRegisterExtSwz.ExtSwizzleX != TGSI_EXTSWIZZLE_X ||
+ src->SrcRegisterExtSwz.ExtSwizzleY != TGSI_EXTSWIZZLE_Y ||
+ src->SrcRegisterExtSwz.ExtSwizzleZ != TGSI_EXTSWIZZLE_Z ||
+ src->SrcRegisterExtSwz.ExtSwizzleW != TGSI_EXTSWIZZLE_W) {
+ CHR( '.' );
+ if (src->SrcRegisterExtSwz.NegateX)
+ TXT("-");
+ ENM( src->SrcRegisterExtSwz.ExtSwizzleX, extswizzle_names );
+ if (src->SrcRegisterExtSwz.NegateY)
+ TXT("-");
+ ENM( src->SrcRegisterExtSwz.ExtSwizzleY, extswizzle_names );
+ if (src->SrcRegisterExtSwz.NegateZ)
+ TXT("-");
+ ENM( src->SrcRegisterExtSwz.ExtSwizzleZ, extswizzle_names );
+ if (src->SrcRegisterExtSwz.NegateW)
+ TXT("-");
+ ENM( src->SrcRegisterExtSwz.ExtSwizzleW, extswizzle_names );
+ }
+
+ if (src->SrcRegisterExtMod.Complement)
+ CHR( ')' );
+ if (src->SrcRegisterExtMod.Bias)
+ TXT( ")-.5" );
+ if (src->SrcRegisterExtMod.Scale2X)
+ CHR( ')' );
+ if (src->SrcRegisterExtMod.Absolute)
+ CHR( '|' );
+ if (src->SrcRegisterExtMod.Negate)
+ CHR( ')' );
+
+ first_reg = FALSE;
+ }
+
+ if (inst->InstructionExtTexture.Texture != TGSI_TEXTURE_UNKNOWN) {
+ TXT( ", " );
+ ENM( inst->InstructionExtTexture.Texture, texture_names );
+ }
+
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_IF:
+ case TGSI_OPCODE_ELSE:
+ case TGSI_OPCODE_BGNLOOP:
+ case TGSI_OPCODE_ENDLOOP:
+ case TGSI_OPCODE_CAL:
+ TXT( " :" );
+ UID( inst->InstructionExtLabel.Label );
+ break;
+ }
+
+ /* update indentation */
+ if (inst->Instruction.Opcode == TGSI_OPCODE_IF ||
+ inst->Instruction.Opcode == TGSI_OPCODE_ELSE ||
+ inst->Instruction.Opcode == TGSI_OPCODE_BGNFOR ||
+ inst->Instruction.Opcode == TGSI_OPCODE_BGNLOOP) {
+ ctx->indentation += indent_spaces;
+ }
+
+ EOL();
+
+ return TRUE;
+}
+
+void
+tgsi_dump_instruction(
+ const struct tgsi_full_instruction *inst,
+ uint instno )
+{
+ struct dump_ctx ctx;
+
+ ctx.instno = instno;
+ ctx.indent = 0;
+ ctx.printf = dump_ctx_printf;
+ ctx.indentation = 0;
+
+ iter_instruction( &ctx.iter, (struct tgsi_full_instruction *)inst );
+}
+
+static boolean
+prolog(
+ struct tgsi_iterate_context *iter )
+{
+ struct dump_ctx *ctx = (struct dump_ctx *) iter;
+ ENM( iter->processor.Processor, processor_type_names );
+ UID( iter->version.MajorVersion );
+ CHR( '.' );
+ UID( iter->version.MinorVersion );
+ EOL();
+ return TRUE;
+}
+
+void
+tgsi_dump(
+ const struct tgsi_token *tokens,
+ uint flags )
+{
+ struct dump_ctx ctx;
+
+ ctx.iter.prolog = prolog;
+ ctx.iter.iterate_instruction = iter_instruction;
+ ctx.iter.iterate_declaration = iter_declaration;
+ ctx.iter.iterate_immediate = iter_immediate;
+ ctx.iter.epilog = NULL;
+
+ ctx.instno = 0;
+ ctx.indent = 0;
+ ctx.printf = dump_ctx_printf;
+ ctx.indentation = 0;
+
+ tgsi_iterate_shader( tokens, &ctx.iter );
+}
+
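+/* Variant of dump_ctx that accumulates the output into a fixed-size,
+ * caller-provided string instead of the debug log.
+ */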
+struct str_dump_ctx
+{
+ struct dump_ctx base;
+ char *str;
+ char *ptr;
+ int left;
+};
+
+static void
+str_dump_ctx_printf(struct dump_ctx *ctx, const char *format, ...)
+{
+ struct str_dump_ctx *sctx = (struct str_dump_ctx *)ctx;
+
+ if(sctx->left > 1) {
+ int written;
+ va_list ap;
+ va_start(ap, format);
+ written = util_vsnprintf(sctx->ptr, sctx->left, format, ap);
+ va_end(ap);
+
+ /* util_vsnprintf may return a negative value on error or on truncation
+ * (depending on the platform), or the number of characters that would
+ * have been written had the buffer been big enough; either way, clamp
+ * the advance to the space actually left.
+ */
+ if (written > 0) {
+ written = MIN2(sctx->left, written);
+ sctx->ptr += written;
+ sctx->left -= written;
+ }
+ }
+}
+
+void
+tgsi_dump_str(
+ const struct tgsi_token *tokens,
+ uint flags,
+ char *str,
+ size_t size)
+{
+ struct str_dump_ctx ctx;
+
+ ctx.base.iter.prolog = prolog;
+ ctx.base.iter.iterate_instruction = iter_instruction;
+ ctx.base.iter.iterate_declaration = iter_declaration;
+ ctx.base.iter.iterate_immediate = iter_immediate;
+ ctx.base.iter.epilog = NULL;
+
+ ctx.base.instno = 0;
+ ctx.base.indent = 0;
+ ctx.base.printf = &str_dump_ctx_printf;
+ ctx.base.indentation = 0;
+
+ ctx.str = str;
+ ctx.str[0] = 0;
+ ctx.ptr = str;
+ ctx.left = (int)size;
+
+ tgsi_iterate_shader( tokens, &ctx.base.iter );
+}
diff --git a/src/gallium/auxiliary/tgsi/tgsi_dump.h b/src/gallium/auxiliary/tgsi/tgsi_dump.h
new file mode 100644
index 0000000000..ad1e647ec9
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_dump.h
@@ -0,0 +1,70 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TGSI_DUMP_H
+#define TGSI_DUMP_H
+
+#include "pipe/p_shader_tokens.h"
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
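+/* Example (sketch; the buffer size is an arbitrary choice): dump a
+ * shader into a caller-provided buffer.  Output that does not fit is
+ * silently truncated.
+ *
+ *    char buf[4096];
+ *    tgsi_dump_str(tokens, 0, buf, sizeof(buf));
+ *    debug_printf("%s", buf);
+ */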
+void
+tgsi_dump_str(
+ const struct tgsi_token *tokens,
+ uint flags,
+ char *str,
+ size_t size);
+
+void
+tgsi_dump(
+ const struct tgsi_token *tokens,
+ uint flags );
+
+struct tgsi_full_immediate;
+struct tgsi_full_instruction;
+struct tgsi_full_declaration;
+
+void
+tgsi_dump_immediate(
+ const struct tgsi_full_immediate *imm );
+
+void
+tgsi_dump_instruction(
+ const struct tgsi_full_instruction *inst,
+ uint instno );
+
+void
+tgsi_dump_declaration(
+ const struct tgsi_full_declaration *decl );
+
+#if defined __cplusplus
+}
+#endif
+
+#endif /* TGSI_DUMP_H */
diff --git a/src/gallium/auxiliary/tgsi/tgsi_dump_c.c b/src/gallium/auxiliary/tgsi/tgsi_dump_c.c
new file mode 100644
index 0000000000..4a9c02b141
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_dump_c.c
@@ -0,0 +1,720 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_debug.h"
+#include "util/u_string.h"
+#include "tgsi_dump_c.h"
+#include "tgsi_build.h"
+#include "tgsi_info.h"
+#include "tgsi_parse.h"
+
+static void
+dump_enum(
+ const unsigned e,
+ const char **enums,
+ const unsigned enums_count )
+{
+ if (e >= enums_count) {
+ debug_printf( "%u", e );
+ }
+ else {
+ debug_printf( "%s", enums[e] );
+ }
+}
+
+#define EOL() debug_printf( "\n" )
+#define TXT(S) debug_printf( "%s", S )
+#define CHR(C) debug_printf( "%c", C )
+#define UIX(I) debug_printf( "0x%x", I )
+#define UID(I) debug_printf( "%u", I )
+#define SID(I) debug_printf( "%d", I )
+#define FLT(F) debug_printf( "%10.4f", F )
+#define ENM(E,ENUMS) dump_enum( E, ENUMS, sizeof( ENUMS ) / sizeof( *ENUMS ) )
+
+static const char *TGSI_PROCESSOR_TYPES[] =
+{
+ "PROCESSOR_FRAGMENT",
+ "PROCESSOR_VERTEX",
+ "PROCESSOR_GEOMETRY"
+};
+
+static const char *TGSI_TOKEN_TYPES[] =
+{
+ "TOKEN_TYPE_DECLARATION",
+ "TOKEN_TYPE_IMMEDIATE",
+ "TOKEN_TYPE_INSTRUCTION"
+};
+
+static const char *TGSI_FILES[TGSI_FILE_COUNT] =
+{
+ "FILE_NULL",
+ "FILE_CONSTANT",
+ "FILE_INPUT",
+ "FILE_OUTPUT",
+ "FILE_TEMPORARY",
+ "FILE_SAMPLER",
+ "FILE_ADDRESS",
+ "FILE_IMMEDIATE",
+ "FILE_LOOP"
+};
+
+static const char *TGSI_INTERPOLATES[] =
+{
+ "INTERPOLATE_CONSTANT",
+ "INTERPOLATE_LINEAR",
+ "INTERPOLATE_PERSPECTIVE"
+};
+
+static const char *TGSI_SEMANTICS[] =
+{
+ "SEMANTIC_POSITION",
+ "SEMANTIC_COLOR",
+ "SEMANTIC_BCOLOR",
+ "SEMANTIC_FOG",
+ "SEMANTIC_PSIZE",
+ "SEMANTIC_GENERIC",
+ "SEMANTIC_NORMAL",
+ "SEMANTIC_FACE"
+};
+
+static const char *TGSI_IMMS[] =
+{
+ "IMM_FLOAT32"
+};
+
+static const char *TGSI_SATS[] =
+{
+ "SAT_NONE",
+ "SAT_ZERO_ONE",
+ "SAT_MINUS_PLUS_ONE"
+};
+
+static const char *TGSI_INSTRUCTION_EXTS[] =
+{
+ "INSTRUCTION_EXT_TYPE_NV",
+ "INSTRUCTION_EXT_TYPE_LABEL",
+ "INSTRUCTION_EXT_TYPE_TEXTURE"
+};
+
+static const char *TGSI_PRECISIONS[] =
+{
+ "PRECISION_DEFAULT",
+ "PRECISION_FLOAT32",
+ "PRECISION_FLOAT16",
+ "PRECISION_FIXED12"
+};
+
+static const char *TGSI_CCS[] =
+{
+ "CC_GT",
+ "CC_EQ",
+ "CC_LT",
+ "CC_UN",
+ "CC_GE",
+ "CC_LE",
+ "CC_NE",
+ "CC_TR",
+ "CC_FL"
+};
+
+static const char *TGSI_SWIZZLES[] =
+{
+ "SWIZZLE_X",
+ "SWIZZLE_Y",
+ "SWIZZLE_Z",
+ "SWIZZLE_W"
+};
+
+static const char *TGSI_TEXTURES[] =
+{
+ "TEXTURE_UNKNOWN",
+ "TEXTURE_1D",
+ "TEXTURE_2D",
+ "TEXTURE_3D",
+ "TEXTURE_CUBE",
+ "TEXTURE_RECT",
+ "TEXTURE_SHADOW1D",
+ "TEXTURE_SHADOW2D",
+ "TEXTURE_SHADOWRECT"
+};
+
+static const char *TGSI_SRC_REGISTER_EXTS[] =
+{
+ "SRC_REGISTER_EXT_TYPE_SWZ",
+ "SRC_REGISTER_EXT_TYPE_MOD"
+};
+
+static const char *TGSI_EXTSWIZZLES[] =
+{
+ "EXTSWIZZLE_X",
+ "EXTSWIZZLE_Y",
+ "EXTSWIZZLE_Z",
+ "EXTSWIZZLE_W",
+ "EXTSWIZZLE_ZERO",
+ "EXTSWIZZLE_ONE"
+};
+
+static const char *TGSI_WRITEMASKS[] =
+{
+ "0",
+ "WRITEMASK_X",
+ "WRITEMASK_Y",
+ "WRITEMASK_XY",
+ "WRITEMASK_Z",
+ "WRITEMASK_XZ",
+ "WRITEMASK_YZ",
+ "WRITEMASK_XYZ",
+ "WRITEMASK_W",
+ "WRITEMASK_XW",
+ "WRITEMASK_YW",
+ "WRITEMASK_XYW",
+ "WRITEMASK_ZW",
+ "WRITEMASK_XZW",
+ "WRITEMASK_YZW",
+ "WRITEMASK_XYZW"
+};
+
+static const char *TGSI_DST_REGISTER_EXTS[] =
+{
+ "DST_REGISTER_EXT_TYPE_CONDCODE",
+ "DST_REGISTER_EXT_TYPE_MODULATE"
+};
+
+static const char *TGSI_MODULATES[] =
+{
+ "MODULATE_1X",
+ "MODULATE_2X",
+ "MODULATE_4X",
+ "MODULATE_8X",
+ "MODULATE_HALF",
+ "MODULATE_QUARTER",
+ "MODULATE_EIGHTH"
+};
+
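+/* The *_verbose dumpers below print the parsed tokens field by field.
+ * 'ignored' additionally shows padding/extended bits, and 'deflt'
+ * forces fields to be printed even when they match the defaults held
+ * in 'fd'/'fi'.
+ */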
+static void
+dump_declaration_verbose(
+ struct tgsi_full_declaration *decl,
+ unsigned ignored,
+ unsigned deflt,
+ struct tgsi_full_declaration *fd )
+{
+ TXT( "\nFile : " );
+ ENM( decl->Declaration.File, TGSI_FILES );
+ if( deflt || fd->Declaration.UsageMask != decl->Declaration.UsageMask ) {
+ TXT( "\nUsageMask : " );
+ if( decl->Declaration.UsageMask & TGSI_WRITEMASK_X ) {
+ CHR( 'X' );
+ }
+ if( decl->Declaration.UsageMask & TGSI_WRITEMASK_Y ) {
+ CHR( 'Y' );
+ }
+ if( decl->Declaration.UsageMask & TGSI_WRITEMASK_Z ) {
+ CHR( 'Z' );
+ }
+ if( decl->Declaration.UsageMask & TGSI_WRITEMASK_W ) {
+ CHR( 'W' );
+ }
+ }
+ if( deflt || fd->Declaration.Interpolate != decl->Declaration.Interpolate ) {
+ TXT( "\nInterpolate: " );
+ ENM( decl->Declaration.Interpolate, TGSI_INTERPOLATES );
+ }
+ if( deflt || fd->Declaration.Semantic != decl->Declaration.Semantic ) {
+ TXT( "\nSemantic : " );
+ UID( decl->Declaration.Semantic );
+ }
+ if( ignored ) {
+ TXT( "\nPadding : " );
+ UIX( decl->Declaration.Padding );
+ }
+
+ EOL();
+ TXT( "\nFirst: " );
+ UID( decl->DeclarationRange.First );
+ TXT( "\nLast : " );
+ UID( decl->DeclarationRange.Last );
+
+ if( decl->Declaration.Semantic ) {
+ EOL();
+ TXT( "\nSemanticName : " );
+ ENM( decl->Semantic.SemanticName, TGSI_SEMANTICS );
+ TXT( "\nSemanticIndex: " );
+ UID( decl->Semantic.SemanticIndex );
+ if( ignored ) {
+ TXT( "\nPadding : " );
+ UIX( decl->Semantic.Padding );
+ }
+ }
+}
+
+static void
+dump_immediate_verbose(
+ struct tgsi_full_immediate *imm,
+ unsigned ignored )
+{
+ unsigned i;
+
+ TXT( "\nDataType : " );
+ ENM( imm->Immediate.DataType, TGSI_IMMS );
+ if( ignored ) {
+ TXT( "\nPadding : " );
+ UIX( imm->Immediate.Padding );
+ }
+
+ assert( imm->Immediate.NrTokens <= 4 + 1 );
+ for( i = 0; i < imm->Immediate.NrTokens - 1; i++ ) {
+ EOL();
+ switch( imm->Immediate.DataType ) {
+ case TGSI_IMM_FLOAT32:
+ TXT( "\nFloat: " );
+ FLT( imm->u[i].Float );
+ break;
+
+ default:
+ assert( 0 );
+ }
+ }
+}
+
+static void
+dump_instruction_verbose(
+ struct tgsi_full_instruction *inst,
+ unsigned ignored,
+ unsigned deflt,
+ struct tgsi_full_instruction *fi )
+{
+ unsigned i;
+
+ TXT( "\nOpcode : OPCODE_" );
+ TXT( tgsi_get_opcode_info( inst->Instruction.Opcode )->mnemonic );
+ if( deflt || fi->Instruction.Saturate != inst->Instruction.Saturate ) {
+ TXT( "\nSaturate : " );
+ ENM( inst->Instruction.Saturate, TGSI_SATS );
+ }
+ if( deflt || fi->Instruction.NumDstRegs != inst->Instruction.NumDstRegs ) {
+ TXT( "\nNumDstRegs : " );
+ UID( inst->Instruction.NumDstRegs );
+ }
+ if( deflt || fi->Instruction.NumSrcRegs != inst->Instruction.NumSrcRegs ) {
+ TXT( "\nNumSrcRegs : " );
+ UID( inst->Instruction.NumSrcRegs );
+ }
+ if( ignored ) {
+ TXT( "\nPadding : " );
+ UIX( inst->Instruction.Padding );
+ }
+
+ if( deflt || tgsi_compare_instruction_ext_nv( inst->InstructionExtNv, fi->InstructionExtNv ) ) {
+ EOL();
+ TXT( "\nType : " );
+ ENM( inst->InstructionExtNv.Type, TGSI_INSTRUCTION_EXTS );
+ if( deflt || fi->InstructionExtNv.Precision != inst->InstructionExtNv.Precision ) {
+ TXT( "\nPrecision : " );
+ ENM( inst->InstructionExtNv.Precision, TGSI_PRECISIONS );
+ }
+ if( deflt || fi->InstructionExtNv.CondDstIndex != inst->InstructionExtNv.CondDstIndex ) {
+ TXT( "\nCondDstIndex : " );
+ UID( inst->InstructionExtNv.CondDstIndex );
+ }
+ if( deflt || fi->InstructionExtNv.CondFlowIndex != inst->InstructionExtNv.CondFlowIndex ) {
+ TXT( "\nCondFlowIndex : " );
+ UID( inst->InstructionExtNv.CondFlowIndex );
+ }
+ if( deflt || fi->InstructionExtNv.CondMask != inst->InstructionExtNv.CondMask ) {
+ TXT( "\nCondMask : " );
+ ENM( inst->InstructionExtNv.CondMask, TGSI_CCS );
+ }
+ if( deflt || fi->InstructionExtNv.CondSwizzleX != inst->InstructionExtNv.CondSwizzleX ) {
+ TXT( "\nCondSwizzleX : " );
+ ENM( inst->InstructionExtNv.CondSwizzleX, TGSI_SWIZZLES );
+ }
+ if( deflt || fi->InstructionExtNv.CondSwizzleY != inst->InstructionExtNv.CondSwizzleY ) {
+ TXT( "\nCondSwizzleY : " );
+ ENM( inst->InstructionExtNv.CondSwizzleY, TGSI_SWIZZLES );
+ }
+ if( deflt || fi->InstructionExtNv.CondSwizzleZ != inst->InstructionExtNv.CondSwizzleZ ) {
+ TXT( "\nCondSwizzleZ : " );
+ ENM( inst->InstructionExtNv.CondSwizzleZ, TGSI_SWIZZLES );
+ }
+ if( deflt || fi->InstructionExtNv.CondSwizzleW != inst->InstructionExtNv.CondSwizzleW ) {
+ TXT( "\nCondSwizzleW : " );
+ ENM( inst->InstructionExtNv.CondSwizzleW, TGSI_SWIZZLES );
+ }
+ if( deflt || fi->InstructionExtNv.CondDstUpdate != inst->InstructionExtNv.CondDstUpdate ) {
+ TXT( "\nCondDstUpdate : " );
+ UID( inst->InstructionExtNv.CondDstUpdate );
+ }
+ if( deflt || fi->InstructionExtNv.CondFlowEnable != inst->InstructionExtNv.CondFlowEnable ) {
+ TXT( "\nCondFlowEnable: " );
+ UID( inst->InstructionExtNv.CondFlowEnable );
+ }
+ if( ignored ) {
+ TXT( "\nPadding : " );
+ UIX( inst->InstructionExtNv.Padding );
+ if( deflt || fi->InstructionExtNv.Extended != inst->InstructionExtNv.Extended ) {
+ TXT( "\nExtended : " );
+ UID( inst->InstructionExtNv.Extended );
+ }
+ }
+ }
+
+ if( deflt || tgsi_compare_instruction_ext_label( inst->InstructionExtLabel, fi->InstructionExtLabel ) ) {
+ EOL();
+ TXT( "\nType : " );
+ ENM( inst->InstructionExtLabel.Type, TGSI_INSTRUCTION_EXTS );
+ if( deflt || fi->InstructionExtLabel.Label != inst->InstructionExtLabel.Label ) {
+ TXT( "\nLabel : " );
+ UID( inst->InstructionExtLabel.Label );
+ }
+ if( ignored ) {
+ TXT( "\nPadding : " );
+ UIX( inst->InstructionExtLabel.Padding );
+ if( deflt || fi->InstructionExtLabel.Extended != inst->InstructionExtLabel.Extended ) {
+ TXT( "\nExtended: " );
+ UID( inst->InstructionExtLabel.Extended );
+ }
+ }
+ }
+
+ if( deflt || tgsi_compare_instruction_ext_texture( inst->InstructionExtTexture, fi->InstructionExtTexture ) ) {
+ EOL();
+ TXT( "\nType : " );
+ ENM( inst->InstructionExtTexture.Type, TGSI_INSTRUCTION_EXTS );
+ if( deflt || fi->InstructionExtTexture.Texture != inst->InstructionExtTexture.Texture ) {
+ TXT( "\nTexture : " );
+ ENM( inst->InstructionExtTexture.Texture, TGSI_TEXTURES );
+ }
+ if( ignored ) {
+ TXT( "\nPadding : " );
+ UIX( inst->InstructionExtTexture.Padding );
+ if( deflt || fi->InstructionExtTexture.Extended != inst->InstructionExtTexture.Extended ) {
+ TXT( "\nExtended: " );
+ UID( inst->InstructionExtTexture.Extended );
+ }
+ }
+ }
+
+ for( i = 0; i < inst->Instruction.NumDstRegs; i++ ) {
+ struct tgsi_full_dst_register *dst = &inst->FullDstRegisters[i];
+ struct tgsi_full_dst_register *fd = &fi->FullDstRegisters[i];
+
+ EOL();
+ TXT( "\nFile : " );
+ ENM( dst->DstRegister.File, TGSI_FILES );
+ if( deflt || fd->DstRegister.WriteMask != dst->DstRegister.WriteMask ) {
+ TXT( "\nWriteMask: " );
+ ENM( dst->DstRegister.WriteMask, TGSI_WRITEMASKS );
+ }
+ if( ignored ) {
+ if( deflt || fd->DstRegister.Indirect != dst->DstRegister.Indirect ) {
+ TXT( "\nIndirect : " );
+ UID( dst->DstRegister.Indirect );
+ }
+ if( deflt || fd->DstRegister.Dimension != dst->DstRegister.Dimension ) {
+ TXT( "\nDimension: " );
+ UID( dst->DstRegister.Dimension );
+ }
+ }
+ if( deflt || fd->DstRegister.Index != dst->DstRegister.Index ) {
+ TXT( "\nIndex : " );
+ SID( dst->DstRegister.Index );
+ }
+ if( ignored ) {
+ TXT( "\nPadding : " );
+ UIX( dst->DstRegister.Padding );
+ if( deflt || fd->DstRegister.Extended != dst->DstRegister.Extended ) {
+ TXT( "\nExtended : " );
+ UID( dst->DstRegister.Extended );
+ }
+ }
+
+ if( deflt || tgsi_compare_dst_register_ext_concode( dst->DstRegisterExtConcode, fd->DstRegisterExtConcode ) ) {
+ EOL();
+ TXT( "\nType : " );
+ ENM( dst->DstRegisterExtConcode.Type, TGSI_DST_REGISTER_EXTS );
+ if( deflt || fd->DstRegisterExtConcode.CondMask != dst->DstRegisterExtConcode.CondMask ) {
+ TXT( "\nCondMask : " );
+ ENM( dst->DstRegisterExtConcode.CondMask, TGSI_CCS );
+ }
+ if( deflt || fd->DstRegisterExtConcode.CondSwizzleX != dst->DstRegisterExtConcode.CondSwizzleX ) {
+ TXT( "\nCondSwizzleX: " );
+ ENM( dst->DstRegisterExtConcode.CondSwizzleX, TGSI_SWIZZLES );
+ }
+ if( deflt || fd->DstRegisterExtConcode.CondSwizzleY != dst->DstRegisterExtConcode.CondSwizzleY ) {
+ TXT( "\nCondSwizzleY: " );
+ ENM( dst->DstRegisterExtConcode.CondSwizzleY, TGSI_SWIZZLES );
+ }
+ if( deflt || fd->DstRegisterExtConcode.CondSwizzleZ != dst->DstRegisterExtConcode.CondSwizzleZ ) {
+ TXT( "\nCondSwizzleZ: " );
+ ENM( dst->DstRegisterExtConcode.CondSwizzleZ, TGSI_SWIZZLES );
+ }
+ if( deflt || fd->DstRegisterExtConcode.CondSwizzleW != dst->DstRegisterExtConcode.CondSwizzleW ) {
+ TXT( "\nCondSwizzleW: " );
+ ENM( dst->DstRegisterExtConcode.CondSwizzleW, TGSI_SWIZZLES );
+ }
+ if( deflt || fd->DstRegisterExtConcode.CondSrcIndex != dst->DstRegisterExtConcode.CondSrcIndex ) {
+ TXT( "\nCondSrcIndex: " );
+ UID( dst->DstRegisterExtConcode.CondSrcIndex );
+ }
+ if( ignored ) {
+ TXT( "\nPadding : " );
+ UIX( dst->DstRegisterExtConcode.Padding );
+ if( deflt || fd->DstRegisterExtConcode.Extended != dst->DstRegisterExtConcode.Extended ) {
+ TXT( "\nExtended : " );
+ UID( dst->DstRegisterExtConcode.Extended );
+ }
+ }
+ }
+
+ if( deflt || tgsi_compare_dst_register_ext_modulate( dst->DstRegisterExtModulate, fd->DstRegisterExtModulate ) ) {
+ EOL();
+ TXT( "\nType : " );
+ ENM( dst->DstRegisterExtModulate.Type, TGSI_DST_REGISTER_EXTS );
+ if( deflt || fd->DstRegisterExtModulate.Modulate != dst->DstRegisterExtModulate.Modulate ) {
+ TXT( "\nModulate: " );
+ ENM( dst->DstRegisterExtModulate.Modulate, TGSI_MODULATES );
+ }
+ if( ignored ) {
+ TXT( "\nPadding : " );
+ UIX( dst->DstRegisterExtModulate.Padding );
+ if( deflt || fd->DstRegisterExtModulate.Extended != dst->DstRegisterExtModulate.Extended ) {
+ TXT( "\nExtended: " );
+ UID( dst->DstRegisterExtModulate.Extended );
+ }
+ }
+ }
+ }
+
+ for( i = 0; i < inst->Instruction.NumSrcRegs; i++ ) {
+ struct tgsi_full_src_register *src = &inst->FullSrcRegisters[i];
+ struct tgsi_full_src_register *fs = &fi->FullSrcRegisters[i];
+
+ EOL();
+ TXT( "\nFile : " );
+ ENM( src->SrcRegister.File, TGSI_FILES );
+ if( deflt || fs->SrcRegister.SwizzleX != src->SrcRegister.SwizzleX ) {
+ TXT( "\nSwizzleX : " );
+ ENM( src->SrcRegister.SwizzleX, TGSI_SWIZZLES );
+ }
+ if( deflt || fs->SrcRegister.SwizzleY != src->SrcRegister.SwizzleY ) {
+ TXT( "\nSwizzleY : " );
+ ENM( src->SrcRegister.SwizzleY, TGSI_SWIZZLES );
+ }
+ if( deflt || fs->SrcRegister.SwizzleZ != src->SrcRegister.SwizzleZ ) {
+ TXT( "\nSwizzleZ : " );
+ ENM( src->SrcRegister.SwizzleZ, TGSI_SWIZZLES );
+ }
+ if( deflt || fs->SrcRegister.SwizzleW != src->SrcRegister.SwizzleW ) {
+ TXT( "\nSwizzleW : " );
+ ENM( src->SrcRegister.SwizzleW, TGSI_SWIZZLES );
+ }
+ if( deflt || fs->SrcRegister.Negate != src->SrcRegister.Negate ) {
+ TXT( "\nNegate : " );
+ UID( src->SrcRegister.Negate );
+ }
+ if( ignored ) {
+ if( deflt || fs->SrcRegister.Indirect != src->SrcRegister.Indirect ) {
+ TXT( "\nIndirect : " );
+ UID( src->SrcRegister.Indirect );
+ }
+ if( deflt || fs->SrcRegister.Dimension != src->SrcRegister.Dimension ) {
+ TXT( "\nDimension: " );
+ UID( src->SrcRegister.Dimension );
+ }
+ }
+ if( deflt || fs->SrcRegister.Index != src->SrcRegister.Index ) {
+ TXT( "\nIndex : " );
+ SID( src->SrcRegister.Index );
+ }
+ if( ignored ) {
+ if( deflt || fs->SrcRegister.Extended != src->SrcRegister.Extended ) {
+ TXT( "\nExtended : " );
+ UID( src->SrcRegister.Extended );
+ }
+ }
+
+ if( deflt || tgsi_compare_src_register_ext_swz( src->SrcRegisterExtSwz, fs->SrcRegisterExtSwz ) ) {
+ EOL();
+ TXT( "\nType : " );
+ ENM( src->SrcRegisterExtSwz.Type, TGSI_SRC_REGISTER_EXTS );
+ if( deflt || fs->SrcRegisterExtSwz.ExtSwizzleX != src->SrcRegisterExtSwz.ExtSwizzleX ) {
+ TXT( "\nExtSwizzleX: " );
+ ENM( src->SrcRegisterExtSwz.ExtSwizzleX, TGSI_EXTSWIZZLES );
+ }
+ if( deflt || fs->SrcRegisterExtSwz.ExtSwizzleY != src->SrcRegisterExtSwz.ExtSwizzleY ) {
+ TXT( "\nExtSwizzleY: " );
+ ENM( src->SrcRegisterExtSwz.ExtSwizzleY, TGSI_EXTSWIZZLES );
+ }
+ if( deflt || fs->SrcRegisterExtSwz.ExtSwizzleZ != src->SrcRegisterExtSwz.ExtSwizzleZ ) {
+ TXT( "\nExtSwizzleZ: " );
+ ENM( src->SrcRegisterExtSwz.ExtSwizzleZ, TGSI_EXTSWIZZLES );
+ }
+ if( deflt || fs->SrcRegisterExtSwz.ExtSwizzleW != src->SrcRegisterExtSwz.ExtSwizzleW ) {
+ TXT( "\nExtSwizzleW: " );
+ ENM( src->SrcRegisterExtSwz.ExtSwizzleW, TGSI_EXTSWIZZLES );
+ }
+ if( deflt || fs->SrcRegisterExtSwz.NegateX != src->SrcRegisterExtSwz.NegateX ) {
+ TXT( "\nNegateX : " );
+ UID( src->SrcRegisterExtSwz.NegateX );
+ }
+ if( deflt || fs->SrcRegisterExtSwz.NegateY != src->SrcRegisterExtSwz.NegateY ) {
+ TXT( "\nNegateY : " );
+ UID( src->SrcRegisterExtSwz.NegateY );
+ }
+ if( deflt || fs->SrcRegisterExtSwz.NegateZ != src->SrcRegisterExtSwz.NegateZ ) {
+ TXT( "\nNegateZ : " );
+ UID( src->SrcRegisterExtSwz.NegateZ );
+ }
+ if( deflt || fs->SrcRegisterExtSwz.NegateW != src->SrcRegisterExtSwz.NegateW ) {
+ TXT( "\nNegateW : " );
+ UID( src->SrcRegisterExtSwz.NegateW );
+ }
+ if( ignored ) {
+ TXT( "\nPadding : " );
+ UIX( src->SrcRegisterExtSwz.Padding );
+ if( deflt || fs->SrcRegisterExtSwz.Extended != src->SrcRegisterExtSwz.Extended ) {
+ TXT( "\nExtended : " );
+ UID( src->SrcRegisterExtSwz.Extended );
+ }
+ }
+ }
+
+ if( deflt || tgsi_compare_src_register_ext_mod( src->SrcRegisterExtMod, fs->SrcRegisterExtMod ) ) {
+ EOL();
+ TXT( "\nType : " );
+ ENM( src->SrcRegisterExtMod.Type, TGSI_SRC_REGISTER_EXTS );
+ if( deflt || fs->SrcRegisterExtMod.Complement != src->SrcRegisterExtMod.Complement ) {
+ TXT( "\nComplement: " );
+ UID( src->SrcRegisterExtMod.Complement );
+ }
+ if( deflt || fs->SrcRegisterExtMod.Bias != src->SrcRegisterExtMod.Bias ) {
+ TXT( "\nBias : " );
+ UID( src->SrcRegisterExtMod.Bias );
+ }
+ if( deflt || fs->SrcRegisterExtMod.Scale2X != src->SrcRegisterExtMod.Scale2X ) {
+ TXT( "\nScale2X : " );
+ UID( src->SrcRegisterExtMod.Scale2X );
+ }
+ if( deflt || fs->SrcRegisterExtMod.Absolute != src->SrcRegisterExtMod.Absolute ) {
+ TXT( "\nAbsolute : " );
+ UID( src->SrcRegisterExtMod.Absolute );
+ }
+ if( deflt || fs->SrcRegisterExtMod.Negate != src->SrcRegisterExtMod.Negate ) {
+ TXT( "\nNegate : " );
+ UID( src->SrcRegisterExtMod.Negate );
+ }
+ if( ignored ) {
+ TXT( "\nPadding : " );
+ UIX( src->SrcRegisterExtMod.Padding );
+ if( deflt || fs->SrcRegisterExtMod.Extended != src->SrcRegisterExtMod.Extended ) {
+ TXT( "\nExtended : " );
+ UID( src->SrcRegisterExtMod.Extended );
+ }
+ }
+ }
+ }
+}
+
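+/* Walk the token stream with the tgsi_parse interface and print every
+ * declaration, immediate and instruction in the verbose, field-by-field
+ * format above.
+ */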
+void
+tgsi_dump_c(
+ const struct tgsi_token *tokens,
+ uint flags )
+{
+ struct tgsi_parse_context parse;
+ struct tgsi_full_instruction fi;
+ struct tgsi_full_declaration fd;
+ uint ignored = flags & TGSI_DUMP_C_IGNORED;
+ uint deflt = flags & TGSI_DUMP_C_DEFAULT;
+
+ tgsi_parse_init( &parse, tokens );
+
+ TXT( "tgsi-dump begin -----------------" );
+
+ TXT( "\nMajorVersion: " );
+ UID( parse.FullVersion.Version.MajorVersion );
+ TXT( "\nMinorVersion: " );
+ UID( parse.FullVersion.Version.MinorVersion );
+ EOL();
+
+ TXT( "\nHeaderSize: " );
+ UID( parse.FullHeader.Header.HeaderSize );
+ TXT( "\nBodySize : " );
+ UID( parse.FullHeader.Header.BodySize );
+ TXT( "\nProcessor : " );
+ ENM( parse.FullHeader.Processor.Processor, TGSI_PROCESSOR_TYPES );
+ EOL();
+
+ fi = tgsi_default_full_instruction();
+ fd = tgsi_default_full_declaration();
+
+ while( !tgsi_parse_end_of_tokens( &parse ) ) {
+ tgsi_parse_token( &parse );
+
+ TXT( "\nType : " );
+ ENM( parse.FullToken.Token.Type, TGSI_TOKEN_TYPES );
+ if( ignored ) {
+ TXT( "\nSize : " );
+ UID( parse.FullToken.Token.NrTokens );
+ if( deflt || parse.FullToken.Token.Extended ) {
+ TXT( "\nExtended : " );
+ UID( parse.FullToken.Token.Extended );
+ }
+ }
+
+ switch( parse.FullToken.Token.Type ) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ dump_declaration_verbose(
+ &parse.FullToken.FullDeclaration,
+ ignored,
+ deflt,
+ &fd );
+ break;
+
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ dump_immediate_verbose(
+ &parse.FullToken.FullImmediate,
+ ignored );
+ break;
+
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ dump_instruction_verbose(
+ &parse.FullToken.FullInstruction,
+ ignored,
+ deflt,
+ &fi );
+ break;
+
+ default:
+ assert( 0 );
+ }
+
+ EOL();
+ }
+
+ TXT( "\ntgsi-dump end -------------------\n" );
+
+ tgsi_parse_free( &parse );
+}
diff --git a/src/gallium/auxiliary/tgsi/tgsi_dump_c.h b/src/gallium/auxiliary/tgsi/tgsi_dump_c.h
new file mode 100644
index 0000000000..d91cd35b3b
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_dump_c.h
@@ -0,0 +1,49 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TGSI_DUMP_C_H
+#define TGSI_DUMP_C_H
+
+#include "pipe/p_shader_tokens.h"
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+#define TGSI_DUMP_C_IGNORED 1
+#define TGSI_DUMP_C_DEFAULT 2
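+
+/* TGSI_DUMP_C_IGNORED also prints padding/ignored fields, and
+ * TGSI_DUMP_C_DEFAULT prints fields even when they still hold their
+ * default values, e.g.
+ * tgsi_dump_c( tokens, TGSI_DUMP_C_IGNORED | TGSI_DUMP_C_DEFAULT );
+ */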
+
+void
+tgsi_dump_c(
+ const struct tgsi_token *tokens,
+ uint flags );
+
+#if defined __cplusplus
+}
+#endif
+
+#endif /* TGSI_DUMP_C_H */
diff --git a/src/gallium/auxiliary/tgsi/tgsi_exec.c b/src/gallium/auxiliary/tgsi/tgsi_exec.c
new file mode 100644
index 0000000000..c79c56debd
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_exec.c
@@ -0,0 +1,3311 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * TGSI interpreter/executor.
+ *
+ * Flow control information:
+ *
+ * Since we operate on 'quads' (4 pixels or 4 vertices in parallel),
+ * flow control statements (IF/ELSE/ENDIF, LOOP/ENDLOOP) require special
+ * care since a condition may be true for some quad components but false
+ * for other components.
+ *
+ * We basically execute all statements (even if they're in the part of
+ * an IF/ELSE clause that's "not taken") and use a special mask to
+ * control writing to destination registers. This is the ExecMask.
+ * See store_dest().
+ *
+ * The ExecMask is computed from three other masks (CondMask, LoopMask and
+ * ContMask) which are controlled by the flow control instructions (namely:
+ * IF/ELSE/ENDIF, LOOP/ENDLOOP and CONT).
+ *
+ *
+ * Authors:
+ * Michal Krol
+ * Brian Paul
+ */
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_state.h"
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_dump.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_util.h"
+#include "tgsi_exec.h"
+#include "util/u_memory.h"
+#include "util/u_math.h"
+
+#define FAST_MATH 1
+
+/** for tgsi_full_instruction::Flags */
+#define SOA_DEPENDENCY_FLAG 0x1
+
+#define TILE_TOP_LEFT 0
+#define TILE_TOP_RIGHT 1
+#define TILE_BOTTOM_LEFT 2
+#define TILE_BOTTOM_RIGHT 3
+
+#define CHAN_X 0
+#define CHAN_Y 1
+#define CHAN_Z 2
+#define CHAN_W 3
+
+/*
+ * Shorthand locations of various utility registers (_I = Index, _C = Channel)
+ */
+#define TEMP_0_I TGSI_EXEC_TEMP_00000000_I
+#define TEMP_0_C TGSI_EXEC_TEMP_00000000_C
+#define TEMP_7F_I TGSI_EXEC_TEMP_7FFFFFFF_I
+#define TEMP_7F_C TGSI_EXEC_TEMP_7FFFFFFF_C
+#define TEMP_80_I TGSI_EXEC_TEMP_80000000_I
+#define TEMP_80_C TGSI_EXEC_TEMP_80000000_C
+#define TEMP_FF_I TGSI_EXEC_TEMP_FFFFFFFF_I
+#define TEMP_FF_C TGSI_EXEC_TEMP_FFFFFFFF_C
+#define TEMP_1_I TGSI_EXEC_TEMP_ONE_I
+#define TEMP_1_C TGSI_EXEC_TEMP_ONE_C
+#define TEMP_2_I TGSI_EXEC_TEMP_TWO_I
+#define TEMP_2_C TGSI_EXEC_TEMP_TWO_C
+#define TEMP_128_I TGSI_EXEC_TEMP_128_I
+#define TEMP_128_C TGSI_EXEC_TEMP_128_C
+#define TEMP_M128_I TGSI_EXEC_TEMP_MINUS_128_I
+#define TEMP_M128_C TGSI_EXEC_TEMP_MINUS_128_C
+#define TEMP_KILMASK_I TGSI_EXEC_TEMP_KILMASK_I
+#define TEMP_KILMASK_C TGSI_EXEC_TEMP_KILMASK_C
+#define TEMP_OUTPUT_I TGSI_EXEC_TEMP_OUTPUT_I
+#define TEMP_OUTPUT_C TGSI_EXEC_TEMP_OUTPUT_C
+#define TEMP_PRIMITIVE_I TGSI_EXEC_TEMP_PRIMITIVE_I
+#define TEMP_PRIMITIVE_C TGSI_EXEC_TEMP_PRIMITIVE_C
+#define TEMP_CC_I TGSI_EXEC_TEMP_CC_I
+#define TEMP_CC_C TGSI_EXEC_TEMP_CC_C
+#define TEMP_3_I TGSI_EXEC_TEMP_THREE_I
+#define TEMP_3_C TGSI_EXEC_TEMP_THREE_C
+#define TEMP_HALF_I TGSI_EXEC_TEMP_HALF_I
+#define TEMP_HALF_C TGSI_EXEC_TEMP_HALF_C
+#define TEMP_R0 TGSI_EXEC_TEMP_R0
+
+#define IS_CHANNEL_ENABLED(INST, CHAN)\
+ ((INST).FullDstRegisters[0].DstRegister.WriteMask & (1 << (CHAN)))
+
+#define IS_CHANNEL_ENABLED2(INST, CHAN)\
+ ((INST).FullDstRegisters[1].DstRegister.WriteMask & (1 << (CHAN)))
+
+#define FOR_EACH_ENABLED_CHANNEL(INST, CHAN)\
+ for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)\
+ if (IS_CHANNEL_ENABLED( INST, CHAN ))
+
+#define FOR_EACH_ENABLED_CHANNEL2(INST, CHAN)\
+ for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)\
+ if (IS_CHANNEL_ENABLED2( INST, CHAN ))
+
+
+/** The execution mask depends on the conditional mask and the loop mask */
+#define UPDATE_EXEC_MASK(MACH) \
+ MACH->ExecMask = MACH->CondMask & MACH->LoopMask & MACH->ContMask & MACH->FuncMask
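+
+/* A minimal illustrative sketch (compiled out, not part of the
+ * interpreter): how the four masks combine for a hypothetical IF that
+ * is true only for pixels 0 and 3 of a quad. Bit i corresponds to
+ * pixel i of the quad.
+ */
+#if 0
+static void
+example_update_exec_mask( void )
+{
+ uint CondMask = 0x9; /* IF condition true for pixels 0 and 3 only */
+ uint LoopMask = 0xf; /* not inside a LOOP: all pixels enabled */
+ uint ContMask = 0xf; /* no CONT has been executed */
+ uint FuncMask = 0xf; /* not inside a subroutine */
+ uint ExecMask = CondMask & LoopMask & ContMask & FuncMask;
+ /* ExecMask == 0x9, so store_dest() writes only pixels 0 and 3 */
+ (void) ExecMask;
+}
+#endif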
+
+
+static const union tgsi_exec_channel ZeroVec =
+ { { 0.0, 0.0, 0.0, 0.0 } };
+
+
+#ifdef DEBUG
+static void
+check_inf_or_nan(const union tgsi_exec_channel *chan)
+{
+ assert(!util_is_inf_or_nan(chan->f[0]));
+ assert(!util_is_inf_or_nan(chan->f[1]));
+ assert(!util_is_inf_or_nan(chan->f[2]));
+ assert(!util_is_inf_or_nan(chan->f[3]));
+}
+#endif
+
+
+#ifdef DEBUG
+static void
+print_chan(const char *msg, const union tgsi_exec_channel *chan)
+{
+ debug_printf("%s = {%f, %f, %f, %f}\n",
+ msg, chan->f[0], chan->f[1], chan->f[2], chan->f[3]);
+}
+#endif
+
+
+#ifdef DEBUG
+static void
+print_temp(const struct tgsi_exec_machine *mach, uint index)
+{
+ const struct tgsi_exec_vector *tmp = &mach->Temps[index];
+ int i;
+ debug_printf("Temp[%u] =\n", index);
+ for (i = 0; i < 4; i++) {
+ debug_printf(" %c: { %f, %f, %f, %f }\n",
+ "XYZW"[i],
+ tmp->xyzw[i].f[0],
+ tmp->xyzw[i].f[1],
+ tmp->xyzw[i].f[2],
+ tmp->xyzw[i].f[3]);
+ }
+}
+#endif
+
+
+/**
+ * Check if there's a potential src/dst register data dependency when
+ * using SOA execution.
+ * Example:
+ * MOV T, T.yxwz;
+ * This would expand into:
+ * MOV t0, t1;
+ * MOV t1, t0;
+ * MOV t2, t3;
+ * MOV t3, t2;
+ * The second instruction will have the wrong value for t0 if executed as-is.
+ */
+boolean
+tgsi_check_soa_dependencies(const struct tgsi_full_instruction *inst)
+{
+ uint i, chan;
+
+ uint writemask = inst->FullDstRegisters[0].DstRegister.WriteMask;
+ if (writemask == TGSI_WRITEMASK_X ||
+ writemask == TGSI_WRITEMASK_Y ||
+ writemask == TGSI_WRITEMASK_Z ||
+ writemask == TGSI_WRITEMASK_W ||
+ writemask == TGSI_WRITEMASK_NONE) {
+ /* no chance of data dependency */
+ return FALSE;
+ }
+
+ /* loop over src regs */
+ for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
+ if ((inst->FullSrcRegisters[i].SrcRegister.File ==
+ inst->FullDstRegisters[0].DstRegister.File) &&
+ (inst->FullSrcRegisters[i].SrcRegister.Index ==
+ inst->FullDstRegisters[0].DstRegister.Index)) {
+ /* loop over dest channels */
+ uint channelsWritten = 0x0;
+ FOR_EACH_ENABLED_CHANNEL(*inst, chan) {
+ /* check if we're reading a channel that's been written */
+ uint swizzle = tgsi_util_get_full_src_register_extswizzle(&inst->FullSrcRegisters[i], chan);
+ if (swizzle <= TGSI_SWIZZLE_W &&
+ (channelsWritten & (1 << swizzle))) {
+ return TRUE;
+ }
+
+ channelsWritten |= (1 << chan);
+ }
+ }
+ }
+ return FALSE;
+}
+
+
+/**
+ * Initialize machine state by expanding tokens to full instructions,
+ * allocating temporary storage, setting up constants, etc.
+ * After this, we can call tgsi_exec_machine_run() many times.
+ */
+void
+tgsi_exec_machine_bind_shader(
+ struct tgsi_exec_machine *mach,
+ const struct tgsi_token *tokens,
+ uint numSamplers,
+ struct tgsi_sampler **samplers)
+{
+ uint k;
+ struct tgsi_parse_context parse;
+ struct tgsi_exec_labels *labels = &mach->Labels;
+ struct tgsi_full_instruction *instructions;
+ struct tgsi_full_declaration *declarations;
+ uint maxInstructions = 10, numInstructions = 0;
+ uint maxDeclarations = 10, numDeclarations = 0;
+ uint instno = 0;
+
+#if 0
+ tgsi_dump(tokens, 0);
+#endif
+
+ util_init_math();
+
+ mach->Tokens = tokens;
+ mach->Samplers = samplers;
+
+ k = tgsi_parse_init (&parse, mach->Tokens);
+ if (k != TGSI_PARSE_OK) {
+ debug_printf( "Problem parsing!\n" );
+ return;
+ }
+
+ mach->Processor = parse.FullHeader.Processor.Processor;
+ mach->ImmLimit = 0;
+ labels->count = 0;
+
+ declarations = (struct tgsi_full_declaration *)
+ MALLOC( maxDeclarations * sizeof(struct tgsi_full_declaration) );
+
+ if (!declarations) {
+ return;
+ }
+
+ instructions = (struct tgsi_full_instruction *)
+ MALLOC( maxInstructions * sizeof(struct tgsi_full_instruction) );
+
+ if (!instructions) {
+ FREE( declarations );
+ return;
+ }
+
+ while( !tgsi_parse_end_of_tokens( &parse ) ) {
+ uint pointer = parse.Position;
+ uint i;
+
+ tgsi_parse_token( &parse );
+ switch( parse.FullToken.Token.Type ) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ /* save expanded declaration */
+ if (numDeclarations == maxDeclarations) {
+ declarations = REALLOC(declarations,
+ maxDeclarations
+ * sizeof(struct tgsi_full_declaration),
+ (maxDeclarations + 10)
+ * sizeof(struct tgsi_full_declaration));
+ maxDeclarations += 10;
+ }
+ memcpy(declarations + numDeclarations,
+ &parse.FullToken.FullDeclaration,
+ sizeof(declarations[0]));
+ numDeclarations++;
+ break;
+
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ {
+ uint size = parse.FullToken.FullImmediate.Immediate.NrTokens - 1;
+ assert( size <= 4 );
+ assert( mach->ImmLimit + 1 <= TGSI_EXEC_NUM_IMMEDIATES );
+
+ for( i = 0; i < size; i++ ) {
+ mach->Imms[mach->ImmLimit][i] =
+ parse.FullToken.FullImmediate.u[i].Float;
+ }
+ mach->ImmLimit += 1;
+ }
+ break;
+
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ assert( labels->count < MAX_LABELS );
+
+ labels->labels[labels->count][0] = instno;
+ labels->labels[labels->count][1] = pointer;
+ labels->count++;
+
+ /* save expanded instruction */
+ if (numInstructions == maxInstructions) {
+ instructions = REALLOC(instructions,
+ maxInstructions
+ * sizeof(struct tgsi_full_instruction),
+ (maxInstructions + 10)
+ * sizeof(struct tgsi_full_instruction));
+ maxInstructions += 10;
+ }
+
+ if (tgsi_check_soa_dependencies(&parse.FullToken.FullInstruction)) {
+ uint opcode = parse.FullToken.FullInstruction.Instruction.Opcode;
+ parse.FullToken.FullInstruction.Flags = SOA_DEPENDENCY_FLAG;
+ /* XXX we only handle SOA dependencies properly for MOV/SWZ
+ * at this time!
+ */
+ if (opcode != TGSI_OPCODE_MOV && opcode != TGSI_OPCODE_SWZ) {
+ debug_printf("Warning: SOA dependency in instruction"
+ " is not handled:\n");
+ tgsi_dump_instruction(&parse.FullToken.FullInstruction,
+ numInstructions);
+ }
+ }
+
+ memcpy(instructions + numInstructions,
+ &parse.FullToken.FullInstruction,
+ sizeof(instructions[0]));
+
+ numInstructions++;
+ break;
+
+ default:
+ assert( 0 );
+ }
+ }
+ tgsi_parse_free (&parse);
+
+ if (mach->Declarations) {
+ FREE( mach->Declarations );
+ }
+ mach->Declarations = declarations;
+ mach->NumDeclarations = numDeclarations;
+
+ if (mach->Instructions) {
+ FREE( mach->Instructions );
+ }
+ mach->Instructions = instructions;
+ mach->NumInstructions = numInstructions;
+}
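+
+/* A minimal usage sketch (compiled out): the create/bind/run/destroy
+ * lifecycle described in the comment above. The token stream and
+ * samplers are assumed to come from the caller, and the single-argument
+ * tgsi_exec_machine_run() signature is assumed from its mention above.
+ */
+#if 0
+static void
+example_bind_and_run(
+ const struct tgsi_token *tokens,
+ uint num_samplers,
+ struct tgsi_sampler **samplers )
+{
+ struct tgsi_exec_machine *mach = tgsi_exec_machine_create();
+ if (!mach)
+ return;
+ tgsi_exec_machine_bind_shader( mach, tokens, num_samplers, samplers );
+ /* ... set up mach->Consts, mach->Inputs, mach->InterpCoefs, etc. ... */
+ tgsi_exec_machine_run( mach );
+ tgsi_exec_machine_destroy( mach );
+}
+#endif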
+
+
+struct tgsi_exec_machine *
+tgsi_exec_machine_create( void )
+{
+ struct tgsi_exec_machine *mach;
+ uint i;
+
+ mach = align_malloc( sizeof *mach, 16 );
+ if (!mach)
+ goto fail;
+
+ memset(mach, 0, sizeof(*mach));
+
+ mach->Addrs = &mach->Temps[TGSI_EXEC_TEMP_ADDR];
+
+ /* Setup constants. */
+ for( i = 0; i < 4; i++ ) {
+ mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].u[i] = 0x00000000;
+ mach->Temps[TEMP_7F_I].xyzw[TEMP_7F_C].u[i] = 0x7FFFFFFF;
+ mach->Temps[TEMP_80_I].xyzw[TEMP_80_C].u[i] = 0x80000000;
+ mach->Temps[TEMP_FF_I].xyzw[TEMP_FF_C].u[i] = 0xFFFFFFFF;
+ mach->Temps[TEMP_1_I].xyzw[TEMP_1_C].f[i] = 1.0f;
+ mach->Temps[TEMP_2_I].xyzw[TEMP_2_C].f[i] = 2.0f;
+ mach->Temps[TEMP_128_I].xyzw[TEMP_128_C].f[i] = 128.0f;
+ mach->Temps[TEMP_M128_I].xyzw[TEMP_M128_C].f[i] = -128.0f;
+ mach->Temps[TEMP_3_I].xyzw[TEMP_3_C].f[i] = 3.0f;
+ mach->Temps[TEMP_HALF_I].xyzw[TEMP_HALF_C].f[i] = 0.5f;
+ }
+
+#ifdef DEBUG
+ /* silence warnings */
+ (void) print_chan;
+ (void) print_temp;
+#endif
+
+ return mach;
+
+fail:
+ align_free(mach);
+ return NULL;
+}
+
+
+void
+tgsi_exec_machine_destroy(struct tgsi_exec_machine *mach)
+{
+ if (mach) {
+ FREE(mach->Instructions);
+ FREE(mach->Declarations);
+ }
+
+ align_free(mach);
+}
+
+
+static void
+micro_abs(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+ dst->f[0] = fabsf( src->f[0] );
+ dst->f[1] = fabsf( src->f[1] );
+ dst->f[2] = fabsf( src->f[2] );
+ dst->f[3] = fabsf( src->f[3] );
+}
+
+static void
+micro_add(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->f[0] = src0->f[0] + src1->f[0];
+ dst->f[1] = src0->f[1] + src1->f[1];
+ dst->f[2] = src0->f[2] + src1->f[2];
+ dst->f[3] = src0->f[3] + src1->f[3];
+}
+
+#if 0
+static void
+micro_iadd(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->i[0] = src0->i[0] + src1->i[0];
+ dst->i[1] = src0->i[1] + src1->i[1];
+ dst->i[2] = src0->i[2] + src1->i[2];
+ dst->i[3] = src0->i[3] + src1->i[3];
+}
+#endif
+
+static void
+micro_and(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->u[0] = src0->u[0] & src1->u[0];
+ dst->u[1] = src0->u[1] & src1->u[1];
+ dst->u[2] = src0->u[2] & src1->u[2];
+ dst->u[3] = src0->u[3] & src1->u[3];
+}
+
+static void
+micro_ceil(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+ dst->f[0] = ceilf( src->f[0] );
+ dst->f[1] = ceilf( src->f[1] );
+ dst->f[2] = ceilf( src->f[2] );
+ dst->f[3] = ceilf( src->f[3] );
+}
+
+static void
+micro_cos(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+ dst->f[0] = cosf( src->f[0] );
+ dst->f[1] = cosf( src->f[1] );
+ dst->f[2] = cosf( src->f[2] );
+ dst->f[3] = cosf( src->f[3] );
+}
+
+static void
+micro_ddx(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+ dst->f[0] =
+ dst->f[1] =
+ dst->f[2] =
+ dst->f[3] = src->f[TILE_BOTTOM_RIGHT] - src->f[TILE_BOTTOM_LEFT];
+}
+
+static void
+micro_ddy(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+ dst->f[0] =
+ dst->f[1] =
+ dst->f[2] =
+ dst->f[3] = src->f[TILE_TOP_LEFT] - src->f[TILE_BOTTOM_LEFT];
+}
+
+static void
+micro_div(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ if (src1->f[0] != 0) {
+ dst->f[0] = src0->f[0] / src1->f[0];
+ }
+ if (src1->f[1] != 0) {
+ dst->f[1] = src0->f[1] / src1->f[1];
+ }
+ if (src1->f[2] != 0) {
+ dst->f[2] = src0->f[2] / src1->f[2];
+ }
+ if (src1->f[3] != 0) {
+ dst->f[3] = src0->f[3] / src1->f[3];
+ }
+}
+
+#if 0
+static void
+micro_udiv(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->u[0] = src0->u[0] / src1->u[0];
+ dst->u[1] = src0->u[1] / src1->u[1];
+ dst->u[2] = src0->u[2] / src1->u[2];
+ dst->u[3] = src0->u[3] / src1->u[3];
+}
+#endif
+
+static void
+micro_eq(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1,
+ const union tgsi_exec_channel *src2,
+ const union tgsi_exec_channel *src3 )
+{
+ dst->f[0] = src0->f[0] == src1->f[0] ? src2->f[0] : src3->f[0];
+ dst->f[1] = src0->f[1] == src1->f[1] ? src2->f[1] : src3->f[1];
+ dst->f[2] = src0->f[2] == src1->f[2] ? src2->f[2] : src3->f[2];
+ dst->f[3] = src0->f[3] == src1->f[3] ? src2->f[3] : src3->f[3];
+}
+
+#if 0
+static void
+micro_ieq(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1,
+ const union tgsi_exec_channel *src2,
+ const union tgsi_exec_channel *src3 )
+{
+ dst->i[0] = src0->i[0] == src1->i[0] ? src2->i[0] : src3->i[0];
+ dst->i[1] = src0->i[1] == src1->i[1] ? src2->i[1] : src3->i[1];
+ dst->i[2] = src0->i[2] == src1->i[2] ? src2->i[2] : src3->i[2];
+ dst->i[3] = src0->i[3] == src1->i[3] ? src2->i[3] : src3->i[3];
+}
+#endif
+
+static void
+micro_exp2(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src)
+{
+#if FAST_MATH
+ dst->f[0] = util_fast_exp2( src->f[0] );
+ dst->f[1] = util_fast_exp2( src->f[1] );
+ dst->f[2] = util_fast_exp2( src->f[2] );
+ dst->f[3] = util_fast_exp2( src->f[3] );
+#else
+ dst->f[0] = powf( 2.0f, src->f[0] );
+ dst->f[1] = powf( 2.0f, src->f[1] );
+ dst->f[2] = powf( 2.0f, src->f[2] );
+ dst->f[3] = powf( 2.0f, src->f[3] );
+#endif
+}
+
+#if 0
+static void
+micro_f2ut(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+ dst->u[0] = (uint) src->f[0];
+ dst->u[1] = (uint) src->f[1];
+ dst->u[2] = (uint) src->f[2];
+ dst->u[3] = (uint) src->f[3];
+}
+#endif
+
+static void
+micro_float_clamp(union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src)
+{
+ uint i;
+
+ for (i = 0; i < 4; i++) {
+ if (src->f[i] > 0.0f) {
+ if (src->f[i] > 1.884467e+019f)
+ dst->f[i] = 1.884467e+019f;
+ else if (src->f[i] < 5.42101e-020f)
+ dst->f[i] = 5.42101e-020f;
+ else
+ dst->f[i] = src->f[i];
+ }
+ else {
+ if (src->f[i] < -1.884467e+019f)
+ dst->f[i] = -1.884467e+019f;
+ else if (src->f[i] > -5.42101e-020f)
+ dst->f[i] = -5.42101e-020f;
+ else
+ dst->f[i] = src->f[i];
+ }
+ }
+}
+
+static void
+micro_flr(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+ dst->f[0] = floorf( src->f[0] );
+ dst->f[1] = floorf( src->f[1] );
+ dst->f[2] = floorf( src->f[2] );
+ dst->f[3] = floorf( src->f[3] );
+}
+
+static void
+micro_frc(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+ dst->f[0] = src->f[0] - floorf( src->f[0] );
+ dst->f[1] = src->f[1] - floorf( src->f[1] );
+ dst->f[2] = src->f[2] - floorf( src->f[2] );
+ dst->f[3] = src->f[3] - floorf( src->f[3] );
+}
+
+static void
+micro_i2f(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+ dst->f[0] = (float) src->i[0];
+ dst->f[1] = (float) src->i[1];
+ dst->f[2] = (float) src->i[2];
+ dst->f[3] = (float) src->i[3];
+}
+
+static void
+micro_lg2(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+#if FAST_MATH
+ dst->f[0] = util_fast_log2( src->f[0] );
+ dst->f[1] = util_fast_log2( src->f[1] );
+ dst->f[2] = util_fast_log2( src->f[2] );
+ dst->f[3] = util_fast_log2( src->f[3] );
+#else
+ dst->f[0] = logf( src->f[0] ) * 1.442695f;
+ dst->f[1] = logf( src->f[1] ) * 1.442695f;
+ dst->f[2] = logf( src->f[2] ) * 1.442695f;
+ dst->f[3] = logf( src->f[3] ) * 1.442695f;
+#endif
+}
+
+static void
+micro_le(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1,
+ const union tgsi_exec_channel *src2,
+ const union tgsi_exec_channel *src3 )
+{
+ dst->f[0] = src0->f[0] <= src1->f[0] ? src2->f[0] : src3->f[0];
+ dst->f[1] = src0->f[1] <= src1->f[1] ? src2->f[1] : src3->f[1];
+ dst->f[2] = src0->f[2] <= src1->f[2] ? src2->f[2] : src3->f[2];
+ dst->f[3] = src0->f[3] <= src1->f[3] ? src2->f[3] : src3->f[3];
+}
+
+static void
+micro_lt(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1,
+ const union tgsi_exec_channel *src2,
+ const union tgsi_exec_channel *src3 )
+{
+ dst->f[0] = src0->f[0] < src1->f[0] ? src2->f[0] : src3->f[0];
+ dst->f[1] = src0->f[1] < src1->f[1] ? src2->f[1] : src3->f[1];
+ dst->f[2] = src0->f[2] < src1->f[2] ? src2->f[2] : src3->f[2];
+ dst->f[3] = src0->f[3] < src1->f[3] ? src2->f[3] : src3->f[3];
+}
+
+#if 0
+static void
+micro_ilt(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1,
+ const union tgsi_exec_channel *src2,
+ const union tgsi_exec_channel *src3 )
+{
+ dst->i[0] = src0->i[0] < src1->i[0] ? src2->i[0] : src3->i[0];
+ dst->i[1] = src0->i[1] < src1->i[1] ? src2->i[1] : src3->i[1];
+ dst->i[2] = src0->i[2] < src1->i[2] ? src2->i[2] : src3->i[2];
+ dst->i[3] = src0->i[3] < src1->i[3] ? src2->i[3] : src3->i[3];
+}
+#endif
+
+#if 0
+static void
+micro_ult(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1,
+ const union tgsi_exec_channel *src2,
+ const union tgsi_exec_channel *src3 )
+{
+ dst->u[0] = src0->u[0] < src1->u[0] ? src2->u[0] : src3->u[0];
+ dst->u[1] = src0->u[1] < src1->u[1] ? src2->u[1] : src3->u[1];
+ dst->u[2] = src0->u[2] < src1->u[2] ? src2->u[2] : src3->u[2];
+ dst->u[3] = src0->u[3] < src1->u[3] ? src2->u[3] : src3->u[3];
+}
+#endif
+
+static void
+micro_max(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->f[0] = src0->f[0] > src1->f[0] ? src0->f[0] : src1->f[0];
+ dst->f[1] = src0->f[1] > src1->f[1] ? src0->f[1] : src1->f[1];
+ dst->f[2] = src0->f[2] > src1->f[2] ? src0->f[2] : src1->f[2];
+ dst->f[3] = src0->f[3] > src1->f[3] ? src0->f[3] : src1->f[3];
+}
+
+#if 0
+static void
+micro_imax(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->i[0] = src0->i[0] > src1->i[0] ? src0->i[0] : src1->i[0];
+ dst->i[1] = src0->i[1] > src1->i[1] ? src0->i[1] : src1->i[1];
+ dst->i[2] = src0->i[2] > src1->i[2] ? src0->i[2] : src1->i[2];
+ dst->i[3] = src0->i[3] > src1->i[3] ? src0->i[3] : src1->i[3];
+}
+#endif
+
+#if 0
+static void
+micro_umax(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->u[0] = src0->u[0] > src1->u[0] ? src0->u[0] : src1->u[0];
+ dst->u[1] = src0->u[1] > src1->u[1] ? src0->u[1] : src1->u[1];
+ dst->u[2] = src0->u[2] > src1->u[2] ? src0->u[2] : src1->u[2];
+ dst->u[3] = src0->u[3] > src1->u[3] ? src0->u[3] : src1->u[3];
+}
+#endif
+
+static void
+micro_min(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->f[0] = src0->f[0] < src1->f[0] ? src0->f[0] : src1->f[0];
+ dst->f[1] = src0->f[1] < src1->f[1] ? src0->f[1] : src1->f[1];
+ dst->f[2] = src0->f[2] < src1->f[2] ? src0->f[2] : src1->f[2];
+ dst->f[3] = src0->f[3] < src1->f[3] ? src0->f[3] : src1->f[3];
+}
+
+#if 0
+static void
+micro_imin(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->i[0] = src0->i[0] < src1->i[0] ? src0->i[0] : src1->i[0];
+ dst->i[1] = src0->i[1] < src1->i[1] ? src0->i[1] : src1->i[1];
+ dst->i[2] = src0->i[2] < src1->i[2] ? src0->i[2] : src1->i[2];
+ dst->i[3] = src0->i[3] < src1->i[3] ? src0->i[3] : src1->i[3];
+}
+#endif
+
+#if 0
+static void
+micro_umin(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->u[0] = src0->u[0] < src1->u[0] ? src0->u[0] : src1->u[0];
+ dst->u[1] = src0->u[1] < src1->u[1] ? src0->u[1] : src1->u[1];
+ dst->u[2] = src0->u[2] < src1->u[2] ? src0->u[2] : src1->u[2];
+ dst->u[3] = src0->u[3] < src1->u[3] ? src0->u[3] : src1->u[3];
+}
+#endif
+
+#if 0
+static void
+micro_umod(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->u[0] = src0->u[0] % src1->u[0];
+ dst->u[1] = src0->u[1] % src1->u[1];
+ dst->u[2] = src0->u[2] % src1->u[2];
+ dst->u[3] = src0->u[3] % src1->u[3];
+}
+#endif
+
+static void
+micro_mul(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->f[0] = src0->f[0] * src1->f[0];
+ dst->f[1] = src0->f[1] * src1->f[1];
+ dst->f[2] = src0->f[2] * src1->f[2];
+ dst->f[3] = src0->f[3] * src1->f[3];
+}
+
+#if 0
+static void
+micro_imul(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->i[0] = src0->i[0] * src1->i[0];
+ dst->i[1] = src0->i[1] * src1->i[1];
+ dst->i[2] = src0->i[2] * src1->i[2];
+ dst->i[3] = src0->i[3] * src1->i[3];
+}
+#endif
+
+#if 0
+static void
+micro_imul64(
+ union tgsi_exec_channel *dst0,
+ union tgsi_exec_channel *dst1,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst1->i[0] = src0->i[0] * src1->i[0];
+ dst1->i[1] = src0->i[1] * src1->i[1];
+ dst1->i[2] = src0->i[2] * src1->i[2];
+ dst1->i[3] = src0->i[3] * src1->i[3];
+ dst0->i[0] = 0;
+ dst0->i[1] = 0;
+ dst0->i[2] = 0;
+ dst0->i[3] = 0;
+}
+#endif
+
+#if 0
+static void
+micro_umul64(
+ union tgsi_exec_channel *dst0,
+ union tgsi_exec_channel *dst1,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst1->u[0] = src0->u[0] * src1->u[0];
+ dst1->u[1] = src0->u[1] * src1->u[1];
+ dst1->u[2] = src0->u[2] * src1->u[2];
+ dst1->u[3] = src0->u[3] * src1->u[3];
+ dst0->u[0] = 0;
+ dst0->u[1] = 0;
+ dst0->u[2] = 0;
+ dst0->u[3] = 0;
+}
+#endif
+
+
+#if 0
+static void
+micro_movc(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1,
+ const union tgsi_exec_channel *src2 )
+{
+ dst->u[0] = src0->u[0] ? src1->u[0] : src2->u[0];
+ dst->u[1] = src0->u[1] ? src1->u[1] : src2->u[1];
+ dst->u[2] = src0->u[2] ? src1->u[2] : src2->u[2];
+ dst->u[3] = src0->u[3] ? src1->u[3] : src2->u[3];
+}
+#endif
+
+static void
+micro_neg(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+ dst->f[0] = -src->f[0];
+ dst->f[1] = -src->f[1];
+ dst->f[2] = -src->f[2];
+ dst->f[3] = -src->f[3];
+}
+
+#if 0
+static void
+micro_ineg(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+ dst->i[0] = -src->i[0];
+ dst->i[1] = -src->i[1];
+ dst->i[2] = -src->i[2];
+ dst->i[3] = -src->i[3];
+}
+#endif
+
+static void
+micro_not(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+ dst->u[0] = ~src->u[0];
+ dst->u[1] = ~src->u[1];
+ dst->u[2] = ~src->u[2];
+ dst->u[3] = ~src->u[3];
+}
+
+static void
+micro_or(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->u[0] = src0->u[0] | src1->u[0];
+ dst->u[1] = src0->u[1] | src1->u[1];
+ dst->u[2] = src0->u[2] | src1->u[2];
+ dst->u[3] = src0->u[3] | src1->u[3];
+}
+
+static void
+micro_pow(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+#if FAST_MATH
+ dst->f[0] = util_fast_pow( src0->f[0], src1->f[0] );
+ dst->f[1] = util_fast_pow( src0->f[1], src1->f[1] );
+ dst->f[2] = util_fast_pow( src0->f[2], src1->f[2] );
+ dst->f[3] = util_fast_pow( src0->f[3], src1->f[3] );
+#else
+ dst->f[0] = powf( src0->f[0], src1->f[0] );
+ dst->f[1] = powf( src0->f[1], src1->f[1] );
+ dst->f[2] = powf( src0->f[2], src1->f[2] );
+ dst->f[3] = powf( src0->f[3], src1->f[3] );
+#endif
+}
+
+static void
+micro_rnd(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+ dst->f[0] = floorf( src->f[0] + 0.5f );
+ dst->f[1] = floorf( src->f[1] + 0.5f );
+ dst->f[2] = floorf( src->f[2] + 0.5f );
+ dst->f[3] = floorf( src->f[3] + 0.5f );
+}
+
+static void
+micro_sgn(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+ dst->f[0] = src->f[0] < 0.0f ? -1.0f : src->f[0] > 0.0f ? 1.0f : 0.0f;
+ dst->f[1] = src->f[1] < 0.0f ? -1.0f : src->f[1] > 0.0f ? 1.0f : 0.0f;
+ dst->f[2] = src->f[2] < 0.0f ? -1.0f : src->f[2] > 0.0f ? 1.0f : 0.0f;
+ dst->f[3] = src->f[3] < 0.0f ? -1.0f : src->f[3] > 0.0f ? 1.0f : 0.0f;
+}
+
+static void
+micro_shl(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->i[0] = src0->i[0] << src1->i[0];
+ dst->i[1] = src0->i[1] << src1->i[1];
+ dst->i[2] = src0->i[2] << src1->i[2];
+ dst->i[3] = src0->i[3] << src1->i[3];
+}
+
+static void
+micro_ishr(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->i[0] = src0->i[0] >> src1->i[0];
+ dst->i[1] = src0->i[1] >> src1->i[1];
+ dst->i[2] = src0->i[2] >> src1->i[2];
+ dst->i[3] = src0->i[3] >> src1->i[3];
+}
+
+static void
+micro_trunc(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0 )
+{
+ dst->f[0] = (float) (int) src0->f[0];
+ dst->f[1] = (float) (int) src0->f[1];
+ dst->f[2] = (float) (int) src0->f[2];
+ dst->f[3] = (float) (int) src0->f[3];
+}
+
+#if 0
+static void
+micro_ushr(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->u[0] = src0->u[0] >> src1->u[0];
+ dst->u[1] = src0->u[1] >> src1->u[1];
+ dst->u[2] = src0->u[2] >> src1->u[2];
+ dst->u[3] = src0->u[3] >> src1->u[3];
+}
+#endif
+
+static void
+micro_sin(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+ dst->f[0] = sinf( src->f[0] );
+ dst->f[1] = sinf( src->f[1] );
+ dst->f[2] = sinf( src->f[2] );
+ dst->f[3] = sinf( src->f[3] );
+}
+
+static void
+micro_sqrt( union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+ dst->f[0] = sqrtf( src->f[0] );
+ dst->f[1] = sqrtf( src->f[1] );
+ dst->f[2] = sqrtf( src->f[2] );
+ dst->f[3] = sqrtf( src->f[3] );
+}
+
+static void
+micro_sub(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->f[0] = src0->f[0] - src1->f[0];
+ dst->f[1] = src0->f[1] - src1->f[1];
+ dst->f[2] = src0->f[2] - src1->f[2];
+ dst->f[3] = src0->f[3] - src1->f[3];
+}
+
+#if 0
+static void
+micro_u2f(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src )
+{
+ dst->f[0] = (float) src->u[0];
+ dst->f[1] = (float) src->u[1];
+ dst->f[2] = (float) src->u[2];
+ dst->f[3] = (float) src->u[3];
+}
+#endif
+
+static void
+micro_xor(
+ union tgsi_exec_channel *dst,
+ const union tgsi_exec_channel *src0,
+ const union tgsi_exec_channel *src1 )
+{
+ dst->u[0] = src0->u[0] ^ src1->u[0];
+ dst->u[1] = src0->u[1] ^ src1->u[1];
+ dst->u[2] = src0->u[2] ^ src1->u[2];
+ dst->u[3] = src0->u[3] ^ src1->u[3];
+}
+
+static void
+fetch_src_file_channel(
+ const struct tgsi_exec_machine *mach,
+ const uint file,
+ const uint swizzle,
+ const union tgsi_exec_channel *index,
+ union tgsi_exec_channel *chan )
+{
+ switch( swizzle ) {
+ case TGSI_EXTSWIZZLE_X:
+ case TGSI_EXTSWIZZLE_Y:
+ case TGSI_EXTSWIZZLE_Z:
+ case TGSI_EXTSWIZZLE_W:
+ switch( file ) {
+ case TGSI_FILE_CONSTANT:
+ assert(mach->Consts);
+ if (index->i[0] < 0)
+ chan->f[0] = 0.0f;
+ else
+ chan->f[0] = mach->Consts[index->i[0]][swizzle];
+ if (index->i[1] < 0)
+ chan->f[1] = 0.0f;
+ else
+ chan->f[1] = mach->Consts[index->i[1]][swizzle];
+ if (index->i[2] < 0)
+ chan->f[2] = 0.0f;
+ else
+ chan->f[2] = mach->Consts[index->i[2]][swizzle];
+ if (index->i[3] < 0)
+ chan->f[3] = 0.0f;
+ else
+ chan->f[3] = mach->Consts[index->i[3]][swizzle];
+ break;
+
+ case TGSI_FILE_INPUT:
+ chan->u[0] = mach->Inputs[index->i[0]].xyzw[swizzle].u[0];
+ chan->u[1] = mach->Inputs[index->i[1]].xyzw[swizzle].u[1];
+ chan->u[2] = mach->Inputs[index->i[2]].xyzw[swizzle].u[2];
+ chan->u[3] = mach->Inputs[index->i[3]].xyzw[swizzle].u[3];
+ break;
+
+ case TGSI_FILE_TEMPORARY:
+ assert(index->i[0] < TGSI_EXEC_NUM_TEMPS);
+ chan->u[0] = mach->Temps[index->i[0]].xyzw[swizzle].u[0];
+ chan->u[1] = mach->Temps[index->i[1]].xyzw[swizzle].u[1];
+ chan->u[2] = mach->Temps[index->i[2]].xyzw[swizzle].u[2];
+ chan->u[3] = mach->Temps[index->i[3]].xyzw[swizzle].u[3];
+ break;
+
+ case TGSI_FILE_IMMEDIATE:
+ assert( index->i[0] < (int) mach->ImmLimit );
+ chan->f[0] = mach->Imms[index->i[0]][swizzle];
+ assert( index->i[1] < (int) mach->ImmLimit );
+ chan->f[1] = mach->Imms[index->i[1]][swizzle];
+ assert( index->i[2] < (int) mach->ImmLimit );
+ chan->f[2] = mach->Imms[index->i[2]][swizzle];
+ assert( index->i[3] < (int) mach->ImmLimit );
+ chan->f[3] = mach->Imms[index->i[3]][swizzle];
+ break;
+
+ case TGSI_FILE_ADDRESS:
+ chan->u[0] = mach->Addrs[index->i[0]].xyzw[swizzle].u[0];
+ chan->u[1] = mach->Addrs[index->i[1]].xyzw[swizzle].u[1];
+ chan->u[2] = mach->Addrs[index->i[2]].xyzw[swizzle].u[2];
+ chan->u[3] = mach->Addrs[index->i[3]].xyzw[swizzle].u[3];
+ break;
+
+ case TGSI_FILE_OUTPUT:
+ /* vertex/fragment output vars can be read too */
+ chan->u[0] = mach->Outputs[index->i[0]].xyzw[swizzle].u[0];
+ chan->u[1] = mach->Outputs[index->i[1]].xyzw[swizzle].u[1];
+ chan->u[2] = mach->Outputs[index->i[2]].xyzw[swizzle].u[2];
+ chan->u[3] = mach->Outputs[index->i[3]].xyzw[swizzle].u[3];
+ break;
+
+ default:
+ assert( 0 );
+ }
+ break;
+
+ case TGSI_EXTSWIZZLE_ZERO:
+ *chan = mach->Temps[TEMP_0_I].xyzw[TEMP_0_C];
+ break;
+
+ case TGSI_EXTSWIZZLE_ONE:
+ *chan = mach->Temps[TEMP_1_I].xyzw[TEMP_1_C];
+ break;
+
+ default:
+ assert( 0 );
+ }
+}
+
+static void
+fetch_source(
+ const struct tgsi_exec_machine *mach,
+ union tgsi_exec_channel *chan,
+ const struct tgsi_full_src_register *reg,
+ const uint chan_index )
+{
+ union tgsi_exec_channel index;
+ uint swizzle;
+
+ /* We start with a direct index into a register file.
+ *
+ * file[1],
+ * where:
+ * file = SrcRegister.File
+ * [1] = SrcRegister.Index
+ */
+ index.i[0] =
+ index.i[1] =
+ index.i[2] =
+ index.i[3] = reg->SrcRegister.Index;
+
+ /* There is an extra source register that indirectly subscripts
+ * a register file. The direct index now becomes an offset
+ * that is being added to the indirect register.
+ *
+ * file[ind[2].x+1],
+ * where:
+ * ind = SrcRegisterInd.File
+ * [2] = SrcRegisterInd.Index
+ * .x = SrcRegisterInd.SwizzleX
+ */
+ if (reg->SrcRegister.Indirect) {
+ union tgsi_exec_channel index2;
+ union tgsi_exec_channel indir_index;
+ const uint execmask = mach->ExecMask;
+ uint i;
+
+ /* which address register (always zero now) */
+ index2.i[0] =
+ index2.i[1] =
+ index2.i[2] =
+ index2.i[3] = reg->SrcRegisterInd.Index;
+
+ /* get current value of address register[swizzle] */
+ swizzle = tgsi_util_get_src_register_swizzle( &reg->SrcRegisterInd, CHAN_X );
+ fetch_src_file_channel(
+ mach,
+ reg->SrcRegisterInd.File,
+ swizzle,
+ &index2,
+ &indir_index );
+
+ /* add value of address register to the offset */
+ index.i[0] += (int) indir_index.f[0];
+ index.i[1] += (int) indir_index.f[1];
+ index.i[2] += (int) indir_index.f[2];
+ index.i[3] += (int) indir_index.f[3];
+
+ /* for disabled execution channels, zero-out the index to
+ * avoid using a potential garbage value.
+ */
+ for (i = 0; i < QUAD_SIZE; i++) {
+ if ((execmask & (1 << i)) == 0)
+ index.i[i] = 0;
+ }
+ }
+
+ /* There is an extra source register that is a second
+ * subscript to a register file. Effectively it means that
+ * the register file is actually a 2D array of registers.
+ *
+ * file[1][3] == file[1*sizeof(file[1])+3],
+ * where:
+ * [3] = SrcRegisterDim.Index
+ */
+ if (reg->SrcRegister.Dimension) {
+ /* The size of the first-order array depends on the register file type.
+ * We need to multiply the index to the first array to get an effective,
+ * "flat" index that points to the beginning of the second-order array.
+ */
+ switch (reg->SrcRegister.File) {
+ case TGSI_FILE_INPUT:
+ index.i[0] *= TGSI_EXEC_MAX_INPUT_ATTRIBS;
+ index.i[1] *= TGSI_EXEC_MAX_INPUT_ATTRIBS;
+ index.i[2] *= TGSI_EXEC_MAX_INPUT_ATTRIBS;
+ index.i[3] *= TGSI_EXEC_MAX_INPUT_ATTRIBS;
+ break;
+ case TGSI_FILE_CONSTANT:
+ index.i[0] *= TGSI_EXEC_MAX_CONST_BUFFER;
+ index.i[1] *= TGSI_EXEC_MAX_CONST_BUFFER;
+ index.i[2] *= TGSI_EXEC_MAX_CONST_BUFFER;
+ index.i[3] *= TGSI_EXEC_MAX_CONST_BUFFER;
+ break;
+ default:
+ assert( 0 );
+ }
+
+ index.i[0] += reg->SrcRegisterDim.Index;
+ index.i[1] += reg->SrcRegisterDim.Index;
+ index.i[2] += reg->SrcRegisterDim.Index;
+ index.i[3] += reg->SrcRegisterDim.Index;
+
+ /* Again, the second subscript index can be addressed indirectly
+ * identically to the first one.
+ * Nothing stops us from indirectly addressing the indirect register,
+ * but there is no need for that, so we won't exercise it.
+ *
+ * file[1][ind[4].y+3],
+ * where:
+ * ind = SrcRegisterDimInd.File
+ * [4] = SrcRegisterDimInd.Index
+ * .y = SrcRegisterDimInd.SwizzleX
+ */
+ if (reg->SrcRegisterDim.Indirect) {
+ union tgsi_exec_channel index2;
+ union tgsi_exec_channel indir_index;
+ const uint execmask = mach->ExecMask;
+ uint i;
+
+ index2.i[0] =
+ index2.i[1] =
+ index2.i[2] =
+ index2.i[3] = reg->SrcRegisterDimInd.Index;
+
+ swizzle = tgsi_util_get_src_register_swizzle( &reg->SrcRegisterDimInd, CHAN_X );
+ fetch_src_file_channel(
+ mach,
+ reg->SrcRegisterDimInd.File,
+ swizzle,
+ &index2,
+ &indir_index );
+
+ index.i[0] += (int) indir_index.f[0];
+ index.i[1] += (int) indir_index.f[1];
+ index.i[2] += (int) indir_index.f[2];
+ index.i[3] += (int) indir_index.f[3];
+
+ /* for disabled execution channels, zero-out the index to
+ * avoid using a potential garbage value.
+ */
+ for (i = 0; i < QUAD_SIZE; i++) {
+ if ((execmask & (1 << i)) == 0)
+ index.i[i] = 0;
+ }
+ }
+
+ /* If by any chance there was a need for a 3D array of register
+ * files, we would have to check whether SrcRegisterDim is followed
+ * by a dimension register and continue the saga.
+ */
+ }
+
+ swizzle = tgsi_util_get_full_src_register_extswizzle( reg, chan_index );
+ fetch_src_file_channel(
+ mach,
+ reg->SrcRegister.File,
+ swizzle,
+ &index,
+ chan );
+
+ switch (tgsi_util_get_full_src_register_sign_mode( reg, chan_index )) {
+ case TGSI_UTIL_SIGN_CLEAR:
+ micro_abs( chan, chan );
+ break;
+
+ case TGSI_UTIL_SIGN_SET:
+ micro_abs( chan, chan );
+ micro_neg( chan, chan );
+ break;
+
+ case TGSI_UTIL_SIGN_TOGGLE:
+ micro_neg( chan, chan );
+ break;
+
+ case TGSI_UTIL_SIGN_KEEP:
+ break;
+ }
+
+ if (reg->SrcRegisterExtMod.Complement) {
+ micro_sub( chan, &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], chan );
+ }
+}
+
+static void
+store_dest(
+ struct tgsi_exec_machine *mach,
+ const union tgsi_exec_channel *chan,
+ const struct tgsi_full_dst_register *reg,
+ const struct tgsi_full_instruction *inst,
+ uint chan_index )
+{
+ uint i;
+ union tgsi_exec_channel null;
+ union tgsi_exec_channel *dst;
+ uint execmask = mach->ExecMask;
+ int offset = 0; /* indirection offset */
+ int index;
+
+#ifdef DEBUG
+ check_inf_or_nan(chan);
+#endif
+
+ /* There is an extra source register that indirectly subscripts
+ * a register file. The direct index now becomes an offset
+ * that is being added to the indirect register.
+ *
+ * file[ind[2].x+1],
+ * where:
+ * ind = DstRegisterInd.File
+ * [2] = DstRegisterInd.Index
+ * .x = DstRegisterInd.SwizzleX
+ */
+ if (reg->DstRegister.Indirect) {
+ union tgsi_exec_channel index;
+ union tgsi_exec_channel indir_index;
+ uint swizzle;
+
+ /* which address register (always zero for now) */
+ index.i[0] =
+ index.i[1] =
+ index.i[2] =
+ index.i[3] = reg->DstRegisterInd.Index;
+
+ /* get current value of address register[swizzle] */
+ swizzle = tgsi_util_get_src_register_swizzle( &reg->DstRegisterInd, CHAN_X );
+
+ /* fetch values from the address/indirection register */
+ fetch_src_file_channel(
+ mach,
+ reg->DstRegisterInd.File,
+ swizzle,
+ &index,
+ &indir_index );
+
+ /* save indirection offset */
+ offset = (int) indir_index.f[0];
+ }
+
+ switch (reg->DstRegister.File) {
+ case TGSI_FILE_NULL:
+ dst = &null;
+ break;
+
+ case TGSI_FILE_OUTPUT:
+ index = mach->Temps[TEMP_OUTPUT_I].xyzw[TEMP_OUTPUT_C].u[0]
+ + reg->DstRegister.Index;
+ dst = &mach->Outputs[offset + index].xyzw[chan_index];
+ break;
+
+ case TGSI_FILE_TEMPORARY:
+ index = reg->DstRegister.Index;
+ assert( index < TGSI_EXEC_NUM_TEMPS );
+ dst = &mach->Temps[offset + index].xyzw[chan_index];
+ break;
+
+ case TGSI_FILE_ADDRESS:
+ index = reg->DstRegister.Index;
+ dst = &mach->Addrs[index].xyzw[chan_index];
+ break;
+
+ default:
+ assert( 0 );
+ return;
+ }
+
+ if (inst->InstructionExtNv.CondFlowEnable) {
+ union tgsi_exec_channel *cc = &mach->Temps[TEMP_CC_I].xyzw[TEMP_CC_C];
+ uint swizzle;
+ uint shift;
+ uint mask;
+ uint test;
+
+ /* Only CC0 supported.
+ */
+ assert( inst->InstructionExtNv.CondFlowIndex < 1 );
+
+ switch (chan_index) {
+ case CHAN_X:
+ swizzle = inst->InstructionExtNv.CondSwizzleX;
+ break;
+ case CHAN_Y:
+ swizzle = inst->InstructionExtNv.CondSwizzleY;
+ break;
+ case CHAN_Z:
+ swizzle = inst->InstructionExtNv.CondSwizzleZ;
+ break;
+ case CHAN_W:
+ swizzle = inst->InstructionExtNv.CondSwizzleW;
+ break;
+ default:
+ assert( 0 );
+ return;
+ }
+
+ switch (swizzle) {
+ case TGSI_SWIZZLE_X:
+ shift = TGSI_EXEC_CC_X_SHIFT;
+ mask = TGSI_EXEC_CC_X_MASK;
+ break;
+ case TGSI_SWIZZLE_Y:
+ shift = TGSI_EXEC_CC_Y_SHIFT;
+ mask = TGSI_EXEC_CC_Y_MASK;
+ break;
+ case TGSI_SWIZZLE_Z:
+ shift = TGSI_EXEC_CC_Z_SHIFT;
+ mask = TGSI_EXEC_CC_Z_MASK;
+ break;
+ case TGSI_SWIZZLE_W:
+ shift = TGSI_EXEC_CC_W_SHIFT;
+ mask = TGSI_EXEC_CC_W_MASK;
+ break;
+ default:
+ assert( 0 );
+ return;
+ }
+
+ switch (inst->InstructionExtNv.CondMask) {
+ case TGSI_CC_GT:
+ test = ~(TGSI_EXEC_CC_GT << shift) & mask;
+ for (i = 0; i < QUAD_SIZE; i++)
+ if (cc->u[i] & test)
+ execmask &= ~(1 << i);
+ break;
+
+ case TGSI_CC_EQ:
+ test = ~(TGSI_EXEC_CC_EQ << shift) & mask;
+ for (i = 0; i < QUAD_SIZE; i++)
+ if (cc->u[i] & test)
+ execmask &= ~(1 << i);
+ break;
+
+ case TGSI_CC_LT:
+ test = ~(TGSI_EXEC_CC_LT << shift) & mask;
+ for (i = 0; i < QUAD_SIZE; i++)
+ if (cc->u[i] & test)
+ execmask &= ~(1 << i);
+ break;
+
+ case TGSI_CC_GE:
+ test = ~((TGSI_EXEC_CC_GT | TGSI_EXEC_CC_EQ) << shift) & mask;
+ for (i = 0; i < QUAD_SIZE; i++)
+ if (cc->u[i] & test)
+ execmask &= ~(1 << i);
+ break;
+
+ case TGSI_CC_LE:
+ test = ~((TGSI_EXEC_CC_LT | TGSI_EXEC_CC_EQ) << shift) & mask;
+ for (i = 0; i < QUAD_SIZE; i++)
+ if (cc->u[i] & test)
+ execmask &= ~(1 << i);
+ break;
+
+ case TGSI_CC_NE:
+ test = ~((TGSI_EXEC_CC_GT | TGSI_EXEC_CC_LT | TGSI_EXEC_CC_UN) << shift) & mask;
+ for (i = 0; i < QUAD_SIZE; i++)
+ if (cc->u[i] & test)
+ execmask &= ~(1 << i);
+ break;
+
+ case TGSI_CC_TR:
+ break;
+
+ case TGSI_CC_FL:
+ for (i = 0; i < QUAD_SIZE; i++)
+ execmask &= ~(1 << i);
+ break;
+
+ default:
+ assert( 0 );
+ return;
+ }
+ }
+
+ switch (inst->Instruction.Saturate) {
+ case TGSI_SAT_NONE:
+ for (i = 0; i < QUAD_SIZE; i++)
+ if (execmask & (1 << i))
+ dst->i[i] = chan->i[i];
+ break;
+
+ case TGSI_SAT_ZERO_ONE:
+ for (i = 0; i < QUAD_SIZE; i++)
+ if (execmask & (1 << i)) {
+ if (chan->f[i] < 0.0f)
+ dst->f[i] = 0.0f;
+ else if (chan->f[i] > 1.0f)
+ dst->f[i] = 1.0f;
+ else
+ dst->i[i] = chan->i[i];
+ }
+ break;
+
+ case TGSI_SAT_MINUS_PLUS_ONE:
+ for (i = 0; i < QUAD_SIZE; i++)
+ if (execmask & (1 << i)) {
+ if (chan->f[i] < -1.0f)
+ dst->f[i] = -1.0f;
+ else if (chan->f[i] > 1.0f)
+ dst->f[i] = 1.0f;
+ else
+ dst->i[i] = chan->i[i];
+ }
+ break;
+
+ default:
+ assert( 0 );
+ }
+
+ if (inst->InstructionExtNv.CondDstUpdate) {
+ union tgsi_exec_channel *cc = &mach->Temps[TEMP_CC_I].xyzw[TEMP_CC_C];
+ uint shift;
+ uint mask;
+
+ /* Only CC0 supported.
+ */
+ assert( inst->InstructionExtNv.CondDstIndex < 1 );
+
+ switch (chan_index) {
+ case CHAN_X:
+ shift = TGSI_EXEC_CC_X_SHIFT;
+ mask = ~TGSI_EXEC_CC_X_MASK;
+ break;
+ case CHAN_Y:
+ shift = TGSI_EXEC_CC_Y_SHIFT;
+ mask = ~TGSI_EXEC_CC_Y_MASK;
+ break;
+ case CHAN_Z:
+ shift = TGSI_EXEC_CC_Z_SHIFT;
+ mask = ~TGSI_EXEC_CC_Z_MASK;
+ break;
+ case CHAN_W:
+ shift = TGSI_EXEC_CC_W_SHIFT;
+ mask = ~TGSI_EXEC_CC_W_MASK;
+ break;
+ default:
+ assert( 0 );
+ return;
+ }
+
+ for (i = 0; i < QUAD_SIZE; i++)
+ if (execmask & (1 << i)) {
+ cc->u[i] &= mask;
+ if (dst->f[i] < 0.0f)
+ cc->u[i] |= TGSI_EXEC_CC_LT << shift;
+ else if (dst->f[i] > 0.0f)
+ cc->u[i] |= TGSI_EXEC_CC_GT << shift;
+ else if (dst->f[i] == 0.0f)
+ cc->u[i] |= TGSI_EXEC_CC_EQ << shift;
+ else
+ cc->u[i] |= TGSI_EXEC_CC_UN << shift;
+ }
+ }
+}
+
+#define FETCH(VAL,INDEX,CHAN)\
+ fetch_source (mach, VAL, &inst->FullSrcRegisters[INDEX], CHAN)
+
+#define STORE(VAL,INDEX,CHAN)\
+ store_dest (mach, VAL, &inst->FullDstRegisters[INDEX], inst, CHAN )
+
+
+/**
+ * Execute ARB-style KIL which is predicated by a src register.
+ * Kill fragment if any of the four values is less than zero.
+ */
+static void
+exec_kil(struct tgsi_exec_machine *mach,
+ const struct tgsi_full_instruction *inst)
+{
+ uint uniquemask;
+ uint chan_index;
+ uint kilmask = 0; /* bit 0 = pixel 0, bit 1 = pixel 1, etc */
+ union tgsi_exec_channel r[1];
+
+ /* This mask stores component bits that have already been tested. Note that
+ * we test whether the value is less than zero, so 1.0 and 0.0 need not be
+ * tested. */
+ uniquemask = (1 << TGSI_EXTSWIZZLE_ZERO) | (1 << TGSI_EXTSWIZZLE_ONE);
+
+ for (chan_index = 0; chan_index < 4; chan_index++)
+ {
+ uint swizzle;
+ uint i;
+
+ /* unswizzle channel */
+ swizzle = tgsi_util_get_full_src_register_extswizzle (
+ &inst->FullSrcRegisters[0],
+ chan_index);
+
+ /* check if the component has not been already tested */
+ if (uniquemask & (1 << swizzle))
+ continue;
+ uniquemask |= 1 << swizzle;
+
+ FETCH(&r[0], 0, chan_index);
+ for (i = 0; i < 4; i++)
+ if (r[0].f[i] < 0.0f)
+ kilmask |= 1 << i;
+ }
+
+ mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0] |= kilmask;
+}
+
+/**
+ * Execute NVIDIA-style KIL which is predicated by a condition code.
+ * Kill fragment if the condition code is TRUE.
+ */
+static void
+exec_kilp(struct tgsi_exec_machine *mach,
+ const struct tgsi_full_instruction *inst)
+{
+ uint kilmask; /* bit 0 = pixel 0, bit 1 = pixel 1, etc */
+
+ if (inst->InstructionExtNv.CondFlowEnable) {
+ uint swizzle[4];
+ uint chan_index;
+
+ kilmask = 0x0;
+
+ swizzle[0] = inst->InstructionExtNv.CondSwizzleX;
+ swizzle[1] = inst->InstructionExtNv.CondSwizzleY;
+ swizzle[2] = inst->InstructionExtNv.CondSwizzleZ;
+ swizzle[3] = inst->InstructionExtNv.CondSwizzleW;
+
+ for (chan_index = 0; chan_index < 4; chan_index++)
+ {
+ uint i;
+
+ for (i = 0; i < 4; i++) {
+ /* TODO: evaluate the condition code */
+ if (0)
+ kilmask |= 1 << i;
+ }
+ }
+ }
+ else {
+ /* "unconditional" kil */
+ kilmask = mach->ExecMask;
+ }
+ mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0] |= kilmask;
+}
+
+
+/*
+ * Fetch four texture samples using STR texture coordinates.
+ */
+static void
+fetch_texel( struct tgsi_sampler *sampler,
+ const union tgsi_exec_channel *s,
+ const union tgsi_exec_channel *t,
+ const union tgsi_exec_channel *p,
+ float lodbias, /* XXX should be float[4] */
+ union tgsi_exec_channel *r,
+ union tgsi_exec_channel *g,
+ union tgsi_exec_channel *b,
+ union tgsi_exec_channel *a )
+{
+ uint j;
+ float rgba[NUM_CHANNELS][QUAD_SIZE];
+
+ sampler->get_samples(sampler, s->f, t->f, p->f, lodbias, rgba);
+
+ for (j = 0; j < 4; j++) {
+ r->f[j] = rgba[0][j];
+ g->f[j] = rgba[1][j];
+ b->f[j] = rgba[2][j];
+ a->f[j] = rgba[3][j];
+ }
+}
+
+
+static void
+exec_tex(struct tgsi_exec_machine *mach,
+ const struct tgsi_full_instruction *inst,
+ boolean biasLod,
+ boolean projected)
+{
+ const uint unit = inst->FullSrcRegisters[1].SrcRegister.Index;
+ union tgsi_exec_channel r[4];
+ uint chan_index;
+ float lodBias;
+
+ /* debug_printf("Sampler %u unit %u\n", sampler, unit); */
+
+ switch (inst->InstructionExtTexture.Texture) {
+ case TGSI_TEXTURE_1D:
+ case TGSI_TEXTURE_SHADOW1D:
+
+ FETCH(&r[0], 0, CHAN_X);
+
+ if (projected) {
+ FETCH(&r[1], 0, CHAN_W);
+ micro_div( &r[0], &r[0], &r[1] );
+ }
+
+ if (biasLod) {
+ FETCH(&r[1], 0, CHAN_W);
+ lodBias = r[1].f[0];
+ }
+ else
+ lodBias = 0.0;
+
+ fetch_texel(mach->Samplers[unit],
+ &r[0], &ZeroVec, &ZeroVec, lodBias, /* S, T, P, BIAS */
+ &r[0], &r[1], &r[2], &r[3]); /* R, G, B, A */
+ break;
+
+ case TGSI_TEXTURE_2D:
+ case TGSI_TEXTURE_RECT:
+ case TGSI_TEXTURE_SHADOW2D:
+ case TGSI_TEXTURE_SHADOWRECT:
+
+ FETCH(&r[0], 0, CHAN_X);
+ FETCH(&r[1], 0, CHAN_Y);
+ FETCH(&r[2], 0, CHAN_Z);
+
+ if (projected) {
+ FETCH(&r[3], 0, CHAN_W);
+ micro_div( &r[0], &r[0], &r[3] );
+ micro_div( &r[1], &r[1], &r[3] );
+ micro_div( &r[2], &r[2], &r[3] );
+ }
+
+ if (biasLod) {
+ FETCH(&r[3], 0, CHAN_W);
+ lodBias = r[3].f[0];
+ }
+ else
+ lodBias = 0.0;
+
+ fetch_texel(mach->Samplers[unit],
+ &r[0], &r[1], &r[2], lodBias, /* inputs */
+ &r[0], &r[1], &r[2], &r[3]); /* outputs */
+ break;
+
+ case TGSI_TEXTURE_3D:
+ case TGSI_TEXTURE_CUBE:
+
+ FETCH(&r[0], 0, CHAN_X);
+ FETCH(&r[1], 0, CHAN_Y);
+ FETCH(&r[2], 0, CHAN_Z);
+
+ if (projected) {
+ FETCH(&r[3], 0, CHAN_W);
+ micro_div( &r[0], &r[0], &r[3] );
+ micro_div( &r[1], &r[1], &r[3] );
+ micro_div( &r[2], &r[2], &r[3] );
+ }
+
+ if (biasLod) {
+ FETCH(&r[3], 0, CHAN_W);
+ lodBias = r[3].f[0];
+ }
+ else
+ lodBias = 0.0;
+
+ fetch_texel(mach->Samplers[unit],
+ &r[0], &r[1], &r[2], lodBias,
+ &r[0], &r[1], &r[2], &r[3]);
+ break;
+
+ default:
+ assert (0);
+ }
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[chan_index], 0, chan_index );
+ }
+}
+
+
+/**
+ * Evaluate a constant-valued coefficient at the position of the
+ * current quad.
+ */
+static void
+eval_constant_coef(
+ struct tgsi_exec_machine *mach,
+ unsigned attrib,
+ unsigned chan )
+{
+ unsigned i;
+
+ for( i = 0; i < QUAD_SIZE; i++ ) {
+ mach->Inputs[attrib].xyzw[chan].f[i] = mach->InterpCoefs[attrib].a0[chan];
+ }
+}
+
+/**
+ * Evaluate a linear-valued coefficient at the position of the
+ * current quad.
+ */
+static void
+eval_linear_coef(
+ struct tgsi_exec_machine *mach,
+ unsigned attrib,
+ unsigned chan )
+{
+ const float x = mach->QuadPos.xyzw[0].f[0];
+ const float y = mach->QuadPos.xyzw[1].f[0];
+ const float dadx = mach->InterpCoefs[attrib].dadx[chan];
+ const float dady = mach->InterpCoefs[attrib].dady[chan];
+ const float a0 = mach->InterpCoefs[attrib].a0[chan] + dadx * x + dady * y;
+ mach->Inputs[attrib].xyzw[chan].f[0] = a0;
+ mach->Inputs[attrib].xyzw[chan].f[1] = a0 + dadx;
+ mach->Inputs[attrib].xyzw[chan].f[2] = a0 + dady;
+ mach->Inputs[attrib].xyzw[chan].f[3] = a0 + dadx + dady;
+}
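+
+/* In the four values above, element 1 is one pixel step in x from
+ * element 0, element 2 is one step in y, and element 3 is one step in
+ * both, which is consistent with the differences taken by micro_ddx()
+ * and micro_ddy().
+ */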
+
+/**
+ * Evaluate a perspective-valued coefficient at the position of the
+ * current quad.
+ */
+static void
+eval_perspective_coef(
+ struct tgsi_exec_machine *mach,
+ unsigned attrib,
+ unsigned chan )
+{
+ const float x = mach->QuadPos.xyzw[0].f[0];
+ const float y = mach->QuadPos.xyzw[1].f[0];
+ const float dadx = mach->InterpCoefs[attrib].dadx[chan];
+ const float dady = mach->InterpCoefs[attrib].dady[chan];
+ const float a0 = mach->InterpCoefs[attrib].a0[chan] + dadx * x + dady * y;
+ const float *w = mach->QuadPos.xyzw[3].f;
+ /* divide by W here */
+ mach->Inputs[attrib].xyzw[chan].f[0] = a0 / w[0];
+ mach->Inputs[attrib].xyzw[chan].f[1] = (a0 + dadx) / w[1];
+ mach->Inputs[attrib].xyzw[chan].f[2] = (a0 + dady) / w[2];
+ mach->Inputs[attrib].xyzw[chan].f[3] = (a0 + dadx + dady) / w[3];
+}
+
+
+typedef void (* eval_coef_func)(
+ struct tgsi_exec_machine *mach,
+ unsigned attrib,
+ unsigned chan );
+
+static void
+exec_declaration(
+ struct tgsi_exec_machine *mach,
+ const struct tgsi_full_declaration *decl )
+{
+ if( mach->Processor == TGSI_PROCESSOR_FRAGMENT ) {
+ if( decl->Declaration.File == TGSI_FILE_INPUT ) {
+ unsigned first, last, mask;
+ eval_coef_func eval;
+
+ first = decl->DeclarationRange.First;
+ last = decl->DeclarationRange.Last;
+ mask = decl->Declaration.UsageMask;
+
+ switch( decl->Declaration.Interpolate ) {
+ case TGSI_INTERPOLATE_CONSTANT:
+ eval = eval_constant_coef;
+ break;
+
+ case TGSI_INTERPOLATE_LINEAR:
+ eval = eval_linear_coef;
+ break;
+
+ case TGSI_INTERPOLATE_PERSPECTIVE:
+ eval = eval_perspective_coef;
+ break;
+
+ default:
+ eval = NULL;
+ assert( 0 );
+ }
+
+ if( mask == TGSI_WRITEMASK_XYZW ) {
+ unsigned i, j;
+
+ for( i = first; i <= last; i++ ) {
+ for( j = 0; j < NUM_CHANNELS; j++ ) {
+ eval( mach, i, j );
+ }
+ }
+ }
+ else {
+ unsigned i, j;
+
+ for( j = 0; j < NUM_CHANNELS; j++ ) {
+ if( mask & (1 << j) ) {
+ for( i = first; i <= last; i++ ) {
+ eval( mach, i, j );
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+static void
+exec_instruction(
+ struct tgsi_exec_machine *mach,
+ const struct tgsi_full_instruction *inst,
+ int *pc )
+{
+ uint chan_index;
+ union tgsi_exec_channel r[10];
+
+ (*pc)++;
+
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_ARL:
+ case TGSI_OPCODE_FLR:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ micro_flr( &r[0], &r[0] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_MOV:
+ case TGSI_OPCODE_SWZ:
+ if (inst->Flags & SOA_DEPENDENCY_FLAG) {
+ /* Do all fetches into temp regs, then do all stores to avoid
+ * intermediate/accidental clobbering. This could be done all the
+ * time for MOV but for other instructions we'll need more temps...
+ */
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[chan_index], 0, chan_index );
+ }
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[chan_index], 0, chan_index );
+ }
+ }
+ else {
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ STORE( &r[0], 0, chan_index );
+ }
+ }
+ break;
+
+ case TGSI_OPCODE_LIT:
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_X )) {
+ STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_X );
+ }
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Y ) || IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
+ FETCH( &r[0], 0, CHAN_X );
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
+ micro_max( &r[0], &r[0], &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C] );
+ STORE( &r[0], 0, CHAN_Y );
+ }
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
+ FETCH( &r[1], 0, CHAN_Y );
+ micro_max( &r[1], &r[1], &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C] );
+
+ FETCH( &r[2], 0, CHAN_W );
+ micro_min( &r[2], &r[2], &mach->Temps[TEMP_128_I].xyzw[TEMP_128_C] );
+ micro_max( &r[2], &r[2], &mach->Temps[TEMP_M128_I].xyzw[TEMP_M128_C] );
+ micro_pow( &r[1], &r[1], &r[2] );
+ micro_lt( &r[0], &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C], &r[0], &r[1], &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C] );
+ STORE( &r[0], 0, CHAN_Z );
+ }
+ }
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
+ STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
+ }
+ break;
+
+ case TGSI_OPCODE_RCP:
+ /* TGSI_OPCODE_RECIP */
+ FETCH( &r[0], 0, CHAN_X );
+ micro_div( &r[0], &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], &r[0] );
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_RSQ:
+ /* TGSI_OPCODE_RECIPSQRT */
+ FETCH( &r[0], 0, CHAN_X );
+ micro_abs( &r[0], &r[0] );
+ micro_sqrt( &r[0], &r[0] );
+ micro_div( &r[0], &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], &r[0] );
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_EXP:
+ FETCH( &r[0], 0, CHAN_X );
+ micro_flr( &r[1], &r[0] ); /* r1 = floor(r0) */
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_X )) {
+ micro_exp2( &r[2], &r[1] ); /* r2 = 2 ^ r1 */
+ STORE( &r[2], 0, CHAN_X ); /* store r2 */
+ }
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
+ micro_sub( &r[2], &r[0], &r[1] ); /* r2 = r0 - r1 */
+ STORE( &r[2], 0, CHAN_Y ); /* store r2 */
+ }
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
+ micro_exp2( &r[2], &r[0] ); /* r2 = 2 ^ r0 */
+ STORE( &r[2], 0, CHAN_Z ); /* store r2 */
+ }
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
+ STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
+ }
+ break;
+
+ case TGSI_OPCODE_LOG:
+ FETCH( &r[0], 0, CHAN_X );
+ micro_abs( &r[2], &r[0] ); /* r2 = abs(r0) */
+ micro_lg2( &r[1], &r[2] ); /* r1 = lg2(r2) */
+ micro_flr( &r[0], &r[1] ); /* r0 = floor(r1) */
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_X )) {
+ STORE( &r[0], 0, CHAN_X );
+ }
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
+ micro_exp2( &r[0], &r[0] ); /* r0 = 2 ^ r0 */
+ micro_div( &r[0], &r[2], &r[0] ); /* r0 = r2 / r0 */
+ STORE( &r[0], 0, CHAN_Y );
+ }
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
+ STORE( &r[1], 0, CHAN_Z );
+ }
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
+ STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
+ }
+ break;
+
+ case TGSI_OPCODE_MUL:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index )
+ {
+ FETCH(&r[0], 0, chan_index);
+ FETCH(&r[1], 1, chan_index);
+
+ micro_mul( &r[0], &r[0], &r[1] );
+
+ STORE(&r[0], 0, chan_index);
+ }
+ break;
+
+ case TGSI_OPCODE_ADD:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ micro_add( &r[0], &r[0], &r[1] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_DP3:
+ /* TGSI_OPCODE_DOT3 */
+ FETCH( &r[0], 0, CHAN_X );
+ FETCH( &r[1], 1, CHAN_X );
+ micro_mul( &r[0], &r[0], &r[1] );
+
+ FETCH( &r[1], 0, CHAN_Y );
+ FETCH( &r[2], 1, CHAN_Y );
+ micro_mul( &r[1], &r[1], &r[2] );
+ micro_add( &r[0], &r[0], &r[1] );
+
+ FETCH( &r[1], 0, CHAN_Z );
+ FETCH( &r[2], 1, CHAN_Z );
+ micro_mul( &r[1], &r[1], &r[2] );
+ micro_add( &r[0], &r[0], &r[1] );
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_DP4:
+ /* TGSI_OPCODE_DOT4 */
+ FETCH(&r[0], 0, CHAN_X);
+ FETCH(&r[1], 1, CHAN_X);
+
+ micro_mul( &r[0], &r[0], &r[1] );
+
+ FETCH(&r[1], 0, CHAN_Y);
+ FETCH(&r[2], 1, CHAN_Y);
+
+ micro_mul( &r[1], &r[1], &r[2] );
+ micro_add( &r[0], &r[0], &r[1] );
+
+ FETCH(&r[1], 0, CHAN_Z);
+ FETCH(&r[2], 1, CHAN_Z);
+
+ micro_mul( &r[1], &r[1], &r[2] );
+ micro_add( &r[0], &r[0], &r[1] );
+
+ FETCH(&r[1], 0, CHAN_W);
+ FETCH(&r[2], 1, CHAN_W);
+
+ micro_mul( &r[1], &r[1], &r[2] );
+ micro_add( &r[0], &r[0], &r[1] );
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_DST:
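+ /* Distance attenuation vector:
+ *   dst = (1, src0.y * src1.y, src0.z, src1.w)
+ */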
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_X )) {
+ STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_X );
+ }
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
+ FETCH( &r[0], 0, CHAN_Y );
+ FETCH( &r[1], 1, CHAN_Y);
+ micro_mul( &r[0], &r[0], &r[1] );
+ STORE( &r[0], 0, CHAN_Y );
+ }
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
+ FETCH( &r[0], 0, CHAN_Z );
+ STORE( &r[0], 0, CHAN_Z );
+ }
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
+ FETCH( &r[0], 1, CHAN_W );
+ STORE( &r[0], 0, CHAN_W );
+ }
+ break;
+
+ case TGSI_OPCODE_MIN:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH(&r[0], 0, chan_index);
+ FETCH(&r[1], 1, chan_index);
+
+ /* XXX use micro_min()?? */
+ micro_lt( &r[0], &r[0], &r[1], &r[0], &r[1] );
+
+ STORE(&r[0], 0, chan_index);
+ }
+ break;
+
+ case TGSI_OPCODE_MAX:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH(&r[0], 0, chan_index);
+ FETCH(&r[1], 1, chan_index);
+
+ /* XXX use micro_max()?? */
+ micro_lt( &r[0], &r[0], &r[1], &r[1], &r[0] );
+
+ STORE(&r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SLT:
+ /* TGSI_OPCODE_SETLT */
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ micro_lt( &r[0], &r[0], &r[1], &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SGE:
+ /* TGSI_OPCODE_SETGE */
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ micro_le( &r[0], &r[1], &r[0], &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_MAD:
+ /* TGSI_OPCODE_MADD */
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ micro_mul( &r[0], &r[0], &r[1] );
+ FETCH( &r[1], 2, chan_index );
+ micro_add( &r[0], &r[0], &r[1] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SUB:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH(&r[0], 0, chan_index);
+ FETCH(&r[1], 1, chan_index);
+
+ micro_sub( &r[0], &r[0], &r[1] );
+
+ STORE(&r[0], 0, chan_index);
+ }
+ break;
+
+ case TGSI_OPCODE_LRP:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH(&r[0], 0, chan_index);
+ FETCH(&r[1], 1, chan_index);
+ FETCH(&r[2], 2, chan_index);
+
+ micro_sub( &r[1], &r[1], &r[2] );
+ micro_mul( &r[0], &r[0], &r[1] );
+ micro_add( &r[0], &r[0], &r[2] );
+
+ STORE(&r[0], 0, chan_index);
+ }
+ break;
+
+ case TGSI_OPCODE_CND:
+ FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
+ FETCH(&r[0], 0, chan_index);
+ FETCH(&r[1], 1, chan_index);
+ FETCH(&r[2], 2, chan_index);
+ micro_lt(&r[0], &mach->Temps[TEMP_HALF_I].xyzw[TEMP_HALF_C], &r[2], &r[0], &r[1]);
+ STORE(&r[0], 0, chan_index);
+ }
+ break;
+
+ case TGSI_OPCODE_DP2A:
+ FETCH( &r[0], 0, CHAN_X );
+ FETCH( &r[1], 1, CHAN_X );
+ micro_mul( &r[0], &r[0], &r[1] );
+
+ FETCH( &r[1], 0, CHAN_Y );
+ FETCH( &r[2], 1, CHAN_Y );
+ micro_mul( &r[1], &r[1], &r[2] );
+ micro_add( &r[0], &r[0], &r[1] );
+
+ FETCH( &r[2], 2, CHAN_X );
+ micro_add( &r[0], &r[0], &r[2] );
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_FRC:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ micro_frc( &r[0], &r[0] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_CLAMP:
+ FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
+ FETCH(&r[0], 0, chan_index);
+ FETCH(&r[1], 1, chan_index);
+ micro_max(&r[0], &r[0], &r[1]);
+ FETCH(&r[1], 2, chan_index);
+ micro_min(&r[0], &r[0], &r[1]);
+ STORE(&r[0], 0, chan_index);
+ }
+ break;
+
+ case TGSI_OPCODE_ROUND:
+ case TGSI_OPCODE_ARR:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ micro_rnd( &r[0], &r[0] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_EX2:
+ FETCH(&r[0], 0, CHAN_X);
+
+#if FAST_MATH
+ micro_exp2( &r[0], &r[0] );
+#else
+ micro_pow( &r[0], &mach->Temps[TEMP_2_I].xyzw[TEMP_2_C], &r[0] );
+#endif
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_LG2:
+ FETCH( &r[0], 0, CHAN_X );
+ micro_lg2( &r[0], &r[0] );
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_POW:
+ FETCH(&r[0], 0, CHAN_X);
+ FETCH(&r[1], 1, CHAN_X);
+
+ micro_pow( &r[0], &r[0], &r[1] );
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_XPD:
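+ /* Cross product: dst.xyz = src0.xyz x src1.xyz, dst.w = 1 */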
+ FETCH(&r[0], 0, CHAN_Y);
+ FETCH(&r[1], 1, CHAN_Z);
+
+ micro_mul( &r[2], &r[0], &r[1] );
+
+ FETCH(&r[3], 0, CHAN_Z);
+ FETCH(&r[4], 1, CHAN_Y);
+
+ micro_mul( &r[5], &r[3], &r[4] );
+ micro_sub( &r[2], &r[2], &r[5] );
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_X )) {
+ STORE( &r[2], 0, CHAN_X );
+ }
+
+ FETCH(&r[2], 1, CHAN_X);
+
+ micro_mul( &r[3], &r[3], &r[2] );
+
+ FETCH(&r[5], 0, CHAN_X);
+
+ micro_mul( &r[1], &r[1], &r[5] );
+ micro_sub( &r[3], &r[3], &r[1] );
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
+ STORE( &r[3], 0, CHAN_Y );
+ }
+
+ micro_mul( &r[5], &r[5], &r[4] );
+ micro_mul( &r[0], &r[0], &r[2] );
+ micro_sub( &r[5], &r[5], &r[0] );
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
+ STORE( &r[5], 0, CHAN_Z );
+ }
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
+ STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
+ }
+ break;
+
+ case TGSI_OPCODE_ABS:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH(&r[0], 0, chan_index);
+
+ micro_abs( &r[0], &r[0] );
+
+ STORE(&r[0], 0, chan_index);
+ }
+ break;
+
+ case TGSI_OPCODE_RCC:
+ FETCH(&r[0], 0, CHAN_X);
+ micro_div(&r[0], &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], &r[0]);
+ micro_float_clamp(&r[0], &r[0]);
+ FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
+ STORE(&r[0], 0, chan_index);
+ }
+ break;
+
+ case TGSI_OPCODE_DPH:
+ FETCH(&r[0], 0, CHAN_X);
+ FETCH(&r[1], 1, CHAN_X);
+
+ micro_mul( &r[0], &r[0], &r[1] );
+
+ FETCH(&r[1], 0, CHAN_Y);
+ FETCH(&r[2], 1, CHAN_Y);
+
+ micro_mul( &r[1], &r[1], &r[2] );
+ micro_add( &r[0], &r[0], &r[1] );
+
+ FETCH(&r[1], 0, CHAN_Z);
+ FETCH(&r[2], 1, CHAN_Z);
+
+ micro_mul( &r[1], &r[1], &r[2] );
+ micro_add( &r[0], &r[0], &r[1] );
+
+ FETCH(&r[1], 1, CHAN_W);
+
+ micro_add( &r[0], &r[0], &r[1] );
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_COS:
+ FETCH(&r[0], 0, CHAN_X);
+
+ micro_cos( &r[0], &r[0] );
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_DDX:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ micro_ddx( &r[0], &r[0] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_DDY:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ micro_ddy( &r[0], &r[0] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_KILP:
+ exec_kilp (mach, inst);
+ break;
+
+ case TGSI_OPCODE_KIL:
+ exec_kil (mach, inst);
+ break;
+
+ case TGSI_OPCODE_PK2H:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_PK2US:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_PK4B:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_PK4UB:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_RFL:
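+ /* Reflection vector:
+ *   dst.xyz = 2 * dot3(src0, src1) / dot3(src0, src0) * src0.xyz - src1.xyz
+ *   dst.w = 1
+ */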
+ if (IS_CHANNEL_ENABLED(*inst, CHAN_X) ||
+ IS_CHANNEL_ENABLED(*inst, CHAN_Y) ||
+ IS_CHANNEL_ENABLED(*inst, CHAN_Z)) {
+ /* r0 = dp3(src0, src0) */
+ FETCH(&r[2], 0, CHAN_X);
+ micro_mul(&r[0], &r[2], &r[2]);
+ FETCH(&r[4], 0, CHAN_Y);
+ micro_mul(&r[8], &r[4], &r[4]);
+ micro_add(&r[0], &r[0], &r[8]);
+ FETCH(&r[6], 0, CHAN_Z);
+ micro_mul(&r[8], &r[6], &r[6]);
+ micro_add(&r[0], &r[0], &r[8]);
+
+ /* r1 = dp3(src0, src1) */
+ FETCH(&r[3], 1, CHAN_X);
+ micro_mul(&r[1], &r[2], &r[3]);
+ FETCH(&r[5], 1, CHAN_Y);
+ micro_mul(&r[8], &r[4], &r[5]);
+ micro_add(&r[1], &r[1], &r[8]);
+ FETCH(&r[7], 1, CHAN_Z);
+ micro_mul(&r[8], &r[6], &r[7]);
+ micro_add(&r[1], &r[1], &r[8]);
+
+ /* r1 = 2 * r1 / r0 */
+ micro_add(&r[1], &r[1], &r[1]);
+ micro_div(&r[1], &r[1], &r[0]);
+
+ if (IS_CHANNEL_ENABLED(*inst, CHAN_X)) {
+ micro_mul(&r[2], &r[2], &r[1]);
+ micro_sub(&r[2], &r[2], &r[3]);
+ STORE(&r[2], 0, CHAN_X);
+ }
+ if (IS_CHANNEL_ENABLED(*inst, CHAN_Y)) {
+ micro_mul(&r[4], &r[4], &r[1]);
+ micro_sub(&r[4], &r[4], &r[5]);
+ STORE(&r[4], 0, CHAN_Y);
+ }
+ if (IS_CHANNEL_ENABLED(*inst, CHAN_Z)) {
+ micro_mul(&r[6], &r[6], &r[1]);
+ micro_sub(&r[6], &r[6], &r[7]);
+ STORE(&r[6], 0, CHAN_Z);
+ }
+ }
+ if (IS_CHANNEL_ENABLED(*inst, CHAN_W)) {
+ STORE(&mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W);
+ }
+ break;
+
+ case TGSI_OPCODE_SEQ:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ micro_eq( &r[0], &r[0], &r[1],
+ &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C],
+ &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SFL:
+ FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
+ STORE(&mach->Temps[TEMP_0_I].xyzw[TEMP_0_C], 0, chan_index);
+ }
+ break;
+
+ case TGSI_OPCODE_SGT:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ micro_le( &r[0], &r[0], &r[1], &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C], &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SIN:
+ FETCH( &r[0], 0, CHAN_X );
+ micro_sin( &r[0], &r[0] );
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SLE:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ micro_le( &r[0], &r[0], &r[1], &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SNE:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ micro_eq( &r[0], &r[0], &r[1], &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C], &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_STR:
+ FOR_EACH_ENABLED_CHANNEL(*inst, chan_index) {
+ STORE(&mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, chan_index);
+ }
+ break;
+
+ case TGSI_OPCODE_TEX:
+ /* simple texture lookup */
+ /* src[0] = texcoord */
+ /* src[1] = sampler unit */
+ exec_tex(mach, inst, FALSE, FALSE);
+ break;
+
+ case TGSI_OPCODE_TXB:
+ /* Texture lookup with lod bias */
+ /* src[0] = texcoord (src[0].w = LOD bias) */
+ /* src[1] = sampler unit */
+ exec_tex(mach, inst, TRUE, FALSE);
+ break;
+
+ case TGSI_OPCODE_TXD:
+ /* Texture lookup with explicit partial derivatives */
+ /* src[0] = texcoord */
+ /* src[1] = d[strq]/dx */
+ /* src[2] = d[strq]/dy */
+ /* src[3] = sampler unit */
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_TXL:
+ /* Texture lookup with explicit LOD */
+ /* src[0] = texcoord (src[0].w = LOD) */
+ /* src[1] = sampler unit */
+ exec_tex(mach, inst, TRUE, FALSE);
+ break;
+
+ case TGSI_OPCODE_TXP:
+ /* Texture lookup with projection */
+ /* src[0] = texcoord (src[0].w = projection) */
+ /* src[1] = sampler unit */
+ exec_tex(mach, inst, FALSE, TRUE);
+ break;
+
+ case TGSI_OPCODE_UP2H:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_UP2US:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_UP4B:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_UP4UB:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_X2D:
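+ /* 2D coordinate transform:
+ *   dst.x = dst.z = src0.x + src1.x * src2.x + src1.y * src2.y
+ *   dst.y = dst.w = src0.y + src1.x * src2.z + src1.y * src2.w
+ */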
+ FETCH(&r[0], 1, CHAN_X);
+ FETCH(&r[1], 1, CHAN_Y);
+ if (IS_CHANNEL_ENABLED(*inst, CHAN_X) ||
+ IS_CHANNEL_ENABLED(*inst, CHAN_Z)) {
+ FETCH(&r[2], 2, CHAN_X);
+ micro_mul(&r[2], &r[2], &r[0]);
+ FETCH(&r[3], 2, CHAN_Y);
+ micro_mul(&r[3], &r[3], &r[1]);
+ micro_add(&r[2], &r[2], &r[3]);
+ FETCH(&r[3], 0, CHAN_X);
+ micro_add(&r[2], &r[2], &r[3]);
+ if (IS_CHANNEL_ENABLED(*inst, CHAN_X)) {
+ STORE(&r[2], 0, CHAN_X);
+ }
+ if (IS_CHANNEL_ENABLED(*inst, CHAN_Z)) {
+ STORE(&r[2], 0, CHAN_Z);
+ }
+ }
+ if (IS_CHANNEL_ENABLED(*inst, CHAN_Y) ||
+ IS_CHANNEL_ENABLED(*inst, CHAN_W)) {
+ FETCH(&r[2], 2, CHAN_Z);
+ micro_mul(&r[2], &r[2], &r[0]);
+ FETCH(&r[3], 2, CHAN_W);
+ micro_mul(&r[3], &r[3], &r[1]);
+ micro_add(&r[2], &r[2], &r[3]);
+ FETCH(&r[3], 0, CHAN_Y);
+ micro_add(&r[2], &r[2], &r[3]);
+ if (IS_CHANNEL_ENABLED(*inst, CHAN_Y)) {
+ STORE(&r[2], 0, CHAN_Y);
+ }
+ if (IS_CHANNEL_ENABLED(*inst, CHAN_W)) {
+ STORE(&r[2], 0, CHAN_W);
+ }
+ }
+ break;
+
+ case TGSI_OPCODE_ARA:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_BRA:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_CAL:
+ /* skip the call if no execution channels are enabled */
+ if (mach->ExecMask) {
+ /* do the call */
+
+ /* First, record the depths of the execution stacks.
+ * This is important for deeply nested/looped return statements.
+ * We have to unwind the stacks by the correct amount. For a
+ * real code generator, we could determine the number of entries
+ * to pop off each stack with simple static analysis and avoid
+ * implementing this data structure at run time.
+ */
+ mach->CallStack[mach->CallStackTop].CondStackTop = mach->CondStackTop;
+ mach->CallStack[mach->CallStackTop].LoopStackTop = mach->LoopStackTop;
+ mach->CallStack[mach->CallStackTop].ContStackTop = mach->ContStackTop;
+ /* note that PC was already incremented above */
+ mach->CallStack[mach->CallStackTop].ReturnAddr = *pc;
+
+ mach->CallStackTop++;
+
+ /* Second, push the Cond, Loop, Cont, Func stacks */
+ assert(mach->CondStackTop < TGSI_EXEC_MAX_COND_NESTING);
+ mach->CondStack[mach->CondStackTop++] = mach->CondMask;
+ assert(mach->LoopStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
+ mach->LoopStack[mach->LoopStackTop++] = mach->LoopMask;
+ assert(mach->ContStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
+ mach->ContStack[mach->ContStackTop++] = mach->ContMask;
+ assert(mach->FuncStackTop < TGSI_EXEC_MAX_CALL_NESTING);
+ mach->FuncStack[mach->FuncStackTop++] = mach->FuncMask;
+
+ /* Finally, jump to the subroutine */
+ *pc = inst->InstructionExtLabel.Label;
+ }
+ break;
+
+ case TGSI_OPCODE_RET:
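+ /* Disable the returning channels.  Once no channels are left active
+ * in the current function, pop the call record pushed by CAL and
+ * restore the Cond/Loop/Cont/Func masks and the return address.
+ */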
+ mach->FuncMask &= ~mach->ExecMask;
+ UPDATE_EXEC_MASK(mach);
+
+ if (mach->FuncMask == 0x0) {
+ /* really return now (otherwise, keep executing) */
+
+ if (mach->CallStackTop == 0) {
+ /* returning from main() */
+ *pc = -1;
+ return;
+ }
+
+ assert(mach->CallStackTop > 0);
+ mach->CallStackTop--;
+
+ mach->CondStackTop = mach->CallStack[mach->CallStackTop].CondStackTop;
+ mach->CondMask = mach->CondStack[mach->CondStackTop];
+
+ mach->LoopStackTop = mach->CallStack[mach->CallStackTop].LoopStackTop;
+ mach->LoopMask = mach->LoopStack[mach->LoopStackTop];
+
+ mach->ContStackTop = mach->CallStack[mach->CallStackTop].ContStackTop;
+ mach->ContMask = mach->ContStack[mach->ContStackTop];
+
+ assert(mach->FuncStackTop > 0);
+ mach->FuncMask = mach->FuncStack[--mach->FuncStackTop];
+
+ *pc = mach->CallStack[mach->CallStackTop].ReturnAddr;
+
+ UPDATE_EXEC_MASK(mach);
+ }
+ break;
+
+ case TGSI_OPCODE_SSG:
+ /* TGSI_OPCODE_SGN */
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ micro_sgn( &r[0], &r[0] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_CMP:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH(&r[0], 0, chan_index);
+ FETCH(&r[1], 1, chan_index);
+ FETCH(&r[2], 2, chan_index);
+
+ micro_lt( &r[0], &r[0], &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C], &r[1], &r[2] );
+
+ STORE(&r[0], 0, chan_index);
+ }
+ break;
+
+ case TGSI_OPCODE_SCS:
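+ /* Sine/cosine: dst = (cos(src.x), sin(src.x), 0, 1) */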
+ if( IS_CHANNEL_ENABLED( *inst, CHAN_X ) || IS_CHANNEL_ENABLED( *inst, CHAN_Y ) ) {
+ FETCH( &r[0], 0, CHAN_X );
+ if (IS_CHANNEL_ENABLED(*inst, CHAN_X)) {
+ micro_cos(&r[1], &r[0]);
+ STORE(&r[1], 0, CHAN_X);
+ }
+ if (IS_CHANNEL_ENABLED(*inst, CHAN_Y)) {
+ micro_sin(&r[1], &r[0]);
+ STORE(&r[1], 0, CHAN_Y);
+ }
+ }
+ if( IS_CHANNEL_ENABLED( *inst, CHAN_Z ) ) {
+ STORE( &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C], 0, CHAN_Z );
+ }
+ if( IS_CHANNEL_ENABLED( *inst, CHAN_W ) ) {
+ STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
+ }
+ break;
+
+ case TGSI_OPCODE_NRM:
+ /* 3-component vector normalize */
+ if(IS_CHANNEL_ENABLED(*inst, CHAN_X) ||
+ IS_CHANNEL_ENABLED(*inst, CHAN_Y) ||
+ IS_CHANNEL_ENABLED(*inst, CHAN_Z)) {
+ /* r3 = sqrt(dp3(src0, src0)) */
+ FETCH(&r[0], 0, CHAN_X);
+ micro_mul(&r[3], &r[0], &r[0]);
+ FETCH(&r[1], 0, CHAN_Y);
+ micro_mul(&r[4], &r[1], &r[1]);
+ micro_add(&r[3], &r[3], &r[4]);
+ FETCH(&r[2], 0, CHAN_Z);
+ micro_mul(&r[4], &r[2], &r[2]);
+ micro_add(&r[3], &r[3], &r[4]);
+ micro_sqrt(&r[3], &r[3]);
+
+ if (IS_CHANNEL_ENABLED(*inst, CHAN_X)) {
+ micro_div(&r[0], &r[0], &r[3]);
+ STORE(&r[0], 0, CHAN_X);
+ }
+ if (IS_CHANNEL_ENABLED(*inst, CHAN_Y)) {
+ micro_div(&r[1], &r[1], &r[3]);
+ STORE(&r[1], 0, CHAN_Y);
+ }
+ if (IS_CHANNEL_ENABLED(*inst, CHAN_Z)) {
+ micro_div(&r[2], &r[2], &r[3]);
+ STORE(&r[2], 0, CHAN_Z);
+ }
+ }
+ if (IS_CHANNEL_ENABLED(*inst, CHAN_W)) {
+ STORE(&mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W);
+ }
+ break;
+
+ case TGSI_OPCODE_NRM4:
+ /* 4-component vector normalize */
+ {
+ union tgsi_exec_channel tmp, dot;
+
+ /* tmp = dp4(src0, src0): */
+ FETCH( &r[0], 0, CHAN_X );
+ micro_mul( &tmp, &r[0], &r[0] );
+
+ FETCH( &r[1], 0, CHAN_Y );
+ micro_mul( &dot, &r[1], &r[1] );
+ micro_add( &tmp, &tmp, &dot );
+
+ FETCH( &r[2], 0, CHAN_Z );
+ micro_mul( &dot, &r[2], &r[2] );
+ micro_add( &tmp, &tmp, &dot );
+
+ FETCH( &r[3], 0, CHAN_W );
+ micro_mul( &dot, &r[3], &r[3] );
+ micro_add( &tmp, &tmp, &dot );
+
+ /* tmp = 1 / sqrt(tmp) */
+ micro_sqrt( &tmp, &tmp );
+ micro_div( &tmp, &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], &tmp );
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ /* chan = chan * tmp */
+ micro_mul( &r[chan_index], &tmp, &r[chan_index] );
+ STORE( &r[chan_index], 0, chan_index );
+ }
+ }
+ break;
+
+ case TGSI_OPCODE_DIV:
+ assert( 0 );
+ break;
+
+ case TGSI_OPCODE_DP2:
+ FETCH( &r[0], 0, CHAN_X );
+ FETCH( &r[1], 1, CHAN_X );
+ micro_mul( &r[0], &r[0], &r[1] );
+
+ FETCH( &r[1], 0, CHAN_Y );
+ FETCH( &r[2], 1, CHAN_Y );
+ micro_mul( &r[1], &r[1], &r[2] );
+ micro_add( &r[0], &r[0], &r[1] );
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_IF:
+ /* push CondMask */
+ assert(mach->CondStackTop < TGSI_EXEC_MAX_COND_NESTING);
+ mach->CondStack[mach->CondStackTop++] = mach->CondMask;
+ FETCH( &r[0], 0, CHAN_X );
+ /* update CondMask */
+ if( ! r[0].u[0] ) {
+ mach->CondMask &= ~0x1;
+ }
+ if( ! r[0].u[1] ) {
+ mach->CondMask &= ~0x2;
+ }
+ if( ! r[0].u[2] ) {
+ mach->CondMask &= ~0x4;
+ }
+ if( ! r[0].u[3] ) {
+ mach->CondMask &= ~0x8;
+ }
+ UPDATE_EXEC_MASK(mach);
+ /* Todo: If CondMask==0, jump to ELSE */
+ break;
+
+ case TGSI_OPCODE_ELSE:
+ /* invert CondMask wrt previous mask */
+ {
+ uint prevMask;
+ assert(mach->CondStackTop > 0);
+ prevMask = mach->CondStack[mach->CondStackTop - 1];
+ mach->CondMask = ~mach->CondMask & prevMask;
+ UPDATE_EXEC_MASK(mach);
+ /* Todo: If CondMask==0, jump to ENDIF */
+ }
+ break;
+
+ case TGSI_OPCODE_ENDIF:
+ /* pop CondMask */
+ assert(mach->CondStackTop > 0);
+ mach->CondMask = mach->CondStack[--mach->CondStackTop];
+ UPDATE_EXEC_MASK(mach);
+ break;
+
+ case TGSI_OPCODE_END:
+ /* halt execution */
+ *pc = -1;
+ break;
+
+ case TGSI_OPCODE_REP:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_ENDREP:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_PUSHA:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_POPA:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_CEIL:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ micro_ceil( &r[0], &r[0] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_I2F:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ micro_i2f( &r[0], &r[0] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_NOT:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ micro_not( &r[0], &r[0] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_TRUNC:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ micro_trunc( &r[0], &r[0] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SHL:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ micro_shl( &r[0], &r[0], &r[1] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SHR:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ micro_ishr( &r[0], &r[0], &r[1] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_AND:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ micro_and( &r[0], &r[0], &r[1] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_OR:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ micro_or( &r[0], &r[0], &r[1] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_MOD:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_XOR:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ micro_xor( &r[0], &r[0], &r[1] );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SAD:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_TXF:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_TXQ:
+ assert (0);
+ break;
+
+ case TGSI_OPCODE_EMIT:
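+ /* Emit a vertex: advance the output register index by 16 slots
+ * (presumably one vertex's worth of outputs in this interpreter) and
+ * bump the vertex count of the current primitive.  ENDPRIM below then
+ * starts a new, empty primitive.
+ */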
+ mach->Temps[TEMP_OUTPUT_I].xyzw[TEMP_OUTPUT_C].u[0] += 16;
+ mach->Primitives[mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0]]++;
+ break;
+
+ case TGSI_OPCODE_ENDPRIM:
+ mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0]++;
+ mach->Primitives[mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0]] = 0;
+ break;
+
+ case TGSI_OPCODE_BGNFOR:
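+ /* Loop counter layout matches tgsi_exec.h: x = remaining count,
+ * y = current value, z = step.  The current value is also written
+ * to dst.x so the shader can read it inside the loop body.
+ */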
+ assert(mach->LoopCounterStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
+ for (chan_index = 0; chan_index < 3; chan_index++) {
+ FETCH( &mach->LoopCounterStack[mach->LoopCounterStackTop].xyzw[chan_index], 0, chan_index );
+ }
+ STORE( &mach->LoopCounterStack[mach->LoopCounterStackTop].xyzw[CHAN_Y], 0, CHAN_X );
+ ++mach->LoopCounterStackTop;
+ /* fall-through (for now) */
+ case TGSI_OPCODE_BGNLOOP:
+ /* push LoopMask and ContMasks */
+ assert(mach->LoopStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
+ mach->LoopStack[mach->LoopStackTop++] = mach->LoopMask;
+ assert(mach->ContStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
+ mach->ContStack[mach->ContStackTop++] = mach->ContMask;
+ assert(mach->LoopLabelStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
+ mach->LoopLabelStack[mach->LoopLabelStackTop++] = *pc - 1;
+ break;
+
+ case TGSI_OPCODE_ENDFOR:
+ assert(mach->LoopCounterStackTop > 0);
+ micro_sub( &mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_X],
+ &mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_X],
+ &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C] );
+ /* update LoopMask */
+ if( mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_X].f[0] <= 0) {
+ mach->LoopMask &= ~0x1;
+ }
+ if( mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_X].f[1] <= 0 ) {
+ mach->LoopMask &= ~0x2;
+ }
+ if( mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_X].f[2] <= 0 ) {
+ mach->LoopMask &= ~0x4;
+ }
+ if( mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_X].f[3] <= 0 ) {
+ mach->LoopMask &= ~0x8;
+ }
+ micro_add( &mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_Y],
+ &mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_Y],
+ &mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_Z]);
+ assert(mach->LoopLabelStackTop > 0);
+ inst = mach->Instructions + mach->LoopLabelStack[mach->LoopLabelStackTop - 1];
+ STORE( &mach->LoopCounterStack[mach->LoopCounterStackTop - 1].xyzw[CHAN_Y], 0, CHAN_X );
+ /* Restore ContMask, but don't pop */
+ assert(mach->ContStackTop > 0);
+ mach->ContMask = mach->ContStack[mach->ContStackTop - 1];
+ UPDATE_EXEC_MASK(mach);
+ if (mach->ExecMask) {
+ /* repeat loop: jump to instruction just past BGNLOOP */
+ assert(mach->LoopLabelStackTop > 0);
+ *pc = mach->LoopLabelStack[mach->LoopLabelStackTop - 1] + 1;
+ }
+ else {
+ /* exit loop: pop LoopMask */
+ assert(mach->LoopStackTop > 0);
+ mach->LoopMask = mach->LoopStack[--mach->LoopStackTop];
+ /* pop ContMask */
+ assert(mach->ContStackTop > 0);
+ mach->ContMask = mach->ContStack[--mach->ContStackTop];
+ assert(mach->LoopLabelStackTop > 0);
+ --mach->LoopLabelStackTop;
+ assert(mach->LoopCounterStackTop > 0);
+ --mach->LoopCounterStackTop;
+ }
+ UPDATE_EXEC_MASK(mach);
+ break;
+
+ case TGSI_OPCODE_ENDLOOP:
+ /* Restore ContMask, but don't pop */
+ assert(mach->ContStackTop > 0);
+ mach->ContMask = mach->ContStack[mach->ContStackTop - 1];
+ UPDATE_EXEC_MASK(mach);
+ if (mach->ExecMask) {
+ /* repeat loop: jump to instruction just past BGNLOOP */
+ assert(mach->LoopLabelStackTop > 0);
+ *pc = mach->LoopLabelStack[mach->LoopLabelStackTop - 1] + 1;
+ }
+ else {
+ /* exit loop: pop LoopMask */
+ assert(mach->LoopStackTop > 0);
+ mach->LoopMask = mach->LoopStack[--mach->LoopStackTop];
+ /* pop ContMask */
+ assert(mach->ContStackTop > 0);
+ mach->ContMask = mach->ContStack[--mach->ContStackTop];
+ assert(mach->LoopLabelStackTop > 0);
+ --mach->LoopLabelStackTop;
+ }
+ UPDATE_EXEC_MASK(mach);
+ break;
+
+ case TGSI_OPCODE_BRK:
+ /* turn off loop channels for each enabled exec channel */
+ mach->LoopMask &= ~mach->ExecMask;
+ /* Todo: if mach->LoopMask == 0, jump to end of loop */
+ UPDATE_EXEC_MASK(mach);
+ break;
+
+ case TGSI_OPCODE_CONT:
+ /* turn off cont channels for each enabled exec channel */
+ mach->ContMask &= ~mach->ExecMask;
+ /* Todo: if mach->ContMask == 0, jump to end of loop */
+ UPDATE_EXEC_MASK(mach);
+ break;
+
+ case TGSI_OPCODE_BGNSUB:
+ /* no-op */
+ break;
+
+ case TGSI_OPCODE_ENDSUB:
+ /* no-op */
+ break;
+
+ case TGSI_OPCODE_NOISE1:
+ assert( 0 );
+ break;
+
+ case TGSI_OPCODE_NOISE2:
+ assert( 0 );
+ break;
+
+ case TGSI_OPCODE_NOISE3:
+ assert( 0 );
+ break;
+
+ case TGSI_OPCODE_NOISE4:
+ assert( 0 );
+ break;
+
+ case TGSI_OPCODE_NOP:
+ break;
+
+ default:
+ assert( 0 );
+ }
+}
+
+
+/**
+ * Run TGSI interpreter.
+ * \return bitmask of "alive" quad components
+ */
+uint
+tgsi_exec_machine_run( struct tgsi_exec_machine *mach )
+{
+ uint i;
+ int pc = 0;
+
+ mach->CondMask = 0xf;
+ mach->LoopMask = 0xf;
+ mach->ContMask = 0xf;
+ mach->FuncMask = 0xf;
+ mach->ExecMask = 0xf;
+
+ assert(mach->CondStackTop == 0);
+ assert(mach->LoopStackTop == 0);
+ assert(mach->ContStackTop == 0);
+ assert(mach->CallStackTop == 0);
+
+ mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0] = 0;
+ mach->Temps[TEMP_OUTPUT_I].xyzw[TEMP_OUTPUT_C].u[0] = 0;
+
+ if( mach->Processor == TGSI_PROCESSOR_GEOMETRY ) {
+ mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0] = 0;
+ mach->Primitives[0] = 0;
+ }
+
+ for (i = 0; i < QUAD_SIZE; i++) {
+ mach->Temps[TEMP_CC_I].xyzw[TEMP_CC_C].u[i] =
+ (TGSI_EXEC_CC_EQ << TGSI_EXEC_CC_X_SHIFT) |
+ (TGSI_EXEC_CC_EQ << TGSI_EXEC_CC_Y_SHIFT) |
+ (TGSI_EXEC_CC_EQ << TGSI_EXEC_CC_Z_SHIFT) |
+ (TGSI_EXEC_CC_EQ << TGSI_EXEC_CC_W_SHIFT);
+ }
+
+ /* execute declarations (interpolants) */
+ for (i = 0; i < mach->NumDeclarations; i++) {
+ exec_declaration( mach, mach->Declarations+i );
+ }
+
+ /* execute instructions, until pc is set to -1 */
+ while (pc != -1) {
+ assert(pc < (int) mach->NumInstructions);
+ exec_instruction( mach, mach->Instructions + pc, &pc );
+ }
+
+#if 0
+ /* we scale from floats in [0,1] to Zbuffer ints in sp_quad_depth_test.c */
+ if (mach->Processor == TGSI_PROCESSOR_FRAGMENT) {
+ /*
+ * Scale back depth component.
+ */
+ for (i = 0; i < 4; i++)
+ mach->Outputs[0].xyzw[2].f[i] *= ctx->DrawBuffer->_DepthMaxF;
+ }
+#endif
+
+ return ~mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0];
+}
diff --git a/src/gallium/auxiliary/tgsi/tgsi_exec.h b/src/gallium/auxiliary/tgsi/tgsi_exec.h
new file mode 100644
index 0000000000..c72f76809d
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_exec.h
@@ -0,0 +1,327 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TGSI_EXEC_H
+#define TGSI_EXEC_H
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_state.h"
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+#define MAX_LABELS (4 * 1024) /**< basically, max instructions */
+
+#define NUM_CHANNELS 4 /* R,G,B,A */
+#define QUAD_SIZE 4 /* 4 pixel/quad */
+
+/**
+ * Registers may be treated as float, signed int or unsigned int.
+ */
+union tgsi_exec_channel
+{
+ float f[QUAD_SIZE];
+ int i[QUAD_SIZE];
+ unsigned u[QUAD_SIZE];
+};
+
+/**
+ * A vector[RGBA] of channels[4 pixels]
+ */
+struct tgsi_exec_vector
+{
+ union tgsi_exec_channel xyzw[NUM_CHANNELS];
+};
+
+/**
+ * For fragment programs, information for computing fragment input
+ * values from the plane equation of the triangle/line.
+ */
+struct tgsi_interp_coef
+{
+ float a0[NUM_CHANNELS]; /* in an xyzw layout */
+ float dadx[NUM_CHANNELS];
+ float dady[NUM_CHANNELS];
+};
+
+/**
+ * Information for sampling textures, which must be implemented
+ * by code outside the TGSI executor.
+ */
+struct tgsi_sampler
+{
+ /** Get samples for four fragments in a quad */
+ void (*get_samples)(struct tgsi_sampler *sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE]);
+};
+
+/**
+ * For branching/calling subroutines.
+ */
+struct tgsi_exec_labels
+{
+ unsigned labels[MAX_LABELS][2];
+ unsigned count;
+};
+
+
+#define TGSI_EXEC_NUM_TEMPS 128
+#define TGSI_EXEC_NUM_IMMEDIATES 256
+
+/*
+ * Locations of various utility registers (_I = Index, _C = Channel)
+ */
+#define TGSI_EXEC_TEMP_00000000_I (TGSI_EXEC_NUM_TEMPS + 0)
+#define TGSI_EXEC_TEMP_00000000_C 0
+
+#define TGSI_EXEC_TEMP_7FFFFFFF_I (TGSI_EXEC_NUM_TEMPS + 0)
+#define TGSI_EXEC_TEMP_7FFFFFFF_C 1
+
+#define TGSI_EXEC_TEMP_80000000_I (TGSI_EXEC_NUM_TEMPS + 0)
+#define TGSI_EXEC_TEMP_80000000_C 2
+
+#define TGSI_EXEC_TEMP_FFFFFFFF_I (TGSI_EXEC_NUM_TEMPS + 0)
+#define TGSI_EXEC_TEMP_FFFFFFFF_C 3
+
+#define TGSI_EXEC_TEMP_ONE_I (TGSI_EXEC_NUM_TEMPS + 1)
+#define TGSI_EXEC_TEMP_ONE_C 0
+
+#define TGSI_EXEC_TEMP_TWO_I (TGSI_EXEC_NUM_TEMPS + 1)
+#define TGSI_EXEC_TEMP_TWO_C 1
+
+#define TGSI_EXEC_TEMP_128_I (TGSI_EXEC_NUM_TEMPS + 1)
+#define TGSI_EXEC_TEMP_128_C 2
+
+#define TGSI_EXEC_TEMP_MINUS_128_I (TGSI_EXEC_NUM_TEMPS + 1)
+#define TGSI_EXEC_TEMP_MINUS_128_C 3
+
+#define TGSI_EXEC_TEMP_KILMASK_I (TGSI_EXEC_NUM_TEMPS + 2)
+#define TGSI_EXEC_TEMP_KILMASK_C 0
+
+#define TGSI_EXEC_TEMP_OUTPUT_I (TGSI_EXEC_NUM_TEMPS + 2)
+#define TGSI_EXEC_TEMP_OUTPUT_C 1
+
+#define TGSI_EXEC_TEMP_PRIMITIVE_I (TGSI_EXEC_NUM_TEMPS + 2)
+#define TGSI_EXEC_TEMP_PRIMITIVE_C 2
+
+/* NVIDIA condition code (CC) vector
+ */
+#define TGSI_EXEC_CC_GT 0x01
+#define TGSI_EXEC_CC_EQ 0x02
+#define TGSI_EXEC_CC_LT 0x04
+#define TGSI_EXEC_CC_UN 0x08
+
+#define TGSI_EXEC_CC_X_MASK 0x000000ff
+#define TGSI_EXEC_CC_X_SHIFT 0
+#define TGSI_EXEC_CC_Y_MASK 0x0000ff00
+#define TGSI_EXEC_CC_Y_SHIFT 8
+#define TGSI_EXEC_CC_Z_MASK 0x00ff0000
+#define TGSI_EXEC_CC_Z_SHIFT 16
+#define TGSI_EXEC_CC_W_MASK 0xff000000
+#define TGSI_EXEC_CC_W_SHIFT 24
+
+#define TGSI_EXEC_TEMP_CC_I (TGSI_EXEC_NUM_TEMPS + 2)
+#define TGSI_EXEC_TEMP_CC_C 3
+
+#define TGSI_EXEC_TEMP_THREE_I (TGSI_EXEC_NUM_TEMPS + 3)
+#define TGSI_EXEC_TEMP_THREE_C 0
+
+#define TGSI_EXEC_TEMP_HALF_I (TGSI_EXEC_NUM_TEMPS + 3)
+#define TGSI_EXEC_TEMP_HALF_C 1
+
+/* execution mask, each value is either 0 or ~0 */
+#define TGSI_EXEC_MASK_I (TGSI_EXEC_NUM_TEMPS + 3)
+#define TGSI_EXEC_MASK_C 2
+
+/* 4-register buffer for various purposes */
+#define TGSI_EXEC_TEMP_R0 (TGSI_EXEC_NUM_TEMPS + 4)
+#define TGSI_EXEC_NUM_TEMP_R 4
+
+#define TGSI_EXEC_TEMP_ADDR (TGSI_EXEC_NUM_TEMPS + 8)
+#define TGSI_EXEC_NUM_ADDRS 1
+#define TGSI_EXEC_NUM_TEMP_EXTRAS 9
+
+
+
+#define TGSI_EXEC_MAX_COND_NESTING 20
+#define TGSI_EXEC_MAX_LOOP_NESTING 20
+#define TGSI_EXEC_MAX_CALL_NESTING 20
+
+/* The maximum number of input attributes per vertex. For 2D
+ * input register files, this is the stride between two 1D
+ * arrays.
+ */
+#define TGSI_EXEC_MAX_INPUT_ATTRIBS 17
+
+/* The maximum number of constant vectors per constant buffer.
+ */
+#define TGSI_EXEC_MAX_CONST_BUFFER 4096
+
+
+/** function call/activation record */
+struct tgsi_call_record
+{
+ uint CondStackTop;
+ uint LoopStackTop;
+ uint ContStackTop;
+ uint ReturnAddr;
+};
+
+
+/**
+ * Run-time virtual machine state for executing a TGSI shader.
+ */
+struct tgsi_exec_machine
+{
+ /* Total = program temporaries + internal temporaries
+ */
+ struct tgsi_exec_vector Temps[TGSI_EXEC_NUM_TEMPS +
+ TGSI_EXEC_NUM_TEMP_EXTRAS];
+
+ float Imms[TGSI_EXEC_NUM_IMMEDIATES][4];
+
+ struct tgsi_exec_vector Inputs[PIPE_MAX_ATTRIBS];
+ struct tgsi_exec_vector Outputs[PIPE_MAX_ATTRIBS];
+
+ struct tgsi_exec_vector *Addrs;
+
+ struct tgsi_sampler **Samplers;
+
+ unsigned ImmLimit;
+ const float (*Consts)[4];
+ const struct tgsi_token *Tokens; /**< Declarations, instructions */
+ unsigned Processor; /**< TGSI_PROCESSOR_x */
+
+ /* GEOMETRY processor only. */
+ unsigned *Primitives;
+
+ /* FRAGMENT processor only. */
+ const struct tgsi_interp_coef *InterpCoefs;
+ struct tgsi_exec_vector QuadPos;
+
+ /* Conditional execution masks */
+ uint CondMask; /**< For IF/ELSE/ENDIF */
+ uint LoopMask; /**< For BGNLOOP/ENDLOOP */
+ uint ContMask; /**< For loop CONT statements */
+ uint FuncMask; /**< For function calls */
+ uint ExecMask; /**< = CondMask & LoopMask */
+
+ /** Condition mask stack (for nested conditionals) */
+ uint CondStack[TGSI_EXEC_MAX_COND_NESTING];
+ int CondStackTop;
+
+ /** Loop mask stack (for nested loops) */
+ uint LoopStack[TGSI_EXEC_MAX_LOOP_NESTING];
+ int LoopStackTop;
+
+ /** Loop label stack */
+ uint LoopLabelStack[TGSI_EXEC_MAX_LOOP_NESTING];
+ int LoopLabelStackTop;
+
+ /** Loop counter stack (x = count, y = current, z = step) */
+ struct tgsi_exec_vector LoopCounterStack[TGSI_EXEC_MAX_LOOP_NESTING];
+ int LoopCounterStackTop;
+
+ /** Loop continue mask stack (see comments in tgsi_exec.c) */
+ uint ContStack[TGSI_EXEC_MAX_LOOP_NESTING];
+ int ContStackTop;
+
+ /** Function execution mask stack (for executing subroutine code) */
+ uint FuncStack[TGSI_EXEC_MAX_CALL_NESTING];
+ int FuncStackTop;
+
+ /** Function call stack for saving/restoring the program counter */
+ struct tgsi_call_record CallStack[TGSI_EXEC_MAX_CALL_NESTING];
+ int CallStackTop;
+
+ struct tgsi_full_instruction *Instructions;
+ uint NumInstructions;
+
+ struct tgsi_full_declaration *Declarations;
+ uint NumDeclarations;
+
+ struct tgsi_exec_labels Labels;
+};
+
+struct tgsi_exec_machine *
+tgsi_exec_machine_create( void );
+
+void
+tgsi_exec_machine_destroy(struct tgsi_exec_machine *mach);
+
+
+void
+tgsi_exec_machine_bind_shader(
+ struct tgsi_exec_machine *mach,
+ const struct tgsi_token *tokens,
+ uint numSamplers,
+ struct tgsi_sampler **samplers);
+
+uint
+tgsi_exec_machine_run(
+ struct tgsi_exec_machine *mach );
+
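+/*
+ * A minimal usage sketch (the token stream, sampler array and the
+ * machine inputs are assumed to be set up by the caller; names are
+ * illustrative only):
+ *
+ *    struct tgsi_exec_machine *mach = tgsi_exec_machine_create();
+ *    tgsi_exec_machine_bind_shader(mach, tokens, num_samplers, samplers);
+ *    // fill in mach->Consts, mach->Inputs, mach->InterpCoefs, ...
+ *    uint live_mask = tgsi_exec_machine_run(mach);
+ *    tgsi_exec_machine_destroy(mach);
+ */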
+
+void
+tgsi_exec_machine_free_data(struct tgsi_exec_machine *mach);
+
+
+boolean
+tgsi_check_soa_dependencies(const struct tgsi_full_instruction *inst);
+
+
+static INLINE void
+tgsi_set_kill_mask(struct tgsi_exec_machine *mach, unsigned mask)
+{
+ mach->Temps[TGSI_EXEC_TEMP_KILMASK_I].xyzw[TGSI_EXEC_TEMP_KILMASK_C].u[0] =
+ mask;
+}
+
+
+/** Set execution mask values prior to executing the shader */
+static INLINE void
+tgsi_set_exec_mask(struct tgsi_exec_machine *mach,
+ boolean ch0, boolean ch1, boolean ch2, boolean ch3)
+{
+ int *mask = mach->Temps[TGSI_EXEC_MASK_I].xyzw[TGSI_EXEC_MASK_C].i;
+ mask[0] = ch0 ? ~0 : 0;
+ mask[1] = ch1 ? ~0 : 0;
+ mask[2] = ch2 ? ~0 : 0;
+ mask[3] = ch3 ? ~0 : 0;
+}
+
+
+#if defined __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* TGSI_EXEC_H */
diff --git a/src/gallium/auxiliary/tgsi/tgsi_info.c b/src/gallium/auxiliary/tgsi/tgsi_info.c
new file mode 100644
index 0000000000..17af4cb7ad
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_info.c
@@ -0,0 +1,181 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+#include "tgsi_info.h"
+
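+/* One entry per opcode, indexed by TGSI_OPCODE_x.  Fields are, in order:
+ * num_dst, num_src, is_tex, is_branch, pre_dedent, post_indent,
+ * mnemonic, opcode (see struct tgsi_opcode_info in tgsi_info.h).
+ */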
+static const struct tgsi_opcode_info opcode_info[TGSI_OPCODE_LAST] =
+{
+ { 1, 1, 0, 0, 0, 0, "ARL", TGSI_OPCODE_ARL },
+ { 1, 1, 0, 0, 0, 0, "MOV", TGSI_OPCODE_MOV },
+ { 1, 1, 0, 0, 0, 0, "LIT", TGSI_OPCODE_LIT },
+ { 1, 1, 0, 0, 0, 0, "RCP", TGSI_OPCODE_RCP },
+ { 1, 1, 0, 0, 0, 0, "RSQ", TGSI_OPCODE_RSQ },
+ { 1, 1, 0, 0, 0, 0, "EXP", TGSI_OPCODE_EXP },
+ { 1, 1, 0, 0, 0, 0, "LOG", TGSI_OPCODE_LOG },
+ { 1, 2, 0, 0, 0, 0, "MUL", TGSI_OPCODE_MUL },
+ { 1, 2, 0, 0, 0, 0, "ADD", TGSI_OPCODE_ADD },
+ { 1, 2, 0, 0, 0, 0, "DP3", TGSI_OPCODE_DP3 },
+ { 1, 2, 0, 0, 0, 0, "DP4", TGSI_OPCODE_DP4 },
+ { 1, 2, 0, 0, 0, 0, "DST", TGSI_OPCODE_DST },
+ { 1, 2, 0, 0, 0, 0, "MIN", TGSI_OPCODE_MIN },
+ { 1, 2, 0, 0, 0, 0, "MAX", TGSI_OPCODE_MAX },
+ { 1, 2, 0, 0, 0, 0, "SLT", TGSI_OPCODE_SLT },
+ { 1, 2, 0, 0, 0, 0, "SGE", TGSI_OPCODE_SGE },
+ { 1, 3, 0, 0, 0, 0, "MAD", TGSI_OPCODE_MAD },
+ { 1, 2, 0, 0, 0, 0, "SUB", TGSI_OPCODE_SUB },
+ { 1, 3, 0, 0, 0, 0, "LRP", TGSI_OPCODE_LRP },
+ { 1, 3, 0, 0, 0, 0, "CND", TGSI_OPCODE_CND },
+ { 0, 0, 0, 0, 0, 0, "", 20 }, /* removed */
+ { 1, 3, 0, 0, 0, 0, "DP2A", TGSI_OPCODE_DP2A },
+ { 0, 0, 0, 0, 0, 0, "", 22 }, /* removed */
+ { 0, 0, 0, 0, 0, 0, "", 23 }, /* removed */
+ { 1, 1, 0, 0, 0, 0, "FRC", TGSI_OPCODE_FRC },
+ { 1, 3, 0, 0, 0, 0, "CLAMP", TGSI_OPCODE_CLAMP },
+ { 1, 1, 0, 0, 0, 0, "FLR", TGSI_OPCODE_FLR },
+ { 1, 1, 0, 0, 0, 0, "ROUND", TGSI_OPCODE_ROUND },
+ { 1, 1, 0, 0, 0, 0, "EX2", TGSI_OPCODE_EX2 },
+ { 1, 1, 0, 0, 0, 0, "LG2", TGSI_OPCODE_LG2 },
+ { 1, 2, 0, 0, 0, 0, "POW", TGSI_OPCODE_POW },
+ { 1, 2, 0, 0, 0, 0, "XPD", TGSI_OPCODE_XPD },
+ { 0, 0, 0, 0, 0, 0, "", 32 }, /* removed */
+ { 1, 1, 0, 0, 0, 0, "ABS", TGSI_OPCODE_ABS },
+ { 1, 1, 0, 0, 0, 0, "RCC", TGSI_OPCODE_RCC },
+ { 1, 2, 0, 0, 0, 0, "DPH", TGSI_OPCODE_DPH },
+ { 1, 1, 0, 0, 0, 0, "COS", TGSI_OPCODE_COS },
+ { 1, 1, 0, 0, 0, 0, "DDX", TGSI_OPCODE_DDX },
+ { 1, 1, 0, 0, 0, 0, "DDY", TGSI_OPCODE_DDY },
+ { 0, 0, 0, 0, 0, 0, "KILP", TGSI_OPCODE_KILP },
+ { 1, 1, 0, 0, 0, 0, "PK2H", TGSI_OPCODE_PK2H },
+ { 1, 1, 0, 0, 0, 0, "PK2US", TGSI_OPCODE_PK2US },
+ { 1, 1, 0, 0, 0, 0, "PK4B", TGSI_OPCODE_PK4B },
+ { 1, 1, 0, 0, 0, 0, "PK4UB", TGSI_OPCODE_PK4UB },
+ { 1, 2, 0, 0, 0, 0, "RFL", TGSI_OPCODE_RFL },
+ { 1, 2, 0, 0, 0, 0, "SEQ", TGSI_OPCODE_SEQ },
+ { 1, 2, 0, 0, 0, 0, "SFL", TGSI_OPCODE_SFL },
+ { 1, 2, 0, 0, 0, 0, "SGT", TGSI_OPCODE_SGT },
+ { 1, 1, 0, 0, 0, 0, "SIN", TGSI_OPCODE_SIN },
+ { 1, 2, 0, 0, 0, 0, "SLE", TGSI_OPCODE_SLE },
+ { 1, 2, 0, 0, 0, 0, "SNE", TGSI_OPCODE_SNE },
+ { 1, 2, 0, 0, 0, 0, "STR", TGSI_OPCODE_STR },
+ { 1, 2, 1, 0, 0, 0, "TEX", TGSI_OPCODE_TEX },
+ { 1, 4, 1, 0, 0, 0, "TXD", TGSI_OPCODE_TXD },
+ { 1, 2, 1, 0, 0, 0, "TXP", TGSI_OPCODE_TXP },
+ { 1, 1, 0, 0, 0, 0, "UP2H", TGSI_OPCODE_UP2H },
+ { 1, 1, 0, 0, 0, 0, "UP2US", TGSI_OPCODE_UP2US },
+ { 1, 1, 0, 0, 0, 0, "UP4B", TGSI_OPCODE_UP4B },
+ { 1, 1, 0, 0, 0, 0, "UP4UB", TGSI_OPCODE_UP4UB },
+ { 1, 3, 0, 0, 0, 0, "X2D", TGSI_OPCODE_X2D },
+ { 1, 1, 0, 0, 0, 0, "ARA", TGSI_OPCODE_ARA },
+ { 1, 1, 0, 0, 0, 0, "ARR", TGSI_OPCODE_ARR },
+ { 0, 1, 0, 0, 0, 0, "BRA", TGSI_OPCODE_BRA },
+ { 0, 0, 0, 1, 0, 0, "CAL", TGSI_OPCODE_CAL },
+ { 0, 0, 0, 0, 0, 0, "RET", TGSI_OPCODE_RET },
+ { 1, 1, 0, 0, 0, 0, "SSG", TGSI_OPCODE_SSG },
+ { 1, 3, 0, 0, 0, 0, "CMP", TGSI_OPCODE_CMP },
+ { 1, 1, 0, 0, 0, 0, "SCS", TGSI_OPCODE_SCS },
+ { 1, 2, 1, 0, 0, 0, "TXB", TGSI_OPCODE_TXB },
+ { 1, 1, 0, 0, 0, 0, "NRM", TGSI_OPCODE_NRM },
+ { 1, 2, 0, 0, 0, 0, "DIV", TGSI_OPCODE_DIV },
+ { 1, 2, 0, 0, 0, 0, "DP2", TGSI_OPCODE_DP2 },
+ { 1, 2, 1, 0, 0, 0, "TXL", TGSI_OPCODE_TXL },
+ { 0, 0, 0, 0, 0, 0, "BRK", TGSI_OPCODE_BRK },
+ { 0, 1, 0, 1, 0, 1, "IF", TGSI_OPCODE_IF },
+ { 1, 1, 0, 0, 0, 1, "BGNFOR", TGSI_OPCODE_BGNFOR },
+ { 0, 1, 0, 0, 0, 1, "REP", TGSI_OPCODE_REP },
+ { 0, 0, 0, 1, 1, 1, "ELSE", TGSI_OPCODE_ELSE },
+ { 0, 0, 0, 0, 1, 0, "ENDIF", TGSI_OPCODE_ENDIF },
+ { 1, 0, 0, 0, 1, 0, "ENDFOR", TGSI_OPCODE_ENDFOR },
+ { 0, 0, 0, 0, 1, 0, "ENDREP", TGSI_OPCODE_ENDREP },
+ { 0, 1, 0, 0, 0, 0, "PUSHA", TGSI_OPCODE_PUSHA },
+ { 1, 0, 0, 0, 0, 0, "POPA", TGSI_OPCODE_POPA },
+ { 1, 1, 0, 0, 0, 0, "CEIL", TGSI_OPCODE_CEIL },
+ { 1, 1, 0, 0, 0, 0, "I2F", TGSI_OPCODE_I2F },
+ { 1, 1, 0, 0, 0, 0, "NOT", TGSI_OPCODE_NOT },
+ { 1, 1, 0, 0, 0, 0, "TRUNC", TGSI_OPCODE_TRUNC },
+ { 1, 2, 0, 0, 0, 0, "SHL", TGSI_OPCODE_SHL },
+ { 1, 2, 0, 0, 0, 0, "SHR", TGSI_OPCODE_SHR },
+ { 1, 2, 0, 0, 0, 0, "AND", TGSI_OPCODE_AND },
+ { 1, 2, 0, 0, 0, 0, "OR", TGSI_OPCODE_OR },
+ { 1, 2, 0, 0, 0, 0, "MOD", TGSI_OPCODE_MOD },
+ { 1, 2, 0, 0, 0, 0, "XOR", TGSI_OPCODE_XOR },
+ { 1, 3, 0, 0, 0, 0, "SAD", TGSI_OPCODE_SAD },
+ { 1, 2, 1, 0, 0, 0, "TXF", TGSI_OPCODE_TXF },
+ { 1, 2, 1, 0, 0, 0, "TXQ", TGSI_OPCODE_TXQ },
+ { 0, 0, 0, 0, 0, 0, "CONT", TGSI_OPCODE_CONT },
+ { 0, 0, 0, 0, 0, 0, "EMIT", TGSI_OPCODE_EMIT },
+ { 0, 0, 0, 0, 0, 0, "ENDPRIM", TGSI_OPCODE_ENDPRIM },
+ { 0, 0, 0, 1, 0, 1, "BGNLOOP", TGSI_OPCODE_BGNLOOP },
+ { 0, 0, 0, 0, 0, 1, "BGNSUB", TGSI_OPCODE_BGNSUB },
+ { 0, 0, 0, 1, 1, 0, "ENDLOOP", TGSI_OPCODE_ENDLOOP },
+ { 0, 0, 0, 0, 1, 0, "ENDSUB", TGSI_OPCODE_ENDSUB },
+ { 1, 1, 0, 0, 0, 0, "NOISE1", TGSI_OPCODE_NOISE1 },
+ { 1, 1, 0, 0, 0, 0, "NOISE2", TGSI_OPCODE_NOISE2 },
+ { 1, 1, 0, 0, 0, 0, "NOISE3", TGSI_OPCODE_NOISE3 },
+ { 1, 1, 0, 0, 0, 0, "NOISE4", TGSI_OPCODE_NOISE4 },
+ { 0, 0, 0, 0, 0, 0, "NOP", TGSI_OPCODE_NOP },
+ { 0, 0, 0, 0, 0, 0, "", 108 }, /* removed */
+ { 0, 0, 0, 0, 0, 0, "", 109 }, /* removed */
+ { 0, 0, 0, 0, 0, 0, "", 110 }, /* removed */
+ { 0, 0, 0, 0, 0, 0, "", 111 }, /* removed */
+ { 1, 1, 0, 0, 0, 0, "NRM4", TGSI_OPCODE_NRM4 },
+ { 0, 1, 0, 0, 0, 0, "CALLNZ", TGSI_OPCODE_CALLNZ },
+ { 0, 1, 0, 0, 0, 0, "IFC", TGSI_OPCODE_IFC },
+ { 0, 1, 0, 0, 0, 0, "BREAKC", TGSI_OPCODE_BREAKC },
+ { 0, 1, 0, 0, 0, 0, "KIL", TGSI_OPCODE_KIL },
+ { 0, 0, 0, 0, 0, 0, "END", TGSI_OPCODE_END },
+ { 1, 1, 0, 0, 0, 0, "SWZ", TGSI_OPCODE_SWZ }
+};
+
+const struct tgsi_opcode_info *
+tgsi_get_opcode_info( uint opcode )
+{
+ static boolean firsttime = 1;
+
+ if (firsttime) {
+ unsigned i;
+ firsttime = 0;
+ for (i = 0; i < Elements(opcode_info); i++)
+ assert(opcode_info[i].opcode == i);
+ }
+
+ if (opcode < TGSI_OPCODE_LAST)
+ return &opcode_info[opcode];
+
+ assert( 0 );
+ return NULL;
+}
+
+
+const char *
+tgsi_get_opcode_name( uint opcode )
+{
+ const struct tgsi_opcode_info *info = tgsi_get_opcode_info(opcode);
+ return info->mnemonic;
+}
+
diff --git a/src/gallium/auxiliary/tgsi/tgsi_info.h b/src/gallium/auxiliary/tgsi/tgsi_info.h
new file mode 100644
index 0000000000..74713c3b98
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_info.h
@@ -0,0 +1,60 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TGSI_INFO_H
+#define TGSI_INFO_H
+
+#include "pipe/p_shader_tokens.h"
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+struct tgsi_opcode_info
+{
+ unsigned num_dst:3;
+ unsigned num_src:3;
+ unsigned is_tex:1;
+ unsigned is_branch:1;
+ int pre_dedent:2;
+ int post_indent:2;
+ const char *mnemonic;
+ uint opcode;
+};
+
+const struct tgsi_opcode_info *
+tgsi_get_opcode_info( uint opcode );
+
+const char *
+tgsi_get_opcode_name( uint opcode );
+
+
+#if defined __cplusplus
+}
+#endif
+
+#endif /* TGSI_INFO_H */
diff --git a/src/gallium/auxiliary/tgsi/tgsi_iterate.c b/src/gallium/auxiliary/tgsi/tgsi_iterate.c
new file mode 100644
index 0000000000..d88c2558d8
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_iterate.c
@@ -0,0 +1,85 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_debug.h"
+#include "tgsi_iterate.h"
+
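+/* Walk a TGSI token stream, calling the optional prolog, per-token
+ * (declaration / instruction / immediate) and epilog callbacks supplied
+ * in ctx.  Returns FALSE if parsing fails or any callback returns FALSE.
+ */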
+boolean
+tgsi_iterate_shader(
+ const struct tgsi_token *tokens,
+ struct tgsi_iterate_context *ctx )
+{
+ struct tgsi_parse_context parse;
+
+ if (tgsi_parse_init( &parse, tokens ) != TGSI_PARSE_OK)
+ return FALSE;
+
+ ctx->processor = parse.FullHeader.Processor;
+ ctx->version = parse.FullVersion.Version;
+
+ if (ctx->prolog)
+ if (!ctx->prolog( ctx ))
+ goto fail;
+
+ while (!tgsi_parse_end_of_tokens( &parse )) {
+ tgsi_parse_token( &parse );
+
+ switch (parse.FullToken.Token.Type) {
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ if (ctx->iterate_instruction)
+ if (!ctx->iterate_instruction( ctx, &parse.FullToken.FullInstruction ))
+ goto fail;
+ break;
+
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ if (ctx->iterate_declaration)
+ if (!ctx->iterate_declaration( ctx, &parse.FullToken.FullDeclaration ))
+ goto fail;
+ break;
+
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ if (ctx->iterate_immediate)
+ if (!ctx->iterate_immediate( ctx, &parse.FullToken.FullImmediate ))
+ goto fail;
+ break;
+
+ default:
+ assert( 0 );
+ }
+ }
+
+ if (ctx->epilog)
+ if (!ctx->epilog( ctx ))
+ goto fail;
+
+ tgsi_parse_free( &parse );
+ return TRUE;
+
+fail:
+ tgsi_parse_free( &parse );
+ return FALSE;
+}
diff --git a/src/gallium/auxiliary/tgsi/tgsi_iterate.h b/src/gallium/auxiliary/tgsi/tgsi_iterate.h
new file mode 100644
index 0000000000..ec7b85bf63
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_iterate.h
@@ -0,0 +1,76 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TGSI_ITERATE_H
+#define TGSI_ITERATE_H
+
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+struct tgsi_iterate_context
+{
+ boolean
+ (* prolog)(
+ struct tgsi_iterate_context *ctx );
+
+ boolean
+ (* iterate_instruction)(
+ struct tgsi_iterate_context *ctx,
+ struct tgsi_full_instruction *inst );
+
+ boolean
+ (* iterate_declaration)(
+ struct tgsi_iterate_context *ctx,
+ struct tgsi_full_declaration *decl );
+
+ boolean
+ (* iterate_immediate)(
+ struct tgsi_iterate_context *ctx,
+ struct tgsi_full_immediate *imm );
+
+ boolean
+ (* epilog)(
+ struct tgsi_iterate_context *ctx );
+
+ struct tgsi_processor processor;
+ struct tgsi_version version;
+};
+
+boolean
+tgsi_iterate_shader(
+ const struct tgsi_token *tokens,
+ struct tgsi_iterate_context *ctx );
+
+#if defined __cplusplus
+}
+#endif
+
+#endif /* TGSI_ITERATE_H */
diff --git a/src/gallium/auxiliary/tgsi/tgsi_opcode_tmp.h b/src/gallium/auxiliary/tgsi/tgsi_opcode_tmp.h
new file mode 100644
index 0000000000..e7bcf4bf75
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_opcode_tmp.h
@@ -0,0 +1,172 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef OP12_TEX
+#define OP12_TEX(a) OP12(a)
+#endif
+
+#ifndef OP14_TEX
+#define OP14_TEX(a) OP14(a)
+#endif
+
+#ifndef OP00_LBL
+#define OP00_LBL(a) OP00(a)
+#endif
+
+#ifndef OP01_LBL
+#define OP01_LBL(a) OP01(a)
+#endif
+
+OP11(ARL)
+OP11(MOV)
+OP11(LIT)
+OP11(RCP)
+OP11(RSQ)
+OP11(EXP)
+OP11(LOG)
+OP12(MUL)
+OP12(ADD)
+OP12(DP3)
+OP12(DP4)
+OP12(DST)
+OP12(MIN)
+OP12(MAX)
+OP12(SLT)
+OP12(SGE)
+OP13(MAD)
+OP12(SUB)
+OP13(LRP)
+OP13(CND)
+OP13(DP2A)
+OP11(FRC)
+OP13(CLAMP)
+OP11(FLR)
+OP11(ROUND)
+OP11(EX2)
+OP11(LG2)
+OP12(POW)
+OP12(XPD)
+OP11(ABS)
+OP11(RCC)
+OP12(DPH)
+OP11(COS)
+OP11(DDX)
+OP11(DDY)
+OP00(KILP)
+OP11(PK2H)
+OP11(PK2US)
+OP11(PK4B)
+OP11(PK4UB)
+OP12(RFL)
+OP12(SEQ)
+OP12(SFL)
+OP12(SGT)
+OP11(SIN)
+OP12(SLE)
+OP12(SNE)
+OP12(STR)
+OP12_TEX(TEX)
+OP14_TEX(TXD)
+OP12_TEX(TXP)
+OP11(UP2H)
+OP11(UP2US)
+OP11(UP4B)
+OP11(UP4UB)
+OP13(X2D)
+OP11(ARA)
+OP11(ARR)
+OP01(BRA)
+OP00_LBL(CAL)
+OP00(RET)
+OP11(SSG)
+OP13(CMP)
+OP11(SCS)
+OP12_TEX(TXB)
+OP11(NRM)
+OP12(DIV)
+OP12(DP2)
+OP12_TEX(TXL)
+OP00(BRK)
+OP01_LBL(IF)
+OP11(BGNFOR)
+OP01(REP)
+OP00_LBL(ELSE)
+OP00(ENDIF)
+OP10(ENDFOR)
+OP00(ENDREP)
+OP01(PUSHA)
+OP10(POPA)
+OP11(CEIL)
+OP11(I2F)
+OP11(NOT)
+OP11(TRUNC)
+OP12(SHL)
+OP12(SHR)
+OP12(AND)
+OP12(OR)
+OP12(MOD)
+OP12(XOR)
+OP13(SAD)
+OP12_TEX(TXF)
+OP12_TEX(TXQ)
+OP00(CONT)
+OP00(EMIT)
+OP00(ENDPRIM)
+OP00_LBL(BGNLOOP)
+OP00(BGNSUB)
+OP00_LBL(ENDLOOP)
+OP00(ENDSUB)
+OP11(NOISE1)
+OP11(NOISE2)
+OP11(NOISE3)
+OP11(NOISE4)
+OP00(NOP)
+OP11(NRM4)
+OP01(CALLNZ)
+OP01(IFC)
+OP01(BREAKC)
+OP01(KIL)
+OP00(END)
+OP11(SWZ)
+
+
+#undef OP00
+#undef OP01
+#undef OP10
+#undef OP11
+#undef OP12
+#undef OP13
+
+#ifdef OP14
+#undef OP14
+#endif
+
+#undef OP00_LBL
+#undef OP01_LBL
+
+#undef OP12_TEX
+#undef OP14_TEX
+
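A sketch of how this X-macro list is meant to be consumed (illustrative only, not from the patch itself). The digits in OPxy appear to encode destination/source operand counts (MAD is OP13, TXD is OP14_TEX and tgsi_parse.h notes "TXD has 4" sources); the _TEX and _LBL variants fall back to the plain macros through the #ifndef defaults at the top of the header. The emit_-style declarations produced below are hypothetical, chosen only to show the expansion.

#define OP00(op) void emit_##op(void);
#define OP01(op) void emit_##op(int src0);
#define OP10(op) void emit_##op(int dst);
#define OP11(op) void emit_##op(int dst, int src0);
#define OP12(op) void emit_##op(int dst, int src0, int src1);
#define OP13(op) void emit_##op(int dst, int src0, int src1, int src2);
#define OP14(op) void emit_##op(int dst, int src0, int src1, int src2, int src3);

#include "tgsi_opcode_tmp.h"   /* expands one declaration per opcode, then #undefs the macros */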
diff --git a/src/gallium/auxiliary/tgsi/tgsi_parse.c b/src/gallium/auxiliary/tgsi/tgsi_parse.c
new file mode 100644
index 0000000000..4870f82b6b
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_parse.c
@@ -0,0 +1,352 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_debug.h"
+#include "pipe/p_shader_tokens.h"
+#include "tgsi_parse.h"
+#include "tgsi_build.h"
+#include "util/u_memory.h"
+
+void
+tgsi_full_token_init(
+ union tgsi_full_token *full_token )
+{
+ full_token->Token.Type = TGSI_TOKEN_TYPE_DECLARATION;
+}
+
+void
+tgsi_full_token_free(
+ union tgsi_full_token *full_token )
+{
+}
+
+unsigned
+tgsi_parse_init(
+ struct tgsi_parse_context *ctx,
+ const struct tgsi_token *tokens )
+{
+ ctx->FullVersion.Version = *(struct tgsi_version *) &tokens[0];
+ if( ctx->FullVersion.Version.MajorVersion > 1 ) {
+ return TGSI_PARSE_ERROR;
+ }
+
+ ctx->FullHeader.Header = *(struct tgsi_header *) &tokens[1];
+ if( ctx->FullHeader.Header.HeaderSize >= 2 ) {
+ ctx->FullHeader.Processor = *(struct tgsi_processor *) &tokens[2];
+ }
+ else {
+ ctx->FullHeader.Processor = tgsi_default_processor();
+ }
+
+ ctx->Tokens = tokens;
+ ctx->Position = 1 + ctx->FullHeader.Header.HeaderSize;
+
+ tgsi_full_token_init( &ctx->FullToken );
+
+ return TGSI_PARSE_OK;
+}
+
+void
+tgsi_parse_free(
+ struct tgsi_parse_context *ctx )
+{
+ tgsi_full_token_free( &ctx->FullToken );
+}
+
+boolean
+tgsi_parse_end_of_tokens(
+ struct tgsi_parse_context *ctx )
+{
+ return ctx->Position >=
+ 1 + ctx->FullHeader.Header.HeaderSize + ctx->FullHeader.Header.BodySize;
+}
+
+
+/**
+ * This function is used to work around type-punning/aliasing
+ * warnings. The warnings seem harmless on x86 but on PPC they cause
+ * real failures.
+ */
+static INLINE void
+copy_token(void *dst, const void *src)
+{
+ memcpy(dst, src, 4);
+}
+
+
+/**
+ * Get next 4-byte token, return it at address specified by 'token'
+ */
+static void
+next_token(
+ struct tgsi_parse_context *ctx,
+ void *token )
+{
+ assert( !tgsi_parse_end_of_tokens( ctx ) );
+ copy_token(token, &ctx->Tokens[ctx->Position]);
+ ctx->Position++;
+}
+
+
+void
+tgsi_parse_token(
+ struct tgsi_parse_context *ctx )
+{
+ struct tgsi_token token;
+ unsigned i;
+
+ tgsi_full_token_free( &ctx->FullToken );
+ tgsi_full_token_init( &ctx->FullToken );
+
+ next_token( ctx, &token );
+
+ switch( token.Type ) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ {
+ struct tgsi_full_declaration *decl = &ctx->FullToken.FullDeclaration;
+
+ *decl = tgsi_default_full_declaration();
+ copy_token(&decl->Declaration, &token);
+
+ next_token( ctx, &decl->DeclarationRange );
+
+ if( decl->Declaration.Semantic ) {
+ next_token( ctx, &decl->Semantic );
+ }
+
+ break;
+ }
+
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ {
+ struct tgsi_full_immediate *imm = &ctx->FullToken.FullImmediate;
+
+ *imm = tgsi_default_full_immediate();
+ copy_token(&imm->Immediate, &token);
+ assert( !imm->Immediate.Extended );
+
+ switch (imm->Immediate.DataType) {
+ case TGSI_IMM_FLOAT32:
+ {
+ uint imm_count = imm->Immediate.NrTokens - 1;
+ for (i = 0; i < imm_count; i++) {
+ next_token(ctx, &imm->u[i]);
+ }
+ }
+ break;
+
+ default:
+ assert( 0 );
+ }
+
+ break;
+ }
+
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ {
+ struct tgsi_full_instruction *inst = &ctx->FullToken.FullInstruction;
+ unsigned extended;
+
+ *inst = tgsi_default_full_instruction();
+ copy_token(&inst->Instruction, &token);
+ extended = inst->Instruction.Extended;
+
+ while( extended ) {
+ struct tgsi_src_register_ext token;
+
+ next_token( ctx, &token );
+
+ switch( token.Type ) {
+ case TGSI_INSTRUCTION_EXT_TYPE_NV:
+ copy_token(&inst->InstructionExtNv, &token);
+ break;
+
+ case TGSI_INSTRUCTION_EXT_TYPE_LABEL:
+ copy_token(&inst->InstructionExtLabel, &token);
+ break;
+
+ case TGSI_INSTRUCTION_EXT_TYPE_TEXTURE:
+ copy_token(&inst->InstructionExtTexture, &token);
+ break;
+
+ default:
+ assert( 0 );
+ }
+
+ extended = token.Extended;
+ }
+
+ assert( inst->Instruction.NumDstRegs <= TGSI_FULL_MAX_DST_REGISTERS );
+
+ for( i = 0; i < inst->Instruction.NumDstRegs; i++ ) {
+ unsigned extended;
+
+ next_token( ctx, &inst->FullDstRegisters[i].DstRegister );
+
+ /*
+ * No support for indirect or multi-dimensional addressing.
+ */
+ assert( !inst->FullDstRegisters[i].DstRegister.Dimension );
+
+ extended = inst->FullDstRegisters[i].DstRegister.Extended;
+
+ while( extended ) {
+ struct tgsi_src_register_ext token;
+
+ next_token( ctx, &token );
+
+ switch( token.Type ) {
+ case TGSI_DST_REGISTER_EXT_TYPE_CONDCODE:
+ copy_token(&inst->FullDstRegisters[i].DstRegisterExtConcode,
+ &token);
+ break;
+
+ case TGSI_DST_REGISTER_EXT_TYPE_MODULATE:
+ copy_token(&inst->FullDstRegisters[i].DstRegisterExtModulate,
+ &token);
+ break;
+
+ default:
+ assert( 0 );
+ }
+
+ extended = token.Extended;
+ }
+
+ if( inst->FullDstRegisters[i].DstRegister.Indirect ) {
+ next_token( ctx, &inst->FullDstRegisters[i].DstRegisterInd );
+
+ /*
+ * No support for indirect or multi-dimensional addressing.
+ */
+ assert( !inst->FullDstRegisters[i].DstRegisterInd.Indirect );
+ assert( !inst->FullDstRegisters[i].DstRegisterInd.Dimension );
+ assert( !inst->FullDstRegisters[i].DstRegisterInd.Extended );
+ }
+ }
+
+ assert( inst->Instruction.NumSrcRegs <= TGSI_FULL_MAX_SRC_REGISTERS );
+
+ for( i = 0; i < inst->Instruction.NumSrcRegs; i++ ) {
+ unsigned extended;
+
+ next_token( ctx, &inst->FullSrcRegisters[i].SrcRegister );
+
+ extended = inst->FullSrcRegisters[i].SrcRegister.Extended;
+
+ while( extended ) {
+ struct tgsi_src_register_ext token;
+
+ next_token( ctx, &token );
+
+ switch( token.Type ) {
+ case TGSI_SRC_REGISTER_EXT_TYPE_SWZ:
+ copy_token(&inst->FullSrcRegisters[i].SrcRegisterExtSwz,
+ &token);
+ break;
+
+ case TGSI_SRC_REGISTER_EXT_TYPE_MOD:
+ copy_token(&inst->FullSrcRegisters[i].SrcRegisterExtMod,
+ &token);
+ break;
+
+ default:
+ assert( 0 );
+ }
+
+ extended = token.Extended;
+ }
+
+ if( inst->FullSrcRegisters[i].SrcRegister.Indirect ) {
+ next_token( ctx, &inst->FullSrcRegisters[i].SrcRegisterInd );
+
+ /*
+ * No support for indirect or multi-dimensional addressing.
+ */
+ assert( !inst->FullSrcRegisters[i].SrcRegisterInd.Indirect );
+ assert( !inst->FullSrcRegisters[i].SrcRegisterInd.Dimension );
+ assert( !inst->FullSrcRegisters[i].SrcRegisterInd.Extended );
+ }
+
+ if( inst->FullSrcRegisters[i].SrcRegister.Dimension ) {
+ next_token( ctx, &inst->FullSrcRegisters[i].SrcRegisterDim );
+
+ /*
+ * No support for multi-dimensional addressing.
+ */
+ assert( !inst->FullSrcRegisters[i].SrcRegisterDim.Dimension );
+ assert( !inst->FullSrcRegisters[i].SrcRegisterDim.Extended );
+
+ if( inst->FullSrcRegisters[i].SrcRegisterDim.Indirect ) {
+ next_token( ctx, &inst->FullSrcRegisters[i].SrcRegisterDimInd );
+
+ /*
+ * No support for indirect or multi-dimensional addressing.
+ */
+               assert( !inst->FullSrcRegisters[i].SrcRegisterDimInd.Indirect );
+               assert( !inst->FullSrcRegisters[i].SrcRegisterDimInd.Dimension );
+               assert( !inst->FullSrcRegisters[i].SrcRegisterDimInd.Extended );
+ }
+ }
+ }
+
+ break;
+ }
+
+ default:
+ assert( 0 );
+ }
+}
+
+
+unsigned
+tgsi_num_tokens(const struct tgsi_token *tokens)
+{
+ struct tgsi_parse_context ctx;
+ if (tgsi_parse_init(&ctx, tokens) == TGSI_PARSE_OK) {
+ unsigned len = (ctx.FullHeader.Header.HeaderSize +
+ ctx.FullHeader.Header.BodySize +
+ 1);
+ return len;
+ }
+ return 0;
+}
+
+
+/**
+ * Make a new copy of a token array.
+ */
+struct tgsi_token *
+tgsi_dup_tokens(const struct tgsi_token *tokens)
+{
+ unsigned n = tgsi_num_tokens(tokens);
+ unsigned bytes = n * sizeof(struct tgsi_token);
+ struct tgsi_token *new_tokens = (struct tgsi_token *) MALLOC(bytes);
+ if (new_tokens)
+ memcpy(new_tokens, tokens, bytes);
+ return new_tokens;
+}
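A short illustration of the pattern copy_token() above relies on (illustrative only, not from the patch itself). Reading a raw token word through a pointer to a differently-typed token struct is a strict-aliasing violation; copying the 4-byte word with memcpy() into an object of the destination type is the portable equivalent, and compilers lower it to a single load/store. reinterpret_token() is a hypothetical name.

#include <string.h>
#include "pipe/p_shader_tokens.h"

static void
reinterpret_token(struct tgsi_instruction *dst, const struct tgsi_token *src)
{
   /* both token words are 4 bytes; this is the aliasing-safe form of
    * *dst = *(const struct tgsi_instruction *) src
    */
   memcpy(dst, src, 4);
}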
diff --git a/src/gallium/auxiliary/tgsi/tgsi_parse.h b/src/gallium/auxiliary/tgsi/tgsi_parse.h
new file mode 100644
index 0000000000..a26ee5ba86
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_parse.h
@@ -0,0 +1,149 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TGSI_PARSE_H
+#define TGSI_PARSE_H
+
+#include "pipe/p_shader_tokens.h"
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+struct tgsi_full_version
+{
+ struct tgsi_version Version;
+};
+
+struct tgsi_full_header
+{
+ struct tgsi_header Header;
+ struct tgsi_processor Processor;
+};
+
+struct tgsi_full_dst_register
+{
+ struct tgsi_dst_register DstRegister;
+ struct tgsi_src_register DstRegisterInd;
+ struct tgsi_dst_register_ext_concode DstRegisterExtConcode;
+ struct tgsi_dst_register_ext_modulate DstRegisterExtModulate;
+};
+
+struct tgsi_full_src_register
+{
+ struct tgsi_src_register SrcRegister;
+ struct tgsi_src_register_ext_swz SrcRegisterExtSwz;
+ struct tgsi_src_register_ext_mod SrcRegisterExtMod;
+ struct tgsi_src_register SrcRegisterInd;
+ struct tgsi_dimension SrcRegisterDim;
+ struct tgsi_src_register SrcRegisterDimInd;
+};
+
+struct tgsi_full_declaration
+{
+ struct tgsi_declaration Declaration;
+ struct tgsi_declaration_range DeclarationRange;
+ struct tgsi_declaration_semantic Semantic;
+};
+
+struct tgsi_full_immediate
+{
+ struct tgsi_immediate Immediate;
+ union tgsi_immediate_data u[4];
+};
+
+#define TGSI_FULL_MAX_DST_REGISTERS 2
+#define TGSI_FULL_MAX_SRC_REGISTERS 4 /* TXD has 4 */
+
+struct tgsi_full_instruction
+{
+ struct tgsi_instruction Instruction;
+ struct tgsi_instruction_ext_nv InstructionExtNv;
+ struct tgsi_instruction_ext_label InstructionExtLabel;
+ struct tgsi_instruction_ext_texture InstructionExtTexture;
+ struct tgsi_full_dst_register FullDstRegisters[TGSI_FULL_MAX_DST_REGISTERS];
+ struct tgsi_full_src_register FullSrcRegisters[TGSI_FULL_MAX_SRC_REGISTERS];
+ uint Flags; /**< user-defined usage */
+};
+
+union tgsi_full_token
+{
+ struct tgsi_token Token;
+ struct tgsi_full_declaration FullDeclaration;
+ struct tgsi_full_immediate FullImmediate;
+ struct tgsi_full_instruction FullInstruction;
+};
+
+void
+tgsi_full_token_init(
+ union tgsi_full_token *full_token );
+
+void
+tgsi_full_token_free(
+ union tgsi_full_token *full_token );
+
+struct tgsi_parse_context
+{
+ const struct tgsi_token *Tokens;
+ unsigned Position;
+ struct tgsi_full_version FullVersion;
+ struct tgsi_full_header FullHeader;
+ union tgsi_full_token FullToken;
+};
+
+#define TGSI_PARSE_OK 0
+#define TGSI_PARSE_ERROR 1
+
+unsigned
+tgsi_parse_init(
+ struct tgsi_parse_context *ctx,
+ const struct tgsi_token *tokens );
+
+void
+tgsi_parse_free(
+ struct tgsi_parse_context *ctx );
+
+boolean
+tgsi_parse_end_of_tokens(
+ struct tgsi_parse_context *ctx );
+
+void
+tgsi_parse_token(
+ struct tgsi_parse_context *ctx );
+
+unsigned
+tgsi_num_tokens(const struct tgsi_token *tokens);
+
+struct tgsi_token *
+tgsi_dup_tokens(const struct tgsi_token *tokens);
+
+#if defined __cplusplus
+}
+#endif
+
+#endif /* TGSI_PARSE_H */
+
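For reference, the canonical way to consume this interface (an illustrative sketch, not from the patch itself); the same loop appears in tgsi_iterate_shader() and in tgsi_emit_ppc() elsewhere in this change. Only entry points declared in this header are used; walk_tokens() is a hypothetical name and the per-token handling is stubbed out.

#include <assert.h>
#include "tgsi/tgsi_parse.h"

static void
walk_tokens(const struct tgsi_token *tokens)
{
   struct tgsi_parse_context parse;

   if (tgsi_parse_init(&parse, tokens) != TGSI_PARSE_OK)
      return;

   while (!tgsi_parse_end_of_tokens(&parse)) {
      tgsi_parse_token(&parse);

      switch (parse.FullToken.Token.Type) {
      case TGSI_TOKEN_TYPE_DECLARATION:
         /* parse.FullToken.FullDeclaration is valid here */
         break;
      case TGSI_TOKEN_TYPE_IMMEDIATE:
         /* parse.FullToken.FullImmediate is valid here */
         break;
      case TGSI_TOKEN_TYPE_INSTRUCTION:
         /* parse.FullToken.FullInstruction is valid here */
         break;
      default:
         assert(0);
      }
   }

   tgsi_parse_free(&parse);
}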
diff --git a/src/gallium/auxiliary/tgsi/tgsi_ppc.c b/src/gallium/auxiliary/tgsi/tgsi_ppc.c
new file mode 100644
index 0000000000..4b1c7d4e01
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_ppc.c
@@ -0,0 +1,1375 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * TGSI to PowerPC code generation.
+ */
+
+#include "pipe/p_config.h"
+
+#if defined(PIPE_ARCH_PPC)
+
+#include "util/u_debug.h"
+#include "pipe/p_shader_tokens.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "util/u_sse.h"
+#include "tgsi/tgsi_info.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_util.h"
+#include "tgsi_dump.h"
+#include "tgsi_exec.h"
+#include "tgsi_ppc.h"
+#include "rtasm/rtasm_ppc.h"
+
+
+/**
+ * Since it's pretty much impossible to form PPC vector immediates, load
+ * them from memory here:
+ */
+const float ppc_builtin_constants[] ALIGN16_ATTRIB = {
+   1.0f, -128.0f, 128.0f, 0.0f
+};
+
+
+#define FOR_EACH_CHANNEL( CHAN )\
+ for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)
+
+#define IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
+ ((INST).FullDstRegisters[0].DstRegister.WriteMask & (1 << (CHAN)))
+
+#define IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
+ if (IS_DST0_CHANNEL_ENABLED( INST, CHAN ))
+
+#define FOR_EACH_DST0_ENABLED_CHANNEL( INST, CHAN )\
+ FOR_EACH_CHANNEL( CHAN )\
+ IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )
+
+#define CHAN_X 0
+#define CHAN_Y 1
+#define CHAN_Z 2
+#define CHAN_W 3
+
+
+/**
+ * How many TGSI temps should be implemented with real PPC vector registers
+ * rather than memory.
+ */
+#define MAX_PPC_TEMPS 3
+
+
+/**
+ * Context/state used during code gen.
+ */
+struct gen_context
+{
+ struct ppc_function *f;
+ int inputs_reg; /**< GP register pointing to input params */
+ int outputs_reg; /**< GP register pointing to output params */
+ int temps_reg; /**< GP register pointing to temporary "registers" */
+ int immed_reg; /**< GP register pointing to immediates buffer */
+ int const_reg; /**< GP register pointing to constants buffer */
+   int builtins_reg;  /**< GP register pointing to built-in constants */
+
+ int offset_reg; /**< used to reduce redundant li instructions */
+ int offset_value;
+
+ int one_vec; /**< vector register with {1.0, 1.0, 1.0, 1.0} */
+ int bit31_vec; /**< vector register with {1<<31, 1<<31, 1<<31, 1<<31} */
+
+ /**
+ * Map TGSI temps to PPC vector temps.
+    * We have 32 PPC vector regs and dedicate 4 * MAX_PPC_TEMPS of them to
+    * TGSI temps.
+ * XXX currently only do this for TGSI temps [0..MAX_PPC_TEMPS-1].
+ */
+ int temps_map[MAX_PPC_TEMPS][4];
+
+ /**
+ * Cache of src registers.
+ * This is used to avoid redundant load instructions.
+ */
+ struct {
+ struct tgsi_full_src_register src;
+ uint chan;
+ uint vec;
+ } regs[12]; /* 3 src regs, 4 channels */
+ uint num_regs;
+};
+
+
+/**
+ * Initialize code generation context.
+ */
+static void
+init_gen_context(struct gen_context *gen, struct ppc_function *func)
+{
+ uint i;
+
+ memset(gen, 0, sizeof(*gen));
+ gen->f = func;
+ gen->inputs_reg = ppc_reserve_register(func, 3); /* first function param */
+ gen->outputs_reg = ppc_reserve_register(func, 4); /* second function param */
+ gen->temps_reg = ppc_reserve_register(func, 5); /* ... */
+ gen->immed_reg = ppc_reserve_register(func, 6);
+ gen->const_reg = ppc_reserve_register(func, 7);
+ gen->builtins_reg = ppc_reserve_register(func, 8);
+ gen->one_vec = -1;
+ gen->bit31_vec = -1;
+ gen->offset_reg = -1;
+ gen->offset_value = -9999999;
+ for (i = 0; i < MAX_PPC_TEMPS; i++) {
+ gen->temps_map[i][0] = ppc_allocate_vec_register(gen->f);
+ gen->temps_map[i][1] = ppc_allocate_vec_register(gen->f);
+ gen->temps_map[i][2] = ppc_allocate_vec_register(gen->f);
+ gen->temps_map[i][3] = ppc_allocate_vec_register(gen->f);
+ }
+}
+
+
+/**
+ * Is the given TGSI register stored as a real PPC vector register?
+ */
+static boolean
+is_ppc_vec_temporary(const struct tgsi_full_src_register *reg)
+{
+ return (reg->SrcRegister.File == TGSI_FILE_TEMPORARY &&
+ reg->SrcRegister.Index < MAX_PPC_TEMPS);
+}
+
+
+/**
+ * Is the given TGSI register stored as a real PPC vector register?
+ */
+static boolean
+is_ppc_vec_temporary_dst(const struct tgsi_full_dst_register *reg)
+{
+ return (reg->DstRegister.File == TGSI_FILE_TEMPORARY &&
+ reg->DstRegister.Index < MAX_PPC_TEMPS);
+}
+
+
+
+/**
+ * All PPC vector load/store instructions form an effective address
+ * by adding the contents of two registers. For example:
+ * lvx v2,r8,r9 # v2 = memory[r8 + r9]
+ * stvx v2,r8,r9 # memory[r8 + r9] = v2;
+ * So our lvx/stvx instructions are typically preceded by an 'li' instruction
+ * to load r9 (above) with an immediate (an offset).
+ * This code emits that 'li' instruction, but only if the offset value is
+ * different than the previous 'li'.
+ * This optimization seems to save about 10% in the instruction count.
+ * Note that we need to unconditionally emit an 'li' inside basic blocks
+ * (such as inside loops).
+ */
+static int
+emit_li_offset(struct gen_context *gen, int offset)
+{
+ if (gen->offset_reg <= 0) {
+ /* allocate a GP register for storing load/store offset */
+ gen->offset_reg = ppc_allocate_register(gen->f);
+ }
+
+ /* emit new 'li' if offset is changing */
+ if (gen->offset_value < 0 || gen->offset_value != offset) {
+ gen->offset_value = offset;
+ ppc_li(gen->f, gen->offset_reg, offset);
+ }
+
+ return gen->offset_reg;
+}
+
+
+/**
+ * Forces subsequent emit_li_offset() calls to emit an 'li'.
+ * To be called at the top of basic blocks.
+ */
+static void
+reset_li_offset(struct gen_context *gen)
+{
+ gen->offset_value = -9999999;
+}
+
+
+
+/**
+ * Load the given vector register with {value, value, value, value}.
+ * The value must be in the ppu_builtin_constants[] array.
+ * We wouldn't need this if there was a simple way to load PPC vector
+ * registers with immediate values!
+ */
+static void
+load_constant_vec(struct gen_context *gen, int dst_vec, float value)
+{
+ uint pos;
+ for (pos = 0; pos < Elements(ppc_builtin_constants); pos++) {
+ if (ppc_builtin_constants[pos] == value) {
+ int offset = pos * 4;
+ int offset_reg = emit_li_offset(gen, offset);
+
+ /* Load 4-byte word into vector register.
+ * The vector slot depends on the effective address we load from.
+ * We know that our builtins start at a 16-byte boundary so we
+          * know that 'pos % 4' tells us which vector slot will have the
+ * loaded word. The other vector slots will be undefined.
+ */
+ ppc_lvewx(gen->f, dst_vec, gen->builtins_reg, offset_reg);
+ /* splat word[pos % 4] across the vector reg */
+ ppc_vspltw(gen->f, dst_vec, dst_vec, pos % 4);
+ return;
+ }
+ }
+ assert(0 && "Need to add new constant to ppc_builtin_constants array");
+}
+
+
+/**
+ * Return index of vector register containing {1.0, 1.0, 1.0, 1.0}.
+ */
+static int
+gen_one_vec(struct gen_context *gen)
+{
+ if (gen->one_vec < 0) {
+ gen->one_vec = ppc_allocate_vec_register(gen->f);
+ load_constant_vec(gen, gen->one_vec, 1.0f);
+ }
+ return gen->one_vec;
+}
+
+/**
+ * Return index of vector register containing {1<<31, 1<<31, 1<<31, 1<<31}.
+ */
+static int
+gen_get_bit31_vec(struct gen_context *gen)
+{
+ if (gen->bit31_vec < 0) {
+ gen->bit31_vec = ppc_allocate_vec_register(gen->f);
+ ppc_vspltisw(gen->f, gen->bit31_vec, -1);
+ ppc_vslw(gen->f, gen->bit31_vec, gen->bit31_vec, gen->bit31_vec);
+ }
+ return gen->bit31_vec;
+}
+
+
+/**
+ * Register fetch. Return PPC vector register with result.
+ */
+static int
+emit_fetch(struct gen_context *gen,
+ const struct tgsi_full_src_register *reg,
+ const unsigned chan_index)
+{
+ uint swizzle = tgsi_util_get_full_src_register_extswizzle(reg, chan_index);
+ int dst_vec = -1;
+
+ switch (swizzle) {
+ case TGSI_EXTSWIZZLE_X:
+ case TGSI_EXTSWIZZLE_Y:
+ case TGSI_EXTSWIZZLE_Z:
+ case TGSI_EXTSWIZZLE_W:
+ switch (reg->SrcRegister.File) {
+ case TGSI_FILE_INPUT:
+ {
+ int offset = (reg->SrcRegister.Index * 4 + swizzle) * 16;
+ int offset_reg = emit_li_offset(gen, offset);
+ dst_vec = ppc_allocate_vec_register(gen->f);
+ ppc_lvx(gen->f, dst_vec, gen->inputs_reg, offset_reg);
+ }
+ break;
+ case TGSI_FILE_TEMPORARY:
+ if (is_ppc_vec_temporary(reg)) {
+ /* use PPC vec register */
+ dst_vec = gen->temps_map[reg->SrcRegister.Index][swizzle];
+ }
+ else {
+ /* use memory-based temp register "file" */
+ int offset = (reg->SrcRegister.Index * 4 + swizzle) * 16;
+ int offset_reg = emit_li_offset(gen, offset);
+ dst_vec = ppc_allocate_vec_register(gen->f);
+ ppc_lvx(gen->f, dst_vec, gen->temps_reg, offset_reg);
+ }
+ break;
+ case TGSI_FILE_IMMEDIATE:
+ {
+ int offset = (reg->SrcRegister.Index * 4 + swizzle) * 4;
+ int offset_reg = emit_li_offset(gen, offset);
+ dst_vec = ppc_allocate_vec_register(gen->f);
+ /* Load 4-byte word into vector register.
+ * The vector slot depends on the effective address we load from.
+ * We know that our immediates start at a 16-byte boundary so we
+ * know that 'swizzle' tells us which vector slot will have the
+ * loaded word. The other vector slots will be undefined.
+ */
+ ppc_lvewx(gen->f, dst_vec, gen->immed_reg, offset_reg);
+ /* splat word[swizzle] across the vector reg */
+ ppc_vspltw(gen->f, dst_vec, dst_vec, swizzle);
+ }
+ break;
+ case TGSI_FILE_CONSTANT:
+ {
+ int offset = (reg->SrcRegister.Index * 4 + swizzle) * 4;
+ int offset_reg = emit_li_offset(gen, offset);
+ dst_vec = ppc_allocate_vec_register(gen->f);
+ /* Load 4-byte word into vector register.
+ * The vector slot depends on the effective address we load from.
+ * We know that our constants start at a 16-byte boundary so we
+ * know that 'swizzle' tells us which vector slot will have the
+ * loaded word. The other vector slots will be undefined.
+ */
+ ppc_lvewx(gen->f, dst_vec, gen->const_reg, offset_reg);
+ /* splat word[swizzle] across the vector reg */
+ ppc_vspltw(gen->f, dst_vec, dst_vec, swizzle);
+ }
+ break;
+ default:
+ assert( 0 );
+ }
+ break;
+ case TGSI_EXTSWIZZLE_ZERO:
+      dst_vec = ppc_allocate_vec_register(gen->f);
+      ppc_vzero(gen->f, dst_vec);
+ break;
+ case TGSI_EXTSWIZZLE_ONE:
+ {
+ int one_vec = gen_one_vec(gen);
+ dst_vec = ppc_allocate_vec_register(gen->f);
+ ppc_vmove(gen->f, dst_vec, one_vec);
+ }
+ break;
+ default:
+ assert( 0 );
+ }
+
+ assert(dst_vec >= 0);
+
+ {
+ uint sign_op = tgsi_util_get_full_src_register_sign_mode(reg, chan_index);
+ if (sign_op != TGSI_UTIL_SIGN_KEEP) {
+ int bit31_vec = gen_get_bit31_vec(gen);
+ int dst_vec2;
+
+ if (is_ppc_vec_temporary(reg)) {
+ /* need to use a new temp */
+ dst_vec2 = ppc_allocate_vec_register(gen->f);
+ }
+ else {
+ dst_vec2 = dst_vec;
+ }
+
+ switch (sign_op) {
+ case TGSI_UTIL_SIGN_CLEAR:
+ /* vec = vec & ~bit31 */
+ ppc_vandc(gen->f, dst_vec2, dst_vec, bit31_vec);
+ break;
+ case TGSI_UTIL_SIGN_SET:
+ /* vec = vec | bit31 */
+ ppc_vor(gen->f, dst_vec2, dst_vec, bit31_vec);
+ break;
+ case TGSI_UTIL_SIGN_TOGGLE:
+ /* vec = vec ^ bit31 */
+ ppc_vxor(gen->f, dst_vec2, dst_vec, bit31_vec);
+ break;
+ default:
+ assert(0);
+ }
+ return dst_vec2;
+ }
+ }
+
+ return dst_vec;
+}
+
+
+
+/**
+ * Test if two TGSI src registers refer to the same memory location.
+ * We use this to avoid redundant register loads.
+ */
+static boolean
+equal_src_locs(const struct tgsi_full_src_register *a, uint chan_a,
+ const struct tgsi_full_src_register *b, uint chan_b)
+{
+ int swz_a, swz_b;
+ int sign_a, sign_b;
+ if (a->SrcRegister.File != b->SrcRegister.File)
+ return FALSE;
+ if (a->SrcRegister.Index != b->SrcRegister.Index)
+ return FALSE;
+ swz_a = tgsi_util_get_full_src_register_extswizzle(a, chan_a);
+ swz_b = tgsi_util_get_full_src_register_extswizzle(b, chan_b);
+ if (swz_a != swz_b)
+ return FALSE;
+ sign_a = tgsi_util_get_full_src_register_sign_mode(a, chan_a);
+ sign_b = tgsi_util_get_full_src_register_sign_mode(b, chan_b);
+ if (sign_a != sign_b)
+ return FALSE;
+ return TRUE;
+}
+
+
+/**
+ * Given a TGSI src register and channel index, return the PPC vector
+ * register containing the value. We use a cache to prevent re-loading
+ * the same register multiple times.
+ * \return index of PPC vector register with the desired src operand
+ */
+static int
+get_src_vec(struct gen_context *gen,
+ struct tgsi_full_instruction *inst, int src_reg, uint chan)
+{
+   const struct tgsi_full_src_register *src =
+ &inst->FullSrcRegisters[src_reg];
+ int vec;
+ uint i;
+
+ /* check the cache */
+ for (i = 0; i < gen->num_regs; i++) {
+ if (equal_src_locs(&gen->regs[i].src, gen->regs[i].chan, src, chan)) {
+ /* cache hit */
+ assert(gen->regs[i].vec >= 0);
+ return gen->regs[i].vec;
+ }
+ }
+
+ /* cache miss: allocate new vec reg and emit fetch/load code */
+ vec = emit_fetch(gen, src, chan);
+ gen->regs[gen->num_regs].src = *src;
+ gen->regs[gen->num_regs].chan = chan;
+ gen->regs[gen->num_regs].vec = vec;
+ gen->num_regs++;
+
+ assert(gen->num_regs <= Elements(gen->regs));
+
+ assert(vec >= 0);
+
+ return vec;
+}
+
+
+/**
+ * Clear the src operand cache. To be called at the end of each emit function.
+ */
+static void
+release_src_vecs(struct gen_context *gen)
+{
+ uint i;
+ for (i = 0; i < gen->num_regs; i++) {
+      const struct tgsi_full_src_register src = gen->regs[i].src;
+ if (!is_ppc_vec_temporary(&src)) {
+ ppc_release_vec_register(gen->f, gen->regs[i].vec);
+ }
+ }
+ gen->num_regs = 0;
+}
+
+
+
+static int
+get_dst_vec(struct gen_context *gen,
+ const struct tgsi_full_instruction *inst,
+ unsigned chan_index)
+{
+ const struct tgsi_full_dst_register *reg = &inst->FullDstRegisters[0];
+
+ if (is_ppc_vec_temporary_dst(reg)) {
+ int vec = gen->temps_map[reg->DstRegister.Index][chan_index];
+ return vec;
+ }
+ else {
+ return ppc_allocate_vec_register(gen->f);
+ }
+}
+
+
+/**
+ * Register store. Store 'src_vec' at location indicated by 'reg'.
+ * \param free_vec Should the src_vec be released when done?
+ */
+static void
+emit_store(struct gen_context *gen,
+ int src_vec,
+ const struct tgsi_full_instruction *inst,
+ unsigned chan_index,
+ boolean free_vec)
+{
+ const struct tgsi_full_dst_register *reg = &inst->FullDstRegisters[0];
+
+ switch (reg->DstRegister.File) {
+ case TGSI_FILE_OUTPUT:
+ {
+ int offset = (reg->DstRegister.Index * 4 + chan_index) * 16;
+ int offset_reg = emit_li_offset(gen, offset);
+ ppc_stvx(gen->f, src_vec, gen->outputs_reg, offset_reg);
+ }
+ break;
+ case TGSI_FILE_TEMPORARY:
+ if (is_ppc_vec_temporary_dst(reg)) {
+ if (!free_vec) {
+ int dst_vec = gen->temps_map[reg->DstRegister.Index][chan_index];
+ if (dst_vec != src_vec)
+ ppc_vmove(gen->f, dst_vec, src_vec);
+ }
+ free_vec = FALSE;
+ }
+ else {
+ int offset = (reg->DstRegister.Index * 4 + chan_index) * 16;
+ int offset_reg = emit_li_offset(gen, offset);
+ ppc_stvx(gen->f, src_vec, gen->temps_reg, offset_reg);
+ }
+ break;
+#if 0
+ case TGSI_FILE_ADDRESS:
+ emit_addrs(
+ func,
+ xmm,
+ reg->DstRegister.Index,
+ chan_index );
+ break;
+#endif
+ default:
+ assert( 0 );
+ }
+
+#if 0
+ switch( inst->Instruction.Saturate ) {
+ case TGSI_SAT_NONE:
+ break;
+
+ case TGSI_SAT_ZERO_ONE:
+ /* assert( 0 ); */
+ break;
+
+ case TGSI_SAT_MINUS_PLUS_ONE:
+ assert( 0 );
+ break;
+ }
+#endif
+
+ if (free_vec)
+ ppc_release_vec_register(gen->f, src_vec);
+}
+
+
+static void
+emit_scalar_unaryop(struct gen_context *gen, struct tgsi_full_instruction *inst)
+{
+ int v0, v1;
+ uint chan_index;
+
+ v0 = get_src_vec(gen, inst, 0, CHAN_X);
+ v1 = ppc_allocate_vec_register(gen->f);
+
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_RSQ:
+ /* v1 = 1.0 / sqrt(v0) */
+ ppc_vrsqrtefp(gen->f, v1, v0);
+ break;
+ case TGSI_OPCODE_RCP:
+ /* v1 = 1.0 / v0 */
+ ppc_vrefp(gen->f, v1, v0);
+ break;
+ default:
+ assert(0);
+ }
+
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ emit_store(gen, v1, inst, chan_index, FALSE);
+ }
+
+ release_src_vecs(gen);
+ ppc_release_vec_register(gen->f, v1);
+}
+
+
+static void
+emit_unaryop(struct gen_context *gen, struct tgsi_full_instruction *inst)
+{
+ uint chan_index;
+
+ FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan_index) {
+ int v0 = get_src_vec(gen, inst, 0, chan_index); /* v0 = srcreg[0] */
+ int v1 = get_dst_vec(gen, inst, chan_index);
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_ABS:
+ /* turn off the most significant bit of each vector float word */
+ {
+ int bit31_vec = gen_get_bit31_vec(gen);
+ ppc_vandc(gen->f, v1, v0, bit31_vec); /* v1 = v0 & ~bit31 */
+ }
+ break;
+ case TGSI_OPCODE_FLR:
+ ppc_vrfim(gen->f, v1, v0); /* v1 = floor(v0) */
+ break;
+ case TGSI_OPCODE_FRC:
+ ppc_vrfim(gen->f, v1, v0); /* tmp = floor(v0) */
+ ppc_vsubfp(gen->f, v1, v0, v1); /* v1 = v0 - v1 */
+ break;
+ case TGSI_OPCODE_EX2:
+ ppc_vexptefp(gen->f, v1, v0); /* v1 = 2^v0 */
+ break;
+ case TGSI_OPCODE_LG2:
+ /* XXX this may be broken! */
+ ppc_vlogefp(gen->f, v1, v0); /* v1 = log2(v0) */
+ break;
+ case TGSI_OPCODE_MOV:
+ case TGSI_OPCODE_SWZ:
+ if (v0 != v1)
+ ppc_vmove(gen->f, v1, v0);
+ break;
+ default:
+ assert(0);
+ }
+ emit_store(gen, v1, inst, chan_index, TRUE); /* store v0 */
+ }
+
+ release_src_vecs(gen);
+}
+
+
+static void
+emit_binop(struct gen_context *gen, struct tgsi_full_instruction *inst)
+{
+ int zero_vec = -1;
+ uint chan;
+
+ if (inst->Instruction.Opcode == TGSI_OPCODE_MUL) {
+ zero_vec = ppc_allocate_vec_register(gen->f);
+ ppc_vzero(gen->f, zero_vec);
+ }
+
+ FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan) {
+ /* fetch src operands */
+ int v0 = get_src_vec(gen, inst, 0, chan);
+ int v1 = get_src_vec(gen, inst, 1, chan);
+ int v2 = get_dst_vec(gen, inst, chan);
+
+ /* emit binop */
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_ADD:
+ ppc_vaddfp(gen->f, v2, v0, v1);
+ break;
+ case TGSI_OPCODE_SUB:
+ ppc_vsubfp(gen->f, v2, v0, v1);
+ break;
+ case TGSI_OPCODE_MUL:
+ ppc_vmaddfp(gen->f, v2, v0, v1, zero_vec);
+ break;
+ case TGSI_OPCODE_MIN:
+ ppc_vminfp(gen->f, v2, v0, v1);
+ break;
+ case TGSI_OPCODE_MAX:
+ ppc_vmaxfp(gen->f, v2, v0, v1);
+ break;
+ default:
+ assert(0);
+ }
+
+ /* store v2 */
+ emit_store(gen, v2, inst, chan, TRUE);
+ }
+
+ if (inst->Instruction.Opcode == TGSI_OPCODE_MUL)
+ ppc_release_vec_register(gen->f, zero_vec);
+
+ release_src_vecs(gen);
+}
+
+
+static void
+emit_triop(struct gen_context *gen, struct tgsi_full_instruction *inst)
+{
+ uint chan;
+
+ FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan) {
+ /* fetch src operands */
+ int v0 = get_src_vec(gen, inst, 0, chan);
+ int v1 = get_src_vec(gen, inst, 1, chan);
+ int v2 = get_src_vec(gen, inst, 2, chan);
+ int v3 = get_dst_vec(gen, inst, chan);
+
+ /* emit ALU */
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_MAD:
+ ppc_vmaddfp(gen->f, v3, v0, v1, v2); /* v3 = v0 * v1 + v2 */
+ break;
+ case TGSI_OPCODE_LRP:
+ ppc_vsubfp(gen->f, v3, v1, v2); /* v3 = v1 - v2 */
+ ppc_vmaddfp(gen->f, v3, v0, v3, v2); /* v3 = v0 * v3 + v2 */
+ break;
+ default:
+ assert(0);
+ }
+
+ /* store v3 */
+ emit_store(gen, v3, inst, chan, TRUE);
+ }
+
+ release_src_vecs(gen);
+}
+
+
+/**
+ * Vector comparisons, resulting in 1.0 or 0.0 values.
+ */
+static void
+emit_inequality(struct gen_context *gen, struct tgsi_full_instruction *inst)
+{
+ uint chan;
+ int one_vec = gen_one_vec(gen);
+
+ FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan) {
+ /* fetch src operands */
+ int v0 = get_src_vec(gen, inst, 0, chan);
+ int v1 = get_src_vec(gen, inst, 1, chan);
+ int v2 = get_dst_vec(gen, inst, chan);
+ boolean complement = FALSE;
+
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_SNE:
+ complement = TRUE;
+ /* fall-through */
+ case TGSI_OPCODE_SEQ:
+ ppc_vcmpeqfpx(gen->f, v2, v0, v1); /* v2 = v0 == v1 ? ~0 : 0 */
+ break;
+
+ case TGSI_OPCODE_SGE:
+ complement = TRUE;
+ /* fall-through */
+ case TGSI_OPCODE_SLT:
+ ppc_vcmpgtfpx(gen->f, v2, v1, v0); /* v2 = v1 > v0 ? ~0 : 0 */
+ break;
+
+ case TGSI_OPCODE_SLE:
+ complement = TRUE;
+ /* fall-through */
+ case TGSI_OPCODE_SGT:
+ ppc_vcmpgtfpx(gen->f, v2, v0, v1); /* v2 = v0 > v1 ? ~0 : 0 */
+ break;
+ default:
+ assert(0);
+ }
+
+ /* v2 is now {0,0,0,0} or {~0,~0,~0,~0} */
+
+ if (complement)
+ ppc_vandc(gen->f, v2, one_vec, v2); /* v2 = one_vec & ~v2 */
+ else
+ ppc_vand(gen->f, v2, one_vec, v2); /* v2 = one_vec & v2 */
+
+ /* store v2 */
+ emit_store(gen, v2, inst, chan, TRUE);
+ }
+
+ release_src_vecs(gen);
+}
+
+
+static void
+emit_dotprod(struct gen_context *gen, struct tgsi_full_instruction *inst)
+{
+ int v0, v1, v2;
+ uint chan_index;
+
+ v2 = ppc_allocate_vec_register(gen->f);
+
+ ppc_vzero(gen->f, v2); /* v2 = {0, 0, 0, 0} */
+
+ v0 = get_src_vec(gen, inst, 0, CHAN_X); /* v0 = src0.XXXX */
+ v1 = get_src_vec(gen, inst, 1, CHAN_X); /* v1 = src1.XXXX */
+ ppc_vmaddfp(gen->f, v2, v0, v1, v2); /* v2 = v0 * v1 + v2 */
+
+ v0 = get_src_vec(gen, inst, 0, CHAN_Y); /* v0 = src0.YYYY */
+ v1 = get_src_vec(gen, inst, 1, CHAN_Y); /* v1 = src1.YYYY */
+ ppc_vmaddfp(gen->f, v2, v0, v1, v2); /* v2 = v0 * v1 + v2 */
+
+ v0 = get_src_vec(gen, inst, 0, CHAN_Z); /* v0 = src0.ZZZZ */
+ v1 = get_src_vec(gen, inst, 1, CHAN_Z); /* v1 = src1.ZZZZ */
+ ppc_vmaddfp(gen->f, v2, v0, v1, v2); /* v2 = v0 * v1 + v2 */
+
+ if (inst->Instruction.Opcode == TGSI_OPCODE_DP4) {
+ v0 = get_src_vec(gen, inst, 0, CHAN_W); /* v0 = src0.WWWW */
+ v1 = get_src_vec(gen, inst, 1, CHAN_W); /* v1 = src1.WWWW */
+ ppc_vmaddfp(gen->f, v2, v0, v1, v2); /* v2 = v0 * v1 + v2 */
+ }
+ else if (inst->Instruction.Opcode == TGSI_OPCODE_DPH) {
+ v1 = get_src_vec(gen, inst, 1, CHAN_W); /* v1 = src1.WWWW */
+ ppc_vaddfp(gen->f, v2, v2, v1); /* v2 = v2 + v1 */
+ }
+
+ FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan_index) {
+ emit_store(gen, v2, inst, chan_index, FALSE); /* store v2, free v2 later */
+ }
+
+ release_src_vecs(gen);
+
+ ppc_release_vec_register(gen->f, v2);
+}
+
+
+/** Approximation for vr = pow(va, vb) */
+static void
+ppc_vec_pow(struct ppc_function *f, int vr, int va, int vb)
+{
+ /* pow(a,b) ~= exp2(log2(a) * b) */
+ int t_vec = ppc_allocate_vec_register(f);
+ int zero_vec = ppc_allocate_vec_register(f);
+
+ ppc_vzero(f, zero_vec);
+
+ ppc_vlogefp(f, t_vec, va); /* t = log2(va) */
+ ppc_vmaddfp(f, t_vec, t_vec, vb, zero_vec); /* t = t * vb + zero */
+ ppc_vexptefp(f, vr, t_vec); /* vr = 2^t */
+
+ ppc_release_vec_register(f, t_vec);
+ ppc_release_vec_register(f, zero_vec);
+}
+
+
+static void
+emit_lit(struct gen_context *gen, struct tgsi_full_instruction *inst)
+{
+ int one_vec = gen_one_vec(gen);
+
+ /* Compute X */
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X)) {
+ emit_store(gen, one_vec, inst, CHAN_X, FALSE);
+ }
+
+ /* Compute Y, Z */
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y) ||
+ IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
+ int x_vec;
+ int zero_vec = ppc_allocate_vec_register(gen->f);
+
+ x_vec = get_src_vec(gen, inst, 0, CHAN_X); /* x_vec = src[0].x */
+
+ ppc_vzero(gen->f, zero_vec); /* zero = {0,0,0,0} */
+ ppc_vmaxfp(gen->f, x_vec, x_vec, zero_vec); /* x_vec = max(x_vec, 0) */
+
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {
+ emit_store(gen, x_vec, inst, CHAN_Y, FALSE);
+ }
+
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
+ int y_vec, w_vec;
+ int z_vec = ppc_allocate_vec_register(gen->f);
+ int pow_vec = ppc_allocate_vec_register(gen->f);
+ int pos_vec = ppc_allocate_vec_register(gen->f);
+ int p128_vec = ppc_allocate_vec_register(gen->f);
+ int n128_vec = ppc_allocate_vec_register(gen->f);
+
+ y_vec = get_src_vec(gen, inst, 0, CHAN_Y); /* y_vec = src[0].y */
+ ppc_vmaxfp(gen->f, y_vec, y_vec, zero_vec); /* y_vec = max(y_vec, 0) */
+
+ w_vec = get_src_vec(gen, inst, 0, CHAN_W); /* w_vec = src[0].w */
+
+ /* clamp W to [-128, 128] */
+ load_constant_vec(gen, p128_vec, 128.0f);
+ load_constant_vec(gen, n128_vec, -128.0f);
+ ppc_vmaxfp(gen->f, w_vec, w_vec, n128_vec); /* w = max(w, -128) */
+ ppc_vminfp(gen->f, w_vec, w_vec, p128_vec); /* w = min(w, 128) */
+
+         /* if tmp.x > 0
+ * z = pow(tmp.y, tmp.w)
+ * else
+ * z = 0.0
+ */
+ ppc_vec_pow(gen->f, pow_vec, y_vec, w_vec); /* pow = pow(y, w) */
+ ppc_vcmpgtfpx(gen->f, pos_vec, x_vec, zero_vec); /* pos = x > 0 */
+ ppc_vand(gen->f, z_vec, pow_vec, pos_vec); /* z = pow & pos */
+
+ emit_store(gen, z_vec, inst, CHAN_Z, FALSE);
+
+ ppc_release_vec_register(gen->f, z_vec);
+ ppc_release_vec_register(gen->f, pow_vec);
+ ppc_release_vec_register(gen->f, pos_vec);
+ ppc_release_vec_register(gen->f, p128_vec);
+ ppc_release_vec_register(gen->f, n128_vec);
+ }
+
+ ppc_release_vec_register(gen->f, zero_vec);
+ }
+
+ /* Compute W */
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_W)) {
+ emit_store(gen, one_vec, inst, CHAN_W, FALSE);
+ }
+
+ release_src_vecs(gen);
+}
+
+
+static void
+emit_exp(struct gen_context *gen, struct tgsi_full_instruction *inst)
+{
+ const int one_vec = gen_one_vec(gen);
+ int src_vec;
+
+ /* get src arg */
+ src_vec = get_src_vec(gen, inst, 0, CHAN_X);
+
+ /* Compute X = 2^floor(src) */
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X)) {
+ int dst_vec = get_dst_vec(gen, inst, CHAN_X);
+ int tmp_vec = ppc_allocate_vec_register(gen->f);
+ ppc_vrfim(gen->f, tmp_vec, src_vec); /* tmp = floor(src); */
+ ppc_vexptefp(gen->f, dst_vec, tmp_vec); /* dst = 2 ^ tmp */
+ emit_store(gen, dst_vec, inst, CHAN_X, TRUE);
+ ppc_release_vec_register(gen->f, tmp_vec);
+ }
+
+ /* Compute Y = src - floor(src) */
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {
+ int dst_vec = get_dst_vec(gen, inst, CHAN_Y);
+ int tmp_vec = ppc_allocate_vec_register(gen->f);
+ ppc_vrfim(gen->f, tmp_vec, src_vec); /* tmp = floor(src); */
+ ppc_vsubfp(gen->f, dst_vec, src_vec, tmp_vec); /* dst = src - tmp */
+ emit_store(gen, dst_vec, inst, CHAN_Y, TRUE);
+ ppc_release_vec_register(gen->f, tmp_vec);
+ }
+
+ /* Compute Z = RoughApprox2ToX(src) */
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
+ int dst_vec = get_dst_vec(gen, inst, CHAN_Z);
+ ppc_vexptefp(gen->f, dst_vec, src_vec); /* dst = 2 ^ src */
+ emit_store(gen, dst_vec, inst, CHAN_Z, TRUE);
+ }
+
+ /* Compute W = 1.0 */
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_W)) {
+ emit_store(gen, one_vec, inst, CHAN_W, FALSE);
+ }
+
+ release_src_vecs(gen);
+}
+
+
+static void
+emit_log(struct gen_context *gen, struct tgsi_full_instruction *inst)
+{
+ const int bit31_vec = gen_get_bit31_vec(gen);
+ const int one_vec = gen_one_vec(gen);
+ int src_vec, abs_vec;
+
+ /* get src arg */
+ src_vec = get_src_vec(gen, inst, 0, CHAN_X);
+
+ /* compute abs(src) */
+ abs_vec = ppc_allocate_vec_register(gen->f);
+ ppc_vandc(gen->f, abs_vec, src_vec, bit31_vec); /* abs = src & ~bit31 */
+
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X) &&
+ IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {
+
+ /* compute tmp = floor(log2(abs)) */
+ int tmp_vec = ppc_allocate_vec_register(gen->f);
+ ppc_vlogefp(gen->f, tmp_vec, abs_vec); /* tmp = log2(abs) */
+ ppc_vrfim(gen->f, tmp_vec, tmp_vec); /* tmp = floor(tmp); */
+
+ /* Compute X = tmp */
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X)) {
+ emit_store(gen, tmp_vec, inst, CHAN_X, FALSE);
+ }
+
+ /* Compute Y = abs / 2^tmp */
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {
+ const int zero_vec = ppc_allocate_vec_register(gen->f);
+ ppc_vzero(gen->f, zero_vec);
+ ppc_vexptefp(gen->f, tmp_vec, tmp_vec); /* tmp = 2 ^ tmp */
+ ppc_vrefp(gen->f, tmp_vec, tmp_vec); /* tmp = 1 / tmp */
+ /* tmp = abs * tmp + zero */
+ ppc_vmaddfp(gen->f, tmp_vec, abs_vec, tmp_vec, zero_vec);
+ emit_store(gen, tmp_vec, inst, CHAN_Y, FALSE);
+ ppc_release_vec_register(gen->f, zero_vec);
+ }
+
+ ppc_release_vec_register(gen->f, tmp_vec);
+ }
+
+ /* Compute Z = RoughApproxLog2(abs) */
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
+ int dst_vec = get_dst_vec(gen, inst, CHAN_Z);
+ ppc_vlogefp(gen->f, dst_vec, abs_vec); /* dst = log2(abs) */
+ emit_store(gen, dst_vec, inst, CHAN_Z, TRUE);
+ }
+
+ /* Compute W = 1.0 */
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_W)) {
+ emit_store(gen, one_vec, inst, CHAN_W, FALSE);
+ }
+
+ ppc_release_vec_register(gen->f, abs_vec);
+ release_src_vecs(gen);
+}
+
+
+static void
+emit_pow(struct gen_context *gen, struct tgsi_full_instruction *inst)
+{
+ int s0_vec = get_src_vec(gen, inst, 0, CHAN_X);
+ int s1_vec = get_src_vec(gen, inst, 1, CHAN_X);
+ int pow_vec = ppc_allocate_vec_register(gen->f);
+ int chan;
+
+ ppc_vec_pow(gen->f, pow_vec, s0_vec, s1_vec);
+
+ FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan) {
+ emit_store(gen, pow_vec, inst, chan, FALSE);
+ }
+
+ ppc_release_vec_register(gen->f, pow_vec);
+
+ release_src_vecs(gen);
+}
+
+
+static void
+emit_xpd(struct gen_context *gen, struct tgsi_full_instruction *inst)
+{
+ int x0_vec, y0_vec, z0_vec;
+ int x1_vec, y1_vec, z1_vec;
+ int zero_vec, tmp_vec;
+
+ zero_vec = ppc_allocate_vec_register(gen->f);
+ ppc_vzero(gen->f, zero_vec);
+
+ tmp_vec = ppc_allocate_vec_register(gen->f);
+
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y) ||
+ IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
+ x0_vec = get_src_vec(gen, inst, 0, CHAN_X);
+ x1_vec = get_src_vec(gen, inst, 1, CHAN_X);
+ }
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X) ||
+ IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
+ y0_vec = get_src_vec(gen, inst, 0, CHAN_Y);
+ y1_vec = get_src_vec(gen, inst, 1, CHAN_Y);
+ }
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X) ||
+ IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {
+ z0_vec = get_src_vec(gen, inst, 0, CHAN_Z);
+ z1_vec = get_src_vec(gen, inst, 1, CHAN_Z);
+ }
+
+ IF_IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X) {
+ /* tmp = y0 * z1 */
+ ppc_vmaddfp(gen->f, tmp_vec, y0_vec, z1_vec, zero_vec);
+      /* tmp = tmp - z0 * y1 */
+ ppc_vnmsubfp(gen->f, tmp_vec, tmp_vec, z0_vec, y1_vec);
+ emit_store(gen, tmp_vec, inst, CHAN_X, FALSE);
+ }
+ IF_IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y) {
+ /* tmp = z0 * x1 */
+ ppc_vmaddfp(gen->f, tmp_vec, z0_vec, x1_vec, zero_vec);
+ /* tmp = tmp - x0 * z1 */
+ ppc_vnmsubfp(gen->f, tmp_vec, tmp_vec, x0_vec, z1_vec);
+ emit_store(gen, tmp_vec, inst, CHAN_Y, FALSE);
+ }
+ IF_IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z) {
+ /* tmp = x0 * y1 */
+ ppc_vmaddfp(gen->f, tmp_vec, x0_vec, y1_vec, zero_vec);
+ /* tmp = tmp - y0 * x1 */
+ ppc_vnmsubfp(gen->f, tmp_vec, tmp_vec, y0_vec, x1_vec);
+ emit_store(gen, tmp_vec, inst, CHAN_Z, FALSE);
+ }
+ /* W is undefined */
+
+ ppc_release_vec_register(gen->f, tmp_vec);
+ ppc_release_vec_register(gen->f, zero_vec);
+ release_src_vecs(gen);
+}
+
+static int
+emit_instruction(struct gen_context *gen,
+ struct tgsi_full_instruction *inst)
+{
+
+ /* we don't handle saturation/clamping yet */
+ if (inst->Instruction.Saturate != TGSI_SAT_NONE)
+ return 0;
+
+   /* need to use extra temps to fix SOA dependencies: */
+   if (tgsi_check_soa_dependencies(inst))
+      return 0;
+
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_MOV:
+ case TGSI_OPCODE_SWZ:
+ case TGSI_OPCODE_ABS:
+ case TGSI_OPCODE_FLR:
+ case TGSI_OPCODE_FRC:
+ case TGSI_OPCODE_EX2:
+ case TGSI_OPCODE_LG2:
+ emit_unaryop(gen, inst);
+ break;
+ case TGSI_OPCODE_RSQ:
+ case TGSI_OPCODE_RCP:
+ emit_scalar_unaryop(gen, inst);
+ break;
+ case TGSI_OPCODE_ADD:
+ case TGSI_OPCODE_SUB:
+ case TGSI_OPCODE_MUL:
+ case TGSI_OPCODE_MIN:
+ case TGSI_OPCODE_MAX:
+ emit_binop(gen, inst);
+ break;
+ case TGSI_OPCODE_SEQ:
+ case TGSI_OPCODE_SNE:
+ case TGSI_OPCODE_SLT:
+ case TGSI_OPCODE_SGT:
+ case TGSI_OPCODE_SLE:
+ case TGSI_OPCODE_SGE:
+ emit_inequality(gen, inst);
+ break;
+ case TGSI_OPCODE_MAD:
+ case TGSI_OPCODE_LRP:
+ emit_triop(gen, inst);
+ break;
+ case TGSI_OPCODE_DP3:
+ case TGSI_OPCODE_DP4:
+ case TGSI_OPCODE_DPH:
+ emit_dotprod(gen, inst);
+ break;
+ case TGSI_OPCODE_LIT:
+ emit_lit(gen, inst);
+ break;
+ case TGSI_OPCODE_LOG:
+ emit_log(gen, inst);
+ break;
+ case TGSI_OPCODE_EXP:
+ emit_exp(gen, inst);
+ break;
+ case TGSI_OPCODE_POW:
+ emit_pow(gen, inst);
+ break;
+ case TGSI_OPCODE_XPD:
+ emit_xpd(gen, inst);
+ break;
+ case TGSI_OPCODE_END:
+ /* normal end */
+ return 1;
+ default:
+ return 0;
+ }
+ return 1;
+}
+
+
+static void
+emit_declaration(
+ struct ppc_function *func,
+ struct tgsi_full_declaration *decl )
+{
+ if( decl->Declaration.File == TGSI_FILE_INPUT ) {
+#if 0
+ unsigned first, last, mask;
+ unsigned i, j;
+
+ first = decl->DeclarationRange.First;
+ last = decl->DeclarationRange.Last;
+ mask = decl->Declaration.UsageMask;
+
+ for( i = first; i <= last; i++ ) {
+ for( j = 0; j < NUM_CHANNELS; j++ ) {
+ if( mask & (1 << j) ) {
+ switch( decl->Declaration.Interpolate ) {
+ case TGSI_INTERPOLATE_CONSTANT:
+ emit_coef_a0( func, 0, i, j );
+ emit_inputs( func, 0, i, j );
+ break;
+
+ case TGSI_INTERPOLATE_LINEAR:
+ emit_tempf( func, 0, 0, TGSI_SWIZZLE_X );
+ emit_coef_dadx( func, 1, i, j );
+ emit_tempf( func, 2, 0, TGSI_SWIZZLE_Y );
+ emit_coef_dady( func, 3, i, j );
+ emit_mul( func, 0, 1 ); /* x * dadx */
+ emit_coef_a0( func, 4, i, j );
+ emit_mul( func, 2, 3 ); /* y * dady */
+ emit_add( func, 0, 4 ); /* x * dadx + a0 */
+ emit_add( func, 0, 2 ); /* x * dadx + y * dady + a0 */
+ emit_inputs( func, 0, i, j );
+ break;
+
+ case TGSI_INTERPOLATE_PERSPECTIVE:
+ emit_tempf( func, 0, 0, TGSI_SWIZZLE_X );
+ emit_coef_dadx( func, 1, i, j );
+ emit_tempf( func, 2, 0, TGSI_SWIZZLE_Y );
+ emit_coef_dady( func, 3, i, j );
+ emit_mul( func, 0, 1 ); /* x * dadx */
+ emit_tempf( func, 4, 0, TGSI_SWIZZLE_W );
+ emit_coef_a0( func, 5, i, j );
+ emit_rcp( func, 4, 4 ); /* 1.0 / w */
+ emit_mul( func, 2, 3 ); /* y * dady */
+ emit_add( func, 0, 5 ); /* x * dadx + a0 */
+ emit_add( func, 0, 2 ); /* x * dadx + y * dady + a0 */
+ emit_mul( func, 0, 4 ); /* (x * dadx + y * dady + a0) / w */
+ emit_inputs( func, 0, i, j );
+ break;
+
+ default:
+ assert( 0 );
+ break;
+ }
+ }
+ }
+ }
+#endif
+ }
+}
+
+
+
+static void
+emit_prologue(struct ppc_function *func)
+{
+ /* XXX set up stack frame */
+}
+
+
+static void
+emit_epilogue(struct ppc_function *func)
+{
+ ppc_comment(func, -4, "Epilogue:");
+ ppc_return(func);
+ /* XXX restore prev stack frame */
+#if 0
+ debug_printf("PPC: Emitted %u instructions\n", func->num_inst);
+#endif
+}
+
+
+
+/**
+ * Translate a TGSI vertex/fragment shader to PPC code.
+ *
+ * \param tokens the TGSI input shader
+ * \param func the output PPC code/function
+ * \param immediates buffer to place immediates, later passed to PPC func
+ * \return TRUE for success, FALSE if translation failed
+ */
+boolean
+tgsi_emit_ppc(const struct tgsi_token *tokens,
+ struct ppc_function *func,
+ float (*immediates)[4],
+ boolean do_swizzles )
+{
+ static int use_ppc_asm = -1;
+ struct tgsi_parse_context parse;
+ /*boolean instruction_phase = FALSE;*/
+ unsigned ok = 1;
+ uint num_immediates = 0;
+ struct gen_context gen;
+ uint ic = 0;
+
+ if (use_ppc_asm < 0) {
+ /* If GALLIUM_NOPPC is set, don't use PPC codegen */
+ use_ppc_asm = !debug_get_bool_option("GALLIUM_NOPPC", FALSE);
+ }
+ if (!use_ppc_asm)
+ return FALSE;
+
+ if (0) {
+ debug_printf("\n********* TGSI->PPC ********\n");
+ tgsi_dump(tokens, 0);
+ }
+
+ util_init_math();
+
+ init_gen_context(&gen, func);
+
+ emit_prologue(func);
+
+ tgsi_parse_init( &parse, tokens );
+
+ while (!tgsi_parse_end_of_tokens(&parse) && ok) {
+ tgsi_parse_token(&parse);
+
+ switch (parse.FullToken.Token.Type) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ if (parse.FullHeader.Processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
+ emit_declaration(func, &parse.FullToken.FullDeclaration );
+ }
+ break;
+
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ if (func->print) {
+ _debug_printf("# ");
+ ic++;
+ tgsi_dump_instruction(&parse.FullToken.FullInstruction, ic);
+ }
+
+ ok = emit_instruction(&gen, &parse.FullToken.FullInstruction);
+
+ if (!ok) {
+ uint opcode = parse.FullToken.FullInstruction.Instruction.Opcode;
+ debug_printf("failed to translate tgsi opcode %d (%s) to PPC (%s)\n",
+ opcode,
+ tgsi_get_opcode_name(opcode),
+ parse.FullHeader.Processor.Processor == TGSI_PROCESSOR_VERTEX ?
+ "vertex shader" : "fragment shader");
+ }
+ break;
+
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+         /* copy each immediate's components into a float[4]; emit_fetch() splats them for SoA */
+ {
+ const uint size = parse.FullToken.FullImmediate.Immediate.NrTokens - 1;
+ uint i;
+ assert(size <= 4);
+ assert(num_immediates < TGSI_EXEC_NUM_IMMEDIATES);
+ for (i = 0; i < size; i++) {
+ immediates[num_immediates][i] =
+ parse.FullToken.FullImmediate.u[i].Float;
+ }
+ num_immediates++;
+ }
+ break;
+
+ default:
+ ok = 0;
+ assert( 0 );
+ }
+ }
+
+ emit_epilogue(func);
+
+ tgsi_parse_free( &parse );
+
+ if (ppc_num_instructions(func) == 0) {
+ /* ran out of memory for instructions */
+ ok = FALSE;
+ }
+
+ if (!ok)
+ debug_printf("TGSI->PPC translation failed\n");
+
+ return ok;
+}
+
+#endif /* PIPE_ARCH_PPC */
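For reference, the algebra behind ppc_vec_pow() above, written out with the scalar C math library (an illustrative sketch, not from the patch itself). The AltiVec version uses the reduced-precision vlogefp/vexptefp estimate instructions, so its results only approximate this exact form.

#include <math.h>

/* pow(a, b) == exp2(log2(a) * b), valid for a > 0 */
static float
scalar_pow_via_exp2(float a, float b)
{
   return exp2f(log2f(a) * b);
}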
diff --git a/src/gallium/auxiliary/tgsi/tgsi_ppc.h b/src/gallium/auxiliary/tgsi/tgsi_ppc.h
new file mode 100644
index 0000000000..829ec075e7
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_ppc.h
@@ -0,0 +1,51 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TGSI_PPC_H
+#define TGSI_PPC_H
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+struct tgsi_token;
+struct ppc_function;
+
+extern const float ppc_builtin_constants[];
+
+
+boolean
+tgsi_emit_ppc(const struct tgsi_token *tokens,
+ struct ppc_function *function,
+ float (*immediates)[4],
+ boolean do_swizzles);
+
+#if defined __cplusplus
+}
+#endif
+
+#endif /* TGSI_PPC_H */
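A sketch of how a driver might invoke the translator declared above (illustrative only, not from the patch itself). compile_shader_to_ppc() is a hypothetical wrapper; initializing and finalizing the ppc_function with the rtasm_ppc helpers is the caller's job and is elided here. The immediates buffer is sized TGSI_EXEC_NUM_IMMEDIATES float[4] entries, as the assert in tgsi_emit_ppc() above implies, and it must outlive the generated code, since the emitted instructions read from it at run time.

#include "tgsi/tgsi_ppc.h"

static boolean
compile_shader_to_ppc(const struct tgsi_token *tokens,
                      struct ppc_function *func,   /* initialized by the caller via rtasm_ppc */
                      float (*immediates)[4])      /* must outlive the generated code */
{
   return tgsi_emit_ppc(tokens, func, immediates, TRUE /* do_swizzles */);
}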
diff --git a/src/gallium/auxiliary/tgsi/tgsi_sanity.c b/src/gallium/auxiliary/tgsi/tgsi_sanity.c
new file mode 100644
index 0000000000..8a13885da9
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_sanity.c
@@ -0,0 +1,385 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_debug.h"
+#include "tgsi_sanity.h"
+#include "tgsi_info.h"
+#include "tgsi_iterate.h"
+
+typedef uint reg_flag;
+
+#define BITS_IN_REG_FLAG (sizeof( reg_flag ) * 8)
+
+#define MAX_REGISTERS 256
+#define MAX_REG_FLAGS ((MAX_REGISTERS + BITS_IN_REG_FLAG - 1) / BITS_IN_REG_FLAG)
+
+struct sanity_check_ctx
+{
+ struct tgsi_iterate_context iter;
+
+ reg_flag regs_decl[TGSI_FILE_COUNT][MAX_REG_FLAGS];
+ reg_flag regs_used[TGSI_FILE_COUNT][MAX_REG_FLAGS];
+ boolean regs_ind_used[TGSI_FILE_COUNT];
+ uint num_imms;
+ uint num_instructions;
+ uint index_of_END;
+
+ uint errors;
+ uint warnings;
+};
+
+static void
+report_error(
+ struct sanity_check_ctx *ctx,
+ const char *format,
+ ... )
+{
+ va_list args;
+
+ debug_printf( "Error : " );
+ va_start( args, format );
+ _debug_vprintf( format, args );
+ va_end( args );
+ debug_printf( "\n" );
+ ctx->errors++;
+}
+
+static void
+report_warning(
+ struct sanity_check_ctx *ctx,
+ const char *format,
+ ... )
+{
+ va_list args;
+
+ debug_printf( "Warning: " );
+ va_start( args, format );
+ _debug_vprintf( format, args );
+ va_end( args );
+ debug_printf( "\n" );
+ ctx->warnings++;
+}
+
+static boolean
+check_file_name(
+ struct sanity_check_ctx *ctx,
+ uint file )
+{
+ if (file <= TGSI_FILE_NULL || file >= TGSI_FILE_COUNT) {
+ report_error( ctx, "(%u): Invalid register file name", file );
+ return FALSE;
+ }
+ return TRUE;
+}
+
+static boolean
+is_register_declared(
+ struct sanity_check_ctx *ctx,
+ uint file,
+ int index )
+{
+ assert( index >= 0 && index < MAX_REGISTERS );
+
+ return (ctx->regs_decl[file][index / BITS_IN_REG_FLAG] & (1 << (index % BITS_IN_REG_FLAG))) ? TRUE : FALSE;
+}
+
+static boolean
+is_any_register_declared(
+ struct sanity_check_ctx *ctx,
+ uint file )
+{
+ uint i;
+
+ for (i = 0; i < MAX_REG_FLAGS; i++)
+ if (ctx->regs_decl[file][i])
+ return TRUE;
+ return FALSE;
+}
+
+static boolean
+is_register_used(
+ struct sanity_check_ctx *ctx,
+ uint file,
+ int index )
+{
+ assert( index < MAX_REGISTERS );
+
+ return (ctx->regs_used[file][index / BITS_IN_REG_FLAG] & (1 << (index % BITS_IN_REG_FLAG))) ? TRUE : FALSE;
+}
+
+static const char *file_names[TGSI_FILE_COUNT] =
+{
+ "NULL",
+ "CONST",
+ "IN",
+ "OUT",
+ "TEMP",
+ "SAMP",
+ "ADDR",
+ "IMM",
+ "LOOP"
+};
+
+static boolean
+check_register_usage(
+ struct sanity_check_ctx *ctx,
+ uint file,
+ int index,
+ const char *name,
+ boolean indirect_access )
+{
+ if (!check_file_name( ctx, file ))
+ return FALSE;
+
+ if (indirect_access) {
+ /* Note that 'index' is an offset relative to the value of the
+ * address register. No range checking done here.
+ */
+ if (!is_any_register_declared( ctx, file ))
+ report_error( ctx, "%s: Undeclared %s register", file_names[file], name );
+ ctx->regs_ind_used[file] = TRUE;
+ }
+ else {
+ if (index < 0 || index >= MAX_REGISTERS) {
+ report_error( ctx, "%s[%d]: Invalid %s index", file_names[file], index, name );
+ return FALSE;
+ }
+
+ if (!is_register_declared( ctx, file, index ))
+ report_error( ctx, "%s[%d]: Undeclared %s register", file_names[file], index, name );
+ ctx->regs_used[file][index / BITS_IN_REG_FLAG] |= (1 << (index % BITS_IN_REG_FLAG));
+ }
+ return TRUE;
+}
+
+static boolean
+iter_instruction(
+ struct tgsi_iterate_context *iter,
+ struct tgsi_full_instruction *inst )
+{
+ struct sanity_check_ctx *ctx = (struct sanity_check_ctx *) iter;
+ const struct tgsi_opcode_info *info;
+ uint i;
+
+ if (inst->Instruction.Opcode == TGSI_OPCODE_END) {
+ if (ctx->index_of_END != ~0) {
+ report_error( ctx, "Too many END instructions" );
+ }
+ ctx->index_of_END = ctx->num_instructions;
+ }
+
+ info = tgsi_get_opcode_info( inst->Instruction.Opcode );
+ if (info == NULL) {
+ report_error( ctx, "(%u): Invalid instruction opcode", inst->Instruction.Opcode );
+ return TRUE;
+ }
+
+ if (info->num_dst != inst->Instruction.NumDstRegs) {
+ report_error( ctx, "%s: Invalid number of destination operands, should be %u", info->mnemonic, info->num_dst );
+ }
+ if (info->num_src != inst->Instruction.NumSrcRegs) {
+ report_error( ctx, "%s: Invalid number of source operands, should be %u", info->mnemonic, info->num_src );
+ }
+
+ /* Check destination and source registers' validity.
+ * Mark the registers as used.
+ */
+ for (i = 0; i < inst->Instruction.NumDstRegs; i++) {
+ check_register_usage(
+ ctx,
+ inst->FullDstRegisters[i].DstRegister.File,
+ inst->FullDstRegisters[i].DstRegister.Index,
+ "destination",
+ FALSE );
+ }
+ for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
+ check_register_usage(
+ ctx,
+ inst->FullSrcRegisters[i].SrcRegister.File,
+ inst->FullSrcRegisters[i].SrcRegister.Index,
+ "source",
+ (boolean)inst->FullSrcRegisters[i].SrcRegister.Indirect );
+ if (inst->FullSrcRegisters[i].SrcRegister.Indirect) {
+ uint file;
+ int index;
+
+ file = inst->FullSrcRegisters[i].SrcRegisterInd.File;
+ index = inst->FullSrcRegisters[i].SrcRegisterInd.Index;
+ check_register_usage(
+ ctx,
+ file,
+ index,
+ "indirect",
+ FALSE );
+ if (!(file == TGSI_FILE_ADDRESS || file == TGSI_FILE_LOOP) || index != 0) {
+ report_warning(ctx, "Indirect register neither ADDR[0] nor LOOP[0]");
+ }
+ }
+ }
+
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_BGNFOR:
+ case TGSI_OPCODE_ENDFOR:
+ if (inst->FullDstRegisters[0].DstRegister.File != TGSI_FILE_LOOP ||
+ inst->FullDstRegisters[0].DstRegister.Index != 0) {
+ report_error(ctx, "Destination register must be LOOP[0]");
+ }
+ break;
+ }
+
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_BGNFOR:
+ if (inst->FullSrcRegisters[0].SrcRegister.File != TGSI_FILE_CONSTANT &&
+ inst->FullSrcRegisters[0].SrcRegister.File != TGSI_FILE_IMMEDIATE) {
+ report_error(ctx, "Source register file must be either CONST or IMM");
+ }
+ break;
+ }
+
+ ctx->num_instructions++;
+
+ return TRUE;
+}
+
+static boolean
+iter_declaration(
+ struct tgsi_iterate_context *iter,
+ struct tgsi_full_declaration *decl )
+{
+ struct sanity_check_ctx *ctx = (struct sanity_check_ctx *) iter;
+ uint file;
+ uint i;
+
+ /* No declarations allowed after the first instruction.
+ */
+ if (ctx->num_instructions > 0)
+ report_error( ctx, "Instruction expected but declaration found" );
+
+ /* Check registers' validity.
+ * Mark the registers as declared.
+ */
+ file = decl->Declaration.File;
+ if (!check_file_name( ctx, file ))
+ return TRUE;
+ for (i = decl->DeclarationRange.First; i <= decl->DeclarationRange.Last; i++) {
+ if (is_register_declared( ctx, file, i ))
+ report_error( ctx, "%s[%u]: The same register declared more than once", file_names[file], i );
+ ctx->regs_decl[file][i / BITS_IN_REG_FLAG] |= (1 << (i % BITS_IN_REG_FLAG));
+ }
+
+ return TRUE;
+}
+
+static boolean
+iter_immediate(
+ struct tgsi_iterate_context *iter,
+ struct tgsi_full_immediate *imm )
+{
+ struct sanity_check_ctx *ctx = (struct sanity_check_ctx *) iter;
+
+ assert( ctx->num_imms < MAX_REGISTERS );
+
+ /* No immediates allowed after the first instruction.
+ */
+ if (ctx->num_instructions > 0)
+ report_error( ctx, "Instruction expected but immediate found" );
+
+ /* Mark the register as declared.
+ */
+ ctx->regs_decl[TGSI_FILE_IMMEDIATE][ctx->num_imms / BITS_IN_REG_FLAG] |= (1 << (ctx->num_imms % BITS_IN_REG_FLAG));
+ ctx->num_imms++;
+
+ /* Check data type validity.
+ */
+ if (imm->Immediate.DataType != TGSI_IMM_FLOAT32) {
+ report_error( ctx, "(%u): Invalid immediate data type", imm->Immediate.DataType );
+ return TRUE;
+ }
+
+ return TRUE;
+}
+
+static boolean
+epilog(
+ struct tgsi_iterate_context *iter )
+{
+ struct sanity_check_ctx *ctx = (struct sanity_check_ctx *) iter;
+ uint file;
+
+ /* There must be an END instruction somewhere.
+ */
+ if (ctx->index_of_END == ~0) {
+ report_error( ctx, "Missing END instruction" );
+ }
+
+ /* Check if all declared registers were used.
+ */
+ for (file = TGSI_FILE_NULL; file < TGSI_FILE_COUNT; file++) {
+ uint i;
+
+ for (i = 0; i < MAX_REGISTERS; i++) {
+ if (is_register_declared( ctx, file, i ) && !is_register_used( ctx, file, i ) && !ctx->regs_ind_used[file]) {
+ report_warning( ctx, "%s[%u]: Register never used", file_names[file], i );
+ }
+ }
+ }
+
+ /* Print totals, if any.
+ */
+ if (ctx->errors || ctx->warnings)
+ debug_printf( "%u errors, %u warnings\n", ctx->errors, ctx->warnings );
+
+ return TRUE;
+}
+
+boolean
+tgsi_sanity_check(
+ struct tgsi_token *tokens )
+{
+ struct sanity_check_ctx ctx;
+
+ ctx.iter.prolog = NULL;
+ ctx.iter.iterate_instruction = iter_instruction;
+ ctx.iter.iterate_declaration = iter_declaration;
+ ctx.iter.iterate_immediate = iter_immediate;
+ ctx.iter.epilog = epilog;
+
+ memset( ctx.regs_decl, 0, sizeof( ctx.regs_decl ) );
+ memset( ctx.regs_used, 0, sizeof( ctx.regs_used ) );
+ memset( ctx.regs_ind_used, 0, sizeof( ctx.regs_ind_used ) );
+ ctx.num_imms = 0;
+ ctx.num_instructions = 0;
+ ctx.index_of_END = ~0;
+
+ ctx.errors = 0;
+ ctx.warnings = 0;
+
+ if (!tgsi_iterate_shader( tokens, &ctx.iter ))
+ return FALSE;
+
+ return ctx.errors == 0;
+}
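+
+/* Illustrative usage sketch; 'tokens' is a hypothetical caller-side variable:
+ *
+ *    if (!tgsi_sanity_check(tokens)) {
+ *       debug_printf("rejecting malformed TGSI shader\n");
+ *       return NULL;
+ *    }
+ */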
diff --git a/src/gallium/auxiliary/tgsi/tgsi_sanity.h b/src/gallium/auxiliary/tgsi/tgsi_sanity.h
new file mode 100644
index 0000000000..ca45e94c7a
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_sanity.h
@@ -0,0 +1,49 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TGSI_SANITY_H
+#define TGSI_SANITY_H
+
+#include "pipe/p_shader_tokens.h"
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+/* Check the given token stream for errors and common mistakes.
+ * Diagnostic messages are printed out to the debug output.
+ * Returns TRUE if there are no errors; warnings may still have been issued.
+ */
+boolean
+tgsi_sanity_check(
+ struct tgsi_token *tokens );
+
+#if defined __cplusplus
+}
+#endif
+
+#endif /* TGSI_SANITY_H */
diff --git a/src/gallium/auxiliary/tgsi/tgsi_scan.c b/src/gallium/auxiliary/tgsi/tgsi_scan.c
new file mode 100644
index 0000000000..c535788819
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_scan.c
@@ -0,0 +1,256 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ * Copyright 2008 VMware, Inc. All rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * TGSI program scan utility.
+ * Used to determine which registers and instructions are used by a shader.
+ *
+ * Authors: Brian Paul
+ */
+
+
+#include "util/u_math.h"
+#include "tgsi/tgsi_build.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_scan.h"
+
+
+
+
+/**
+ * Scan the given TGSI shader to collect information such as number of
+ * registers used, special instructions used, etc.
+ * \param info  returns the results of the scan
+ */
+void
+tgsi_scan_shader(const struct tgsi_token *tokens,
+ struct tgsi_shader_info *info)
+{
+ uint procType, i;
+ struct tgsi_parse_context parse;
+
+ memset(info, 0, sizeof(*info));
+ for (i = 0; i < TGSI_FILE_COUNT; i++)
+ info->file_max[i] = -1;
+
+ /**
+ ** Setup to begin parsing input shader
+ **/
+ if (tgsi_parse_init( &parse, tokens ) != TGSI_PARSE_OK) {
+ debug_printf("tgsi_parse_init() failed in tgsi_scan_shader()!\n");
+ return;
+ }
+ procType = parse.FullHeader.Processor.Processor;
+ assert(procType == TGSI_PROCESSOR_FRAGMENT ||
+ procType == TGSI_PROCESSOR_VERTEX ||
+ procType == TGSI_PROCESSOR_GEOMETRY);
+
+
+ /**
+ ** Loop over incoming program tokens/instructions
+ */
+ while( !tgsi_parse_end_of_tokens( &parse ) ) {
+
+ info->num_tokens++;
+
+ tgsi_parse_token( &parse );
+
+ switch( parse.FullToken.Token.Type ) {
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ {
+ const struct tgsi_full_instruction *fullinst
+ = &parse.FullToken.FullInstruction;
+
+ assert(fullinst->Instruction.Opcode < TGSI_OPCODE_LAST);
+ info->opcode_count[fullinst->Instruction.Opcode]++;
+
+ /* special case: scan fragment shaders for use of the fog
+ * input/attribute. The X component is fog, the Y component
+ * is the front/back-face flag.
+ */
+ if (procType == TGSI_PROCESSOR_FRAGMENT) {
+ uint i;
+ for (i = 0; i < fullinst->Instruction.NumSrcRegs; i++) {
+ const struct tgsi_full_src_register *src =
+ &fullinst->FullSrcRegisters[i];
+ if (src->SrcRegister.File == TGSI_FILE_INPUT) {
+ const int ind = src->SrcRegister.Index;
+ if (info->input_semantic_name[ind] == TGSI_SEMANTIC_FOG) {
+ if (src->SrcRegister.SwizzleX == TGSI_SWIZZLE_X) {
+ info->uses_fogcoord = TRUE;
+ }
+ else if (src->SrcRegister.SwizzleX == TGSI_SWIZZLE_Y) {
+ info->uses_frontfacing = TRUE;
+ }
+ }
+ }
+ }
+ }
+ }
+ break;
+
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ {
+ const struct tgsi_full_declaration *fulldecl
+ = &parse.FullToken.FullDeclaration;
+ const uint file = fulldecl->Declaration.File;
+ uint reg;
+ for (reg = fulldecl->DeclarationRange.First;
+ reg <= fulldecl->DeclarationRange.Last;
+ reg++) {
+
+ /* only first 32 regs will appear in this bitfield */
+ info->file_mask[file] |= (1 << reg);
+ info->file_count[file]++;
+ info->file_max[file] = MAX2(info->file_max[file], (int)reg);
+
+ if (file == TGSI_FILE_INPUT) {
+ info->input_semantic_name[reg] = (ubyte)fulldecl->Semantic.SemanticName;
+ info->input_semantic_index[reg] = (ubyte)fulldecl->Semantic.SemanticIndex;
+ info->num_inputs++;
+ }
+ else if (file == TGSI_FILE_OUTPUT) {
+ info->output_semantic_name[reg] = (ubyte)fulldecl->Semantic.SemanticName;
+ info->output_semantic_index[reg] = (ubyte)fulldecl->Semantic.SemanticIndex;
+ info->num_outputs++;
+ }
+
+ /* special case */
+ if (procType == TGSI_PROCESSOR_FRAGMENT &&
+ file == TGSI_FILE_OUTPUT &&
+ fulldecl->Semantic.SemanticName == TGSI_SEMANTIC_POSITION) {
+ info->writes_z = TRUE;
+ }
+ }
+ }
+ break;
+
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ {
+ uint reg = info->immediate_count++;
+ uint file = TGSI_FILE_IMMEDIATE;
+
+ info->file_mask[file] |= (1 << reg);
+ info->file_count[file]++;
+ info->file_max[file] = MAX2(info->file_max[file], (int)reg);
+ }
+ break;
+
+ default:
+ assert( 0 );
+ }
+ }
+
+ info->uses_kill = (info->opcode_count[TGSI_OPCODE_KIL] ||
+ info->opcode_count[TGSI_OPCODE_KILP]);
+
+ tgsi_parse_free (&parse);
+}
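+
+/* Illustrative usage sketch; the surrounding variables are hypothetical:
+ *
+ *    struct tgsi_shader_info info;
+ *    tgsi_scan_shader(tokens, &info);
+ *    num_temps = info.file_max[TGSI_FILE_TEMPORARY] + 1;
+ *    if (info.uses_kill)
+ *       ;  // e.g. disable early depth testing
+ *    if (info.writes_z)
+ *       ;  // the fragment shader replaces the interpolated Z
+ */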
+
+
+
+/**
+ * Check if the given shader is a "passthrough" shader consisting of only
+ * MOV instructions of the form: MOV OUT[n], IN[n]
+ *
+ */
+boolean
+tgsi_is_passthrough_shader(const struct tgsi_token *tokens)
+{
+ struct tgsi_parse_context parse;
+
+ /**
+ ** Setup to begin parsing input shader
+ **/
+ if (tgsi_parse_init(&parse, tokens) != TGSI_PARSE_OK) {
+ debug_printf("tgsi_parse_init() failed in tgsi_is_passthrough_shader()!\n");
+ return FALSE;
+ }
+
+ /**
+ ** Loop over incoming program tokens/instructions
+ */
+ while (!tgsi_parse_end_of_tokens(&parse)) {
+
+ tgsi_parse_token(&parse);
+
+ switch (parse.FullToken.Token.Type) {
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ {
+ struct tgsi_full_instruction *fullinst =
+ &parse.FullToken.FullInstruction;
+ const struct tgsi_full_src_register *src =
+ &fullinst->FullSrcRegisters[0];
+ const struct tgsi_full_dst_register *dst =
+ &fullinst->FullDstRegisters[0];
+
+ /* Do a whole bunch of checks for a simple move */
+ if (fullinst->Instruction.Opcode != TGSI_OPCODE_MOV ||
+ src->SrcRegister.File != TGSI_FILE_INPUT ||
+ dst->DstRegister.File != TGSI_FILE_OUTPUT ||
+ src->SrcRegister.Index != dst->DstRegister.Index ||
+
+ src->SrcRegister.Negate ||
+ src->SrcRegisterExtMod.Negate ||
+ src->SrcRegisterExtMod.Absolute ||
+ src->SrcRegisterExtMod.Scale2X ||
+ src->SrcRegisterExtMod.Bias ||
+ src->SrcRegisterExtMod.Complement ||
+
+ src->SrcRegister.SwizzleX != TGSI_SWIZZLE_X ||
+ src->SrcRegister.SwizzleY != TGSI_SWIZZLE_Y ||
+ src->SrcRegister.SwizzleZ != TGSI_SWIZZLE_Z ||
+ src->SrcRegister.SwizzleW != TGSI_SWIZZLE_W ||
+
+ src->SrcRegisterExtSwz.ExtSwizzleX != TGSI_EXTSWIZZLE_X ||
+ src->SrcRegisterExtSwz.ExtSwizzleY != TGSI_EXTSWIZZLE_Y ||
+ src->SrcRegisterExtSwz.ExtSwizzleZ != TGSI_EXTSWIZZLE_Z ||
+ src->SrcRegisterExtSwz.ExtSwizzleW != TGSI_EXTSWIZZLE_W ||
+
+ dst->DstRegister.WriteMask != TGSI_WRITEMASK_XYZW)
+ {
+ tgsi_parse_free(&parse);
+ return FALSE;
+ }
+ }
+ break;
+
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ /* fall-through */
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ /* fall-through */
+ default:
+ ; /* no-op */
+ }
+ }
+
+ tgsi_parse_free(&parse);
+
+ /* if we get here, it's a pass-through shader */
+ return TRUE;
+}
diff --git a/src/gallium/auxiliary/tgsi/tgsi_scan.h b/src/gallium/auxiliary/tgsi/tgsi_scan.h
new file mode 100644
index 0000000000..2c1a75bc81
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_scan.h
@@ -0,0 +1,75 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TGSI_SCAN_H
+#define TGSI_SCAN_H
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_state.h"
+#include "pipe/p_shader_tokens.h"
+
+
+/**
+ * Shader summary info
+ */
+struct tgsi_shader_info
+{
+ uint num_tokens;
+
+ ubyte num_inputs;
+ ubyte num_outputs;
+ ubyte input_semantic_name[PIPE_MAX_SHADER_INPUTS]; /**< TGSI_SEMANTIC_x */
+ ubyte input_semantic_index[PIPE_MAX_SHADER_INPUTS];
+ ubyte output_semantic_name[PIPE_MAX_SHADER_OUTPUTS]; /**< TGSI_SEMANTIC_x */
+ ubyte output_semantic_index[PIPE_MAX_SHADER_OUTPUTS];
+
+ uint file_mask[TGSI_FILE_COUNT]; /**< bitmask of declared registers */
+ uint file_count[TGSI_FILE_COUNT]; /**< number of declared registers */
+ int file_max[TGSI_FILE_COUNT]; /**< highest index of declared registers */
+
+ uint immediate_count; /**< number of immediates declared */
+
+ uint opcode_count[TGSI_OPCODE_LAST]; /**< opcode histogram */
+
+ boolean writes_z; /**< does fragment shader write Z value? */
+ boolean uses_kill; /**< KIL or KILP instruction used? */
+ boolean uses_fogcoord; /**< fragment shader uses fog coord? */
+ boolean uses_frontfacing; /**< fragment shader uses front/back-face flag? */
+};
+
+
+extern void
+tgsi_scan_shader(const struct tgsi_token *tokens,
+ struct tgsi_shader_info *info);
+
+
+extern boolean
+tgsi_is_passthrough_shader(const struct tgsi_token *tokens);
+
+
+#endif /* TGSI_SCAN_H */
diff --git a/src/gallium/auxiliary/tgsi/tgsi_sse2.c b/src/gallium/auxiliary/tgsi/tgsi_sse2.c
new file mode 100644
index 0000000000..3cdf8b9f35
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_sse2.c
@@ -0,0 +1,2985 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "pipe/p_config.h"
+
+#if defined(PIPE_ARCH_X86)
+
+#include "util/u_debug.h"
+#include "pipe/p_shader_tokens.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#if defined(PIPE_ARCH_SSE)
+#include "util/u_sse.h"
+#endif
+#include "tgsi/tgsi_info.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_util.h"
+#include "tgsi_exec.h"
+#include "tgsi_sse2.h"
+
+#include "rtasm/rtasm_x86sse.h"
+
+/* HIGH_PRECISION: refine 1/sqrt() with a Newton-Raphson step (see emit_rsqrt()).
+ *
+ * Enabling this costs about 100 fps (close to 10%) in gears:
+ */
+#define HIGH_PRECISION 1
+
+#define FAST_MATH 1
+
+
+#define FOR_EACH_CHANNEL( CHAN )\
+ for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)
+
+#define IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
+ ((INST).FullDstRegisters[0].DstRegister.WriteMask & (1 << (CHAN)))
+
+#define IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
+ if (IS_DST0_CHANNEL_ENABLED( INST, CHAN ))
+
+#define FOR_EACH_DST0_ENABLED_CHANNEL( INST, CHAN )\
+ FOR_EACH_CHANNEL( CHAN )\
+ IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )
+
+#define CHAN_X 0
+#define CHAN_Y 1
+#define CHAN_Z 2
+#define CHAN_W 3
+
+#define TEMP_ONE_I TGSI_EXEC_TEMP_ONE_I
+#define TEMP_ONE_C TGSI_EXEC_TEMP_ONE_C
+
+#define TEMP_R0 TGSI_EXEC_TEMP_R0
+#define TEMP_ADDR TGSI_EXEC_TEMP_ADDR
+#define TEMP_EXEC_MASK_I TGSI_EXEC_MASK_I
+#define TEMP_EXEC_MASK_C TGSI_EXEC_MASK_C
+
+
+/**
+ * X86 utility functions.
+ */
+
+static struct x86_reg
+make_xmm(
+ unsigned xmm )
+{
+ return x86_make_reg(
+ file_XMM,
+ (enum x86_reg_name) xmm );
+}
+
+/**
+ * X86 register mapping helpers.
+ */
+
+static struct x86_reg
+get_const_base( void )
+{
+ return x86_make_reg(
+ file_REG32,
+ reg_AX );
+}
+
+static struct x86_reg
+get_machine_base( void )
+{
+ return x86_make_reg(
+ file_REG32,
+ reg_CX );
+}
+
+static struct x86_reg
+get_input_base( void )
+{
+ return x86_make_disp(
+ get_machine_base(),
+ Offset(struct tgsi_exec_machine, Inputs) );
+}
+
+static struct x86_reg
+get_output_base( void )
+{
+ return x86_make_disp(
+ get_machine_base(),
+ Offset(struct tgsi_exec_machine, Outputs) );
+}
+
+static struct x86_reg
+get_temp_base( void )
+{
+ return x86_make_disp(
+ get_machine_base(),
+ Offset(struct tgsi_exec_machine, Temps) );
+}
+
+static struct x86_reg
+get_coef_base( void )
+{
+ return x86_make_reg(
+ file_REG32,
+ reg_BX );
+}
+
+static struct x86_reg
+get_sampler_base( void )
+{
+ return x86_make_reg(
+ file_REG32,
+ reg_DI );
+}
+
+static struct x86_reg
+get_immediate_base( void )
+{
+ return x86_make_reg(
+ file_REG32,
+ reg_DX );
+}
+
+
+/**
+ * Data access helpers.
+ */
+
+
+static struct x86_reg
+get_immediate(
+ unsigned vec,
+ unsigned chan )
+{
+ return x86_make_disp(
+ get_immediate_base(),
+ (vec * 4 + chan) * 4 );
+}
+
+static struct x86_reg
+get_const(
+ unsigned vec,
+ unsigned chan )
+{
+ return x86_make_disp(
+ get_const_base(),
+ (vec * 4 + chan) * 4 );
+}
+
+static struct x86_reg
+get_sampler_ptr(
+ unsigned unit )
+{
+ return x86_make_disp(
+ get_sampler_base(),
+ unit * sizeof( struct tgsi_sampler * ) );
+}
+
+static struct x86_reg
+get_input(
+ unsigned vec,
+ unsigned chan )
+{
+ return x86_make_disp(
+ get_input_base(),
+ (vec * 4 + chan) * 16 );
+}
+
+static struct x86_reg
+get_output(
+ unsigned vec,
+ unsigned chan )
+{
+ return x86_make_disp(
+ get_output_base(),
+ (vec * 4 + chan) * 16 );
+}
+
+static struct x86_reg
+get_temp(
+ unsigned vec,
+ unsigned chan )
+{
+ return x86_make_disp(
+ get_temp_base(),
+ (vec * 4 + chan) * 16 );
+}
+
+static struct x86_reg
+get_coef(
+ unsigned vec,
+ unsigned chan,
+ unsigned member )
+{
+ return x86_make_disp(
+ get_coef_base(),
+ ((vec * 3 + member) * 4 + chan) * 4 );
+}
+
+
+static void
+emit_ret(
+ struct x86_function *func )
+{
+ x86_ret( func );
+}
+
+
+/**
+ * Data fetch helpers.
+ */
+
+/**
+ * Copy a shader constant to xmm register
+ * \param xmm the destination xmm register
+ * \param vec the src const buffer index
+ * \param chan src channel to fetch (X, Y, Z or W)
+ */
+static void
+emit_const(
+ struct x86_function *func,
+ uint xmm,
+ int vec,
+ uint chan,
+ uint indirect,
+ uint indirectFile,
+ int indirectIndex )
+{
+ if (indirect) {
+ /* 'vec' is the offset from the address register's value.
+ * We're loading CONST[ADDR+vec] into an xmm register.
+ */
+ struct x86_reg r0 = get_immediate_base();
+ struct x86_reg r1 = get_coef_base();
+ uint i;
+
+ assert( indirectFile == TGSI_FILE_ADDRESS );
+ assert( indirectIndex == 0 );
+ assert( r0.mod == mod_REG );
+ assert( r1.mod == mod_REG );
+
+ x86_push( func, r0 );
+ x86_push( func, r1 );
+
+ /*
+ * Loop over the four pixels or vertices in the quad.
+ * Get the value of the address (offset) register for pixel/vertex[i],
+ * add it to the src offset and index into the constant buffer.
+ * Note that we're working on SOA data.
+ * If any of the pixel/vertex execution channels are unused their
+ * values will be garbage. It's very important that we don't use
+ * those garbage values as indexes into the constant buffer since
+ * that'll cause segfaults.
+ * The solution is to bitwise-AND the offset with the execution mask
+ * register whose values are either 0 or ~0.
+ * The caller must setup the execution mask register to indicate
+ * which channels are valid/alive before running the shader.
+ * The execution mask will also figure into loops and conditionals
+ * someday.
+ */
+ for (i = 0; i < QUAD_SIZE; i++) {
+ /* r1 = address register[i] */
+ x86_mov( func, r1, x86_make_disp( get_temp( TEMP_ADDR, CHAN_X ), i * 4 ) );
+ /* r0 = execution mask[i] */
+ x86_mov( func, r0, x86_make_disp( get_temp( TEMP_EXEC_MASK_I, TEMP_EXEC_MASK_C ), i * 4 ) );
+ /* r1 = r1 & r0 */
+ x86_and( func, r1, r0 );
+ /* r0 = 'vec', the offset */
+ x86_lea( func, r0, get_const( vec, chan ) );
+
+ /* Quick hack to multiply r1 by 16 -- need to add SHL to rtasm.
+ */
+ x86_add( func, r1, r1 );
+ x86_add( func, r1, r1 );
+ x86_add( func, r1, r1 );
+ x86_add( func, r1, r1 );
+
+ x86_add( func, r0, r1 ); /* r0 = r0 + r1 */
+ x86_mov( func, r1, x86_deref( r0 ) );
+ x86_mov( func, x86_make_disp( get_temp( TEMP_R0, CHAN_X ), i * 4 ), r1 );
+ }
+
+ x86_pop( func, r1 );
+ x86_pop( func, r0 );
+
+ sse_movaps(
+ func,
+ make_xmm( xmm ),
+ get_temp( TEMP_R0, CHAN_X ) );
+ }
+ else {
+ /* 'vec' is the index into the constant buffer, i.e. CONST[vec] */
+ assert( vec >= 0 );
+
+ sse_movss(
+ func,
+ make_xmm( xmm ),
+ get_const( vec, chan ) );
+ sse_shufps(
+ func,
+ make_xmm( xmm ),
+ make_xmm( xmm ),
+ SHUF( 0, 0, 0, 0 ) );
+ }
+}
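+
+/* The indirect path above computes, per lane (scalar sketch; 'const_buf'
+ * stands for the buffer that get_const_base() points at):
+ *
+ *    for (i = 0; i < QUAD_SIZE; i++) {
+ *       int addr = ADDR.x[i] & exec_mask[i];           // mask out dead lanes
+ *       R0.x[i] = const_buf[(vec + addr) * 4 + chan];
+ *    }
+ *    xmm = R0.x;                                       // final movaps
+ */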
+
+static void
+emit_immediate(
+ struct x86_function *func,
+ unsigned xmm,
+ unsigned vec,
+ unsigned chan )
+{
+ sse_movss(
+ func,
+ make_xmm( xmm ),
+ get_immediate( vec, chan ) );
+ sse_shufps(
+ func,
+ make_xmm( xmm ),
+ make_xmm( xmm ),
+ SHUF( 0, 0, 0, 0 ) );
+}
+
+
+/**
+ * Copy a shader input to xmm register
+ * \param xmm the destination xmm register
+ * \param vec the src input attrib
+ * \param chan src channel to fetch (X, Y, Z or W)
+ */
+static void
+emit_inputf(
+ struct x86_function *func,
+ unsigned xmm,
+ unsigned vec,
+ unsigned chan )
+{
+ sse_movups(
+ func,
+ make_xmm( xmm ),
+ get_input( vec, chan ) );
+}
+
+/**
+ * Store an xmm register to a shader output
+ * \param xmm the source xmm register
+ * \param vec the dest output attrib
+ * \param chan the dest channel to store (X, Y, Z or W)
+ */
+static void
+emit_output(
+ struct x86_function *func,
+ unsigned xmm,
+ unsigned vec,
+ unsigned chan )
+{
+ sse_movups(
+ func,
+ get_output( vec, chan ),
+ make_xmm( xmm ) );
+}
+
+/**
+ * Copy a shader temporary to xmm register
+ * \param xmm the destination xmm register
+ * \param vec the src temp register
+ * \param chan src channel to fetch (X, Y, Z or W)
+ */
+static void
+emit_tempf(
+ struct x86_function *func,
+ unsigned xmm,
+ unsigned vec,
+ unsigned chan )
+{
+ sse_movaps(
+ func,
+ make_xmm( xmm ),
+ get_temp( vec, chan ) );
+}
+
+/**
+ * Load an xmm register with an input attrib coefficient (a0, dadx or dady)
+ * \param xmm the destination xmm register
+ * \param vec the src input/attribute coefficient index
+ * \param chan src channel to fetch (X, Y, Z or W)
+ * \param member 0=a0, 1=dadx, 2=dady
+ */
+static void
+emit_coef(
+ struct x86_function *func,
+ unsigned xmm,
+ unsigned vec,
+ unsigned chan,
+ unsigned member )
+{
+ sse_movss(
+ func,
+ make_xmm( xmm ),
+ get_coef( vec, chan, member ) );
+ sse_shufps(
+ func,
+ make_xmm( xmm ),
+ make_xmm( xmm ),
+ SHUF( 0, 0, 0, 0 ) );
+}
+
+/**
+ * Data store helpers.
+ */
+
+static void
+emit_inputs(
+ struct x86_function *func,
+ unsigned xmm,
+ unsigned vec,
+ unsigned chan )
+{
+ sse_movups(
+ func,
+ get_input( vec, chan ),
+ make_xmm( xmm ) );
+}
+
+static void
+emit_temps(
+ struct x86_function *func,
+ unsigned xmm,
+ unsigned vec,
+ unsigned chan )
+{
+ sse_movaps(
+ func,
+ get_temp( vec, chan ),
+ make_xmm( xmm ) );
+}
+
+static void
+emit_addrs(
+ struct x86_function *func,
+ unsigned xmm,
+ unsigned vec,
+ unsigned chan )
+{
+ assert( vec == 0 );
+
+ emit_temps(
+ func,
+ xmm,
+ vec + TGSI_EXEC_TEMP_ADDR,
+ chan );
+}
+
+/**
+ * Coefficient fetch helpers.
+ */
+
+static void
+emit_coef_a0(
+ struct x86_function *func,
+ unsigned xmm,
+ unsigned vec,
+ unsigned chan )
+{
+ emit_coef(
+ func,
+ xmm,
+ vec,
+ chan,
+ 0 );
+}
+
+static void
+emit_coef_dadx(
+ struct x86_function *func,
+ unsigned xmm,
+ unsigned vec,
+ unsigned chan )
+{
+ emit_coef(
+ func,
+ xmm,
+ vec,
+ chan,
+ 1 );
+}
+
+static void
+emit_coef_dady(
+ struct x86_function *func,
+ unsigned xmm,
+ unsigned vec,
+ unsigned chan )
+{
+ emit_coef(
+ func,
+ xmm,
+ vec,
+ chan,
+ 2 );
+}
+
+/**
+ * Function call helpers.
+ */
+
+/**
+ * NOTE: With gcc, if the called function uses SSE intrinsics, it must be
+ * declared with __attribute__((force_align_arg_pointer)), because we do not
+ * guarantee that the stack pointer is 16-byte aligned as SSE expects.
+ */
+static void
+emit_func_call(
+ struct x86_function *func,
+ unsigned xmm_save_mask,
+ const struct x86_reg *arg,
+ unsigned nr_args,
+ void (PIPE_CDECL *code)() )
+{
+ struct x86_reg ecx = x86_make_reg( file_REG32, reg_CX );
+ unsigned i, n;
+
+ x86_push(
+ func,
+ x86_make_reg( file_REG32, reg_AX) );
+ x86_push(
+ func,
+ x86_make_reg( file_REG32, reg_CX) );
+ x86_push(
+ func,
+ x86_make_reg( file_REG32, reg_DX) );
+
+ /* Store XMM regs to the stack
+ */
+ for(i = 0, n = 0; i < 8; ++i)
+ if(xmm_save_mask & (1 << i))
+ ++n;
+
+ x86_sub_imm(
+ func,
+ x86_make_reg( file_REG32, reg_SP ),
+ n*16);
+
+ for(i = 0, n = 0; i < 8; ++i)
+ if(xmm_save_mask & (1 << i)) {
+ sse_movups(
+ func,
+ x86_make_disp( x86_make_reg( file_REG32, reg_SP ), n*16 ),
+ make_xmm( i ) );
+ ++n;
+ }
+
+ for (i = 0; i < nr_args; i++) {
+ /* Load the address of the buffer we use for passing arguments and
+ * receiving results:
+ */
+ x86_lea(
+ func,
+ ecx,
+ arg[i] );
+
+ /* Push actual function arguments (currently just the pointer to
+ * the buffer above), and call the function:
+ */
+ x86_push( func, ecx );
+ }
+
+ x86_mov_reg_imm( func, ecx, (unsigned long) code );
+ x86_call( func, ecx );
+
+ /* Pop the arguments (or just add an immediate to esp)
+ */
+ for (i = 0; i < nr_args; i++) {
+ x86_pop(func, ecx );
+ }
+
+ /* Pop the saved XMM regs:
+ */
+ for(i = 0, n = 0; i < 8; ++i)
+ if(xmm_save_mask & (1 << i)) {
+ sse_movups(
+ func,
+ make_xmm( i ),
+ x86_make_disp( x86_make_reg( file_REG32, reg_SP ), n*16 ) );
+ ++n;
+ }
+
+ x86_add_imm(
+ func,
+ x86_make_reg( file_REG32, reg_SP ),
+ n*16);
+
+ /* Restore GP registers in a reverse order.
+ */
+ x86_pop(
+ func,
+ x86_make_reg( file_REG32, reg_DX) );
+ x86_pop(
+ func,
+ x86_make_reg( file_REG32, reg_CX) );
+ x86_pop(
+ func,
+ x86_make_reg( file_REG32, reg_AX) );
+}
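+
+/* Roughly, the sequence emitted above is (sketch, Intel syntax):
+ *
+ *    push eax ; push ecx ; push edx
+ *    sub  esp, 16*n                   ; n = number of xmm regs to save
+ *    movups [esp+16*k], xmm_k         ; spill each saved xmm register
+ *    lea  ecx, [arg_i] ; push ecx     ; once per argument
+ *    mov  ecx, code ; call ecx
+ *    pop  ecx                         ; once per argument
+ *    movups xmm_k, [esp+16*k]
+ *    add  esp, 16*n
+ *    pop  edx ; pop ecx ; pop eax
+ */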
+
+static void
+emit_func_call_dst_src1(
+ struct x86_function *func,
+ unsigned xmm_save,
+ unsigned xmm_dst,
+ unsigned xmm_src0,
+ void (PIPE_CDECL *code)() )
+{
+ struct x86_reg store = get_temp( TEMP_R0, 0 );
+ unsigned xmm_mask = ((1 << xmm_save) - 1) & ~(1 << xmm_dst);
+
+ /* Store our input parameters (in xmm regs) to the buffer we use
+ * for passing arguments. We will pass a pointer to this buffer as
+ * the actual function argument.
+ */
+ sse_movaps(
+ func,
+ store,
+ make_xmm( xmm_src0 ) );
+
+ emit_func_call( func,
+ xmm_mask,
+ &store,
+ 1,
+ code );
+
+ sse_movaps(
+ func,
+ make_xmm( xmm_dst ),
+ store );
+}
+
+
+static void
+emit_func_call_dst_src2(
+ struct x86_function *func,
+ unsigned xmm_save,
+ unsigned xmm_dst,
+ unsigned xmm_src0,
+ unsigned xmm_src1,
+ void (PIPE_CDECL *code)() )
+{
+ struct x86_reg store = get_temp( TEMP_R0, 0 );
+ unsigned xmm_mask = ((1 << xmm_save) - 1) & ~(1 << xmm_dst);
+
+ /* Store two inputs to parameter buffer.
+ */
+ sse_movaps(
+ func,
+ store,
+ make_xmm( xmm_src0 ) );
+
+ sse_movaps(
+ func,
+ x86_make_disp( store, 4 * sizeof(float) ),
+ make_xmm( xmm_src1 ) );
+
+
+ /* Emit the call
+ */
+ emit_func_call( func,
+ xmm_mask,
+ &store,
+ 1,
+ code );
+
+ /* Retrieve the results:
+ */
+ sse_movaps(
+ func,
+ make_xmm( xmm_dst ),
+ store );
+}
+
+
+
+
+
+#if defined(PIPE_ARCH_SSE)
+
+/*
+ * Fast SSE2 implementation of special math functions.
+ */
+
+#define POLY0(x, c0) _mm_set1_ps(c0)
+#define POLY1(x, c0, c1) _mm_add_ps(_mm_mul_ps(POLY0(x, c1), x), _mm_set1_ps(c0))
+#define POLY2(x, c0, c1, c2) _mm_add_ps(_mm_mul_ps(POLY1(x, c1, c2), x), _mm_set1_ps(c0))
+#define POLY3(x, c0, c1, c2, c3) _mm_add_ps(_mm_mul_ps(POLY2(x, c1, c2, c3), x), _mm_set1_ps(c0))
+#define POLY4(x, c0, c1, c2, c3, c4) _mm_add_ps(_mm_mul_ps(POLY3(x, c1, c2, c3, c4), x), _mm_set1_ps(c0))
+#define POLY5(x, c0, c1, c2, c3, c4, c5) _mm_add_ps(_mm_mul_ps(POLY4(x, c1, c2, c3, c4, c5), x), _mm_set1_ps(c0))
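+
+/* POLYn(x, c0, ..., cn) evaluates the degree-n polynomial
+ * c0 + c1*x + ... + cn*x^n in Horner form, using only _mm_set1_ps,
+ * _mm_mul_ps and _mm_add_ps.
+ */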
+
+#define EXP_POLY_DEGREE 3
+#define LOG_POLY_DEGREE 5
+
+/**
+ * See http://www.devmaster.net/forums/showthread.php?p=43580
+ */
+static INLINE __m128
+exp2f4(__m128 x)
+{
+ __m128i ipart;
+ __m128 fpart, expipart, expfpart;
+
+ x = _mm_min_ps(x, _mm_set1_ps( 129.00000f));
+ x = _mm_max_ps(x, _mm_set1_ps(-126.99999f));
+
+ /* ipart = int(x - 0.5) */
+ ipart = _mm_cvtps_epi32(_mm_sub_ps(x, _mm_set1_ps(0.5f)));
+
+ /* fpart = x - ipart */
+ fpart = _mm_sub_ps(x, _mm_cvtepi32_ps(ipart));
+
+ /* expipart = (float) (1 << ipart) */
+ expipart = _mm_castsi128_ps(_mm_slli_epi32(_mm_add_epi32(ipart, _mm_set1_epi32(127)), 23));
+
+ /* minimax polynomial fit of 2**x, in range [-0.5, 0.5[ */
+#if EXP_POLY_DEGREE == 5
+ expfpart = POLY5(fpart, 9.9999994e-1f, 6.9315308e-1f, 2.4015361e-1f, 5.5826318e-2f, 8.9893397e-3f, 1.8775767e-3f);
+#elif EXP_POLY_DEGREE == 4
+ expfpart = POLY4(fpart, 1.0000026f, 6.9300383e-1f, 2.4144275e-1f, 5.2011464e-2f, 1.3534167e-2f);
+#elif EXP_POLY_DEGREE == 3
+ expfpart = POLY3(fpart, 9.9992520e-1f, 6.9583356e-1f, 2.2606716e-1f, 7.8024521e-2f);
+#elif EXP_POLY_DEGREE == 2
+ expfpart = POLY2(fpart, 1.0017247f, 6.5763628e-1f, 3.3718944e-1f);
+#else
+#error
+#endif
+
+ return _mm_mul_ps(expipart, expfpart);
+}
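+
+/* Scalar sketch of the range reduction used above (names illustrative only):
+ *
+ *    i = round(x - 0.5);               // integer part; f lands in [-0.5, 0.5)
+ *    f = x - i;
+ *    pow2i = bits((i + 127) << 23);    // exact power of two via the exponent field
+ *    return pow2i * poly(f);           // poly() = minimax fit of 2^f
+ */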
+
+
+/**
+ * See http://www.devmaster.net/forums/showthread.php?p=43580
+ */
+static INLINE __m128
+log2f4(__m128 x)
+{
+ __m128i expmask = _mm_set1_epi32(0x7f800000);
+ __m128i mantmask = _mm_set1_epi32(0x007fffff);
+ __m128 one = _mm_set1_ps(1.0f);
+
+ __m128i i = _mm_castps_si128(x);
+
+ /* exp = (float) exponent(x) */
+ __m128 exp = _mm_cvtepi32_ps(_mm_sub_epi32(_mm_srli_epi32(_mm_and_si128(i, expmask), 23), _mm_set1_epi32(127)));
+
+ /* mant = (float) mantissa(x) */
+ __m128 mant = _mm_or_ps(_mm_castsi128_ps(_mm_and_si128(i, mantmask)), one);
+
+ __m128 logmant;
+
+ /* Minimax polynomial fit of log2(x)/(x - 1), for x in range [1, 2[
+ * These coefficients can be generated with
+ * http://www.boost.org/doc/libs/1_36_0/libs/math/doc/sf_and_dist/html/math_toolkit/toolkit/internals2/minimax.html
+ */
+#if LOG_POLY_DEGREE == 6
+ logmant = POLY5(mant, 3.11578814719469302614f, -3.32419399085241980044f, 2.59883907202499966007f, -1.23152682416275988241f, 0.318212422185251071475f, -0.0344359067839062357313f);
+#elif LOG_POLY_DEGREE == 5
+ logmant = POLY4(mant, 2.8882704548164776201f, -2.52074962577807006663f, 1.48116647521213171641f, -0.465725644288844778798f, 0.0596515482674574969533f);
+#elif LOG_POLY_DEGREE == 4
+ logmant = POLY3(mant, 2.61761038894603480148f, -1.75647175389045657003f, 0.688243882994381274313f, -0.107254423828329604454f);
+#elif LOG_POLY_DEGREE == 3
+ logmant = POLY2(mant, 2.28330284476918490682f, -1.04913055217340124191f, 0.204446009836232697516f);
+#else
+#error
+#endif
+
+ /* This effectively increases the polynomial degree by one, but ensures that log2(1) == 0 */
+ logmant = _mm_mul_ps(logmant, _mm_sub_ps(mant, one));
+
+ return _mm_add_ps(logmant, exp);
+}
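+
+/* Scalar sketch of the decomposition used above (names illustrative only):
+ *
+ *    e = exponent(x);                  // bits 23..30, minus the 127 bias
+ *    m = 1.0 | mantissa(x);            // value in [1, 2)
+ *    return e + poly(m) * (m - 1);     // poly() fits log2(m) / (m - 1)
+ */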
+
+
+static INLINE __m128
+powf4(__m128 x, __m128 y)
+{
+ return exp2f4(_mm_mul_ps(log2f4(x), y));
+}
+
+#endif /* PIPE_ARCH_SSE */
+
+
+
+/**
+ * Low-level instruction translators.
+ */
+
+static void
+emit_abs(
+ struct x86_function *func,
+ unsigned xmm )
+{
+ sse_andps(
+ func,
+ make_xmm( xmm ),
+ get_temp(
+ TGSI_EXEC_TEMP_7FFFFFFF_I,
+ TGSI_EXEC_TEMP_7FFFFFFF_C ) );
+}
+
+static void
+emit_add(
+ struct x86_function *func,
+ unsigned xmm_dst,
+ unsigned xmm_src )
+{
+ sse_addps(
+ func,
+ make_xmm( xmm_dst ),
+ make_xmm( xmm_src ) );
+}
+
+static void PIPE_CDECL
+cos4f(
+ float *store )
+{
+ store[0] = cosf( store[0] );
+ store[1] = cosf( store[1] );
+ store[2] = cosf( store[2] );
+ store[3] = cosf( store[3] );
+}
+
+static void
+emit_cos(
+ struct x86_function *func,
+ unsigned xmm_save,
+ unsigned xmm_dst )
+{
+ emit_func_call_dst_src1(
+ func,
+ xmm_save,
+ xmm_dst,
+ xmm_dst,
+ cos4f );
+}
+
+static void PIPE_CDECL
+#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_SSE)
+__attribute__((force_align_arg_pointer))
+#endif
+ex24f(
+ float *store )
+{
+#if defined(PIPE_ARCH_SSE)
+ _mm_store_ps(&store[0], exp2f4( _mm_load_ps(&store[0]) ));
+#else
+ store[0] = util_fast_exp2( store[0] );
+ store[1] = util_fast_exp2( store[1] );
+ store[2] = util_fast_exp2( store[2] );
+ store[3] = util_fast_exp2( store[3] );
+#endif
+}
+
+static void
+emit_ex2(
+ struct x86_function *func,
+ unsigned xmm_save,
+ unsigned xmm_dst )
+{
+ emit_func_call_dst_src1(
+ func,
+ xmm_save,
+ xmm_dst,
+ xmm_dst,
+ ex24f );
+}
+
+static void
+emit_f2it(
+ struct x86_function *func,
+ unsigned xmm )
+{
+ sse2_cvttps2dq(
+ func,
+ make_xmm( xmm ),
+ make_xmm( xmm ) );
+}
+
+static void
+emit_i2f(
+ struct x86_function *func,
+ unsigned xmm )
+{
+ sse2_cvtdq2ps(
+ func,
+ make_xmm( xmm ),
+ make_xmm( xmm ) );
+}
+
+static void PIPE_CDECL
+flr4f(
+ float *store )
+{
+ store[0] = floorf( store[0] );
+ store[1] = floorf( store[1] );
+ store[2] = floorf( store[2] );
+ store[3] = floorf( store[3] );
+}
+
+static void
+emit_flr(
+ struct x86_function *func,
+ unsigned xmm_save,
+ unsigned xmm_dst )
+{
+ emit_func_call_dst_src1(
+ func,
+ xmm_save,
+ xmm_dst,
+ xmm_dst,
+ flr4f );
+}
+
+static void PIPE_CDECL
+frc4f(
+ float *store )
+{
+ store[0] -= floorf( store[0] );
+ store[1] -= floorf( store[1] );
+ store[2] -= floorf( store[2] );
+ store[3] -= floorf( store[3] );
+}
+
+static void
+emit_frc(
+ struct x86_function *func,
+ unsigned xmm_save,
+ unsigned xmm_dst )
+{
+ emit_func_call_dst_src1(
+ func,
+ xmm_save,
+ xmm_dst,
+ xmm_dst,
+ frc4f );
+}
+
+static void PIPE_CDECL
+#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_SSE)
+__attribute__((force_align_arg_pointer))
+#endif
+lg24f(
+ float *store )
+{
+#if defined(PIPE_ARCH_SSE)
+ _mm_store_ps(&store[0], log2f4( _mm_load_ps(&store[0]) ));
+#else
+ store[0] = util_fast_log2( store[0] );
+ store[1] = util_fast_log2( store[1] );
+ store[2] = util_fast_log2( store[2] );
+ store[3] = util_fast_log2( store[3] );
+#endif
+}
+
+static void
+emit_lg2(
+ struct x86_function *func,
+ unsigned xmm_save,
+ unsigned xmm_dst )
+{
+ emit_func_call_dst_src1(
+ func,
+ xmm_save,
+ xmm_dst,
+ xmm_dst,
+ lg24f );
+}
+
+static void
+emit_MOV(
+ struct x86_function *func,
+ unsigned xmm_dst,
+ unsigned xmm_src )
+{
+ sse_movups(
+ func,
+ make_xmm( xmm_dst ),
+ make_xmm( xmm_src ) );
+}
+
+static void
+emit_mul (struct x86_function *func,
+ unsigned xmm_dst,
+ unsigned xmm_src)
+{
+ sse_mulps(
+ func,
+ make_xmm( xmm_dst ),
+ make_xmm( xmm_src ) );
+}
+
+static void
+emit_neg(
+ struct x86_function *func,
+ unsigned xmm )
+{
+ sse_xorps(
+ func,
+ make_xmm( xmm ),
+ get_temp(
+ TGSI_EXEC_TEMP_80000000_I,
+ TGSI_EXEC_TEMP_80000000_C ) );
+}
+
+static void PIPE_CDECL
+#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_SSE)
+__attribute__((force_align_arg_pointer))
+#endif
+pow4f(
+ float *store )
+{
+#if defined(PIPE_ARCH_SSE)
+ _mm_store_ps(&store[0], powf4( _mm_load_ps(&store[0]), _mm_load_ps(&store[4]) ));
+#else
+ store[0] = util_fast_pow( store[0], store[4] );
+ store[1] = util_fast_pow( store[1], store[5] );
+ store[2] = util_fast_pow( store[2], store[6] );
+ store[3] = util_fast_pow( store[3], store[7] );
+#endif
+}
+
+static void
+emit_pow(
+ struct x86_function *func,
+ unsigned xmm_save,
+ unsigned xmm_dst,
+ unsigned xmm_src0,
+ unsigned xmm_src1 )
+{
+ emit_func_call_dst_src2(
+ func,
+ xmm_save,
+ xmm_dst,
+ xmm_src0,
+ xmm_src1,
+ pow4f );
+}
+
+static void
+emit_rcp (
+ struct x86_function *func,
+ unsigned xmm_dst,
+ unsigned xmm_src )
+{
+ /* On Intel CPUs at least, this is only accurate to 12 bits -- not
+ * good enough. Need to either emit a proper divide or use the
+ * iterative technique described below in emit_rsqrt().
+ */
+ sse2_rcpps(
+ func,
+ make_xmm( xmm_dst ),
+ make_xmm( xmm_src ) );
+}
+
+static void PIPE_CDECL
+rnd4f(
+ float *store )
+{
+ store[0] = floorf( store[0] + 0.5f );
+ store[1] = floorf( store[1] + 0.5f );
+ store[2] = floorf( store[2] + 0.5f );
+ store[3] = floorf( store[3] + 0.5f );
+}
+
+static void
+emit_rnd(
+ struct x86_function *func,
+ unsigned xmm_save,
+ unsigned xmm_dst )
+{
+ emit_func_call_dst_src1(
+ func,
+ xmm_save,
+ xmm_dst,
+ xmm_dst,
+ rnd4f );
+}
+
+static void
+emit_rsqrt(
+ struct x86_function *func,
+ unsigned xmm_dst,
+ unsigned xmm_src )
+{
+#if HIGH_PRECISION
+ /* Although rsqrtps() and rcpps() are low precision on some/all SSE
+ * implementations, it is possible to improve their precision at
+ * fairly low cost, using a Newton-Raphson step, as below:
+ *
+ * x1 = 2 * rcpps(a) - a * rcpps(a) * rcpps(a)
+ * x1 = 0.5 * rsqrtps(a) * [3.0 - (a * rsqrtps(a))* rsqrtps(a)]
+ *
+ * See: http://softwarecommunity.intel.com/articles/eng/1818.htm
+ */
+ {
+ struct x86_reg dst = make_xmm( xmm_dst );
+ struct x86_reg src = make_xmm( xmm_src );
+ struct x86_reg tmp0 = make_xmm( 2 );
+ struct x86_reg tmp1 = make_xmm( 3 );
+
+ assert( xmm_dst != xmm_src );
+ assert( xmm_dst != 2 && xmm_dst != 3 );
+ assert( xmm_src != 2 && xmm_src != 3 );
+
+ sse_movaps( func, dst, get_temp( TGSI_EXEC_TEMP_HALF_I, TGSI_EXEC_TEMP_HALF_C ) );
+ sse_movaps( func, tmp0, get_temp( TGSI_EXEC_TEMP_THREE_I, TGSI_EXEC_TEMP_THREE_C ) );
+ sse_rsqrtps( func, tmp1, src );
+ sse_mulps( func, src, tmp1 );
+ sse_mulps( func, dst, tmp1 );
+ sse_mulps( func, src, tmp1 );
+ sse_subps( func, tmp0, src );
+ sse_mulps( func, dst, tmp0 );
+ }
+#else
+ /* On Intel CPUs at least, this is only accurate to 12 bits -- not
+ * good enough.
+ */
+ sse_rsqrtps(
+ func,
+ make_xmm( xmm_dst ),
+ make_xmm( xmm_src ) );
+#endif
+}
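+
+/* Register-level view of the HIGH_PRECISION path above (sketch): with
+ * r = rsqrtps(a), the emitted code computes
+ *
+ *    dst = 0.5 * r * (3.0 - (a * r) * r)
+ *
+ * i.e. one Newton-Raphson step for 1/sqrt(a), roughly doubling the number of
+ * accurate bits of the hardware estimate.
+ */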
+
+static void
+emit_setsign(
+ struct x86_function *func,
+ unsigned xmm )
+{
+ sse_orps(
+ func,
+ make_xmm( xmm ),
+ get_temp(
+ TGSI_EXEC_TEMP_80000000_I,
+ TGSI_EXEC_TEMP_80000000_C ) );
+}
+
+static void PIPE_CDECL
+sgn4f(
+ float *store )
+{
+ store[0] = store[0] < 0.0f ? -1.0f : store[0] > 0.0f ? 1.0f : 0.0f;
+ store[1] = store[1] < 0.0f ? -1.0f : store[1] > 0.0f ? 1.0f : 0.0f;
+ store[2] = store[2] < 0.0f ? -1.0f : store[2] > 0.0f ? 1.0f : 0.0f;
+ store[3] = store[3] < 0.0f ? -1.0f : store[3] > 0.0f ? 1.0f : 0.0f;
+}
+
+static void
+emit_sgn(
+ struct x86_function *func,
+ unsigned xmm_save,
+ unsigned xmm_dst )
+{
+ emit_func_call_dst_src1(
+ func,
+ xmm_save,
+ xmm_dst,
+ xmm_dst,
+ sgn4f );
+}
+
+static void PIPE_CDECL
+sin4f(
+ float *store )
+{
+ store[0] = sinf( store[0] );
+ store[1] = sinf( store[1] );
+ store[2] = sinf( store[2] );
+ store[3] = sinf( store[3] );
+}
+
+static void
+emit_sin (struct x86_function *func,
+ unsigned xmm_save,
+ unsigned xmm_dst)
+{
+ emit_func_call_dst_src1(
+ func,
+ xmm_save,
+ xmm_dst,
+ xmm_dst,
+ sin4f );
+}
+
+static void
+emit_sub(
+ struct x86_function *func,
+ unsigned xmm_dst,
+ unsigned xmm_src )
+{
+ sse_subps(
+ func,
+ make_xmm( xmm_dst ),
+ make_xmm( xmm_src ) );
+}
+
+
+
+
+
+
+
+/**
+ * Register fetch.
+ */
+
+static void
+emit_fetch(
+ struct x86_function *func,
+ unsigned xmm,
+ const struct tgsi_full_src_register *reg,
+ const unsigned chan_index )
+{
+ unsigned swizzle = tgsi_util_get_full_src_register_extswizzle( reg, chan_index );
+
+ switch (swizzle) {
+ case TGSI_EXTSWIZZLE_X:
+ case TGSI_EXTSWIZZLE_Y:
+ case TGSI_EXTSWIZZLE_Z:
+ case TGSI_EXTSWIZZLE_W:
+ switch (reg->SrcRegister.File) {
+ case TGSI_FILE_CONSTANT:
+ emit_const(
+ func,
+ xmm,
+ reg->SrcRegister.Index,
+ swizzle,
+ reg->SrcRegister.Indirect,
+ reg->SrcRegisterInd.File,
+ reg->SrcRegisterInd.Index );
+ break;
+
+ case TGSI_FILE_IMMEDIATE:
+ emit_immediate(
+ func,
+ xmm,
+ reg->SrcRegister.Index,
+ swizzle );
+ break;
+
+ case TGSI_FILE_INPUT:
+ emit_inputf(
+ func,
+ xmm,
+ reg->SrcRegister.Index,
+ swizzle );
+ break;
+
+ case TGSI_FILE_TEMPORARY:
+ emit_tempf(
+ func,
+ xmm,
+ reg->SrcRegister.Index,
+ swizzle );
+ break;
+
+ default:
+ assert( 0 );
+ }
+ break;
+
+ case TGSI_EXTSWIZZLE_ZERO:
+ emit_tempf(
+ func,
+ xmm,
+ TGSI_EXEC_TEMP_00000000_I,
+ TGSI_EXEC_TEMP_00000000_C );
+ break;
+
+ case TGSI_EXTSWIZZLE_ONE:
+ emit_tempf(
+ func,
+ xmm,
+ TEMP_ONE_I,
+ TEMP_ONE_C );
+ break;
+
+ default:
+ assert( 0 );
+ }
+
+ switch( tgsi_util_get_full_src_register_sign_mode( reg, chan_index ) ) {
+ case TGSI_UTIL_SIGN_CLEAR:
+ emit_abs( func, xmm );
+ break;
+
+ case TGSI_UTIL_SIGN_SET:
+ emit_setsign( func, xmm );
+ break;
+
+ case TGSI_UTIL_SIGN_TOGGLE:
+ emit_neg( func, xmm );
+ break;
+
+ case TGSI_UTIL_SIGN_KEEP:
+ break;
+ }
+}
+
+#define FETCH( FUNC, INST, XMM, INDEX, CHAN )\
+ emit_fetch( FUNC, XMM, &(INST).FullSrcRegisters[INDEX], CHAN )
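+
+/* For example, FETCH( func, *inst, 0, 0, CHAN_X ) loads the component that
+ * source operand 0's swizzle selects for channel X -- with its sign mode
+ * (abs/negate) applied -- into xmm0.
+ */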
+
+/**
+ * Register store.
+ */
+
+static void
+emit_store(
+ struct x86_function *func,
+ unsigned xmm,
+ const struct tgsi_full_dst_register *reg,
+ const struct tgsi_full_instruction *inst,
+ unsigned chan_index )
+{
+ switch( reg->DstRegister.File ) {
+ case TGSI_FILE_OUTPUT:
+ emit_output(
+ func,
+ xmm,
+ reg->DstRegister.Index,
+ chan_index );
+ break;
+
+ case TGSI_FILE_TEMPORARY:
+ emit_temps(
+ func,
+ xmm,
+ reg->DstRegister.Index,
+ chan_index );
+ break;
+
+ case TGSI_FILE_ADDRESS:
+ emit_addrs(
+ func,
+ xmm,
+ reg->DstRegister.Index,
+ chan_index );
+ break;
+
+ default:
+ assert( 0 );
+ }
+
+ switch( inst->Instruction.Saturate ) {
+ case TGSI_SAT_NONE:
+ break;
+
+ case TGSI_SAT_ZERO_ONE:
+ /* assert( 0 ); */
+ break;
+
+ case TGSI_SAT_MINUS_PLUS_ONE:
+ assert( 0 );
+ break;
+ }
+}
+
+#define STORE( FUNC, INST, XMM, INDEX, CHAN )\
+ emit_store( FUNC, XMM, &(INST).FullDstRegisters[INDEX], &(INST), CHAN )
+
+
+static void PIPE_CDECL
+fetch_texel( struct tgsi_sampler **sampler,
+ float *store )
+{
+#if 0
+ uint j;
+
+ debug_printf("%s sampler: %p (%p) store: %p\n",
+ __FUNCTION__,
+ sampler, *sampler,
+ store );
+
+ debug_printf("lodbias %f\n", store[12]);
+
+ for (j = 0; j < 4; j++)
+ debug_printf("sample %d texcoord %f %f\n",
+ j,
+ store[0+j],
+ store[4+j]);
+#endif
+
+ {
+ float rgba[NUM_CHANNELS][QUAD_SIZE];
+ (*sampler)->get_samples(*sampler,
+ &store[0],
+ &store[4],
+ &store[8],
+ 0.0f, /*store[12], lodbias */
+ rgba);
+
+ memcpy( store, rgba, 16 * sizeof(float));
+ }
+
+#if 0
+ for (j = 0; j < 4; j++)
+ debug_printf("sample %d result %f %f %f %f\n",
+ j,
+ store[0+j],
+ store[4+j],
+ store[8+j],
+ store[12+j]);
+#endif
+}
+
+/**
+ * High-level instruction translators.
+ */
+
+static void
+emit_tex( struct x86_function *func,
+ const struct tgsi_full_instruction *inst,
+ boolean lodbias,
+ boolean projected)
+{
+ const uint unit = inst->FullSrcRegisters[1].SrcRegister.Index;
+ struct x86_reg args[2];
+ unsigned count;
+ unsigned i;
+
+ switch (inst->InstructionExtTexture.Texture) {
+ case TGSI_TEXTURE_1D:
+ count = 1;
+ break;
+ case TGSI_TEXTURE_2D:
+ case TGSI_TEXTURE_RECT:
+ count = 2;
+ break;
+ case TGSI_TEXTURE_SHADOW1D:
+ case TGSI_TEXTURE_SHADOW2D:
+ case TGSI_TEXTURE_SHADOWRECT:
+ case TGSI_TEXTURE_3D:
+ case TGSI_TEXTURE_CUBE:
+ count = 3;
+ break;
+ default:
+ assert(0);
+ return;
+ }
+
+ if (lodbias) {
+ FETCH( func, *inst, 3, 0, 3 );
+ }
+ else {
+ emit_tempf(
+ func,
+ 3,
+ TGSI_EXEC_TEMP_00000000_I,
+ TGSI_EXEC_TEMP_00000000_C );
+
+ }
+
+ /* store lodbias whether enabled or not -- fetch_texel always expects
+ * it at store[12] (though it currently substitutes 0.0).
+ */
+ sse_movaps( func,
+ get_temp( TEMP_R0, 3 ),
+ make_xmm( 3 ) );
+
+
+ if (projected) {
+ FETCH( func, *inst, 3, 0, 3 );
+
+ emit_rcp( func, 3, 3 );
+ }
+
+ for (i = 0; i < count; i++) {
+ FETCH( func, *inst, i, 0, i );
+
+ if (projected) {
+ sse_mulps(
+ func,
+ make_xmm( i ),
+ make_xmm( 3 ) );
+ }
+
+ /* Store in the argument buffer:
+ */
+ sse_movaps(
+ func,
+ get_temp( TEMP_R0, i ),
+ make_xmm( i ) );
+ }
+
+ args[0] = get_temp( TEMP_R0, 0 );
+ args[1] = get_sampler_ptr( unit );
+
+
+ emit_func_call( func,
+ 0,
+ args,
+ Elements(args),
+ fetch_texel );
+
+ /* If all four channels are enabled, could use a pointer to
+ * dst[0].x instead of TEMP_R0 for store?
+ */
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, i ) {
+
+ sse_movaps(
+ func,
+ make_xmm( 0 ),
+ get_temp( TEMP_R0, i ) );
+
+ STORE( func, *inst, 0, 0, i );
+ }
+}
+
+
+static void
+emit_kil(
+ struct x86_function *func,
+ const struct tgsi_full_src_register *reg )
+{
+ unsigned uniquemask;
+ unsigned unique_count = 0;
+ unsigned chan_index;
+ unsigned i;
+
+ /* This mask stores component bits that were already tested. Note that
+ * we test whether the value is less than zero, so the constant 1.0 and 0.0
+ * components need not be tested. */
+ uniquemask = (1 << TGSI_EXTSWIZZLE_ZERO) | (1 << TGSI_EXTSWIZZLE_ONE);
+
+ FOR_EACH_CHANNEL( chan_index ) {
+ unsigned swizzle;
+
+ /* unswizzle channel */
+ swizzle = tgsi_util_get_full_src_register_extswizzle(
+ reg,
+ chan_index );
+
+ /* check if the component has not been already tested */
+ if( !(uniquemask & (1 << swizzle)) ) {
+ uniquemask |= 1 << swizzle;
+
+ /* allocate register */
+ emit_fetch(
+ func,
+ unique_count++,
+ reg,
+ chan_index );
+ }
+ }
+
+ x86_push(
+ func,
+ x86_make_reg( file_REG32, reg_AX ) );
+ x86_push(
+ func,
+ x86_make_reg( file_REG32, reg_DX ) );
+
+ for (i = 0 ; i < unique_count; i++ ) {
+ struct x86_reg dataXMM = make_xmm(i);
+
+ sse_cmpps(
+ func,
+ dataXMM,
+ get_temp(
+ TGSI_EXEC_TEMP_00000000_I,
+ TGSI_EXEC_TEMP_00000000_C ),
+ cc_LessThan );
+
+ if( i == 0 ) {
+ sse_movmskps(
+ func,
+ x86_make_reg( file_REG32, reg_AX ),
+ dataXMM );
+ }
+ else {
+ sse_movmskps(
+ func,
+ x86_make_reg( file_REG32, reg_DX ),
+ dataXMM );
+ x86_or(
+ func,
+ x86_make_reg( file_REG32, reg_AX ),
+ x86_make_reg( file_REG32, reg_DX ) );
+ }
+ }
+
+ x86_or(
+ func,
+ get_temp(
+ TGSI_EXEC_TEMP_KILMASK_I,
+ TGSI_EXEC_TEMP_KILMASK_C ),
+ x86_make_reg( file_REG32, reg_AX ) );
+
+ x86_pop(
+ func,
+ x86_make_reg( file_REG32, reg_DX ) );
+ x86_pop(
+ func,
+ x86_make_reg( file_REG32, reg_AX ) );
+}
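+
+/* Net effect of the code above, as scalar pseudo-C (sketch):
+ *
+ *    mask = 0;
+ *    for each distinct component c referenced by the KIL source:
+ *       mask |= movmskps(src.c < 0.0);   // one bit per pixel in the quad
+ *    TEMP[KILMASK] |= mask;              // those pixels are later discarded
+ */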
+
+
+static void
+emit_kilp(
+ struct x86_function *func )
+{
+ /* XXX todo / fix me */
+}
+
+
+static void
+emit_setcc(
+ struct x86_function *func,
+ struct tgsi_full_instruction *inst,
+ enum sse_cc cc )
+{
+ unsigned chan_index;
+
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+ FETCH( func, *inst, 1, 1, chan_index );
+ sse_cmpps(
+ func,
+ make_xmm( 0 ),
+ make_xmm( 1 ),
+ cc );
+ sse_andps(
+ func,
+ make_xmm( 0 ),
+ get_temp(
+ TEMP_ONE_I,
+ TEMP_ONE_C ) );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+}
+
+static void
+emit_cmp(
+ struct x86_function *func,
+ struct tgsi_full_instruction *inst )
+{
+ unsigned chan_index;
+
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+ FETCH( func, *inst, 1, 1, chan_index );
+ FETCH( func, *inst, 2, 2, chan_index );
+ sse_cmpps(
+ func,
+ make_xmm( 0 ),
+ get_temp(
+ TGSI_EXEC_TEMP_00000000_I,
+ TGSI_EXEC_TEMP_00000000_C ),
+ cc_LessThan );
+ sse_andps(
+ func,
+ make_xmm( 1 ),
+ make_xmm( 0 ) );
+ sse_andnps(
+ func,
+ make_xmm( 0 ),
+ make_xmm( 2 ) );
+ sse_orps(
+ func,
+ make_xmm( 0 ),
+ make_xmm( 1 ) );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+}
+
+
+/**
+ * Check if inst src/dest regs use indirect addressing into temporary
+ * register file.
+ */
+static boolean
+indirect_temp_reference(const struct tgsi_full_instruction *inst)
+{
+ uint i;
+ for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
+ const struct tgsi_full_src_register *reg = &inst->FullSrcRegisters[i];
+ if (reg->SrcRegister.File == TGSI_FILE_TEMPORARY &&
+ reg->SrcRegister.Indirect)
+ return TRUE;
+ }
+ for (i = 0; i < inst->Instruction.NumDstRegs; i++) {
+ const struct tgsi_full_dst_register *reg = &inst->FullDstRegisters[i];
+ if (reg->DstRegister.File == TGSI_FILE_TEMPORARY &&
+ reg->DstRegister.Indirect)
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
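+/**
+ * Translate a single TGSI instruction into SSE code.
+ * Returns 1 on success, 0 if the instruction cannot be translated here
+ * (unsupported opcode, indirect temporary addressing, saturation, or an
+ * SOA dependency), in which case the whole translation is abandoned.
+ */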
+static int
+emit_instruction(
+ struct x86_function *func,
+ struct tgsi_full_instruction *inst )
+{
+ unsigned chan_index;
+
+ /* we can't handle indirect addressing into temp register file yet */
+ if (indirect_temp_reference(inst))
+ return FALSE;
+
+ /* we don't handle saturation/clamping yet */
+ if (inst->Instruction.Saturate != TGSI_SAT_NONE)
+ return FALSE;
+
+   /* we would need extra temps to fix SOA dependencies: */
+ if (tgsi_check_soa_dependencies(inst))
+ return FALSE;
+
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_ARL:
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+ emit_flr(func, 0, 0);
+ emit_f2it( func, 0 );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_MOV:
+ case TGSI_OPCODE_SWZ:
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_LIT:
+ if( IS_DST0_CHANNEL_ENABLED( *inst, CHAN_X ) ||
+ IS_DST0_CHANNEL_ENABLED( *inst, CHAN_W ) ) {
+ emit_tempf(
+ func,
+ 0,
+ TEMP_ONE_I,
+ TEMP_ONE_C);
+ if( IS_DST0_CHANNEL_ENABLED( *inst, CHAN_X ) ) {
+ STORE( func, *inst, 0, 0, CHAN_X );
+ }
+ if( IS_DST0_CHANNEL_ENABLED( *inst, CHAN_W ) ) {
+ STORE( func, *inst, 0, 0, CHAN_W );
+ }
+ }
+ if( IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Y ) ||
+ IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Z ) ) {
+ if( IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Y ) ) {
+ FETCH( func, *inst, 0, 0, CHAN_X );
+ sse_maxps(
+ func,
+ make_xmm( 0 ),
+ get_temp(
+ TGSI_EXEC_TEMP_00000000_I,
+ TGSI_EXEC_TEMP_00000000_C ) );
+ STORE( func, *inst, 0, 0, CHAN_Y );
+ }
+ if( IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Z ) ) {
+ /* XMM[1] = SrcReg[0].yyyy */
+ FETCH( func, *inst, 1, 0, CHAN_Y );
+ /* XMM[1] = max(XMM[1], 0) */
+ sse_maxps(
+ func,
+ make_xmm( 1 ),
+ get_temp(
+ TGSI_EXEC_TEMP_00000000_I,
+ TGSI_EXEC_TEMP_00000000_C ) );
+ /* XMM[2] = SrcReg[0].wwww */
+ FETCH( func, *inst, 2, 0, CHAN_W );
+ /* XMM[2] = min(XMM[2], 128.0) */
+ sse_minps(
+ func,
+ make_xmm( 2 ),
+ get_temp(
+ TGSI_EXEC_TEMP_128_I,
+ TGSI_EXEC_TEMP_128_C ) );
+ /* XMM[2] = max(XMM[2], -128.0) */
+ sse_maxps(
+ func,
+ make_xmm( 2 ),
+ get_temp(
+ TGSI_EXEC_TEMP_MINUS_128_I,
+ TGSI_EXEC_TEMP_MINUS_128_C ) );
+ emit_pow( func, 3, 1, 1, 2 );
+ FETCH( func, *inst, 0, 0, CHAN_X );
+ sse_xorps(
+ func,
+ make_xmm( 2 ),
+ make_xmm( 2 ) );
+ sse_cmpps(
+ func,
+ make_xmm( 2 ),
+ make_xmm( 0 ),
+ cc_LessThan );
+ sse_andps(
+ func,
+ make_xmm( 2 ),
+ make_xmm( 1 ) );
+ STORE( func, *inst, 2, 0, CHAN_Z );
+ }
+ }
+ break;
+
+ case TGSI_OPCODE_RCP:
+ /* TGSI_OPCODE_RECIP */
+ FETCH( func, *inst, 0, 0, CHAN_X );
+ emit_rcp( func, 0, 0 );
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_RSQ:
+ /* TGSI_OPCODE_RECIPSQRT */
+ FETCH( func, *inst, 0, 0, CHAN_X );
+ emit_abs( func, 0 );
+ emit_rsqrt( func, 1, 0 );
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( func, *inst, 1, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_EXP:
+ if (IS_DST0_CHANNEL_ENABLED( *inst, CHAN_X ) ||
+ IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Y ) ||
+ IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Z )) {
+ FETCH( func, *inst, 0, 0, CHAN_X );
+ if (IS_DST0_CHANNEL_ENABLED( *inst, CHAN_X ) ||
+ IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Y )) {
+ emit_MOV( func, 1, 0 );
+ emit_flr( func, 2, 1 );
+ /* dst.x = ex2(floor(src.x)) */
+ if (IS_DST0_CHANNEL_ENABLED( *inst, CHAN_X )) {
+ emit_MOV( func, 2, 1 );
+ emit_ex2( func, 3, 2 );
+ STORE( func, *inst, 2, 0, CHAN_X );
+ }
+ /* dst.y = src.x - floor(src.x) */
+ if (IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Y )) {
+ emit_MOV( func, 2, 0 );
+ emit_sub( func, 2, 1 );
+ STORE( func, *inst, 2, 0, CHAN_Y );
+ }
+ }
+ /* dst.z = ex2(src.x) */
+ if (IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Z )) {
+ emit_ex2( func, 3, 0 );
+ STORE( func, *inst, 0, 0, CHAN_Z );
+ }
+ }
+ /* dst.w = 1.0 */
+ if (IS_DST0_CHANNEL_ENABLED( *inst, CHAN_W )) {
+ emit_tempf( func, 0, TEMP_ONE_I, TEMP_ONE_C );
+ STORE( func, *inst, 0, 0, CHAN_W );
+ }
+ break;
+
+ case TGSI_OPCODE_LOG:
+ if (IS_DST0_CHANNEL_ENABLED( *inst, CHAN_X ) ||
+ IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Y ) ||
+ IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Z )) {
+ FETCH( func, *inst, 0, 0, CHAN_X );
+ emit_abs( func, 0 );
+ emit_MOV( func, 1, 0 );
+ emit_lg2( func, 2, 1 );
+ /* dst.z = lg2(abs(src.x)) */
+ if (IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Z )) {
+ STORE( func, *inst, 1, 0, CHAN_Z );
+ }
+ if (IS_DST0_CHANNEL_ENABLED( *inst, CHAN_X ) ||
+ IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Y )) {
+ emit_flr( func, 2, 1 );
+ /* dst.x = floor(lg2(abs(src.x))) */
+ if (IS_DST0_CHANNEL_ENABLED( *inst, CHAN_X )) {
+ STORE( func, *inst, 1, 0, CHAN_X );
+ }
+ /* dst.x = abs(src)/ex2(floor(lg2(abs(src.x)))) */
+ if (IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Y )) {
+ emit_ex2( func, 2, 1 );
+ emit_rcp( func, 1, 1 );
+ emit_mul( func, 0, 1 );
+ STORE( func, *inst, 0, 0, CHAN_Y );
+ }
+ }
+ }
+ /* dst.w = 1.0 */
+ if (IS_DST0_CHANNEL_ENABLED( *inst, CHAN_W )) {
+ emit_tempf( func, 0, TEMP_ONE_I, TEMP_ONE_C );
+ STORE( func, *inst, 0, 0, CHAN_W );
+ }
+ break;
+
+ case TGSI_OPCODE_MUL:
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+ FETCH( func, *inst, 1, 1, chan_index );
+ emit_mul( func, 0, 1 );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_ADD:
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+ FETCH( func, *inst, 1, 1, chan_index );
+ emit_add( func, 0, 1 );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_DP3:
+ /* TGSI_OPCODE_DOT3 */
+ FETCH( func, *inst, 0, 0, CHAN_X );
+ FETCH( func, *inst, 1, 1, CHAN_X );
+ emit_mul( func, 0, 1 );
+ FETCH( func, *inst, 1, 0, CHAN_Y );
+ FETCH( func, *inst, 2, 1, CHAN_Y );
+ emit_mul( func, 1, 2 );
+ emit_add( func, 0, 1 );
+ FETCH( func, *inst, 1, 0, CHAN_Z );
+ FETCH( func, *inst, 2, 1, CHAN_Z );
+ emit_mul( func, 1, 2 );
+ emit_add( func, 0, 1 );
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_DP4:
+ /* TGSI_OPCODE_DOT4 */
+ FETCH( func, *inst, 0, 0, CHAN_X );
+ FETCH( func, *inst, 1, 1, CHAN_X );
+ emit_mul( func, 0, 1 );
+ FETCH( func, *inst, 1, 0, CHAN_Y );
+ FETCH( func, *inst, 2, 1, CHAN_Y );
+ emit_mul( func, 1, 2 );
+ emit_add( func, 0, 1 );
+ FETCH( func, *inst, 1, 0, CHAN_Z );
+ FETCH( func, *inst, 2, 1, CHAN_Z );
+      emit_mul( func, 1, 2 );
+      emit_add( func, 0, 1 );
+ FETCH( func, *inst, 1, 0, CHAN_W );
+ FETCH( func, *inst, 2, 1, CHAN_W );
+ emit_mul( func, 1, 2 );
+ emit_add( func, 0, 1 );
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_DST:
+ IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_X ) {
+ emit_tempf(
+ func,
+ 0,
+ TEMP_ONE_I,
+ TEMP_ONE_C );
+ STORE( func, *inst, 0, 0, CHAN_X );
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Y ) {
+ FETCH( func, *inst, 0, 0, CHAN_Y );
+ FETCH( func, *inst, 1, 1, CHAN_Y );
+ emit_mul( func, 0, 1 );
+ STORE( func, *inst, 0, 0, CHAN_Y );
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Z ) {
+ FETCH( func, *inst, 0, 0, CHAN_Z );
+ STORE( func, *inst, 0, 0, CHAN_Z );
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_W ) {
+ FETCH( func, *inst, 0, 1, CHAN_W );
+ STORE( func, *inst, 0, 0, CHAN_W );
+ }
+ break;
+
+ case TGSI_OPCODE_MIN:
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+ FETCH( func, *inst, 1, 1, chan_index );
+ sse_minps(
+ func,
+ make_xmm( 0 ),
+ make_xmm( 1 ) );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_MAX:
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+ FETCH( func, *inst, 1, 1, chan_index );
+ sse_maxps(
+ func,
+ make_xmm( 0 ),
+ make_xmm( 1 ) );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SLT:
+ /* TGSI_OPCODE_SETLT */
+ emit_setcc( func, inst, cc_LessThan );
+ break;
+
+ case TGSI_OPCODE_SGE:
+ /* TGSI_OPCODE_SETGE */
+ emit_setcc( func, inst, cc_NotLessThan );
+ break;
+
+ case TGSI_OPCODE_MAD:
+ /* TGSI_OPCODE_MADD */
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+ FETCH( func, *inst, 1, 1, chan_index );
+ FETCH( func, *inst, 2, 2, chan_index );
+ emit_mul( func, 0, 1 );
+ emit_add( func, 0, 2 );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SUB:
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+ FETCH( func, *inst, 1, 1, chan_index );
+ emit_sub( func, 0, 1 );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_LRP:
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+ FETCH( func, *inst, 1, 1, chan_index );
+ FETCH( func, *inst, 2, 2, chan_index );
+ emit_sub( func, 1, 2 );
+ emit_mul( func, 0, 1 );
+ emit_add( func, 0, 2 );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_CND:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_DP2A:
+ FETCH( func, *inst, 0, 0, CHAN_X ); /* xmm0 = src[0].x */
+ FETCH( func, *inst, 1, 1, CHAN_X ); /* xmm1 = src[1].x */
+ emit_mul( func, 0, 1 ); /* xmm0 = xmm0 * xmm1 */
+ FETCH( func, *inst, 1, 0, CHAN_Y ); /* xmm1 = src[0].y */
+ FETCH( func, *inst, 2, 1, CHAN_Y ); /* xmm2 = src[1].y */
+ emit_mul( func, 1, 2 ); /* xmm1 = xmm1 * xmm2 */
+ emit_add( func, 0, 1 ); /* xmm0 = xmm0 + xmm1 */
+ FETCH( func, *inst, 1, 2, CHAN_X ); /* xmm1 = src[2].x */
+ emit_add( func, 0, 1 ); /* xmm0 = xmm0 + xmm1 */
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( func, *inst, 0, 0, chan_index ); /* dest[ch] = xmm0 */
+ }
+ break;
+
+ case TGSI_OPCODE_FRC:
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+ emit_frc( func, 0, 0 );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_CLAMP:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_FLR:
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+ emit_flr( func, 0, 0 );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_ROUND:
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+ emit_rnd( func, 0, 0 );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_EX2:
+ FETCH( func, *inst, 0, 0, CHAN_X );
+ emit_ex2( func, 0, 0 );
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_LG2:
+ FETCH( func, *inst, 0, 0, CHAN_X );
+ emit_lg2( func, 0, 0 );
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_POW:
+ FETCH( func, *inst, 0, 0, CHAN_X );
+ FETCH( func, *inst, 1, 1, CHAN_X );
+ emit_pow( func, 0, 0, 0, 1 );
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_XPD:
+ if( IS_DST0_CHANNEL_ENABLED( *inst, CHAN_X ) ||
+ IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Y ) ) {
+ FETCH( func, *inst, 1, 1, CHAN_Z );
+ FETCH( func, *inst, 3, 0, CHAN_Z );
+ }
+ if( IS_DST0_CHANNEL_ENABLED( *inst, CHAN_X ) ||
+ IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Z ) ) {
+ FETCH( func, *inst, 0, 0, CHAN_Y );
+ FETCH( func, *inst, 4, 1, CHAN_Y );
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_X ) {
+ emit_MOV( func, 2, 0 );
+ emit_mul( func, 2, 1 );
+ emit_MOV( func, 5, 3 );
+ emit_mul( func, 5, 4 );
+ emit_sub( func, 2, 5 );
+ STORE( func, *inst, 2, 0, CHAN_X );
+ }
+ if( IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Y ) ||
+ IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Z ) ) {
+ FETCH( func, *inst, 2, 1, CHAN_X );
+ FETCH( func, *inst, 5, 0, CHAN_X );
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Y ) {
+ emit_mul( func, 3, 2 );
+ emit_mul( func, 1, 5 );
+ emit_sub( func, 3, 1 );
+ STORE( func, *inst, 3, 0, CHAN_Y );
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Z ) {
+ emit_mul( func, 5, 4 );
+ emit_mul( func, 0, 2 );
+ emit_sub( func, 5, 0 );
+ STORE( func, *inst, 5, 0, CHAN_Z );
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_W ) {
+ emit_tempf(
+ func,
+ 0,
+ TEMP_ONE_I,
+ TEMP_ONE_C );
+ STORE( func, *inst, 0, 0, CHAN_W );
+ }
+ break;
+
+ case TGSI_OPCODE_ABS:
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+      emit_abs( func, 0 );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_RCC:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_DPH:
+ FETCH( func, *inst, 0, 0, CHAN_X );
+ FETCH( func, *inst, 1, 1, CHAN_X );
+ emit_mul( func, 0, 1 );
+ FETCH( func, *inst, 1, 0, CHAN_Y );
+ FETCH( func, *inst, 2, 1, CHAN_Y );
+ emit_mul( func, 1, 2 );
+ emit_add( func, 0, 1 );
+ FETCH( func, *inst, 1, 0, CHAN_Z );
+ FETCH( func, *inst, 2, 1, CHAN_Z );
+ emit_mul( func, 1, 2 );
+ emit_add( func, 0, 1 );
+ FETCH( func, *inst, 1, 1, CHAN_W );
+ emit_add( func, 0, 1 );
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_COS:
+ FETCH( func, *inst, 0, 0, CHAN_X );
+ emit_cos( func, 0, 0 );
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_DDX:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_DDY:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_KILP:
+ /* predicated kill */
+ emit_kilp( func );
+ return 0; /* XXX fix me */
+ break;
+
+ case TGSI_OPCODE_KIL:
+ /* conditional kill */
+ emit_kil( func, &inst->FullSrcRegisters[0] );
+ break;
+
+ case TGSI_OPCODE_PK2H:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_PK2US:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_PK4B:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_PK4UB:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_RFL:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_SEQ:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_SFL:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_SGT:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_SIN:
+ FETCH( func, *inst, 0, 0, CHAN_X );
+ emit_sin( func, 0, 0 );
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SLE:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_SNE:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_STR:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_TEX:
+ emit_tex( func, inst, FALSE, FALSE );
+ break;
+
+ case TGSI_OPCODE_TXD:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_UP2H:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_UP2US:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_UP4B:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_UP4UB:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_X2D:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_ARA:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_ARR:
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+ emit_rnd( func, 0, 0 );
+ emit_f2it( func, 0 );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_BRA:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_CAL:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_RET:
+ emit_ret( func );
+ break;
+
+ case TGSI_OPCODE_END:
+ break;
+
+ case TGSI_OPCODE_SSG:
+ /* TGSI_OPCODE_SGN */
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+ emit_sgn( func, 0, 0 );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_CMP:
+      emit_cmp( func, inst );
+ break;
+
+ case TGSI_OPCODE_SCS:
+ IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_X ) {
+ FETCH( func, *inst, 0, 0, CHAN_X );
+ emit_cos( func, 0, 0 );
+ STORE( func, *inst, 0, 0, CHAN_X );
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Y ) {
+ FETCH( func, *inst, 0, 0, CHAN_X );
+ emit_sin( func, 0, 0 );
+ STORE( func, *inst, 0, 0, CHAN_Y );
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_Z ) {
+ emit_tempf(
+ func,
+ 0,
+ TGSI_EXEC_TEMP_00000000_I,
+ TGSI_EXEC_TEMP_00000000_C );
+ STORE( func, *inst, 0, 0, CHAN_Z );
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( *inst, CHAN_W ) {
+ emit_tempf(
+ func,
+ 0,
+ TEMP_ONE_I,
+ TEMP_ONE_C );
+ STORE( func, *inst, 0, 0, CHAN_W );
+ }
+ break;
+
+ case TGSI_OPCODE_TXB:
+ emit_tex( func, inst, TRUE, FALSE );
+ break;
+
+ case TGSI_OPCODE_NRM:
+ /* fall-through */
+ case TGSI_OPCODE_NRM4:
+ /* 3 or 4-component normalization */
+ {
+ uint dims = (inst->Instruction.Opcode == TGSI_OPCODE_NRM) ? 3 : 4;
+
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X) ||
+ IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y) ||
+ IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z) ||
+ (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_W) && dims == 4)) {
+
+ /* NOTE: Cannot use xmm regs 2/3 here (see emit_rsqrt() above). */
+
+ /* xmm4 = src.x */
+ /* xmm0 = src.x * src.x */
+ FETCH(func, *inst, 0, 0, CHAN_X);
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X)) {
+ emit_MOV(func, 4, 0);
+ }
+ emit_mul(func, 0, 0);
+
+ /* xmm5 = src.y */
+ /* xmm0 = xmm0 + src.y * src.y */
+ FETCH(func, *inst, 1, 0, CHAN_Y);
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {
+ emit_MOV(func, 5, 1);
+ }
+ emit_mul(func, 1, 1);
+ emit_add(func, 0, 1);
+
+ /* xmm6 = src.z */
+ /* xmm0 = xmm0 + src.z * src.z */
+ FETCH(func, *inst, 1, 0, CHAN_Z);
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
+ emit_MOV(func, 6, 1);
+ }
+ emit_mul(func, 1, 1);
+ emit_add(func, 0, 1);
+
+ if (dims == 4) {
+ /* xmm7 = src.w */
+ /* xmm0 = xmm0 + src.w * src.w */
+ FETCH(func, *inst, 1, 0, CHAN_W);
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_W)) {
+ emit_MOV(func, 7, 1);
+ }
+ emit_mul(func, 1, 1);
+ emit_add(func, 0, 1);
+ }
+
+ /* xmm1 = 1 / sqrt(xmm0) */
+ emit_rsqrt(func, 1, 0);
+
+ /* dst.x = xmm1 * src.x */
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X)) {
+ emit_mul(func, 4, 1);
+ STORE(func, *inst, 4, 0, CHAN_X);
+ }
+
+ /* dst.y = xmm1 * src.y */
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {
+ emit_mul(func, 5, 1);
+ STORE(func, *inst, 5, 0, CHAN_Y);
+ }
+
+ /* dst.z = xmm1 * src.z */
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
+ emit_mul(func, 6, 1);
+ STORE(func, *inst, 6, 0, CHAN_Z);
+ }
+
+ /* dst.w = xmm1 * src.w */
+        if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_W) && dims == 4) {
+ emit_mul(func, 7, 1);
+ STORE(func, *inst, 7, 0, CHAN_W);
+ }
+ }
+
+ /* dst0.w = 1.0 */
+ if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_W) && dims == 3) {
+ emit_tempf(func, 0, TEMP_ONE_I, TEMP_ONE_C);
+ STORE(func, *inst, 0, 0, CHAN_W);
+ }
+ }
+ break;
+
+ case TGSI_OPCODE_DIV:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_DP2:
+ FETCH( func, *inst, 0, 0, CHAN_X ); /* xmm0 = src[0].x */
+ FETCH( func, *inst, 1, 1, CHAN_X ); /* xmm1 = src[1].x */
+ emit_mul( func, 0, 1 ); /* xmm0 = xmm0 * xmm1 */
+ FETCH( func, *inst, 1, 0, CHAN_Y ); /* xmm1 = src[0].y */
+ FETCH( func, *inst, 2, 1, CHAN_Y ); /* xmm2 = src[1].y */
+ emit_mul( func, 1, 2 ); /* xmm1 = xmm1 * xmm2 */
+ emit_add( func, 0, 1 ); /* xmm0 = xmm0 + xmm1 */
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( func, *inst, 0, 0, chan_index ); /* dest[ch] = xmm0 */
+ }
+ break;
+
+ case TGSI_OPCODE_TXL:
+ emit_tex( func, inst, TRUE, FALSE );
+ break;
+
+ case TGSI_OPCODE_TXP:
+ emit_tex( func, inst, FALSE, TRUE );
+ break;
+
+ case TGSI_OPCODE_BRK:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_IF:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_BGNFOR:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_REP:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_ELSE:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_ENDIF:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_ENDFOR:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_ENDREP:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_PUSHA:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_POPA:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_CEIL:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_I2F:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_NOT:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_TRUNC:
+ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( func, *inst, 0, 0, chan_index );
+ emit_f2it( func, 0 );
+ emit_i2f( func, 0 );
+ STORE( func, *inst, 0, 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SHL:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_SHR:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_AND:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_OR:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_MOD:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_XOR:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_SAD:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_TXF:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_TXQ:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_CONT:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_EMIT:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_ENDPRIM:
+ return 0;
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
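+/**
+ * Emit code for a fragment shader input declaration: each declared input
+ * channel is interpolated as constant (a0), linear (a0 + x*dadx + y*dady)
+ * or perspective (the linear result multiplied by 1/w), and the result is
+ * stored for that input register/channel.
+ */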
+static void
+emit_declaration(
+ struct x86_function *func,
+ struct tgsi_full_declaration *decl )
+{
+ if( decl->Declaration.File == TGSI_FILE_INPUT ) {
+ unsigned first, last, mask;
+ unsigned i, j;
+
+ first = decl->DeclarationRange.First;
+ last = decl->DeclarationRange.Last;
+ mask = decl->Declaration.UsageMask;
+
+ for( i = first; i <= last; i++ ) {
+ for( j = 0; j < NUM_CHANNELS; j++ ) {
+ if( mask & (1 << j) ) {
+ switch( decl->Declaration.Interpolate ) {
+ case TGSI_INTERPOLATE_CONSTANT:
+ emit_coef_a0( func, 0, i, j );
+ emit_inputs( func, 0, i, j );
+ break;
+
+ case TGSI_INTERPOLATE_LINEAR:
+ emit_tempf( func, 0, 0, TGSI_SWIZZLE_X );
+ emit_coef_dadx( func, 1, i, j );
+ emit_tempf( func, 2, 0, TGSI_SWIZZLE_Y );
+ emit_coef_dady( func, 3, i, j );
+ emit_mul( func, 0, 1 ); /* x * dadx */
+ emit_coef_a0( func, 4, i, j );
+ emit_mul( func, 2, 3 ); /* y * dady */
+ emit_add( func, 0, 4 ); /* x * dadx + a0 */
+ emit_add( func, 0, 2 ); /* x * dadx + y * dady + a0 */
+ emit_inputs( func, 0, i, j );
+ break;
+
+ case TGSI_INTERPOLATE_PERSPECTIVE:
+ emit_tempf( func, 0, 0, TGSI_SWIZZLE_X );
+ emit_coef_dadx( func, 1, i, j );
+ emit_tempf( func, 2, 0, TGSI_SWIZZLE_Y );
+ emit_coef_dady( func, 3, i, j );
+ emit_mul( func, 0, 1 ); /* x * dadx */
+ emit_tempf( func, 4, 0, TGSI_SWIZZLE_W );
+ emit_coef_a0( func, 5, i, j );
+ emit_rcp( func, 4, 4 ); /* 1.0 / w */
+ emit_mul( func, 2, 3 ); /* y * dady */
+ emit_add( func, 0, 5 ); /* x * dadx + a0 */
+ emit_add( func, 0, 2 ); /* x * dadx + y * dady + a0 */
+ emit_mul( func, 0, 4 ); /* (x * dadx + y * dady + a0) / w */
+ emit_inputs( func, 0, i, j );
+ break;
+
+ default:
+ assert( 0 );
+ break;
+ }
+ }
+ }
+ }
+ }
+}
+
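+/**
+ * Convert per-vertex (AoS) input data to the SoA layout used by the
+ * generated shader code.  For each input attribute, the attribute is
+ * loaded from four vertices (arg_stride bytes apart) with movlps/movhps
+ * and transposed with shufps, so that each stored vector holds one
+ * component for all four vertices, roughly:
+ *
+ *    in:  v0.xyzw  v1.xyzw  v2.xyzw  v3.xyzw
+ *    out: x0x1x2x3 y0y1y2y3 z0z1z2z3 w0w1w2w3
+ */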
+static void aos_to_soa( struct x86_function *func,
+ uint arg_aos,
+ uint arg_machine,
+ uint arg_num,
+ uint arg_stride )
+{
+ struct x86_reg soa_input = x86_make_reg( file_REG32, reg_AX );
+ struct x86_reg aos_input = x86_make_reg( file_REG32, reg_BX );
+ struct x86_reg num_inputs = x86_make_reg( file_REG32, reg_CX );
+ struct x86_reg stride = x86_make_reg( file_REG32, reg_DX );
+ int inner_loop;
+
+
+ /* Save EBX */
+ x86_push( func, x86_make_reg( file_REG32, reg_BX ) );
+
+ x86_mov( func, aos_input, x86_fn_arg( func, arg_aos ) );
+ x86_mov( func, soa_input, x86_fn_arg( func, arg_machine ) );
+ x86_lea( func, soa_input,
+ x86_make_disp( soa_input,
+ Offset(struct tgsi_exec_machine, Inputs) ) );
+ x86_mov( func, num_inputs, x86_fn_arg( func, arg_num ) );
+ x86_mov( func, stride, x86_fn_arg( func, arg_stride ) );
+
+ /* do */
+ inner_loop = x86_get_label( func );
+ {
+ x86_push( func, aos_input );
+ sse_movlps( func, make_xmm( 0 ), x86_make_disp( aos_input, 0 ) );
+ sse_movlps( func, make_xmm( 3 ), x86_make_disp( aos_input, 8 ) );
+ x86_add( func, aos_input, stride );
+ sse_movhps( func, make_xmm( 0 ), x86_make_disp( aos_input, 0 ) );
+ sse_movhps( func, make_xmm( 3 ), x86_make_disp( aos_input, 8 ) );
+ x86_add( func, aos_input, stride );
+ sse_movlps( func, make_xmm( 1 ), x86_make_disp( aos_input, 0 ) );
+ sse_movlps( func, make_xmm( 4 ), x86_make_disp( aos_input, 8 ) );
+ x86_add( func, aos_input, stride );
+ sse_movhps( func, make_xmm( 1 ), x86_make_disp( aos_input, 0 ) );
+ sse_movhps( func, make_xmm( 4 ), x86_make_disp( aos_input, 8 ) );
+ x86_pop( func, aos_input );
+
+ sse_movaps( func, make_xmm( 2 ), make_xmm( 0 ) );
+ sse_movaps( func, make_xmm( 5 ), make_xmm( 3 ) );
+ sse_shufps( func, make_xmm( 0 ), make_xmm( 1 ), 0x88 );
+ sse_shufps( func, make_xmm( 2 ), make_xmm( 1 ), 0xdd );
+ sse_shufps( func, make_xmm( 3 ), make_xmm( 4 ), 0x88 );
+ sse_shufps( func, make_xmm( 5 ), make_xmm( 4 ), 0xdd );
+
+ sse_movups( func, x86_make_disp( soa_input, 0 ), make_xmm( 0 ) );
+ sse_movups( func, x86_make_disp( soa_input, 16 ), make_xmm( 2 ) );
+ sse_movups( func, x86_make_disp( soa_input, 32 ), make_xmm( 3 ) );
+ sse_movups( func, x86_make_disp( soa_input, 48 ), make_xmm( 5 ) );
+
+ /* Advance to next input */
+ x86_lea( func, aos_input, x86_make_disp(aos_input, 16) );
+ x86_lea( func, soa_input, x86_make_disp(soa_input, 64) );
+ }
+ /* while --num_inputs */
+ x86_dec( func, num_inputs );
+ x86_jcc( func, cc_NE, inner_loop );
+
+ /* Restore EBX */
+ x86_pop( func, x86_make_reg( file_REG32, reg_BX ) );
+}
+
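+/**
+ * Inverse of aos_to_soa(): transpose the shader's SoA output vectors back
+ * into per-vertex AoS form with unpcklps/unpckhps and scatter them to the
+ * output buffer using the caller-supplied stride.
+ */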
+static void soa_to_aos( struct x86_function *func,
+ uint arg_aos,
+ uint arg_machine,
+ uint arg_num,
+ uint arg_stride )
+{
+ struct x86_reg soa_output = x86_make_reg( file_REG32, reg_AX );
+ struct x86_reg aos_output = x86_make_reg( file_REG32, reg_BX );
+ struct x86_reg num_outputs = x86_make_reg( file_REG32, reg_CX );
+ struct x86_reg temp = x86_make_reg( file_REG32, reg_DX );
+ int inner_loop;
+
+ /* Save EBX */
+ x86_push( func, x86_make_reg( file_REG32, reg_BX ) );
+
+ x86_mov( func, aos_output, x86_fn_arg( func, arg_aos ) );
+ x86_mov( func, soa_output, x86_fn_arg( func, arg_machine ) );
+ x86_lea( func, soa_output,
+ x86_make_disp( soa_output,
+ Offset(struct tgsi_exec_machine, Outputs) ) );
+ x86_mov( func, num_outputs, x86_fn_arg( func, arg_num ) );
+
+ /* do */
+ inner_loop = x86_get_label( func );
+ {
+ sse_movups( func, make_xmm( 0 ), x86_make_disp( soa_output, 0 ) );
+ sse_movups( func, make_xmm( 1 ), x86_make_disp( soa_output, 16 ) );
+ sse_movups( func, make_xmm( 3 ), x86_make_disp( soa_output, 32 ) );
+ sse_movups( func, make_xmm( 4 ), x86_make_disp( soa_output, 48 ) );
+
+ sse_movaps( func, make_xmm( 2 ), make_xmm( 0 ) );
+ sse_movaps( func, make_xmm( 5 ), make_xmm( 3 ) );
+ sse_unpcklps( func, make_xmm( 0 ), make_xmm( 1 ) );
+ sse_unpckhps( func, make_xmm( 2 ), make_xmm( 1 ) );
+ sse_unpcklps( func, make_xmm( 3 ), make_xmm( 4 ) );
+ sse_unpckhps( func, make_xmm( 5 ), make_xmm( 4 ) );
+
+ x86_mov( func, temp, x86_fn_arg( func, arg_stride ) );
+ x86_push( func, aos_output );
+ sse_movlps( func, x86_make_disp( aos_output, 0 ), make_xmm( 0 ) );
+ sse_movlps( func, x86_make_disp( aos_output, 8 ), make_xmm( 3 ) );
+ x86_add( func, aos_output, temp );
+ sse_movhps( func, x86_make_disp( aos_output, 0 ), make_xmm( 0 ) );
+ sse_movhps( func, x86_make_disp( aos_output, 8 ), make_xmm( 3 ) );
+ x86_add( func, aos_output, temp );
+ sse_movlps( func, x86_make_disp( aos_output, 0 ), make_xmm( 2 ) );
+ sse_movlps( func, x86_make_disp( aos_output, 8 ), make_xmm( 5 ) );
+ x86_add( func, aos_output, temp );
+ sse_movhps( func, x86_make_disp( aos_output, 0 ), make_xmm( 2 ) );
+ sse_movhps( func, x86_make_disp( aos_output, 8 ), make_xmm( 5 ) );
+ x86_pop( func, aos_output );
+
+ /* Advance to next output */
+ x86_lea( func, aos_output, x86_make_disp(aos_output, 16) );
+ x86_lea( func, soa_output, x86_make_disp(soa_output, 64) );
+ }
+ /* while --num_outputs */
+ x86_dec( func, num_outputs );
+ x86_jcc( func, cc_NE, inner_loop );
+
+ /* Restore EBX */
+ x86_pop( func, x86_make_reg( file_REG32, reg_BX ) );
+}
+
+/**
+ * Translate a TGSI vertex/fragment shader to SSE2 code.
+ * Slightly different things are done for vertex vs. fragment shaders.
+ *
+ * \param tokens the TGSI input shader
+ * \param func the output SSE code/function
+ * \param immediates buffer to place immediates, later passed to SSE func
+ * \return 1 for success, 0 if translation failed
+ */
+unsigned
+tgsi_emit_sse2(
+ const struct tgsi_token *tokens,
+ struct x86_function *func,
+ float (*immediates)[4],
+ boolean do_swizzles )
+{
+ struct tgsi_parse_context parse;
+ unsigned ok = 1;
+ uint num_immediates = 0;
+
+ util_init_math();
+
+ func->csr = func->store;
+
+ tgsi_parse_init( &parse, tokens );
+
+ /* Can't just use EDI, EBX without save/restoring them:
+ */
+ x86_push( func, x86_make_reg( file_REG32, reg_BX ) );
+ x86_push( func, x86_make_reg( file_REG32, reg_DI ) );
+
+ /*
+ * Different function args for vertex/fragment shaders:
+ */
+ if (parse.FullHeader.Processor.Processor == TGSI_PROCESSOR_VERTEX) {
+ if (do_swizzles)
+ aos_to_soa( func,
+ 4, /* aos_input */
+ 1, /* machine */
+ 5, /* num_inputs */
+ 6 ); /* input_stride */
+ }
+
+ x86_mov(
+ func,
+ get_machine_base(),
+ x86_fn_arg( func, 1 ) );
+ x86_mov(
+ func,
+ get_const_base(),
+ x86_fn_arg( func, 2 ) );
+ x86_mov(
+ func,
+ get_immediate_base(),
+ x86_fn_arg( func, 3 ) );
+
+ if (parse.FullHeader.Processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
+ x86_mov(
+ func,
+ get_coef_base(),
+ x86_fn_arg( func, 4 ) );
+ }
+
+ x86_mov(
+ func,
+ get_sampler_base(),
+ x86_make_disp( get_machine_base(),
+ Offset( struct tgsi_exec_machine, Samplers ) ) );
+
+
+ while( !tgsi_parse_end_of_tokens( &parse ) && ok ) {
+ tgsi_parse_token( &parse );
+
+ switch( parse.FullToken.Token.Type ) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ if (parse.FullHeader.Processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
+ emit_declaration(
+ func,
+ &parse.FullToken.FullDeclaration );
+ }
+ break;
+
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ ok = emit_instruction(
+ func,
+ &parse.FullToken.FullInstruction );
+
+ if (!ok) {
+ uint opcode = parse.FullToken.FullInstruction.Instruction.Opcode;
+ debug_printf("failed to translate tgsi opcode %d (%s) to SSE (%s)\n",
+ opcode,
+ tgsi_get_opcode_name(opcode),
+ parse.FullHeader.Processor.Processor == TGSI_PROCESSOR_VERTEX ?
+ "vertex shader" : "fragment shader");
+ }
+ break;
+
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ /* simply copy the immediate values into the next immediates[] slot */
+ {
+ const uint size = parse.FullToken.FullImmediate.Immediate.NrTokens - 1;
+ uint i;
+ assert(size <= 4);
+ assert(num_immediates < TGSI_EXEC_NUM_IMMEDIATES);
+ for( i = 0; i < size; i++ ) {
+ immediates[num_immediates][i] =
+ parse.FullToken.FullImmediate.u[i].Float;
+ }
+#if 0
+ debug_printf("SSE FS immediate[%d] = %f %f %f %f\n",
+ num_immediates,
+ immediates[num_immediates][0],
+ immediates[num_immediates][1],
+ immediates[num_immediates][2],
+ immediates[num_immediates][3]);
+#endif
+ num_immediates++;
+ }
+ break;
+
+ default:
+ ok = 0;
+ assert( 0 );
+ }
+ }
+
+ if (parse.FullHeader.Processor.Processor == TGSI_PROCESSOR_VERTEX) {
+ if (do_swizzles)
+ soa_to_aos( func,
+ 7, /* aos_output */
+ 1, /* machine */
+ 8, /* num_outputs */
+ 9 ); /* output_stride */
+ }
+
+ /* Can't just use EBX, EDI without save/restoring them:
+ */
+ x86_pop( func, x86_make_reg( file_REG32, reg_DI ) );
+ x86_pop( func, x86_make_reg( file_REG32, reg_BX ) );
+
+ emit_ret( func );
+
+ tgsi_parse_free( &parse );
+
+ return ok;
+}
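+
+/*
+ * A minimal usage sketch (hypothetical caller, for illustration only --
+ * the variable names are made up):
+ *
+ *    struct x86_function func;
+ *    float imms[TGSI_EXEC_NUM_IMMEDIATES][4];
+ *
+ *    x86_init_func( &func );
+ *    if (tgsi_emit_sse2( tokens, &func, imms, FALSE )) {
+ *       tgsi_sse2_fs_function fs =
+ *          (tgsi_sse2_fs_function) x86_get_func( &func );
+ *       fs( machine, constants, imms, coefs );
+ *    }
+ */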
+
+#endif /* PIPE_ARCH_X86 */
+
diff --git a/src/gallium/auxiliary/tgsi/tgsi_sse2.h b/src/gallium/auxiliary/tgsi/tgsi_sse2.h
new file mode 100644
index 0000000000..d81ee3d00e
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_sse2.h
@@ -0,0 +1,77 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TGSI_SSE2_H
+#define TGSI_SSE2_H
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+struct tgsi_token;
+struct x86_function;
+struct tgsi_interp_coef;
+
+unsigned
+tgsi_emit_sse2(
+ const struct tgsi_token *tokens,
+ struct x86_function *function,
+ float (*immediates)[4],
+ boolean do_swizzles );
+
+
+/* Prototype of the function generated when do_swizzles is false
+ * -- effectively for fragment shaders.
+ */
+typedef void (PIPE_CDECL *tgsi_sse2_fs_function) (
+ struct tgsi_exec_machine *machine, /* 1 */
+ const float (*constant)[4], /* 2 */
+ const float (*immediate)[4], /* 3 */
+ const struct tgsi_interp_coef *coef /* 4 */
+ );
+
+
+/* Prototype of the function generated when do_swizzles is true
+ * -- effectively for vertex shaders.
+ */
+typedef void (PIPE_CDECL *tgsi_sse2_vs_func) (
+ struct tgsi_exec_machine *machine, /* 1 */
+ const float (*constant)[4], /* 2 */
+ const float (*immediate)[4], /* 3 */
+ const float (*aos_input)[4], /* 4 */
+ uint num_inputs, /* 5 */
+ uint input_stride, /* 6 */
+ float (*aos_output)[4], /* 7 */
+ uint num_outputs, /* 8 */
+ uint output_stride ); /* 9 */
+
+
+#if defined __cplusplus
+}
+#endif
+
+#endif /* TGSI_SSE2_H */
diff --git a/src/gallium/auxiliary/tgsi/tgsi_text.c b/src/gallium/auxiliary/tgsi/tgsi_text.c
new file mode 100644
index 0000000000..d438450b1e
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_text.c
@@ -0,0 +1,1157 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+#include "tgsi_text.h"
+#include "tgsi_build.h"
+#include "tgsi_info.h"
+#include "tgsi_parse.h"
+#include "tgsi_sanity.h"
+#include "tgsi_util.h"
+
+static boolean is_alpha_underscore( const char *cur )
+{
+ return
+ (*cur >= 'a' && *cur <= 'z') ||
+ (*cur >= 'A' && *cur <= 'Z') ||
+ *cur == '_';
+}
+
+static boolean is_digit( const char *cur )
+{
+ return *cur >= '0' && *cur <= '9';
+}
+
+static boolean is_digit_alpha_underscore( const char *cur )
+{
+ return is_digit( cur ) || is_alpha_underscore( cur );
+}
+
+static char uprcase( char c )
+{
+   if (c >= 'a' && c <= 'z')
+      return c + ('A' - 'a');
+   return c;
+}
+
+static boolean str_match_no_case( const char **pcur, const char *str )
+{
+ const char *cur = *pcur;
+
+ while (*str != '\0' && *str == uprcase( *cur )) {
+ str++;
+ cur++;
+ }
+ if (*str == '\0') {
+ *pcur = cur;
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/* Eat zero or more whitespace characters.
+ */
+static void eat_opt_white( const char **pcur )
+{
+ while (**pcur == ' ' || **pcur == '\t' || **pcur == '\n')
+ (*pcur)++;
+}
+
+/* Eat one or more whitespace characters.
+ * Return TRUE if at least one whitespace character was eaten.
+ */
+static boolean eat_white( const char **pcur )
+{
+ const char *cur = *pcur;
+
+ eat_opt_white( pcur );
+ return *pcur > cur;
+}
+
+/* Parse unsigned integer.
+ * No checks for overflow.
+ */
+static boolean parse_uint( const char **pcur, uint *val )
+{
+ const char *cur = *pcur;
+
+ if (is_digit( cur )) {
+ *val = *cur++ - '0';
+ while (is_digit( cur ))
+ *val = *val * 10 + *cur++ - '0';
+ *pcur = cur;
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/* Parse floating point.
+ */
+static boolean parse_float( const char **pcur, float *val )
+{
+ const char *cur = *pcur;
+ boolean integral_part = FALSE;
+ boolean fractional_part = FALSE;
+
+ *val = (float) atof( cur );
+
+ if (*cur == '-' || *cur == '+')
+ cur++;
+ if (is_digit( cur )) {
+ cur++;
+ integral_part = TRUE;
+ while (is_digit( cur ))
+ cur++;
+ }
+ if (*cur == '.') {
+ cur++;
+ if (is_digit( cur )) {
+ cur++;
+ fractional_part = TRUE;
+ while (is_digit( cur ))
+ cur++;
+ }
+ }
+ if (!integral_part && !fractional_part)
+ return FALSE;
+ if (uprcase( *cur ) == 'E') {
+ cur++;
+ if (*cur == '-' || *cur == '+')
+ cur++;
+ if (is_digit( cur )) {
+ cur++;
+ while (is_digit( cur ))
+ cur++;
+ }
+ else
+ return FALSE;
+ }
+ *pcur = cur;
+ return TRUE;
+}
+
+struct translate_ctx
+{
+ const char *text;
+ const char *cur;
+ struct tgsi_token *tokens;
+ struct tgsi_token *tokens_cur;
+ struct tgsi_token *tokens_end;
+ struct tgsi_header *header;
+};
+
+static void report_error( struct translate_ctx *ctx, const char *msg )
+{
+ debug_printf( "\nError: %s", msg );
+}
+
+/* Parse shader header.
+ * Return TRUE for one of the following headers.
+ * FRAG1.1
+ * GEOM1.1
+ * VERT1.1
+ */
+static boolean parse_header( struct translate_ctx *ctx )
+{
+ uint processor;
+
+ if (str_match_no_case( &ctx->cur, "FRAG1.1" ))
+ processor = TGSI_PROCESSOR_FRAGMENT;
+ else if (str_match_no_case( &ctx->cur, "VERT1.1" ))
+ processor = TGSI_PROCESSOR_VERTEX;
+ else if (str_match_no_case( &ctx->cur, "GEOM1.1" ))
+ processor = TGSI_PROCESSOR_GEOMETRY;
+ else {
+ report_error( ctx, "Unknown header" );
+ return FALSE;
+ }
+
+ if (ctx->tokens_cur >= ctx->tokens_end)
+ return FALSE;
+ *(struct tgsi_version *) ctx->tokens_cur++ = tgsi_build_version();
+
+ if (ctx->tokens_cur >= ctx->tokens_end)
+ return FALSE;
+ ctx->header = (struct tgsi_header *) ctx->tokens_cur++;
+ *ctx->header = tgsi_build_header();
+
+ if (ctx->tokens_cur >= ctx->tokens_end)
+ return FALSE;
+ *(struct tgsi_processor *) ctx->tokens_cur++ = tgsi_build_processor( processor, ctx->header );
+
+ return TRUE;
+}
+
+static boolean parse_label( struct translate_ctx *ctx, uint *val )
+{
+ const char *cur = ctx->cur;
+
+ if (parse_uint( &cur, val )) {
+ eat_opt_white( &cur );
+ if (*cur == ':') {
+ cur++;
+ ctx->cur = cur;
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+static const char *file_names[TGSI_FILE_COUNT] =
+{
+ "NULL",
+ "CONST",
+ "IN",
+ "OUT",
+ "TEMP",
+ "SAMP",
+ "ADDR",
+ "IMM",
+ "LOOP"
+};
+
+static boolean
+parse_file( const char **pcur, uint *file )
+{
+ uint i;
+
+ for (i = 0; i < TGSI_FILE_COUNT; i++) {
+ const char *cur = *pcur;
+
+ if (str_match_no_case( &cur, file_names[i] )) {
+ if (!is_digit_alpha_underscore( cur )) {
+ *pcur = cur;
+ *file = i;
+ return TRUE;
+ }
+ }
+ }
+ return FALSE;
+}
+
+static boolean
+parse_opt_writemask(
+ struct translate_ctx *ctx,
+ uint *writemask )
+{
+ const char *cur;
+
+ cur = ctx->cur;
+ eat_opt_white( &cur );
+ if (*cur == '.') {
+ cur++;
+ *writemask = TGSI_WRITEMASK_NONE;
+ eat_opt_white( &cur );
+ if (uprcase( *cur ) == 'X') {
+ cur++;
+ *writemask |= TGSI_WRITEMASK_X;
+ }
+ if (uprcase( *cur ) == 'Y') {
+ cur++;
+ *writemask |= TGSI_WRITEMASK_Y;
+ }
+ if (uprcase( *cur ) == 'Z') {
+ cur++;
+ *writemask |= TGSI_WRITEMASK_Z;
+ }
+ if (uprcase( *cur ) == 'W') {
+ cur++;
+ *writemask |= TGSI_WRITEMASK_W;
+ }
+
+ if (*writemask == TGSI_WRITEMASK_NONE) {
+ report_error( ctx, "Writemask expected" );
+ return FALSE;
+ }
+
+ ctx->cur = cur;
+ }
+ else {
+ *writemask = TGSI_WRITEMASK_XYZW;
+ }
+ return TRUE;
+}
+
+/* <register_file_bracket> ::= <file> `['
+ */
+static boolean
+parse_register_file_bracket(
+ struct translate_ctx *ctx,
+ uint *file )
+{
+ if (!parse_file( &ctx->cur, file )) {
+ report_error( ctx, "Unknown register file" );
+ return FALSE;
+ }
+ eat_opt_white( &ctx->cur );
+ if (*ctx->cur != '[') {
+ report_error( ctx, "Expected `['" );
+ return FALSE;
+ }
+ ctx->cur++;
+ return TRUE;
+}
+
+/* <register_file_bracket_index> ::= <register_file_bracket> <uint>
+ */
+static boolean
+parse_register_file_bracket_index(
+ struct translate_ctx *ctx,
+ uint *file,
+ int *index )
+{
+ uint uindex;
+
+ if (!parse_register_file_bracket( ctx, file ))
+ return FALSE;
+ eat_opt_white( &ctx->cur );
+ if (!parse_uint( &ctx->cur, &uindex )) {
+ report_error( ctx, "Expected literal unsigned integer" );
+ return FALSE;
+ }
+ *index = (int) uindex;
+ return TRUE;
+}
+
+/* Parse destination register operand.
+ * <register_dst> ::= <register_file_bracket_index> `]'
+ */
+static boolean
+parse_register_dst(
+ struct translate_ctx *ctx,
+ uint *file,
+ int *index )
+{
+ if (!parse_register_file_bracket_index( ctx, file, index ))
+ return FALSE;
+ eat_opt_white( &ctx->cur );
+ if (*ctx->cur != ']') {
+ report_error( ctx, "Expected `]'" );
+ return FALSE;
+ }
+ ctx->cur++;
+ return TRUE;
+}
+
+/* Parse source register operand.
+ * <register_src> ::= <register_file_bracket_index> `]' |
+ * <register_file_bracket> <register_dst> [`.' (`x' | `y' | `z' | `w')] `]' |
+ * <register_file_bracket> <register_dst> [`.' (`x' | `y' | `z' | `w')] `+' <uint> `]' |
+ * <register_file_bracket> <register_dst> [`.' (`x' | `y' | `z' | `w')] `-' <uint> `]'
+ */
+static boolean
+parse_register_src(
+ struct translate_ctx *ctx,
+ uint *file,
+ int *index,
+ uint *ind_file,
+ int *ind_index,
+ uint *ind_comp)
+{
+ const char *cur;
+ uint uindex;
+
+ *ind_comp = TGSI_SWIZZLE_X;
+ if (!parse_register_file_bracket( ctx, file ))
+ return FALSE;
+ eat_opt_white( &ctx->cur );
+ cur = ctx->cur;
+ if (parse_file( &cur, ind_file )) {
+ if (!parse_register_dst( ctx, ind_file, ind_index ))
+ return FALSE;
+ eat_opt_white( &ctx->cur );
+
+ if (*ctx->cur == '.') {
+ ctx->cur++;
+ eat_opt_white(&ctx->cur);
+
+ switch (uprcase(*ctx->cur)) {
+ case 'X':
+ *ind_comp = TGSI_SWIZZLE_X;
+ break;
+ case 'Y':
+ *ind_comp = TGSI_SWIZZLE_Y;
+ break;
+ case 'Z':
+ *ind_comp = TGSI_SWIZZLE_Z;
+ break;
+ case 'W':
+ *ind_comp = TGSI_SWIZZLE_W;
+ break;
+ default:
+ report_error(ctx, "Expected indirect register swizzle component `x', `y', `z' or `w'");
+ return FALSE;
+ }
+ ctx->cur++;
+ eat_opt_white(&ctx->cur);
+ }
+
+ if (*ctx->cur == '+' || *ctx->cur == '-') {
+ boolean negate;
+
+ negate = *ctx->cur == '-';
+ ctx->cur++;
+ eat_opt_white( &ctx->cur );
+ if (!parse_uint( &ctx->cur, &uindex )) {
+ report_error( ctx, "Expected literal unsigned integer" );
+ return FALSE;
+ }
+ if (negate)
+ *index = -(int) uindex;
+ else
+ *index = (int) uindex;
+ }
+ else {
+ *index = 0;
+ }
+ }
+ else {
+ if (!parse_uint( &ctx->cur, &uindex )) {
+ report_error( ctx, "Expected literal unsigned integer" );
+ return FALSE;
+ }
+ *index = (int) uindex;
+ *ind_file = TGSI_FILE_NULL;
+ *ind_index = 0;
+ }
+ eat_opt_white( &ctx->cur );
+ if (*ctx->cur != ']') {
+ report_error( ctx, "Expected `]'" );
+ return FALSE;
+ }
+ ctx->cur++;
+ return TRUE;
+}
+
+/* Parse register declaration.
+ * <register_dcl> ::= <register_file_bracket_index> `]' |
+ * <register_file_bracket_index> `..' <index> `]'
+ */
+static boolean
+parse_register_dcl(
+ struct translate_ctx *ctx,
+ uint *file,
+ int *first,
+ int *last )
+{
+ if (!parse_register_file_bracket_index( ctx, file, first ))
+ return FALSE;
+ eat_opt_white( &ctx->cur );
+ if (ctx->cur[0] == '.' && ctx->cur[1] == '.') {
+ uint uindex;
+
+ ctx->cur += 2;
+ eat_opt_white( &ctx->cur );
+ if (!parse_uint( &ctx->cur, &uindex )) {
+ report_error( ctx, "Expected literal integer" );
+ return FALSE;
+ }
+ *last = (int) uindex;
+ eat_opt_white( &ctx->cur );
+ }
+ else {
+ *last = *first;
+ }
+ if (*ctx->cur != ']') {
+ report_error( ctx, "Expected `]' or `..'" );
+ return FALSE;
+ }
+ ctx->cur++;
+ return TRUE;
+}
+
+static const char *modulate_names[TGSI_MODULATE_COUNT] =
+{
+ "_1X",
+ "_2X",
+ "_4X",
+ "_8X",
+ "_D2",
+ "_D4",
+ "_D8"
+};
+
+static boolean
+parse_dst_operand(
+ struct translate_ctx *ctx,
+ struct tgsi_full_dst_register *dst )
+{
+ uint file;
+ int index;
+ uint writemask;
+ const char *cur;
+
+ if (!parse_register_dst( ctx, &file, &index ))
+ return FALSE;
+
+ cur = ctx->cur;
+ eat_opt_white( &cur );
+ if (*cur == '_') {
+ uint i;
+
+ for (i = 0; i < TGSI_MODULATE_COUNT; i++) {
+ if (str_match_no_case( &cur, modulate_names[i] )) {
+ if (!is_digit_alpha_underscore( cur )) {
+ dst->DstRegisterExtModulate.Modulate = i;
+ ctx->cur = cur;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!parse_opt_writemask( ctx, &writemask ))
+ return FALSE;
+
+ dst->DstRegister.File = file;
+ dst->DstRegister.Index = index;
+ dst->DstRegister.WriteMask = writemask;
+ return TRUE;
+}
+
+static boolean
+parse_optional_swizzle(
+ struct translate_ctx *ctx,
+ uint swizzle[4],
+ boolean *parsed_swizzle,
+ boolean *parsed_extswizzle )
+{
+ const char *cur = ctx->cur;
+
+ *parsed_swizzle = FALSE;
+ *parsed_extswizzle = FALSE;
+
+ eat_opt_white( &cur );
+ if (*cur == '.') {
+ uint i;
+
+ cur++;
+ eat_opt_white( &cur );
+ for (i = 0; i < 4; i++) {
+ if (uprcase( *cur ) == 'X')
+ swizzle[i] = TGSI_SWIZZLE_X;
+ else if (uprcase( *cur ) == 'Y')
+ swizzle[i] = TGSI_SWIZZLE_Y;
+ else if (uprcase( *cur ) == 'Z')
+ swizzle[i] = TGSI_SWIZZLE_Z;
+ else if (uprcase( *cur ) == 'W')
+ swizzle[i] = TGSI_SWIZZLE_W;
+ else {
+ if (*cur == '0')
+ swizzle[i] = TGSI_EXTSWIZZLE_ZERO;
+ else if (*cur == '1')
+ swizzle[i] = TGSI_EXTSWIZZLE_ONE;
+ else {
+ report_error( ctx, "Expected register swizzle component `x', `y', `z', `w', `0' or `1'" );
+ return FALSE;
+ }
+ *parsed_extswizzle = TRUE;
+ }
+ cur++;
+ }
+ *parsed_swizzle = TRUE;
+ ctx->cur = cur;
+ }
+ return TRUE;
+}
+
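+/* Parse a full source operand.  Besides a plain register reference this
+ * accepts the extended modifiers handled below: negation `-reg' or
+ * `-(reg)', absolute value `|reg|', scale `2*(reg)', bias `(reg)-0.5',
+ * complement `1-(reg)', plus an optional swizzle suffix.
+ */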
+static boolean
+parse_src_operand(
+ struct translate_ctx *ctx,
+ struct tgsi_full_src_register *src )
+{
+ const char *cur;
+ float value;
+ uint file;
+ int index;
+ uint ind_file;
+ int ind_index;
+ uint ind_comp;
+ uint swizzle[4];
+ boolean parsed_ext_negate_paren = FALSE;
+ boolean parsed_swizzle;
+ boolean parsed_extswizzle;
+
+ if (*ctx->cur == '-') {
+ cur = ctx->cur;
+ cur++;
+ eat_opt_white( &cur );
+ if (*cur == '(') {
+ cur++;
+ src->SrcRegisterExtMod.Negate = 1;
+ eat_opt_white( &cur );
+ ctx->cur = cur;
+ parsed_ext_negate_paren = TRUE;
+ }
+ else if (*cur == '|') {
+ cur++;
+ src->SrcRegisterExtMod.Negate = 1;
+ src->SrcRegisterExtMod.Absolute = 1;
+ eat_opt_white(&cur);
+ ctx->cur = cur;
+ }
+ }
+ else if (*ctx->cur == '|') {
+ ctx->cur++;
+ eat_opt_white( &ctx->cur );
+ src->SrcRegisterExtMod.Absolute = 1;
+ }
+
+ if (*ctx->cur == '-') {
+ ctx->cur++;
+ eat_opt_white( &ctx->cur );
+ src->SrcRegister.Negate = 1;
+ }
+
+ cur = ctx->cur;
+ if (parse_float( &cur, &value )) {
+ if (value == 2.0f) {
+ eat_opt_white( &cur );
+ if (*cur != '*') {
+ report_error( ctx, "Expected `*'" );
+ return FALSE;
+ }
+ cur++;
+ if (*cur != '(') {
+ report_error( ctx, "Expected `('" );
+ return FALSE;
+ }
+ cur++;
+ src->SrcRegisterExtMod.Scale2X = 1;
+ eat_opt_white( &cur );
+ ctx->cur = cur;
+ }
+ }
+
+ if (*ctx->cur == '(') {
+ ctx->cur++;
+ eat_opt_white( &ctx->cur );
+ src->SrcRegisterExtMod.Bias = 1;
+ }
+
+ cur = ctx->cur;
+ if (parse_float( &cur, &value )) {
+ if (value == 1.0f) {
+ eat_opt_white( &cur );
+ if (*cur != '-') {
+ report_error( ctx, "Expected `-'" );
+ return FALSE;
+ }
+ cur++;
+ if (*cur != '(') {
+ report_error( ctx, "Expected `('" );
+ return FALSE;
+ }
+ cur++;
+ src->SrcRegisterExtMod.Complement = 1;
+ eat_opt_white( &cur );
+ ctx->cur = cur;
+ }
+ }
+
+ if (!parse_register_src(ctx, &file, &index, &ind_file, &ind_index, &ind_comp))
+ return FALSE;
+ src->SrcRegister.File = file;
+ src->SrcRegister.Index = index;
+ if (ind_file != TGSI_FILE_NULL) {
+ src->SrcRegister.Indirect = 1;
+ src->SrcRegisterInd.File = ind_file;
+ src->SrcRegisterInd.Index = ind_index;
+ src->SrcRegisterInd.SwizzleX = ind_comp;
+ src->SrcRegisterInd.SwizzleY = ind_comp;
+ src->SrcRegisterInd.SwizzleZ = ind_comp;
+ src->SrcRegisterInd.SwizzleW = ind_comp;
+ }
+
+ /* Parse optional swizzle.
+ */
+ if (parse_optional_swizzle( ctx, swizzle, &parsed_swizzle, &parsed_extswizzle )) {
+ if (parsed_extswizzle) {
+ assert( parsed_swizzle );
+
+ src->SrcRegisterExtSwz.ExtSwizzleX = swizzle[0];
+ src->SrcRegisterExtSwz.ExtSwizzleY = swizzle[1];
+ src->SrcRegisterExtSwz.ExtSwizzleZ = swizzle[2];
+ src->SrcRegisterExtSwz.ExtSwizzleW = swizzle[3];
+ }
+ else if (parsed_swizzle) {
+ src->SrcRegister.SwizzleX = swizzle[0];
+ src->SrcRegister.SwizzleY = swizzle[1];
+ src->SrcRegister.SwizzleZ = swizzle[2];
+ src->SrcRegister.SwizzleW = swizzle[3];
+ }
+ }
+
+ if (src->SrcRegisterExtMod.Complement) {
+ eat_opt_white( &ctx->cur );
+ if (*ctx->cur != ')') {
+ report_error( ctx, "Expected `)'" );
+ return FALSE;
+ }
+ ctx->cur++;
+ }
+
+ if (src->SrcRegisterExtMod.Bias) {
+ eat_opt_white( &ctx->cur );
+ if (*ctx->cur != ')') {
+ report_error( ctx, "Expected `)'" );
+ return FALSE;
+ }
+ ctx->cur++;
+ eat_opt_white( &ctx->cur );
+ if (*ctx->cur != '-') {
+ report_error( ctx, "Expected `-'" );
+ return FALSE;
+ }
+ ctx->cur++;
+ eat_opt_white( &ctx->cur );
+ if (!parse_float( &ctx->cur, &value )) {
+ report_error( ctx, "Expected literal floating point" );
+ return FALSE;
+ }
+ if (value != 0.5f) {
+ report_error( ctx, "Expected 0.5" );
+ return FALSE;
+ }
+ }
+
+ if (src->SrcRegisterExtMod.Scale2X) {
+ eat_opt_white( &ctx->cur );
+ if (*ctx->cur != ')') {
+ report_error( ctx, "Expected `)'" );
+ return FALSE;
+ }
+ ctx->cur++;
+ }
+
+ if (src->SrcRegisterExtMod.Absolute) {
+ eat_opt_white( &ctx->cur );
+ if (*ctx->cur != '|') {
+ report_error( ctx, "Expected `|'" );
+ return FALSE;
+ }
+ ctx->cur++;
+ }
+
+ if (parsed_ext_negate_paren) {
+ eat_opt_white( &ctx->cur );
+ if (*ctx->cur != ')') {
+ report_error( ctx, "Expected `)'" );
+ return FALSE;
+ }
+ ctx->cur++;
+ }
+
+ return TRUE;
+}
+
+static const char *texture_names[TGSI_TEXTURE_COUNT] =
+{
+ "UNKNOWN",
+ "1D",
+ "2D",
+ "3D",
+ "CUBE",
+ "RECT",
+ "SHADOW1D",
+ "SHADOW2D",
+ "SHADOWRECT"
+};
+
+static boolean
+match_inst_mnemonic(const char **pcur,
+ const struct tgsi_opcode_info *info)
+{
+ if (str_match_no_case(pcur, info->mnemonic)) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+static boolean
+parse_instruction(
+ struct translate_ctx *ctx,
+ boolean has_label )
+{
+ uint i;
+ uint saturate = TGSI_SAT_NONE;
+ const struct tgsi_opcode_info *info;
+ struct tgsi_full_instruction inst;
+ uint advance;
+
+ /* Parse instruction name.
+ */
+ eat_opt_white( &ctx->cur );
+ for (i = 0; i < TGSI_OPCODE_LAST; i++) {
+ const char *cur = ctx->cur;
+
+ info = tgsi_get_opcode_info( i );
+ if (match_inst_mnemonic(&cur, info)) {
+ if (str_match_no_case( &cur, "_SATNV" ))
+ saturate = TGSI_SAT_MINUS_PLUS_ONE;
+ else if (str_match_no_case( &cur, "_SAT" ))
+ saturate = TGSI_SAT_ZERO_ONE;
+
+ if (info->num_dst + info->num_src + info->is_tex == 0) {
+ if (!is_digit_alpha_underscore( cur )) {
+ ctx->cur = cur;
+ break;
+ }
+ }
+ else if (*cur == '\0' || eat_white( &cur )) {
+ ctx->cur = cur;
+ break;
+ }
+ }
+ }
+ if (i == TGSI_OPCODE_LAST) {
+ if (has_label)
+ report_error( ctx, "Unknown opcode" );
+ else
+ report_error( ctx, "Expected `DCL', `IMM' or a label" );
+ return FALSE;
+ }
+
+ inst = tgsi_default_full_instruction();
+ inst.Instruction.Opcode = i;
+ inst.Instruction.Saturate = saturate;
+ inst.Instruction.NumDstRegs = info->num_dst;
+ inst.Instruction.NumSrcRegs = info->num_src;
+
+ /* Parse instruction operands.
+ */
+ for (i = 0; i < info->num_dst + info->num_src + info->is_tex; i++) {
+ if (i > 0) {
+ eat_opt_white( &ctx->cur );
+ if (*ctx->cur != ',') {
+ report_error( ctx, "Expected `,'" );
+ return FALSE;
+ }
+ ctx->cur++;
+ eat_opt_white( &ctx->cur );
+ }
+
+ if (i < info->num_dst) {
+ if (!parse_dst_operand( ctx, &inst.FullDstRegisters[i] ))
+ return FALSE;
+ }
+ else if (i < info->num_dst + info->num_src) {
+ if (!parse_src_operand( ctx, &inst.FullSrcRegisters[i - info->num_dst] ))
+ return FALSE;
+ }
+ else {
+ uint j;
+
+ for (j = 0; j < TGSI_TEXTURE_COUNT; j++) {
+ if (str_match_no_case( &ctx->cur, texture_names[j] )) {
+ if (!is_digit_alpha_underscore( ctx->cur )) {
+ inst.InstructionExtTexture.Texture = j;
+ break;
+ }
+ }
+ }
+ if (j == TGSI_TEXTURE_COUNT) {
+ report_error( ctx, "Expected texture target" );
+ return FALSE;
+ }
+ }
+ }
+
+ if (info->is_branch) {
+ uint target;
+
+ eat_opt_white( &ctx->cur );
+ if (*ctx->cur != ':') {
+ report_error( ctx, "Expected `:'" );
+ return FALSE;
+ }
+ ctx->cur++;
+ eat_opt_white( &ctx->cur );
+ if (!parse_uint( &ctx->cur, &target )) {
+ report_error( ctx, "Expected a label" );
+ return FALSE;
+ }
+ inst.InstructionExtLabel.Label = target;
+ }
+
+ advance = tgsi_build_full_instruction(
+ &inst,
+ ctx->tokens_cur,
+ ctx->header,
+ (uint) (ctx->tokens_end - ctx->tokens_cur) );
+ if (advance == 0)
+ return FALSE;
+ ctx->tokens_cur += advance;
+
+ return TRUE;
+}
+
+static const char *semantic_names[TGSI_SEMANTIC_COUNT] =
+{
+ "POSITION",
+ "COLOR",
+ "BCOLOR",
+ "FOG",
+ "PSIZE",
+ "GENERIC",
+ "NORMAL",
+ "FACE"
+};
+
+static const char *interpolate_names[TGSI_INTERPOLATE_COUNT] =
+{
+ "CONSTANT",
+ "LINEAR",
+ "PERSPECTIVE"
+};
+
+static boolean parse_declaration( struct translate_ctx *ctx )
+{
+ struct tgsi_full_declaration decl;
+ uint file;
+ int first;
+ int last;
+ uint writemask;
+ const char *cur;
+ uint advance;
+
+ assert(Elements(semantic_names) == TGSI_SEMANTIC_COUNT);
+ assert(Elements(interpolate_names) == TGSI_INTERPOLATE_COUNT);
+
+ if (!eat_white( &ctx->cur )) {
+ report_error( ctx, "Syntax error" );
+ return FALSE;
+ }
+ if (!parse_register_dcl( ctx, &file, &first, &last ))
+ return FALSE;
+ if (!parse_opt_writemask( ctx, &writemask ))
+ return FALSE;
+
+ decl = tgsi_default_full_declaration();
+ decl.Declaration.File = file;
+ decl.Declaration.UsageMask = writemask;
+ decl.DeclarationRange.First = first;
+ decl.DeclarationRange.Last = last;
+
+ cur = ctx->cur;
+ eat_opt_white( &cur );
+ if (*cur == ',') {
+ uint i;
+
+ cur++;
+ eat_opt_white( &cur );
+ for (i = 0; i < TGSI_SEMANTIC_COUNT; i++) {
+ if (str_match_no_case( &cur, semantic_names[i] )) {
+ const char *cur2 = cur;
+ uint index;
+
+ if (is_digit_alpha_underscore( cur ))
+ continue;
+ eat_opt_white( &cur2 );
+ if (*cur2 == '[') {
+ cur2++;
+ eat_opt_white( &cur2 );
+ if (!parse_uint( &cur2, &index )) {
+ report_error( ctx, "Expected literal integer" );
+ return FALSE;
+ }
+ eat_opt_white( &cur2 );
+ if (*cur2 != ']') {
+ report_error( ctx, "Expected `]'" );
+ return FALSE;
+ }
+ cur2++;
+
+ decl.Semantic.SemanticIndex = index;
+
+ cur = cur2;
+ }
+
+ decl.Declaration.Semantic = 1;
+ decl.Semantic.SemanticName = i;
+
+ ctx->cur = cur;
+ break;
+ }
+ }
+ }
+
+ cur = ctx->cur;
+ eat_opt_white( &cur );
+ if (*cur == ',') {
+ uint i;
+
+ cur++;
+ eat_opt_white( &cur );
+ for (i = 0; i < TGSI_INTERPOLATE_COUNT; i++) {
+ if (str_match_no_case( &cur, interpolate_names[i] )) {
+ if (is_digit_alpha_underscore( cur ))
+ continue;
+ decl.Declaration.Interpolate = i;
+
+ ctx->cur = cur;
+ break;
+ }
+ }
+ if (i == TGSI_INTERPOLATE_COUNT) {
+ report_error( ctx, "Expected semantic or interpolate attribute" );
+ return FALSE;
+ }
+ }
+
+ advance = tgsi_build_full_declaration(
+ &decl,
+ ctx->tokens_cur,
+ ctx->header,
+ (uint) (ctx->tokens_end - ctx->tokens_cur) );
+ if (advance == 0)
+ return FALSE;
+ ctx->tokens_cur += advance;
+
+ return TRUE;
+}
+
+static boolean parse_immediate( struct translate_ctx *ctx )
+{
+ struct tgsi_full_immediate imm;
+ uint i;
+ float values[4];
+ uint advance;
+
+ if (!eat_white( &ctx->cur )) {
+ report_error( ctx, "Syntax error" );
+ return FALSE;
+ }
+ if (!str_match_no_case( &ctx->cur, "FLT32" ) || is_digit_alpha_underscore( ctx->cur )) {
+ report_error( ctx, "Expected `FLT32'" );
+ return FALSE;
+ }
+ eat_opt_white( &ctx->cur );
+ if (*ctx->cur != '{') {
+ report_error( ctx, "Expected `{'" );
+ return FALSE;
+ }
+ ctx->cur++;
+ for (i = 0; i < 4; i++) {
+ eat_opt_white( &ctx->cur );
+ if (i > 0) {
+ if (*ctx->cur != ',') {
+ report_error( ctx, "Expected `,'" );
+ return FALSE;
+ }
+ ctx->cur++;
+ eat_opt_white( &ctx->cur );
+ }
+ if (!parse_float( &ctx->cur, &values[i] )) {
+ report_error( ctx, "Expected literal floating point" );
+ return FALSE;
+ }
+ }
+ eat_opt_white( &ctx->cur );
+ if (*ctx->cur != '}') {
+ report_error( ctx, "Expected `}'" );
+ return FALSE;
+ }
+ ctx->cur++;
+
+ imm = tgsi_default_full_immediate();
+ imm.Immediate.NrTokens += 4;
+ imm.Immediate.DataType = TGSI_IMM_FLOAT32;
+ imm.u[0].Float = values[0];
+ imm.u[1].Float = values[1];
+ imm.u[2].Float = values[2];
+ imm.u[3].Float = values[3];
+
+ advance = tgsi_build_full_immediate(
+ &imm,
+ ctx->tokens_cur,
+ ctx->header,
+ (uint) (ctx->tokens_end - ctx->tokens_cur) );
+ if (advance == 0)
+ return FALSE;
+ ctx->tokens_cur += advance;
+
+ return TRUE;
+}
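+
+/* Illustrative example of the immediate syntax accepted above:
+ *
+ *    IMM FLT32 { 1.0, 0.0, 0.0, 1.0 }
+ *
+ * Exactly four comma-separated floats are required, and FLT32 is the only
+ * data type supported here.
+ */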
+
+static boolean translate( struct translate_ctx *ctx )
+{
+ eat_opt_white( &ctx->cur );
+ if (!parse_header( ctx ))
+ return FALSE;
+
+ while (*ctx->cur != '\0') {
+ uint label_val = 0;
+
+ if (!eat_white( &ctx->cur )) {
+ report_error( ctx, "Syntax error" );
+ return FALSE;
+ }
+
+ if (*ctx->cur == '\0')
+ break;
+
+ if (parse_label( ctx, &label_val )) {
+ if (!parse_instruction( ctx, TRUE ))
+ return FALSE;
+ }
+ else if (str_match_no_case( &ctx->cur, "DCL" )) {
+ if (!parse_declaration( ctx ))
+ return FALSE;
+ }
+ else if (str_match_no_case( &ctx->cur, "IMM" )) {
+ if (!parse_immediate( ctx ))
+ return FALSE;
+ }
+ else if (!parse_instruction( ctx, FALSE )) {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+boolean
+tgsi_text_translate(
+ const char *text,
+ struct tgsi_token *tokens,
+ uint num_tokens )
+{
+ struct translate_ctx ctx;
+
+ ctx.text = text;
+ ctx.cur = text;
+ ctx.tokens = tokens;
+ ctx.tokens_cur = tokens;
+ ctx.tokens_end = tokens + num_tokens;
+
+ if (!translate( &ctx ))
+ return FALSE;
+
+ return tgsi_sanity_check( tokens );
+}
diff --git a/src/gallium/auxiliary/tgsi/tgsi_text.h b/src/gallium/auxiliary/tgsi/tgsi_text.h
new file mode 100644
index 0000000000..8eeeeef140
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_text.h
@@ -0,0 +1,47 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TGSI_TEXT_H
+#define TGSI_TEXT_H
+
+#include "pipe/p_shader_tokens.h"
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
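+/* Translate TGSI assembly text into binary tokens, writing at most
+ * num_tokens tokens into the caller-provided buffer.  Usage sketch
+ * (illustrative; the shader string is the one used by
+ * tgsi_transform_foo() in tgsi_transform.c):
+ *
+ *    struct tgsi_token tokens[1024];
+ *
+ *    if (!tgsi_text_translate( "FRAG1.1\n"
+ *                              "DCL IN[0], COLOR, CONSTANT\n"
+ *                              "DCL OUT[0], COLOR\n"
+ *                              " 0: MOV OUT[0], IN[0]\n"
+ *                              " 1: END",
+ *                              tokens,
+ *                              Elements(tokens) ))
+ *       debug_printf("tgsi_text_translate failed\n");
+ *
+ * The result is run through tgsi_sanity_check() before TRUE is returned.
+ */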
+boolean
+tgsi_text_translate(
+ const char *text,
+ struct tgsi_token *tokens,
+ uint num_tokens );
+
+#if defined __cplusplus
+}
+#endif
+
+#endif /* TGSI_TEXT_H */
diff --git a/src/gallium/auxiliary/tgsi/tgsi_transform.c b/src/gallium/auxiliary/tgsi/tgsi_transform.c
new file mode 100644
index 0000000000..bc9c18fd4a
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_transform.c
@@ -0,0 +1,227 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * TGSI program transformation utility.
+ *
+ * Authors: Brian Paul
+ */
+
+#include "util/u_debug.h"
+
+#include "tgsi_transform.h"
+
+
+
+static void
+emit_instruction(struct tgsi_transform_context *ctx,
+ const struct tgsi_full_instruction *inst)
+{
+ uint ti = ctx->ti;
+
+ ti += tgsi_build_full_instruction(inst,
+ ctx->tokens_out + ti,
+ ctx->header,
+ ctx->max_tokens_out - ti);
+ ctx->ti = ti;
+}
+
+
+static void
+emit_declaration(struct tgsi_transform_context *ctx,
+ const struct tgsi_full_declaration *decl)
+{
+ uint ti = ctx->ti;
+
+ ti += tgsi_build_full_declaration(decl,
+ ctx->tokens_out + ti,
+ ctx->header,
+ ctx->max_tokens_out - ti);
+ ctx->ti = ti;
+}
+
+
+static void
+emit_immediate(struct tgsi_transform_context *ctx,
+ const struct tgsi_full_immediate *imm)
+{
+ uint ti = ctx->ti;
+
+ ti += tgsi_build_full_immediate(imm,
+ ctx->tokens_out + ti,
+ ctx->header,
+ ctx->max_tokens_out - ti);
+ ctx->ti = ti;
+}
+
+
+
+/**
+ * Apply user-defined transformations to the input shader to produce
+ * the output shader.
+ * For example, a register search-and-replace operation could be applied
+ * by defining a transform_instruction() callback that examines and changes
+ * the instruction's src/dst registers (a minimal sketch of such a callback
+ * follows this function).
+ *
+ * \return number of tokens emitted
+ */
+int
+tgsi_transform_shader(const struct tgsi_token *tokens_in,
+ struct tgsi_token *tokens_out,
+ uint max_tokens_out,
+ struct tgsi_transform_context *ctx)
+{
+ uint procType;
+
+ /* input shader */
+ struct tgsi_parse_context parse;
+
+ /* output shader */
+ struct tgsi_processor *processor;
+
+
+ /**
+ ** callback context init
+ **/
+ ctx->emit_instruction = emit_instruction;
+ ctx->emit_declaration = emit_declaration;
+ ctx->emit_immediate = emit_immediate;
+ ctx->tokens_out = tokens_out;
+ ctx->max_tokens_out = max_tokens_out;
+
+
+ /**
+ ** Setup to begin parsing input shader
+ **/
+ if (tgsi_parse_init( &parse, tokens_in ) != TGSI_PARSE_OK) {
+ debug_printf("tgsi_parse_init() failed in tgsi_transform_shader()!\n");
+ return -1;
+ }
+ procType = parse.FullHeader.Processor.Processor;
+ assert(procType == TGSI_PROCESSOR_FRAGMENT ||
+ procType == TGSI_PROCESSOR_VERTEX ||
+ procType == TGSI_PROCESSOR_GEOMETRY);
+
+
+ /**
+ ** Setup output shader
+ **/
+ *(struct tgsi_version *) &tokens_out[0] = tgsi_build_version();
+
+ ctx->header = (struct tgsi_header *) (tokens_out + 1);
+ *ctx->header = tgsi_build_header();
+
+ processor = (struct tgsi_processor *) (tokens_out + 2);
+ *processor = tgsi_build_processor( procType, ctx->header );
+
+ ctx->ti = 3;
+
+
+ /**
+ ** Loop over incoming program tokens/instructions
+ */
+ while( !tgsi_parse_end_of_tokens( &parse ) ) {
+
+ tgsi_parse_token( &parse );
+
+ switch( parse.FullToken.Token.Type ) {
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ {
+ struct tgsi_full_instruction *fullinst
+ = &parse.FullToken.FullInstruction;
+
+ if (ctx->transform_instruction)
+ ctx->transform_instruction(ctx, fullinst);
+ else
+ ctx->emit_instruction(ctx, fullinst);
+ }
+ break;
+
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ {
+ struct tgsi_full_declaration *fulldecl
+ = &parse.FullToken.FullDeclaration;
+
+ if (ctx->transform_declaration)
+ ctx->transform_declaration(ctx, fulldecl);
+ else
+ ctx->emit_declaration(ctx, fulldecl);
+ }
+ break;
+
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ {
+ struct tgsi_full_immediate *fullimm
+ = &parse.FullToken.FullImmediate;
+
+ if (ctx->transform_immediate)
+ ctx->transform_immediate(ctx, fullimm);
+ else
+ ctx->emit_immediate(ctx, fullimm);
+ }
+ break;
+
+ default:
+ assert( 0 );
+ }
+ }
+
+ if (ctx->epilog) {
+ ctx->epilog(ctx);
+ }
+
+ tgsi_parse_free (&parse);
+
+ return ctx->ti;
+}
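+
+/* A minimal sketch of the register search-and-replace callback mentioned
+ * above (hypothetical example, not part of this patch; field names follow
+ * the tgsi_full_instruction layout used in this tree):
+ *
+ *    static void
+ *    xform_inst(struct tgsi_transform_context *ctx,
+ *               struct tgsi_full_instruction *inst)
+ *    {
+ *       uint i;
+ *       for (i = 0; i < inst->Instruction.NumDstRegs; i++) {
+ *          struct tgsi_full_dst_register *dst = &inst->FullDstRegisters[i];
+ *          if (dst->DstRegister.File == TGSI_FILE_TEMPORARY &&
+ *              dst->DstRegister.Index == 0)
+ *             dst->DstRegister.Index = 1;
+ *       }
+ *       ctx->emit_instruction(ctx, inst);
+ *    }
+ *
+ * Installing this as ctx->transform_instruction redirects all writes to
+ * TEMP[0] into TEMP[1] while copying everything else through unchanged.
+ */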
+
+
+#include "tgsi_text.h"
+
+extern int tgsi_transform_foo( struct tgsi_token *tokens_out,
+ uint max_tokens_out );
+
+/* This function exists only so that tgsi_text_translate() doesn't get
+ * magic-ed out of the libtgsi.a archive by the build system. Don't
+ * remove unless you know this has been fixed - check on mingw/scons
+ * builds as well.
+ */
+int
+tgsi_transform_foo( struct tgsi_token *tokens_out,
+ uint max_tokens_out )
+{
+ const char *text =
+ "FRAG1.1\n"
+ "DCL IN[0], COLOR, CONSTANT\n"
+ "DCL OUT[0], COLOR\n"
+ " 0: MOV OUT[0], IN[0]\n"
+ " 1: END";
+
+ return tgsi_text_translate( text,
+ tokens_out,
+ max_tokens_out );
+}
diff --git a/src/gallium/auxiliary/tgsi/tgsi_transform.h b/src/gallium/auxiliary/tgsi/tgsi_transform.h
new file mode 100644
index 0000000000..a121adbaef
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_transform.h
@@ -0,0 +1,92 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TGSI_TRANSFORM_H
+#define TGSI_TRANSFORM_H
+
+
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_build.h"
+
+
+
+/**
+ * Subclass this to add caller-specific data
+ */
+struct tgsi_transform_context
+{
+/*** PUBLIC ***/
+
+ /**
+ * User-defined callbacks invoked per instruction.
+ */
+ void (*transform_instruction)(struct tgsi_transform_context *ctx,
+ struct tgsi_full_instruction *inst);
+
+ void (*transform_declaration)(struct tgsi_transform_context *ctx,
+ struct tgsi_full_declaration *decl);
+
+ void (*transform_immediate)(struct tgsi_transform_context *ctx,
+ struct tgsi_full_immediate *imm);
+
+ /**
+ * Called at end of input program to allow caller to append extra
+    * instructions (via the emit_* hooks below).
+ */
+ void (*epilog)(struct tgsi_transform_context *ctx);
+
+
+/*** PRIVATE ***/
+
+ /**
+    * These are set up by tgsi_transform_shader() and cannot be overridden.
+    * They are meant to be called from the user callback functions above.
+ */
+ void (*emit_instruction)(struct tgsi_transform_context *ctx,
+ const struct tgsi_full_instruction *inst);
+ void (*emit_declaration)(struct tgsi_transform_context *ctx,
+ const struct tgsi_full_declaration *decl);
+ void (*emit_immediate)(struct tgsi_transform_context *ctx,
+ const struct tgsi_full_immediate *imm);
+
+ struct tgsi_header *header;
+ uint max_tokens_out;
+ struct tgsi_token *tokens_out;
+ uint ti;
+};
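+
+/* Typical usage sketch (illustrative): wrap the context in a caller
+ * struct, fill in only the callbacks of interest and leave the rest NULL
+ * so the default emit path copies those tokens through unchanged:
+ *
+ *    struct my_transform_context {        hypothetical subclass
+ *       struct tgsi_transform_context base;
+ *       uint temps_seen;                  caller-specific data
+ *    };
+ *
+ * Inside a callback, cast the tgsi_transform_context pointer back to the
+ * subclass and forward tokens with ctx->emit_instruction(),
+ * ctx->emit_declaration() or ctx->emit_immediate().
+ */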
+
+
+
+extern int
+tgsi_transform_shader(const struct tgsi_token *tokens_in,
+ struct tgsi_token *tokens_out,
+ uint max_tokens_out,
+ struct tgsi_transform_context *ctx);
+
+
+#endif /* TGSI_TRANSFORM_H */
diff --git a/src/gallium/auxiliary/tgsi/tgsi_ureg.c b/src/gallium/auxiliary/tgsi/tgsi_ureg.c
new file mode 100644
index 0000000000..f7096bd8e2
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_ureg.c
@@ -0,0 +1,938 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE, INC AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "tgsi/tgsi_ureg.h"
+#include "tgsi/tgsi_info.h"
+#include "tgsi/tgsi_dump.h"
+#include "util/u_memory.h"
+#include "util/u_math.h"
+
+union tgsi_any_token {
+ struct tgsi_version version;
+ struct tgsi_header header;
+ struct tgsi_processor processor;
+ struct tgsi_token token;
+ struct tgsi_declaration decl;
+ struct tgsi_declaration_range decl_range;
+ struct tgsi_declaration_semantic decl_semantic;
+ struct tgsi_immediate imm;
+ union tgsi_immediate_data imm_data;
+ struct tgsi_instruction insn;
+ struct tgsi_instruction_ext_nv insn_ext_nv;
+ struct tgsi_instruction_ext_label insn_ext_label;
+ struct tgsi_instruction_ext_texture insn_ext_texture;
+ struct tgsi_instruction_ext_predicate insn_ext_predicate;
+ struct tgsi_src_register src;
+ struct tgsi_src_register_ext_swz src_ext_swz;
+ struct tgsi_src_register_ext_mod src_ext_mod;
+ struct tgsi_dimension dim;
+ struct tgsi_dst_register dst;
+ struct tgsi_dst_register_ext_concode dst_ext_code;
+ struct tgsi_dst_register_ext_modulate dst_ext_mod;
+ struct tgsi_dst_register_ext_predicate dst_ext_pred;
+ unsigned value;
+};
+
+
+struct ureg_tokens {
+ union tgsi_any_token *tokens;
+ unsigned size;
+ unsigned order;
+ unsigned count;
+};
+
+#define UREG_MAX_INPUT PIPE_MAX_ATTRIBS
+#define UREG_MAX_OUTPUT PIPE_MAX_ATTRIBS
+#define UREG_MAX_IMMEDIATE 32
+#define UREG_MAX_TEMP 256
+#define UREG_MAX_ADDR 2
+
+#define DOMAIN_DECL 0
+#define DOMAIN_INSN 1
+
+struct ureg_program
+{
+ unsigned processor;
+ struct pipe_context *pipe;
+
+ struct {
+ unsigned semantic_name;
+ unsigned semantic_index;
+ unsigned interp;
+ } input[UREG_MAX_INPUT];
+ unsigned nr_inputs;
+
+ struct {
+ unsigned semantic_name;
+ unsigned semantic_index;
+ } output[UREG_MAX_OUTPUT];
+ unsigned nr_outputs;
+
+ struct {
+ float v[4];
+ unsigned nr;
+ } immediate[UREG_MAX_IMMEDIATE];
+ unsigned nr_immediates;
+
+ struct ureg_src sampler[PIPE_MAX_SAMPLERS];
+ unsigned nr_samplers;
+
+ unsigned temps_active[UREG_MAX_TEMP / 32];
+ unsigned nr_temps;
+
+ unsigned nr_addrs;
+
+ unsigned nr_constants;
+ unsigned nr_instructions;
+
+ struct ureg_tokens domain[2];
+};
+
+static union tgsi_any_token error_tokens[32];
+
+static void tokens_error( struct ureg_tokens *tokens )
+{
+ tokens->tokens = error_tokens;
+ tokens->size = Elements(error_tokens);
+ tokens->count = 0;
+}
+
+
+static void tokens_expand( struct ureg_tokens *tokens,
+ unsigned count )
+{
+ unsigned old_size = tokens->size * sizeof(unsigned);
+
+ if (tokens->tokens == error_tokens)
+ goto fail;
+
+ while (tokens->count + count > tokens->size) {
+ tokens->size = (1 << ++tokens->order);
+ }
+
+ tokens->tokens = REALLOC(tokens->tokens,
+ old_size,
+ tokens->size * sizeof(unsigned));
+ if (tokens->tokens == NULL)
+ goto fail;
+
+ return;
+
+fail:
+ tokens_error(tokens);
+}
+
+static void set_bad( struct ureg_program *ureg )
+{
+ tokens_error(&ureg->domain[0]);
+}
+
+
+
+static union tgsi_any_token *get_tokens( struct ureg_program *ureg,
+ unsigned domain,
+ unsigned count )
+{
+ struct ureg_tokens *tokens = &ureg->domain[domain];
+ union tgsi_any_token *result;
+
+ if (tokens->count + count > tokens->size)
+ tokens_expand(tokens, count);
+
+ result = &tokens->tokens[tokens->count];
+ tokens->count += count;
+ return result;
+}
+
+
+static union tgsi_any_token *retrieve_token( struct ureg_program *ureg,
+ unsigned domain,
+ unsigned nr )
+{
+ if (ureg->domain[domain].tokens == error_tokens)
+ return &error_tokens[0];
+
+ return &ureg->domain[domain].tokens[nr];
+}
+
+
+
+static INLINE struct ureg_dst
+ureg_dst_register( unsigned file,
+ unsigned index )
+{
+ struct ureg_dst dst;
+
+ dst.File = file;
+ dst.WriteMask = TGSI_WRITEMASK_XYZW;
+ dst.Indirect = 0;
+ dst.IndirectIndex = 0;
+ dst.IndirectSwizzle = 0;
+ dst.Saturate = 0;
+ dst.Index = index;
+ dst.Pad1 = 0;
+ dst.Pad2 = 0;
+
+ return dst;
+}
+
+static INLINE struct ureg_src
+ureg_src_register( unsigned file,
+ unsigned index )
+{
+ struct ureg_src src;
+
+ src.File = file;
+ src.SwizzleX = TGSI_SWIZZLE_X;
+ src.SwizzleY = TGSI_SWIZZLE_Y;
+ src.SwizzleZ = TGSI_SWIZZLE_Z;
+ src.SwizzleW = TGSI_SWIZZLE_W;
+ src.Pad = 0;
+ src.Indirect = 0;
+ src.IndirectIndex = 0;
+ src.IndirectSwizzle = 0;
+ src.Absolute = 0;
+ src.Index = index;
+ src.Negate = 0;
+
+ return src;
+}
+
+
+
+
+static struct ureg_src
+ureg_DECL_input( struct ureg_program *ureg,
+ unsigned name,
+ unsigned index,
+ unsigned interp_mode )
+{
+ unsigned i;
+
+ for (i = 0; i < ureg->nr_inputs; i++) {
+ if (ureg->input[i].semantic_name == name &&
+ ureg->input[i].semantic_index == index)
+ goto out;
+ }
+
+ if (ureg->nr_inputs < UREG_MAX_INPUT) {
+ ureg->input[i].semantic_name = name;
+ ureg->input[i].semantic_index = index;
+ ureg->input[i].interp = interp_mode;
+ ureg->nr_inputs++;
+ }
+ else {
+ set_bad( ureg );
+ }
+
+out:
+ return ureg_src_register( TGSI_FILE_INPUT, i );
+}
+
+
+
+struct ureg_src
+ureg_DECL_fs_input( struct ureg_program *ureg,
+ unsigned name,
+ unsigned index,
+ unsigned interp )
+{
+ assert(ureg->processor == TGSI_PROCESSOR_FRAGMENT);
+ return ureg_DECL_input( ureg, name, index, interp );
+}
+
+
+struct ureg_src
+ureg_DECL_vs_input( struct ureg_program *ureg,
+ unsigned name,
+ unsigned index )
+{
+ assert(ureg->processor == TGSI_PROCESSOR_VERTEX);
+ return ureg_DECL_input( ureg, name, index, TGSI_INTERPOLATE_CONSTANT );
+}
+
+
+struct ureg_dst
+ureg_DECL_output( struct ureg_program *ureg,
+ unsigned name,
+ unsigned index )
+{
+ unsigned i;
+
+ for (i = 0; i < ureg->nr_outputs; i++) {
+ if (ureg->output[i].semantic_name == name &&
+ ureg->output[i].semantic_index == index)
+ goto out;
+ }
+
+ if (ureg->nr_outputs < UREG_MAX_OUTPUT) {
+ ureg->output[i].semantic_name = name;
+ ureg->output[i].semantic_index = index;
+ ureg->nr_outputs++;
+ }
+ else {
+ set_bad( ureg );
+ }
+
+out:
+ return ureg_dst_register( TGSI_FILE_OUTPUT, i );
+}
+
+
+/* Returns a new constant register.  We keep track of which constants
+ * have been referred to so that we can emit decls later.
+ *
+ * There is nothing in this code to bind this constant to any tracked
+ * value or manage any constant_buffer contents -- that's the
+ * responsibility of the calling code.
+ */
+struct ureg_src ureg_DECL_constant(struct ureg_program *ureg )
+{
+ return ureg_src_register( TGSI_FILE_CONSTANT, ureg->nr_constants++ );
+}
+
+
+/* Allocate a new temporary. Temporaries greater than UREG_MAX_TEMP
+ * are legal, but will not be released.
+ */
+struct ureg_dst ureg_DECL_temporary( struct ureg_program *ureg )
+{
+ unsigned i;
+
+ for (i = 0; i < UREG_MAX_TEMP; i += 32) {
+ int bit = ffs(~ureg->temps_active[i/32]);
+ if (bit != 0) {
+ i += bit - 1;
+ goto out;
+ }
+ }
+
+ /* No reusable temps, so allocate a new one:
+ */
+ i = ureg->nr_temps++;
+
+out:
+ if (i < UREG_MAX_TEMP)
+ ureg->temps_active[i/32] |= 1 << (i % 32);
+
+ if (i >= ureg->nr_temps)
+ ureg->nr_temps = i + 1;
+
+ return ureg_dst_register( TGSI_FILE_TEMPORARY, i );
+}
+
+
+void ureg_release_temporary( struct ureg_program *ureg,
+ struct ureg_dst tmp )
+{
+ if(tmp.File == TGSI_FILE_TEMPORARY)
+ if (tmp.Index < UREG_MAX_TEMP)
+ ureg->temps_active[tmp.Index/32] &= ~(1 << (tmp.Index % 32));
+}
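+
+/* Usage sketch (illustrative):
+ *
+ *    struct ureg_dst tmp = ureg_DECL_temporary( ureg );
+ *    ... instructions reading/writing tmp ...
+ *    ureg_release_temporary( ureg, tmp );
+ *
+ * A released index may be handed out again by a later
+ * ureg_DECL_temporary() call.
+ */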
+
+
+/* Allocate a new address register.
+ */
+struct ureg_dst ureg_DECL_address( struct ureg_program *ureg )
+{
+ if (ureg->nr_addrs < UREG_MAX_ADDR)
+ return ureg_dst_register( TGSI_FILE_ADDRESS, ureg->nr_addrs++ );
+
+ assert( 0 );
+ return ureg_dst_register( TGSI_FILE_ADDRESS, 0 );
+}
+
+/* Allocate a new sampler.
+ */
+struct ureg_src ureg_DECL_sampler( struct ureg_program *ureg,
+ unsigned nr )
+{
+ unsigned i;
+
+ for (i = 0; i < ureg->nr_samplers; i++)
+ if (ureg->sampler[i].Index == nr)
+ return ureg->sampler[i];
+
+ if (i < PIPE_MAX_SAMPLERS) {
+ ureg->sampler[i] = ureg_src_register( TGSI_FILE_SAMPLER, nr );
+ ureg->nr_samplers++;
+ return ureg->sampler[i];
+ }
+
+ assert( 0 );
+ return ureg->sampler[0];
+}
+
+
+
+
+static int match_or_expand_immediate( const float *v,
+ unsigned nr,
+ float *v2,
+ unsigned *nr2,
+ unsigned *swizzle )
+{
+ unsigned i, j;
+
+ *swizzle = 0;
+
+ for (i = 0; i < nr; i++) {
+ boolean found = FALSE;
+
+ for (j = 0; j < *nr2 && !found; j++) {
+ if (v[i] == v2[j]) {
+ *swizzle |= j << (i * 2);
+ found = TRUE;
+ }
+ }
+
+ if (!found) {
+ if (*nr2 >= 4)
+ return FALSE;
+
+ v2[*nr2] = v[i];
+ *swizzle |= *nr2 << (i * 2);
+ (*nr2)++;
+ }
+ }
+
+ return TRUE;
+}
+
+
+
+
+struct ureg_src ureg_DECL_immediate( struct ureg_program *ureg,
+ const float *v,
+ unsigned nr )
+{
+ unsigned i, j;
+ unsigned swizzle;
+
+ /* Could do a first pass where we examine all existing immediates
+ * without expanding.
+ */
+
+ for (i = 0; i < ureg->nr_immediates; i++) {
+ if (match_or_expand_immediate( v,
+ nr,
+ ureg->immediate[i].v,
+ &ureg->immediate[i].nr,
+ &swizzle ))
+ goto out;
+ }
+
+ if (ureg->nr_immediates < UREG_MAX_IMMEDIATE) {
+ i = ureg->nr_immediates++;
+ if (match_or_expand_immediate( v,
+ nr,
+ ureg->immediate[i].v,
+ &ureg->immediate[i].nr,
+ &swizzle ))
+ goto out;
+ }
+
+ set_bad( ureg );
+
+out:
+ /* Make sure that all referenced elements are from this immediate.
+ * Has the effect of making size-one immediates into scalars.
+ */
+ for (j = nr; j < 4; j++)
+ swizzle |= (swizzle & 0x3) << (j * 2);
+
+ return ureg_swizzle( ureg_src_register( TGSI_FILE_IMMEDIATE, i ),
+ (swizzle >> 0) & 0x3,
+ (swizzle >> 2) & 0x3,
+ (swizzle >> 4) & 0x3,
+ (swizzle >> 6) & 0x3);
+}
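+
+/* Worked example (illustrative):
+ *
+ *    static const float v0[2] = { 1.0f, 0.0f };
+ *    static const float v1[1] = { 0.0f };
+ *    struct ureg_src a = ureg_DECL_immediate( ureg, v0, 2 );
+ *    struct ureg_src b = ureg_DECL_immediate( ureg, v1, 1 );
+ *
+ * Both sources end up referencing the same IMM[0] slot: `b' re-uses the
+ * 0.0 already stored for `a' via its swizzle, and because only one value
+ * was requested it comes back as a scalar (all four channels select that
+ * component).
+ */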
+
+
+void
+ureg_emit_src( struct ureg_program *ureg,
+ struct ureg_src src )
+{
+ unsigned size = (1 +
+ (src.Absolute ? 1 : 0) +
+ (src.Indirect ? 1 : 0));
+
+ union tgsi_any_token *out = get_tokens( ureg, DOMAIN_INSN, size );
+ unsigned n = 0;
+
+ assert(src.File != TGSI_FILE_NULL);
+ assert(src.File != TGSI_FILE_OUTPUT);
+ assert(src.File < TGSI_FILE_COUNT);
+
+ out[n].value = 0;
+ out[n].src.File = src.File;
+ out[n].src.SwizzleX = src.SwizzleX;
+ out[n].src.SwizzleY = src.SwizzleY;
+ out[n].src.SwizzleZ = src.SwizzleZ;
+ out[n].src.SwizzleW = src.SwizzleW;
+ out[n].src.Index = src.Index;
+ out[n].src.Negate = src.Negate;
+ n++;
+
+ if (src.Absolute) {
+ out[0].src.Extended = 1;
+ out[0].src.Negate = 0;
+ out[n].value = 0;
+ out[n].src_ext_mod.Type = TGSI_SRC_REGISTER_EXT_TYPE_MOD;
+ out[n].src_ext_mod.Absolute = 1;
+ out[n].src_ext_mod.Negate = src.Negate;
+ n++;
+ }
+
+ if (src.Indirect) {
+ out[0].src.Indirect = 1;
+ out[n].value = 0;
+ out[n].src.File = TGSI_FILE_ADDRESS;
+ out[n].src.SwizzleX = src.IndirectSwizzle;
+ out[n].src.SwizzleY = src.IndirectSwizzle;
+ out[n].src.SwizzleZ = src.IndirectSwizzle;
+ out[n].src.SwizzleW = src.IndirectSwizzle;
+ out[n].src.Index = src.IndirectIndex;
+ n++;
+ }
+
+ assert(n == size);
+}
+
+
+void
+ureg_emit_dst( struct ureg_program *ureg,
+ struct ureg_dst dst )
+{
+ unsigned size = (1 +
+ (dst.Indirect ? 1 : 0));
+
+ union tgsi_any_token *out = get_tokens( ureg, DOMAIN_INSN, size );
+ unsigned n = 0;
+
+ assert(dst.File != TGSI_FILE_NULL);
+ assert(dst.File != TGSI_FILE_CONSTANT);
+ assert(dst.File != TGSI_FILE_INPUT);
+ assert(dst.File != TGSI_FILE_SAMPLER);
+ assert(dst.File != TGSI_FILE_IMMEDIATE);
+ assert(dst.File < TGSI_FILE_COUNT);
+
+ out[n].value = 0;
+ out[n].dst.File = dst.File;
+ out[n].dst.WriteMask = dst.WriteMask;
+ out[n].dst.Indirect = dst.Indirect;
+ out[n].dst.Index = dst.Index;
+ n++;
+
+ if (dst.Indirect) {
+ out[n].value = 0;
+ out[n].src.File = TGSI_FILE_ADDRESS;
+ out[n].src.SwizzleX = dst.IndirectSwizzle;
+ out[n].src.SwizzleY = dst.IndirectSwizzle;
+ out[n].src.SwizzleZ = dst.IndirectSwizzle;
+ out[n].src.SwizzleW = dst.IndirectSwizzle;
+ out[n].src.Index = dst.IndirectIndex;
+ n++;
+ }
+
+ assert(n == size);
+}
+
+
+
+unsigned
+ureg_emit_insn(struct ureg_program *ureg,
+ unsigned opcode,
+ boolean saturate,
+ unsigned num_dst,
+ unsigned num_src )
+{
+ union tgsi_any_token *out;
+
+ out = get_tokens( ureg, DOMAIN_INSN, 1 );
+ out[0].value = 0;
+ out[0].insn.Type = TGSI_TOKEN_TYPE_INSTRUCTION;
+ out[0].insn.NrTokens = 0;
+ out[0].insn.Opcode = opcode;
+ out[0].insn.Saturate = saturate;
+ out[0].insn.NumDstRegs = num_dst;
+ out[0].insn.NumSrcRegs = num_src;
+ out[0].insn.Padding = 0;
+ out[0].insn.Extended = 0;
+
+ ureg->nr_instructions++;
+
+ return ureg->domain[DOMAIN_INSN].count - 1;
+}
+
+
+void
+ureg_emit_label(struct ureg_program *ureg,
+ unsigned insn_token,
+ unsigned *label_token )
+{
+ union tgsi_any_token *out, *insn;
+
+ if(!label_token)
+ return;
+
+ out = get_tokens( ureg, DOMAIN_INSN, 1 );
+ insn = retrieve_token( ureg, DOMAIN_INSN, insn_token );
+
+ insn->insn.Extended = 1;
+
+ out[0].value = 0;
+ out[0].insn_ext_label.Type = TGSI_INSTRUCTION_EXT_TYPE_LABEL;
+
+ *label_token = ureg->domain[DOMAIN_INSN].count - 1;
+}
+
+/* Will return a number which can be used in a label to point to the
+ * next instruction to be emitted.
+ */
+unsigned
+ureg_get_instruction_number( struct ureg_program *ureg )
+{
+ return ureg->nr_instructions;
+}
+
+/* Patch a given label (expressed as a token number) to point to a
+ * given instruction (expressed as an instruction number).
+ */
+void
+ureg_fixup_label(struct ureg_program *ureg,
+ unsigned label_token,
+ unsigned instruction_number )
+{
+ union tgsi_any_token *out = retrieve_token( ureg, DOMAIN_INSN, label_token );
+
+ assert(out->insn_ext_label.Type == TGSI_INSTRUCTION_EXT_TYPE_LABEL);
+ out->insn_ext_label.Label = instruction_number;
+}
+
+
+void
+ureg_emit_texture(struct ureg_program *ureg,
+ unsigned insn_token,
+ unsigned target )
+{
+ union tgsi_any_token *out, *insn;
+
+ out = get_tokens( ureg, DOMAIN_INSN, 1 );
+ insn = retrieve_token( ureg, DOMAIN_INSN, insn_token );
+
+ insn->insn.Extended = 1;
+
+ out[0].value = 0;
+ out[0].insn_ext_texture.Type = TGSI_INSTRUCTION_EXT_TYPE_TEXTURE;
+ out[0].insn_ext_texture.Texture = target;
+}
+
+
+void
+ureg_fixup_insn_size(struct ureg_program *ureg,
+ unsigned insn )
+{
+ union tgsi_any_token *out = retrieve_token( ureg, DOMAIN_INSN, insn );
+
+ assert(out->insn.Type == TGSI_TOKEN_TYPE_INSTRUCTION);
+ out->insn.NrTokens = ureg->domain[DOMAIN_INSN].count - insn - 1;
+}
+
+
+void
+ureg_insn(struct ureg_program *ureg,
+ unsigned opcode,
+ const struct ureg_dst *dst,
+ unsigned nr_dst,
+ const struct ureg_src *src,
+ unsigned nr_src )
+{
+ unsigned insn, i;
+ boolean saturate;
+
+#ifdef DEBUG
+ {
+ const struct tgsi_opcode_info *info = tgsi_get_opcode_info( opcode );
+ assert(info);
+ if(info) {
+ assert(nr_dst == info->num_dst);
+ assert(nr_src == info->num_src);
+ }
+ }
+#endif
+
+ saturate = nr_dst ? dst[0].Saturate : FALSE;
+
+ insn = ureg_emit_insn( ureg, opcode, saturate, nr_dst, nr_src );
+
+ for (i = 0; i < nr_dst; i++)
+ ureg_emit_dst( ureg, dst[i] );
+
+ for (i = 0; i < nr_src; i++)
+ ureg_emit_src( ureg, src[i] );
+
+ ureg_fixup_insn_size( ureg, insn );
+}
+
+
+
+static void emit_decl( struct ureg_program *ureg,
+ unsigned file,
+ unsigned index,
+ unsigned semantic_name,
+ unsigned semantic_index,
+ unsigned interp )
+{
+ union tgsi_any_token *out = get_tokens( ureg, DOMAIN_DECL, 3 );
+
+ out[0].value = 0;
+ out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
+ out[0].decl.NrTokens = 3;
+ out[0].decl.File = file;
+ out[0].decl.UsageMask = TGSI_WRITEMASK_XYZW; /* FIXME! */
+ out[0].decl.Interpolate = interp;
+ out[0].decl.Semantic = 1;
+
+ out[1].value = 0;
+ out[1].decl_range.First =
+ out[1].decl_range.Last = index;
+
+ out[2].value = 0;
+ out[2].decl_semantic.SemanticName = semantic_name;
+ out[2].decl_semantic.SemanticIndex = semantic_index;
+
+}
+
+
+static void emit_decl_range( struct ureg_program *ureg,
+ unsigned file,
+ unsigned first,
+ unsigned count )
+{
+ union tgsi_any_token *out = get_tokens( ureg, DOMAIN_DECL, 2 );
+
+ out[0].value = 0;
+ out[0].decl.Type = TGSI_TOKEN_TYPE_DECLARATION;
+ out[0].decl.NrTokens = 2;
+ out[0].decl.File = file;
+ out[0].decl.UsageMask = 0xf;
+ out[0].decl.Interpolate = TGSI_INTERPOLATE_CONSTANT;
+ out[0].decl.Semantic = 0;
+
+ out[1].value = 0;
+ out[1].decl_range.First = first;
+ out[1].decl_range.Last = first + count - 1;
+}
+
+static void emit_immediate( struct ureg_program *ureg,
+ const float *v )
+{
+ union tgsi_any_token *out = get_tokens( ureg, DOMAIN_DECL, 5 );
+
+ out[0].value = 0;
+ out[0].imm.Type = TGSI_TOKEN_TYPE_IMMEDIATE;
+ out[0].imm.NrTokens = 5;
+ out[0].imm.DataType = TGSI_IMM_FLOAT32;
+ out[0].imm.Padding = 0;
+ out[0].imm.Extended = 0;
+
+ out[1].imm_data.Float = v[0];
+ out[2].imm_data.Float = v[1];
+ out[3].imm_data.Float = v[2];
+ out[4].imm_data.Float = v[3];
+}
+
+
+
+
+static void emit_decls( struct ureg_program *ureg )
+{
+ unsigned i;
+
+ for (i = 0; i < ureg->nr_inputs; i++) {
+ emit_decl( ureg,
+ TGSI_FILE_INPUT,
+ i,
+ ureg->input[i].semantic_name,
+ ureg->input[i].semantic_index,
+ ureg->input[i].interp );
+ }
+
+ for (i = 0; i < ureg->nr_outputs; i++) {
+ emit_decl( ureg,
+ TGSI_FILE_OUTPUT,
+ i,
+ ureg->output[i].semantic_name,
+ ureg->output[i].semantic_index,
+ TGSI_INTERPOLATE_CONSTANT );
+ }
+
+ for (i = 0; i < ureg->nr_samplers; i++) {
+ emit_decl_range( ureg,
+ TGSI_FILE_SAMPLER,
+ ureg->sampler[i].Index, 1 );
+ }
+
+ if (ureg->nr_constants) {
+ emit_decl_range( ureg,
+ TGSI_FILE_CONSTANT,
+ 0, ureg->nr_constants );
+ }
+
+ if (ureg->nr_temps) {
+ emit_decl_range( ureg,
+ TGSI_FILE_TEMPORARY,
+ 0, ureg->nr_temps );
+ }
+
+ if (ureg->nr_addrs) {
+ emit_decl_range( ureg,
+ TGSI_FILE_ADDRESS,
+ 0, ureg->nr_addrs );
+ }
+
+ for (i = 0; i < ureg->nr_immediates; i++) {
+ emit_immediate( ureg,
+ ureg->immediate[i].v );
+ }
+}
+
+/* Append the instruction tokens onto the declarations to build a
+ * contiguous stream suitable to send to the driver.
+ */
+static void copy_instructions( struct ureg_program *ureg )
+{
+ unsigned nr_tokens = ureg->domain[DOMAIN_INSN].count;
+ union tgsi_any_token *out = get_tokens( ureg,
+ DOMAIN_DECL,
+ nr_tokens );
+
+ memcpy(out,
+ ureg->domain[DOMAIN_INSN].tokens,
+ nr_tokens * sizeof out[0] );
+}
+
+
+static void
+fixup_header_size(struct ureg_program *ureg)
+{
+ union tgsi_any_token *out = retrieve_token( ureg, DOMAIN_DECL, 1 );
+
+ out->header.BodySize = ureg->domain[DOMAIN_DECL].count - 3;
+}
+
+
+static void
+emit_header( struct ureg_program *ureg )
+{
+ union tgsi_any_token *out = get_tokens( ureg, DOMAIN_DECL, 3 );
+
+ out[0].version.MajorVersion = 1;
+ out[0].version.MinorVersion = 1;
+ out[0].version.Padding = 0;
+
+ out[1].header.HeaderSize = 2;
+ out[1].header.BodySize = 0;
+
+ out[2].processor.Processor = ureg->processor;
+ out[2].processor.Padding = 0;
+}
+
+
+const struct tgsi_token *ureg_finalize( struct ureg_program *ureg )
+{
+ const struct tgsi_token *tokens;
+
+ emit_header( ureg );
+ emit_decls( ureg );
+ copy_instructions( ureg );
+ fixup_header_size( ureg );
+
+ if (ureg->domain[0].tokens == error_tokens ||
+ ureg->domain[1].tokens == error_tokens) {
+ debug_printf("%s: error in generated shader\n", __FUNCTION__);
+ assert(0);
+ return NULL;
+ }
+
+ tokens = &ureg->domain[DOMAIN_DECL].tokens[0].token;
+
+ if (0) {
+ debug_printf("%s: emitted shader %d tokens:\n", __FUNCTION__,
+ ureg->domain[DOMAIN_DECL].count);
+ tgsi_dump( tokens, 0 );
+ }
+
+ return tokens;
+}
+
+
+void *ureg_create_shader( struct ureg_program *ureg,
+ struct pipe_context *pipe )
+{
+ struct pipe_shader_state state;
+
+ state.tokens = ureg_finalize(ureg);
+ if(!state.tokens)
+ return NULL;
+
+ if (ureg->processor == TGSI_PROCESSOR_VERTEX)
+ return pipe->create_vs_state( pipe, &state );
+ else
+ return pipe->create_fs_state( pipe, &state );
+}
+
+
+
+
+struct ureg_program *ureg_create( unsigned processor )
+{
+ struct ureg_program *ureg = CALLOC_STRUCT( ureg_program );
+ if (ureg == NULL)
+ return NULL;
+
+ ureg->processor = processor;
+ return ureg;
+}
+
+
+void ureg_destroy( struct ureg_program *ureg )
+{
+ unsigned i;
+
+ for (i = 0; i < Elements(ureg->domain); i++) {
+ if (ureg->domain[i].tokens &&
+ ureg->domain[i].tokens != error_tokens)
+ FREE(ureg->domain[i].tokens);
+ }
+
+ FREE(ureg);
+}
diff --git a/src/gallium/auxiliary/tgsi/tgsi_ureg.h b/src/gallium/auxiliary/tgsi/tgsi_ureg.h
new file mode 100644
index 0000000000..acbca59040
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_ureg.h
@@ -0,0 +1,590 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE, INC AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TGSI_UREG_H
+#define TGSI_UREG_H
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_shader_tokens.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct ureg_program;
+
+/* Almost a tgsi_src_register, but we need to pull in the Absolute
+ * flag from the _ext token. Indirect flag always implies ADDR[0].
+ */
+struct ureg_src
+{
+ unsigned File : 4; /* TGSI_FILE_ */
+ unsigned SwizzleX : 2; /* TGSI_SWIZZLE_ */
+ unsigned SwizzleY : 2; /* TGSI_SWIZZLE_ */
+ unsigned SwizzleZ : 2; /* TGSI_SWIZZLE_ */
+ unsigned SwizzleW : 2; /* TGSI_SWIZZLE_ */
+ unsigned Pad : 1; /* BOOL */
+ unsigned Indirect : 1; /* BOOL */
+ unsigned Absolute : 1; /* BOOL */
+ int Index : 16; /* SINT */
+ unsigned Negate : 1; /* BOOL */
+ int IndirectIndex : 16; /* SINT */
+ int IndirectSwizzle : 2; /* TGSI_SWIZZLE_ */
+};
+
+/* Very similar to a tgsi_dst_register, removing unsupported fields
+ * and adding a Saturate flag. It's easier to push saturate into the
+ * destination register than to try to create a _SAT variant of each
+ * instruction function.
+ */
+struct ureg_dst
+{
+ unsigned File : 4; /* TGSI_FILE_ */
+ unsigned WriteMask : 4; /* TGSI_WRITEMASK_ */
+ unsigned Indirect : 1; /* BOOL */
+ unsigned Saturate : 1; /* BOOL */
+ int Index : 16; /* SINT */
+ unsigned Pad1 : 5;
+ unsigned Pad2 : 1; /* BOOL */
+ int IndirectIndex : 16; /* SINT */
+ int IndirectSwizzle : 2; /* TGSI_SWIZZLE_ */
+};
+
+struct pipe_context;
+
+struct ureg_program *
+ureg_create( unsigned processor );
+
+const struct tgsi_token *
+ureg_finalize( struct ureg_program * );
+
+void *
+ureg_create_shader( struct ureg_program *,
+ struct pipe_context *pipe );
+
+void
+ureg_destroy( struct ureg_program * );
+
+
+/***********************************************************************
+ * Convenience routine:
+ */
+static INLINE void *
+ureg_create_shader_and_destroy( struct ureg_program *p,
+ struct pipe_context *pipe )
+{
+ void *result = ureg_create_shader( p, pipe );
+ ureg_destroy( p );
+ return result;
+}
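+
+/* Usage sketch (illustrative): build a pass-through fragment shader.
+ * The per-opcode helpers such as ureg_MOV() and ureg_END() are generated
+ * by the tgsi_opcode_tmp.h include further below; `pipe' is the caller's
+ * pipe_context.
+ *
+ *    struct ureg_program *ureg = ureg_create( TGSI_PROCESSOR_FRAGMENT );
+ *    struct ureg_src color = ureg_DECL_fs_input( ureg, TGSI_SEMANTIC_COLOR, 0,
+ *                                                TGSI_INTERPOLATE_PERSPECTIVE );
+ *    struct ureg_dst out_color = ureg_DECL_output( ureg, TGSI_SEMANTIC_COLOR, 0 );
+ *    void *fs;
+ *
+ *    ureg_MOV( ureg, out_color, color );
+ *    ureg_END( ureg );
+ *    fs = ureg_create_shader_and_destroy( ureg, pipe );
+ */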
+
+
+
+/***********************************************************************
+ * Build shader declarations:
+ */
+
+struct ureg_src
+ureg_DECL_fs_input( struct ureg_program *,
+ unsigned semantic_name,
+ unsigned semantic_index,
+ unsigned interp_mode );
+
+struct ureg_src
+ureg_DECL_vs_input( struct ureg_program *,
+ unsigned semantic_name,
+ unsigned semantic_index );
+
+struct ureg_dst
+ureg_DECL_output( struct ureg_program *,
+ unsigned semantic_name,
+ unsigned semantic_index );
+
+struct ureg_src
+ureg_DECL_immediate( struct ureg_program *,
+ const float *v,
+ unsigned nr );
+
+struct ureg_src
+ureg_DECL_constant( struct ureg_program * );
+
+struct ureg_dst
+ureg_DECL_temporary( struct ureg_program * );
+
+void
+ureg_release_temporary( struct ureg_program *ureg,
+ struct ureg_dst tmp );
+
+struct ureg_dst
+ureg_DECL_address( struct ureg_program * );
+
+/* Supply an index to the sampler declaration as this is the hook to
+ * the external pipe_sampler state. Users of this function probably
+ * don't want just any sampler, but a specific one which they've set
+ * up state for in the context.
+ */
+struct ureg_src
+ureg_DECL_sampler( struct ureg_program *,
+ unsigned index );
+
+
+static INLINE struct ureg_src
+ureg_imm4f( struct ureg_program *ureg,
+ float a, float b,
+ float c, float d)
+{
+ float v[4];
+ v[0] = a;
+ v[1] = b;
+ v[2] = c;
+ v[3] = d;
+ return ureg_DECL_immediate( ureg, v, 4 );
+}
+
+static INLINE struct ureg_src
+ureg_imm3f( struct ureg_program *ureg,
+ float a, float b,
+ float c)
+{
+ float v[3];
+ v[0] = a;
+ v[1] = b;
+ v[2] = c;
+ return ureg_DECL_immediate( ureg, v, 3 );
+}
+
+static INLINE struct ureg_src
+ureg_imm2f( struct ureg_program *ureg,
+ float a, float b)
+{
+ float v[2];
+ v[0] = a;
+ v[1] = b;
+ return ureg_DECL_immediate( ureg, v, 2 );
+}
+
+static INLINE struct ureg_src
+ureg_imm1f( struct ureg_program *ureg,
+ float a)
+{
+ float v[1];
+ v[0] = a;
+ return ureg_DECL_immediate( ureg, v, 1 );
+}
+
+/***********************************************************************
+ * Functions for patching up labels
+ */
+
+
+/* Will return a number which can be used in a label to point to the
+ * next instruction to be emitted.
+ */
+unsigned
+ureg_get_instruction_number( struct ureg_program *ureg );
+
+
+/* Patch a given label (expressed as a token number) to point to a
+ * given instruction (expressed as an instruction number).
+ *
+ * Labels are obtained from instruction emitters, e.g. ureg_CAL().
+ * Instruction numbers are obtained from ureg_get_instruction_number(),
+ * above.
+ */
+void
+ureg_fixup_label(struct ureg_program *ureg,
+ unsigned label_token,
+ unsigned instruction_number );
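+
+/* Sketch of the intended flow (illustrative), using a label-emitting
+ * helper such as ureg_CAL() from tgsi_opcode_tmp.h:
+ *
+ *    unsigned call_label;
+ *    ureg_CAL( ureg, &call_label );
+ *    ...
+ *    Later, just before emitting the first instruction of the call
+ *    target:
+ *
+ *    ureg_fixup_label( ureg, call_label,
+ *                      ureg_get_instruction_number( ureg ) );
+ */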
+
+
+/* Generic instruction emitter. Use if you need to pass the opcode as
+ * a parameter, rather than using the ureg_OP() variants below.
+ */
+void
+ureg_insn(struct ureg_program *ureg,
+ unsigned opcode,
+ const struct ureg_dst *dst,
+ unsigned nr_dst,
+ const struct ureg_src *src,
+ unsigned nr_src );
+
+
+/***********************************************************************
+ * Internal instruction helpers, don't call these directly:
+ */
+
+unsigned
+ureg_emit_insn(struct ureg_program *ureg,
+ unsigned opcode,
+ boolean saturate,
+ unsigned num_dst,
+ unsigned num_src );
+
+void
+ureg_emit_label(struct ureg_program *ureg,
+ unsigned insn_token,
+ unsigned *label_token );
+
+void
+ureg_emit_texture(struct ureg_program *ureg,
+ unsigned insn_token,
+ unsigned target );
+
+void
+ureg_emit_dst( struct ureg_program *ureg,
+ struct ureg_dst dst );
+
+void
+ureg_emit_src( struct ureg_program *ureg,
+ struct ureg_src src );
+
+void
+ureg_fixup_insn_size(struct ureg_program *ureg,
+ unsigned insn );
+
+
+#define OP00( op ) \
+static INLINE void ureg_##op( struct ureg_program *ureg ) \
+{ \
+ unsigned opcode = TGSI_OPCODE_##op; \
+ unsigned insn = ureg_emit_insn( ureg, opcode, FALSE, 0, 0 ); \
+ ureg_fixup_insn_size( ureg, insn ); \
+}
+
+#define OP01( op ) \
+static INLINE void ureg_##op( struct ureg_program *ureg, \
+ struct ureg_src src ) \
+{ \
+ unsigned opcode = TGSI_OPCODE_##op; \
+ unsigned insn = ureg_emit_insn( ureg, opcode, FALSE, 0, 1 ); \
+ ureg_emit_src( ureg, src ); \
+ ureg_fixup_insn_size( ureg, insn ); \
+}
+
+#define OP00_LBL( op ) \
+static INLINE void ureg_##op( struct ureg_program *ureg, \
+ unsigned *label_token ) \
+{ \
+ unsigned opcode = TGSI_OPCODE_##op; \
+ unsigned insn = ureg_emit_insn( ureg, opcode, FALSE, 0, 0 ); \
+ ureg_emit_label( ureg, insn, label_token ); \
+ ureg_fixup_insn_size( ureg, insn ); \
+}
+
+#define OP01_LBL( op ) \
+static INLINE void ureg_##op( struct ureg_program *ureg, \
+ struct ureg_src src, \
+ unsigned *label_token ) \
+{ \
+ unsigned opcode = TGSI_OPCODE_##op; \
+ unsigned insn = ureg_emit_insn( ureg, opcode, FALSE, 0, 1 ); \
+ ureg_emit_label( ureg, insn, label_token ); \
+ ureg_emit_src( ureg, src ); \
+ ureg_fixup_insn_size( ureg, insn ); \
+}
+
+#define OP10( op ) \
+static INLINE void ureg_##op( struct ureg_program *ureg, \
+ struct ureg_dst dst ) \
+{ \
+ unsigned opcode = TGSI_OPCODE_##op; \
+ unsigned insn = ureg_emit_insn( ureg, opcode, dst.Saturate, 1, 0 ); \
+ ureg_emit_dst( ureg, dst ); \
+ ureg_fixup_insn_size( ureg, insn ); \
+}
+
+
+#define OP11( op ) \
+static INLINE void ureg_##op( struct ureg_program *ureg, \
+ struct ureg_dst dst, \
+ struct ureg_src src ) \
+{ \
+ unsigned opcode = TGSI_OPCODE_##op; \
+ unsigned insn = ureg_emit_insn( ureg, opcode, dst.Saturate, 1, 1 ); \
+ ureg_emit_dst( ureg, dst ); \
+ ureg_emit_src( ureg, src ); \
+ ureg_fixup_insn_size( ureg, insn ); \
+}
+
+#define OP12( op ) \
+static INLINE void ureg_##op( struct ureg_program *ureg, \
+ struct ureg_dst dst, \
+ struct ureg_src src0, \
+ struct ureg_src src1 ) \
+{ \
+ unsigned opcode = TGSI_OPCODE_##op; \
+ unsigned insn = ureg_emit_insn( ureg, opcode, dst.Saturate, 1, 2 ); \
+ ureg_emit_dst( ureg, dst ); \
+ ureg_emit_src( ureg, src0 ); \
+ ureg_emit_src( ureg, src1 ); \
+ ureg_fixup_insn_size( ureg, insn ); \
+}
+
+#define OP12_TEX( op ) \
+static INLINE void ureg_##op( struct ureg_program *ureg, \
+ struct ureg_dst dst, \
+ unsigned target, \
+ struct ureg_src src0, \
+ struct ureg_src src1 ) \
+{ \
+ unsigned opcode = TGSI_OPCODE_##op; \
+ unsigned insn = ureg_emit_insn( ureg, opcode, dst.Saturate, 1, 2 ); \
+ ureg_emit_texture( ureg, insn, target ); \
+ ureg_emit_dst( ureg, dst ); \
+ ureg_emit_src( ureg, src0 ); \
+ ureg_emit_src( ureg, src1 ); \
+ ureg_fixup_insn_size( ureg, insn ); \
+}
+
+#define OP13( op ) \
+static INLINE void ureg_##op( struct ureg_program *ureg, \
+ struct ureg_dst dst, \
+ struct ureg_src src0, \
+ struct ureg_src src1, \
+ struct ureg_src src2 ) \
+{ \
+ unsigned opcode = TGSI_OPCODE_##op; \
+ unsigned insn = ureg_emit_insn( ureg, opcode, dst.Saturate, 1, 3 ); \
+ ureg_emit_dst( ureg, dst ); \
+ ureg_emit_src( ureg, src0 ); \
+ ureg_emit_src( ureg, src1 ); \
+ ureg_emit_src( ureg, src2 ); \
+ ureg_fixup_insn_size( ureg, insn ); \
+}
+
+#define OP14_TEX( op ) \
+static INLINE void ureg_##op( struct ureg_program *ureg, \
+ struct ureg_dst dst, \
+ unsigned target, \
+ struct ureg_src src0, \
+ struct ureg_src src1, \
+ struct ureg_src src2, \
+ struct ureg_src src3 ) \
+{ \
+ unsigned opcode = TGSI_OPCODE_##op; \
+ unsigned insn = ureg_emit_insn( ureg, opcode, dst.Saturate, 1, 4 ); \
+ ureg_emit_texture( ureg, insn, target ); \
+ ureg_emit_dst( ureg, dst ); \
+ ureg_emit_src( ureg, src0 ); \
+ ureg_emit_src( ureg, src1 ); \
+ ureg_emit_src( ureg, src2 ); \
+ ureg_emit_src( ureg, src3 ); \
+ ureg_fixup_insn_size( ureg, insn ); \
+}
+
+
+/* Use a template include to generate a correctly-typed ureg_OP()
+ * function for each TGSI opcode:
+ */
+#include "tgsi_opcode_tmp.h"
+
+
+/***********************************************************************
+ * Inline helpers for manipulating register structs:
+ */
+static INLINE struct ureg_src
+ureg_negate( struct ureg_src reg )
+{
+ assert(reg.File != TGSI_FILE_NULL);
+ reg.Negate ^= 1;
+ return reg;
+}
+
+static INLINE struct ureg_src
+ureg_abs( struct ureg_src reg )
+{
+ assert(reg.File != TGSI_FILE_NULL);
+ reg.Absolute = 1;
+ reg.Negate = 0;
+ return reg;
+}
+
+static INLINE struct ureg_src
+ureg_swizzle( struct ureg_src reg,
+ int x, int y, int z, int w )
+{
+ unsigned swz = ( (reg.SwizzleX << 0) |
+ (reg.SwizzleY << 2) |
+ (reg.SwizzleZ << 4) |
+ (reg.SwizzleW << 6));
+
+ assert(reg.File != TGSI_FILE_NULL);
+ assert(x < 4);
+ assert(y < 4);
+ assert(z < 4);
+ assert(w < 4);
+
+ reg.SwizzleX = (swz >> (x*2)) & 0x3;
+ reg.SwizzleY = (swz >> (y*2)) & 0x3;
+ reg.SwizzleZ = (swz >> (z*2)) & 0x3;
+ reg.SwizzleW = (swz >> (w*2)) & 0x3;
+ return reg;
+}
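+
+/* e.g. ureg_swizzle( reg, TGSI_SWIZZLE_W, TGSI_SWIZZLE_Z,
+ *                    TGSI_SWIZZLE_Y, TGSI_SWIZZLE_X )
+ * selects reg's components in reverse order; the new swizzle is composed
+ * with whatever swizzle reg already carries.
+ */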
+
+static INLINE struct ureg_src
+ureg_scalar( struct ureg_src reg, int x )
+{
+ return ureg_swizzle(reg, x, x, x, x);
+}
+
+static INLINE struct ureg_dst
+ureg_writemask( struct ureg_dst reg,
+ unsigned writemask )
+{
+ assert(reg.File != TGSI_FILE_NULL);
+ reg.WriteMask &= writemask;
+ return reg;
+}
+
+static INLINE struct ureg_dst
+ureg_saturate( struct ureg_dst reg )
+{
+ assert(reg.File != TGSI_FILE_NULL);
+ reg.Saturate = 1;
+ return reg;
+}
+
+static INLINE struct ureg_dst
+ureg_dst_indirect( struct ureg_dst reg, struct ureg_src addr )
+{
+ assert(reg.File != TGSI_FILE_NULL);
+ assert(addr.File == TGSI_FILE_ADDRESS);
+ reg.Indirect = 1;
+ reg.IndirectIndex = addr.Index;
+ reg.IndirectSwizzle = addr.SwizzleX;
+ return reg;
+}
+
+static INLINE struct ureg_src
+ureg_src_indirect( struct ureg_src reg, struct ureg_src addr )
+{
+ assert(reg.File != TGSI_FILE_NULL);
+ assert(addr.File == TGSI_FILE_ADDRESS);
+ reg.Indirect = 1;
+ reg.IndirectIndex = addr.Index;
+ reg.IndirectSwizzle = addr.SwizzleX;
+ return reg;
+}
+
+static INLINE struct ureg_dst
+ureg_dst( struct ureg_src src )
+{
+ struct ureg_dst dst;
+
+ dst.File = src.File;
+ dst.WriteMask = TGSI_WRITEMASK_XYZW;
+ dst.Indirect = src.Indirect;
+ dst.IndirectIndex = src.IndirectIndex;
+ dst.IndirectSwizzle = src.IndirectSwizzle;
+ dst.Saturate = 0;
+ dst.Index = src.Index;
+ dst.Pad1 = 0;
+ dst.Pad2 = 0;
+
+ return dst;
+}
+
+static INLINE struct ureg_src
+ureg_src( struct ureg_dst dst )
+{
+ struct ureg_src src;
+
+ src.File = dst.File;
+ src.SwizzleX = TGSI_SWIZZLE_X;
+ src.SwizzleY = TGSI_SWIZZLE_Y;
+ src.SwizzleZ = TGSI_SWIZZLE_Z;
+ src.SwizzleW = TGSI_SWIZZLE_W;
+ src.Pad = 0;
+ src.Indirect = dst.Indirect;
+ src.IndirectIndex = dst.IndirectIndex;
+ src.IndirectSwizzle = dst.IndirectSwizzle;
+ src.Absolute = 0;
+ src.Index = dst.Index;
+ src.Negate = 0;
+
+ return src;
+}
+
+
+
+static INLINE struct ureg_dst
+ureg_dst_undef( void )
+{
+ struct ureg_dst dst;
+
+ dst.File = TGSI_FILE_NULL;
+ dst.WriteMask = 0;
+ dst.Indirect = 0;
+ dst.IndirectIndex = 0;
+ dst.IndirectSwizzle = 0;
+ dst.Saturate = 0;
+ dst.Index = 0;
+ dst.Pad1 = 0;
+ dst.Pad2 = 0;
+
+ return dst;
+}
+
+static INLINE struct ureg_src
+ureg_src_undef( void )
+{
+ struct ureg_src src;
+
+ src.File = TGSI_FILE_NULL;
+ src.SwizzleX = 0;
+ src.SwizzleY = 0;
+ src.SwizzleZ = 0;
+ src.SwizzleW = 0;
+ src.Pad = 0;
+ src.Indirect = 0;
+ src.IndirectIndex = 0;
+ src.IndirectSwizzle = 0;
+ src.Absolute = 0;
+ src.Index = 0;
+ src.Negate = 0;
+
+ return src;
+}
+
+static INLINE boolean
+ureg_src_is_undef( struct ureg_src src )
+{
+ return src.File == TGSI_FILE_NULL;
+}
+
+static INLINE boolean
+ureg_dst_is_undef( struct ureg_dst dst )
+{
+ return dst.File == TGSI_FILE_NULL;
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/gallium/auxiliary/tgsi/tgsi_util.c b/src/gallium/auxiliary/tgsi/tgsi_util.c
new file mode 100644
index 0000000000..71f8a6ca40
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_util.c
@@ -0,0 +1,299 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_debug.h"
+#include "pipe/p_shader_tokens.h"
+#include "tgsi_parse.h"
+#include "tgsi_build.h"
+#include "tgsi_util.h"
+
+union pointer_hack
+{
+ void *pointer;
+ uint64_t uint64;
+};
+
+void *
+tgsi_align_128bit(
+ void *unaligned )
+{
+ union pointer_hack ph;
+
+ ph.uint64 = 0;
+ ph.pointer = unaligned;
+ ph.uint64 = (ph.uint64 + 15) & ~15;
+ return ph.pointer;
+}
+
+unsigned
+tgsi_util_get_src_register_swizzle(
+ const struct tgsi_src_register *reg,
+ unsigned component )
+{
+ switch( component ) {
+ case 0:
+ return reg->SwizzleX;
+ case 1:
+ return reg->SwizzleY;
+ case 2:
+ return reg->SwizzleZ;
+ case 3:
+ return reg->SwizzleW;
+ default:
+ assert( 0 );
+ }
+ return 0;
+}
+
+unsigned
+tgsi_util_get_src_register_extswizzle(
+ const struct tgsi_src_register_ext_swz *reg,
+ unsigned component )
+{
+ switch( component ) {
+ case 0:
+ return reg->ExtSwizzleX;
+ case 1:
+ return reg->ExtSwizzleY;
+ case 2:
+ return reg->ExtSwizzleZ;
+ case 3:
+ return reg->ExtSwizzleW;
+ default:
+ assert( 0 );
+ }
+ return 0;
+}
+
+unsigned
+tgsi_util_get_full_src_register_extswizzle(
+ const struct tgsi_full_src_register *reg,
+ unsigned component )
+{
+ unsigned swizzle;
+
+ /*
+ * First, calculate the extended swizzle for a given channel. This will give
+ * us either a channel index into the simple swizzle or a constant 1 or 0.
+ */
+ swizzle = tgsi_util_get_src_register_extswizzle(
+ &reg->SrcRegisterExtSwz,
+ component );
+
+ assert (TGSI_SWIZZLE_X == TGSI_EXTSWIZZLE_X);
+ assert (TGSI_SWIZZLE_Y == TGSI_EXTSWIZZLE_Y);
+ assert (TGSI_SWIZZLE_Z == TGSI_EXTSWIZZLE_Z);
+ assert (TGSI_SWIZZLE_W == TGSI_EXTSWIZZLE_W);
+ assert (TGSI_EXTSWIZZLE_ZERO > TGSI_SWIZZLE_W);
+ assert (TGSI_EXTSWIZZLE_ONE > TGSI_SWIZZLE_W);
+
+ /*
+ * Second, calculate the simple swizzle for the unswizzled channel index.
+ * Leave the constants intact, they are not affected by the simple swizzle.
+ */
+ if( swizzle <= TGSI_SWIZZLE_W ) {
+ swizzle = tgsi_util_get_src_register_swizzle(
+ &reg->SrcRegister,
+ swizzle );
+ }
+
+ return swizzle;
+}
+
+void
+tgsi_util_set_src_register_swizzle(
+ struct tgsi_src_register *reg,
+ unsigned swizzle,
+ unsigned component )
+{
+ switch( component ) {
+ case 0:
+ reg->SwizzleX = swizzle;
+ break;
+ case 1:
+ reg->SwizzleY = swizzle;
+ break;
+ case 2:
+ reg->SwizzleZ = swizzle;
+ break;
+ case 3:
+ reg->SwizzleW = swizzle;
+ break;
+ default:
+ assert( 0 );
+ }
+}
+
+void
+tgsi_util_set_src_register_extswizzle(
+ struct tgsi_src_register_ext_swz *reg,
+ unsigned swizzle,
+ unsigned component )
+{
+ switch( component ) {
+ case 0:
+ reg->ExtSwizzleX = swizzle;
+ break;
+ case 1:
+ reg->ExtSwizzleY = swizzle;
+ break;
+ case 2:
+ reg->ExtSwizzleZ = swizzle;
+ break;
+ case 3:
+ reg->ExtSwizzleW = swizzle;
+ break;
+ default:
+ assert( 0 );
+ }
+}
+
+unsigned
+tgsi_util_get_src_register_extnegate(
+ const struct tgsi_src_register_ext_swz *reg,
+ unsigned component )
+{
+ switch( component ) {
+ case 0:
+ return reg->NegateX;
+ case 1:
+ return reg->NegateY;
+ case 2:
+ return reg->NegateZ;
+ case 3:
+ return reg->NegateW;
+ default:
+ assert( 0 );
+ }
+ return 0;
+}
+
+void
+tgsi_util_set_src_register_extnegate(
+ struct tgsi_src_register_ext_swz *reg,
+ unsigned negate,
+ unsigned component )
+{
+ switch( component ) {
+ case 0:
+ reg->NegateX = negate;
+ break;
+ case 1:
+ reg->NegateY = negate;
+ break;
+ case 2:
+ reg->NegateZ = negate;
+ break;
+ case 3:
+ reg->NegateW = negate;
+ break;
+ default:
+ assert( 0 );
+ }
+}
+
+unsigned
+tgsi_util_get_full_src_register_sign_mode(
+ const struct tgsi_full_src_register *reg,
+ unsigned component )
+{
+ unsigned sign_mode;
+
+ if( reg->SrcRegisterExtMod.Absolute ) {
+ /* Consider only the post-abs negation. */
+
+ if( reg->SrcRegisterExtMod.Negate ) {
+ sign_mode = TGSI_UTIL_SIGN_SET;
+ }
+ else {
+ sign_mode = TGSI_UTIL_SIGN_CLEAR;
+ }
+ }
+ else {
+ /* Accumulate the three negations. */
+
+ unsigned negate;
+
+ negate = reg->SrcRegister.Negate;
+ if( tgsi_util_get_src_register_extnegate( &reg->SrcRegisterExtSwz, component ) ) {
+ negate = !negate;
+ }
+ if( reg->SrcRegisterExtMod.Negate ) {
+ negate = !negate;
+ }
+
+ if( negate ) {
+ sign_mode = TGSI_UTIL_SIGN_TOGGLE;
+ }
+ else {
+ sign_mode = TGSI_UTIL_SIGN_KEEP;
+ }
+ }
+
+ return sign_mode;
+}
+
+void
+tgsi_util_set_full_src_register_sign_mode(
+ struct tgsi_full_src_register *reg,
+ unsigned sign_mode )
+{
+ reg->SrcRegisterExtSwz.NegateX = 0;
+ reg->SrcRegisterExtSwz.NegateY = 0;
+ reg->SrcRegisterExtSwz.NegateZ = 0;
+ reg->SrcRegisterExtSwz.NegateW = 0;
+
+ switch (sign_mode)
+ {
+ case TGSI_UTIL_SIGN_CLEAR:
+ reg->SrcRegister.Negate = 0;
+ reg->SrcRegisterExtMod.Absolute = 1;
+ reg->SrcRegisterExtMod.Negate = 0;
+ break;
+
+ case TGSI_UTIL_SIGN_SET:
+ reg->SrcRegister.Negate = 0;
+ reg->SrcRegisterExtMod.Absolute = 1;
+ reg->SrcRegisterExtMod.Negate = 1;
+ break;
+
+ case TGSI_UTIL_SIGN_TOGGLE:
+ reg->SrcRegister.Negate = 1;
+ reg->SrcRegisterExtMod.Absolute = 0;
+ reg->SrcRegisterExtMod.Negate = 0;
+ break;
+
+ case TGSI_UTIL_SIGN_KEEP:
+ reg->SrcRegister.Negate = 0;
+ reg->SrcRegisterExtMod.Absolute = 0;
+ reg->SrcRegisterExtMod.Negate = 0;
+ break;
+
+ default:
+ assert( 0 );
+ }
+}
diff --git a/src/gallium/auxiliary/tgsi/tgsi_util.h b/src/gallium/auxiliary/tgsi/tgsi_util.h
new file mode 100644
index 0000000000..21eb656327
--- /dev/null
+++ b/src/gallium/auxiliary/tgsi/tgsi_util.h
@@ -0,0 +1,100 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TGSI_UTIL_H
+#define TGSI_UTIL_H
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+struct tgsi_src_register;
+struct tgsi_src_register_ext_swz;
+struct tgsi_full_src_register;
+
+void *
+tgsi_align_128bit(
+ void *unaligned );
+
+unsigned
+tgsi_util_get_src_register_swizzle(
+ const struct tgsi_src_register *reg,
+ unsigned component );
+
+unsigned
+tgsi_util_get_src_register_extswizzle(
+ const struct tgsi_src_register_ext_swz *reg,
+ unsigned component);
+
+unsigned
+tgsi_util_get_full_src_register_extswizzle(
+ const struct tgsi_full_src_register *reg,
+ unsigned component );
+
+void
+tgsi_util_set_src_register_swizzle(
+ struct tgsi_src_register *reg,
+ unsigned swizzle,
+ unsigned component );
+
+void
+tgsi_util_set_src_register_extswizzle(
+ struct tgsi_src_register_ext_swz *reg,
+ unsigned swizzle,
+ unsigned component );
+
+unsigned
+tgsi_util_get_src_register_extnegate(
+ const struct tgsi_src_register_ext_swz *reg,
+ unsigned component );
+
+void
+tgsi_util_set_src_register_extnegate(
+ struct tgsi_src_register_ext_swz *reg,
+ unsigned negate,
+ unsigned component );
+
+#define TGSI_UTIL_SIGN_CLEAR 0 /* Force positive */
+#define TGSI_UTIL_SIGN_SET 1 /* Force negative */
+#define TGSI_UTIL_SIGN_TOGGLE 2 /* Negate */
+#define TGSI_UTIL_SIGN_KEEP 3 /* No change */
+
+unsigned
+tgsi_util_get_full_src_register_sign_mode(
+ const struct tgsi_full_src_register *reg,
+ unsigned component );
+
+void
+tgsi_util_set_full_src_register_sign_mode(
+ struct tgsi_full_src_register *reg,
+ unsigned sign_mode );
+
+#if defined __cplusplus
+}
+#endif
+
+#endif /* TGSI_UTIL_H */
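
Illustration only, not part of the patch: the getter above reports the effective sign mode of a single component, while the setter rewrites the negate/absolute flags of the whole register (and clears the per-component extended-swizzle negates). A hypothetical helper combining the two could look like this:

/* Negate a source register, deciding from the current sign mode of its
 * X component.  Purely illustrative; the name is not part of the API.
 */
static void
example_negate_src(struct tgsi_full_src_register *reg)
{
   switch (tgsi_util_get_full_src_register_sign_mode(reg, 0 /* X */)) {
   case TGSI_UTIL_SIGN_KEEP:
      tgsi_util_set_full_src_register_sign_mode(reg, TGSI_UTIL_SIGN_TOGGLE);
      break;
   case TGSI_UTIL_SIGN_TOGGLE:
      tgsi_util_set_full_src_register_sign_mode(reg, TGSI_UTIL_SIGN_KEEP);
      break;
   case TGSI_UTIL_SIGN_CLEAR:
      tgsi_util_set_full_src_register_sign_mode(reg, TGSI_UTIL_SIGN_SET);
      break;
   default: /* TGSI_UTIL_SIGN_SET */
      tgsi_util_set_full_src_register_sign_mode(reg, TGSI_UTIL_SIGN_CLEAR);
      break;
   }
}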
diff --git a/src/gallium/auxiliary/translate/Makefile b/src/gallium/auxiliary/translate/Makefile
new file mode 100644
index 0000000000..3c82f8ae03
--- /dev/null
+++ b/src/gallium/auxiliary/translate/Makefile
@@ -0,0 +1,12 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = translate
+
+C_SOURCES = \
+ translate_generic.c \
+ translate_sse.c \
+ translate.c \
+ translate_cache.c
+
+include ../../Makefile.template
diff --git a/src/gallium/auxiliary/translate/SConscript b/src/gallium/auxiliary/translate/SConscript
new file mode 100644
index 0000000000..9553a67537
--- /dev/null
+++ b/src/gallium/auxiliary/translate/SConscript
@@ -0,0 +1,12 @@
+Import('*')
+
+translate = env.ConvenienceLibrary(
+ target = 'translate',
+ source = [
+ 'translate_generic.c',
+ 'translate_sse.c',
+ 'translate.c',
+ 'translate_cache.c',
+ ])
+
+auxiliaries.insert(0, translate)
diff --git a/src/gallium/auxiliary/translate/translate.c b/src/gallium/auxiliary/translate/translate.c
new file mode 100644
index 0000000000..a9b7253bf4
--- /dev/null
+++ b/src/gallium/auxiliary/translate/translate.c
@@ -0,0 +1,50 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "pipe/p_config.h"
+#include "pipe/p_state.h"
+#include "translate.h"
+
+struct translate *translate_create( const struct translate_key *key )
+{
+ struct translate *translate = NULL;
+
+#if defined(PIPE_ARCH_X86)
+ translate = translate_sse2_create( key );
+ if (translate)
+ return translate;
+#else
+ (void)translate;
+#endif
+
+ return translate_generic_create( key );
+}
diff --git a/src/gallium/auxiliary/translate/translate.h b/src/gallium/auxiliary/translate/translate.h
new file mode 100644
index 0000000000..34526eb061
--- /dev/null
+++ b/src/gallium/auxiliary/translate/translate.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2008 Tungsten Graphics, inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/**
+ * Vertex fetch/store/convert code. This functionality is used in two places:
+ * 1. Vertex fetch/convert - to grab vertex data from incoming vertex
+ *    arrays and convert it to the format needed by vertex shaders.
+ * 2. Vertex store/emit - to convert simple float[][4] vertex attributes
+ *    (the organization used throughout the draw/prim pipeline) to
+ *    hardware-specific formats and emit them into hardware vertex buffers.
+ *
+ *
+ * Authors:
+ * Keith Whitwell <keithw@tungstengraphics.com>
+ */
+
+#ifndef _TRANSLATE_H
+#define _TRANSLATE_H
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_format.h"
+#include "pipe/p_state.h"
+
+struct translate_element
+{
+ enum pipe_format input_format;
+ enum pipe_format output_format;
+ unsigned input_buffer:8;
+ unsigned input_offset:24;
+ unsigned output_offset;
+};
+
+
+struct translate_key {
+ unsigned output_stride;
+ unsigned nr_elements;
+ struct translate_element element[PIPE_MAX_ATTRIBS + 1];
+};
+
+
+struct translate {
+ struct translate_key key;
+
+ void (*release)( struct translate * );
+
+ void (*set_buffer)( struct translate *,
+ unsigned i,
+ const void *ptr,
+ unsigned stride );
+
+ void (PIPE_CDECL *run_elts)( struct translate *,
+ const unsigned *elts,
+ unsigned count,
+ void *output_buffer);
+
+ void (PIPE_CDECL *run)( struct translate *,
+ unsigned start,
+ unsigned count,
+ void *output_buffer);
+};
+
+
+
+#if 0
+struct translate_context *translate_context_create( void );
+void translate_context_destroy( struct translate_context * );
+
+struct translate *translate_lookup_or_create( struct translate_context *tctx,
+ const struct translate_key *key );
+#endif
+
+
+struct translate *translate_create( const struct translate_key *key );
+
+static INLINE int translate_keysize( const struct translate_key *key )
+{
+ return 2 * sizeof(int) + key->nr_elements * sizeof(struct translate_element);
+}
+
+static INLINE int translate_key_compare( const struct translate_key *a,
+ const struct translate_key *b )
+{
+ int keysize = translate_keysize(a);
+ return memcmp(a, b, keysize);
+}
+
+
+static INLINE void translate_key_sanitize( struct translate_key *a )
+{
+ int keysize = translate_keysize(a);
+ char *ptr = (char *)a;
+ memset(ptr + keysize, 0, sizeof(*a) - keysize);
+}
+
+
+/*******************************************************************************
+ * Private:
+ */
+struct translate *translate_sse2_create( const struct translate_key *key );
+
+struct translate *translate_generic_create( const struct translate_key *key );
+
+
+#endif
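
For orientation only (not part of the patch), a minimal caller of this interface might look like the sketch below. The single-element key, the tightly packed float3 input and the example_* name are assumptions made for the illustration:

#include <string.h>
#include "translate.h"

/* Convert a tightly packed float3 position stream in vertex buffer 0
 * into float4 output vertices (w defaults to 1.0 in the generic path).
 */
static void
example_translate_positions(const void *positions, unsigned nr_verts,
                            void *out_verts)
{
   struct translate_key key;
   struct translate *t;

   memset(&key, 0, sizeof key);
   key.output_stride = 4 * sizeof(float);
   key.nr_elements = 1;
   key.element[0].input_format  = PIPE_FORMAT_R32G32B32_FLOAT;
   key.element[0].output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   key.element[0].input_buffer  = 0;
   key.element[0].input_offset  = 0;
   key.element[0].output_offset = 0;

   t = translate_create(&key);
   if (!t)
      return;

   t->set_buffer(t, 0, positions, 3 * sizeof(float) /* stride */);
   t->run(t, 0 /* start */, nr_verts, out_verts);
   t->release(t);
}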
diff --git a/src/gallium/auxiliary/translate/translate_cache.c b/src/gallium/auxiliary/translate/translate_cache.c
new file mode 100644
index 0000000000..d8069a149c
--- /dev/null
+++ b/src/gallium/auxiliary/translate/translate_cache.c
@@ -0,0 +1,102 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_memory.h"
+#include "pipe/p_state.h"
+#include "translate.h"
+#include "translate_cache.h"
+
+#include "cso_cache/cso_cache.h"
+#include "cso_cache/cso_hash.h"
+
+struct translate_cache {
+ struct cso_hash *hash;
+};
+
+struct translate_cache * translate_cache_create( void )
+{
+   struct translate_cache *cache = MALLOC_STRUCT(translate_cache);
+   if (!cache)
+      return NULL;
+   cache->hash = cso_hash_create();
+   return cache;
+}
+
+
+static INLINE void delete_translates(struct translate_cache *cache)
+{
+ struct cso_hash *hash = cache->hash;
+ struct cso_hash_iter iter = cso_hash_first_node(hash);
+ while (!cso_hash_iter_is_null(iter)) {
+ struct translate *state = (struct translate*)cso_hash_iter_data(iter);
+ iter = cso_hash_iter_next(iter);
+ if (state) {
+ state->release(state);
+ }
+ }
+}
+
+void translate_cache_destroy(struct translate_cache *cache)
+{
+ delete_translates(cache);
+ cso_hash_delete(cache->hash);
+ FREE(cache);
+}
+
+
+static INLINE unsigned translate_hash_key_size(struct translate_key *key)
+{
+ unsigned size = sizeof(struct translate_key) -
+ sizeof(struct translate_element) * (PIPE_MAX_ATTRIBS - key->nr_elements);
+ return size;
+}
+
+static INLINE unsigned create_key(struct translate_key *key)
+{
+ unsigned hash_key;
+ unsigned size = translate_hash_key_size(key);
+ /*debug_printf("key size = %d, (els = %d)\n",
+ size, key->nr_elements);*/
+ hash_key = cso_construct_key(key, size);
+ return hash_key;
+}
+
+struct translate * translate_cache_find(struct translate_cache *cache,
+ struct translate_key *key)
+{
+ unsigned hash_key = create_key(key);
+ struct translate *translate = (struct translate*)
+ cso_hash_find_data_from_template(cache->hash,
+ hash_key,
+ key, sizeof(*key));
+
+ if (!translate) {
+ /* create/insert */
+ translate = translate_create(key);
+ cso_hash_insert(cache->hash, hash_key, translate);
+ }
+
+ return translate;
+}
diff --git a/src/gallium/auxiliary/translate/translate_cache.h b/src/gallium/auxiliary/translate/translate_cache.h
new file mode 100644
index 0000000000..7dba871e57
--- /dev/null
+++ b/src/gallium/auxiliary/translate/translate_cache.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2008 Tungsten Graphics, inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _TRANSLATE_CACHE_H
+#define _TRANSLATE_CACHE_H
+
+
+/*******************************************************************************
+ * Translate cache.
+ * Simply used to cache created translates. Avoids unnecessary creation of
+ * translates if one suitable for a given translate_key has already been
+ * created.
+ *
+ * Note: this functionality depends on and requires the CSO module.
+ */
+struct translate_cache;
+
+struct translate_key;
+struct translate;
+
+struct translate_cache *translate_cache_create( void );
+void translate_cache_destroy(struct translate_cache *cache);
+
+/**
+ * Try to find a translate structure matching the given key.
+ * If no such structure exists in the cache, the function will
+ * automatically create it, insert it into the cache and
+ * return the newly created instance.
+ */
+struct translate *translate_cache_find(struct translate_cache *cache,
+ struct translate_key *key);
+
+#endif
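
Illustrative sketch, not part of the patch: since the cache hashes and memcmp's the raw key bytes, zeroing the unused tail of the key with translate_key_sanitize() (from translate.h) before the lookup avoids spurious misses. The wrapper name below is hypothetical:

#include "translate.h"
#include "translate_cache.h"

static struct translate *
example_get_translate(struct translate_cache *cache,
                      struct translate_key *key)
{
   /* Clear the bytes past the last used element so that hashing and
    * comparison see deterministic contents. */
   translate_key_sanitize(key);

   /* Returns the cached instance, creating and inserting one on a miss. */
   return translate_cache_find(cache, key);
}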
diff --git a/src/gallium/auxiliary/translate/translate_generic.c b/src/gallium/auxiliary/translate/translate_generic.c
new file mode 100644
index 0000000000..8d39b64c6c
--- /dev/null
+++ b/src/gallium/auxiliary/translate/translate_generic.c
@@ -0,0 +1,700 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "util/u_memory.h"
+#include "pipe/p_state.h"
+#include "translate.h"
+
+
+#define DRAW_DBG 0
+
+typedef void (*fetch_func)(const void *ptr, float *attrib);
+typedef void (*emit_func)(const float *attrib, void *ptr);
+
+
+
+struct translate_generic {
+ struct translate translate;
+
+ struct {
+ fetch_func fetch;
+ unsigned buffer;
+ unsigned input_offset;
+
+ emit_func emit;
+ unsigned output_offset;
+
+ char *input_ptr;
+ unsigned input_stride;
+
+ } attrib[PIPE_MAX_ATTRIBS];
+
+ unsigned nr_attrib;
+};
+
+
+static struct translate_generic *translate_generic( struct translate *translate )
+{
+ return (struct translate_generic *)translate;
+}
+
+/**
+ * Fetch a float[4] vertex attribute from memory, doing format/type
+ * conversion as needed.
+ *
+ * This is probably needed/duplicated elsewhere, e.g. format
+ * conversion, texture sampling etc.
+ */
+#define ATTRIB( NAME, SZ, TYPE, FROM, TO ) \
+static void \
+fetch_##NAME(const void *ptr, float *attrib) \
+{ \
+ const float defaults[4] = { 0.0f,0.0f,0.0f,1.0f }; \
+ unsigned i; \
+ \
+ for (i = 0; i < SZ; i++) { \
+ attrib[i] = FROM(i); \
+ } \
+ \
+ for (; i < 4; i++) { \
+ attrib[i] = defaults[i]; \
+ } \
+} \
+ \
+static void \
+emit_##NAME(const float *attrib, void *ptr) \
+{ \
+ unsigned i; \
+ TYPE *out = (TYPE *)ptr; \
+ \
+ for (i = 0; i < SZ; i++) { \
+ out[i] = TO(attrib[i]); \
+ } \
+}
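
For reference (illustration, not part of the patch), the instantiation ATTRIB( R32G32_FLOAT, 2, float, FROM_32_FLOAT, TO_32_FLOAT ) further down expands to roughly the following pair, using the conversion macros defined just below:

static void
fetch_R32G32_FLOAT(const void *ptr, float *attrib)
{
   /* two components from memory, the rest from the 0,0,0,1 defaults */
   attrib[0] = ((float *) ptr)[0];
   attrib[1] = ((float *) ptr)[1];
   attrib[2] = 0.0f;
   attrib[3] = 1.0f;
}

static void
emit_R32G32_FLOAT(const float *attrib, void *ptr)
{
   float *out = (float *) ptr;
   out[0] = attrib[0];
   out[1] = attrib[1];
}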
+
+
+#define FROM_64_FLOAT(i) ((float) ((double *) ptr)[i])
+#define FROM_32_FLOAT(i) (((float *) ptr)[i])
+
+#define FROM_8_USCALED(i) ((float) ((unsigned char *) ptr)[i])
+#define FROM_16_USCALED(i) ((float) ((unsigned short *) ptr)[i])
+#define FROM_32_USCALED(i) ((float) ((unsigned int *) ptr)[i])
+
+#define FROM_8_SSCALED(i) ((float) ((char *) ptr)[i])
+#define FROM_16_SSCALED(i) ((float) ((short *) ptr)[i])
+#define FROM_32_SSCALED(i) ((float) ((int *) ptr)[i])
+
+#define FROM_8_UNORM(i) ((float) ((unsigned char *) ptr)[i] / 255.0f)
+#define FROM_16_UNORM(i) ((float) ((unsigned short *) ptr)[i] / 65535.0f)
+#define FROM_32_UNORM(i) ((float) ((unsigned int *) ptr)[i] / 4294967295.0f)
+
+#define FROM_8_SNORM(i) ((float) ((char *) ptr)[i] / 127.0f)
+#define FROM_16_SNORM(i) ((float) ((short *) ptr)[i] / 32767.0f)
+#define FROM_32_SNORM(i) ((float) ((int *) ptr)[i] / 2147483647.0f)
+
+#define FROM_32_FIXED(i) (((int *) ptr)[i] / 65536.0f)
+
+#define TO_64_FLOAT(x) ((double) x)
+#define TO_32_FLOAT(x) (x)
+
+#define TO_8_USCALED(x) ((unsigned char) x)
+#define TO_16_USCALED(x) ((unsigned short) x)
+#define TO_32_USCALED(x) ((unsigned int) x)
+
+#define TO_8_SSCALED(x) ((char) x)
+#define TO_16_SSCALED(x) ((short) x)
+#define TO_32_SSCALED(x) ((int) x)
+
+#define TO_8_UNORM(x) ((unsigned char) (x * 255.0f))
+#define TO_16_UNORM(x) ((unsigned short) (x * 65535.0f))
+#define TO_32_UNORM(x) ((unsigned int) (x * 4294967295.0f))
+
+#define TO_8_SNORM(x) ((char) (x * 127.0f))
+#define TO_16_SNORM(x) ((short) (x * 32767.0f))
+#define TO_32_SNORM(x) ((int) (x * 2147483647.0f))
+
+#define TO_32_FIXED(x) ((int) (x * 65536.0f))
+
+
+
+ATTRIB( R64G64B64A64_FLOAT, 4, double, FROM_64_FLOAT, TO_64_FLOAT )
+ATTRIB( R64G64B64_FLOAT, 3, double, FROM_64_FLOAT, TO_64_FLOAT )
+ATTRIB( R64G64_FLOAT, 2, double, FROM_64_FLOAT, TO_64_FLOAT )
+ATTRIB( R64_FLOAT, 1, double, FROM_64_FLOAT, TO_64_FLOAT )
+
+ATTRIB( R32G32B32A32_FLOAT, 4, float, FROM_32_FLOAT, TO_32_FLOAT )
+ATTRIB( R32G32B32_FLOAT, 3, float, FROM_32_FLOAT, TO_32_FLOAT )
+ATTRIB( R32G32_FLOAT, 2, float, FROM_32_FLOAT, TO_32_FLOAT )
+ATTRIB( R32_FLOAT, 1, float, FROM_32_FLOAT, TO_32_FLOAT )
+
+ATTRIB( R32G32B32A32_USCALED, 4, unsigned, FROM_32_USCALED, TO_32_USCALED )
+ATTRIB( R32G32B32_USCALED, 3, unsigned, FROM_32_USCALED, TO_32_USCALED )
+ATTRIB( R32G32_USCALED, 2, unsigned, FROM_32_USCALED, TO_32_USCALED )
+ATTRIB( R32_USCALED, 1, unsigned, FROM_32_USCALED, TO_32_USCALED )
+
+ATTRIB( R32G32B32A32_SSCALED, 4, int, FROM_32_SSCALED, TO_32_SSCALED )
+ATTRIB( R32G32B32_SSCALED, 3, int, FROM_32_SSCALED, TO_32_SSCALED )
+ATTRIB( R32G32_SSCALED, 2, int, FROM_32_SSCALED, TO_32_SSCALED )
+ATTRIB( R32_SSCALED, 1, int, FROM_32_SSCALED, TO_32_SSCALED )
+
+ATTRIB( R32G32B32A32_UNORM, 4, unsigned, FROM_32_UNORM, TO_32_UNORM )
+ATTRIB( R32G32B32_UNORM, 3, unsigned, FROM_32_UNORM, TO_32_UNORM )
+ATTRIB( R32G32_UNORM, 2, unsigned, FROM_32_UNORM, TO_32_UNORM )
+ATTRIB( R32_UNORM, 1, unsigned, FROM_32_UNORM, TO_32_UNORM )
+
+ATTRIB( R32G32B32A32_SNORM, 4, int, FROM_32_SNORM, TO_32_SNORM )
+ATTRIB( R32G32B32_SNORM, 3, int, FROM_32_SNORM, TO_32_SNORM )
+ATTRIB( R32G32_SNORM, 2, int, FROM_32_SNORM, TO_32_SNORM )
+ATTRIB( R32_SNORM, 1, int, FROM_32_SNORM, TO_32_SNORM )
+
+ATTRIB( R16G16B16A16_USCALED, 4, ushort, FROM_16_USCALED, TO_16_USCALED )
+ATTRIB( R16G16B16_USCALED, 3, ushort, FROM_16_USCALED, TO_16_USCALED )
+ATTRIB( R16G16_USCALED, 2, ushort, FROM_16_USCALED, TO_16_USCALED )
+ATTRIB( R16_USCALED, 1, ushort, FROM_16_USCALED, TO_16_USCALED )
+
+ATTRIB( R16G16B16A16_SSCALED, 4, short, FROM_16_SSCALED, TO_16_SSCALED )
+ATTRIB( R16G16B16_SSCALED, 3, short, FROM_16_SSCALED, TO_16_SSCALED )
+ATTRIB( R16G16_SSCALED, 2, short, FROM_16_SSCALED, TO_16_SSCALED )
+ATTRIB( R16_SSCALED, 1, short, FROM_16_SSCALED, TO_16_SSCALED )
+
+ATTRIB( R16G16B16A16_UNORM, 4, ushort, FROM_16_UNORM, TO_16_UNORM )
+ATTRIB( R16G16B16_UNORM, 3, ushort, FROM_16_UNORM, TO_16_UNORM )
+ATTRIB( R16G16_UNORM, 2, ushort, FROM_16_UNORM, TO_16_UNORM )
+ATTRIB( R16_UNORM, 1, ushort, FROM_16_UNORM, TO_16_UNORM )
+
+ATTRIB( R16G16B16A16_SNORM, 4, short, FROM_16_SNORM, TO_16_SNORM )
+ATTRIB( R16G16B16_SNORM, 3, short, FROM_16_SNORM, TO_16_SNORM )
+ATTRIB( R16G16_SNORM, 2, short, FROM_16_SNORM, TO_16_SNORM )
+ATTRIB( R16_SNORM, 1, short, FROM_16_SNORM, TO_16_SNORM )
+
+ATTRIB( R8G8B8A8_USCALED, 4, ubyte, FROM_8_USCALED, TO_8_USCALED )
+ATTRIB( R8G8B8_USCALED, 3, ubyte, FROM_8_USCALED, TO_8_USCALED )
+ATTRIB( R8G8_USCALED, 2, ubyte, FROM_8_USCALED, TO_8_USCALED )
+ATTRIB( R8_USCALED, 1, ubyte, FROM_8_USCALED, TO_8_USCALED )
+
+ATTRIB( R8G8B8A8_SSCALED, 4, char, FROM_8_SSCALED, TO_8_SSCALED )
+ATTRIB( R8G8B8_SSCALED, 3, char, FROM_8_SSCALED, TO_8_SSCALED )
+ATTRIB( R8G8_SSCALED, 2, char, FROM_8_SSCALED, TO_8_SSCALED )
+ATTRIB( R8_SSCALED, 1, char, FROM_8_SSCALED, TO_8_SSCALED )
+
+ATTRIB( R8G8B8A8_UNORM, 4, ubyte, FROM_8_UNORM, TO_8_UNORM )
+ATTRIB( R8G8B8_UNORM, 3, ubyte, FROM_8_UNORM, TO_8_UNORM )
+ATTRIB( R8G8_UNORM, 2, ubyte, FROM_8_UNORM, TO_8_UNORM )
+ATTRIB( R8_UNORM, 1, ubyte, FROM_8_UNORM, TO_8_UNORM )
+
+ATTRIB( R8G8B8A8_SNORM, 4, char, FROM_8_SNORM, TO_8_SNORM )
+ATTRIB( R8G8B8_SNORM, 3, char, FROM_8_SNORM, TO_8_SNORM )
+ATTRIB( R8G8_SNORM, 2, char, FROM_8_SNORM, TO_8_SNORM )
+ATTRIB( R8_SNORM, 1, char, FROM_8_SNORM, TO_8_SNORM )
+
+ATTRIB( A8R8G8B8_UNORM, 4, ubyte, FROM_8_UNORM, TO_8_UNORM )
+/* ATTRIB( R8G8B8A8_UNORM, 4, ubyte, FROM_8_UNORM, TO_8_UNORM ) */
+
+ATTRIB( R32G32B32A32_FIXED, 4, int, FROM_32_FIXED, TO_32_FIXED )
+ATTRIB( R32G32B32_FIXED, 3, int, FROM_32_FIXED, TO_32_FIXED )
+ATTRIB( R32G32_FIXED, 2, int, FROM_32_FIXED, TO_32_FIXED )
+ATTRIB( R32_FIXED, 1, int, FROM_32_FIXED, TO_32_FIXED )
+
+
+
+static void
+fetch_B8G8R8A8_UNORM(const void *ptr, float *attrib)
+{
+ attrib[2] = FROM_8_UNORM(0);
+ attrib[1] = FROM_8_UNORM(1);
+ attrib[0] = FROM_8_UNORM(2);
+ attrib[3] = FROM_8_UNORM(3);
+}
+
+static void
+emit_B8G8R8A8_UNORM( const float *attrib, void *ptr)
+{
+ ubyte *out = (ubyte *)ptr;
+ out[2] = TO_8_UNORM(attrib[0]);
+ out[1] = TO_8_UNORM(attrib[1]);
+ out[0] = TO_8_UNORM(attrib[2]);
+ out[3] = TO_8_UNORM(attrib[3]);
+}
+
+static void
+fetch_NULL( const void *ptr, float *attrib )
+{
+ attrib[0] = 0;
+ attrib[1] = 0;
+ attrib[2] = 0;
+ attrib[3] = 1;
+}
+
+static void
+emit_NULL( const float *attrib, void *ptr )
+{
+   /* doing nothing is the only sensible option */
+}
+
+static fetch_func get_fetch_func( enum pipe_format format )
+{
+ switch (format) {
+ case PIPE_FORMAT_R64_FLOAT:
+ return &fetch_R64_FLOAT;
+ case PIPE_FORMAT_R64G64_FLOAT:
+ return &fetch_R64G64_FLOAT;
+ case PIPE_FORMAT_R64G64B64_FLOAT:
+ return &fetch_R64G64B64_FLOAT;
+ case PIPE_FORMAT_R64G64B64A64_FLOAT:
+ return &fetch_R64G64B64A64_FLOAT;
+
+ case PIPE_FORMAT_R32_FLOAT:
+ return &fetch_R32_FLOAT;
+ case PIPE_FORMAT_R32G32_FLOAT:
+ return &fetch_R32G32_FLOAT;
+ case PIPE_FORMAT_R32G32B32_FLOAT:
+ return &fetch_R32G32B32_FLOAT;
+ case PIPE_FORMAT_R32G32B32A32_FLOAT:
+ return &fetch_R32G32B32A32_FLOAT;
+
+ case PIPE_FORMAT_R32_UNORM:
+ return &fetch_R32_UNORM;
+ case PIPE_FORMAT_R32G32_UNORM:
+ return &fetch_R32G32_UNORM;
+ case PIPE_FORMAT_R32G32B32_UNORM:
+ return &fetch_R32G32B32_UNORM;
+ case PIPE_FORMAT_R32G32B32A32_UNORM:
+ return &fetch_R32G32B32A32_UNORM;
+
+ case PIPE_FORMAT_R32_USCALED:
+ return &fetch_R32_USCALED;
+ case PIPE_FORMAT_R32G32_USCALED:
+ return &fetch_R32G32_USCALED;
+ case PIPE_FORMAT_R32G32B32_USCALED:
+ return &fetch_R32G32B32_USCALED;
+ case PIPE_FORMAT_R32G32B32A32_USCALED:
+ return &fetch_R32G32B32A32_USCALED;
+
+ case PIPE_FORMAT_R32_SNORM:
+ return &fetch_R32_SNORM;
+ case PIPE_FORMAT_R32G32_SNORM:
+ return &fetch_R32G32_SNORM;
+ case PIPE_FORMAT_R32G32B32_SNORM:
+ return &fetch_R32G32B32_SNORM;
+ case PIPE_FORMAT_R32G32B32A32_SNORM:
+ return &fetch_R32G32B32A32_SNORM;
+
+ case PIPE_FORMAT_R32_SSCALED:
+ return &fetch_R32_SSCALED;
+ case PIPE_FORMAT_R32G32_SSCALED:
+ return &fetch_R32G32_SSCALED;
+ case PIPE_FORMAT_R32G32B32_SSCALED:
+ return &fetch_R32G32B32_SSCALED;
+ case PIPE_FORMAT_R32G32B32A32_SSCALED:
+ return &fetch_R32G32B32A32_SSCALED;
+
+ case PIPE_FORMAT_R16_UNORM:
+ return &fetch_R16_UNORM;
+ case PIPE_FORMAT_R16G16_UNORM:
+ return &fetch_R16G16_UNORM;
+ case PIPE_FORMAT_R16G16B16_UNORM:
+ return &fetch_R16G16B16_UNORM;
+ case PIPE_FORMAT_R16G16B16A16_UNORM:
+ return &fetch_R16G16B16A16_UNORM;
+
+ case PIPE_FORMAT_R16_USCALED:
+ return &fetch_R16_USCALED;
+ case PIPE_FORMAT_R16G16_USCALED:
+ return &fetch_R16G16_USCALED;
+ case PIPE_FORMAT_R16G16B16_USCALED:
+ return &fetch_R16G16B16_USCALED;
+ case PIPE_FORMAT_R16G16B16A16_USCALED:
+ return &fetch_R16G16B16A16_USCALED;
+
+ case PIPE_FORMAT_R16_SNORM:
+ return &fetch_R16_SNORM;
+ case PIPE_FORMAT_R16G16_SNORM:
+ return &fetch_R16G16_SNORM;
+ case PIPE_FORMAT_R16G16B16_SNORM:
+ return &fetch_R16G16B16_SNORM;
+ case PIPE_FORMAT_R16G16B16A16_SNORM:
+ return &fetch_R16G16B16A16_SNORM;
+
+ case PIPE_FORMAT_R16_SSCALED:
+ return &fetch_R16_SSCALED;
+ case PIPE_FORMAT_R16G16_SSCALED:
+ return &fetch_R16G16_SSCALED;
+ case PIPE_FORMAT_R16G16B16_SSCALED:
+ return &fetch_R16G16B16_SSCALED;
+ case PIPE_FORMAT_R16G16B16A16_SSCALED:
+ return &fetch_R16G16B16A16_SSCALED;
+
+ case PIPE_FORMAT_R8_UNORM:
+ return &fetch_R8_UNORM;
+ case PIPE_FORMAT_R8G8_UNORM:
+ return &fetch_R8G8_UNORM;
+ case PIPE_FORMAT_R8G8B8_UNORM:
+ return &fetch_R8G8B8_UNORM;
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ return &fetch_R8G8B8A8_UNORM;
+
+ case PIPE_FORMAT_R8_USCALED:
+ return &fetch_R8_USCALED;
+ case PIPE_FORMAT_R8G8_USCALED:
+ return &fetch_R8G8_USCALED;
+ case PIPE_FORMAT_R8G8B8_USCALED:
+ return &fetch_R8G8B8_USCALED;
+ case PIPE_FORMAT_R8G8B8A8_USCALED:
+ return &fetch_R8G8B8A8_USCALED;
+
+ case PIPE_FORMAT_R8_SNORM:
+ return &fetch_R8_SNORM;
+ case PIPE_FORMAT_R8G8_SNORM:
+ return &fetch_R8G8_SNORM;
+ case PIPE_FORMAT_R8G8B8_SNORM:
+ return &fetch_R8G8B8_SNORM;
+ case PIPE_FORMAT_R8G8B8A8_SNORM:
+ return &fetch_R8G8B8A8_SNORM;
+
+ case PIPE_FORMAT_R8_SSCALED:
+ return &fetch_R8_SSCALED;
+ case PIPE_FORMAT_R8G8_SSCALED:
+ return &fetch_R8G8_SSCALED;
+ case PIPE_FORMAT_R8G8B8_SSCALED:
+ return &fetch_R8G8B8_SSCALED;
+ case PIPE_FORMAT_R8G8B8A8_SSCALED:
+ return &fetch_R8G8B8A8_SSCALED;
+
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ return &fetch_A8R8G8B8_UNORM;
+
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ return &fetch_B8G8R8A8_UNORM;
+
+ case PIPE_FORMAT_R32_FIXED:
+ return &fetch_R32_FIXED;
+ case PIPE_FORMAT_R32G32_FIXED:
+ return &fetch_R32G32_FIXED;
+ case PIPE_FORMAT_R32G32B32_FIXED:
+ return &fetch_R32G32B32_FIXED;
+ case PIPE_FORMAT_R32G32B32A32_FIXED:
+ return &fetch_R32G32B32A32_FIXED;
+
+ default:
+ assert(0);
+ return &fetch_NULL;
+ }
+}
+
+
+
+
+static emit_func get_emit_func( enum pipe_format format )
+{
+ /* silence warnings */
+ (void) emit_R32G32B32A32_FIXED;
+ (void) emit_R32G32B32_FIXED;
+ (void) emit_R32G32_FIXED;
+ (void) emit_R32_FIXED;
+
+ switch (format) {
+ case PIPE_FORMAT_R64_FLOAT:
+ return &emit_R64_FLOAT;
+ case PIPE_FORMAT_R64G64_FLOAT:
+ return &emit_R64G64_FLOAT;
+ case PIPE_FORMAT_R64G64B64_FLOAT:
+ return &emit_R64G64B64_FLOAT;
+ case PIPE_FORMAT_R64G64B64A64_FLOAT:
+ return &emit_R64G64B64A64_FLOAT;
+
+ case PIPE_FORMAT_R32_FLOAT:
+ return &emit_R32_FLOAT;
+ case PIPE_FORMAT_R32G32_FLOAT:
+ return &emit_R32G32_FLOAT;
+ case PIPE_FORMAT_R32G32B32_FLOAT:
+ return &emit_R32G32B32_FLOAT;
+ case PIPE_FORMAT_R32G32B32A32_FLOAT:
+ return &emit_R32G32B32A32_FLOAT;
+
+ case PIPE_FORMAT_R32_UNORM:
+ return &emit_R32_UNORM;
+ case PIPE_FORMAT_R32G32_UNORM:
+ return &emit_R32G32_UNORM;
+ case PIPE_FORMAT_R32G32B32_UNORM:
+ return &emit_R32G32B32_UNORM;
+ case PIPE_FORMAT_R32G32B32A32_UNORM:
+ return &emit_R32G32B32A32_UNORM;
+
+ case PIPE_FORMAT_R32_USCALED:
+ return &emit_R32_USCALED;
+ case PIPE_FORMAT_R32G32_USCALED:
+ return &emit_R32G32_USCALED;
+ case PIPE_FORMAT_R32G32B32_USCALED:
+ return &emit_R32G32B32_USCALED;
+ case PIPE_FORMAT_R32G32B32A32_USCALED:
+ return &emit_R32G32B32A32_USCALED;
+
+ case PIPE_FORMAT_R32_SNORM:
+ return &emit_R32_SNORM;
+ case PIPE_FORMAT_R32G32_SNORM:
+ return &emit_R32G32_SNORM;
+ case PIPE_FORMAT_R32G32B32_SNORM:
+ return &emit_R32G32B32_SNORM;
+ case PIPE_FORMAT_R32G32B32A32_SNORM:
+ return &emit_R32G32B32A32_SNORM;
+
+ case PIPE_FORMAT_R32_SSCALED:
+ return &emit_R32_SSCALED;
+ case PIPE_FORMAT_R32G32_SSCALED:
+ return &emit_R32G32_SSCALED;
+ case PIPE_FORMAT_R32G32B32_SSCALED:
+ return &emit_R32G32B32_SSCALED;
+ case PIPE_FORMAT_R32G32B32A32_SSCALED:
+ return &emit_R32G32B32A32_SSCALED;
+
+ case PIPE_FORMAT_R16_UNORM:
+ return &emit_R16_UNORM;
+ case PIPE_FORMAT_R16G16_UNORM:
+ return &emit_R16G16_UNORM;
+ case PIPE_FORMAT_R16G16B16_UNORM:
+ return &emit_R16G16B16_UNORM;
+ case PIPE_FORMAT_R16G16B16A16_UNORM:
+ return &emit_R16G16B16A16_UNORM;
+
+ case PIPE_FORMAT_R16_USCALED:
+ return &emit_R16_USCALED;
+ case PIPE_FORMAT_R16G16_USCALED:
+ return &emit_R16G16_USCALED;
+ case PIPE_FORMAT_R16G16B16_USCALED:
+ return &emit_R16G16B16_USCALED;
+ case PIPE_FORMAT_R16G16B16A16_USCALED:
+ return &emit_R16G16B16A16_USCALED;
+
+ case PIPE_FORMAT_R16_SNORM:
+ return &emit_R16_SNORM;
+ case PIPE_FORMAT_R16G16_SNORM:
+ return &emit_R16G16_SNORM;
+ case PIPE_FORMAT_R16G16B16_SNORM:
+ return &emit_R16G16B16_SNORM;
+ case PIPE_FORMAT_R16G16B16A16_SNORM:
+ return &emit_R16G16B16A16_SNORM;
+
+ case PIPE_FORMAT_R16_SSCALED:
+ return &emit_R16_SSCALED;
+ case PIPE_FORMAT_R16G16_SSCALED:
+ return &emit_R16G16_SSCALED;
+ case PIPE_FORMAT_R16G16B16_SSCALED:
+ return &emit_R16G16B16_SSCALED;
+ case PIPE_FORMAT_R16G16B16A16_SSCALED:
+ return &emit_R16G16B16A16_SSCALED;
+
+ case PIPE_FORMAT_R8_UNORM:
+ return &emit_R8_UNORM;
+ case PIPE_FORMAT_R8G8_UNORM:
+ return &emit_R8G8_UNORM;
+ case PIPE_FORMAT_R8G8B8_UNORM:
+ return &emit_R8G8B8_UNORM;
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ return &emit_R8G8B8A8_UNORM;
+
+ case PIPE_FORMAT_R8_USCALED:
+ return &emit_R8_USCALED;
+ case PIPE_FORMAT_R8G8_USCALED:
+ return &emit_R8G8_USCALED;
+ case PIPE_FORMAT_R8G8B8_USCALED:
+ return &emit_R8G8B8_USCALED;
+ case PIPE_FORMAT_R8G8B8A8_USCALED:
+ return &emit_R8G8B8A8_USCALED;
+
+ case PIPE_FORMAT_R8_SNORM:
+ return &emit_R8_SNORM;
+ case PIPE_FORMAT_R8G8_SNORM:
+ return &emit_R8G8_SNORM;
+ case PIPE_FORMAT_R8G8B8_SNORM:
+ return &emit_R8G8B8_SNORM;
+ case PIPE_FORMAT_R8G8B8A8_SNORM:
+ return &emit_R8G8B8A8_SNORM;
+
+ case PIPE_FORMAT_R8_SSCALED:
+ return &emit_R8_SSCALED;
+ case PIPE_FORMAT_R8G8_SSCALED:
+ return &emit_R8G8_SSCALED;
+ case PIPE_FORMAT_R8G8B8_SSCALED:
+ return &emit_R8G8B8_SSCALED;
+ case PIPE_FORMAT_R8G8B8A8_SSCALED:
+ return &emit_R8G8B8A8_SSCALED;
+
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ return &emit_A8R8G8B8_UNORM;
+
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ return &emit_B8G8R8A8_UNORM;
+
+ default:
+ assert(0);
+ return &emit_NULL;
+ }
+}
+
+
+
+/**
+ * Fetch vertex attributes for 'count' vertices.
+ */
+static void PIPE_CDECL generic_run_elts( struct translate *translate,
+ const unsigned *elts,
+ unsigned count,
+ void *output_buffer )
+{
+ struct translate_generic *tg = translate_generic(translate);
+ char *vert = output_buffer;
+ unsigned nr_attrs = tg->nr_attrib;
+ unsigned attr;
+ unsigned i;
+
+   /* loop over vertices, converting each attribute (vertex shader input)
+    */
+ for (i = 0; i < count; i++) {
+ unsigned elt = *elts++;
+
+ for (attr = 0; attr < nr_attrs; attr++) {
+ float data[4];
+
+ const char *src = (tg->attrib[attr].input_ptr +
+ tg->attrib[attr].input_stride * elt);
+
+ char *dst = (vert +
+ tg->attrib[attr].output_offset);
+
+ tg->attrib[attr].fetch( src, data );
+
+ if (0) debug_printf("vert %d/%d attr %d: %f %f %f %f\n",
+ i, elt, attr, data[0], data[1], data[2], data[3]);
+
+ tg->attrib[attr].emit( data, dst );
+ }
+
+ vert += tg->translate.key.output_stride;
+ }
+}
+
+
+
+static void PIPE_CDECL generic_run( struct translate *translate,
+ unsigned start,
+ unsigned count,
+ void *output_buffer )
+{
+ struct translate_generic *tg = translate_generic(translate);
+ char *vert = output_buffer;
+ unsigned nr_attrs = tg->nr_attrib;
+ unsigned attr;
+ unsigned i;
+
+   /* loop over vertices, converting each attribute (vertex shader input)
+    */
+ for (i = 0; i < count; i++) {
+ unsigned elt = start + i;
+
+ for (attr = 0; attr < nr_attrs; attr++) {
+ float data[4];
+
+ const char *src = (tg->attrib[attr].input_ptr +
+ tg->attrib[attr].input_stride * elt);
+
+ char *dst = (vert +
+ tg->attrib[attr].output_offset);
+
+ tg->attrib[attr].fetch( src, data );
+
+ if (0) debug_printf("vert %d attr %d: %f %f %f %f\n",
+ i, attr, data[0], data[1], data[2], data[3]);
+
+ tg->attrib[attr].emit( data, dst );
+ }
+
+ vert += tg->translate.key.output_stride;
+ }
+}
+
+
+
+static void generic_set_buffer( struct translate *translate,
+ unsigned buf,
+ const void *ptr,
+ unsigned stride )
+{
+ struct translate_generic *tg = translate_generic(translate);
+ unsigned i;
+
+ for (i = 0; i < tg->nr_attrib; i++) {
+ if (tg->attrib[i].buffer == buf) {
+ tg->attrib[i].input_ptr = ((char *)ptr +
+ tg->attrib[i].input_offset);
+ tg->attrib[i].input_stride = stride;
+ }
+ }
+}
+
+
+static void generic_release( struct translate *translate )
+{
+ /* Refcount?
+ */
+ FREE(translate);
+}
+
+struct translate *translate_generic_create( const struct translate_key *key )
+{
+ struct translate_generic *tg = CALLOC_STRUCT(translate_generic);
+ unsigned i;
+
+ if (tg == NULL)
+ return NULL;
+
+ tg->translate.key = *key;
+ tg->translate.release = generic_release;
+ tg->translate.set_buffer = generic_set_buffer;
+ tg->translate.run_elts = generic_run_elts;
+ tg->translate.run = generic_run;
+
+ for (i = 0; i < key->nr_elements; i++) {
+
+ tg->attrib[i].fetch = get_fetch_func(key->element[i].input_format);
+ tg->attrib[i].buffer = key->element[i].input_buffer;
+ tg->attrib[i].input_offset = key->element[i].input_offset;
+
+ tg->attrib[i].emit = get_emit_func(key->element[i].output_format);
+ tg->attrib[i].output_offset = key->element[i].output_offset;
+
+ }
+
+ tg->nr_attrib = key->nr_elements;
+
+
+ return &tg->translate;
+}
diff --git a/src/gallium/auxiliary/translate/translate_sse.c b/src/gallium/auxiliary/translate/translate_sse.c
new file mode 100644
index 0000000000..b62db8d8f3
--- /dev/null
+++ b/src/gallium/auxiliary/translate/translate_sse.c
@@ -0,0 +1,706 @@
+/*
+ * Copyright 2003 Tungsten Graphics, inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Whitwell <keithw@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_config.h"
+#include "pipe/p_compiler.h"
+#include "util/u_memory.h"
+#include "util/u_math.h"
+
+#include "translate.h"
+
+
+#if defined(PIPE_ARCH_X86)
+
+#include "rtasm/rtasm_cpu.h"
+#include "rtasm/rtasm_x86sse.h"
+
+
+#define X 0
+#define Y 1
+#define Z 2
+#define W 3
+
+
+typedef void (PIPE_CDECL *run_func)( struct translate *translate,
+ unsigned start,
+ unsigned count,
+ void *output_buffer );
+
+typedef void (PIPE_CDECL *run_elts_func)( struct translate *translate,
+ const unsigned *elts,
+ unsigned count,
+ void *output_buffer );
+
+struct translate_buffer {
+ const void *base_ptr;
+ unsigned stride;
+ void *ptr; /* updated per vertex */
+};
+
+
+struct translate_sse {
+ struct translate translate;
+
+ struct x86_function linear_func;
+ struct x86_function elt_func;
+ struct x86_function *func;
+
+ boolean loaded_identity;
+ boolean loaded_255;
+ boolean loaded_inv_255;
+
+ float identity[4];
+ float float_255[4];
+ float inv_255[4];
+
+ struct translate_buffer buffer[PIPE_MAX_ATTRIBS];
+ unsigned nr_buffers;
+
+ run_func gen_run;
+ run_elts_func gen_run_elts;
+
+ /* these are actually known values, but putting them in a struct
+ * like this is helpful to keep them in sync across the file.
+ */
+ struct x86_reg tmp_EAX;
+ struct x86_reg idx_EBX; /* either start+i or &elt[i] */
+ struct x86_reg outbuf_ECX;
+ struct x86_reg machine_EDX;
+ struct x86_reg count_ESI; /* decrements to zero */
+};
+
+static int get_offset( const void *a, const void *b )
+{
+ return (const char *)b - (const char *)a;
+}
+
+
+
+static struct x86_reg get_identity( struct translate_sse *p )
+{
+ struct x86_reg reg = x86_make_reg(file_XMM, 6);
+
+ if (!p->loaded_identity) {
+ p->loaded_identity = TRUE;
+ p->identity[0] = 0;
+ p->identity[1] = 0;
+ p->identity[2] = 0;
+ p->identity[3] = 1;
+
+ sse_movups(p->func, reg,
+ x86_make_disp(p->machine_EDX,
+ get_offset(p, &p->identity[0])));
+ }
+
+ return reg;
+}
+
+static struct x86_reg get_255( struct translate_sse *p )
+{
+ struct x86_reg reg = x86_make_reg(file_XMM, 7);
+
+ if (!p->loaded_255) {
+ p->loaded_255 = TRUE;
+ p->float_255[0] =
+ p->float_255[1] =
+ p->float_255[2] =
+ p->float_255[3] = 255.0f;
+
+ sse_movups(p->func, reg,
+ x86_make_disp(p->machine_EDX,
+ get_offset(p, &p->float_255[0])));
+ }
+
+ return reg;
+}
+
+static struct x86_reg get_inv_255( struct translate_sse *p )
+{
+ struct x86_reg reg = x86_make_reg(file_XMM, 5);
+
+ if (!p->loaded_inv_255) {
+ p->loaded_inv_255 = TRUE;
+ p->inv_255[0] =
+ p->inv_255[1] =
+ p->inv_255[2] =
+ p->inv_255[3] = 1.0f / 255.0f;
+
+ sse_movups(p->func, reg,
+ x86_make_disp(p->machine_EDX,
+ get_offset(p, &p->inv_255[0])));
+ }
+
+ return reg;
+}
+
+
+static void emit_load_R32G32B32A32( struct translate_sse *p,
+ struct x86_reg data,
+ struct x86_reg arg0 )
+{
+ sse_movups(p->func, data, arg0);
+}
+
+static void emit_load_R32G32B32( struct translate_sse *p,
+ struct x86_reg data,
+ struct x86_reg arg0 )
+{
+ /* Have to jump through some hoops:
+ *
+ * c 0 0 0
+ * c 0 0 1
+ * 0 0 c 1
+ * a b c 1
+ */
+ sse_movss(p->func, data, x86_make_disp(arg0, 8));
+ sse_shufps(p->func, data, get_identity(p), SHUF(X,Y,Z,W) );
+ sse_shufps(p->func, data, data, SHUF(Y,Z,X,W) );
+ sse_movlps(p->func, data, arg0);
+}
+
+static void emit_load_R32G32( struct translate_sse *p,
+ struct x86_reg data,
+ struct x86_reg arg0 )
+{
+ /* 0 0 0 1
+ * a b 0 1
+ */
+ sse_movups(p->func, data, get_identity(p) );
+ sse_movlps(p->func, data, arg0);
+}
+
+
+static void emit_load_R32( struct translate_sse *p,
+ struct x86_reg data,
+ struct x86_reg arg0 )
+{
+ /* a 0 0 0
+ * a 0 0 1
+ */
+ sse_movss(p->func, data, arg0);
+ sse_orps(p->func, data, get_identity(p) );
+}
+
+
+static void emit_load_R8G8B8A8_UNORM( struct translate_sse *p,
+ struct x86_reg data,
+ struct x86_reg src )
+{
+
+ /* Load and unpack twice:
+ */
+ sse_movss(p->func, data, src);
+ sse2_punpcklbw(p->func, data, get_identity(p));
+ sse2_punpcklbw(p->func, data, get_identity(p));
+
+ /* Convert to float:
+ */
+ sse2_cvtdq2ps(p->func, data, data);
+
+
+ /* Scale by 1/255.0
+ */
+ sse_mulps(p->func, data, get_inv_255(p));
+}
+
+
+
+
+static void emit_store_R32G32B32A32( struct translate_sse *p,
+ struct x86_reg dest,
+ struct x86_reg dataXMM )
+{
+ sse_movups(p->func, dest, dataXMM);
+}
+
+static void emit_store_R32G32B32( struct translate_sse *p,
+ struct x86_reg dest,
+ struct x86_reg dataXMM )
+{
+ /* Emit two, shuffle, emit one.
+ */
+ sse_movlps(p->func, dest, dataXMM);
+ sse_shufps(p->func, dataXMM, dataXMM, SHUF(Z,Z,Z,Z) ); /* NOTE! destructive */
+ sse_movss(p->func, x86_make_disp(dest,8), dataXMM);
+}
+
+static void emit_store_R32G32( struct translate_sse *p,
+ struct x86_reg dest,
+ struct x86_reg dataXMM )
+{
+ sse_movlps(p->func, dest, dataXMM);
+}
+
+static void emit_store_R32( struct translate_sse *p,
+ struct x86_reg dest,
+ struct x86_reg dataXMM )
+{
+ sse_movss(p->func, dest, dataXMM);
+}
+
+
+
+static void emit_store_R8G8B8A8_UNORM( struct translate_sse *p,
+ struct x86_reg dest,
+ struct x86_reg dataXMM )
+{
+ /* Scale by 255.0
+ */
+ sse_mulps(p->func, dataXMM, get_255(p));
+
+ /* Pack and emit:
+ */
+ sse2_cvtps2dq(p->func, dataXMM, dataXMM);
+ sse2_packssdw(p->func, dataXMM, dataXMM);
+ sse2_packuswb(p->func, dataXMM, dataXMM);
+ sse_movss(p->func, dest, dataXMM);
+}
+
+
+
+
+
+/* Extended swizzles? Maybe later.
+ */
+static void emit_swizzle( struct translate_sse *p,
+ struct x86_reg dest,
+ struct x86_reg src,
+ unsigned char shuffle )
+{
+ sse_shufps(p->func, dest, src, shuffle);
+}
+
+
+static boolean translate_attr( struct translate_sse *p,
+ const struct translate_element *a,
+ struct x86_reg srcECX,
+ struct x86_reg dstEAX)
+{
+ struct x86_reg dataXMM = x86_make_reg(file_XMM, 0);
+
+ switch (a->input_format) {
+ case PIPE_FORMAT_R32_FLOAT:
+ emit_load_R32(p, dataXMM, srcECX);
+ break;
+ case PIPE_FORMAT_R32G32_FLOAT:
+ emit_load_R32G32(p, dataXMM, srcECX);
+ break;
+ case PIPE_FORMAT_R32G32B32_FLOAT:
+ emit_load_R32G32B32(p, dataXMM, srcECX);
+ break;
+ case PIPE_FORMAT_R32G32B32A32_FLOAT:
+ emit_load_R32G32B32A32(p, dataXMM, srcECX);
+ break;
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ emit_load_R8G8B8A8_UNORM(p, dataXMM, srcECX);
+ emit_swizzle(p, dataXMM, dataXMM, SHUF(Z,Y,X,W));
+ break;
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ emit_load_R8G8B8A8_UNORM(p, dataXMM, srcECX);
+ break;
+ default:
+ return FALSE;
+ }
+
+ switch (a->output_format) {
+ case PIPE_FORMAT_R32_FLOAT:
+ emit_store_R32(p, dstEAX, dataXMM);
+ break;
+ case PIPE_FORMAT_R32G32_FLOAT:
+ emit_store_R32G32(p, dstEAX, dataXMM);
+ break;
+ case PIPE_FORMAT_R32G32B32_FLOAT:
+ emit_store_R32G32B32(p, dstEAX, dataXMM);
+ break;
+ case PIPE_FORMAT_R32G32B32A32_FLOAT:
+ emit_store_R32G32B32A32(p, dstEAX, dataXMM);
+ break;
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ emit_swizzle(p, dataXMM, dataXMM, SHUF(Z,Y,X,W));
+ emit_store_R8G8B8A8_UNORM(p, dstEAX, dataXMM);
+ break;
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ emit_store_R8G8B8A8_UNORM(p, dstEAX, dataXMM);
+ break;
+ default:
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+
+static boolean init_inputs( struct translate_sse *p,
+ boolean linear )
+{
+ unsigned i;
+ if (linear) {
+ for (i = 0; i < p->nr_buffers; i++) {
+ struct x86_reg buf_stride = x86_make_disp(p->machine_EDX,
+ get_offset(p, &p->buffer[i].stride));
+ struct x86_reg buf_ptr = x86_make_disp(p->machine_EDX,
+ get_offset(p, &p->buffer[i].ptr));
+ struct x86_reg buf_base_ptr = x86_make_disp(p->machine_EDX,
+ get_offset(p, &p->buffer[i].base_ptr));
+ struct x86_reg elt = p->idx_EBX;
+ struct x86_reg tmp = p->tmp_EAX;
+
+
+ /* Calculate pointer to first attrib:
+ */
+ x86_mov(p->func, tmp, buf_stride);
+ x86_imul(p->func, tmp, elt);
+ x86_add(p->func, tmp, buf_base_ptr);
+
+
+ /* In the linear case, keep the buffer pointer instead of the
+ * index number.
+ */
+ if (p->nr_buffers == 1)
+ x86_mov( p->func, elt, tmp );
+ else
+ x86_mov( p->func, buf_ptr, tmp );
+ }
+ }
+
+ return TRUE;
+}
+
+
+static struct x86_reg get_buffer_ptr( struct translate_sse *p,
+ boolean linear,
+ unsigned buf_idx,
+ struct x86_reg elt )
+{
+ if (linear && p->nr_buffers == 1) {
+ return p->idx_EBX;
+ }
+ else if (linear) {
+ struct x86_reg ptr = p->tmp_EAX;
+ struct x86_reg buf_ptr =
+ x86_make_disp(p->machine_EDX,
+ get_offset(p, &p->buffer[buf_idx].ptr));
+
+ x86_mov(p->func, ptr, buf_ptr);
+ return ptr;
+ }
+ else {
+ struct x86_reg ptr = p->tmp_EAX;
+
+ struct x86_reg buf_stride =
+ x86_make_disp(p->machine_EDX,
+ get_offset(p, &p->buffer[buf_idx].stride));
+
+ struct x86_reg buf_base_ptr =
+ x86_make_disp(p->machine_EDX,
+ get_offset(p, &p->buffer[buf_idx].base_ptr));
+
+
+
+ /* Calculate pointer to current attrib:
+ */
+ x86_mov(p->func, ptr, buf_stride);
+ x86_imul(p->func, ptr, elt);
+ x86_add(p->func, ptr, buf_base_ptr);
+ return ptr;
+ }
+}
+
+
+
+static boolean incr_inputs( struct translate_sse *p,
+ boolean linear )
+{
+ if (linear && p->nr_buffers == 1) {
+ struct x86_reg stride = x86_make_disp(p->machine_EDX,
+ get_offset(p, &p->buffer[0].stride));
+
+ x86_add(p->func, p->idx_EBX, stride);
+ sse_prefetchnta(p->func, x86_make_disp(p->idx_EBX, 192));
+ }
+ else if (linear) {
+ unsigned i;
+
+ /* Is this worthwhile??
+ */
+ for (i = 0; i < p->nr_buffers; i++) {
+ struct x86_reg buf_ptr = x86_make_disp(p->machine_EDX,
+ get_offset(p, &p->buffer[i].ptr));
+ struct x86_reg buf_stride = x86_make_disp(p->machine_EDX,
+ get_offset(p, &p->buffer[i].stride));
+
+ x86_mov(p->func, p->tmp_EAX, buf_ptr);
+ x86_add(p->func, p->tmp_EAX, buf_stride);
+ if (i == 0) sse_prefetchnta(p->func, x86_make_disp(p->tmp_EAX, 192));
+ x86_mov(p->func, buf_ptr, p->tmp_EAX);
+ }
+ }
+ else {
+ x86_lea(p->func, p->idx_EBX, x86_make_disp(p->idx_EBX, 4));
+ }
+
+ return TRUE;
+}
+
+
+/* Build run( struct translate *machine,
+ * unsigned start,
+ * unsigned count,
+ * void *output_buffer )
+ * or
+ * run_elts( struct translate *machine,
+ * unsigned *elts,
+ * unsigned count,
+ * void *output_buffer )
+ *
+ * Lots of hardcoding
+ *
+ * EAX -- pointer to current output vertex
+ * ECX -- pointer to current attribute
+ *
+ */
+static boolean build_vertex_emit( struct translate_sse *p,
+ struct x86_function *func,
+ boolean linear )
+{
+ int fixup, label;
+ unsigned j;
+
+ p->tmp_EAX = x86_make_reg(file_REG32, reg_AX);
+ p->idx_EBX = x86_make_reg(file_REG32, reg_BX);
+ p->outbuf_ECX = x86_make_reg(file_REG32, reg_CX);
+ p->machine_EDX = x86_make_reg(file_REG32, reg_DX);
+ p->count_ESI = x86_make_reg(file_REG32, reg_SI);
+
+ p->func = func;
+ p->loaded_inv_255 = FALSE;
+ p->loaded_255 = FALSE;
+ p->loaded_identity = FALSE;
+
+ x86_init_func(p->func);
+
+ /* Push a few regs?
+ */
+ x86_push(p->func, p->idx_EBX);
+ x86_push(p->func, p->count_ESI);
+
+ /* Load arguments into regs:
+ */
+ x86_mov(p->func, p->machine_EDX, x86_fn_arg(p->func, 1));
+ x86_mov(p->func, p->idx_EBX, x86_fn_arg(p->func, 2));
+ x86_mov(p->func, p->count_ESI, x86_fn_arg(p->func, 3));
+ x86_mov(p->func, p->outbuf_ECX, x86_fn_arg(p->func, 4));
+
+ /* Get vertex count, compare to zero
+ */
+ x86_xor(p->func, p->tmp_EAX, p->tmp_EAX);
+ x86_cmp(p->func, p->count_ESI, p->tmp_EAX);
+ fixup = x86_jcc_forward(p->func, cc_E);
+
+ /* always load, needed or not:
+ */
+ init_inputs(p, linear);
+
+ /* Note address for loop jump
+ */
+ label = x86_get_label(p->func);
+ {
+ struct x86_reg elt = linear ? p->idx_EBX : x86_deref(p->idx_EBX);
+ int last_vb = -1;
+ struct x86_reg vb;
+
+ for (j = 0; j < p->translate.key.nr_elements; j++) {
+ const struct translate_element *a = &p->translate.key.element[j];
+
+ /* Figure out source pointer address:
+ */
+ if (a->input_buffer != last_vb) {
+ last_vb = a->input_buffer;
+ vb = get_buffer_ptr(p, linear, a->input_buffer, elt);
+ }
+
+ if (!translate_attr( p, a,
+ x86_make_disp(vb, a->input_offset),
+ x86_make_disp(p->outbuf_ECX, a->output_offset)))
+ return FALSE;
+ }
+
+ /* Next output vertex:
+ */
+ x86_lea(p->func,
+ p->outbuf_ECX,
+ x86_make_disp(p->outbuf_ECX,
+ p->translate.key.output_stride));
+
+ /* Incr index
+ */
+ incr_inputs( p, linear );
+ }
+
+ /* decr count, loop if not zero
+ */
+ x86_dec(p->func, p->count_ESI);
+ x86_jcc(p->func, cc_NZ, label);
+
+ /* Exit mmx state?
+ */
+ if (p->func->need_emms)
+ mmx_emms(p->func);
+
+ /* Land forward jump here:
+ */
+ x86_fixup_fwd_jump(p->func, fixup);
+
+ /* Pop regs and return
+ */
+
+ x86_pop(p->func, p->count_ESI);
+ x86_pop(p->func, p->idx_EBX);
+ x86_ret(p->func);
+
+ return TRUE;
+}
+
+
+
+
+
+
+
+static void translate_sse_set_buffer( struct translate *translate,
+ unsigned buf,
+ const void *ptr,
+ unsigned stride )
+{
+ struct translate_sse *p = (struct translate_sse *)translate;
+
+ if (buf < p->nr_buffers) {
+ p->buffer[buf].base_ptr = (char *)ptr;
+ p->buffer[buf].stride = stride;
+ }
+
+ if (0) debug_printf("%s %d/%d: %p %d\n",
+ __FUNCTION__, buf,
+ p->nr_buffers,
+ ptr, stride);
+}
+
+
+static void translate_sse_release( struct translate *translate )
+{
+ struct translate_sse *p = (struct translate_sse *)translate;
+
+ x86_release_func( &p->linear_func );
+ x86_release_func( &p->elt_func );
+
+ FREE(p);
+}
+
+static void PIPE_CDECL translate_sse_run_elts( struct translate *translate,
+ const unsigned *elts,
+ unsigned count,
+ void *output_buffer )
+{
+ struct translate_sse *p = (struct translate_sse *)translate;
+
+ p->gen_run_elts( translate,
+ elts,
+ count,
+ output_buffer );
+}
+
+static void PIPE_CDECL translate_sse_run( struct translate *translate,
+ unsigned start,
+ unsigned count,
+ void *output_buffer )
+{
+ struct translate_sse *p = (struct translate_sse *)translate;
+
+ p->gen_run( translate,
+ start,
+ count,
+ output_buffer );
+}
+
+
+struct translate *translate_sse2_create( const struct translate_key *key )
+{
+ struct translate_sse *p = NULL;
+ unsigned i;
+
+ if (!rtasm_cpu_has_sse() || !rtasm_cpu_has_sse2())
+ goto fail;
+
+ p = CALLOC_STRUCT( translate_sse );
+ if (p == NULL)
+ goto fail;
+
+ p->translate.key = *key;
+ p->translate.release = translate_sse_release;
+ p->translate.set_buffer = translate_sse_set_buffer;
+ p->translate.run_elts = translate_sse_run_elts;
+ p->translate.run = translate_sse_run;
+
+ for (i = 0; i < key->nr_elements; i++)
+ p->nr_buffers = MAX2( p->nr_buffers, key->element[i].input_buffer + 1 );
+
+ if (0) debug_printf("nr_buffers: %d\n", p->nr_buffers);
+
+ if (!build_vertex_emit(p, &p->linear_func, TRUE))
+ goto fail;
+
+ if (!build_vertex_emit(p, &p->elt_func, FALSE))
+ goto fail;
+
+ p->gen_run = (run_func)x86_get_func(&p->linear_func);
+ if (p->gen_run == NULL)
+ goto fail;
+
+ p->gen_run_elts = (run_elts_func)x86_get_func(&p->elt_func);
+ if (p->gen_run_elts == NULL)
+ goto fail;
+
+ return &p->translate;
+
+ fail:
+ if (p)
+ translate_sse_release( &p->translate );
+
+ return NULL;
+}
+
+
+
+#else
+
+struct translate *translate_sse2_create( const struct translate_key *key )
+{
+ return NULL;
+}
+
+#endif
diff --git a/src/gallium/auxiliary/util/.gitignore b/src/gallium/auxiliary/util/.gitignore
new file mode 100644
index 0000000000..29c586c9b5
--- /dev/null
+++ b/src/gallium/auxiliary/util/.gitignore
@@ -0,0 +1,2 @@
+u_format_access.c
+u_format_table.c
diff --git a/src/gallium/auxiliary/util/Makefile b/src/gallium/auxiliary/util/Makefile
new file mode 100644
index 0000000000..ae8d330a78
--- /dev/null
+++ b/src/gallium/auxiliary/util/Makefile
@@ -0,0 +1,44 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = util
+
+C_SOURCES = \
+ u_debug.c \
+ u_debug_dump.c \
+ u_debug_symbol.c \
+ u_debug_stack.c \
+ u_blit.c \
+ u_cache.c \
+ u_draw_quad.c \
+ u_format.c \
+ u_format_access.c \
+ u_format_table.c \
+ u_gen_mipmap.c \
+ u_handle_table.c \
+ u_hash_table.c \
+ u_hash.c \
+ u_keymap.c \
+ u_linear.c \
+ u_network.c \
+ u_math.c \
+ u_mm.c \
+ u_rect.c \
+ u_simple_shaders.c \
+ u_snprintf.c \
+ u_stream_stdc.c \
+ u_stream_wd.c \
+ u_surface.c \
+ u_tile.c \
+ u_time.c \
+ u_timed_winsys.c \
+ u_upload_mgr.c \
+ u_simple_screen.c
+
+include ../../Makefile.template
+
+u_format_table.c: u_format_table.py u_format_parse.py u_format.csv
+ python u_format_table.py u_format.csv > $@
+
+u_format_access.c: u_format_access.py u_format_parse.py u_format.csv
+ python u_format_access.py u_format.csv > $@
diff --git a/src/gallium/auxiliary/util/SConscript b/src/gallium/auxiliary/util/SConscript
new file mode 100644
index 0000000000..28a5ab4256
--- /dev/null
+++ b/src/gallium/auxiliary/util/SConscript
@@ -0,0 +1,58 @@
+Import('*')
+
+env = env.Clone()
+
+env.Append(CPPPATH = ['.'])
+
+env.CodeGenerate(
+ target = 'u_format_table.c',
+ script = 'u_format_table.py',
+ source = ['u_format.csv'],
+ command = 'python $SCRIPT $SOURCE > $TARGET'
+)
+
+env.CodeGenerate(
+ target = 'u_format_access.c',
+ script = 'u_format_access.py',
+ source = ['u_format.csv'],
+ command = 'python $SCRIPT $SOURCE > $TARGET'
+)
+
+util = env.ConvenienceLibrary(
+ target = 'util',
+ source = [
+ 'u_bitmask.c',
+ 'u_blit.c',
+ 'u_cache.c',
+ 'u_debug.c',
+ 'u_debug_dump.c',
+ 'u_debug_memory.c',
+ 'u_debug_profile.c',
+ 'u_debug_stack.c',
+ 'u_debug_symbol.c',
+ 'u_draw_quad.c',
+ 'u_format.c',
+ 'u_format_access.c',
+ 'u_format_table.c',
+ 'u_gen_mipmap.c',
+ 'u_handle_table.c',
+ 'u_hash.c',
+ 'u_hash_table.c',
+ 'u_keymap.c',
+ 'u_network.c',
+ 'u_math.c',
+ 'u_mm.c',
+ 'u_rect.c',
+ 'u_simple_shaders.c',
+ 'u_snprintf.c',
+ 'u_stream_stdc.c',
+ 'u_stream_wd.c',
+ 'u_surface.c',
+ 'u_tile.c',
+ 'u_time.c',
+ 'u_timed_winsys.c',
+ 'u_upload_mgr.c',
+ 'u_simple_screen.c',
+ ])
+
+auxiliaries.insert(0, util)
diff --git a/src/gallium/auxiliary/util/u_bitmask.c b/src/gallium/auxiliary/util/u_bitmask.c
new file mode 100644
index 0000000000..77587c07ec
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_bitmask.c
@@ -0,0 +1,320 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Generic bitmask implementation.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+
+#include "util/u_memory.h"
+#include "util/u_bitmask.h"
+
+
+typedef uint32_t util_bitmask_word;
+
+
+#define UTIL_BITMASK_INITIAL_WORDS 16
+#define UTIL_BITMASK_BITS_PER_BYTE 8
+#define UTIL_BITMASK_BITS_PER_WORD (sizeof(util_bitmask_word) * UTIL_BITMASK_BITS_PER_BYTE)
+
+
+struct util_bitmask
+{
+ util_bitmask_word *words;
+
+ /** Number of bits we can currently hold */
+ unsigned size;
+
+ /** Number of consecutive bits set at the start of the bitmask */
+ unsigned filled;
+};
+
+
+struct util_bitmask *
+util_bitmask_create(void)
+{
+ struct util_bitmask *bm;
+
+ bm = MALLOC_STRUCT(util_bitmask);
+ if(!bm)
+ return NULL;
+
+ bm->words = (util_bitmask_word *)CALLOC(UTIL_BITMASK_INITIAL_WORDS, sizeof(util_bitmask_word));
+ if(!bm->words) {
+ FREE(bm);
+ return NULL;
+ }
+
+ bm->size = UTIL_BITMASK_INITIAL_WORDS * UTIL_BITMASK_BITS_PER_WORD;
+ bm->filled = 0;
+
+ return bm;
+}
+
+
+/**
+ * Resize the bitmask if necessary
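+ *
+ * The allocation is doubled until it covers minimum_index: e.g. starting
+ * from the initial 512-bit mask (16 32-bit words), a request for index
+ * 1000 grows the mask to 1024 bits.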
+ */
+static INLINE boolean
+util_bitmask_resize(struct util_bitmask *bm,
+ unsigned minimum_index)
+{
+ unsigned minimum_size = minimum_index + 1;
+ unsigned new_size;
+ util_bitmask_word *new_words;
+
+ /* Check integer overflow */
+ if(!minimum_size)
+ return FALSE;
+
+ if(bm->size > minimum_size)
+ return TRUE;
+
+ assert(bm->size % UTIL_BITMASK_BITS_PER_WORD == 0);
+ new_size = bm->size;
+ while(new_size <= minimum_size) {
+ new_size *= 2;
+ /* Check integer overflow */
+ if(new_size < bm->size)
+ return FALSE;
+ }
+ assert(new_size);
+ assert(new_size % UTIL_BITMASK_BITS_PER_WORD == 0);
+
+ new_words = (util_bitmask_word *)REALLOC((void *)bm->words,
+ bm->size / UTIL_BITMASK_BITS_PER_BYTE,
+ new_size / UTIL_BITMASK_BITS_PER_BYTE);
+ if(!new_words)
+ return FALSE;
+
+ memset(new_words + bm->size/UTIL_BITMASK_BITS_PER_WORD,
+ 0,
+ (new_size - bm->size)/UTIL_BITMASK_BITS_PER_BYTE);
+
+ bm->size = new_size;
+ bm->words = new_words;
+
+ return TRUE;
+}
+
+
+/**
+ * Lazily update the filled counter.
+ */
+static INLINE void
+util_bitmask_filled_set(struct util_bitmask *bm,
+ unsigned index)
+{
+ assert(bm->filled <= bm->size);
+ assert(index <= bm->size);
+
+ if(index == bm->filled) {
+ ++bm->filled;
+ assert(bm->filled <= bm->size);
+ }
+}
+
+static INLINE void
+util_bitmask_filled_unset(struct util_bitmask *bm,
+ unsigned index)
+{
+ assert(bm->filled <= bm->size);
+ assert(index <= bm->size);
+
+ if(index < bm->filled)
+ bm->filled = index;
+}
+
+
+unsigned
+util_bitmask_add(struct util_bitmask *bm)
+{
+ unsigned word;
+ unsigned bit;
+ util_bitmask_word mask;
+
+ assert(bm);
+
+ /* linear search for an empty index */
+ word = bm->filled / UTIL_BITMASK_BITS_PER_WORD;
+ bit = bm->filled % UTIL_BITMASK_BITS_PER_WORD;
+ mask = 1 << bit;
+ while(word < bm->size / UTIL_BITMASK_BITS_PER_WORD) {
+ while(bit < UTIL_BITMASK_BITS_PER_WORD) {
+ if(!(bm->words[word] & mask))
+ goto found;
+ ++bm->filled;
+ ++bit;
+ mask <<= 1;
+ }
+ ++word;
+ bit = 0;
+ mask = 1;
+ }
+found:
+
+ /* grow the bitmask if necessary */
+ if(!util_bitmask_resize(bm, bm->filled))
+ return UTIL_BITMASK_INVALID_INDEX;
+
+ assert(!(bm->words[word] & mask));
+ bm->words[word] |= mask;
+
+ return bm->filled++;
+}
+
+
+unsigned
+util_bitmask_set(struct util_bitmask *bm,
+ unsigned index)
+{
+ unsigned word = index / UTIL_BITMASK_BITS_PER_WORD;
+ unsigned bit = index % UTIL_BITMASK_BITS_PER_WORD;
+ util_bitmask_word mask = 1 << bit;
+
+ assert(bm);
+
+ /* grow the bitmask if necessary */
+ if(!util_bitmask_resize(bm, index))
+ return UTIL_BITMASK_INVALID_INDEX;
+
+ bm->words[word] |= mask;
+
+ util_bitmask_filled_set(bm, index);
+
+ return index;
+}
+
+
+void
+util_bitmask_clear(struct util_bitmask *bm,
+ unsigned index)
+{
+ unsigned word = index / UTIL_BITMASK_BITS_PER_WORD;
+ unsigned bit = index % UTIL_BITMASK_BITS_PER_WORD;
+ util_bitmask_word mask = 1 << bit;
+
+ assert(bm);
+
+ if(index >= bm->size)
+ return;
+
+ bm->words[word] &= ~mask;
+
+ util_bitmask_filled_unset(bm, index);
+}
+
+
+boolean
+util_bitmask_get(struct util_bitmask *bm,
+ unsigned index)
+{
+ unsigned word = index / UTIL_BITMASK_BITS_PER_WORD;
+ unsigned bit = index % UTIL_BITMASK_BITS_PER_WORD;
+ util_bitmask_word mask = 1 << bit;
+
+ assert(bm);
+
+ if(index < bm->filled) {
+ assert(bm->words[word] & mask);
+ return TRUE;
+ }
+
+ if(index >= bm->size)
+ return FALSE;
+
+ if(bm->words[word] & mask) {
+ util_bitmask_filled_set(bm, index);
+ return TRUE;
+ }
+ else
+ return FALSE;
+}
+
+
+unsigned
+util_bitmask_get_next_index(struct util_bitmask *bm,
+ unsigned index)
+{
+ unsigned word = index / UTIL_BITMASK_BITS_PER_WORD;
+ unsigned bit = index % UTIL_BITMASK_BITS_PER_WORD;
+ util_bitmask_word mask = 1 << bit;
+
+ if(index < bm->filled) {
+ assert(bm->words[word] & mask);
+ return index;
+ }
+
+ if(index >= bm->size) {
+ return UTIL_BITMASK_INVALID_INDEX;
+ }
+
+ /* Do a linear search */
+ while(word < bm->size / UTIL_BITMASK_BITS_PER_WORD) {
+ while(bit < UTIL_BITMASK_BITS_PER_WORD) {
+ if(bm->words[word] & mask) {
+ if(index == bm->filled) {
+ ++bm->filled;
+ assert(bm->filled <= bm->size);
+ }
+ return index;
+ }
+ ++index;
+ ++bit;
+ mask <<= 1;
+ }
+ ++word;
+ bit = 0;
+ mask = 1;
+ }
+
+ return UTIL_BITMASK_INVALID_INDEX;
+}
+
+
+unsigned
+util_bitmask_get_first_index(struct util_bitmask *bm)
+{
+ return util_bitmask_get_next_index(bm, 0);
+}
+
+
+void
+util_bitmask_destroy(struct util_bitmask *bm)
+{
+ assert(bm);
+
+ FREE(bm->words);
+ FREE(bm);
+}
+
diff --git a/src/gallium/auxiliary/util/u_bitmask.h b/src/gallium/auxiliary/util/u_bitmask.h
new file mode 100644
index 0000000000..87f1110296
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_bitmask.h
@@ -0,0 +1,114 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Generic bitmask.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef U_BITMASK_H_
+#define U_BITMASK_H_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define UTIL_BITMASK_INVALID_INDEX (~0U)
+
+
+/**
+ * Abstract data type to represent arbitrary set of bits.
+ */
+struct util_bitmask;
+
+
+struct util_bitmask *
+util_bitmask_create(void);
+
+
+/**
+ * Find the first cleared bit and set it.
+ *
+ * Returns the bit index on success, or UTIL_BITMASK_INVALID_INDEX if the
+ * bitmask could not be grown (out of memory).
+ */
+unsigned
+util_bitmask_add(struct util_bitmask *bm);
+
+/**
+ * Set a bit.
+ *
+ * Returns the given index on success, or UTIL_BITMASK_INVALID_INDEX if the
+ * bitmask could not be grown (out of memory).
+ */
+unsigned
+util_bitmask_set(struct util_bitmask *bm,
+ unsigned index);
+
+void
+util_bitmask_clear(struct util_bitmask *bm,
+ unsigned index);
+
+boolean
+util_bitmask_get(struct util_bitmask *bm,
+ unsigned index);
+
+
+void
+util_bitmask_destroy(struct util_bitmask *bm);
+
+
+/**
+ * Search for the first set bit.
+ *
+ * Returns UTIL_BITMASK_INVALID_INDEX if a set bit cannot be found.
+ */
+unsigned
+util_bitmask_get_first_index(struct util_bitmask *bm);
+
+
+/**
+ * Search for the first set bit, starting from the given index.
+ *
+ * Returns UTIL_BITMASK_INVALID_INDEX if a set bit cannot be found.
+ */
+unsigned
+util_bitmask_get_next_index(struct util_bitmask *bm,
+ unsigned index);
+
+
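+/*
+ * Usage sketch: the bitmask as a simple handle allocator. Illustrative
+ * only; NULL/error handling beyond the invalid-index check is omitted.
+ *
+ *    struct util_bitmask *bm = util_bitmask_create();
+ *    unsigned handle = util_bitmask_add(bm);
+ *    if (handle != UTIL_BITMASK_INVALID_INDEX) {
+ *       assert(util_bitmask_get(bm, handle));
+ *       util_bitmask_clear(bm, handle);
+ *    }
+ *    util_bitmask_destroy(bm);
+ */
+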
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_BITMASK_H_ */
diff --git a/src/gallium/auxiliary/util/u_blit.c b/src/gallium/auxiliary/util/u_blit.c
new file mode 100644
index 0000000000..c516317d70
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_blit.c
@@ -0,0 +1,600 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Copy/blit pixel rect between surfaces
+ *
+ * @author Brian Paul
+ */
+
+
+#include "pipe/p_context.h"
+#include "util/u_debug.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+#include "pipe/p_shader_tokens.h"
+#include "pipe/p_state.h"
+
+#include "util/u_blit.h"
+#include "util/u_draw_quad.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "util/u_simple_shaders.h"
+#include "util/u_surface.h"
+
+#include "cso_cache/cso_context.h"
+
+
+struct blit_state
+{
+ struct pipe_context *pipe;
+ struct cso_context *cso;
+
+ struct pipe_blend_state blend;
+ struct pipe_depth_stencil_alpha_state depthstencil;
+ struct pipe_rasterizer_state rasterizer;
+ struct pipe_sampler_state sampler;
+ struct pipe_viewport_state viewport;
+
+ void *vs;
+ void *fs[TGSI_WRITEMASK_XYZW + 1];
+
+ struct pipe_buffer *vbuf; /**< quad vertices */
+ unsigned vbuf_slot;
+
+ float vertices[4][2][4]; /**< vertex/texcoords for quad */
+};
+
+
+/**
+ * Create state object for blit.
+ * Intended to be created once and re-used for many blit() calls.
+ */
+struct blit_state *
+util_create_blit(struct pipe_context *pipe, struct cso_context *cso)
+{
+ struct blit_state *ctx;
+ uint i;
+
+ ctx = CALLOC_STRUCT(blit_state);
+ if (!ctx)
+ return NULL;
+
+ ctx->pipe = pipe;
+ ctx->cso = cso;
+
+ /* disabled blending/masking */
+ memset(&ctx->blend, 0, sizeof(ctx->blend));
+ ctx->blend.colormask = PIPE_MASK_RGBA;
+
+ /* no-op depth/stencil/alpha */
+ memset(&ctx->depthstencil, 0, sizeof(ctx->depthstencil));
+
+ /* rasterizer */
+ memset(&ctx->rasterizer, 0, sizeof(ctx->rasterizer));
+ ctx->rasterizer.front_winding = PIPE_WINDING_CW;
+ ctx->rasterizer.cull_mode = PIPE_WINDING_NONE;
+ ctx->rasterizer.bypass_vs_clip_and_viewport = 1;
+ ctx->rasterizer.gl_rasterization_rules = 1;
+
+ /* samplers */
+ memset(&ctx->sampler, 0, sizeof(ctx->sampler));
+ ctx->sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ ctx->sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ ctx->sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ ctx->sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ ctx->sampler.min_img_filter = 0; /* set later */
+ ctx->sampler.mag_img_filter = 0; /* set later */
+ ctx->sampler.normalized_coords = 1;
+
+
+ /* vertex shader - still required to provide the linkage between
+ * fragment shader input semantics and vertex_element/buffers.
+ */
+ {
+ const uint semantic_names[] = { TGSI_SEMANTIC_POSITION,
+ TGSI_SEMANTIC_GENERIC };
+ const uint semantic_indexes[] = { 0, 0 };
+ ctx->vs = util_make_vertex_passthrough_shader(pipe, 2, semantic_names,
+ semantic_indexes);
+ }
+
+ /* fragment shader */
+ ctx->fs[TGSI_WRITEMASK_XYZW] = util_make_fragment_tex_shader(pipe);
+ ctx->vbuf = NULL;
+
+ /* init vertex data that doesn't change */
+ for (i = 0; i < 4; i++) {
+ ctx->vertices[i][0][3] = 1.0f; /* w */
+ ctx->vertices[i][1][2] = 0.0f; /* r */
+ ctx->vertices[i][1][3] = 1.0f; /* q */
+ }
+
+ return ctx;
+}
+
+
+/**
+ * Destroy a blit context
+ */
+void
+util_destroy_blit(struct blit_state *ctx)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ unsigned i;
+
+ pipe->delete_vs_state(pipe, ctx->vs);
+
+ for (i = 0; i < Elements(ctx->fs); i++)
+ if (ctx->fs[i])
+ pipe->delete_fs_state(pipe, ctx->fs[i]);
+
+ pipe_buffer_reference(&ctx->vbuf, NULL);
+
+ FREE(ctx);
+}
+
+
+/**
+ * Get offset of next free slot in vertex buffer for quad vertices.
+ */
+static unsigned
+get_next_slot( struct blit_state *ctx )
+{
+ const unsigned max_slots = 4096 / sizeof ctx->vertices;
+
+ if (ctx->vbuf_slot >= max_slots)
+ util_blit_flush( ctx );
+
+ if (!ctx->vbuf) {
+ ctx->vbuf = pipe_buffer_create(ctx->pipe->screen,
+ 32,
+ PIPE_BUFFER_USAGE_VERTEX,
+ max_slots * sizeof ctx->vertices);
+ }
+
+ return ctx->vbuf_slot++ * sizeof ctx->vertices;
+}
+
+
+/**
+ * Set up vertex data for the textured quad we'll draw.
+ * Note: y=0=top
+ */
+static unsigned
+setup_vertex_data(struct blit_state *ctx,
+ float x0, float y0, float x1, float y1, float z)
+{
+ unsigned offset;
+
+ ctx->vertices[0][0][0] = x0;
+ ctx->vertices[0][0][1] = y0;
+ ctx->vertices[0][0][2] = z;
+ ctx->vertices[0][1][0] = 0.0f; /*s*/
+ ctx->vertices[0][1][1] = 0.0f; /*t*/
+
+ ctx->vertices[1][0][0] = x1;
+ ctx->vertices[1][0][1] = y0;
+ ctx->vertices[1][0][2] = z;
+ ctx->vertices[1][1][0] = 1.0f; /*s*/
+ ctx->vertices[1][1][1] = 0.0f; /*t*/
+
+ ctx->vertices[2][0][0] = x1;
+ ctx->vertices[2][0][1] = y1;
+ ctx->vertices[2][0][2] = z;
+ ctx->vertices[2][1][0] = 1.0f;
+ ctx->vertices[2][1][1] = 1.0f;
+
+ ctx->vertices[3][0][0] = x0;
+ ctx->vertices[3][0][1] = y1;
+ ctx->vertices[3][0][2] = z;
+ ctx->vertices[3][1][0] = 0.0f;
+ ctx->vertices[3][1][1] = 1.0f;
+
+ offset = get_next_slot( ctx );
+
+ pipe_buffer_write(ctx->pipe->screen, ctx->vbuf,
+ offset, sizeof(ctx->vertices), ctx->vertices);
+
+ return offset;
+}
+
+
+/**
+ * Set up vertex data for the textured quad we'll draw.
+ * Note: y=0=top
+ */
+static unsigned
+setup_vertex_data_tex(struct blit_state *ctx,
+ float x0, float y0, float x1, float y1,
+ float s0, float t0, float s1, float t1,
+ float z)
+{
+ unsigned offset;
+
+ ctx->vertices[0][0][0] = x0;
+ ctx->vertices[0][0][1] = y0;
+ ctx->vertices[0][0][2] = z;
+ ctx->vertices[0][1][0] = s0; /*s*/
+ ctx->vertices[0][1][1] = t0; /*t*/
+
+ ctx->vertices[1][0][0] = x1;
+ ctx->vertices[1][0][1] = y0;
+ ctx->vertices[1][0][2] = z;
+ ctx->vertices[1][1][0] = s1; /*s*/
+ ctx->vertices[1][1][1] = t0; /*t*/
+
+ ctx->vertices[2][0][0] = x1;
+ ctx->vertices[2][0][1] = y1;
+ ctx->vertices[2][0][2] = z;
+ ctx->vertices[2][1][0] = s1;
+ ctx->vertices[2][1][1] = t1;
+
+ ctx->vertices[3][0][0] = x0;
+ ctx->vertices[3][0][1] = y1;
+ ctx->vertices[3][0][2] = z;
+ ctx->vertices[3][1][0] = s0;
+ ctx->vertices[3][1][1] = t1;
+
+ offset = get_next_slot( ctx );
+
+ pipe_buffer_write(ctx->pipe->screen, ctx->vbuf,
+ offset, sizeof(ctx->vertices), ctx->vertices);
+
+ return offset;
+}
+
+
+/**
+ * \return TRUE if two regions overlap, FALSE otherwise
+ */
+static boolean
+regions_overlap(int srcX0, int srcY0,
+ int srcX1, int srcY1,
+ int dstX0, int dstY0,
+ int dstX1, int dstY1)
+{
+ if (MAX2(srcX0, srcX1) < MIN2(dstX0, dstX1))
+ return FALSE; /* src completely left of dst */
+
+ if (MAX2(dstX0, dstX1) < MIN2(srcX0, srcX1))
+ return FALSE; /* dst completely left of src */
+
+ if (MAX2(srcY0, srcY1) < MIN2(dstY0, dstY1))
+ return FALSE; /* src completely above dst */
+
+ if (MAX2(dstY0, dstY1) < MIN2(srcY0, srcY1))
+ return FALSE; /* dst completely above src */
+
+ return TRUE; /* some overlap */
+}
+
+
+/**
+ * Copy pixel block from src surface to dst surface.
+ * Overlapping regions are acceptable.
+ * Flipping and stretching are supported.
+ * XXX what about clipping???
+ * XXX need some control over blitting Z and/or stencil.
+ */
+void
+util_blit_pixels_writemask(struct blit_state *ctx,
+ struct pipe_surface *src,
+ int srcX0, int srcY0,
+ int srcX1, int srcY1,
+ struct pipe_surface *dst,
+ int dstX0, int dstY0,
+ int dstX1, int dstY1,
+ float z, uint filter,
+ uint writemask)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ struct pipe_texture texTemp, *tex;
+ struct pipe_surface *texSurf;
+ struct pipe_framebuffer_state fb;
+ const int srcW = abs(srcX1 - srcX0);
+ const int srcH = abs(srcY1 - srcY0);
+ const int srcLeft = MIN2(srcX0, srcX1);
+ const int srcTop = MIN2(srcY0, srcY1);
+ unsigned offset;
+ boolean overlap;
+
+ assert(filter == PIPE_TEX_MIPFILTER_NEAREST ||
+ filter == PIPE_TEX_MIPFILTER_LINEAR);
+
+ assert(screen->is_format_supported(screen, src->format, PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_SAMPLER, 0));
+ assert(screen->is_format_supported(screen, dst->format, PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_RENDER_TARGET, 0));
+
+ /* do the regions overlap? */
+ overlap = util_same_surface(src, dst) &&
+ regions_overlap(srcX0, srcY0, srcX1, srcY1,
+ dstX0, dstY0, dstX1, dstY1);
+
+ /*
+ * Check for simple case: no format conversion, no flipping, no stretching,
+ * no overlapping.
+ * Filter mode should not matter since there's no stretching.
+ */
+ if (dst->format == src->format &&
+ srcX0 < srcX1 &&
+ dstX0 < dstX1 &&
+ srcY0 < srcY1 &&
+ dstY0 < dstY1 &&
+ (dstX1 - dstX0) == (srcX1 - srcX0) &&
+ (dstY1 - dstY0) == (srcY1 - srcY0) &&
+ !overlap) {
+ pipe->surface_copy(pipe,
+ dst, dstX0, dstY0, /* dest */
+ src, srcX0, srcY0, /* src */
+ srcW, srcH); /* size */
+ return;
+ }
+
+ if (srcLeft != srcX0) {
+ /* left-right flip */
+ int tmp = dstX0;
+ dstX0 = dstX1;
+ dstX1 = tmp;
+ }
+
+ if (srcTop != srcY0) {
+ /* up-down flip */
+ int tmp = dstY0;
+ dstY0 = dstY1;
+ dstY1 = tmp;
+ }
+
+ assert(screen->is_format_supported(screen, dst->format, PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_RENDER_TARGET, 0));
+
+ /*
+ * XXX for now we're always creating a temporary texture.
+ * Strictly speaking that's not always needed.
+ */
+
+ /* create temp texture */
+ memset(&texTemp, 0, sizeof(texTemp));
+ texTemp.target = PIPE_TEXTURE_2D;
+ texTemp.format = src->format;
+ texTemp.last_level = 0;
+ texTemp.width[0] = srcW;
+ texTemp.height[0] = srcH;
+ texTemp.depth[0] = 1;
+ pf_get_block(src->format, &texTemp.block);
+
+ tex = screen->texture_create(screen, &texTemp);
+ if (!tex)
+ return;
+
+ texSurf = screen->get_tex_surface(screen, tex, 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /* load temp texture */
+ pipe->surface_copy(pipe,
+ texSurf, 0, 0, /* dest */
+ src, srcLeft, srcTop, /* src */
+ srcW, srcH); /* size */
+
+ /* free the surface, update the texture if necessary.
+ */
+ pipe_surface_reference(&texSurf, NULL);
+
+ /* save state (restored below) */
+ cso_save_blend(ctx->cso);
+ cso_save_depth_stencil_alpha(ctx->cso);
+ cso_save_rasterizer(ctx->cso);
+ cso_save_samplers(ctx->cso);
+ cso_save_sampler_textures(ctx->cso);
+ cso_save_framebuffer(ctx->cso);
+ cso_save_fragment_shader(ctx->cso);
+ cso_save_vertex_shader(ctx->cso);
+
+ /* set misc state we care about */
+ cso_set_blend(ctx->cso, &ctx->blend);
+ cso_set_depth_stencil_alpha(ctx->cso, &ctx->depthstencil);
+ cso_set_rasterizer(ctx->cso, &ctx->rasterizer);
+
+ /* sampler */
+ ctx->sampler.min_img_filter = filter;
+ ctx->sampler.mag_img_filter = filter;
+ cso_single_sampler(ctx->cso, 0, &ctx->sampler);
+ cso_single_sampler_done(ctx->cso);
+
+ /* texture */
+ cso_set_sampler_textures(ctx->cso, 1, &tex);
+
+ if (ctx->fs[writemask] == NULL)
+ ctx->fs[writemask] = util_make_fragment_tex_shader_writemask(pipe, writemask);
+
+ /* shaders */
+ cso_set_fragment_shader_handle(ctx->cso, ctx->fs[writemask]);
+ cso_set_vertex_shader_handle(ctx->cso, ctx->vs);
+
+ /* drawing dest */
+ memset(&fb, 0, sizeof(fb));
+ fb.width = dst->width;
+ fb.height = dst->height;
+ fb.nr_cbufs = 1;
+ fb.cbufs[0] = dst;
+ cso_set_framebuffer(ctx->cso, &fb);
+
+ /* draw quad */
+ offset = setup_vertex_data(ctx,
+ (float) dstX0, (float) dstY0,
+ (float) dstX1, (float) dstY1, z);
+
+ util_draw_vertex_buffer(ctx->pipe, ctx->vbuf, offset,
+ PIPE_PRIM_TRIANGLE_FAN,
+ 4, /* verts */
+ 2); /* attribs/vert */
+
+ /* restore state we changed */
+ cso_restore_blend(ctx->cso);
+ cso_restore_depth_stencil_alpha(ctx->cso);
+ cso_restore_rasterizer(ctx->cso);
+ cso_restore_samplers(ctx->cso);
+ cso_restore_sampler_textures(ctx->cso);
+ cso_restore_framebuffer(ctx->cso);
+ cso_restore_fragment_shader(ctx->cso);
+ cso_restore_vertex_shader(ctx->cso);
+
+ pipe_texture_reference(&tex, NULL);
+}
+
+
+void
+util_blit_pixels(struct blit_state *ctx,
+ struct pipe_surface *src,
+ int srcX0, int srcY0,
+ int srcX1, int srcY1,
+ struct pipe_surface *dst,
+ int dstX0, int dstY0,
+ int dstX1, int dstY1,
+ float z, uint filter )
+{
+ util_blit_pixels_writemask( ctx, src,
+ srcX0, srcY0,
+ srcX1, srcY1,
+ dst,
+ dstX0, dstY0,
+ dstX1, dstY1,
+ z, filter,
+ TGSI_WRITEMASK_XYZW );
+}
+
+
+/* Release vertex buffer at end of frame to avoid synchronous
+ * rendering.
+ */
+void util_blit_flush( struct blit_state *ctx )
+{
+ pipe_buffer_reference(&ctx->vbuf, NULL);
+ ctx->vbuf_slot = 0;
+}
+
+
+
+/**
+ * Copy pixel block from src texture to dst surface.
+ * Overlapping regions are acceptable.
+ *
+ * XXX Should support selection of level.
+ * XXX need some control over blitting Z and/or stencil.
+ */
+void
+util_blit_pixels_tex(struct blit_state *ctx,
+ struct pipe_texture *tex,
+ int srcX0, int srcY0,
+ int srcX1, int srcY1,
+ struct pipe_surface *dst,
+ int dstX0, int dstY0,
+ int dstX1, int dstY1,
+ float z, uint filter)
+{
+ struct pipe_framebuffer_state fb;
+ float s0, t0, s1, t1;
+ unsigned offset;
+
+ assert(filter == PIPE_TEX_MIPFILTER_NEAREST ||
+ filter == PIPE_TEX_MIPFILTER_LINEAR);
+
+ assert(tex->width[0] != 0);
+ assert(tex->height[0] != 0);
+
+ s0 = srcX0 / (float)tex->width[0];
+ s1 = srcX1 / (float)tex->width[0];
+ t0 = srcY0 / (float)tex->height[0];
+ t1 = srcY1 / (float)tex->height[0];
+
+ assert(ctx->pipe->screen->is_format_supported(ctx->pipe->screen, dst->format,
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_RENDER_TARGET,
+ 0));
+
+ /* save state (restored below) */
+ cso_save_blend(ctx->cso);
+ cso_save_depth_stencil_alpha(ctx->cso);
+ cso_save_rasterizer(ctx->cso);
+ cso_save_samplers(ctx->cso);
+ cso_save_sampler_textures(ctx->cso);
+ cso_save_framebuffer(ctx->cso);
+ cso_save_fragment_shader(ctx->cso);
+ cso_save_vertex_shader(ctx->cso);
+
+ /* set misc state we care about */
+ cso_set_blend(ctx->cso, &ctx->blend);
+ cso_set_depth_stencil_alpha(ctx->cso, &ctx->depthstencil);
+ cso_set_rasterizer(ctx->cso, &ctx->rasterizer);
+
+ /* sampler */
+ ctx->sampler.min_img_filter = filter;
+ ctx->sampler.mag_img_filter = filter;
+ cso_single_sampler(ctx->cso, 0, &ctx->sampler);
+ cso_single_sampler_done(ctx->cso);
+
+ /* texture */
+ cso_set_sampler_textures(ctx->cso, 1, &tex);
+
+ /* shaders */
+ cso_set_fragment_shader_handle(ctx->cso, ctx->fs[TGSI_WRITEMASK_XYZW]);
+ cso_set_vertex_shader_handle(ctx->cso, ctx->vs);
+
+ /* drawing dest */
+ memset(&fb, 0, sizeof(fb));
+ fb.width = dst->width;
+ fb.height = dst->height;
+ fb.nr_cbufs = 1;
+ fb.cbufs[0] = dst;
+ cso_set_framebuffer(ctx->cso, &fb);
+
+ /* draw quad */
+ offset = setup_vertex_data_tex(ctx,
+ (float) dstX0, (float) dstY0,
+ (float) dstX1, (float) dstY1,
+ s0, t0, s1, t1,
+ z);
+
+ util_draw_vertex_buffer(ctx->pipe,
+ ctx->vbuf, offset,
+ PIPE_PRIM_TRIANGLE_FAN,
+ 4, /* verts */
+ 2); /* attribs/vert */
+
+ /* restore state we changed */
+ cso_restore_blend(ctx->cso);
+ cso_restore_depth_stencil_alpha(ctx->cso);
+ cso_restore_rasterizer(ctx->cso);
+ cso_restore_samplers(ctx->cso);
+ cso_restore_sampler_textures(ctx->cso);
+ cso_restore_framebuffer(ctx->cso);
+ cso_restore_fragment_shader(ctx->cso);
+ cso_restore_vertex_shader(ctx->cso);
+}
diff --git a/src/gallium/auxiliary/util/u_blit.h b/src/gallium/auxiliary/util/u_blit.h
new file mode 100644
index 0000000000..a102021529
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_blit.h
@@ -0,0 +1,93 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef U_BLIT_H
+#define U_BLIT_H
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct pipe_context;
+struct pipe_surface;
+struct pipe_texture;
+struct cso_context;
+
+
+struct blit_state;
+
+
+extern struct blit_state *
+util_create_blit(struct pipe_context *pipe, struct cso_context *cso);
+
+extern void
+util_destroy_blit(struct blit_state *ctx);
+
+extern void
+util_blit_pixels(struct blit_state *ctx,
+ struct pipe_surface *src,
+ int srcX0, int srcY0,
+ int srcX1, int srcY1,
+ struct pipe_surface *dst,
+ int dstX0, int dstY0,
+ int dstX1, int dstY1,
+ float z, uint filter);
+
+void
+util_blit_pixels_writemask(struct blit_state *ctx,
+ struct pipe_surface *src,
+ int srcX0, int srcY0,
+ int srcX1, int srcY1,
+ struct pipe_surface *dst,
+ int dstX0, int dstY0,
+ int dstX1, int dstY1,
+ float z, uint filter,
+ uint writemask);
+
+extern void
+util_blit_pixels_tex(struct blit_state *ctx,
+ struct pipe_texture *tex,
+ int srcX0, int srcY0,
+ int srcX1, int srcY1,
+ struct pipe_surface *dst,
+ int dstX0, int dstY0,
+ int dstX1, int dstY1,
+ float z, uint filter);
+
+/* Call at end of frame to avoid synchronous rendering.
+ */
+extern void
+util_blit_flush( struct blit_state *ctx );
+
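+/*
+ * Usage sketch: a 1:1 copy between two same-sized surfaces, assuming
+ * "pipe", "cso", "src" and "dst" are provided by the caller.
+ *
+ *    struct blit_state *blit = util_create_blit(pipe, cso);
+ *    util_blit_pixels(blit, src, 0, 0, src->width, src->height,
+ *                     dst, 0, 0, src->width, src->height,
+ *                     0.0f, PIPE_TEX_MIPFILTER_NEAREST);
+ *    util_blit_flush(blit);
+ *    util_destroy_blit(blit);
+ */
+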
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/gallium/auxiliary/util/u_cache.c b/src/gallium/auxiliary/util/u_cache.c
new file mode 100644
index 0000000000..47c16b1c92
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_cache.c
@@ -0,0 +1,217 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Simple cache implementation.
+ *
+ * It is simply a fixed-size array, and previous values are destroyed on
+ * collision.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "util/u_cache.h"
+
+
+struct util_cache_entry
+{
+ void *key;
+ void *value;
+
+#ifdef DEBUG
+ unsigned count;
+#endif
+};
+
+
+struct util_cache
+{
+ /** Hash function */
+ uint32_t (*hash)(const void *key);
+
+ /** Compare two keys */
+ int (*compare)(const void *key1, const void *key2);
+
+ /** Destroy a (key, value) pair */
+ void (*destroy)(void *key, void *value);
+
+ uint32_t size;
+
+ struct util_cache_entry *entries;
+
+#ifdef DEBUG
+ unsigned count;
+#endif
+};
+
+
+struct util_cache *
+util_cache_create(uint32_t (*hash)(const void *key),
+ int (*compare)(const void *key1, const void *key2),
+ void (*destroy)(void *key, void *value),
+ uint32_t size)
+{
+ struct util_cache *cache;
+
+ cache = CALLOC_STRUCT(util_cache);
+ if(!cache)
+ return NULL;
+
+ cache->hash = hash;
+ cache->compare = compare;
+ cache->destroy = destroy;
+ cache->size = size;
+
+ cache->entries = CALLOC(size, sizeof(struct util_cache_entry));
+ if(!cache->entries) {
+ FREE(cache);
+ return NULL;
+ }
+
+ return cache;
+}
+
+
+static INLINE struct util_cache_entry *
+util_cache_entry_get(struct util_cache *cache,
+ const void *key)
+{
+ uint32_t hash;
+
+ hash = cache->hash(key);
+
+ return &cache->entries[hash % cache->size];
+}
+
+static INLINE void
+util_cache_entry_destroy(struct util_cache *cache,
+ struct util_cache_entry *entry)
+{
+ void *key = entry->key;
+ void *value = entry->value;
+
+ entry->key = NULL;
+ entry->value = NULL;
+
+ if(key || value)
+ if(cache->destroy)
+ cache->destroy(key, value);
+}
+
+
+void
+util_cache_set(struct util_cache *cache,
+ void *key,
+ void *value)
+{
+ struct util_cache_entry *entry;
+
+ assert(cache);
+ if (!cache)
+ return;
+
+ entry = util_cache_entry_get(cache, key);
+ util_cache_entry_destroy(cache, entry);
+
+#ifdef DEBUG
+ ++entry->count;
+ ++cache->count;
+#endif
+
+ entry->key = key;
+ entry->value = value;
+}
+
+
+void *
+util_cache_get(struct util_cache *cache,
+ const void *key)
+{
+ struct util_cache_entry *entry;
+
+ assert(cache);
+ if (!cache)
+ return NULL;
+
+ entry = util_cache_entry_get(cache, key);
+ if(!entry->key && !entry->value)
+ return NULL;
+
+ if(cache->compare(key, entry->key) != 0)
+ return NULL;
+
+ return entry->value;
+}
+
+
+void
+util_cache_clear(struct util_cache *cache)
+{
+ uint32_t i;
+
+ assert(cache);
+ if (!cache)
+ return;
+
+ for(i = 0; i < cache->size; ++i)
+ util_cache_entry_destroy(cache, &cache->entries[i]);
+}
+
+
+void
+util_cache_destroy(struct util_cache *cache)
+{
+ assert(cache);
+ if (!cache)
+ return;
+
+#ifdef DEBUG
+ if(cache->count >= 20*cache->size) {
+ /* Normal approximation of the Poisson distribution */
+ double mean = (double)cache->count/(double)cache->size;
+ double stddev = sqrt(mean);
+ unsigned i;
+ for(i = 0; i < cache->size; ++i) {
+ double z = fabs(cache->entries[i].count - mean)/stddev;
+ /* This assert should not fail 99.9999998027% of the time, unless
+ * the hash function is a poor one */
+ assert(z <= 6.0);
+ }
+ }
+#endif
+
+ util_cache_clear(cache);
+
+ FREE(cache->entries);
+ FREE(cache);
+}
diff --git a/src/gallium/auxiliary/util/u_cache.h b/src/gallium/auxiliary/util/u_cache.h
new file mode 100644
index 0000000000..8a612c6585
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_cache.h
@@ -0,0 +1,87 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Simple cache.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef U_CACHE_H_
+#define U_CACHE_H_
+
+
+#include "pipe/p_compiler.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Simple fixed-size cache: a colliding entry evicts the previous occupant.
+ */
+struct util_cache;
+
+
+/**
+ * Create a cache.
+ *
+ * @param hash hash function
+ * @param compare should return 0 for two equal keys
+ * @param destroy destruction callback (optional)
+ * @param size maximum number of entries
+ */
+struct util_cache *
+util_cache_create(uint32_t (*hash)(const void *key),
+ int (*compare)(const void *key1, const void *key2),
+ void (*destroy)(void *key, void *value),
+ uint32_t size);
+
+void
+util_cache_set(struct util_cache *cache,
+ void *key,
+ void *value);
+
+void *
+util_cache_get(struct util_cache *cache,
+ const void *key);
+
+void
+util_cache_clear(struct util_cache *cache);
+
+void
+util_cache_destroy(struct util_cache *cache);
+
+
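+/*
+ * Usage sketch, assuming hash_key/compare_key/destroy_pair are callbacks
+ * supplied by the caller (they are not part of this header):
+ *
+ *    struct util_cache *cache =
+ *       util_cache_create(hash_key, compare_key, destroy_pair, 64);
+ *    util_cache_set(cache, key, value);
+ *    value = util_cache_get(cache, key);   (NULL on a miss or eviction)
+ *    util_cache_destroy(cache);
+ */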
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_CACHE_H_ */
diff --git a/src/gallium/auxiliary/util/u_clear.h b/src/gallium/auxiliary/util/u_clear.h
new file mode 100644
index 0000000000..7c16b32cf9
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_clear.h
@@ -0,0 +1,60 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors:
+ * Michel Dänzer
+ */
+
+
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "util/u_pack_color.h"
+
+
+/**
+ * Clear the given buffers to the specified values.
+ * No masking, no scissor (clear entire buffer).
+ */
+static INLINE void
+util_clear(struct pipe_context *pipe,
+ struct pipe_framebuffer_state *framebuffer, unsigned buffers,
+ const float *rgba, double depth, unsigned stencil)
+{
+ if (buffers & PIPE_CLEAR_COLOR) {
+ struct pipe_surface *ps = framebuffer->cbufs[0];
+ unsigned color;
+
+ util_pack_color(rgba, ps->format, &color);
+ pipe->surface_fill(pipe, ps, 0, 0, ps->width, ps->height, color);
+ }
+
+ if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
+ struct pipe_surface *ps = framebuffer->zsbuf;
+
+ pipe->surface_fill(pipe, ps, 0, 0, ps->width, ps->height,
+ util_pack_z_stencil(ps->format, depth, stencil));
+ }
+}
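+
+/*
+ * Usage sketch: clear both color and depth/stencil of the currently bound
+ * framebuffer, assuming "pipe" and "fb" are provided by the caller.
+ *
+ *    static const float black[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
+ *    util_clear(pipe, &fb, PIPE_CLEAR_COLOR | PIPE_CLEAR_DEPTHSTENCIL,
+ *               black, 1.0, 0);
+ */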
diff --git a/src/gallium/auxiliary/util/u_cpu_detect.c b/src/gallium/auxiliary/util/u_cpu_detect.c
new file mode 100644
index 0000000000..d9f2f8fc28
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_cpu_detect.c
@@ -0,0 +1,506 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Dennis Smit
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/*
+ * Based on the work of Eric Anholt <anholt@FreeBSD.org>
+ */
+
+/* FIXME: clean this entire file up */
+
+#include "u_cpu_detect.h"
+
+#ifdef __linux__
+#define OS_LINUX
+#endif
+#ifdef WIN32
+#define OS_WIN32
+#endif
+
+#if defined(ARCH_POWERPC)
+#if defined(OS_DARWIN)
+#include <sys/sysctl.h>
+#else
+#include <signal.h>
+#include <setjmp.h>
+#endif
+#endif
+
+#if defined(OS_NETBSD) || defined(OS_OPENBSD)
+#include <sys/param.h>
+#include <sys/sysctl.h>
+#include <machine/cpu.h>
+#endif
+
+#if defined(OS_FREEBSD)
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#endif
+
+#if defined(OS_LINUX)
+#include <signal.h>
+#endif
+
+#if defined(OS_WIN32)
+#include <windows.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+
+
+static struct cpu_detect_caps __cpu_detect_caps;
+static int __cpu_detect_initialized = 0;
+
+static int has_cpuid(void);
+static int cpuid(unsigned int ax, unsigned int *p);
+
+/* The sigill handlers */
+#if defined(ARCH_X86) /* x86 (linux katmai handler check thing) */
+#if defined(OS_LINUX) && defined(_POSIX_SOURCE) && defined(X86_FXSR_MAGIC)
+static void sigill_handler_sse(int signal, struct sigcontext sc)
+{
+ /* Both the "xorps %%xmm0,%%xmm0" and "divps %xmm0,%%xmm1"
+ * instructions are 3 bytes long. We must increment the instruction
+ * pointer manually to avoid repeated execution of the offending
+ * instruction.
+ *
+ * If the SIGILL is caused by a divide-by-zero when unmasked
+ * exceptions aren't supported, the SIMD FPU status and control
+ * word will be restored at the end of the test, so we don't need
+ * to worry about doing it here. Besides, we may not be able to...
+ */
+ sc.eip += 3;
+
+ __cpu_detect_caps.hasSSE=0;
+}
+
+static void sigfpe_handler_sse(int signal, struct sigcontext sc)
+{
+ if (sc.fpstate->magic != 0xffff) {
+ /* Our signal context has the extended FPU state, so reset the
+ * divide-by-zero exception mask and clear the divide-by-zero
+ * exception bit.
+ */
+ sc.fpstate->mxcsr |= 0x00000200;
+ sc.fpstate->mxcsr &= 0xfffffffb;
+ } else {
+ /* If we ever get here, we're completely hosed.
+ */
+ }
+}
+#endif
+#endif /* OS_LINUX && _POSIX_SOURCE && X86_FXSR_MAGIC */
+
+#if defined(OS_WIN32)
+LONG CALLBACK win32_sig_handler_sse(EXCEPTION_POINTERS* ep)
+{
+ if (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
+ ep->ContextRecord->Eip += 3;
+ __cpu_detect_caps.hasSSE = 0;
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+#endif /* OS_WIN32 */
+
+
+#if defined(ARCH_POWERPC) && !defined(OS_DARWIN)
+static sigjmp_buf __lv_powerpc_jmpbuf;
+static volatile sig_atomic_t __lv_powerpc_canjump = 0;
+
+static void sigill_handler (int sig);
+
+static void sigill_handler (int sig)
+{
+ if (!__lv_powerpc_canjump) {
+ signal (sig, SIG_DFL);
+ raise (sig);
+ }
+
+ __lv_powerpc_canjump = 0;
+ siglongjmp(__lv_powerpc_jmpbuf, 1);
+}
+
+static void check_os_altivec_support(void)
+{
+#if defined(OS_DARWIN)
+ int sels[2] = {CTL_HW, HW_VECTORUNIT};
+ int has_vu = 0;
+ int len = sizeof (has_vu);
+ int err;
+
+ err = sysctl(sels, 2, &has_vu, &len, NULL, 0);
+
+ if (err == 0) {
+ if (has_vu != 0) {
+ __cpu_detect_caps.hasAltiVec = 1;
+ }
+ }
+#else /* !OS_DARWIN */
+ /* not Darwin; do it the brute-force way */
+ /* this is borrowed from the libmpeg2 library */
+ signal(SIGILL, sigill_handler);
+ if (sigsetjmp(__lv_powerpc_jmpbuf, 1)) {
+ signal(SIGILL, SIG_DFL);
+ } else {
+ __lv_powerpc_canjump = 1;
+
+ __asm __volatile
+ ("mtspr 256, %0\n\t"
+ "vand %%v0, %%v0, %%v0"
+ :
+ : "r" (-1));
+
+ signal(SIGILL, SIG_DFL);
+ __cpu_detect_caps.hasAltiVec = 1;
+ }
+#endif
+}
+#endif
+
+/* If we're running on a processor that can do SSE, let's see if we
+ * are allowed to or not. This will catch 2.4.0 or later kernels that
+ * haven't been configured for a Pentium III but are running on one,
+ * and RedHat patched 2.2 kernels that have broken exception handling
+ * support for user space apps that do SSE.
+ */
+static void check_os_katmai_support(void)
+{
+#if defined(ARCH_X86)
+#if defined(OS_FREEBSD)
+ int has_sse=0, ret;
+ int len = sizeof (has_sse);
+
+ ret = sysctlbyname("hw.instruction_sse", &has_sse, &len, NULL, 0);
+ if (ret || !has_sse)
+ __cpu_detect_caps.hasSSE=0;
+
+#elif defined(OS_NETBSD) || defined(OS_OPENBSD)
+ int has_sse, has_sse2, ret, mib[2];
+ int varlen;
+
+ mib[0] = CTL_MACHDEP;
+ mib[1] = CPU_SSE;
+ varlen = sizeof (has_sse);
+
+ ret = sysctl(mib, 2, &has_sse, &varlen, NULL, 0);
+ if (ret < 0 || !has_sse) {
+ __cpu_detect_caps.hasSSE = 0;
+ } else {
+ __cpu_detect_caps.hasSSE = 1;
+ }
+
+ mib[1] = CPU_SSE2;
+ varlen = sizeof (has_sse2);
+ ret = sysctl(mib, 2, &has_sse2, &varlen, NULL, 0);
+ if (ret < 0 || !has_sse2) {
+ __cpu_detect_caps.hasSSE2 = 0;
+ } else {
+ __cpu_detect_caps.hasSSE2 = 1;
+ }
+ __cpu_detect_caps.hasSSE = 0; /* FIXME ?!?!? */
+
+#elif defined(OS_WIN32)
+ LPTOP_LEVEL_EXCEPTION_FILTER exc_fil;
+ if (__cpu_detect_caps.hasSSE) {
+ exc_fil = SetUnhandledExceptionFilter(win32_sig_handler_sse);
+ __asm __volatile ("xorps %xmm0, %xmm0");
+ SetUnhandledExceptionFilter(exc_fil);
+ }
+#elif defined(OS_LINUX)
+ struct sigaction saved_sigill;
+ struct sigaction saved_sigfpe;
+
+ /* Save the original signal handlers.
+ */
+ sigaction(SIGILL, NULL, &saved_sigill);
+ sigaction(SIGFPE, NULL, &saved_sigfpe);
+
+ signal(SIGILL, (void (*)(int))sigill_handler_sse);
+ signal(SIGFPE, (void (*)(int))sigfpe_handler_sse);
+
+ /* Emulate test for OSFXSR in CR4. The OS will set this bit if it
+ * supports the extended FPU save and restore required for SSE. If
+ * we execute an SSE instruction on a PIII and get a SIGILL, the OS
+ * doesn't support Streaming SIMD Exceptions, even if the processor
+ * does.
+ */
+ if (__cpu_detect_caps.hasSSE) {
+ __asm __volatile ("xorps %xmm1, %xmm0");
+ }
+
+ /* Emulate test for OSXMMEXCPT in CR4. The OS will set this bit if
+ * it supports unmasked SIMD FPU exceptions. If we unmask the
+ * exceptions, do a SIMD divide-by-zero and get a SIGILL, the OS
+ * doesn't support unmasked SIMD FPU exceptions. If we get a SIGFPE
+ * as expected, we're okay but we need to clean up after it.
+ *
+ * Are we being too stringent in our requirement that the OS support
+ * unmasked exceptions? Certain RedHat 2.2 kernels enable SSE by
+ * setting CR4.OSFXSR but don't support unmasked exceptions. Win98
+ * doesn't even support them. We at least know the user-space SSE
+ * support is good in kernels that do support unmasked exceptions,
+ * and therefore to be safe I'm going to leave this test in here.
+ */
+ if (__cpu_detect_caps.hasSSE) {
+ // test_os_katmai_exception_support();
+ }
+
+ /* Restore the original signal handlers.
+ */
+ sigaction(SIGILL, &saved_sigill, NULL);
+ sigaction(SIGFPE, &saved_sigfpe, NULL);
+
+#else
+ /* We can't use POSIX signal handling to test the availability of
+ * SSE, so we disable it by default.
+ */
+ __cpu_detect_caps.hasSSE = 0;
+#endif /* __linux__ */
+#endif
+}
+
+
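+/* CPUID support is detected by toggling bit 21 (the ID flag) of EFLAGS:
+ * the change only sticks on processors that implement the CPUID
+ * instruction. */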
+static int has_cpuid(void)
+{
+#if defined(ARCH_X86)
+ int a, c;
+
+ __asm __volatile
+ ("pushf\n"
+ "popl %0\n"
+ "movl %0, %1\n"
+ "xorl $0x200000, %0\n"
+ "push %0\n"
+ "popf\n"
+ "pushf\n"
+ "popl %0\n"
+ : "=a" (a), "=c" (c)
+ :
+ : "cc");
+
+ return a != c;
+#else
+ return 0;
+#endif
+}
+
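+/* %ebx is preserved via %esi around CPUID because %ebx holds the GOT
+ * pointer when compiling position-independent code. */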
+static int cpuid(unsigned int ax, unsigned int *p)
+{
+#if defined(ARCH_X86)
+ unsigned int flags;
+
+ __asm __volatile
+ ("movl %%ebx, %%esi\n\t"
+ "cpuid\n\t"
+ "xchgl %%ebx, %%esi"
+ : "=a" (p[0]), "=S" (p[1]),
+ "=c" (p[2]), "=d" (p[3])
+ : "0" (ax));
+
+ return 0;
+#else
+ return -1;
+#endif
+}
+
+void cpu_detect_initialize()
+{
+ unsigned int regs[4];
+ unsigned int regs2[4];
+
+ int mib[2], ncpu;
+ int len;
+
+ memset(&__cpu_detect_caps, 0, sizeof (struct cpu_detect_caps));
+
+ /* Check for arch type */
+#if defined(ARCH_MIPS)
+ __cpu_detect_caps.type = CPU_DETECT_TYPE_MIPS;
+#elif defined(ARCH_ALPHA)
+ __cpu_detect_caps.type = CPU_DETECT_TYPE_ALPHA;
+#elif defined(ARCH_SPARC)
+ __cpu_detect_caps.type = CPU_DETECT_TYPE_SPARC;
+#elif defined(ARCH_X86)
+ __cpu_detect_caps.type = CPU_DETECT_TYPE_X86;
+#elif defined(ARCH_POWERPC)
+ __cpu_detect_caps.type = CPU_DETECT_TYPE_POWERPC;
+#else
+ __cpu_detect_caps.type = CPU_DETECT_TYPE_OTHER;
+#endif
+
+ /* Count the number of CPUs in system */
+#if !defined(OS_WIN32) && !defined(OS_UNKNOWN) && defined(_SC_NPROCESSORS_ONLN)
+ __cpu_detect_caps.nrcpu = sysconf(_SC_NPROCESSORS_ONLN);
+ if (__cpu_detect_caps.nrcpu == -1)
+ __cpu_detect_caps.nrcpu = 1;
+
+#elif defined(OS_NETBSD) || defined(OS_FREEBSD) || defined(OS_OPENBSD)
+
+ mib[0] = CTL_HW;
+ mib[1] = HW_NCPU;
+
+ len = sizeof (ncpu);
+ sysctl(mib, 2, &ncpu, &len, NULL, 0);
+ __cpu_detect_caps.nrcpu = ncpu;
+
+#else
+ __cpu_detect_caps.nrcpu = 1;
+#endif
+
+#if defined(ARCH_X86)
+ /* No cpuid, old 486 or lower */
+ if (has_cpuid() == 0)
+ return;
+
+ __cpu_detect_caps.cacheline = 32;
+
+ /* Get max cpuid level */
+ cpuid(0x00000000, regs);
+
+ if (regs[0] >= 0x00000001) {
+ unsigned int cacheline;
+
+ cpuid (0x00000001, regs2);
+
+ __cpu_detect_caps.x86cpuType = (regs2[0] >> 8) & 0xf;
+ if (__cpu_detect_caps.x86cpuType == 0xf)
+ __cpu_detect_caps.x86cpuType = 8 + ((regs2[0] >> 20) & 255); /* use extended family (P4, IA64) */
+
+ /* general feature flags */
+ __cpu_detect_caps.hasTSC = (regs2[3] & (1 << 8 )) >> 8; /* 0x0000100 */
+ __cpu_detect_caps.hasMMX = (regs2[3] & (1 << 23 )) >> 23; /* 0x0800000 */
+ __cpu_detect_caps.hasSSE = (regs2[3] & (1 << 25 )) >> 25; /* 0x2000000 */
+ __cpu_detect_caps.hasSSE2 = (regs2[3] & (1 << 26 )) >> 26; /* 0x4000000 */
+ __cpu_detect_caps.hasSSE3 = (regs2[2] & (1)); /* 0x0000001 */
+ __cpu_detect_caps.hasSSSE3 = (regs2[2] & (1 << 9 )) >> 9; /* 0x0000200 */
+ __cpu_detect_caps.hasMMX2 = __cpu_detect_caps.hasSSE; /* SSE CPUs support MMX ext too */
+
+ cacheline = ((regs2[1] >> 8) & 0xFF) * 8;
+ if (cacheline > 0)
+ __cpu_detect_caps.cacheline = cacheline;
+ }
+
+ cpuid(0x80000000, regs);
+
+ if (regs[0] >= 0x80000001) {
+
+ cpuid(0x80000001, regs2);
+
+ __cpu_detect_caps.hasMMX |= (regs2[3] & (1 << 23 )) >> 23; /* 0x0800000 */
+ __cpu_detect_caps.hasMMX2 |= (regs2[3] & (1 << 22 )) >> 22; /* 0x400000 */
+ __cpu_detect_caps.has3DNow = (regs2[3] & (1 << 31 )) >> 31; /* 0x80000000 */
+ __cpu_detect_caps.has3DNowExt = (regs2[3] & (1 << 30 )) >> 30;
+ }
+
+ if (regs[0] >= 0x80000006) {
+ cpuid(0x80000006, regs2);
+ __cpu_detect_caps.cacheline = regs2[2] & 0xFF;
+ }
+
+
+#if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_NETBSD) || defined(OS_CYGWIN) || defined(OS_OPENBSD)
+ if (__cpu_detect_caps.hasSSE)
+ check_os_katmai_support();
+
+ if (!__cpu_detect_caps.hasSSE) {
+ __cpu_detect_caps.hasSSE2 = 0;
+ __cpu_detect_caps.hasSSE3 = 0;
+ __cpu_detect_caps.hasSSSE3 = 0;
+ }
+#else
+ __cpu_detect_caps.hasSSE = 0;
+ __cpu_detect_caps.hasSSE2 = 0;
+ __cpu_detect_caps.hasSSE3 = 0;
+ __cpu_detect_caps.hasSSSE3 = 0;
+#endif
+#endif /* ARCH_X86 */
+
+#if defined(ARCH_POWERPC)
+ check_os_altivec_support();
+#endif /* ARCH_POWERPC */
+
+ __cpu_detect_initialized = 1;
+}
+
+struct cpu_detect_caps *cpu_detect_get_caps()
+{
+ return &__cpu_detect_caps;
+}
+
+/* The getters and setters for feature flags */
+int cpu_detect_get_tsc()
+{
+ return __cpu_detect_caps.hasTSC;
+}
+
+int cpu_detect_get_mmx()
+{
+ return __cpu_detect_caps.hasMMX;
+}
+
+int cpu_detect_get_mmx2()
+{
+ return __cpu_detect_caps.hasMMX2;
+}
+
+int cpu_detect_get_sse()
+{
+ return __cpu_detect_caps.hasSSE;
+}
+
+int cpu_detect_get_sse2()
+{
+ return __cpu_detect_caps.hasSSE2;
+}
+
+int cpu_detect_get_sse3()
+{
+ return __cpu_detect_caps.hasSSE3;
+}
+
+int cpu_detect_get_ssse3()
+{
+ return __cpu_detect_caps.hasSSSE3;
+}
+
+int cpu_detect_get_3dnow()
+{
+ return __cpu_detect_caps.has3DNow;
+}
+
+int cpu_detect_get_3dnow2()
+{
+ return __cpu_detect_caps.has3DNowExt;
+}
+
+int cpu_detect_get_altivec()
+{
+ return __cpu_detect_caps.hasAltiVec;
+}
+
diff --git a/src/gallium/auxiliary/util/u_cpu_detect.h b/src/gallium/auxiliary/util/u_cpu_detect.h
new file mode 100644
index 0000000000..1612d49286
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_cpu_detect.h
@@ -0,0 +1,78 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Dennis Smit
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ ***************************************************************************/
+
+/*
+ * Based on the work of Eric Anholt <anholt@FreeBSD.org>
+ */
+
+#ifndef _CPU_DETECT_H
+#define _CPU_DETECT_H
+
+typedef enum {
+ CPU_DETECT_TYPE_MIPS,
+ CPU_DETECT_TYPE_ALPHA,
+ CPU_DETECT_TYPE_SPARC,
+ CPU_DETECT_TYPE_X86,
+ CPU_DETECT_TYPE_POWERPC,
+ CPU_DETECT_TYPE_OTHER
+} cpu_detect_type;
+
+struct cpu_detect_caps {
+ cpu_detect_type type;
+ int nrcpu;
+
+ /* Feature flags */
+ int x86cpuType;
+ int cacheline;
+
+ int hasTSC;
+ int hasMMX;
+ int hasMMX2;
+ int hasSSE;
+ int hasSSE2;
+ int hasSSE3;
+ int hasSSSE3;
+ int has3DNow;
+ int has3DNowExt;
+ int hasAltiVec;
+};
+
+/* prototypes */
+void cpu_detect_initialize(void);
+struct cpu_detect_caps *cpu_detect_get_caps(void);
+
+int cpu_detect_get_tsc(void);
+int cpu_detect_get_mmx(void);
+int cpu_detect_get_mmx2(void);
+int cpu_detect_get_sse(void);
+int cpu_detect_get_sse2(void);
+int cpu_detect_get_sse3(void);
+int cpu_detect_get_ssse3(void);
+int cpu_detect_get_3dnow(void);
+int cpu_detect_get_3dnow2(void);
+int cpu_detect_get_altivec(void);
+
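+/*
+ * Usage sketch: initialize once at start-up, then query feature flags.
+ * select_sse2_path() stands in for whatever the caller wants to do.
+ *
+ *    cpu_detect_initialize();
+ *    if (cpu_detect_get_sse2())
+ *       select_sse2_path();
+ *    debug_printf("%d CPU(s)\n", cpu_detect_get_caps()->nrcpu);
+ */
+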
+#endif /* _CPU_DETECT_H */
diff --git a/src/gallium/auxiliary/util/u_debug.c b/src/gallium/auxiliary/util/u_debug.c
new file mode 100644
index 0000000000..96d400c839
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_debug.c
@@ -0,0 +1,823 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "pipe/p_config.h"
+
+#include <stdarg.h>
+
+
+#ifdef PIPE_SUBSYSTEM_WINDOWS_DISPLAY
+
+#include <windows.h>
+#include <winddi.h>
+
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_CE)
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <windows.h>
+#include <types.h>
+
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers
+#endif
+#include <windows.h>
+#include <stdio.h>
+
+#else
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#endif
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+#include "pipe/p_format.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+#include "util/u_memory.h"
+#include "util/u_string.h"
+#include "util/u_stream.h"
+#include "util/u_math.h"
+#include "util/u_tile.h"
+
+
+#ifdef PIPE_SUBSYSTEM_WINDOWS_DISPLAY
+static INLINE void
+_EngDebugPrint(const char *format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+ EngDebugPrint("", (PCHAR)format, ap);
+ va_end(ap);
+}
+#endif
+
+
+void _debug_vprintf(const char *format, va_list ap)
+{
+#if defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY)
+ /* EngDebugPrint does not handle floating point arguments, so we need to use
+ * our own vsnprintf implementation. It is also very slow, so buffer until
+ * we find a newline. */
+ static char buf[512] = {'\0'};
+ size_t len = strlen(buf);
+ int ret = util_vsnprintf(buf + len, sizeof(buf) - len, format, ap);
+ if(ret > (int)(sizeof(buf) - len - 1) || util_strchr(buf + len, '\n')) {
+ _EngDebugPrint("%s", buf);
+ buf[0] = '\0';
+ }
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+ /* OutputDebugStringA can be very slow, so buffer until we find a newline. */
+ static char buf[4096] = {'\0'};
+ size_t len = strlen(buf);
+ int ret = util_vsnprintf(buf + len, sizeof(buf) - len, format, ap);
+ if(ret > (int)(sizeof(buf) - len - 1) || util_strchr(buf + len, '\n')) {
+ OutputDebugStringA(buf);
+ buf[0] = '\0';
+ }
+
+ if(GetConsoleWindow() && !IsDebuggerPresent()) {
+ fflush(stdout);
+ vfprintf(stderr, format, ap);
+ fflush(stderr);
+ }
+
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_CE)
+ wchar_t *wide_format;
+ long wide_str_len;
+ char buf[512];
+ int ret;
+#if (_WIN32_WCE < 600)
+ ret = vsprintf(buf, format, ap);
+ if(ret < 0){
+ sprintf(buf, "Cant handle debug print!");
+ ret = 25;
+ }
+#else
+ ret = vsprintf_s(buf, 512, format, ap);
+ if(ret < 0){
+ sprintf_s(buf, 512, "Can't handle debug print!");
+ ret = 25;
+ }
+#endif
+ buf[ret] = '\0';
+ /* Format is ascii - needs to be converted to wchar_t for printing */
+ wide_str_len = MultiByteToWideChar(CP_ACP, 0, (const char *) buf, -1, NULL, 0);
+ wide_format = (wchar_t *) malloc((wide_str_len+1) * sizeof(wchar_t));
+ if (wide_format) {
+ MultiByteToWideChar(CP_ACP, 0, (const char *) buf, -1,
+ wide_format, wide_str_len);
+ NKDbgPrintfW(wide_format, wide_format);
+ free(wide_format);
+ }
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_MINIPORT)
+ /* TODO */
+#else /* !PIPE_SUBSYSTEM_WINDOWS */
+ fflush(stdout);
+ vfprintf(stderr, format, ap);
+#endif
+}
+
+
+#ifdef DEBUG
+void debug_print_blob( const char *name,
+ const void *blob,
+ unsigned size )
+{
+ const unsigned *ublob = (const unsigned *)blob;
+ unsigned i;
+
+ debug_printf("%s (%d dwords%s)\n", name, size/4,
+ size%4 ? "... plus a few bytes" : "");
+
+ for (i = 0; i < size/4; i++) {
+ debug_printf("%d:\t%08x\n", i, ublob[i]);
+ }
+}
+#endif
+
+
+#ifndef debug_break
+void debug_break(void)
+{
+#if defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+ DebugBreak();
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY)
+ EngDebugBreak();
+#else
+ abort();
+#endif
+}
+#endif
+
+
+#ifdef PIPE_SUBSYSTEM_WINDOWS_DISPLAY
+static const char *
+find(const char *start, const char *end, char c)
+{
+ const char *p;
+ for(p = start; !end || p != end; ++p) {
+ if(*p == c)
+ return p;
+ if(*p < 32)
+ break;
+ }
+ return NULL;
+}
+
+static int
+compare(const char *start, const char *end, const char *s)
+{
+ const char *p, *q;
+ for(p = start, q = s; p != end && *q != '\0'; ++p, ++q) {
+ if(*p != *q)
+ return 0;
+ }
+ return p == end && *q == '\0';
+}
+
+static void
+copy(char *dst, const char *start, const char *end, size_t n)
+{
+ const char *p;
+ char *q;
+ for(p = start, q = dst, n = n - 1; p != end && n; ++p, ++q, --n)
+ *q = *p;
+ *q = '\0';
+}
+#endif
+
+
+static INLINE const char *
+_debug_get_option(const char *name)
+{
+#if defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY)
+ /* EngMapFile creates the file if it does not exist, so it must either be
+ * disabled on release versions or put in a less conspicuous place. */
+#ifdef DEBUG
+ const char *result = NULL;
+ ULONG_PTR iFile = 0;
+ const void *pMap = NULL;
+ const char *sol, *eol, *sep;
+ static char output[1024];
+
+ pMap = EngMapFile(L"\\??\\c:\\gallium.cfg", 0, &iFile);
+ if(pMap) {
+ sol = (const char *)pMap;
+ while(1) {
+ /* TODO: handle LF line endings */
+ eol = find(sol, NULL, '\r');
+ if(!eol || eol == sol)
+ break;
+ sep = find(sol, eol, '=');
+ if(!sep)
+ break;
+ if(compare(sol, sep, name)) {
+ copy(output, sep + 1, eol, sizeof(output));
+ result = output;
+ break;
+ }
+ sol = eol + 2;
+ }
+ EngUnmapFile(iFile);
+ }
+ return result;
+#else
+ return NULL;
+#endif
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_CE) || defined(PIPE_SUBSYSTEM_WINDOWS_MINIPORT)
+ /* TODO: implement */
+ return NULL;
+#else
+ return getenv(name);
+#endif
+}
+
+const char *
+debug_get_option(const char *name, const char *dfault)
+{
+ const char *result;
+
+ result = _debug_get_option(name);
+ if(!result)
+ result = dfault;
+
+ debug_printf("%s: %s = %s\n", __FUNCTION__, name, result ? result : "(null)");
+
+ return result;
+}
+
+boolean
+debug_get_bool_option(const char *name, boolean dfault)
+{
+ const char *str = _debug_get_option(name);
+ boolean result;
+
+ if(str == NULL)
+ result = dfault;
+ else if(!util_strcmp(str, "n"))
+ result = FALSE;
+ else if(!util_strcmp(str, "no"))
+ result = FALSE;
+ else if(!util_strcmp(str, "0"))
+ result = FALSE;
+ else if(!util_strcmp(str, "f"))
+ result = FALSE;
+ else if(!util_strcmp(str, "false"))
+ result = FALSE;
+ else
+ result = TRUE;
+
+ debug_printf("%s: %s = %s\n", __FUNCTION__, name, result ? "TRUE" : "FALSE");
+
+ return result;
+}
+
+
+long
+debug_get_num_option(const char *name, long dfault)
+{
+ long result;
+ const char *str;
+
+ str = _debug_get_option(name);
+ if(!str)
+ result = dfault;
+ else {
+ long sign;
+ char c;
+ c = *str++;
+ if(c == '-') {
+ sign = -1;
+ c = *str++;
+ }
+ else {
+ sign = 1;
+ }
+ result = 0;
+ while('0' <= c && c <= '9') {
+ result = result*10 + (c - '0');
+ c = *str++;
+ }
+ result *= sign;
+ }
+
+ debug_printf("%s: %s = %li\n", __FUNCTION__, name, result);
+
+ return result;
+}
+
+
+unsigned long
+debug_get_flags_option(const char *name,
+ const struct debug_named_value *flags,
+ unsigned long dfault)
+{
+ unsigned long result;
+ const char *str;
+
+ str = _debug_get_option(name);
+ if(!str)
+ result = dfault;
+ else if (!util_strcmp(str, "help")) {
+ result = dfault;
+ while (flags->name) {
+ debug_printf("%s: help for %s: %s [0x%lx]\n", __FUNCTION__, name, flags->name, flags->value);
+ flags++;
+ }
+ }
+ else {
+ result = 0;
+ while( flags->name ) {
+ if (!util_strcmp(str, "all") || util_strstr(str, flags->name ))
+ result |= flags->value;
+ ++flags;
+ }
+ }
+
+ if (str) {
+ debug_printf("%s: %s = 0x%lx (%s)\n", __FUNCTION__, name, result, str);
+ }
+ else {
+ debug_printf("%s: %s = 0x%lx\n", __FUNCTION__, name, result);
+ }
+
+ return result;
+}
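+
+/*
+ * A usage sketch for the function above (the table, option name and values
+ * are illustrative only, not part of this file):
+ *
+ *    static const struct debug_named_value my_flags[] = {
+ *       {"foo", 0x1},
+ *       {"bar", 0x2},
+ *       DEBUG_NAMED_VALUE_END
+ *    };
+ *    unsigned long dbg = debug_get_flags_option("MY_DEBUG", my_flags, 0);
+ *
+ * Setting MY_DEBUG="foo,bar" yields 0x3 (matching is by substring), "all"
+ * sets every listed flag, and "help" prints the table and returns the
+ * default.
+ */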
+
+
+void _debug_assert_fail(const char *expr,
+ const char *file,
+ unsigned line,
+ const char *function)
+{
+ _debug_printf("%s:%u:%s: Assertion `%s' failed.\n", file, line, function, expr);
+#if defined(PIPE_OS_WINDOWS) && !defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+ if (debug_get_bool_option("GALLIUM_ABORT_ON_ASSERT", FALSE))
+#else
+ if (debug_get_bool_option("GALLIUM_ABORT_ON_ASSERT", TRUE))
+#endif
+ debug_break();
+ else
+ _debug_printf("continuing...\n");
+}
+
+
+const char *
+debug_dump_enum(const struct debug_named_value *names,
+ unsigned long value)
+{
+ static char rest[64];
+
+ while(names->name) {
+ if(names->value == value)
+ return names->name;
+ ++names;
+ }
+
+ util_snprintf(rest, sizeof(rest), "0x%08lx", value);
+ return rest;
+}
+
+
+const char *
+debug_dump_enum_noprefix(const struct debug_named_value *names,
+ const char *prefix,
+ unsigned long value)
+{
+ static char rest[64];
+
+ while(names->name) {
+ if(names->value == value) {
+ const char *name = names->name;
+ while (*name == *prefix) {
+ name++;
+ prefix++;
+ }
+ return name;
+ }
+ ++names;
+ }
+
+ util_snprintf(rest, sizeof(rest), "0x%08lx", value);
+ return rest;
+}
+
+
+const char *
+debug_dump_flags(const struct debug_named_value *names,
+ unsigned long value)
+{
+ static char output[4096];
+ static char rest[256];
+ int first = 1;
+
+ output[0] = '\0';
+
+ while(names->name) {
+ if((names->value & value) == names->value) {
+ if (!first)
+ util_strncat(output, "|", sizeof(output));
+ else
+ first = 0;
+ util_strncat(output, names->name, sizeof(output));
+ value &= ~names->value;
+ }
+ ++names;
+ }
+
+ if (value) {
+ if (!first)
+ util_strncat(output, "|", sizeof(output));
+ else
+ first = 0;
+
+ util_snprintf(rest, sizeof(rest), "0x%08lx", value);
+ util_strncat(output, rest, sizeof(output));
+ }
+
+ if(first)
+ return "0";
+
+ return output;
+}
+
+
+static const struct debug_named_value pipe_format_names[] = {
+#ifdef DEBUG
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_NONE),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_A8R8G8B8_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_X8R8G8B8_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_B8G8R8A8_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_B8G8R8X8_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_A1R5G5B5_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_A4R4G4B4_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R5G6B5_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_A2B10G10R10_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_L8_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_A8_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_I8_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_A8L8_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_L16_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_YCBCR),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_YCBCR_REV),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_Z16_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_Z32_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_Z32_FLOAT),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_S8Z24_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_Z24S8_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_X8Z24_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_Z24X8_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_S8_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R64_FLOAT),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R64G64_FLOAT),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R64G64B64_FLOAT),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R64G64B64A64_FLOAT),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32_FLOAT),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32G32_FLOAT),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32G32B32_FLOAT),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32G32B32A32_FLOAT),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32G32_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32G32B32_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32G32B32A32_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32_USCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32G32_USCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32G32B32_USCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32G32B32A32_USCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32_SNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32G32_SNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32G32B32_SNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32G32B32A32_SNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32_SSCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32G32_SSCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32G32B32_SSCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R32G32B32A32_SSCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R16_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R16G16_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R16G16B16_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R16G16B16A16_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R16_USCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R16G16_USCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R16G16B16_USCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R16G16B16A16_USCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R16_SNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R16G16_SNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R16G16B16_SNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R16G16B16A16_SNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R16_SSCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R16G16_SSCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R16G16B16_SSCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R16G16B16A16_SSCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8B8_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8B8A8_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8B8X8_UNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8_USCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8_USCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8B8_USCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8B8A8_USCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8B8X8_USCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8_SNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8_SNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8B8_SNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8B8A8_SNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8B8X8_SNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_B6G5R5_SNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_A8B8G8R8_SNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_X8B8G8R8_SNORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8_SSCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8_SSCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8B8_SSCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8B8A8_SSCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8B8X8_SSCALED),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_L8_SRGB),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_A8L8_SRGB),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8B8_SRGB),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8B8A8_SRGB),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_R8G8B8X8_SRGB),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_A8R8G8B8_SRGB),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_X8R8G8B8_SRGB),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_B8G8R8A8_SRGB),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_B8G8R8X8_SRGB),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_X8UB8UG8SR8S_NORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_B6UG5SR5S_NORM),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_DXT1_RGB),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_DXT1_RGBA),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_DXT3_RGBA),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_DXT5_RGBA),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_DXT1_SRGB),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_DXT1_SRGBA),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_DXT3_SRGBA),
+ DEBUG_NAMED_VALUE(PIPE_FORMAT_DXT5_SRGBA),
+#endif
+ DEBUG_NAMED_VALUE_END
+};
+
+#ifdef DEBUG
+void debug_print_format(const char *msg, unsigned fmt )
+{
+ debug_printf("%s: %s\n", msg, debug_dump_enum(pipe_format_names, fmt));
+}
+#endif
+
+const char *pf_name( enum pipe_format format )
+{
+ return debug_dump_enum(pipe_format_names, format);
+}
+
+
+#ifdef DEBUG
+void debug_dump_image(const char *prefix,
+ unsigned format, unsigned cpp,
+ unsigned width, unsigned height,
+ unsigned stride,
+ const void *data)
+{
+#ifdef PIPE_SUBSYSTEM_WINDOWS_DISPLAY
+ static unsigned no = 0;
+ char filename[256];
+ WCHAR wfilename[sizeof(filename)];
+ ULONG_PTR iFile = 0;
+ struct {
+ unsigned format;
+ unsigned cpp;
+ unsigned width;
+ unsigned height;
+ } header;
+ unsigned char *pMap = NULL;
+ unsigned i;
+
+ util_snprintf(filename, sizeof(filename), "\\??\\c:\\%03u%s.raw", ++no, prefix);
+ for(i = 0; i < sizeof(filename); ++i)
+ wfilename[i] = (WCHAR)filename[i];
+
+ pMap = (unsigned char *)EngMapFile(wfilename, sizeof(header) + height*width*cpp, &iFile);
+ if(!pMap)
+ return;
+
+ header.format = format;
+ header.cpp = cpp;
+ header.width = width;
+ header.height = height;
+ memcpy(pMap, &header, sizeof(header));
+ pMap += sizeof(header);
+
+ for(i = 0; i < height; ++i) {
+ memcpy(pMap, (unsigned char *)data + stride*i, cpp*width);
+ pMap += cpp*width;
+ }
+
+ EngUnmapFile(iFile);
+#endif
+}
+
+void debug_dump_surface(const char *prefix,
+ struct pipe_surface *surface)
+{
+ struct pipe_texture *texture;
+ struct pipe_screen *screen;
+ struct pipe_transfer *transfer;
+ void *data;
+
+ if (!surface)
+ return;
+
+ texture = surface->texture;
+ screen = texture->screen;
+
+ transfer = screen->get_tex_transfer(screen, texture, surface->face,
+ surface->level, surface->zslice,
+ PIPE_TRANSFER_READ, 0, 0, surface->width,
+ surface->height);
+
+ data = screen->transfer_map(screen, transfer);
+ if(!data)
+ goto error;
+
+ debug_dump_image(prefix,
+ transfer->format,
+ transfer->block.size,
+ transfer->nblocksx,
+ transfer->nblocksy,
+ transfer->stride,
+ data);
+
+ screen->transfer_unmap(screen, transfer);
+error:
+ screen->tex_transfer_destroy(transfer);
+}
+
+
+#pragma pack(push,2)
+struct bmp_file_header {
+ uint16_t bfType;
+ uint32_t bfSize;
+ uint16_t bfReserved1;
+ uint16_t bfReserved2;
+ uint32_t bfOffBits;
+};
+#pragma pack(pop)
+
+struct bmp_info_header {
+ uint32_t biSize;
+ int32_t biWidth;
+ int32_t biHeight;
+ uint16_t biPlanes;
+ uint16_t biBitCount;
+ uint32_t biCompression;
+ uint32_t biSizeImage;
+ int32_t biXPelsPerMeter;
+ int32_t biYPelsPerMeter;
+ uint32_t biClrUsed;
+ uint32_t biClrImportant;
+};
+
+struct bmp_rgb_quad {
+ uint8_t rgbBlue;
+ uint8_t rgbGreen;
+ uint8_t rgbRed;
+ uint8_t rgbAlpha;
+};
+
+void
+debug_dump_surface_bmp(const char *filename,
+ struct pipe_surface *surface)
+{
+#ifndef PIPE_SUBSYSTEM_WINDOWS_MINIPORT
+ struct pipe_transfer *transfer;
+ struct pipe_texture *texture = surface->texture;
+ struct pipe_screen *screen = texture->screen;
+
+ transfer = screen->get_tex_transfer(screen, texture, surface->face,
+ surface->level, surface->zslice,
+ PIPE_TRANSFER_READ, 0, 0, surface->width,
+ surface->height);
+
+ debug_dump_transfer_bmp(filename, transfer);
+
+ screen->tex_transfer_destroy(transfer);
+#endif
+}
+
+void
+debug_dump_transfer_bmp(const char *filename,
+ struct pipe_transfer *transfer)
+{
+#ifndef PIPE_SUBSYSTEM_WINDOWS_MINIPORT
+ float *rgba;
+
+ if (!transfer)
+ goto error1;
+
+ rgba = MALLOC(transfer->width*transfer->height*4*sizeof(float));
+ if(!rgba)
+ goto error1;
+
+ pipe_get_tile_rgba(transfer, 0, 0,
+ transfer->width, transfer->height,
+ rgba);
+
+ debug_dump_float_rgba_bmp(filename,
+ transfer->width, transfer->height,
+ rgba, transfer->width);
+
+ FREE(rgba);
+error1:
+ ;
+#endif
+}
+
+void
+debug_dump_float_rgba_bmp(const char *filename,
+ unsigned width, unsigned height,
+ float *rgba, unsigned stride)
+{
+#ifndef PIPE_SUBSYSTEM_WINDOWS_MINIPORT
+ struct util_stream *stream;
+ struct bmp_file_header bmfh;
+ struct bmp_info_header bmih;
+ unsigned x, y;
+
+ if(!rgba)
+ goto error1;
+
+ bmfh.bfType = 0x4d42;
+ bmfh.bfSize = 14 + 40 + height*width*4;
+ bmfh.bfReserved1 = 0;
+ bmfh.bfReserved2 = 0;
+ bmfh.bfOffBits = 14 + 40;
+
+ bmih.biSize = 40;
+ bmih.biWidth = width;
+ bmih.biHeight = height;
+ bmih.biPlanes = 1;
+ bmih.biBitCount = 32;
+ bmih.biCompression = 0;
+ bmih.biSizeImage = height*width*4;
+ bmih.biXPelsPerMeter = 0;
+ bmih.biYPelsPerMeter = 0;
+ bmih.biClrUsed = 0;
+ bmih.biClrImportant = 0;
+
+ stream = util_stream_create(filename, bmfh.bfSize);
+ if(!stream)
+ goto error1;
+
+ util_stream_write(stream, &bmfh, 14);
+ util_stream_write(stream, &bmih, 40);
+
+ y = height;
+ while(y--) {
+ float *ptr = rgba + (stride * y * 4);
+ for(x = 0; x < width; ++x)
+ {
+ struct bmp_rgb_quad pixel;
+ pixel.rgbRed = float_to_ubyte(ptr[x*4 + 0]);
+ pixel.rgbGreen = float_to_ubyte(ptr[x*4 + 1]);
+ pixel.rgbBlue = float_to_ubyte(ptr[x*4 + 2]);
+ pixel.rgbAlpha = 255;
+ util_stream_write(stream, &pixel, 4);
+ }
+ }
+
+ util_stream_close(stream);
+error1:
+ ;
+#endif
+}
+
+#endif
diff --git a/src/gallium/auxiliary/util/u_debug.h b/src/gallium/auxiliary/util/u_debug.h
new file mode 100644
index 0000000000..1380d98d7e
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_debug.h
@@ -0,0 +1,384 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Cross-platform debugging helpers.
+ *
+ * For now it just has assert and printf replacements, but it might be extended
+ * with stack trace reports and more advanced logging in the near future.
+ *
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef U_DEBUG_H_
+#define U_DEBUG_H_
+
+
+#include <stdarg.h>
+
+#include "pipe/p_compiler.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#if defined(DBG) || defined(DEBUG)
+#ifndef DEBUG
+#define DEBUG 1
+#endif
+#else
+#ifndef NDEBUG
+#define NDEBUG 1
+#endif
+#endif
+
+
+/* MSVC before VC7 does not have the __FUNCTION__ macro */
+#if defined(_MSC_VER) && _MSC_VER < 1300
+#define __FUNCTION__ "???"
+#endif
+
+
+void _debug_vprintf(const char *format, va_list ap);
+
+
+static INLINE void
+_debug_printf(const char *format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+ _debug_vprintf(format, ap);
+ va_end(ap);
+}
+
+
+/**
+ * Print debug messages.
+ *
+ * The actual channel used to output debug messages is platform specific. To
+ * avoid misformatting or truncation, follow these rules of thumb:
+ * - output whole lines
+ * - avoid outputting large strings (512 bytes is the current maximum length
+ * that is guaranteed to be printed on all platforms)
+ */
+#if !defined(PIPE_OS_HAIKU)
+static INLINE void
+debug_printf(const char *format, ...)
+{
+#ifdef DEBUG
+ va_list ap;
+ va_start(ap, format);
+ _debug_vprintf(format, ap);
+ va_end(ap);
+#else
+ (void) format; /* silence warning */
+#endif
+}
+
+#endif /* !PIPE_OS_HAIKU */
+
+/*
+ * Variadic macros ("...") aren't portable, so the arguments must be passed in
+ * an extra set of parentheses.
+ *
+ * usage:
+ * debug_printf_once(("answer: %i\n", 42));
+ */
+#define debug_printf_once(args) \
+ do { \
+ static boolean once = TRUE; \
+ if (once) { \
+ once = FALSE; \
+ debug_printf args; \
+ } \
+ } while (0)
+
+
+#ifdef DEBUG
+#define debug_vprintf(_format, _ap) _debug_vprintf(_format, _ap)
+#else
+#define debug_vprintf(_format, _ap) ((void)0)
+#endif
+
+
+#ifdef DEBUG
+/**
+ * Dump a blob in hex to the same place that debug_printf sends its
+ * messages.
+ */
+void debug_print_blob( const char *name, const void *blob, unsigned size );
+
+/* Print a message along with a human-readable pipe_format name.
+ */
+void debug_print_format(const char *msg, unsigned fmt );
+#else
+#define debug_print_blob(_name, _blob, _size) ((void)0)
+#define debug_print_format(_msg, _fmt) ((void)0)
+#endif
+
+
+/**
+ * Hard-coded breakpoint.
+ */
+#ifdef DEBUG
+#if (defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)) && defined(PIPE_CC_GCC)
+#define debug_break() __asm("int3")
+#elif defined(PIPE_CC_MSVC)
+#define debug_break() __debugbreak()
+#else
+void debug_break(void);
+#endif
+#else /* !DEBUG */
+#define debug_break() ((void)0)
+#endif /* !DEBUG */
+
+
+long
+debug_get_num_option(const char *name, long dfault);
+
+void _debug_assert_fail(const char *expr,
+ const char *file,
+ unsigned line,
+ const char *function);
+
+
+/**
+ * Assert macro
+ *
+ * Do not expect that the assert call terminates -- errors must be handled
+ * regardless of assert behavior.
+ */
+#ifdef DEBUG
+#define debug_assert(expr) ((expr) ? (void)0 : _debug_assert_fail(#expr, __FILE__, __LINE__, __FUNCTION__))
+#else
+#define debug_assert(expr) ((void)(expr))
+#endif
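+
+/*
+ * A usage sketch (illustrative only): since the assertion may be compiled out
+ * or configured not to abort, the error path still has to be handled:
+ *
+ *    if (!ptr) {
+ *       debug_assert(!"unexpected NULL pointer");
+ *       return;
+ *    }
+ */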
+
+
+/** Override standard assert macro */
+#ifdef assert
+#undef assert
+#endif
+#define assert(expr) debug_assert(expr)
+
+
+/**
+ * Output the current function name.
+ */
+#ifdef DEBUG
+#define debug_checkpoint() \
+ _debug_printf("%s\n", __FUNCTION__)
+#else
+#define debug_checkpoint() \
+ ((void)0)
+#endif
+
+
+/**
+ * Output the full source code position.
+ */
+#ifdef DEBUG
+#define debug_checkpoint_full() \
+ _debug_printf("%s:%u:%s", __FILE__, __LINE__, __FUNCTION__)
+#else
+#define debug_checkpoint_full() \
+ ((void)0)
+#endif
+
+
+/**
+ * Output a warning message. Muted on release version.
+ */
+#ifdef DEBUG
+#define debug_warning(__msg) \
+ _debug_printf("%s:%u:%s: warning: %s\n", __FILE__, __LINE__, __FUNCTION__, __msg)
+#else
+#define debug_warning(__msg) \
+ ((void)0)
+#endif
+
+
+/**
+ * Output an error message. Not muted on release version.
+ */
+#ifdef DEBUG
+#define debug_error(__msg) \
+ _debug_printf("%s:%u:%s: error: %s\n", __FILE__, __LINE__, __FUNCTION__, __msg)
+#else
+#define debug_error(__msg) \
+ _debug_printf("error: %s\n", __msg)
+#endif
+
+
+/**
+ * Used by debug_dump_enum and debug_dump_flags to describe symbols.
+ */
+struct debug_named_value
+{
+ const char *name;
+ unsigned long value;
+};
+
+
+/**
+ * Some C pre-processor magic to simplify creating named values.
+ *
+ * Example:
+ * @code
+ * static const debug_named_value my_names[] = {
+ * DEBUG_NAMED_VALUE(MY_ENUM_VALUE_X),
+ * DEBUG_NAMED_VALUE(MY_ENUM_VALUE_Y),
+ * DEBUG_NAMED_VALUE(MY_ENUM_VALUE_Z),
+ * DEBUG_NAMED_VALUE_END
+ * };
+ *
+ * ...
+ * debug_printf("%s = %s\n",
+ * name,
+ * debug_dump_enum(my_names, my_value));
+ * ...
+ * @endcode
+ */
+#define DEBUG_NAMED_VALUE(__symbol) {#__symbol, (unsigned long)__symbol}
+#define DEBUG_NAMED_VALUE_END {NULL, 0}
+
+
+/**
+ * Convert an enum value to a string.
+ */
+const char *
+debug_dump_enum(const struct debug_named_value *names,
+ unsigned long value);
+
+const char *
+debug_dump_enum_noprefix(const struct debug_named_value *names,
+ const char *prefix,
+ unsigned long value);
+
+
+/**
+ * Convert binary flags value to a string.
+ */
+const char *
+debug_dump_flags(const struct debug_named_value *names,
+ unsigned long value);
+
+
+/**
+ * Get option.
+ *
+ * It is an alias for getenv on Linux.
+ *
+ * On Windows it reads C:\gallium.cfg, a text file with CR+LF line endings
+ * and one option per line, in the form
+ *
+ * NAME=value
+ *
+ * This file must be terminated with an extra empty line.
+ */
+const char *
+debug_get_option(const char *name, const char *dfault);
+
+boolean
+debug_get_bool_option(const char *name, boolean dfault);
+
+long
+debug_get_num_option(const char *name, long dfault);
+
+unsigned long
+debug_get_flags_option(const char *name,
+ const struct debug_named_value *flags,
+ unsigned long dfault);
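+
+/*
+ * A usage sketch, mirroring how u_debug.c itself consumes these options: on
+ * Linux they simply read environment variables, so e.g.
+ *
+ *    if (debug_get_bool_option("GALLIUM_ABORT_ON_ASSERT", TRUE))
+ *       debug_break();
+ *
+ * can be turned off at run time with GALLIUM_ABORT_ON_ASSERT=0. Option names
+ * other than the ones already used in this tree are caller-defined.
+ */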
+
+
+void *
+debug_malloc(const char *file, unsigned line, const char *function,
+ size_t size);
+
+void
+debug_free(const char *file, unsigned line, const char *function,
+ void *ptr);
+
+void *
+debug_calloc(const char *file, unsigned line, const char *function,
+ size_t count, size_t size );
+
+void *
+debug_realloc(const char *file, unsigned line, const char *function,
+ void *old_ptr, size_t old_size, size_t new_size );
+
+unsigned long
+debug_memory_begin(void);
+
+void
+debug_memory_end(unsigned long beginning);
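+
+/*
+ * A usage sketch (illustrative only): bracket a span of code to report any
+ * allocations made through the debug_* allocators above that were never
+ * freed:
+ *
+ *    unsigned long mark = debug_memory_begin();
+ *    run_some_code();
+ *    debug_memory_end(mark);
+ */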
+
+
+#if defined(PROFILE) && defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY)
+
+void
+debug_profile_start(void);
+
+void
+debug_profile_stop(void);
+
+#endif
+
+
+#ifdef DEBUG
+struct pipe_surface;
+struct pipe_transfer;
+void debug_dump_image(const char *prefix,
+ unsigned format, unsigned cpp,
+ unsigned width, unsigned height,
+ unsigned stride,
+ const void *data);
+void debug_dump_surface(const char *prefix,
+ struct pipe_surface *surface);
+void debug_dump_surface_bmp(const char *filename,
+ struct pipe_surface *surface);
+void debug_dump_transfer_bmp(const char *filename,
+ struct pipe_transfer *transfer);
+void debug_dump_float_rgba_bmp(const char *filename,
+ unsigned width, unsigned height,
+ float *rgba, unsigned stride);
+#else
+#define debug_dump_image(prefix, format, cpp, width, height, stride, data) ((void)0)
+#define debug_dump_surface(prefix, surface) ((void)0)
+#define debug_dump_surface_bmp(filename, surface) ((void)0)
+#define debug_dump_transfer_bmp(filename, transfer) ((void)0)
+#define debug_dump_float_rgba_bmp(filename, width, height, rgba, stride) ((void)0)
+#endif
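+
+/*
+ * A usage sketch (the surface expression is illustrative only): in debug
+ * builds a render target can be written out as a 32-bit BMP for inspection,
+ *
+ *    debug_dump_surface_bmp("color0.bmp", framebuffer->cbufs[0]);
+ *
+ * while in release builds the macros above turn the call into a no-op.
+ */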
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_DEBUG_H_ */
diff --git a/src/gallium/auxiliary/util/u_debug_dump.c b/src/gallium/auxiliary/util/u_debug_dump.c
new file mode 100644
index 0000000000..6bdecde048
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_debug_dump.c
@@ -0,0 +1,189 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "util/u_memory.h"
+#include "util/u_debug.h"
+#include "util/u_debug_dump.h"
+
+
+#define DEBUG_DUMP_INVALID_NAME "<invalid>"
+
+
+#if 0
+static const char *
+debug_dump_strip_prefix(const char *name,
+ const char *prefix)
+{
+ const char *stripped;
+ assert(name);
+ assert(prefix);
+ stripped = name;
+ while(*prefix) {
+ if(*stripped != *prefix)
+ return name;
+
+ ++stripped;
+ ++prefix;
+ }
+ return stripped;
+}
+#endif
+
+static const char *
+debug_dump_enum_continuous(unsigned value,
+ unsigned num_names,
+ const char **names)
+{
+ if (value >= num_names)
+ return DEBUG_DUMP_INVALID_NAME;
+ return names[value];
+}
+
+
+#define DEFINE_DEBUG_DUMP_CONTINUOUS(_name) \
+ const char * \
+ debug_dump_##_name(unsigned value, boolean shortened) \
+ { \
+ if(shortened) \
+ return debug_dump_enum_continuous(value, Elements(debug_dump_##_name##_short_names), debug_dump_##_name##_short_names); \
+ else \
+ return debug_dump_enum_continuous(value, Elements(debug_dump_##_name##_names), debug_dump_##_name##_names); \
+ }
+
+
+static const char *
+debug_dump_blend_factor_names[] = {
+ DEBUG_DUMP_INVALID_NAME, /* 0x0 */
+ "PIPE_BLENDFACTOR_ONE",
+ "PIPE_BLENDFACTOR_SRC_COLOR",
+ "PIPE_BLENDFACTOR_SRC_ALPHA",
+ "PIPE_BLENDFACTOR_DST_ALPHA",
+ "PIPE_BLENDFACTOR_DST_COLOR",
+ "PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE",
+ "PIPE_BLENDFACTOR_CONST_COLOR",
+ "PIPE_BLENDFACTOR_CONST_ALPHA",
+ "PIPE_BLENDFACTOR_SRC1_COLOR",
+ "PIPE_BLENDFACTOR_SRC1_ALPHA",
+ DEBUG_DUMP_INVALID_NAME, /* 0x0b */
+ DEBUG_DUMP_INVALID_NAME, /* 0x0c */
+ DEBUG_DUMP_INVALID_NAME, /* 0x0d */
+ DEBUG_DUMP_INVALID_NAME, /* 0x0e */
+ DEBUG_DUMP_INVALID_NAME, /* 0x0f */
+ DEBUG_DUMP_INVALID_NAME, /* 0x10 */
+ "PIPE_BLENDFACTOR_ZERO",
+ "PIPE_BLENDFACTOR_INV_SRC_COLOR",
+ "PIPE_BLENDFACTOR_INV_SRC_ALPHA",
+ "PIPE_BLENDFACTOR_INV_DST_ALPHA",
+ "PIPE_BLENDFACTOR_INV_DST_COLOR",
+ DEBUG_DUMP_INVALID_NAME, /* 0x16 */
+ "PIPE_BLENDFACTOR_INV_CONST_COLOR",
+ "PIPE_BLENDFACTOR_INV_CONST_ALPHA",
+ "PIPE_BLENDFACTOR_INV_SRC1_COLOR",
+ "PIPE_BLENDFACTOR_INV_SRC1_ALPHA"
+};
+
+static const char *
+debug_dump_blend_factor_short_names[] = {
+ DEBUG_DUMP_INVALID_NAME, /* 0x0 */
+ "one",
+ "src_color",
+ "src_alpha",
+ "dst_alpha",
+ "dst_color",
+ "src_alpha_saturate",
+ "const_color",
+ "const_alpha",
+ "src1_color",
+ "src1_alpha",
+ DEBUG_DUMP_INVALID_NAME, /* 0x0b */
+ DEBUG_DUMP_INVALID_NAME, /* 0x0c */
+ DEBUG_DUMP_INVALID_NAME, /* 0x0d */
+ DEBUG_DUMP_INVALID_NAME, /* 0x0e */
+ DEBUG_DUMP_INVALID_NAME, /* 0x0f */
+ DEBUG_DUMP_INVALID_NAME, /* 0x10 */
+ "zero",
+ "inv_src_color",
+ "inv_src_alpha",
+ "inv_dst_alpha",
+ "inv_dst_color",
+ DEBUG_DUMP_INVALID_NAME, /* 0x16 */
+ "inv_const_color",
+ "inv_const_alpha",
+ "inv_src1_color",
+ "inv_src1_alpha"
+};
+
+DEFINE_DEBUG_DUMP_CONTINUOUS(blend_factor)
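+
+/*
+ * A usage sketch (the variable is illustrative only): the macro above expands
+ * into a debug_dump_blend_factor() definition, so state-dumping code can do
+ *
+ *    debug_printf("rgb_src_factor = %s\n",
+ *                 debug_dump_blend_factor(blend->rgb_src_factor, TRUE));
+ *
+ * which prints short names such as "one" or "src_alpha" (or the full
+ * PIPE_BLENDFACTOR_* names when the second argument is FALSE), and
+ * "<invalid>" for out-of-range values.
+ */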
+
+
+static const char *
+debug_dump_blend_func_names[] = {
+ "PIPE_BLEND_ADD",
+ "PIPE_BLEND_SUBTRACT",
+ "PIPE_BLEND_REVERSE_SUBTRACT",
+ "PIPE_BLEND_MIN",
+ "PIPE_BLEND_MAX"
+};
+
+static const char *
+debug_dump_blend_func_short_names[] = {
+ "add",
+ "sub",
+ "rev_sub",
+ "min",
+ "max"
+};
+
+DEFINE_DEBUG_DUMP_CONTINUOUS(blend_func)
+
+
+static const char *
+debug_dump_func_names[] = {
+ "PIPE_FUNC_NEVER",
+ "PIPE_FUNC_LESS",
+ "PIPE_FUNC_EQUAL",
+ "PIPE_FUNC_LEQUAL",
+ "PIPE_FUNC_GREATER",
+ "PIPE_FUNC_NOTEQUAL",
+ "PIPE_FUNC_GEQUAL",
+ "PIPE_FUNC_ALWAYS"
+};
+
+static const char *
+debug_dump_func_short_names[] = {
+ "never",
+ "less",
+ "equal",
+ "less_equal",
+ "greater",
+ "not_equal",
+ "greater_equal",
+ "always"
+};
+
+DEFINE_DEBUG_DUMP_CONTINUOUS(func)
diff --git a/src/gallium/auxiliary/util/u_debug_dump.h b/src/gallium/auxiliary/util/u_debug_dump.h
new file mode 100644
index 0000000000..102935559c
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_debug_dump.h
@@ -0,0 +1,65 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Dump data in human/machine readable format.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef U_DEBUG_DUMP_H_
+#define U_DEBUG_DUMP_H_
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_state.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+const char *
+debug_dump_blend_factor(unsigned value, boolean shortened);
+
+const char *
+debug_dump_blend_func(unsigned value, boolean shortened);
+
+const char *
+debug_dump_func(unsigned value, boolean shortened);
+
+
+/* FIXME: Move the other debug_dump_xxx functions out of u_debug.h into here. */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_DEBUG_DUMP_H_ */
diff --git a/src/gallium/auxiliary/util/u_debug_memory.c b/src/gallium/auxiliary/util/u_debug_memory.c
new file mode 100644
index 0000000000..7623cb9398
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_debug_memory.c
@@ -0,0 +1,324 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Memory debugging.
+ *
+ * @author José Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#include "pipe/p_config.h"
+
+#if defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY)
+#include <windows.h>
+#include <winddi.h>
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_MINIPORT)
+#include <wdm.h>
+#else
+#include <stdio.h>
+#include <stdlib.h>
+#endif
+
+#include "util/u_debug.h"
+#include "util/u_debug_stack.h"
+#include "util/u_double_list.h"
+
+
+#define DEBUG_MEMORY_MAGIC 0x6e34090aU
+#define DEBUG_MEMORY_STACK 0 /* XXX: disabled until we have symbol lookup */
+
+
+#if defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY) && !defined(WINCE)
+#define real_malloc(_size) EngAllocMem(0, _size, 'D3AG')
+#define real_free(_ptr) EngFreeMem(_ptr)
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_MINIPORT)
+#define real_malloc(_size) ExAllocatePool(0, _size)
+#define real_free(_ptr) ExFreePool(_ptr)
+#else
+#define real_malloc(_size) malloc(_size)
+#define real_free(_ptr) free(_ptr)
+#endif
+
+
+struct debug_memory_header
+{
+ struct list_head head;
+
+ unsigned long no;
+ const char *file;
+ unsigned line;
+ const char *function;
+#if DEBUG_MEMORY_STACK
+ struct debug_stack_frame backtrace[DEBUG_MEMORY_STACK];
+#endif
+ size_t size;
+
+ unsigned magic;
+};
+
+struct debug_memory_footer
+{
+ unsigned magic;
+};
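+
+/*
+ * Each tracked allocation is laid out as
+ *
+ *    [debug_memory_header][user data][debug_memory_footer]
+ *
+ * with a magic value at both ends so that corruption and buffer overflows can
+ * be detected when the block is freed, reallocated, or scanned at shutdown.
+ */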
+
+
+static struct list_head list = { &list, &list };
+
+static unsigned long last_no = 0;
+
+
+static INLINE struct debug_memory_header *
+header_from_data(void *data)
+{
+ if(data)
+ return (struct debug_memory_header *)((char *)data - sizeof(struct debug_memory_header));
+ else
+ return NULL;
+}
+
+static INLINE void *
+data_from_header(struct debug_memory_header *hdr)
+{
+ if(hdr)
+ return (void *)((char *)hdr + sizeof(struct debug_memory_header));
+ else
+ return NULL;
+}
+
+static INLINE struct debug_memory_footer *
+footer_from_header(struct debug_memory_header *hdr)
+{
+ if(hdr)
+ return (struct debug_memory_footer *)((char *)hdr + sizeof(struct debug_memory_header) + hdr->size);
+ else
+ return NULL;
+}
+
+
+void *
+debug_malloc(const char *file, unsigned line, const char *function,
+ size_t size)
+{
+ struct debug_memory_header *hdr;
+ struct debug_memory_footer *ftr;
+
+ hdr = real_malloc(sizeof(*hdr) + size + sizeof(*ftr));
+ if(!hdr) {
+ debug_printf("%s:%u:%s: out of memory when trying to allocate %lu bytes\n",
+ file, line, function,
+ (long unsigned)size);
+ return NULL;
+ }
+
+ hdr->no = last_no++;
+ hdr->file = file;
+ hdr->line = line;
+ hdr->function = function;
+ hdr->size = size;
+ hdr->magic = DEBUG_MEMORY_MAGIC;
+
+#if DEBUG_MEMORY_STACK
+ debug_backtrace_capture(hdr->backtrace, 0, DEBUG_MEMORY_STACK);
+#endif
+
+ ftr = footer_from_header(hdr);
+ ftr->magic = DEBUG_MEMORY_MAGIC;
+
+ LIST_ADDTAIL(&hdr->head, &list);
+
+ return data_from_header(hdr);
+}
+
+void
+debug_free(const char *file, unsigned line, const char *function,
+ void *ptr)
+{
+ struct debug_memory_header *hdr;
+ struct debug_memory_footer *ftr;
+
+ if(!ptr)
+ return;
+
+ hdr = header_from_data(ptr);
+ if(hdr->magic != DEBUG_MEMORY_MAGIC) {
+ debug_printf("%s:%u:%s: freeing bad or corrupted memory %p\n",
+ file, line, function,
+ ptr);
+ debug_assert(0);
+ return;
+ }
+
+ ftr = footer_from_header(hdr);
+ if(ftr->magic != DEBUG_MEMORY_MAGIC) {
+ debug_printf("%s:%u:%s: buffer overflow %p\n",
+ hdr->file, hdr->line, hdr->function,
+ ptr);
+ debug_assert(0);
+ }
+
+ LIST_DEL(&hdr->head);
+ hdr->magic = 0;
+ ftr->magic = 0;
+
+ real_free(hdr);
+}
+
+void *
+debug_calloc(const char *file, unsigned line, const char *function,
+ size_t count, size_t size )
+{
+ void *ptr = debug_malloc( file, line, function, count * size );
+ if( ptr )
+ memset( ptr, 0, count * size );
+ return ptr;
+}
+
+void *
+debug_realloc(const char *file, unsigned line, const char *function,
+ void *old_ptr, size_t old_size, size_t new_size )
+{
+ struct debug_memory_header *old_hdr, *new_hdr;
+ struct debug_memory_footer *old_ftr, *new_ftr;
+ void *new_ptr;
+
+ if(!old_ptr)
+ return debug_malloc( file, line, function, new_size );
+
+ if(!new_size) {
+ debug_free( file, line, function, old_ptr );
+ return NULL;
+ }
+
+ old_hdr = header_from_data(old_ptr);
+ if(old_hdr->magic != DEBUG_MEMORY_MAGIC) {
+ debug_printf("%s:%u:%s: reallocating bad or corrupted memory %p\n",
+ file, line, function,
+ old_ptr);
+ debug_assert(0);
+ return NULL;
+ }
+
+ old_ftr = footer_from_header(old_hdr);
+ if(old_ftr->magic != DEBUG_MEMORY_MAGIC) {
+ debug_printf("%s:%u:%s: buffer overflow %p\n",
+ old_hdr->file, old_hdr->line, old_hdr->function,
+ old_ptr);
+ debug_assert(0);
+ }
+
+ /* alloc new */
+ new_hdr = real_malloc(sizeof(*new_hdr) + new_size + sizeof(*new_ftr));
+ if(!new_hdr) {
+ debug_printf("%s:%u:%s: out of memory when trying to allocate %lu bytes\n",
+ file, line, function,
+ (long unsigned)new_size);
+ return NULL;
+ }
+ new_hdr->no = old_hdr->no;
+ new_hdr->file = old_hdr->file;
+ new_hdr->line = old_hdr->line;
+ new_hdr->function = old_hdr->function;
+ new_hdr->size = new_size;
+ new_hdr->magic = DEBUG_MEMORY_MAGIC;
+
+ new_ftr = footer_from_header(new_hdr);
+ new_ftr->magic = DEBUG_MEMORY_MAGIC;
+
+ LIST_REPLACE(&old_hdr->head, &new_hdr->head);
+
+ /* copy data */
+ new_ptr = data_from_header(new_hdr);
+ memcpy( new_ptr, old_ptr, old_size < new_size ? old_size : new_size );
+
+ /* free old */
+ old_hdr->magic = 0;
+ old_ftr->magic = 0;
+ real_free(old_hdr);
+
+ return new_ptr;
+}
+
+unsigned long
+debug_memory_begin(void)
+{
+ return last_no;
+}
+
+void
+debug_memory_end(unsigned long start_no)
+{
+ size_t total_size = 0;
+ struct list_head *entry;
+
+ if(start_no == last_no)
+ return;
+
+ entry = list.prev;
+ for (; entry != &list; entry = entry->prev) {
+ struct debug_memory_header *hdr;
+ void *ptr;
+ struct debug_memory_footer *ftr;
+
+ hdr = LIST_ENTRY(struct debug_memory_header, entry, head);
+ ptr = data_from_header(hdr);
+ ftr = footer_from_header(hdr);
+
+ if(hdr->magic != DEBUG_MEMORY_MAGIC) {
+ debug_printf("%s:%u:%s: bad or corrupted memory %p\n",
+ hdr->file, hdr->line, hdr->function,
+ ptr);
+ debug_assert(0);
+ }
+
+ if((start_no <= hdr->no && hdr->no < last_no) ||
+ (last_no < start_no && (hdr->no < last_no || start_no <= hdr->no))) {
+ debug_printf("%s:%u:%s: %u bytes at %p not freed\n",
+ hdr->file, hdr->line, hdr->function,
+ hdr->size, ptr);
+#if DEBUG_MEMORY_STACK
+ debug_backtrace_dump(hdr->backtrace, DEBUG_MEMORY_STACK);
+#endif
+ total_size += hdr->size;
+ }
+
+ if(ftr->magic != DEBUG_MEMORY_MAGIC) {
+ debug_printf("%s:%u:%s: buffer overflow %p\n",
+ hdr->file, hdr->line, hdr->function,
+ ptr);
+ debug_assert(0);
+ }
+ }
+
+ if(total_size) {
+ debug_printf("Total of %u KB of system memory apparently leaked\n",
+ (total_size + 1023)/1024);
+ }
+ else {
+ debug_printf("No memory leaks detected.\n");
+ }
+}
diff --git a/src/gallium/auxiliary/util/u_debug_profile.c b/src/gallium/auxiliary/util/u_debug_profile.c
new file mode 100644
index 0000000000..6d8b244c3a
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_debug_profile.c
@@ -0,0 +1,320 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Poor man's profiling.
+ *
+ * @author José Fonseca <jrfonseca@tungstengraphics.com>
+ *
+ * @sa http://blogs.msdn.com/joshpoley/archive/2008/03/12/poor-man-s-profiler.aspx
+ * @sa http://www.johnpanzer.com/aci_cuj/index.html
+ */
+
+#include "pipe/p_config.h"
+
+#if defined(PROFILE) && defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY)
+
+#include <windows.h>
+#include <winddi.h>
+
+#include "util/u_debug.h"
+#include "util/u_string.h"
+
+
+#define PROFILE_TABLE_SIZE (1024*1024)
+#define FILE_NAME_SIZE 256
+
+struct debug_profile_entry
+{
+ uintptr_t caller;
+ uintptr_t callee;
+ uint64_t samples;
+};
+
+static unsigned long enabled = 0;
+
+static WCHAR wFileName[FILE_NAME_SIZE] = L"\\??\\c:\\00000000.prof";
+static ULONG_PTR iFile = 0;
+
+static struct debug_profile_entry *table = NULL;
+static unsigned long free_table_entries = 0;
+static unsigned long max_table_entries = 0;
+
+uint64_t start_stamp = 0;
+uint64_t end_stamp = 0;
+
+
+static void
+debug_profile_entry(uintptr_t caller, uintptr_t callee, uint64_t samples)
+{
+ unsigned hash = (caller + callee) & (PROFILE_TABLE_SIZE - 1);
+
+ while(1) {
+ if(table[hash].caller == 0 && table[hash].callee == 0) {
+ table[hash].caller = caller;
+ table[hash].callee = callee;
+ table[hash].samples = samples;
+ --free_table_entries;
+ break;
+ }
+ else if(table[hash].caller == caller && table[hash].callee == callee) {
+ table[hash].samples += samples;
+ break;
+ }
+ else {
+ ++hash;
+ }
+ }
+}
+
+
+static uintptr_t caller_stack[1024];
+static unsigned last_caller = 0;
+
+
+static int64_t delta(void) {
+ int64_t result = end_stamp - start_stamp;
+ if(result > UINT64_C(0xffffffff))
+ result = 0;
+ return result;
+}
+
+
+static void __cdecl
+debug_profile_enter(uintptr_t callee)
+{
+ uintptr_t caller = last_caller ? caller_stack[last_caller - 1] : 0;
+
+ if (caller)
+ debug_profile_entry(caller, 0, delta());
+ debug_profile_entry(caller, callee, 1);
+ caller_stack[last_caller++] = callee;
+}
+
+
+static void __cdecl
+debug_profile_exit(uintptr_t callee)
+{
+ debug_profile_entry(callee, 0, delta());
+ if(last_caller)
+ --last_caller;
+}
+
+
+/**
+ * Called at the start of every method or function.
+ *
+ * @sa http://msdn.microsoft.com/en-us/library/c63a9b7h.aspx
+ */
+void __declspec(naked) __cdecl
+_penter(void) {
+ _asm {
+ push eax
+ mov eax, [enabled]
+ test eax, eax
+ jz skip
+
+ push edx
+
+ rdtsc
+ mov dword ptr [end_stamp], eax
+ mov dword ptr [end_stamp+4], edx
+
+ xor eax, eax
+ mov [enabled], eax
+
+ mov eax, [esp+8]
+
+ push ebx
+ push ecx
+ push ebp
+ push edi
+ push esi
+
+ push eax
+ call debug_profile_enter
+ add esp, 4
+
+ pop esi
+ pop edi
+ pop ebp
+ pop ecx
+ pop ebx
+
+ mov eax, 1
+ mov [enabled], eax
+
+ rdtsc
+ mov dword ptr [start_stamp], eax
+ mov dword ptr [start_stamp+4], edx
+
+ pop edx
+skip:
+ pop eax
+ ret
+ }
+}
+
+
+/**
+ * Called at the end of every method or function.
+ *
+ * @sa http://msdn.microsoft.com/en-us/library/xc11y76y.aspx
+ */
+void __declspec(naked) __cdecl
+_pexit(void) {
+ _asm {
+ push eax
+ mov eax, [enabled]
+ test eax, eax
+ jz skip
+
+ push edx
+
+ rdtsc
+ mov dword ptr [end_stamp], eax
+ mov dword ptr [end_stamp+4], edx
+
+ xor eax, eax
+ mov [enabled], eax
+
+ mov eax, [esp+8]
+
+ push ebx
+ push ecx
+ push ebp
+ push edi
+ push esi
+
+ push eax
+ call debug_profile_exit
+ add esp, 4
+
+ pop esi
+ pop edi
+ pop ebp
+ pop ecx
+ pop ebx
+
+ mov eax, 1
+ mov [enabled], eax
+
+ rdtsc
+ mov dword ptr [start_stamp], eax
+ mov dword ptr [start_stamp+4], edx
+
+ pop edx
+skip:
+ pop eax
+ ret
+ }
+}
+
+
+/**
+ * Reference function for calibration.
+ */
+void __declspec(naked)
+__debug_profile_reference(void) {
+ _asm {
+ call _penter
+ call _pexit
+ ret
+ }
+}
+
+
+void
+debug_profile_start(void)
+{
+ WCHAR *p;
+
+ // increment starting from the least significant digit
+ p = &wFileName[14];
+ while(1) {
+ if(*p == '9') {
+ *p-- = '0';
+ }
+ else {
+ *p += 1;
+ break;
+ }
+ }
+
+ table = EngMapFile(wFileName,
+ PROFILE_TABLE_SIZE*sizeof(struct debug_profile_entry),
+ &iFile);
+ if(table) {
+ unsigned i;
+
+ free_table_entries = max_table_entries = PROFILE_TABLE_SIZE;
+ memset(table, 0, PROFILE_TABLE_SIZE*sizeof(struct debug_profile_entry));
+
+ table[0].caller = (uintptr_t)&__debug_profile_reference;
+ table[0].callee = 0;
+ table[0].samples = 0;
+ --free_table_entries;
+
+ _asm {
+ push edx
+ push eax
+
+ rdtsc
+ mov dword ptr [start_stamp], eax
+ mov dword ptr [start_stamp+4], edx
+
+ pop edx
+ pop eax
+ }
+
+ last_caller = 0;
+
+ enabled = 1;
+
+ for(i = 0; i < 8; ++i) {
+ _asm {
+ call __debug_profile_reference
+ }
+ }
+ }
+}
+
+
+void
+debug_profile_stop(void)
+{
+ enabled = 0;
+
+ if(iFile)
+ EngUnmapFile(iFile);
+ iFile = 0;
+ table = NULL;
+ free_table_entries = max_table_entries = 0;
+}
+
+#endif /* PROFILE */
diff --git a/src/gallium/auxiliary/util/u_debug_stack.c b/src/gallium/auxiliary/util/u_debug_stack.c
new file mode 100644
index 0000000000..528a1c394b
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_debug_stack.c
@@ -0,0 +1,107 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Stack backtracing.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#include "u_debug.h"
+#include "u_debug_symbol.h"
+#include "u_debug_stack.h"
+
+
+void
+debug_backtrace_capture(struct debug_stack_frame *backtrace,
+ unsigned start_frame,
+ unsigned nr_frames)
+{
+ const void **frame_pointer = NULL;
+ unsigned i = 0;
+
+ if(!nr_frames)
+ return;
+
+#if defined(PIPE_CC_GCC)
+ frame_pointer = ((const void **)__builtin_frame_address(1));
+#elif defined(PIPE_CC_MSVC) && defined(PIPE_ARCH_X86)
+ __asm {
+ mov frame_pointer, ebp
+ }
+ frame_pointer = (const void **)frame_pointer[0];
+#else
+ frame_pointer = NULL;
+#endif
+
+
+#ifdef PIPE_ARCH_X86
+ while(nr_frames) {
+ const void **next_frame_pointer;
+
+ if(!frame_pointer)
+ break;
+
+ if(start_frame)
+ --start_frame;
+ else {
+ backtrace[i++].function = frame_pointer[1];
+ --nr_frames;
+ }
+
+ next_frame_pointer = (const void **)frame_pointer[0];
+
+ /* Limit the stack walk to avoid referencing undefined memory */
+ if((uintptr_t)next_frame_pointer <= (uintptr_t)frame_pointer ||
+ (uintptr_t)next_frame_pointer > (uintptr_t)frame_pointer + 64*1024)
+ break;
+
+ frame_pointer = next_frame_pointer;
+ }
+#endif
+
+ while(nr_frames) {
+ backtrace[i++].function = NULL;
+ --nr_frames;
+ }
+}
+
+
+void
+debug_backtrace_dump(const struct debug_stack_frame *backtrace,
+ unsigned nr_frames)
+{
+ unsigned i;
+
+ for(i = 0; i < nr_frames; ++i) {
+ if(!backtrace[i].function)
+ break;
+ debug_symbol_print(backtrace[i].function);
+ }
+}
+
diff --git a/src/gallium/auxiliary/util/u_debug_stack.h b/src/gallium/auxiliary/util/u_debug_stack.h
new file mode 100644
index 0000000000..f50f04e0f7
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_debug_stack.h
@@ -0,0 +1,65 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef U_DEBUG_STACK_H_
+#define U_DEBUG_STACK_H_
+
+
+/**
+ * @file
+ * Stack backtracing.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct debug_stack_frame
+{
+ const void *function;
+};
+
+
+void
+debug_backtrace_capture(struct debug_stack_frame *backtrace,
+ unsigned start_frame,
+ unsigned nr_frames);
+
+void
+debug_backtrace_dump(const struct debug_stack_frame *backtrace,
+ unsigned nr_frames);
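+
+/*
+ * A usage sketch (illustrative only): capture up to 8 callers of the current
+ * function and print them through the symbol lookup code:
+ *
+ *    struct debug_stack_frame frames[8];
+ *    debug_backtrace_capture(frames, 0, 8);
+ *    debug_backtrace_dump(frames, 8);
+ */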
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_DEBUG_STACK_H_ */
diff --git a/src/gallium/auxiliary/util/u_debug_symbol.c b/src/gallium/auxiliary/util/u_debug_symbol.c
new file mode 100644
index 0000000000..811931f81b
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_debug_symbol.c
@@ -0,0 +1,250 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Symbol lookup.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#include "pipe/p_compiler.h"
+
+#include "u_debug.h"
+#include "u_debug_symbol.h"
+
+#if defined(PIPE_SUBSYSTEM_WINDOWS_USER) && defined(PIPE_ARCH_X86)
+
+#include <windows.h>
+#include <stddef.h>
+#include <imagehlp.h>
+
+/*
+ * TODO: Cleanup code.
+ * TODO: Support x86_64
+ */
+
+static BOOL bSymInitialized = FALSE;
+
+static HMODULE hModule_Imagehlp = NULL;
+
+typedef BOOL (WINAPI *PFNSYMINITIALIZE)(HANDLE, LPSTR, BOOL);
+static PFNSYMINITIALIZE pfnSymInitialize = NULL;
+
+static
+BOOL WINAPI j_SymInitialize(HANDLE hProcess, PSTR UserSearchPath, BOOL fInvadeProcess)
+{
+ if(
+ (hModule_Imagehlp || (hModule_Imagehlp = LoadLibraryA("IMAGEHLP.DLL"))) &&
+ (pfnSymInitialize || (pfnSymInitialize = (PFNSYMINITIALIZE) GetProcAddress(hModule_Imagehlp, "SymInitialize")))
+ )
+ return pfnSymInitialize(hProcess, UserSearchPath, fInvadeProcess);
+ else
+ return FALSE;
+}
+
+typedef BOOL (WINAPI *PFNSYMCLEANUP)(HANDLE);
+static PFNSYMCLEANUP pfnSymCleanup = NULL;
+
+static
+BOOL WINAPI j_SymCleanup(HANDLE hProcess)
+{
+ if(
+ (hModule_Imagehlp || (hModule_Imagehlp = LoadLibraryA("IMAGEHLP.DLL"))) &&
+ (pfnSymCleanup || (pfnSymCleanup = (PFNSYMCLEANUP) GetProcAddress(hModule_Imagehlp, "SymCleanup")))
+ )
+ return pfnSymCleanup(hProcess);
+ else
+ return FALSE;
+}
+
+typedef DWORD (WINAPI *PFNSYMSETOPTIONS)(DWORD);
+static PFNSYMSETOPTIONS pfnSymSetOptions = NULL;
+
+static
+DWORD WINAPI j_SymSetOptions(DWORD SymOptions)
+{
+ if(
+ (hModule_Imagehlp || (hModule_Imagehlp = LoadLibraryA("IMAGEHLP.DLL"))) &&
+ (pfnSymSetOptions || (pfnSymSetOptions = (PFNSYMSETOPTIONS) GetProcAddress(hModule_Imagehlp, "SymSetOptions")))
+ )
+ return pfnSymSetOptions(SymOptions);
+ else
+ return FALSE;
+}
+
+typedef BOOL (WINAPI *PFNSYMUNDNAME)(PIMAGEHLP_SYMBOL, PSTR, DWORD);
+static PFNSYMUNDNAME pfnSymUnDName = NULL;
+
+static
+BOOL WINAPI j_SymUnDName(PIMAGEHLP_SYMBOL Symbol, PSTR UnDecName, DWORD UnDecNameLength)
+{
+ if(
+ (hModule_Imagehlp || (hModule_Imagehlp = LoadLibraryA("IMAGEHLP.DLL"))) &&
+ (pfnSymUnDName || (pfnSymUnDName = (PFNSYMUNDNAME) GetProcAddress(hModule_Imagehlp, "SymUnDName")))
+ )
+ return pfnSymUnDName(Symbol, UnDecName, UnDecNameLength);
+ else
+ return FALSE;
+}
+
+typedef PFUNCTION_TABLE_ACCESS_ROUTINE PFNSYMFUNCTIONTABLEACCESS;
+static PFNSYMFUNCTIONTABLEACCESS pfnSymFunctionTableAccess = NULL;
+
+static
+PVOID WINAPI j_SymFunctionTableAccess(HANDLE hProcess, DWORD AddrBase)
+{
+ if(
+ (hModule_Imagehlp || (hModule_Imagehlp = LoadLibraryA("IMAGEHLP.DLL"))) &&
+ (pfnSymFunctionTableAccess || (pfnSymFunctionTableAccess = (PFNSYMFUNCTIONTABLEACCESS) GetProcAddress(hModule_Imagehlp, "SymFunctionTableAccess")))
+ )
+ return pfnSymFunctionTableAccess(hProcess, AddrBase);
+ else
+ return NULL;
+}
+
+typedef PGET_MODULE_BASE_ROUTINE PFNSYMGETMODULEBASE;
+static PFNSYMGETMODULEBASE pfnSymGetModuleBase = NULL;
+
+static
+DWORD WINAPI j_SymGetModuleBase(HANDLE hProcess, DWORD dwAddr)
+{
+ if(
+ (hModule_Imagehlp || (hModule_Imagehlp = LoadLibraryA("IMAGEHLP.DLL"))) &&
+ (pfnSymGetModuleBase || (pfnSymGetModuleBase = (PFNSYMGETMODULEBASE) GetProcAddress(hModule_Imagehlp, "SymGetModuleBase")))
+ )
+ return pfnSymGetModuleBase(hProcess, dwAddr);
+ else
+ return 0;
+}
+
+typedef BOOL (WINAPI *PFNSTACKWALK)(DWORD, HANDLE, HANDLE, LPSTACKFRAME, LPVOID, PREAD_PROCESS_MEMORY_ROUTINE, PFUNCTION_TABLE_ACCESS_ROUTINE, PGET_MODULE_BASE_ROUTINE, PTRANSLATE_ADDRESS_ROUTINE);
+static PFNSTACKWALK pfnStackWalk = NULL;
+
+static
+BOOL WINAPI j_StackWalk(
+ DWORD MachineType,
+ HANDLE hProcess,
+ HANDLE hThread,
+ LPSTACKFRAME StackFrame,
+ PVOID ContextRecord,
+ PREAD_PROCESS_MEMORY_ROUTINE ReadMemoryRoutine,
+ PFUNCTION_TABLE_ACCESS_ROUTINE FunctionTableAccessRoutine,
+ PGET_MODULE_BASE_ROUTINE GetModuleBaseRoutine,
+ PTRANSLATE_ADDRESS_ROUTINE TranslateAddress
+)
+{
+ if(
+ (hModule_Imagehlp || (hModule_Imagehlp = LoadLibraryA("IMAGEHLP.DLL"))) &&
+ (pfnStackWalk || (pfnStackWalk = (PFNSTACKWALK) GetProcAddress(hModule_Imagehlp, "StackWalk")))
+ )
+ return pfnStackWalk(
+ MachineType,
+ hProcess,
+ hThread,
+ StackFrame,
+ ContextRecord,
+ ReadMemoryRoutine,
+ FunctionTableAccessRoutine,
+ GetModuleBaseRoutine,
+ TranslateAddress
+ );
+ else
+ return FALSE;
+}
+
+typedef BOOL (WINAPI *PFNSYMGETSYMFROMADDR)(HANDLE, DWORD, LPDWORD, PIMAGEHLP_SYMBOL);
+static PFNSYMGETSYMFROMADDR pfnSymGetSymFromAddr = NULL;
+
+static
+BOOL WINAPI j_SymGetSymFromAddr(HANDLE hProcess, DWORD Address, PDWORD Displacement, PIMAGEHLP_SYMBOL Symbol)
+{
+ if(
+ (hModule_Imagehlp || (hModule_Imagehlp = LoadLibraryA("IMAGEHLP.DLL"))) &&
+ (pfnSymGetSymFromAddr || (pfnSymGetSymFromAddr = (PFNSYMGETSYMFROMADDR) GetProcAddress(hModule_Imagehlp, "SymGetSymFromAddr")))
+ )
+ return pfnSymGetSymFromAddr(hProcess, Address, Displacement, Symbol);
+ else
+ return FALSE;
+}
+
+typedef BOOL (WINAPI *PFNSYMGETLINEFROMADDR)(HANDLE, DWORD, LPDWORD, PIMAGEHLP_LINE);
+static PFNSYMGETLINEFROMADDR pfnSymGetLineFromAddr = NULL;
+
+static
+BOOL WINAPI j_SymGetLineFromAddr(HANDLE hProcess, DWORD dwAddr, PDWORD pdwDisplacement, PIMAGEHLP_LINE Line)
+{
+ if(
+ (hModule_Imagehlp || (hModule_Imagehlp = LoadLibraryA("IMAGEHLP.DLL"))) &&
+ (pfnSymGetLineFromAddr || (pfnSymGetLineFromAddr = (PFNSYMGETLINEFROMADDR) GetProcAddress(hModule_Imagehlp, "SymGetLineFromAddr")))
+ )
+ return pfnSymGetLineFromAddr(hProcess, dwAddr, pdwDisplacement, Line);
+ else
+ return FALSE;
+}
+
+
+static INLINE boolean
+debug_symbol_print_imagehlp(const void *addr)
+{
+ HANDLE hProcess;
+ BYTE symbolBuffer[1024];
+ PIMAGEHLP_SYMBOL pSymbol = (PIMAGEHLP_SYMBOL) symbolBuffer;
+   DWORD dwDisplacement = 0;  /* displacement of the input address, relative to the start of the symbol */
+
+ hProcess = GetCurrentProcess();
+
+ pSymbol->SizeOfStruct = sizeof(symbolBuffer);
+ pSymbol->MaxNameLength = sizeof(symbolBuffer) - offsetof(IMAGEHLP_SYMBOL, Name);
+
+ if(!bSymInitialized) {
+ j_SymSetOptions(/* SYMOPT_UNDNAME | */ SYMOPT_LOAD_LINES);
+ if(j_SymInitialize(hProcess, NULL, TRUE))
+ bSymInitialized = TRUE;
+ }
+
+ if(!j_SymGetSymFromAddr(hProcess, (DWORD)addr, &dwDisplacement, pSymbol))
+ return FALSE;
+
+ debug_printf("\t%s\n", pSymbol->Name);
+
+ return TRUE;
+
+}
+#endif
+
+
+void
+debug_symbol_print(const void *addr)
+{
+#if defined(PIPE_SUBSYSTEM_WINDOWS_USER) && defined(PIPE_ARCH_X86)
+ if(debug_symbol_print_imagehlp(addr))
+ return;
+#endif
+
+ debug_printf("\t%p\n", addr);
+}
diff --git a/src/gallium/auxiliary/util/u_debug_symbol.h b/src/gallium/auxiliary/util/u_debug_symbol.h
new file mode 100644
index 0000000000..021586987b
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_debug_symbol.h
@@ -0,0 +1,53 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef U_DEBUG_SYMBOL_H_
+#define U_DEBUG_SYMBOL_H_
+
+
+/**
+ * @file
+ * Symbol lookup.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+void
+debug_symbol_print(const void *addr);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_DEBUG_SYMBOL_H_ */
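A small sketch of the single entry point above (editor's illustration, not part of the patch); on platforms without imagehlp support it simply prints the raw address, as the fallback in u_debug_symbol.c shows:

    #include "util/u_debug_symbol.h"

    static void
    print_callback_symbol(void (*fn)(void))
    {
       /* Cast to a data pointer for the lookup; acceptable as a debugging aid. */
       debug_symbol_print((const void *) fn);
    }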
diff --git a/src/gallium/auxiliary/util/u_double_list.h b/src/gallium/auxiliary/util/u_double_list.h
new file mode 100644
index 0000000000..53bb1342dd
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_double_list.h
@@ -0,0 +1,102 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Doubly-linked list macros, heavily inspired by the Linux kernel's
+ * list handling. No list-iteration macros yet.
+ *
+ * This implementation is not thread-safe, so shared lists must be
+ * protected by an external mutex.
+ */
+
+#ifndef _U_DOUBLE_LIST_H_
+#define _U_DOUBLE_LIST_H_
+
+
+#include <stddef.h>
+
+
+struct list_head
+{
+ struct list_head *prev;
+ struct list_head *next;
+};
+
+
+#define LIST_INITHEAD(__item) \
+ do { \
+ (__item)->prev = (__item); \
+ (__item)->next = (__item); \
+ } while (0)
+
+#define LIST_ADD(__item, __list) \
+ do { \
+ (__item)->prev = (__list); \
+ (__item)->next = (__list)->next; \
+ (__list)->next->prev = (__item); \
+ (__list)->next = (__item); \
+ } while (0)
+
+#define LIST_ADDTAIL(__item, __list) \
+ do { \
+ (__item)->next = (__list); \
+ (__item)->prev = (__list)->prev; \
+ (__list)->prev->next = (__item); \
+ (__list)->prev = (__item); \
+ } while(0)
+
+#define LIST_REPLACE(__from, __to) \
+ do { \
+ (__to)->prev = (__from)->prev; \
+ (__to)->next = (__from)->next; \
+ (__from)->next->prev = (__to); \
+ (__from)->prev->next = (__to); \
+ } while (0)
+
+#define LIST_DEL(__item) \
+ do { \
+ (__item)->prev->next = (__item)->next; \
+ (__item)->next->prev = (__item)->prev; \
+ } while(0)
+
+#define LIST_DELINIT(__item) \
+ do { \
+ (__item)->prev->next = (__item)->next; \
+ (__item)->next->prev = (__item)->prev; \
+ (__item)->next = (__item); \
+ (__item)->prev = (__item); \
+ } while(0)
+
+#define LIST_ENTRY(__type, __item, __field) \
+ ((__type *)(((char *)(__item)) - offsetof(__type, __field)))
+
+#define LIST_IS_EMPTY(__list) \
+ ((__list)->next == (__list))
+
+
+#endif /*_U_DOUBLE_LIST_H_*/
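A usage sketch of the embedding pattern these macros expect (editor's illustration, not part of the patch; the struct and field names are made up): a list_head member lives inside the payload struct and LIST_ENTRY() recovers the container. Since there are no iteration macros yet, the walk is done by hand:

    #include "util/u_double_list.h"

    struct job {
       int id;
       struct list_head link;   /* embedded list node */
    };

    static void
    example(struct job *a, struct job *b)
    {
       struct list_head queue;
       struct list_head *node;

       LIST_INITHEAD(&queue);
       LIST_ADDTAIL(&a->link, &queue);   /* append */
       LIST_ADD(&b->link, &queue);       /* prepend */

       for (node = queue.next; node != &queue; node = node->next) {
          struct job *j = LIST_ENTRY(struct job, node, link);
          (void) j->id;   /* use the payload */
       }

       LIST_DEL(&a->link);
    }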
diff --git a/src/gallium/auxiliary/util/u_draw_quad.c b/src/gallium/auxiliary/util/u_draw_quad.c
new file mode 100644
index 0000000000..4110485fb1
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_draw_quad.c
@@ -0,0 +1,134 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+#include "util/u_draw_quad.h"
+
+
+/**
+ * Draw a simple vertex buffer / primitive.
+ * Limited to float[4] vertex attribs, tightly packed.
+ */
+void
+util_draw_vertex_buffer(struct pipe_context *pipe,
+ struct pipe_buffer *vbuf,
+ uint offset,
+ uint prim_type,
+ uint num_verts,
+ uint num_attribs)
+{
+ struct pipe_vertex_buffer vbuffer;
+ struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS];
+ uint i;
+
+ assert(num_attribs <= PIPE_MAX_ATTRIBS);
+
+ /* tell pipe about the vertex buffer */
+ memset(&vbuffer, 0, sizeof(vbuffer));
+ vbuffer.buffer = vbuf;
+ vbuffer.stride = num_attribs * 4 * sizeof(float); /* vertex size */
+ vbuffer.buffer_offset = offset;
+ vbuffer.max_index = num_verts - 1;
+ pipe->set_vertex_buffers(pipe, 1, &vbuffer);
+
+ /* tell pipe about the vertex attributes */
+ for (i = 0; i < num_attribs; i++) {
+ velements[i].src_offset = i * 4 * sizeof(float);
+ velements[i].vertex_buffer_index = 0;
+ velements[i].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+ velements[i].nr_components = 4;
+ }
+ pipe->set_vertex_elements(pipe, num_attribs, velements);
+
+ /* draw */
+ pipe->draw_arrays(pipe, prim_type, 0, num_verts);
+}
+
+
+
+/**
+ * Draw screen-aligned textured quad.
+ * Note: this function allocs/destroys a vertex buffer and isn't especially
+ * efficient.
+ */
+void
+util_draw_texquad(struct pipe_context *pipe,
+ float x0, float y0, float x1, float y1, float z)
+{
+ struct pipe_buffer *vbuf;
+ uint numAttribs = 2, vertexBytes, i, j;
+
+ vertexBytes = 4 * (4 * numAttribs * sizeof(float));
+
+ /* XXX create one-time */
+ vbuf = pipe_buffer_create(pipe->screen, 32,
+ PIPE_BUFFER_USAGE_VERTEX, vertexBytes);
+ if (vbuf) {
+ float *v = (float *) pipe_buffer_map(pipe->screen, vbuf,
+ PIPE_BUFFER_USAGE_CPU_WRITE);
+ if (v) {
+ /*
+ * Load vertex buffer
+ */
+ for (i = j = 0; i < 4; i++) {
+ v[j + 2] = z; /* z */
+ v[j + 3] = 1.0; /* w */
+ v[j + 6] = 0.0; /* r */
+ v[j + 7] = 1.0; /* q */
+ j += 8;
+ }
+
+ v[0] = x0;
+ v[1] = y0;
+ v[4] = 0.0; /*s*/
+ v[5] = 0.0; /*t*/
+
+ v[8] = x1;
+ v[9] = y0;
+ v[12] = 1.0;
+ v[13] = 0.0;
+
+ v[16] = x1;
+ v[17] = y1;
+ v[20] = 1.0;
+ v[21] = 1.0;
+
+ v[24] = x0;
+ v[25] = y1;
+ v[28] = 0.0;
+ v[29] = 1.0;
+
+ pipe_buffer_unmap(pipe->screen, vbuf);
+ util_draw_vertex_buffer(pipe, vbuf, 0, PIPE_PRIM_TRIANGLE_FAN, 4, 2);
+ }
+
+ pipe_buffer_reference(&vbuf, NULL);
+ }
+}
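A usage sketch for the helper above (editor's illustration, not part of the patch), assuming the caller has already bound a passthrough vertex shader, fragment shader, sampler and framebuffer state on `pipe`:

    /* Quad from (-1,-1) to (1,1) at depth 0 -- effectively full screen
     * when the bound vertex shader passes positions through. */
    util_draw_texquad(pipe, -1.0f, -1.0f, 1.0f, 1.0f, 0.0f);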
diff --git a/src/gallium/auxiliary/util/u_draw_quad.h b/src/gallium/auxiliary/util/u_draw_quad.h
new file mode 100644
index 0000000000..00d3f5b715
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_draw_quad.h
@@ -0,0 +1,54 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef U_DRAWQUAD_H
+#define U_DRAWQUAD_H
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct pipe_buffer;
+
+extern void
+util_draw_vertex_buffer(struct pipe_context *pipe,
+ struct pipe_buffer *vbuf, uint offset,
+                        uint prim_type, uint num_verts, uint num_attribs);
+
+
+extern void
+util_draw_texquad(struct pipe_context *pipe,
+ float x0, float y0, float x1, float y1, float z);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif
diff --git a/src/gallium/auxiliary/util/u_fifo.h b/src/gallium/auxiliary/util/u_fifo.h
new file mode 100644
index 0000000000..9e007de1ad
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_fifo.h
@@ -0,0 +1,94 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 Jakob Bornecrantz
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef U_FIFO_H
+#define U_FIFO_H
+
+#include "util/u_memory.h"
+
+struct util_fifo
+{
+ size_t head;
+ size_t tail;
+ size_t num;
+ size_t size;
+};
+
+static INLINE struct util_fifo *
+u_fifo_create(size_t size)
+{
+ struct util_fifo *fifo;
+ fifo = MALLOC(sizeof(*fifo) + size * sizeof(void*));
+
+ fifo->head = 0;
+ fifo->tail = 0;
+ fifo->num = 0;
+ fifo->size = size;
+
+ return fifo;
+}
+
+static INLINE boolean
+u_fifo_add(struct util_fifo *fifo, void *ptr)
+{
+ void **array = (void**)&fifo[1];
+ if (fifo->num >= fifo->size)
+ return FALSE;
+
+ if (++fifo->head >= fifo->size)
+ fifo->head = 0;
+
+ array[fifo->head] = ptr;
+
+ ++fifo->num;
+
+ return TRUE;
+}
+
+static INLINE boolean
+u_fifo_pop(struct util_fifo *fifo, void **ptr)
+{
+ void **array = (void**)&fifo[1];
+
+ if (!fifo->num)
+ return FALSE;
+
+ if (++fifo->tail >= fifo->size)
+ fifo->tail = 0;
+
+ *ptr = array[fifo->tail];
+
+   --fifo->num;
+
+ return TRUE;
+}
+
+static INLINE void
+u_fifo_destroy(struct util_fifo *fifo)
+{
+ FREE(fifo);
+}
+
+#endif
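A usage sketch (editor's illustration, not part of the patch; `some_ptr` is a placeholder): a fixed-capacity FIFO of pointers, where u_fifo_add() fails once `size` entries are queued and u_fifo_pop() fails when the FIFO is empty:

    #include "util/u_fifo.h"

    struct util_fifo *fifo = u_fifo_create(8);   /* room for 8 pointers */
    void *item;

    if (!u_fifo_add(fifo, some_ptr)) {
       /* FIFO already holds 8 entries. */
    }
    if (u_fifo_pop(fifo, &item)) {
       /* item is now the oldest queued pointer. */
    }
    u_fifo_destroy(fifo);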
diff --git a/src/gallium/auxiliary/util/u_format.c b/src/gallium/auxiliary/util/u_format.c
new file mode 100644
index 0000000000..98ea13b60b
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_format.c
@@ -0,0 +1,46 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "u_format.h"
+
+
+const struct util_format_description *
+util_format_description(enum pipe_format format)
+{
+ const struct util_format_description *desc = util_format_description_table;
+
+ while(TRUE) {
+ if(desc->format == format)
+ return desc;
+
+ if(desc->format == PIPE_FORMAT_NONE)
+ return NULL;
+
+ ++desc;
+   }
+}
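A usage sketch of the lookup above (editor's illustration, not part of the patch): fetch a format description and report its block size; the function returns NULL for formats missing from the generated table:

    #include "util/u_debug.h"
    #include "util/u_format.h"

    static void
    dump_format_info(enum pipe_format format)
    {
       const struct util_format_description *desc = util_format_description(format);

       if (desc)
          debug_printf("%s: %u bits per block\n", desc->name, desc->block.bits);
    }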
diff --git a/src/gallium/auxiliary/util/u_format.csv b/src/gallium/auxiliary/util/u_format.csv
new file mode 100644
index 0000000000..6e82983e58
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_format.csv
@@ -0,0 +1,99 @@
+PIPE_FORMAT_A8R8G8B8_UNORM , arith , 1, 1, un8 , un8 , un8 , un8 , zyxw, rgb
+PIPE_FORMAT_X8R8G8B8_UNORM , arith , 1, 1, un8 , un8 , un8 , un8 , zyx1, rgb
+PIPE_FORMAT_B8G8R8A8_UNORM , arith , 1, 1, un8 , un8 , un8 , un8 , yzwx, rgb
+PIPE_FORMAT_B8G8R8X8_UNORM , arith , 1, 1, un8 , un8 , un8 , un8 , yzw1, rgb
+PIPE_FORMAT_A1R5G5B5_UNORM , arith , 1, 1, un1 , un5 , un5 , un5 , zyxw, rgb
+PIPE_FORMAT_A4R4G4B4_UNORM , arith , 1, 1, un4 , un4 , un4 , un4 , zyxw, rgb
+PIPE_FORMAT_R5G6B5_UNORM , arith , 1, 1, un5 , un6 , un5 , , zyx1, rgb
+PIPE_FORMAT_A2B10G10R10_UNORM , arith , 1, 1, un10, un10, un10, un2 , xyzw, rgb
+PIPE_FORMAT_L8_UNORM , arith , 1, 1, un8 , , , , xxx1, rgb
+PIPE_FORMAT_A8_UNORM , arith , 1, 1, un8 , , , , 000x, rgb
+PIPE_FORMAT_I8_UNORM , arith , 1, 1, un8 , , , , xxxx, rgb
+PIPE_FORMAT_A8L8_UNORM , arith , 1, 1, un8 , un8 , , , xxxy, rgb
+PIPE_FORMAT_L16_UNORM , arith , 1, 1, un16, , , , xxx1, rgb
+PIPE_FORMAT_Z16_UNORM , array , 1, 1, un16, , , , x___, zs
+PIPE_FORMAT_Z32_UNORM , array , 1, 1, un32, , , , x___, zs
+PIPE_FORMAT_Z32_FLOAT , array , 1, 1, f32 , , , , x___, zs
+PIPE_FORMAT_S8Z24_UNORM , arith , 1, 1, un24, un8 , , , xy__, zs
+PIPE_FORMAT_Z24S8_UNORM , arith , 1, 1, un8 , un24, , , yx__, zs
+PIPE_FORMAT_X8Z24_UNORM , arith , 1, 1, un24, un8 , , , x___, zs
+PIPE_FORMAT_Z24X8_UNORM , arith , 1, 1, un8 , un24, , , y___, zs
+PIPE_FORMAT_S8_UNORM , array , 1, 1, un8 , , , , _x__, zs
+PIPE_FORMAT_R64_FLOAT , array , 1, 1, f64 , , , , x001, rgb
+PIPE_FORMAT_R64G64_FLOAT , array , 1, 1, f64 , f64 , , , xy01, rgb
+PIPE_FORMAT_R64G64B64_FLOAT , array , 1, 1, f64 , f64 , f64 , , xyz1, rgb
+PIPE_FORMAT_R64G64B64A64_FLOAT , array , 1, 1, f64 , f64 , f64 , f64 , xyzw, rgb
+PIPE_FORMAT_R32_FLOAT , array , 1, 1, f32 , , , , x001, rgb
+PIPE_FORMAT_R32G32_FLOAT , array , 1, 1, f32 , f32 , , , xy01, rgb
+PIPE_FORMAT_R32G32B32_FLOAT , array , 1, 1, f32 , f32 , f32 , , xyz1, rgb
+PIPE_FORMAT_R32G32B32A32_FLOAT , array , 1, 1, f32 , f32 , f32 , f32 , xyzw, rgb
+PIPE_FORMAT_R32_UNORM , array , 1, 1, un32, , , , x001, rgb
+PIPE_FORMAT_R32G32_UNORM , array , 1, 1, un32, un32, , , xy01, rgb
+PIPE_FORMAT_R32G32B32_UNORM , array , 1, 1, un32, un32, un32, , xyz1, rgb
+PIPE_FORMAT_R32G32B32A32_UNORM , array , 1, 1, un32, un32, un32, un32, xyzw, rgb
+PIPE_FORMAT_R32_USCALED , array , 1, 1, u32 , , , , x001, rgb
+PIPE_FORMAT_R32G32_USCALED , array , 1, 1, u32 , u32 , , , xy01, rgb
+PIPE_FORMAT_R32G32B32_USCALED , array , 1, 1, u32 , u32 , u32 , , xyz1, rgb
+PIPE_FORMAT_R32G32B32A32_USCALED , array , 1, 1, u32 , u32 , u32 , u32 , xyzw, rgb
+PIPE_FORMAT_R32_SNORM , array , 1, 1, sn32, , , , x001, rgb
+PIPE_FORMAT_R32G32_SNORM , array , 1, 1, sn32, sn32, , , xy01, rgb
+PIPE_FORMAT_R32G32B32_SNORM , array , 1, 1, sn32, sn32, sn32, , xyz1, rgb
+PIPE_FORMAT_R32G32B32A32_SNORM , array , 1, 1, sn32, sn32, sn32, sn32, xyzw, rgb
+PIPE_FORMAT_R32_SSCALED , array , 1, 1, s32 , , , , x001, rgb
+PIPE_FORMAT_R32G32_SSCALED , array , 1, 1, s32 , s32 , , , xy01, rgb
+PIPE_FORMAT_R32G32B32_SSCALED , array , 1, 1, s32 , s32 , s32 , , xyz1, rgb
+PIPE_FORMAT_R32G32B32A32_SSCALED , array , 1, 1, s32 , s32 , s32 , s32 , xyzw, rgb
+PIPE_FORMAT_R16_UNORM , array , 1, 1, un16, , , , x001, rgb
+PIPE_FORMAT_R16G16_UNORM , array , 1, 1, un16, un16, , , xy01, rgb
+PIPE_FORMAT_R16G16B16_UNORM , array , 1, 1, un16, un16, un16, , xyz1, rgb
+PIPE_FORMAT_R16G16B16A16_UNORM , array , 1, 1, un16, un16, un16, un16, xyzw, rgb
+PIPE_FORMAT_R16_USCALED , array , 1, 1, u16 , , , , x001, rgb
+PIPE_FORMAT_R16G16_USCALED , array , 1, 1, u16 , u16 , , , xy01, rgb
+PIPE_FORMAT_R16G16B16_USCALED , array , 1, 1, u16 , u16 , u16 , , xyz1, rgb
+PIPE_FORMAT_R16G16B16A16_USCALED , array , 1, 1, u16 , u16 , u16 , u16 , xyzw, rgb
+PIPE_FORMAT_R16_SNORM , array , 1, 1, sn16, , , , x001, rgb
+PIPE_FORMAT_R16G16_SNORM , array , 1, 1, sn16, sn16, , , xy01, rgb
+PIPE_FORMAT_R16G16B16_SNORM , array , 1, 1, sn16, sn16, sn16, , xyz1, rgb
+PIPE_FORMAT_R16G16B16A16_SNORM , array , 1, 1, sn16, sn16, sn16, sn16, xyzw, rgb
+PIPE_FORMAT_R16_SSCALED , array , 1, 1, s16 , , , , x001, rgb
+PIPE_FORMAT_R16G16_SSCALED , array , 1, 1, s16 , s16 , , , xy01, rgb
+PIPE_FORMAT_R16G16B16_SSCALED , array , 1, 1, s16 , s16 , s16 , , xyz1, rgb
+PIPE_FORMAT_R16G16B16A16_SSCALED , array , 1, 1, s16 , s16 , s16 , s16 , xyzw, rgb
+PIPE_FORMAT_R8_UNORM , array , 1, 1, un8 , , , , x001, rgb
+PIPE_FORMAT_R8G8_UNORM , array , 1, 1, un8 , un8 , , , xy01, rgb
+PIPE_FORMAT_R8G8B8_UNORM , array , 1, 1, un8 , un8 , un8 , , xyz1, rgb
+PIPE_FORMAT_R8G8B8A8_UNORM , array , 1, 1, un8 , un8 , un8 , un8 , xyzw, rgb
+PIPE_FORMAT_R8G8B8X8_UNORM , array , 1, 1, un8 , un8 , un8 , un8 , xyz1, rgb
+PIPE_FORMAT_R8_USCALED , array , 1, 1, u8 , , , , x001, rgb
+PIPE_FORMAT_R8G8_USCALED , array , 1, 1, u8 , u8 , , , xy01, rgb
+PIPE_FORMAT_R8G8B8_USCALED , array , 1, 1, u8 , u8 , u8 , , xyz1, rgb
+PIPE_FORMAT_R8G8B8A8_USCALED , array , 1, 1, u8 , u8 , u8 , u8 , xyzw, rgb
+PIPE_FORMAT_R8G8B8X8_USCALED , array , 1, 1, u8 , u8 , u8 , u8 , xyz1, rgb
+PIPE_FORMAT_R8_SNORM , array , 1, 1, sn8 , , , , x001, rgb
+PIPE_FORMAT_R8G8_SNORM , array , 1, 1, sn8 , sn8 , , , xy01, rgb
+PIPE_FORMAT_R8G8B8_SNORM , array , 1, 1, sn8 , sn8 , sn8 , , xyz1, rgb
+PIPE_FORMAT_R8G8B8A8_SNORM , array , 1, 1, sn8 , sn8 , sn8 , sn8 , xyzw, rgb
+PIPE_FORMAT_R8G8B8X8_SNORM , array , 1, 1, sn8 , sn8 , sn8 , sn8 , xyz1, rgb
+PIPE_FORMAT_B6G5R5_SNORM , arith , 1, 1, sn5 , sn5 , sn6 , , zyx1, rgb
+PIPE_FORMAT_A8B8G8R8_SNORM , arith , 1, 1, sn8 , sn8 , sn8 , sn8 , zyxw, rgb
+PIPE_FORMAT_X8B8G8R8_SNORM , arith , 1, 1, sn8 , sn8 , sn8 , sn8 , zyx1, rgb
+PIPE_FORMAT_R8_SSCALED , array , 1, 1, s8 , , , , x001, rgb
+PIPE_FORMAT_R8G8_SSCALED , array , 1, 1, s8 , s8 , , , xy01, rgb
+PIPE_FORMAT_R8G8B8_SSCALED , array , 1, 1, s8 , s8 , s8 , , xyz1, rgb
+PIPE_FORMAT_R8G8B8A8_SSCALED , array , 1, 1, s8 , s8 , s8 , s8 , xyzw, rgb
+PIPE_FORMAT_R8G8B8X8_SSCALED , array , 1, 1, s8 , s8 , s8 , s8 , xyz1, rgb
+PIPE_FORMAT_R32_FIXED , array , 1, 1, h32 , , , , x001, rgb
+PIPE_FORMAT_R32G32_FIXED , array , 1, 1, h32 , h32 , , , xy01, rgb
+PIPE_FORMAT_R32G32B32_FIXED , array , 1, 1, h32 , h32 , h32 , , xyz1, rgb
+PIPE_FORMAT_R32G32B32A32_FIXED , array , 1, 1, h32 , h32 , h32 , h32 , xyzw, rgb
+PIPE_FORMAT_L8_SRGB , arith , 1, 1, u8 , , , , xxx1, srgb
+PIPE_FORMAT_A8L8_SRGB , arith , 1, 1, u8 , u8 , , , xxxy, srgb
+PIPE_FORMAT_R8G8B8_SRGB , arith , 1, 1, u8 , u8 , u8 , , xyz1, srgb
+PIPE_FORMAT_R8G8B8A8_SRGB , arith , 1, 1, u8 , u8 , u8 , u8 , xyzw, srgb
+PIPE_FORMAT_R8G8B8X8_SRGB , arith , 1, 1, u8 , u8 , u8 , u8 , xyz1, srgb
+PIPE_FORMAT_A8R8G8B8_SRGB , arith , 1, 1, u8 , u8 , u8 , u8 , wxyz, srgb
+PIPE_FORMAT_X8R8G8B8_SRGB , arith , 1, 1, u8 , u8 , u8 , u8 , 1xyz, srgb
+PIPE_FORMAT_B8G8R8A8_SRGB , arith , 1, 1, u8 , u8 , u8 , u8 , zyxw, srgb
+PIPE_FORMAT_B8G8R8X8_SRGB , arith , 1, 1, u8 , u8 , u8 , u8 , zyx1, srgb
+PIPE_FORMAT_X8UB8UG8SR8S_NORM , arith , 1, 1, sn8 , sn8 , un8 , x8 , 1zyx, rgb
+PIPE_FORMAT_B6UG5SR5S_NORM , arith , 1, 1, sn5 , sn5 , un6 , , xyz1, rgb
diff --git a/src/gallium/auxiliary/util/u_format.h b/src/gallium/auxiliary/util/u_format.h
new file mode 100644
index 0000000000..7b5b7fcda5
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_format.h
@@ -0,0 +1,138 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef U_FORMAT_H
+#define U_FORMAT_H
+
+
+#include "pipe/p_format.h"
+
+
+enum util_format_layout {
+ UTIL_FORMAT_LAYOUT_SCALAR = 0,
+ UTIL_FORMAT_LAYOUT_ARITH = 1,
+ UTIL_FORMAT_LAYOUT_ARRAY = 2,
+ UTIL_FORMAT_LAYOUT_YUV = 3,
+ UTIL_FORMAT_LAYOUT_DXT = 4
+};
+
+
+struct util_format_block
+{
+ /** Block width in pixels */
+ unsigned width;
+
+ /** Block height in pixels */
+ unsigned height;
+
+   /** Block size in bits */
+ unsigned bits;
+};
+
+
+enum util_format_type {
+ UTIL_FORMAT_TYPE_VOID = 0,
+ UTIL_FORMAT_TYPE_UNSIGNED = 1,
+ UTIL_FORMAT_TYPE_SIGNED = 2,
+ UTIL_FORMAT_TYPE_FIXED = 3,
+ UTIL_FORMAT_TYPE_FLOAT = 4
+};
+
+
+enum util_format_swizzle {
+ UTIL_FORMAT_SWIZZLE_X = 0,
+ UTIL_FORMAT_SWIZZLE_Y = 1,
+ UTIL_FORMAT_SWIZZLE_Z = 2,
+ UTIL_FORMAT_SWIZZLE_W = 3,
+ UTIL_FORMAT_SWIZZLE_0 = 4,
+ UTIL_FORMAT_SWIZZLE_1 = 5,
+ UTIL_FORMAT_SWIZZLE_NONE = 6
+};
+
+
+enum util_format_colorspace {
+ UTIL_FORMAT_COLORSPACE_RGB = 0,
+ UTIL_FORMAT_COLORSPACE_SRGB = 1,
+ UTIL_FORMAT_COLORSPACE_YUV = 2,
+ UTIL_FORMAT_COLORSPACE_ZS = 3,
+   UTIL_FORMAT_COLORSPACE_ZS = 3
+
+
+struct util_format_channel_description
+{
+ unsigned type:6;
+ unsigned normalized:1;
+ unsigned size:9;
+};
+
+
+struct util_format_description
+{
+ enum pipe_format format;
+ const char *name;
+ struct util_format_block block;
+ enum util_format_layout layout;
+ struct util_format_channel_description channel[4];
+ unsigned char swizzle[4];
+ enum util_format_colorspace colorspace;
+};
+
+
+extern const struct util_format_description
+util_format_description_table[];
+
+
+const struct util_format_description *
+util_format_description(enum pipe_format format);
+
+
+void
+util_format_read_4f(enum pipe_format format,
+ float *dst, unsigned dst_stride,
+ const void *src, unsigned src_stride,
+ unsigned x, unsigned y, unsigned w, unsigned h);
+
+void
+util_format_write_4f(enum pipe_format format,
+ const float *src, unsigned src_stride,
+ void *dst, unsigned dst_stride,
+ unsigned x, unsigned y, unsigned w, unsigned h);
+
+void
+util_format_read_4ub(enum pipe_format format,
+ uint8_t *dst, unsigned dst_stride,
+ const void *src, unsigned src_stride,
+ unsigned x, unsigned y, unsigned w, unsigned h);
+
+void
+util_format_write_4ub(enum pipe_format format,
+ const uint8_t *src, unsigned src_stride,
+ void *dst, unsigned dst_stride,
+ unsigned x, unsigned y, unsigned w, unsigned h);
+
+#endif /* ! U_FORMAT_H */
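A usage sketch of the generated accessors declared above (editor's illustration, not part of the patch); the buffer sizes and strides are illustrative only:

    uint8_t src[4 * 4];      /* 4 packed A8R8G8B8 pixels */
    float dst[4 * 4];        /* 4 unpacked RGBA float pixels */

    /* Convert one row of 4 pixels; strides are in bytes. */
    util_format_read_4f(PIPE_FORMAT_A8R8G8B8_UNORM,
                        dst, sizeof dst,
                        src, sizeof src,
                        0, 0, 4, 1);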
diff --git a/src/gallium/auxiliary/util/u_format_access.py b/src/gallium/auxiliary/util/u_format_access.py
new file mode 100644
index 0000000000..eeb1a9657f
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_format_access.py
@@ -0,0 +1,498 @@
+#!/usr/bin/env python
+
+'''
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Pixel format accessor functions.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+'''
+
+
+import sys
+
+from u_format_parse import *
+
+
+def short_name(format):
+    '''Make up a short name for a format, suitable to be used as a suffix in
+ function names.'''
+
+ name = format.name
+ if name.startswith('PIPE_FORMAT_'):
+ name = name[len('PIPE_FORMAT_'):]
+ name = name.lower()
+ return name
+
+
+def is_format_supported(format):
+    '''Determine whether we actually have the plumbing necessary to
+    generate code to read/write to/from this format.'''
+
+ # FIXME: Ideally we would support any format combination here.
+
+ # XXX: It should be straightforward to support srgb
+ if format.colorspace not in ('rgb', 'zs'):
+ return False
+
+ if format.layout not in (ARITH, ARRAY):
+ return False
+
+ for i in range(4):
+ type = format.in_types[i]
+ if type.kind not in (VOID, UNSIGNED, FLOAT):
+ return False
+
+ # We can only read a color from a depth/stencil format if the depth channel is present
+ if format.colorspace == 'zs' and format.out_swizzle[0] == SWIZZLE_NONE:
+ return False
+
+ return True
+
+
+def native_type(format):
+    '''Get the native type appropriate for a format.'''
+
+ if format.layout == ARITH:
+ # For arithmetic pixel formats return the integer type that matches the whole pixel
+ return 'uint%u_t' % format.block_size()
+ elif format.layout == ARRAY:
+ # For array pixel formats return the integer type that matches the color channel
+ type = format.in_types[0]
+ if type.kind == UNSIGNED:
+ return 'uint%u_t' % type.size
+ elif type.kind == SIGNED:
+ return 'int%u_t' % type.size
+ elif type.kind == FLOAT:
+ if type.size == 32:
+ return 'float'
+ elif type.size == 64:
+ return 'double'
+ else:
+ assert False
+ else:
+ assert False
+ else:
+ assert False
+
+
+def intermediate_native_type(bits, sign):
+    '''Find a native type adequate to hold intermediate results of the requested bit size.'''
+
+ bytes = 4 # don't use anything smaller than 32bits
+ while bytes * 8 < bits:
+ bytes *= 2
+ bits = bytes*8
+
+ if sign:
+ return 'int%u_t' % bits
+ else:
+ return 'uint%u_t' % bits
+
+
+def get_one_shift(type):
+ '''Get the number of the bit that matches unity for this type.'''
+    if type.kind == FLOAT:
+ assert False
+ if not type.norm:
+ return 0
+ if type.kind == UNSIGNED:
+ return type.size
+ if type.kind == SIGNED:
+ return type.size - 1
+ if type.kind == FIXED:
+ return type.size / 2
+ assert False
+
+
+def get_one(type):
+ '''Get the value of unity for this type.'''
+    if type.kind == FLOAT or not type.norm:
+ return 1
+ else:
+ return (1 << get_one_shift(type)) - 1
+
+
+def generate_clamp():
+    '''Generate the clamping functions for each type.
+
+ We don't use a macro so that arguments with side effects,
+ like *src_pixel++ are correctly handled.
+ '''
+
+ for suffix, native_type in [
+ ('', 'double'),
+ ('f', 'float'),
+ ('ui', 'unsigned int'),
+ ('si', 'int'),
+ ]:
+ print 'static INLINE %s' % native_type
+ print 'clamp%s(%s value, %s lbound, %s ubound)' % (suffix, native_type, native_type, native_type)
+ print '{'
+ print ' if(value < lbound)'
+ print ' return lbound;'
+ print ' if(value > ubound)'
+ print ' return ubound;'
+ print ' return value;'
+ print '}'
+ print
+
+
+def clamp_expr(src_type, dst_type, dst_native_type, value):
+ '''Generate the expression to clamp the value in the source type to the
+ destination type range.'''
+
+ if src_type == dst_type:
+ return value
+
+    # Pick the appropriate clamp function
+ if src_type.kind == FLOAT:
+ if src_type.size == 32:
+ func = 'clampf'
+ elif src_type.size == 64:
+ func = 'clamp'
+ else:
+ assert False
+ elif src_type.kind == UNSIGNED:
+ func = 'clampui'
+ elif src_type.kind == SIGNED:
+ func = 'clampsi'
+ else:
+ assert False
+
+ # Clamp floats to [-1, 1] or [0, 1] range
+ if src_type.kind == FLOAT and dst_type.norm:
+ max = 1
+ if src_type.sign and dst_type.sign:
+ min = -1
+ else:
+ min = 0
+ return '%s(%s, %s, %s)' % (func, value, min, max)
+
+ # FIXME: Also clamp scaled values
+
+ return value
+
+
+def conversion_expr(src_type, dst_type, dst_native_type, value):
+ '''Generate the expression to convert a value between two types.'''
+
+ if src_type == dst_type:
+ return value
+
+ if src_type.kind == FLOAT and dst_type.kind == FLOAT:
+ return '(%s)%s' % (dst_native_type, value)
+
+ if not src_type.norm and not dst_type.norm:
+ return '(%s)%s' % (dst_native_type, value)
+
+ value = clamp_expr(src_type, dst_type, dst_native_type, value)
+
+ if dst_type.kind == FLOAT:
+ if src_type.norm:
+ one = get_one(src_type)
+ if src_type.size <= 23:
+ scale = '(1.0f/0x%x)' % one
+ else:
+ # bigger than single precision mantissa, use double
+ scale = '(1.0/0x%x)' % one
+ value = '(%s * %s)' % (value, scale)
+ return '(%s)%s' % (dst_native_type, value)
+
+ if src_type.kind == FLOAT:
+ if dst_type.norm:
+ dst_one = get_one(dst_type)
+ if dst_type.size <= 23:
+ scale = '0x%x' % dst_one
+ else:
+ # bigger than single precision mantissa, use double
+ scale = '(double)0x%x' % dst_one
+ value = '(%s * %s)' % (value, scale)
+ return '(%s)%s' % (dst_native_type, value)
+
+ if src_type.kind == dst_type.kind:
+ src_one = get_one(src_type)
+ dst_one = get_one(dst_type)
+
+ if src_one > dst_one and src_type.norm and dst_type.norm:
+ # We can just bitshift
+ src_shift = get_one_shift(src_type)
+ dst_shift = get_one_shift(dst_type)
+ value = '(%s >> %s)' % (value, src_shift - dst_shift)
+ else:
+ # We need to rescale using an intermediate type big enough to hold the multiplication of both
+ tmp_native_type = intermediate_native_type(src_type.size + dst_type.size, src_type.sign and dst_type.sign)
+ value = '(%s)%s' % (tmp_native_type, value)
+ value = '%s * 0x%x / 0x%x' % (value, dst_one, src_one)
+ value = '(%s)%s' % (dst_native_type, value)
+ return value
+
+ assert False
+
+
+def generate_format_read(format, dst_type, dst_native_type, dst_suffix):
+ '''Generate the function to read pixels from a particular format'''
+
+ name = short_name(format)
+
+ src_native_type = native_type(format)
+
+ print 'static void'
+ print 'util_format_%s_read_%s(%s *dst, unsigned dst_stride, const uint8_t *src, unsigned src_stride, unsigned x0, unsigned y0, unsigned w, unsigned h)' % (name, dst_suffix, dst_native_type)
+ print '{'
+ print ' unsigned x, y;'
+ print ' const uint8_t *src_row = src + y0*src_stride;'
+ print ' %s *dst_row = dst;' % dst_native_type
+ print ' for (y = 0; y < h; ++y) {'
+ print ' const %s *src_pixel = (const %s *)(src_row + x0*%u);' % (src_native_type, src_native_type, format.stride())
+ print ' %s *dst_pixel = dst_row;' %dst_native_type
+ print ' for (x = 0; x < w; ++x) {'
+
+ names = ['']*4
+ if format.colorspace == 'rgb':
+ for i in range(4):
+ swizzle = format.out_swizzle[i]
+ if swizzle < 4:
+ names[swizzle] += 'rgba'[i]
+ elif format.colorspace == 'zs':
+ swizzle = format.out_swizzle[0]
+ if swizzle < 4:
+ names[swizzle] = 'z'
+ else:
+ assert False
+ else:
+ assert False
+
+ if format.layout == ARITH:
+ print ' %s pixel = *src_pixel++;' % src_native_type
+ shift = 0;
+ for i in range(4):
+ src_type = format.in_types[i]
+ width = src_type.size
+ if names[i]:
+ value = 'pixel'
+ mask = (1 << width) - 1
+ if shift:
+ value = '(%s >> %u)' % (value, shift)
+ if shift + width < format.block_size():
+ value = '(%s & 0x%x)' % (value, mask)
+ value = conversion_expr(src_type, dst_type, dst_native_type, value)
+ print ' %s %s = %s;' % (dst_native_type, names[i], value)
+ shift += width
+ elif format.layout == ARRAY:
+ for i in range(4):
+ src_type = format.in_types[i]
+ if names[i]:
+ value = '(*src_pixel++)'
+ value = conversion_expr(src_type, dst_type, dst_native_type, value)
+ print ' %s %s = %s;' % (dst_native_type, names[i], value)
+ else:
+ assert False
+
+ for i in range(4):
+ if format.colorspace == 'rgb':
+ swizzle = format.out_swizzle[i]
+ if swizzle < 4:
+ value = names[swizzle]
+ elif swizzle == SWIZZLE_0:
+ value = '0'
+ elif swizzle == SWIZZLE_1:
+ value = '1'
+ else:
+ assert False
+ elif format.colorspace == 'zs':
+ if i < 3:
+ value = 'z'
+ else:
+ value = '1'
+ else:
+ assert False
+ print ' *dst_pixel++ = %s; /* %s */' % (value, 'rgba'[i])
+
+ print ' }'
+ print ' src_row += src_stride;'
+ print ' dst_row += dst_stride/sizeof(%s);' % dst_native_type
+ print ' }'
+ print '}'
+ print
+
+
+def generate_format_write(format, src_type, src_native_type, src_suffix):
+ '''Generate the function to write pixels to a particular format'''
+
+ name = short_name(format)
+
+ dst_native_type = native_type(format)
+
+ print 'static void'
+ print 'util_format_%s_write_%s(const %s *src, unsigned src_stride, uint8_t *dst, unsigned dst_stride, unsigned x0, unsigned y0, unsigned w, unsigned h)' % (name, src_suffix, src_native_type)
+ print '{'
+ print ' unsigned x, y;'
+ print ' uint8_t *dst_row = dst + y0*dst_stride;'
+ print ' const %s *src_row = src;' % src_native_type
+ print ' for (y = 0; y < h; ++y) {'
+ print ' %s *dst_pixel = (%s *)(dst_row + x0*%u);' % (dst_native_type, dst_native_type, format.stride())
+ print ' const %s *src_pixel = src_row;' %src_native_type
+ print ' for (x = 0; x < w; ++x) {'
+
+ inv_swizzle = [None]*4
+ if format.colorspace == 'rgb':
+ for i in range(4):
+ swizzle = format.out_swizzle[i]
+ if swizzle < 4:
+ inv_swizzle[swizzle] = i
+ elif format.colorspace == 'zs':
+ swizzle = format.out_swizzle[0]
+ if swizzle < 4:
+ inv_swizzle[swizzle] = 0
+ else:
+ assert False
+
+ if format.layout == ARITH:
+ print ' %s pixel = 0;' % dst_native_type
+ shift = 0;
+ for i in range(4):
+ dst_type = format.in_types[i]
+ width = dst_type.size
+ if inv_swizzle[i] is not None:
+ value = 'src_pixel[%u]' % inv_swizzle[i]
+ value = conversion_expr(src_type, dst_type, dst_native_type, value)
+ if shift:
+ value = '(%s << %u)' % (value, shift)
+ print ' pixel |= %s;' % value
+ shift += width
+ print ' *dst_pixel++ = pixel;'
+ elif format.layout == ARRAY:
+ for i in range(4):
+ dst_type = format.in_types[i]
+ if inv_swizzle[i] is not None:
+ value = 'src_pixel[%u]' % inv_swizzle[i]
+ value = conversion_expr(src_type, dst_type, dst_native_type, value)
+ print ' *dst_pixel++ = %s;' % value
+ else:
+ assert False
+ print ' src_pixel += 4;'
+
+ print ' }'
+ print ' dst_row += dst_stride;'
+ print ' src_row += src_stride/sizeof(%s);' % src_native_type
+ print ' }'
+ print '}'
+ print
+
+
+def generate_read(formats, dst_type, dst_native_type, dst_suffix):
+ '''Generate the dispatch function to read pixels from any format'''
+
+ for format in formats:
+ if is_format_supported(format):
+ generate_format_read(format, dst_type, dst_native_type, dst_suffix)
+
+ print 'void'
+ print 'util_format_read_%s(enum pipe_format format, %s *dst, unsigned dst_stride, const void *src, unsigned src_stride, unsigned x, unsigned y, unsigned w, unsigned h)' % (dst_suffix, dst_native_type)
+ print '{'
+ print ' void (*func)(%s *dst, unsigned dst_stride, const uint8_t *src, unsigned src_stride, unsigned x0, unsigned y0, unsigned w, unsigned h);' % dst_native_type
+ print ' switch(format) {'
+ for format in formats:
+ if is_format_supported(format):
+ print ' case %s:' % format.name
+ print ' func = &util_format_%s_read_%s;' % (short_name(format), dst_suffix)
+ print ' break;'
+ print ' default:'
+ print ' debug_printf("unsupported format\\n");'
+ print ' return;'
+ print ' }'
+ print ' func(dst, dst_stride, (const uint8_t *)src, src_stride, x, y, w, h);'
+ print '}'
+ print
+
+
+def generate_write(formats, src_type, src_native_type, src_suffix):
+ '''Generate the dispatch function to write pixels to any format'''
+
+ for format in formats:
+ if is_format_supported(format):
+ generate_format_write(format, src_type, src_native_type, src_suffix)
+
+ print 'void'
+ print 'util_format_write_%s(enum pipe_format format, const %s *src, unsigned src_stride, void *dst, unsigned dst_stride, unsigned x, unsigned y, unsigned w, unsigned h)' % (src_suffix, src_native_type)
+
+ print '{'
+ print ' void (*func)(const %s *src, unsigned src_stride, uint8_t *dst, unsigned dst_stride, unsigned x0, unsigned y0, unsigned w, unsigned h);' % src_native_type
+ print ' switch(format) {'
+ for format in formats:
+ if is_format_supported(format):
+ print ' case %s:' % format.name
+ print ' func = &util_format_%s_write_%s;' % (short_name(format), src_suffix)
+ print ' break;'
+ print ' default:'
+ print ' debug_printf("unsupported format\\n");'
+ print ' return;'
+ print ' }'
+ print ' func(src, src_stride, (uint8_t *)dst, dst_stride, x, y, w, h);'
+ print '}'
+ print
+
+
+def main():
+ formats = []
+ for arg in sys.argv[1:]:
+ formats.extend(parse(arg))
+
+ print '/* This file is autogenerated by u_format_access.py from u_format.csv. Do not edit directly. */'
+ print
+ # This will print the copyright message on the top of this file
+ print __doc__.strip()
+ print
+ print '#include "pipe/p_compiler.h"'
+ print '#include "u_format.h"'
+ print '#include "u_math.h"'
+ print
+
+ generate_clamp()
+
+ type = Type(FLOAT, False, 32)
+ native_type = 'float'
+ suffix = '4f'
+
+ generate_read(formats, type, native_type, suffix)
+ generate_write(formats, type, native_type, suffix)
+
+ type = Type(UNSIGNED, True, 8)
+ native_type = 'uint8_t'
+ suffix = '4ub'
+
+ generate_read(formats, type, native_type, suffix)
+ generate_write(formats, type, native_type, suffix)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/src/gallium/auxiliary/util/u_format_parse.py b/src/gallium/auxiliary/util/u_format_parse.py
new file mode 100755
index 0000000000..493aff7112
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_format_parse.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python
+
+'''
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+'''
+
+
+import sys
+
+
+VOID, UNSIGNED, SIGNED, FIXED, FLOAT = range(5)
+
+SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_0, SWIZZLE_1, SWIZZLE_NONE, = range(7)
+
+ARITH = 'arith'
+ARRAY = 'array'
+
+
+class Type:
+ '''Describe the type of a color channel.'''
+
+ def __init__(self, kind, norm, size):
+ self.kind = kind
+ self.norm = norm
+ self.size = size
+ self.sign = kind in (SIGNED, FIXED, FLOAT)
+
+ def __str__(self):
+ s = str(self.kind)
+ if self.norm:
+ s += 'n'
+ s += str(self.size)
+ return s
+
+ def __eq__(self, other):
+ return self.kind == other.kind and self.norm == other.norm and self.size == other.size
+
+
+class Format:
+ '''Describe a pixel format.'''
+
+ def __init__(self, name, layout, block_width, block_height, in_types, out_swizzle, colorspace):
+ self.name = name
+ self.layout = layout
+ self.block_width = block_width
+ self.block_height = block_height
+ self.in_types = in_types
+ self.out_swizzle = out_swizzle
+ self.colorspace = colorspace
+
+ def __str__(self):
+ return self.name
+
+ def block_size(self):
+ size = 0
+ for type in self.in_types:
+ size += type.size
+ return size
+
+ def stride(self):
+ return self.block_size()/8
+
+
+_kind_parse_map = {
+ '': VOID,
+ 'x': VOID,
+ 'u': UNSIGNED,
+ 's': SIGNED,
+ 'h': FIXED,
+ 'f': FLOAT,
+}
+
+_swizzle_parse_map = {
+ 'x': SWIZZLE_X,
+ 'y': SWIZZLE_Y,
+ 'z': SWIZZLE_Z,
+ 'w': SWIZZLE_W,
+ '0': SWIZZLE_0,
+ '1': SWIZZLE_1,
+ '_': SWIZZLE_NONE,
+}
+
+def parse(filename):
+    '''Parse the format descriptions in CSV format in terms of the
+ Type and Format classes above.'''
+
+ stream = open(filename)
+ formats = []
+ for line in stream:
+ line = line.rstrip()
+ fields = [field.strip() for field in line.split(',')]
+ name = fields[0]
+ layout = fields[1]
+ block_width, block_height = map(int, fields[2:4])
+ in_types = []
+ for field in fields[4:8]:
+ if field:
+ kind = _kind_parse_map[field[0]]
+ if field[1] == 'n':
+ norm = True
+ size = int(field[2:])
+ else:
+ norm = False
+ size = int(field[1:])
+ else:
+ kind = VOID
+ norm = False
+ size = 0
+ in_type = Type(kind, norm, size)
+ in_types.append(in_type)
+ out_swizzle = [_swizzle_parse_map[swizzle] for swizzle in fields[8]]
+ colorspace = fields[9]
+ formats.append(Format(name, layout, block_width, block_height, in_types, out_swizzle, colorspace))
+ return formats
+
diff --git a/src/gallium/auxiliary/util/u_format_table.py b/src/gallium/auxiliary/util/u_format_table.py
new file mode 100755
index 0000000000..8834568e8e
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_format_table.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python
+
+'''
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+'''
+
+
+import sys
+
+from u_format_parse import *
+
+
+def layout_map(layout):
+ return 'UTIL_FORMAT_LAYOUT_' + str(layout).upper()
+
+
+def colorspace_map(colorspace):
+ return 'UTIL_FORMAT_COLORSPACE_' + str(colorspace).upper()
+
+
+colorspace_channels_map = {
+ 'rgb': 'rgba',
+ 'rgba': 'rgba',
+ 'zs': 'zs',
+ 'yuv': ['y1', 'y2', 'u', 'v'],
+ 'dxt': []
+}
+
+
+kind_map = {
+ VOID: "UTIL_FORMAT_TYPE_VOID",
+ UNSIGNED: "UTIL_FORMAT_TYPE_UNSIGNED",
+ SIGNED: "UTIL_FORMAT_TYPE_SIGNED",
+ FIXED: "UTIL_FORMAT_TYPE_FIXED",
+ FLOAT: "UTIL_FORMAT_TYPE_FLOAT",
+}
+
+
+def bool_map(value):
+ if value:
+ return "TRUE"
+ else:
+ return "FALSE"
+
+
+swizzle_map = {
+ SWIZZLE_X: "UTIL_FORMAT_SWIZZLE_X",
+ SWIZZLE_Y: "UTIL_FORMAT_SWIZZLE_Y",
+ SWIZZLE_Z: "UTIL_FORMAT_SWIZZLE_Z",
+ SWIZZLE_W: "UTIL_FORMAT_SWIZZLE_W",
+ SWIZZLE_0: "UTIL_FORMAT_SWIZZLE_0",
+ SWIZZLE_1: "UTIL_FORMAT_SWIZZLE_1",
+ SWIZZLE_NONE: "UTIL_FORMAT_SWIZZLE_NONE",
+}
+
+
+def write_format_table(formats):
+ print '/* This file is autogenerated by u_format_table.py from u_format.csv. Do not edit directly. */'
+ print
+ # This will print the copyright message on the top of this file
+ print __doc__.strip()
+ print
+ print '#include "u_format.h"'
+ print
+ print 'const struct util_format_description'
+ print 'util_format_description_table[] = '
+ print "{"
+ for format in formats:
+ print " {"
+ print " %s," % (format.name,)
+ print " \"%s\"," % (format.name,)
+ print " {%u, %u, %u}, /* block */" % (format.block_width, format.block_height, format.block_size())
+ print " %s," % (layout_map(format.layout),)
+ print " {"
+ for i in range(4):
+ type = format.in_types[i]
+ if i < 3:
+ sep = ","
+ else:
+ sep = ""
+ print " {%s, %s, %u}%s /* %s */" % (kind_map[type.kind], bool_map(type.norm), type.size, sep, "xyzw"[i])
+ print " },"
+ print " {"
+ for i in range(4):
+ swizzle = format.out_swizzle[i]
+ if i < 3:
+ sep = ","
+ else:
+ sep = ""
+ try:
+                comment = colorspace_channels_map[format.colorspace][i]
+ except:
+ comment = 'ignored'
+ print " %s%s /* %s */" % (swizzle_map[swizzle], sep, comment)
+ print " },"
+ print " %s," % (colorspace_map(format.colorspace),)
+ print " },"
+ print " {"
+ print " PIPE_FORMAT_NONE,"
+ print " \"PIPE_FORMAT_NONE\","
+ print " {0, 0, 0},"
+ print " 0,"
+ print " {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}, {0, 0, 0}},"
+ print " {0, 0, 0, 0},"
+ print " 0"
+ print " },"
+ print "};"
+
+
+def main():
+
+ formats = []
+ for arg in sys.argv[1:]:
+ formats.extend(parse(arg))
+ write_format_table(formats)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/src/gallium/auxiliary/util/u_gen_mipmap.c b/src/gallium/auxiliary/util/u_gen_mipmap.c
new file mode 100644
index 0000000000..f06c0e463d
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_gen_mipmap.c
@@ -0,0 +1,1613 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ * Copyright 2008 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Mipmap generation utility
+ *
+ * @author Brian Paul
+ */
+
+
+#include "pipe/p_context.h"
+#include "util/u_debug.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+#include "pipe/p_shader_tokens.h"
+#include "pipe/p_state.h"
+
+#include "util/u_memory.h"
+#include "util/u_draw_quad.h"
+#include "util/u_gen_mipmap.h"
+#include "util/u_simple_shaders.h"
+
+#include "cso_cache/cso_context.h"
+
+
+struct gen_mipmap_state
+{
+ struct pipe_context *pipe;
+ struct cso_context *cso;
+
+ struct pipe_blend_state blend;
+ struct pipe_depth_stencil_alpha_state depthstencil;
+ struct pipe_rasterizer_state rasterizer;
+ struct pipe_sampler_state sampler;
+
+ void *vs;
+ void *fs;
+
+ struct pipe_buffer *vbuf; /**< quad vertices */
+ unsigned vbuf_slot;
+
+ float vertices[4][2][4]; /**< vertex/texcoords for quad */
+};
+
+
+
+enum dtype
+{
+ DTYPE_UBYTE,
+ DTYPE_UBYTE_3_3_2,
+ DTYPE_USHORT,
+ DTYPE_USHORT_4_4_4_4,
+ DTYPE_USHORT_5_6_5,
+ DTYPE_USHORT_1_5_5_5_REV,
+ DTYPE_UINT,
+ DTYPE_FLOAT,
+ DTYPE_HALF_FLOAT
+};
+
+
+typedef ushort half_float;
+
+
+static half_float
+float_to_half(float f)
+{
+ /* XXX fix this */
+ return 0;
+}
+
+static float
+half_to_float(half_float h)
+{
+ /* XXX fix this */
+ return 0.0f;
+}
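+
+/* Illustrative sketch only, not used by this file: a minimal float->half
+ * conversion for values in the normal half-float range.  Denormals,
+ * infinities, NaNs and round-to-nearest are deliberately ignored; a real
+ * implementation of the stubs above must handle all of those.  The sign
+ * moves to bit 15, the exponent is re-biased from 127 to 15 and the
+ * mantissa is truncated from 23 to 10 bits:
+ *
+ *    union { float f; unsigned ui; } fi;
+ *    unsigned sign, exponent, mantissa;
+ *    fi.f = f;
+ *    sign     = (fi.ui >> 16) & 0x8000;
+ *    exponent = (fi.ui >> 23) & 0xff;
+ *    mantissa = fi.ui & 0x7fffff;
+ *    if (exponent < 113 || exponent > 142)
+ *       return (half_float) sign;
+ *    return (half_float) (sign | ((exponent - 112) << 10) | (mantissa >> 13));
+ */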
+
+
+
+
+/**
+ * \name Support macros for do_row and do_row_3d
+ *
+ * The macro madness is here for two reasons. First, it compacts the code
+ * slightly. Second, it makes it much easier to adjust the specifics of the
+ * filter to tune the rounding characteristics.
+ */
+/*@{*/
+#define DECLARE_ROW_POINTERS(t, e) \
+ const t(*rowA)[e] = (const t(*)[e]) srcRowA; \
+ const t(*rowB)[e] = (const t(*)[e]) srcRowB; \
+ const t(*rowC)[e] = (const t(*)[e]) srcRowC; \
+ const t(*rowD)[e] = (const t(*)[e]) srcRowD; \
+ t(*dst)[e] = (t(*)[e]) dstRow
+
+#define DECLARE_ROW_POINTERS0(t) \
+ const t *rowA = (const t *) srcRowA; \
+ const t *rowB = (const t *) srcRowB; \
+ const t *rowC = (const t *) srcRowC; \
+ const t *rowD = (const t *) srcRowD; \
+ t *dst = (t *) dstRow
+
+#define FILTER_SUM_3D(Aj, Ak, Bj, Bk, Cj, Ck, Dj, Dk) \
+ ((unsigned) Aj + (unsigned) Ak \
+ + (unsigned) Bj + (unsigned) Bk \
+ + (unsigned) Cj + (unsigned) Ck \
+ + (unsigned) Dj + (unsigned) Dk \
+ + 4) >> 3
+
+#define FILTER_3D(e) \
+ do { \
+ dst[i][e] = FILTER_SUM_3D(rowA[j][e], rowA[k][e], \
+ rowB[j][e], rowB[k][e], \
+ rowC[j][e], rowC[k][e], \
+ rowD[j][e], rowD[k][e]); \
+ } while(0)
+
+#define FILTER_F_3D(e) \
+ do { \
+ dst[i][e] = (rowA[j][e] + rowA[k][e] \
+ + rowB[j][e] + rowB[k][e] \
+ + rowC[j][e] + rowC[k][e] \
+ + rowD[j][e] + rowD[k][e]) * 0.125F; \
+ } while(0)
+
+#define FILTER_HF_3D(e) \
+ do { \
+ const float aj = half_to_float(rowA[j][e]); \
+ const float ak = half_to_float(rowA[k][e]); \
+ const float bj = half_to_float(rowB[j][e]); \
+ const float bk = half_to_float(rowB[k][e]); \
+ const float cj = half_to_float(rowC[j][e]); \
+ const float ck = half_to_float(rowC[k][e]); \
+ const float dj = half_to_float(rowD[j][e]); \
+ const float dk = half_to_float(rowD[k][e]); \
+ dst[i][e] = float_to_half((aj + ak + bj + bk + cj + ck + dj + dk) \
+ * 0.125F); \
+ } while(0)
+/*@}*/
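+
+/* Worked example (illustration only): FILTER_SUM_3D averages the eight
+ * samples of a 2x2x2 box with rounding.  The "+ 4" adds half the divisor
+ * before the ">> 3" divide-by-8: eight samples 10,10,10,10,10,10,10,14 sum
+ * to 84, which plain truncation would turn into 10, while (84 + 4) >> 3
+ * rounds to 11.
+ */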
+
+
+/**
+ * Average together two rows of a source image to produce a single new
+ * row in the dest image. It's legal for the two source rows to point
+ * to the same data. The source width must be equal to either the
+ * dest width or two times the dest width.
+ * \param datatype one of the DTYPE_x values (DTYPE_UBYTE, DTYPE_USHORT, DTYPE_FLOAT, etc.)
+ * \param comps number of components per pixel (1..4)
+ */
+static void
+do_row(enum dtype datatype, uint comps, int srcWidth,
+ const void *srcRowA, const void *srcRowB,
+ int dstWidth, void *dstRow)
+{
+ const uint k0 = (srcWidth == dstWidth) ? 0 : 1;
+ const uint colStride = (srcWidth == dstWidth) ? 1 : 2;
+
+ assert(comps >= 1);
+ assert(comps <= 4);
+
+ /* This assertion is no longer valid with non-power-of-2 textures
+ assert(srcWidth == dstWidth || srcWidth == 2 * dstWidth);
+ */
+
+ if (datatype == DTYPE_UBYTE && comps == 4) {
+ uint i, j, k;
+ const ubyte(*rowA)[4] = (const ubyte(*)[4]) srcRowA;
+ const ubyte(*rowB)[4] = (const ubyte(*)[4]) srcRowB;
+ ubyte(*dst)[4] = (ubyte(*)[4]) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ dst[i][0] = (rowA[j][0] + rowA[k][0] + rowB[j][0] + rowB[k][0]) / 4;
+ dst[i][1] = (rowA[j][1] + rowA[k][1] + rowB[j][1] + rowB[k][1]) / 4;
+ dst[i][2] = (rowA[j][2] + rowA[k][2] + rowB[j][2] + rowB[k][2]) / 4;
+ dst[i][3] = (rowA[j][3] + rowA[k][3] + rowB[j][3] + rowB[k][3]) / 4;
+ }
+ }
+ else if (datatype == DTYPE_UBYTE && comps == 3) {
+ uint i, j, k;
+ const ubyte(*rowA)[3] = (const ubyte(*)[3]) srcRowA;
+ const ubyte(*rowB)[3] = (const ubyte(*)[3]) srcRowB;
+ ubyte(*dst)[3] = (ubyte(*)[3]) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ dst[i][0] = (rowA[j][0] + rowA[k][0] + rowB[j][0] + rowB[k][0]) / 4;
+ dst[i][1] = (rowA[j][1] + rowA[k][1] + rowB[j][1] + rowB[k][1]) / 4;
+ dst[i][2] = (rowA[j][2] + rowA[k][2] + rowB[j][2] + rowB[k][2]) / 4;
+ }
+ }
+ else if (datatype == DTYPE_UBYTE && comps == 2) {
+ uint i, j, k;
+ const ubyte(*rowA)[2] = (const ubyte(*)[2]) srcRowA;
+ const ubyte(*rowB)[2] = (const ubyte(*)[2]) srcRowB;
+ ubyte(*dst)[2] = (ubyte(*)[2]) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ dst[i][0] = (rowA[j][0] + rowA[k][0] + rowB[j][0] + rowB[k][0]) >> 2;
+ dst[i][1] = (rowA[j][1] + rowA[k][1] + rowB[j][1] + rowB[k][1]) >> 2;
+ }
+ }
+ else if (datatype == DTYPE_UBYTE && comps == 1) {
+ uint i, j, k;
+ const ubyte *rowA = (const ubyte *) srcRowA;
+ const ubyte *rowB = (const ubyte *) srcRowB;
+ ubyte *dst = (ubyte *) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ dst[i] = (rowA[j] + rowA[k] + rowB[j] + rowB[k]) >> 2;
+ }
+ }
+
+ else if (datatype == DTYPE_USHORT && comps == 4) {
+ uint i, j, k;
+ const ushort(*rowA)[4] = (const ushort(*)[4]) srcRowA;
+ const ushort(*rowB)[4] = (const ushort(*)[4]) srcRowB;
+ ushort(*dst)[4] = (ushort(*)[4]) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ dst[i][0] = (rowA[j][0] + rowA[k][0] + rowB[j][0] + rowB[k][0]) / 4;
+ dst[i][1] = (rowA[j][1] + rowA[k][1] + rowB[j][1] + rowB[k][1]) / 4;
+ dst[i][2] = (rowA[j][2] + rowA[k][2] + rowB[j][2] + rowB[k][2]) / 4;
+ dst[i][3] = (rowA[j][3] + rowA[k][3] + rowB[j][3] + rowB[k][3]) / 4;
+ }
+ }
+ else if (datatype == DTYPE_USHORT && comps == 3) {
+ uint i, j, k;
+ const ushort(*rowA)[3] = (const ushort(*)[3]) srcRowA;
+ const ushort(*rowB)[3] = (const ushort(*)[3]) srcRowB;
+ ushort(*dst)[3] = (ushort(*)[3]) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ dst[i][0] = (rowA[j][0] + rowA[k][0] + rowB[j][0] + rowB[k][0]) / 4;
+ dst[i][1] = (rowA[j][1] + rowA[k][1] + rowB[j][1] + rowB[k][1]) / 4;
+ dst[i][2] = (rowA[j][2] + rowA[k][2] + rowB[j][2] + rowB[k][2]) / 4;
+ }
+ }
+ else if (datatype == DTYPE_USHORT && comps == 2) {
+ uint i, j, k;
+ const ushort(*rowA)[2] = (const ushort(*)[2]) srcRowA;
+ const ushort(*rowB)[2] = (const ushort(*)[2]) srcRowB;
+ ushort(*dst)[2] = (ushort(*)[2]) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ dst[i][0] = (rowA[j][0] + rowA[k][0] + rowB[j][0] + rowB[k][0]) / 4;
+ dst[i][1] = (rowA[j][1] + rowA[k][1] + rowB[j][1] + rowB[k][1]) / 4;
+ }
+ }
+ else if (datatype == DTYPE_USHORT && comps == 1) {
+ uint i, j, k;
+ const ushort *rowA = (const ushort *) srcRowA;
+ const ushort *rowB = (const ushort *) srcRowB;
+ ushort *dst = (ushort *) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ dst[i] = (rowA[j] + rowA[k] + rowB[j] + rowB[k]) / 4;
+ }
+ }
+
+ else if (datatype == DTYPE_FLOAT && comps == 4) {
+ uint i, j, k;
+ const float(*rowA)[4] = (const float(*)[4]) srcRowA;
+ const float(*rowB)[4] = (const float(*)[4]) srcRowB;
+ float(*dst)[4] = (float(*)[4]) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ dst[i][0] = (rowA[j][0] + rowA[k][0] +
+ rowB[j][0] + rowB[k][0]) * 0.25F;
+ dst[i][1] = (rowA[j][1] + rowA[k][1] +
+ rowB[j][1] + rowB[k][1]) * 0.25F;
+ dst[i][2] = (rowA[j][2] + rowA[k][2] +
+ rowB[j][2] + rowB[k][2]) * 0.25F;
+ dst[i][3] = (rowA[j][3] + rowA[k][3] +
+ rowB[j][3] + rowB[k][3]) * 0.25F;
+ }
+ }
+ else if (datatype == DTYPE_FLOAT && comps == 3) {
+ uint i, j, k;
+ const float(*rowA)[3] = (const float(*)[3]) srcRowA;
+ const float(*rowB)[3] = (const float(*)[3]) srcRowB;
+ float(*dst)[3] = (float(*)[3]) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ dst[i][0] = (rowA[j][0] + rowA[k][0] +
+ rowB[j][0] + rowB[k][0]) * 0.25F;
+ dst[i][1] = (rowA[j][1] + rowA[k][1] +
+ rowB[j][1] + rowB[k][1]) * 0.25F;
+ dst[i][2] = (rowA[j][2] + rowA[k][2] +
+ rowB[j][2] + rowB[k][2]) * 0.25F;
+ }
+ }
+ else if (datatype == DTYPE_FLOAT && comps == 2) {
+ uint i, j, k;
+ const float(*rowA)[2] = (const float(*)[2]) srcRowA;
+ const float(*rowB)[2] = (const float(*)[2]) srcRowB;
+ float(*dst)[2] = (float(*)[2]) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ dst[i][0] = (rowA[j][0] + rowA[k][0] +
+ rowB[j][0] + rowB[k][0]) * 0.25F;
+ dst[i][1] = (rowA[j][1] + rowA[k][1] +
+ rowB[j][1] + rowB[k][1]) * 0.25F;
+ }
+ }
+ else if (datatype == DTYPE_FLOAT && comps == 1) {
+ uint i, j, k;
+ const float *rowA = (const float *) srcRowA;
+ const float *rowB = (const float *) srcRowB;
+ float *dst = (float *) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ dst[i] = (rowA[j] + rowA[k] + rowB[j] + rowB[k]) * 0.25F;
+ }
+ }
+
+#if 0
+ else if (datatype == DTYPE_HALF_FLOAT && comps == 4) {
+ uint i, j, k, comp;
+ const half_float(*rowA)[4] = (const half_float(*)[4]) srcRowA;
+ const half_float(*rowB)[4] = (const half_float(*)[4]) srcRowB;
+ half_float(*dst)[4] = (half_float(*)[4]) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ for (comp = 0; comp < 4; comp++) {
+ float aj, ak, bj, bk;
+ aj = half_to_float(rowA[j][comp]);
+ ak = half_to_float(rowA[k][comp]);
+ bj = half_to_float(rowB[j][comp]);
+ bk = half_to_float(rowB[k][comp]);
+ dst[i][comp] = float_to_half((aj + ak + bj + bk) * 0.25F);
+ }
+ }
+ }
+ else if (datatype == DTYPE_HALF_FLOAT && comps == 3) {
+ uint i, j, k, comp;
+ const half_float(*rowA)[3] = (const half_float(*)[3]) srcRowA;
+ const half_float(*rowB)[3] = (const half_float(*)[3]) srcRowB;
+ half_float(*dst)[3] = (half_float(*)[3]) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ for (comp = 0; comp < 3; comp++) {
+ float aj, ak, bj, bk;
+ aj = half_to_float(rowA[j][comp]);
+ ak = half_to_float(rowA[k][comp]);
+ bj = half_to_float(rowB[j][comp]);
+ bk = half_to_float(rowB[k][comp]);
+ dst[i][comp] = float_to_half((aj + ak + bj + bk) * 0.25F);
+ }
+ }
+ }
+ else if (datatype == DTYPE_HALF_FLOAT && comps == 2) {
+ uint i, j, k, comp;
+ const half_float(*rowA)[2] = (const half_float(*)[2]) srcRowA;
+ const half_float(*rowB)[2] = (const half_float(*)[2]) srcRowB;
+ half_float(*dst)[2] = (half_float(*)[2]) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ for (comp = 0; comp < 2; comp++) {
+ float aj, ak, bj, bk;
+ aj = half_to_float(rowA[j][comp]);
+ ak = half_to_float(rowA[k][comp]);
+ bj = half_to_float(rowB[j][comp]);
+ bk = half_to_float(rowB[k][comp]);
+ dst[i][comp] = float_to_half((aj + ak + bj + bk) * 0.25F);
+ }
+ }
+ }
+ else if (datatype == DTYPE_HALF_FLOAT && comps == 1) {
+ uint i, j, k;
+ const half_float *rowA = (const half_float *) srcRowA;
+ const half_float *rowB = (const half_float *) srcRowB;
+ half_float *dst = (half_float *) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ float aj, ak, bj, bk;
+ aj = half_to_float(rowA[j]);
+ ak = half_to_float(rowA[k]);
+ bj = half_to_float(rowB[j]);
+ bk = half_to_float(rowB[k]);
+ dst[i] = float_to_half((aj + ak + bj + bk) * 0.25F);
+ }
+ }
+#endif
+
+ else if (datatype == DTYPE_UINT && comps == 1) {
+ uint i, j, k;
+ const uint *rowA = (const uint *) srcRowA;
+ const uint *rowB = (const uint *) srcRowB;
+ uint *dst = (uint *) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ dst[i] = rowA[j] / 4 + rowA[k] / 4 + rowB[j] / 4 + rowB[k] / 4;
+ }
+ }
+
+ else if (datatype == DTYPE_USHORT_5_6_5 && comps == 3) {
+ uint i, j, k;
+ const ushort *rowA = (const ushort *) srcRowA;
+ const ushort *rowB = (const ushort *) srcRowB;
+ ushort *dst = (ushort *) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ const int rowAr0 = rowA[j] & 0x1f;
+ const int rowAr1 = rowA[k] & 0x1f;
+ const int rowBr0 = rowB[j] & 0x1f;
+ const int rowBr1 = rowB[k] & 0x1f;
+ const int rowAg0 = (rowA[j] >> 5) & 0x3f;
+ const int rowAg1 = (rowA[k] >> 5) & 0x3f;
+ const int rowBg0 = (rowB[j] >> 5) & 0x3f;
+ const int rowBg1 = (rowB[k] >> 5) & 0x3f;
+ const int rowAb0 = (rowA[j] >> 11) & 0x1f;
+ const int rowAb1 = (rowA[k] >> 11) & 0x1f;
+ const int rowBb0 = (rowB[j] >> 11) & 0x1f;
+ const int rowBb1 = (rowB[k] >> 11) & 0x1f;
+ const int red = (rowAr0 + rowAr1 + rowBr0 + rowBr1) >> 2;
+ const int green = (rowAg0 + rowAg1 + rowBg0 + rowBg1) >> 2;
+ const int blue = (rowAb0 + rowAb1 + rowBb0 + rowBb1) >> 2;
+ dst[i] = (blue << 11) | (green << 5) | red;
+ }
+ }
+ else if (datatype == DTYPE_USHORT_4_4_4_4 && comps == 4) {
+ uint i, j, k;
+ const ushort *rowA = (const ushort *) srcRowA;
+ const ushort *rowB = (const ushort *) srcRowB;
+ ushort *dst = (ushort *) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ const int rowAr0 = rowA[j] & 0xf;
+ const int rowAr1 = rowA[k] & 0xf;
+ const int rowBr0 = rowB[j] & 0xf;
+ const int rowBr1 = rowB[k] & 0xf;
+ const int rowAg0 = (rowA[j] >> 4) & 0xf;
+ const int rowAg1 = (rowA[k] >> 4) & 0xf;
+ const int rowBg0 = (rowB[j] >> 4) & 0xf;
+ const int rowBg1 = (rowB[k] >> 4) & 0xf;
+ const int rowAb0 = (rowA[j] >> 8) & 0xf;
+ const int rowAb1 = (rowA[k] >> 8) & 0xf;
+ const int rowBb0 = (rowB[j] >> 8) & 0xf;
+ const int rowBb1 = (rowB[k] >> 8) & 0xf;
+ const int rowAa0 = (rowA[j] >> 12) & 0xf;
+ const int rowAa1 = (rowA[k] >> 12) & 0xf;
+ const int rowBa0 = (rowB[j] >> 12) & 0xf;
+ const int rowBa1 = (rowB[k] >> 12) & 0xf;
+ const int red = (rowAr0 + rowAr1 + rowBr0 + rowBr1) >> 2;
+ const int green = (rowAg0 + rowAg1 + rowBg0 + rowBg1) >> 2;
+ const int blue = (rowAb0 + rowAb1 + rowBb0 + rowBb1) >> 2;
+ const int alpha = (rowAa0 + rowAa1 + rowBa0 + rowBa1) >> 2;
+ dst[i] = (alpha << 12) | (blue << 8) | (green << 4) | red;
+ }
+ }
+ else if (datatype == DTYPE_USHORT_1_5_5_5_REV && comps == 4) {
+ uint i, j, k;
+ const ushort *rowA = (const ushort *) srcRowA;
+ const ushort *rowB = (const ushort *) srcRowB;
+ ushort *dst = (ushort *) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ const int rowAr0 = rowA[j] & 0x1f;
+ const int rowAr1 = rowA[k] & 0x1f;
+ const int rowBr0 = rowB[j] & 0x1f;
+ const int rowBr1 = rowB[k] & 0x1f;
+ const int rowAg0 = (rowA[j] >> 5) & 0x1f;
+ const int rowAg1 = (rowA[k] >> 5) & 0x1f;
+ const int rowBg0 = (rowB[j] >> 5) & 0x1f;
+ const int rowBg1 = (rowB[k] >> 5) & 0x1f;
+ const int rowAb0 = (rowA[j] >> 10) & 0x1f;
+ const int rowAb1 = (rowA[k] >> 10) & 0x1f;
+ const int rowBb0 = (rowB[j] >> 10) & 0x1f;
+ const int rowBb1 = (rowB[k] >> 10) & 0x1f;
+ const int rowAa0 = (rowA[j] >> 15) & 0x1;
+ const int rowAa1 = (rowA[k] >> 15) & 0x1;
+ const int rowBa0 = (rowB[j] >> 15) & 0x1;
+ const int rowBa1 = (rowB[k] >> 15) & 0x1;
+ const int red = (rowAr0 + rowAr1 + rowBr0 + rowBr1) >> 2;
+ const int green = (rowAg0 + rowAg1 + rowBg0 + rowBg1) >> 2;
+ const int blue = (rowAb0 + rowAb1 + rowBb0 + rowBb1) >> 2;
+ const int alpha = (rowAa0 + rowAa1 + rowBa0 + rowBa1) >> 2;
+ dst[i] = (alpha << 15) | (blue << 10) | (green << 5) | red;
+ }
+ }
+ else if (datatype == DTYPE_UBYTE_3_3_2 && comps == 3) {
+ uint i, j, k;
+ const ubyte *rowA = (const ubyte *) srcRowA;
+ const ubyte *rowB = (const ubyte *) srcRowB;
+ ubyte *dst = (ubyte *) dstRow;
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ const int rowAr0 = rowA[j] & 0x3;
+ const int rowAr1 = rowA[k] & 0x3;
+ const int rowBr0 = rowB[j] & 0x3;
+ const int rowBr1 = rowB[k] & 0x3;
+ const int rowAg0 = (rowA[j] >> 2) & 0x7;
+ const int rowAg1 = (rowA[k] >> 2) & 0x7;
+ const int rowBg0 = (rowB[j] >> 2) & 0x7;
+ const int rowBg1 = (rowB[k] >> 2) & 0x7;
+ const int rowAb0 = (rowA[j] >> 5) & 0x7;
+ const int rowAb1 = (rowA[k] >> 5) & 0x7;
+ const int rowBb0 = (rowB[j] >> 5) & 0x7;
+ const int rowBb1 = (rowB[k] >> 5) & 0x7;
+ const int red = (rowAr0 + rowAr1 + rowBr0 + rowBr1) >> 2;
+ const int green = (rowAg0 + rowAg1 + rowBg0 + rowBg1) >> 2;
+ const int blue = (rowAb0 + rowAb1 + rowBb0 + rowBb1) >> 2;
+ dst[i] = (blue << 5) | (green << 2) | red;
+ }
+ }
+ else {
+ debug_printf("bad format in do_row()");
+ }
+}
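+
+/* Usage sketch (hypothetical row pointers, for illustration only): halving an
+ * 8-pixel RGBA8 row pair into a single 4-pixel row averages each 2x2 block of
+ * the two adjacent source rows:
+ *
+ *    do_row(DTYPE_UBYTE, 4, 8, src_row0, src_row1, 4, dst_row);
+ */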
+
+
+/**
+ * Average together four rows of a source image to produce a single new
+ * row in the dest image.  It's legal for the source rows to point
+ * to the same data.  The source width must be equal to either the
+ * dest width or two times the dest width.
+ *
+ * \param datatype one of the DTYPE_x values (DTYPE_UBYTE, DTYPE_USHORT,
+ * DTYPE_FLOAT, etc.)
+ * \param comps number of components per pixel (1..4)
+ * \param srcWidth Width of a row in the source data
+ * \param srcRowA Pointer to one of the rows of source data
+ * \param srcRowB Pointer to one of the rows of source data
+ * \param srcRowC Pointer to one of the rows of source data
+ * \param srcRowD Pointer to one of the rows of source data
+ * \param dstWidth Width of a row in the destination data
+ * \param dstRow Pointer to the row of destination data
+ */
+static void
+do_row_3D(enum dtype datatype, uint comps, int srcWidth,
+ const void *srcRowA, const void *srcRowB,
+ const void *srcRowC, const void *srcRowD,
+ int dstWidth, void *dstRow)
+{
+ const uint k0 = (srcWidth == dstWidth) ? 0 : 1;
+ const uint colStride = (srcWidth == dstWidth) ? 1 : 2;
+ uint i, j, k;
+
+ assert(comps >= 1);
+ assert(comps <= 4);
+
+ if ((datatype == DTYPE_UBYTE) && (comps == 4)) {
+ DECLARE_ROW_POINTERS(ubyte, 4);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ FILTER_3D(0);
+ FILTER_3D(1);
+ FILTER_3D(2);
+ FILTER_3D(3);
+ }
+ }
+ else if ((datatype == DTYPE_UBYTE) && (comps == 3)) {
+ DECLARE_ROW_POINTERS(ubyte, 3);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ FILTER_3D(0);
+ FILTER_3D(1);
+ FILTER_3D(2);
+ }
+ }
+ else if ((datatype == DTYPE_UBYTE) && (comps == 2)) {
+ DECLARE_ROW_POINTERS(ubyte, 2);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ FILTER_3D(0);
+ FILTER_3D(1);
+ }
+ }
+ else if ((datatype == DTYPE_UBYTE) && (comps == 1)) {
+ DECLARE_ROW_POINTERS(ubyte, 1);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ FILTER_3D(0);
+ }
+ }
+ else if ((datatype == DTYPE_USHORT) && (comps == 4)) {
+ DECLARE_ROW_POINTERS(ushort, 4);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ FILTER_3D(0);
+ FILTER_3D(1);
+ FILTER_3D(2);
+ FILTER_3D(3);
+ }
+ }
+ else if ((datatype == DTYPE_USHORT) && (comps == 3)) {
+ DECLARE_ROW_POINTERS(ushort, 3);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ FILTER_3D(0);
+ FILTER_3D(1);
+ FILTER_3D(2);
+ }
+ }
+ else if ((datatype == DTYPE_USHORT) && (comps == 2)) {
+ DECLARE_ROW_POINTERS(ushort, 2);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ FILTER_3D(0);
+ FILTER_3D(1);
+ }
+ }
+ else if ((datatype == DTYPE_USHORT) && (comps == 1)) {
+ DECLARE_ROW_POINTERS(ushort, 1);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ FILTER_3D(0);
+ }
+ }
+ else if ((datatype == DTYPE_FLOAT) && (comps == 4)) {
+ DECLARE_ROW_POINTERS(float, 4);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ FILTER_F_3D(0);
+ FILTER_F_3D(1);
+ FILTER_F_3D(2);
+ FILTER_F_3D(3);
+ }
+ }
+ else if ((datatype == DTYPE_FLOAT) && (comps == 3)) {
+ DECLARE_ROW_POINTERS(float, 3);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ FILTER_F_3D(0);
+ FILTER_F_3D(1);
+ FILTER_F_3D(2);
+ }
+ }
+ else if ((datatype == DTYPE_FLOAT) && (comps == 2)) {
+ DECLARE_ROW_POINTERS(float, 2);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ FILTER_F_3D(0);
+ FILTER_F_3D(1);
+ }
+ }
+ else if ((datatype == DTYPE_FLOAT) && (comps == 1)) {
+ DECLARE_ROW_POINTERS(float, 1);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ FILTER_F_3D(0);
+ }
+ }
+ else if ((datatype == DTYPE_HALF_FLOAT) && (comps == 4)) {
+ DECLARE_ROW_POINTERS(half_float, 4);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ FILTER_HF_3D(0);
+ FILTER_HF_3D(1);
+ FILTER_HF_3D(2);
+ FILTER_HF_3D(3);
+ }
+ }
+ else if ((datatype == DTYPE_HALF_FLOAT) && (comps == 3)) {
+ DECLARE_ROW_POINTERS(half_float, 3);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ FILTER_HF_3D(0);
+ FILTER_HF_3D(1);
+ FILTER_HF_3D(2);
+ }
+ }
+ else if ((datatype == DTYPE_HALF_FLOAT) && (comps == 2)) {
+ DECLARE_ROW_POINTERS(half_float, 2);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ FILTER_HF_3D(0);
+ FILTER_HF_3D(1);
+ }
+ }
+ else if ((datatype == DTYPE_HALF_FLOAT) && (comps == 1)) {
+ DECLARE_ROW_POINTERS(half_float, 1);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ FILTER_HF_3D(0);
+ }
+ }
+ else if ((datatype == DTYPE_UINT) && (comps == 1)) {
+ const uint *rowA = (const uint *) srcRowA;
+ const uint *rowB = (const uint *) srcRowB;
+ const uint *rowC = (const uint *) srcRowC;
+ const uint *rowD = (const uint *) srcRowD;
+ float *dst = (float *) dstRow;
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ const uint64_t tmp = (((uint64_t) rowA[j] + (uint64_t) rowA[k])
+ + ((uint64_t) rowB[j] + (uint64_t) rowB[k])
+ + ((uint64_t) rowC[j] + (uint64_t) rowC[k])
+ + ((uint64_t) rowD[j] + (uint64_t) rowD[k]));
+ dst[i] = (float)((double) tmp * 0.125);
+ }
+ }
+ else if ((datatype == DTYPE_USHORT_5_6_5) && (comps == 3)) {
+ DECLARE_ROW_POINTERS0(ushort);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ const int rowAr0 = rowA[j] & 0x1f;
+ const int rowAr1 = rowA[k] & 0x1f;
+ const int rowBr0 = rowB[j] & 0x1f;
+ const int rowBr1 = rowB[k] & 0x1f;
+ const int rowCr0 = rowC[j] & 0x1f;
+ const int rowCr1 = rowC[k] & 0x1f;
+ const int rowDr0 = rowD[j] & 0x1f;
+ const int rowDr1 = rowD[k] & 0x1f;
+ const int rowAg0 = (rowA[j] >> 5) & 0x3f;
+ const int rowAg1 = (rowA[k] >> 5) & 0x3f;
+ const int rowBg0 = (rowB[j] >> 5) & 0x3f;
+ const int rowBg1 = (rowB[k] >> 5) & 0x3f;
+ const int rowCg0 = (rowC[j] >> 5) & 0x3f;
+ const int rowCg1 = (rowC[k] >> 5) & 0x3f;
+ const int rowDg0 = (rowD[j] >> 5) & 0x3f;
+ const int rowDg1 = (rowD[k] >> 5) & 0x3f;
+ const int rowAb0 = (rowA[j] >> 11) & 0x1f;
+ const int rowAb1 = (rowA[k] >> 11) & 0x1f;
+ const int rowBb0 = (rowB[j] >> 11) & 0x1f;
+ const int rowBb1 = (rowB[k] >> 11) & 0x1f;
+ const int rowCb0 = (rowC[j] >> 11) & 0x1f;
+ const int rowCb1 = (rowC[k] >> 11) & 0x1f;
+ const int rowDb0 = (rowD[j] >> 11) & 0x1f;
+ const int rowDb1 = (rowD[k] >> 11) & 0x1f;
+ const int r = FILTER_SUM_3D(rowAr0, rowAr1, rowBr0, rowBr1,
+ rowCr0, rowCr1, rowDr0, rowDr1);
+ const int g = FILTER_SUM_3D(rowAg0, rowAg1, rowBg0, rowBg1,
+ rowCg0, rowCg1, rowDg0, rowDg1);
+ const int b = FILTER_SUM_3D(rowAb0, rowAb1, rowBb0, rowBb1,
+ rowCb0, rowCb1, rowDb0, rowDb1);
+ dst[i] = (b << 11) | (g << 5) | r;
+ }
+ }
+ else if ((datatype == DTYPE_USHORT_4_4_4_4) && (comps == 4)) {
+ DECLARE_ROW_POINTERS0(ushort);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ const int rowAr0 = rowA[j] & 0xf;
+ const int rowAr1 = rowA[k] & 0xf;
+ const int rowBr0 = rowB[j] & 0xf;
+ const int rowBr1 = rowB[k] & 0xf;
+ const int rowCr0 = rowC[j] & 0xf;
+ const int rowCr1 = rowC[k] & 0xf;
+ const int rowDr0 = rowD[j] & 0xf;
+ const int rowDr1 = rowD[k] & 0xf;
+ const int rowAg0 = (rowA[j] >> 4) & 0xf;
+ const int rowAg1 = (rowA[k] >> 4) & 0xf;
+ const int rowBg0 = (rowB[j] >> 4) & 0xf;
+ const int rowBg1 = (rowB[k] >> 4) & 0xf;
+ const int rowCg0 = (rowC[j] >> 4) & 0xf;
+ const int rowCg1 = (rowC[k] >> 4) & 0xf;
+ const int rowDg0 = (rowD[j] >> 4) & 0xf;
+ const int rowDg1 = (rowD[k] >> 4) & 0xf;
+ const int rowAb0 = (rowA[j] >> 8) & 0xf;
+ const int rowAb1 = (rowA[k] >> 8) & 0xf;
+ const int rowBb0 = (rowB[j] >> 8) & 0xf;
+ const int rowBb1 = (rowB[k] >> 8) & 0xf;
+ const int rowCb0 = (rowC[j] >> 8) & 0xf;
+ const int rowCb1 = (rowC[k] >> 8) & 0xf;
+ const int rowDb0 = (rowD[j] >> 8) & 0xf;
+ const int rowDb1 = (rowD[k] >> 8) & 0xf;
+ const int rowAa0 = (rowA[j] >> 12) & 0xf;
+ const int rowAa1 = (rowA[k] >> 12) & 0xf;
+ const int rowBa0 = (rowB[j] >> 12) & 0xf;
+ const int rowBa1 = (rowB[k] >> 12) & 0xf;
+ const int rowCa0 = (rowC[j] >> 12) & 0xf;
+ const int rowCa1 = (rowC[k] >> 12) & 0xf;
+ const int rowDa0 = (rowD[j] >> 12) & 0xf;
+ const int rowDa1 = (rowD[k] >> 12) & 0xf;
+ const int r = FILTER_SUM_3D(rowAr0, rowAr1, rowBr0, rowBr1,
+ rowCr0, rowCr1, rowDr0, rowDr1);
+ const int g = FILTER_SUM_3D(rowAg0, rowAg1, rowBg0, rowBg1,
+ rowCg0, rowCg1, rowDg0, rowDg1);
+ const int b = FILTER_SUM_3D(rowAb0, rowAb1, rowBb0, rowBb1,
+ rowCb0, rowCb1, rowDb0, rowDb1);
+ const int a = FILTER_SUM_3D(rowAa0, rowAa1, rowBa0, rowBa1,
+ rowCa0, rowCa1, rowDa0, rowDa1);
+
+ dst[i] = (a << 12) | (b << 8) | (g << 4) | r;
+ }
+ }
+ else if ((datatype == DTYPE_USHORT_1_5_5_5_REV) && (comps == 4)) {
+ DECLARE_ROW_POINTERS0(ushort);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ const int rowAr0 = rowA[j] & 0x1f;
+ const int rowAr1 = rowA[k] & 0x1f;
+ const int rowBr0 = rowB[j] & 0x1f;
+ const int rowBr1 = rowB[k] & 0x1f;
+ const int rowCr0 = rowC[j] & 0x1f;
+ const int rowCr1 = rowC[k] & 0x1f;
+ const int rowDr0 = rowD[j] & 0x1f;
+ const int rowDr1 = rowD[k] & 0x1f;
+ const int rowAg0 = (rowA[j] >> 5) & 0x1f;
+ const int rowAg1 = (rowA[k] >> 5) & 0x1f;
+ const int rowBg0 = (rowB[j] >> 5) & 0x1f;
+ const int rowBg1 = (rowB[k] >> 5) & 0x1f;
+ const int rowCg0 = (rowC[j] >> 5) & 0x1f;
+ const int rowCg1 = (rowC[k] >> 5) & 0x1f;
+ const int rowDg0 = (rowD[j] >> 5) & 0x1f;
+ const int rowDg1 = (rowD[k] >> 5) & 0x1f;
+ const int rowAb0 = (rowA[j] >> 10) & 0x1f;
+ const int rowAb1 = (rowA[k] >> 10) & 0x1f;
+ const int rowBb0 = (rowB[j] >> 10) & 0x1f;
+ const int rowBb1 = (rowB[k] >> 10) & 0x1f;
+ const int rowCb0 = (rowC[j] >> 10) & 0x1f;
+ const int rowCb1 = (rowC[k] >> 10) & 0x1f;
+ const int rowDb0 = (rowD[j] >> 10) & 0x1f;
+ const int rowDb1 = (rowD[k] >> 10) & 0x1f;
+ const int rowAa0 = (rowA[j] >> 15) & 0x1;
+ const int rowAa1 = (rowA[k] >> 15) & 0x1;
+ const int rowBa0 = (rowB[j] >> 15) & 0x1;
+ const int rowBa1 = (rowB[k] >> 15) & 0x1;
+ const int rowCa0 = (rowC[j] >> 15) & 0x1;
+ const int rowCa1 = (rowC[k] >> 15) & 0x1;
+ const int rowDa0 = (rowD[j] >> 15) & 0x1;
+ const int rowDa1 = (rowD[k] >> 15) & 0x1;
+ const int r = FILTER_SUM_3D(rowAr0, rowAr1, rowBr0, rowBr1,
+ rowCr0, rowCr1, rowDr0, rowDr1);
+ const int g = FILTER_SUM_3D(rowAg0, rowAg1, rowBg0, rowBg1,
+ rowCg0, rowCg1, rowDg0, rowDg1);
+ const int b = FILTER_SUM_3D(rowAb0, rowAb1, rowBb0, rowBb1,
+ rowCb0, rowCb1, rowDb0, rowDb1);
+ const int a = FILTER_SUM_3D(rowAa0, rowAa1, rowBa0, rowBa1,
+ rowCa0, rowCa1, rowDa0, rowDa1);
+
+ dst[i] = (a << 15) | (b << 10) | (g << 5) | r;
+ }
+ }
+ else if ((datatype == DTYPE_UBYTE_3_3_2) && (comps == 3)) {
+ DECLARE_ROW_POINTERS0(ubyte);
+
+ for (i = j = 0, k = k0; i < (uint) dstWidth;
+ i++, j += colStride, k += colStride) {
+ const int rowAr0 = rowA[j] & 0x3;
+ const int rowAr1 = rowA[k] & 0x3;
+ const int rowBr0 = rowB[j] & 0x3;
+ const int rowBr1 = rowB[k] & 0x3;
+ const int rowCr0 = rowC[j] & 0x3;
+ const int rowCr1 = rowC[k] & 0x3;
+ const int rowDr0 = rowD[j] & 0x3;
+ const int rowDr1 = rowD[k] & 0x3;
+ const int rowAg0 = (rowA[j] >> 2) & 0x7;
+ const int rowAg1 = (rowA[k] >> 2) & 0x7;
+ const int rowBg0 = (rowB[j] >> 2) & 0x7;
+ const int rowBg1 = (rowB[k] >> 2) & 0x7;
+ const int rowCg0 = (rowC[j] >> 2) & 0x7;
+ const int rowCg1 = (rowC[k] >> 2) & 0x7;
+ const int rowDg0 = (rowD[j] >> 2) & 0x7;
+ const int rowDg1 = (rowD[k] >> 2) & 0x7;
+ const int rowAb0 = (rowA[j] >> 5) & 0x7;
+ const int rowAb1 = (rowA[k] >> 5) & 0x7;
+ const int rowBb0 = (rowB[j] >> 5) & 0x7;
+ const int rowBb1 = (rowB[k] >> 5) & 0x7;
+ const int rowCb0 = (rowC[j] >> 5) & 0x7;
+ const int rowCb1 = (rowC[k] >> 5) & 0x7;
+ const int rowDb0 = (rowD[j] >> 5) & 0x7;
+ const int rowDb1 = (rowD[k] >> 5) & 0x7;
+ const int r = FILTER_SUM_3D(rowAr0, rowAr1, rowBr0, rowBr1,
+ rowCr0, rowCr1, rowDr0, rowDr1);
+ const int g = FILTER_SUM_3D(rowAg0, rowAg1, rowBg0, rowBg1,
+ rowCg0, rowCg1, rowDg0, rowDg1);
+ const int b = FILTER_SUM_3D(rowAb0, rowAb1, rowBb0, rowBb1,
+ rowCb0, rowCb1, rowDb0, rowDb1);
+ dst[i] = (b << 5) | (g << 2) | r;
+ }
+ }
+ else {
+ debug_printf("bad format in do_row_3D()");
+ }
+}
+
+
+
+static void
+format_to_type_comps(enum pipe_format pformat,
+ enum dtype *datatype, uint *comps)
+{
+ /* XXX I think this could be implemented in terms of the pf_*() functions */
+ switch (pformat) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_X8R8G8B8_UNORM:
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ case PIPE_FORMAT_B8G8R8X8_UNORM:
+ case PIPE_FORMAT_R8G8B8A8_SRGB:
+ case PIPE_FORMAT_R8G8B8X8_SRGB:
+ case PIPE_FORMAT_A8R8G8B8_SRGB:
+ case PIPE_FORMAT_X8R8G8B8_SRGB:
+ case PIPE_FORMAT_B8G8R8A8_SRGB:
+ case PIPE_FORMAT_B8G8R8X8_SRGB:
+ case PIPE_FORMAT_R8G8B8_SRGB:
+ *datatype = DTYPE_UBYTE;
+ *comps = 4;
+ return;
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ *datatype = DTYPE_USHORT_1_5_5_5_REV;
+ *comps = 4;
+ return;
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ *datatype = DTYPE_USHORT_4_4_4_4;
+ *comps = 4;
+ return;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ *datatype = DTYPE_USHORT_5_6_5;
+ *comps = 3;
+ return;
+ case PIPE_FORMAT_L8_UNORM:
+ case PIPE_FORMAT_L8_SRGB:
+ case PIPE_FORMAT_A8_UNORM:
+ case PIPE_FORMAT_I8_UNORM:
+ *datatype = DTYPE_UBYTE;
+ *comps = 1;
+ return;
+ case PIPE_FORMAT_A8L8_UNORM:
+ case PIPE_FORMAT_A8L8_SRGB:
+ *datatype = DTYPE_UBYTE;
+ *comps = 2;
+ return;
+ default:
+ assert(0);
+ *datatype = DTYPE_UBYTE;
+ *comps = 0;
+ break;
+ }
+}
+
+
+static void
+reduce_1d(enum pipe_format pformat,
+ int srcWidth, const ubyte *srcPtr,
+ int dstWidth, ubyte *dstPtr)
+{
+ enum dtype datatype;
+ uint comps;
+
+ format_to_type_comps(pformat, &datatype, &comps);
+
+ /* simply pass the input row twice; a bit of a hack, but it saves code */
+ do_row(datatype, comps,
+ srcWidth, srcPtr, srcPtr,
+ dstWidth, dstPtr);
+}
+
+
+/**
+ * Strides are in bytes.  If zero, they are computed as width * bpp.
+ */
+static void
+reduce_2d(enum pipe_format pformat,
+ int srcWidth, int srcHeight,
+ int srcRowStride, const ubyte *srcPtr,
+ int dstWidth, int dstHeight,
+ int dstRowStride, ubyte *dstPtr)
+{
+ enum dtype datatype;
+ uint comps;
+ const int bpt = pf_get_size(pformat);
+ const ubyte *srcA, *srcB;
+ ubyte *dst;
+ int row;
+
+ format_to_type_comps(pformat, &datatype, &comps);
+
+ if (!srcRowStride)
+ srcRowStride = bpt * srcWidth;
+
+ if (!dstRowStride)
+ dstRowStride = bpt * dstWidth;
+
+ /* Compute src and dst pointers */
+ srcA = srcPtr;
+ if (srcHeight > 1)
+ srcB = srcA + srcRowStride;
+ else
+ srcB = srcA;
+ dst = dstPtr;
+
+ for (row = 0; row < dstHeight; row++) {
+ do_row(datatype, comps,
+ srcWidth, srcA, srcB,
+ dstWidth, dst);
+ srcA += 2 * srcRowStride;
+ srcB += 2 * srcRowStride;
+ dst += dstRowStride;
+ }
+}
+
+
+static void
+reduce_3d(enum pipe_format pformat,
+ int srcWidth, int srcHeight, int srcDepth,
+ int srcRowStride, const ubyte *srcPtr,
+ int dstWidth, int dstHeight, int dstDepth,
+ int dstRowStride, ubyte *dstPtr)
+{
+ const int bpt = pf_get_size(pformat);
+ const int border = 0;
+ int img, row;
+ int bytesPerSrcImage, bytesPerDstImage;
+ int bytesPerSrcRow, bytesPerDstRow;
+ int srcImageOffset, srcRowOffset;
+ enum dtype datatype;
+ uint comps;
+
+ format_to_type_comps(pformat, &datatype, &comps);
+
+ bytesPerSrcImage = srcWidth * srcHeight * bpt;
+ bytesPerDstImage = dstWidth * dstHeight * bpt;
+
+ bytesPerSrcRow = srcWidth * bpt;
+ bytesPerDstRow = dstWidth * bpt;
+
+ /* Offset between adjacent src images to be averaged together */
+ srcImageOffset = (srcDepth == dstDepth) ? 0 : bytesPerSrcImage;
+
+ /* Offset between adjacent src rows to be averaged together */
+ srcRowOffset = (srcHeight == dstHeight) ? 0 : srcWidth * bpt;
+
+ /*
+ * Need to average together up to 8 src pixels for each dest pixel.
+ * Break that down into 3 operations:
+ * 1. take two rows from source image and average them together.
+ * 2. take two rows from next source image and average them together.
+ * 3. take the two averaged rows and average them for the final dst row.
+ */
+
+ /*
+ _mesa_printf("mip3d %d x %d x %d -> %d x %d x %d\n",
+ srcWidth, srcHeight, srcDepth, dstWidth, dstHeight, dstDepth);
+ */
+
+ for (img = 0; img < dstDepth; img++) {
+ /* first source image pointer, skipping border */
+ const ubyte *imgSrcA = srcPtr
+ + (bytesPerSrcImage + bytesPerSrcRow + border) * bpt * border
+ + img * (bytesPerSrcImage + srcImageOffset);
+ /* second source image pointer, skipping border */
+ const ubyte *imgSrcB = imgSrcA + srcImageOffset;
+ /* address of the dest image, skipping border */
+ ubyte *imgDst = dstPtr
+ + (bytesPerDstImage + bytesPerDstRow + border) * bpt * border
+ + img * bytesPerDstImage;
+
+ /* setup the four source row pointers and the dest row pointer */
+ const ubyte *srcImgARowA = imgSrcA;
+ const ubyte *srcImgARowB = imgSrcA + srcRowOffset;
+ const ubyte *srcImgBRowA = imgSrcB;
+ const ubyte *srcImgBRowB = imgSrcB + srcRowOffset;
+ ubyte *dstImgRow = imgDst;
+
+ for (row = 0; row < dstHeight; row++) {
+ do_row_3D(datatype, comps, srcWidth,
+ srcImgARowA, srcImgARowB,
+ srcImgBRowA, srcImgBRowB,
+ dstWidth, dstImgRow);
+
+ /* advance to next rows */
+ srcImgARowA += bytesPerSrcRow + srcRowOffset;
+ srcImgARowB += bytesPerSrcRow + srcRowOffset;
+ srcImgBRowA += bytesPerSrcRow + srcRowOffset;
+ srcImgBRowB += bytesPerSrcRow + srcRowOffset;
+ dstImgRow += bytesPerDstRow;
+ }
+ }
+}
+
+
+
+
+static void
+make_1d_mipmap(struct gen_mipmap_state *ctx,
+ struct pipe_texture *pt,
+ uint face, uint baseLevel, uint lastLevel)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ const uint zslice = 0;
+ uint dstLevel;
+
+ for (dstLevel = baseLevel + 1; dstLevel <= lastLevel; dstLevel++) {
+ const uint srcLevel = dstLevel - 1;
+ struct pipe_transfer *srcTrans, *dstTrans;
+ void *srcMap, *dstMap;
+
+ srcTrans = screen->get_tex_transfer(screen, pt, face, srcLevel, zslice,
+ PIPE_TRANSFER_READ, 0, 0,
+ pt->width[srcLevel],
+ pt->height[srcLevel]);
+ dstTrans = screen->get_tex_transfer(screen, pt, face, dstLevel, zslice,
+ PIPE_TRANSFER_WRITE, 0, 0,
+ pt->width[dstLevel],
+ pt->height[dstLevel]);
+
+ srcMap = (ubyte *) screen->transfer_map(screen, srcTrans);
+ dstMap = (ubyte *) screen->transfer_map(screen, dstTrans);
+
+ reduce_1d(pt->format,
+ srcTrans->width, srcMap,
+ dstTrans->width, dstMap);
+
+ screen->transfer_unmap(screen, srcTrans);
+ screen->transfer_unmap(screen, dstTrans);
+
+ screen->tex_transfer_destroy(srcTrans);
+ screen->tex_transfer_destroy(dstTrans);
+ }
+}
+
+
+static void
+make_2d_mipmap(struct gen_mipmap_state *ctx,
+ struct pipe_texture *pt,
+ uint face, uint baseLevel, uint lastLevel)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ const uint zslice = 0;
+ uint dstLevel;
+
+ assert(pt->block.width == 1);
+ assert(pt->block.height == 1);
+
+ for (dstLevel = baseLevel + 1; dstLevel <= lastLevel; dstLevel++) {
+ const uint srcLevel = dstLevel - 1;
+ struct pipe_transfer *srcTrans, *dstTrans;
+ ubyte *srcMap, *dstMap;
+
+ srcTrans = screen->get_tex_transfer(screen, pt, face, srcLevel, zslice,
+ PIPE_TRANSFER_READ, 0, 0,
+ pt->width[srcLevel],
+ pt->height[srcLevel]);
+ dstTrans = screen->get_tex_transfer(screen, pt, face, dstLevel, zslice,
+ PIPE_TRANSFER_WRITE, 0, 0,
+ pt->width[dstLevel],
+ pt->height[dstLevel]);
+
+ srcMap = (ubyte *) screen->transfer_map(screen, srcTrans);
+ dstMap = (ubyte *) screen->transfer_map(screen, dstTrans);
+
+ reduce_2d(pt->format,
+ srcTrans->width, srcTrans->height,
+ srcTrans->stride, srcMap,
+ dstTrans->width, dstTrans->height,
+ dstTrans->stride, dstMap);
+
+ screen->transfer_unmap(screen, srcTrans);
+ screen->transfer_unmap(screen, dstTrans);
+
+ screen->tex_transfer_destroy(srcTrans);
+ screen->tex_transfer_destroy(dstTrans);
+ }
+}
+
+
+static void
+make_3d_mipmap(struct gen_mipmap_state *ctx,
+ struct pipe_texture *pt,
+ uint face, uint baseLevel, uint lastLevel)
+{
+#if 0
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ uint dstLevel, zslice = 0;
+
+ assert(pt->block.width == 1);
+ assert(pt->block.height == 1);
+
+ for (dstLevel = baseLevel + 1; dstLevel <= lastLevel; dstLevel++) {
+ const uint srcLevel = dstLevel - 1;
+ struct pipe_transfer *srcTrans, *dstTrans;
+ ubyte *srcMap, *dstMap;
+
+ srcTrans = screen->get_tex_transfer(screen, pt, face, srcLevel, zslice,
+ PIPE_TRANSFER_READ, 0, 0,
+ pt->width[srcLevel],
+ pt->height[srcLevel]);
+ dstTrans = screen->get_tex_transfer(screen, pt, face, dstLevel, zslice,
+ PIPE_TRANSFER_WRITE, 0, 0,
+ pt->width[dstLevel],
+ pt->height[dstLevel]);
+
+ srcMap = (ubyte *) screen->transfer_map(screen, srcTrans);
+ dstMap = (ubyte *) screen->transfer_map(screen, dstTrans);
+
+ reduce_3d(pt->format,
+ srcTrans->width, srcTrans->height,
+ srcTrans->stride, srcMap,
+ dstTrans->width, dstTrans->height,
+ dstTrans->stride, dstMap);
+
+ screen->transfer_unmap(screen, srcTrans);
+ screen->transfer_unmap(screen, dstTrans);
+
+ screen->tex_transfer_destroy(srcTrans);
+ screen->tex_transfer_destroy(dstTrans);
+ }
+#else
+ (void) reduce_3d;
+#endif
+}
+
+
+static void
+fallback_gen_mipmap(struct gen_mipmap_state *ctx,
+ struct pipe_texture *pt,
+ uint face, uint baseLevel, uint lastLevel)
+{
+ switch (pt->target) {
+ case PIPE_TEXTURE_1D:
+ make_1d_mipmap(ctx, pt, face, baseLevel, lastLevel);
+ break;
+ case PIPE_TEXTURE_2D:
+ case PIPE_TEXTURE_CUBE:
+ make_2d_mipmap(ctx, pt, face, baseLevel, lastLevel);
+ break;
+ case PIPE_TEXTURE_3D:
+ make_3d_mipmap(ctx, pt, face, baseLevel, lastLevel);
+ break;
+ default:
+ assert(0);
+ }
+}
+
+
+/**
+ * Create a mipmap generation context.
+ * The idea is to create one of these and re-use it each time we need to
+ * generate a mipmap.
+ */
+struct gen_mipmap_state *
+util_create_gen_mipmap(struct pipe_context *pipe,
+ struct cso_context *cso)
+{
+ struct gen_mipmap_state *ctx;
+ uint i;
+
+ ctx = CALLOC_STRUCT(gen_mipmap_state);
+ if (!ctx)
+ return NULL;
+
+ ctx->pipe = pipe;
+ ctx->cso = cso;
+
+ /* disabled blending/masking */
+ memset(&ctx->blend, 0, sizeof(ctx->blend));
+ ctx->blend.colormask = PIPE_MASK_RGBA;
+
+ /* no-op depth/stencil/alpha */
+ memset(&ctx->depthstencil, 0, sizeof(ctx->depthstencil));
+
+ /* rasterizer */
+ memset(&ctx->rasterizer, 0, sizeof(ctx->rasterizer));
+ ctx->rasterizer.front_winding = PIPE_WINDING_CW;
+ ctx->rasterizer.cull_mode = PIPE_WINDING_NONE;
+ ctx->rasterizer.bypass_vs_clip_and_viewport = 1;
+ ctx->rasterizer.gl_rasterization_rules = 1;
+
+ /* sampler state */
+ memset(&ctx->sampler, 0, sizeof(ctx->sampler));
+ ctx->sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ ctx->sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ ctx->sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ ctx->sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NEAREST;
+ ctx->sampler.normalized_coords = 1;
+
+ /* vertex shader - still needed to specify mapping from fragment
+ * shader input semantics to vertex elements
+ */
+ {
+ const uint semantic_names[] = { TGSI_SEMANTIC_POSITION,
+ TGSI_SEMANTIC_GENERIC };
+ const uint semantic_indexes[] = { 0, 0 };
+ ctx->vs = util_make_vertex_passthrough_shader(pipe, 2, semantic_names,
+ semantic_indexes);
+ }
+
+ /* fragment shader */
+ ctx->fs = util_make_fragment_tex_shader(pipe);
+
+ /* vertex data that doesn't change */
+ for (i = 0; i < 4; i++) {
+ ctx->vertices[i][0][2] = 0.0f; /* z */
+ ctx->vertices[i][0][3] = 1.0f; /* w */
+ ctx->vertices[i][1][3] = 1.0f; /* q */
+ }
+
+ /* Note: the actual vertex buffer is allocated as needed below */
+
+ return ctx;
+}
+
+
+/**
+ * Get next "slot" of vertex space in the vertex buffer.
+ * We're allocating one large vertex buffer and using it piece by piece.
+ */
+static unsigned
+get_next_slot(struct gen_mipmap_state *ctx)
+{
+ const unsigned max_slots = 4096 / sizeof ctx->vertices;
+
+ if (ctx->vbuf_slot >= max_slots)
+ util_gen_mipmap_flush( ctx );
+
+ if (!ctx->vbuf) {
+ ctx->vbuf = pipe_buffer_create(ctx->pipe->screen,
+ 32,
+ PIPE_BUFFER_USAGE_VERTEX,
+ max_slots * sizeof ctx->vertices);
+ }
+
+ return ctx->vbuf_slot++ * sizeof ctx->vertices;
+}
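+
+/* Note, derived from the declarations above: ctx->vertices is
+ * 4 verts x 2 attrs x 4 floats = 128 bytes, so max_slots is 4096 / 128 = 32.
+ * Once 32 quads have been written without an intervening
+ * util_gen_mipmap_flush(), get_next_slot() releases the buffer and
+ * immediately allocates a fresh one.
+ */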
+
+
+static unsigned
+set_vertex_data(struct gen_mipmap_state *ctx,
+ enum pipe_texture_target tex_target,
+ uint face, float width, float height)
+{
+ unsigned offset;
+
+ /* vert[0].position */
+ ctx->vertices[0][0][0] = 0.0f; /*x*/
+ ctx->vertices[0][0][1] = 0.0f; /*y*/
+
+ /* vert[1].position */
+ ctx->vertices[1][0][0] = width;
+ ctx->vertices[1][0][1] = 0.0f;
+
+ /* vert[2].position */
+ ctx->vertices[2][0][0] = width;
+ ctx->vertices[2][0][1] = height;
+
+ /* vert[3].position */
+ ctx->vertices[3][0][0] = 0.0f;
+ ctx->vertices[3][0][1] = height;
+
+ /* Setup vertex texcoords. This is a little tricky for cube maps. */
+ if (tex_target == PIPE_TEXTURE_CUBE) {
+ static const float st[4][2] = {
+ {0.0f, 0.0f}, {1.0f, 0.0f}, {1.0f, 1.0f}, {0.0f, 1.0f}
+ };
+ float rx, ry, rz;
+ uint i;
+
+ /* loop over quad verts */
+ for (i = 0; i < 4; i++) {
+ /* Compute sc = +/-scale and tc = +/-scale.
+ * Not +/-1 to avoid cube face selection ambiguity near the edges,
+ * though that can still sometimes happen with this scale factor...
+ */
+ const float scale = 0.9999f;
+ const float sc = (2.0f * st[i][0] - 1.0f) * scale;
+ const float tc = (2.0f * st[i][1] - 1.0f) * scale;
+
+ switch (face) {
+ case PIPE_TEX_FACE_POS_X:
+ rx = 1.0f;
+ ry = -tc;
+ rz = -sc;
+ break;
+ case PIPE_TEX_FACE_NEG_X:
+ rx = -1.0f;
+ ry = -tc;
+ rz = sc;
+ break;
+ case PIPE_TEX_FACE_POS_Y:
+ rx = sc;
+ ry = 1.0f;
+ rz = tc;
+ break;
+ case PIPE_TEX_FACE_NEG_Y:
+ rx = sc;
+ ry = -1.0f;
+ rz = -tc;
+ break;
+ case PIPE_TEX_FACE_POS_Z:
+ rx = sc;
+ ry = -tc;
+ rz = 1.0f;
+ break;
+ case PIPE_TEX_FACE_NEG_Z:
+ rx = -sc;
+ ry = -tc;
+ rz = -1.0f;
+ break;
+ default:
+ assert(0);
+ }
+
+ ctx->vertices[i][1][0] = rx; /*s*/
+ ctx->vertices[i][1][1] = ry; /*t*/
+ ctx->vertices[i][1][2] = rz; /*r*/
+ }
+ }
+ else {
+ /* 1D/2D */
+ ctx->vertices[0][1][0] = 0.0f; /*s*/
+ ctx->vertices[0][1][1] = 0.0f; /*t*/
+ ctx->vertices[0][1][2] = 0.0f; /*r*/
+
+ ctx->vertices[1][1][0] = 1.0f;
+ ctx->vertices[1][1][1] = 0.0f;
+ ctx->vertices[1][1][2] = 0.0f;
+
+ ctx->vertices[2][1][0] = 1.0f;
+ ctx->vertices[2][1][1] = 1.0f;
+ ctx->vertices[2][1][2] = 0.0f;
+
+ ctx->vertices[3][1][0] = 0.0f;
+ ctx->vertices[3][1][1] = 1.0f;
+ ctx->vertices[3][1][2] = 0.0f;
+ }
+
+ offset = get_next_slot( ctx );
+
+ pipe_buffer_write(ctx->pipe->screen, ctx->vbuf,
+ offset, sizeof(ctx->vertices), ctx->vertices);
+
+ return offset;
+}
+
+
+
+/**
+ * Destroy a mipmap generation context
+ */
+void
+util_destroy_gen_mipmap(struct gen_mipmap_state *ctx)
+{
+ struct pipe_context *pipe = ctx->pipe;
+
+ pipe->delete_vs_state(pipe, ctx->vs);
+ pipe->delete_fs_state(pipe, ctx->fs);
+
+ pipe_buffer_reference(&ctx->vbuf, NULL);
+
+ FREE(ctx);
+}
+
+
+
+/* Release vertex buffer at end of frame to avoid synchronous
+ * rendering.
+ */
+void util_gen_mipmap_flush( struct gen_mipmap_state *ctx )
+{
+ pipe_buffer_reference(&ctx->vbuf, NULL);
+ ctx->vbuf_slot = 0;
+}
+
+
+/**
+ * Generate mipmap images. It's assumed all needed texture memory is
+ * already allocated.
+ *
+ * \param pt the texture to generate mipmap levels for
+ * \param face which cube face to generate mipmaps for (0 for non-cube maps)
+ * \param baseLevel the first mipmap level to use as a src
+ * \param lastLevel the last mipmap level to generate
+ * \param filter the minification filter to use when sampling the source
+ * level; one of PIPE_TEX_FILTER_NEAREST or PIPE_TEX_FILTER_LINEAR
+ */
+void
+util_gen_mipmap(struct gen_mipmap_state *ctx,
+ struct pipe_texture *pt,
+ uint face, uint baseLevel, uint lastLevel, uint filter)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ struct pipe_framebuffer_state fb;
+ uint dstLevel;
+ uint zslice = 0;
+ uint offset;
+
+ /* check if we can render in the texture's format */
+ if (!screen->is_format_supported(screen, pt->format, PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_RENDER_TARGET, 0)) {
+ fallback_gen_mipmap(ctx, pt, face, baseLevel, lastLevel);
+ return;
+ }
+
+ /* save state (restored below) */
+ cso_save_blend(ctx->cso);
+ cso_save_depth_stencil_alpha(ctx->cso);
+ cso_save_rasterizer(ctx->cso);
+ cso_save_samplers(ctx->cso);
+ cso_save_sampler_textures(ctx->cso);
+ cso_save_framebuffer(ctx->cso);
+ cso_save_fragment_shader(ctx->cso);
+ cso_save_vertex_shader(ctx->cso);
+
+ /* bind our state */
+ cso_set_blend(ctx->cso, &ctx->blend);
+ cso_set_depth_stencil_alpha(ctx->cso, &ctx->depthstencil);
+ cso_set_rasterizer(ctx->cso, &ctx->rasterizer);
+
+ cso_set_fragment_shader_handle(ctx->cso, ctx->fs);
+ cso_set_vertex_shader_handle(ctx->cso, ctx->vs);
+
+ /* init framebuffer state */
+ memset(&fb, 0, sizeof(fb));
+ fb.nr_cbufs = 1;
+
+ /* set min/mag to same filter for faster sw speed */
+ ctx->sampler.mag_img_filter = filter;
+ ctx->sampler.min_img_filter = filter;
+
+ /*
+ * XXX for small mipmap levels, it may be faster to use the software
+ * fallback path...
+ */
+ for (dstLevel = baseLevel + 1; dstLevel <= lastLevel; dstLevel++) {
+ const uint srcLevel = dstLevel - 1;
+
+ struct pipe_surface *surf =
+ screen->get_tex_surface(screen, pt, face, dstLevel, zslice,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /*
+ * Setup framebuffer / dest surface
+ */
+ fb.cbufs[0] = surf;
+ fb.width = pt->width[dstLevel];
+ fb.height = pt->height[dstLevel];
+ cso_set_framebuffer(ctx->cso, &fb);
+
+ /*
+ * Setup sampler state
+ * Note: we should only have to set the min/max LOD clamps to ensure
+ * we grab texels from the right mipmap level. But some hardware
+ * has trouble with min clamping so we also set the lod_bias to
+ * try to work around that.
+ */
+ ctx->sampler.min_lod = ctx->sampler.max_lod = (float) srcLevel;
+ ctx->sampler.lod_bias = (float) srcLevel;
+ cso_single_sampler(ctx->cso, 0, &ctx->sampler);
+ cso_single_sampler_done(ctx->cso);
+
+ cso_set_sampler_textures(ctx->cso, 1, &pt);
+
+ /* quad coords in window coords (bypassing vs, clip and viewport) */
+ offset = set_vertex_data(ctx,
+ pt->target,
+ face,
+ (float) pt->width[dstLevel],
+ (float) pt->height[dstLevel]);
+
+ util_draw_vertex_buffer(ctx->pipe,
+ ctx->vbuf,
+ offset,
+ PIPE_PRIM_TRIANGLE_FAN,
+ 4, /* verts */
+ 2); /* attribs/vert */
+
+ pipe->flush(pipe, PIPE_FLUSH_RENDER_CACHE, NULL);
+
+ /* need to signal that the texture has changed _after_ rendering to it */
+ pipe_surface_reference( &surf, NULL );
+ }
+
+ /* restore state we changed */
+ cso_restore_blend(ctx->cso);
+ cso_restore_depth_stencil_alpha(ctx->cso);
+ cso_restore_rasterizer(ctx->cso);
+ cso_restore_samplers(ctx->cso);
+ cso_restore_sampler_textures(ctx->cso);
+ cso_restore_framebuffer(ctx->cso);
+ cso_restore_fragment_shader(ctx->cso);
+ cso_restore_vertex_shader(ctx->cso);
+}
diff --git a/src/gallium/auxiliary/util/u_gen_mipmap.h b/src/gallium/auxiliary/util/u_gen_mipmap.h
new file mode 100644
index 0000000000..54608f9466
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_gen_mipmap.h
@@ -0,0 +1,70 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef U_GENMIPMAP_H
+#define U_GENMIPMAP_H
+
+#include "pipe/p_state.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct pipe_context;
+struct pipe_texture;
+struct cso_context;
+
+struct gen_mipmap_state;
+
+
+extern struct gen_mipmap_state *
+util_create_gen_mipmap(struct pipe_context *pipe, struct cso_context *cso);
+
+
+extern void
+util_destroy_gen_mipmap(struct gen_mipmap_state *ctx);
+
+/* Release vertex buffer at end of frame to avoid synchronous
+ * rendering.
+ */
+extern void
+util_gen_mipmap_flush( struct gen_mipmap_state *ctx );
+
+
+extern void
+util_gen_mipmap(struct gen_mipmap_state *ctx,
+ struct pipe_texture *pt,
+ uint face, uint baseLevel, uint lastLevel, uint filter);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/gallium/auxiliary/util/u_handle_table.c b/src/gallium/auxiliary/util/u_handle_table.c
new file mode 100644
index 0000000000..3703718a62
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_handle_table.c
@@ -0,0 +1,298 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Generic handle table implementation.
+ *
+ * @author José Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+
+#include "util/u_memory.h"
+#include "util/u_handle_table.h"
+
+
+#define HANDLE_TABLE_INITIAL_SIZE 16
+
+
+struct handle_table
+{
+ /** Object array. Empty handles have a null object */
+ void **objects;
+
+ /** Number of objects the table can currently hold */
+ unsigned size;
+ /** Number of consecutive objects allocated at the start of the table */
+ unsigned filled;
+
+ /** Optional object destructor */
+ void (*destroy)(void *object);
+};
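+
+/* Note on the handle encoding used throughout this file: a handle is the
+ * object's array index plus one, so 0 never names a valid object and is
+ * used as the "invalid handle" / failure return value below.
+ */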
+
+
+struct handle_table *
+handle_table_create(void)
+{
+ struct handle_table *ht;
+
+ ht = MALLOC_STRUCT(handle_table);
+ if(!ht)
+ return NULL;
+
+ ht->objects = (void **)CALLOC(HANDLE_TABLE_INITIAL_SIZE, sizeof(void *));
+ if(!ht->objects) {
+ FREE(ht);
+ return NULL;
+ }
+
+ ht->size = HANDLE_TABLE_INITIAL_SIZE;
+ ht->filled = 0;
+
+ ht->destroy = NULL;
+
+ return ht;
+}
+
+
+void
+handle_table_set_destroy(struct handle_table *ht,
+ void (*destroy)(void *object))
+{
+ assert(ht);
+ if (!ht)
+ return;
+ ht->destroy = destroy;
+}
+
+
+/**
+ * Resize the table if necessary
+ */
+static INLINE int
+handle_table_resize(struct handle_table *ht,
+ unsigned minimum_size)
+{
+ unsigned new_size;
+ void **new_objects;
+
+ if(ht->size > minimum_size)
+ return ht->size;
+
+ new_size = ht->size;
+ while(!(new_size > minimum_size))
+ new_size *= 2;
+ assert(new_size);
+
+ new_objects = (void **)REALLOC((void *)ht->objects,
+ ht->size*sizeof(void *),
+ new_size*sizeof(void *));
+ if(!new_objects)
+ return 0;
+
+ memset(new_objects + ht->size, 0, (new_size - ht->size)*sizeof(void *));
+
+ ht->size = new_size;
+ ht->objects = new_objects;
+
+ return ht->size;
+}
+
+
+static INLINE void
+handle_table_clear(struct handle_table *ht,
+ unsigned index)
+{
+ void *object;
+
+ /* The order here is important so that the object being destroyed is not
+ * present in the table when seen by the destroy callback, because the
+ * destroy callback may directly or indirectly call the other functions in
+ * this module.
+ */
+
+ object = ht->objects[index];
+ if(object) {
+ ht->objects[index] = NULL;
+
+ if(ht->destroy)
+ ht->destroy(object);
+ }
+}
+
+
+unsigned
+handle_table_add(struct handle_table *ht,
+ void *object)
+{
+ unsigned index;
+ unsigned handle;
+
+ assert(ht);
+ assert(object);
+ if(!object || !ht)
+ return 0;
+
+ /* linear search for an empty handle */
+ while(ht->filled < ht->size) {
+ if(!ht->objects[ht->filled])
+ break;
+ ++ht->filled;
+ }
+
+ index = ht->filled;
+ handle = index + 1;
+
+ /* check integer overflow */
+ if(!handle)
+ return 0;
+
+ /* grow the table if necessary */
+ if(!handle_table_resize(ht, index))
+ return 0;
+
+ assert(!ht->objects[index]);
+ ht->objects[index] = object;
+ ++ht->filled;
+
+ return handle;
+}
+
+
+unsigned
+handle_table_set(struct handle_table *ht,
+ unsigned handle,
+ void *object)
+{
+ unsigned index;
+
+ assert(ht);
+ assert(handle);
+ if(!handle || !ht)
+ return 0;
+
+ assert(object);
+ if(!object)
+ return 0;
+
+ index = handle - 1;
+
+ /* grow the table if necessary */
+ if(!handle_table_resize(ht, index))
+ return 0;
+
+ handle_table_clear(ht, index);
+
+ ht->objects[index] = object;
+
+ return handle;
+}
+
+
+void *
+handle_table_get(struct handle_table *ht,
+ unsigned handle)
+{
+ void *object;
+
+ assert(ht);
+ assert(handle);
+ if(!handle || !ht || handle > ht->size)
+ return NULL;
+
+ object = ht->objects[handle - 1];
+
+ return object;
+}
+
+
+void
+handle_table_remove(struct handle_table *ht,
+ unsigned handle)
+{
+ void *object;
+ unsigned index;
+
+ assert(ht);
+ assert(handle);
+ if(!handle || !ht || handle > ht->size)
+ return;
+
+ index = handle - 1;
+ object = ht->objects[index];
+ if(!object)
+ return;
+
+ handle_table_clear(ht, index);
+
+ if(index < ht->filled)
+ ht->filled = index;
+}
+
+
+unsigned
+handle_table_get_next_handle(struct handle_table *ht,
+ unsigned handle)
+{
+ unsigned index;
+
+ for(index = handle; index < ht->size; ++index) {
+ if(ht->objects[index])
+ return index + 1;
+ }
+
+ return 0;
+}
+
+
+unsigned
+handle_table_get_first_handle(struct handle_table *ht)
+{
+ return handle_table_get_next_handle(ht, 0);
+}
+
+
+void
+handle_table_destroy(struct handle_table *ht)
+{
+ unsigned index;
+ assert(ht);
+
+ if (!ht)
+ return;
+
+ if(ht->destroy)
+ for(index = 0; index < ht->size; ++index)
+ handle_table_clear(ht, index);
+
+ FREE(ht->objects);
+ FREE(ht);
+}
+
diff --git a/src/gallium/auxiliary/util/u_handle_table.h b/src/gallium/auxiliary/util/u_handle_table.h
new file mode 100644
index 0000000000..d080135c9f
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_handle_table.h
@@ -0,0 +1,116 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Generic handle table.
+ *
+ * @author José Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef U_HANDLE_TABLE_H_
+#define U_HANDLE_TABLE_H_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Abstract data type to map integer handles to objects.
+ *
+ * Also referred to as a "pointer array".
+ */
+struct handle_table;
+
+
+struct handle_table *
+handle_table_create(void);
+
+
+/**
+ * Set an optional destructor callback.
+ *
+ * If set, it will be called during handle_table_remove and
+ * handle_table_destroy calls.
+ */
+void
+handle_table_set_destroy(struct handle_table *ht,
+ void (*destroy)(void *object));
+
+
+/**
+ * Add a new object.
+ *
+ * Returns a zero handle on failure (out of memory).
+ */
+unsigned
+handle_table_add(struct handle_table *ht,
+ void *object);
+
+/**
+ * Returns zero on failure (out of memory).
+ */
+unsigned
+handle_table_set(struct handle_table *ht,
+ unsigned handle,
+ void *object);
+
+/**
+ * Fetch an existing object.
+ *
+ * Returns NULL for an invalid handle.
+ */
+void *
+handle_table_get(struct handle_table *ht,
+ unsigned handle);
+
+
+void
+handle_table_remove(struct handle_table *ht,
+ unsigned handle);
+
+
+void
+handle_table_destroy(struct handle_table *ht);
+
+
+unsigned
+handle_table_get_first_handle(struct handle_table *ht);
+
+
+unsigned
+handle_table_get_next_handle(struct handle_table *ht,
+ unsigned handle);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_HANDLE_TABLE_H_ */
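Editor's note: a minimal usage sketch of the handle table API above, not part of the patch. It assumes the Gallium include paths; only functions declared in u_handle_table.h are used, and the report_destroy callback name is invented for illustration. Handles are array indices plus one, so zero always means failure or "no handle".

#include <stdio.h>
#include "util/u_handle_table.h"

/* Hypothetical destructor: runs for each object removed from the table or
 * still present when the table is destroyed. */
static void report_destroy(void *object)
{
   printf("destroying object %p\n", object);
}

static void handle_table_example(void)
{
   static int a, b;
   struct handle_table *ht = handle_table_create();
   unsigned ha, hb;

   if (!ht)
      return;

   handle_table_set_destroy(ht, report_destroy);

   ha = handle_table_add(ht, &a);   /* 1 on an empty table */
   hb = handle_table_add(ht, &b);   /* 2 */

   if (ha && handle_table_get(ht, ha) == &a)
      printf("lookup of handle %u ok\n", ha);

   handle_table_remove(ht, ha);     /* report_destroy() runs for &a */
   handle_table_destroy(ht);        /* report_destroy() runs for &b */

   (void) hb;
}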
diff --git a/src/gallium/auxiliary/util/u_hash.c b/src/gallium/auxiliary/util/u_hash.c
new file mode 100644
index 0000000000..b67653ec70
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_hash.c
@@ -0,0 +1,121 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Hash functions implementation.
+ *
+ * @author Jose Fonseca
+ */
+
+
+#include "u_hash.h"
+
+
+static const uint32_t
+util_crc32_table[256] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
+ 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
+ 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
+ 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+ 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
+ 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
+ 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
+ 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
+ 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
+ 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
+ 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
+ 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
+ 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
+ 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+ 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
+ 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
+ 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
+ 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
+ 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
+ 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
+ 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
+ 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
+ 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
+ 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
+ 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
+ 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
+ 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
+ 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
+ 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
+ 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+ 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
+ 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
+ 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
+ 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
+ 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
+};
+
+
+/**
+ * @sa http://www.w3.org/TR/PNG/#D-CRCAppendix
+ */
+uint32_t
+util_hash_crc32(const void *data, size_t size)
+{
+ uint8_t *p = (uint8_t *)data;
+ uint32_t crc = 0xffffffff;
+
+ while (size--)
+ crc = util_crc32_table[(crc ^ *p++) & 0xff] ^ (crc >> 8);
+
+ return crc;
+}
diff --git a/src/gallium/auxiliary/util/u_hash.h b/src/gallium/auxiliary/util/u_hash.h
new file mode 100644
index 0000000000..8d92b07c85
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_hash.h
@@ -0,0 +1,55 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Hash functions.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef U_HASH_H_
+#define U_HASH_H_
+
+
+#include "pipe/p_compiler.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+uint32_t
+util_hash_crc32(const void *data, size_t size);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_HASH_H_ */
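Editor's note: a hypothetical sanity check of util_hash_crc32(), not part of the patch. The implementation above initializes the CRC register to 0xffffffff but omits the conventional final XOR, so to compare against the textbook CRC-32 check value for "123456789" (0xcbf43926) the caller applies that XOR, as sketched here.

#include <assert.h>
#include <stdio.h>
#include <string.h>
#include "util/u_hash.h"

static void crc32_example(void)
{
   const char *msg = "123456789";
   uint32_t crc = util_hash_crc32(msg, strlen(msg));

   /* No final XOR is applied by util_hash_crc32(), so do it here before
    * comparing against the standard CRC-32 check value. */
   assert((crc ^ 0xffffffffu) == 0xcbf43926u);

   printf("crc32(\"%s\") = 0x%08x\n", msg, (unsigned) (crc ^ 0xffffffffu));
}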
diff --git a/src/gallium/auxiliary/util/u_hash_table.c b/src/gallium/auxiliary/util/u_hash_table.c
new file mode 100644
index 0000000000..8c2a8f454c
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_hash_table.c
@@ -0,0 +1,292 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * General purpose hash table implementation.
+ *
+ * Just uses the cso_hash for now, but it might be better to switch to a linear
+ * probing hash table implementation at some point, since those are said to have
+ * better lookup and cache performance, and it appears to be possible to write
+ * a lock-free implementation of such hash tables.
+ *
+ * @author José Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+
+#include "cso_cache/cso_hash.h"
+
+#include "util/u_memory.h"
+#include "util/u_hash_table.h"
+
+
+struct hash_table
+{
+ struct cso_hash *cso;
+
+ /** Hash function */
+ unsigned (*hash)(void *key);
+
+ /** Compare two keys */
+ int (*compare)(void *key1, void *key2);
+
+ /* TODO: key, value destructors? */
+};
+
+
+struct hash_table_item
+{
+ void *key;
+ void *value;
+};
+
+
+static INLINE struct hash_table_item *
+hash_table_item(struct cso_hash_iter iter)
+{
+ return (struct hash_table_item *)cso_hash_iter_data(iter);
+}
+
+
+struct hash_table *
+hash_table_create(unsigned (*hash)(void *key),
+ int (*compare)(void *key1, void *key2))
+{
+ struct hash_table *ht;
+
+ ht = MALLOC_STRUCT(hash_table);
+ if(!ht)
+ return NULL;
+
+ ht->cso = cso_hash_create();
+ if(!ht->cso) {
+ FREE(ht);
+ return NULL;
+ }
+
+ ht->hash = hash;
+ ht->compare = compare;
+
+ return ht;
+}
+
+
+static INLINE struct cso_hash_iter
+hash_table_find_iter(struct hash_table *ht,
+ void *key,
+ unsigned key_hash)
+{
+ struct cso_hash_iter iter;
+ struct hash_table_item *item;
+
+ iter = cso_hash_find(ht->cso, key_hash);
+ while (!cso_hash_iter_is_null(iter)) {
+ item = (struct hash_table_item *)cso_hash_iter_data(iter);
+ if (!ht->compare(item->key, key))
+ break;
+ iter = cso_hash_iter_next(iter);
+ }
+
+ return iter;
+}
+
+
+static INLINE struct hash_table_item *
+hash_table_find_item(struct hash_table *ht,
+ void *key,
+ unsigned key_hash)
+{
+ struct cso_hash_iter iter;
+ struct hash_table_item *item;
+
+ iter = cso_hash_find(ht->cso, key_hash);
+ while (!cso_hash_iter_is_null(iter)) {
+ item = (struct hash_table_item *)cso_hash_iter_data(iter);
+ if (!ht->compare(item->key, key))
+ return item;
+ iter = cso_hash_iter_next(iter);
+ }
+
+ return NULL;
+}
+
+
+enum pipe_error
+hash_table_set(struct hash_table *ht,
+ void *key,
+ void *value)
+{
+ unsigned key_hash;
+ struct hash_table_item *item;
+ struct cso_hash_iter iter;
+
+ assert(ht);
+ if (!ht)
+ return PIPE_ERROR_BAD_INPUT;
+
+ key_hash = ht->hash(key);
+
+ item = hash_table_find_item(ht, key, key_hash);
+ if(item) {
+ /* TODO: key/value destruction? */
+ item->value = value;
+ return PIPE_OK;
+ }
+
+ item = MALLOC_STRUCT(hash_table_item);
+ if(!item)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ item->key = key;
+ item->value = value;
+
+ iter = cso_hash_insert(ht->cso, key_hash, item);
+ if(cso_hash_iter_is_null(iter)) {
+ FREE(item);
+ return PIPE_ERROR_OUT_OF_MEMORY;
+ }
+
+ return PIPE_OK;
+}
+
+
+void *
+hash_table_get(struct hash_table *ht,
+ void *key)
+{
+ unsigned key_hash;
+ struct hash_table_item *item;
+
+ assert(ht);
+ if (!ht)
+ return NULL;
+
+ key_hash = ht->hash(key);
+
+ item = hash_table_find_item(ht, key, key_hash);
+ if(!item)
+ return NULL;
+
+ return item->value;
+}
+
+
+void
+hash_table_remove(struct hash_table *ht,
+ void *key)
+{
+ unsigned key_hash;
+ struct cso_hash_iter iter;
+ struct hash_table_item *item;
+
+ assert(ht);
+ if (!ht)
+ return;
+
+ key_hash = ht->hash(key);
+
+ iter = hash_table_find_iter(ht, key, key_hash);
+ if(cso_hash_iter_is_null(iter))
+ return;
+
+ item = hash_table_item(iter);
+ assert(item);
+ FREE(item);
+
+ cso_hash_erase(ht->cso, iter);
+}
+
+
+void
+hash_table_clear(struct hash_table *ht)
+{
+ struct cso_hash_iter iter;
+ struct hash_table_item *item;
+
+ assert(ht);
+ if (!ht)
+ return;
+
+ iter = cso_hash_first_node(ht->cso);
+ while (!cso_hash_iter_is_null(iter)) {
+ item = (struct hash_table_item *)cso_hash_take(ht->cso, cso_hash_iter_key(iter));
+ FREE(item);
+ iter = cso_hash_first_node(ht->cso);
+ }
+}
+
+
+enum pipe_error
+hash_table_foreach(struct hash_table *ht,
+ enum pipe_error (*callback)(void *key, void *value, void *data),
+ void *data)
+{
+ struct cso_hash_iter iter;
+ struct hash_table_item *item;
+ enum pipe_error result;
+
+ assert(ht);
+ if (!ht)
+ return PIPE_ERROR_BAD_INPUT;
+
+ iter = cso_hash_first_node(ht->cso);
+ while (!cso_hash_iter_is_null(iter)) {
+ item = (struct hash_table_item *)cso_hash_iter_data(iter);
+ result = callback(item->key, item->value, data);
+ if(result != PIPE_OK)
+ return result;
+ iter = cso_hash_iter_next(iter);
+ }
+
+ return PIPE_OK;
+}
+
+
+void
+hash_table_destroy(struct hash_table *ht)
+{
+ struct cso_hash_iter iter;
+ struct hash_table_item *item;
+
+ assert(ht);
+ if (!ht)
+ return;
+
+ iter = cso_hash_first_node(ht->cso);
+ while (!cso_hash_iter_is_null(iter)) {
+ item = (struct hash_table_item *)cso_hash_iter_data(iter);
+ FREE(item);
+ iter = cso_hash_iter_next(iter);
+ }
+
+ cso_hash_delete(ht->cso);
+
+ FREE(ht);
+}
diff --git a/src/gallium/auxiliary/util/u_hash_table.h b/src/gallium/auxiliary/util/u_hash_table.h
new file mode 100644
index 0000000000..feee881582
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_hash_table.h
@@ -0,0 +1,95 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * General purpose hash table.
+ *
+ * @author José Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef U_HASH_TABLE_H_
+#define U_HASH_TABLE_H_
+
+
+#include "pipe/p_error.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * General purpose hash table.
+ */
+struct hash_table;
+
+
+/**
+ * Create a hash table.
+ *
+ * @param hash hash function
+ * @param compare should return 0 for two equal keys.
+ */
+struct hash_table *
+hash_table_create(unsigned (*hash)(void *key),
+ int (*compare)(void *key1, void *key2));
+
+
+enum pipe_error
+hash_table_set(struct hash_table *ht,
+ void *key,
+ void *value);
+
+void *
+hash_table_get(struct hash_table *ht,
+ void *key);
+
+
+void
+hash_table_remove(struct hash_table *ht,
+ void *key);
+
+
+void
+hash_table_clear(struct hash_table *ht);
+
+
+enum pipe_error
+hash_table_foreach(struct hash_table *ht,
+ enum pipe_error (*callback)(void *key, void *value, void *data),
+ void *data);
+
+void
+hash_table_destroy(struct hash_table *ht);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_HASH_TABLE_H_ */
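Editor's note: an illustrative way to use the generic hash table with string keys, not from the patch. The hash_string, compare_string and print_entry callbacks are hypothetical helpers; compare() must return 0 for equal keys, mirroring strcmp()/memcmp() semantics, and the foreach callback stops the walk by returning anything other than PIPE_OK.

#include <stdio.h>
#include <string.h>
#include "util/u_hash.h"
#include "util/u_hash_table.h"

/* Hypothetical callbacks for nul-terminated string keys. */
static unsigned hash_string(void *key)
{
   return util_hash_crc32(key, strlen((const char *) key));
}

static int compare_string(void *key1, void *key2)
{
   return strcmp((const char *) key1, (const char *) key2);
}

static enum pipe_error print_entry(void *key, void *value, void *data)
{
   (void) data;
   printf("%s -> %p\n", (const char *) key, value);
   return PIPE_OK;   /* anything else aborts hash_table_foreach() */
}

static void hash_table_example(void)
{
   static int obj;
   struct hash_table *ht = hash_table_create(hash_string, compare_string);

   if (!ht)
      return;

   if (hash_table_set(ht, "fragment", &obj) == PIPE_OK)
      printf("stored %p under \"fragment\"\n", hash_table_get(ht, "fragment"));

   hash_table_foreach(ht, print_entry, NULL);
   hash_table_remove(ht, "fragment");
   hash_table_destroy(ht);
}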
diff --git a/src/gallium/auxiliary/util/u_keymap.c b/src/gallium/auxiliary/util/u_keymap.c
new file mode 100644
index 0000000000..508a2ee063
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_keymap.c
@@ -0,0 +1,319 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Key lookup/associative container.
+ *
+ * Like Jose's u_hash_table, this is based on the CSO cache code for now.
+ *
+ * Author: Brian Paul
+ */
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+#include "pipe/p_error.h"
+
+#include "cso_cache/cso_hash.h"
+
+#include "util/u_memory.h"
+#include "util/u_keymap.h"
+
+
+struct keymap
+{
+ struct cso_hash *cso;
+ unsigned key_size;
+ unsigned max_entries; /* XXX not obeyed yet */
+ unsigned num_entries;
+ keymap_delete_func delete_func;
+};
+
+
+struct keymap_item
+{
+ void *key, *value;
+};
+
+
+/**
+ * This is the default key-delete function used when the client doesn't
+ * provide one.
+ */
+static void
+default_delete_func(const struct keymap *map,
+ const void *key, void *data, void *user)
+{
+ FREE((void*) data);
+}
+
+
+static INLINE struct keymap_item *
+hash_table_item(struct cso_hash_iter iter)
+{
+ return (struct keymap_item *) cso_hash_iter_data(iter);
+}
+
+
+/**
+ * Return 4-byte hash key for a block of bytes.
+ */
+static unsigned
+hash(const void *key, unsigned keySize)
+{
+ unsigned i, hash;
+
+ keySize /= 4; /* convert from bytes to uints */
+
+ hash = 0;
+ for (i = 0; i < keySize; i++) {
+ hash ^= (i + 1) * ((const unsigned *) key)[i];
+ }
+
+ /*hash = hash ^ (hash >> 11) ^ (hash >> 22);*/
+
+ return hash;
+}
+
+
+/**
+ * Create a new map.
+ * \param keySize size of the keys in bytes
+ * \param maxEntries max number of entries to allow (~0 = infinity)
+ * \param deleteFunc optional callback to call when entries
+ * are deleted/replaced
+ */
+struct keymap *
+util_new_keymap(unsigned keySize, unsigned maxEntries,
+ keymap_delete_func deleteFunc)
+{
+ struct keymap *map = MALLOC_STRUCT(keymap);
+ if (!map)
+ return NULL;
+
+ map->cso = cso_hash_create();
+ if (!map->cso) {
+ FREE(map);
+ return NULL;
+ }
+
+ map->max_entries = maxEntries;
+ map->num_entries = 0;
+ map->key_size = keySize;
+ map->delete_func = deleteFunc ? deleteFunc : default_delete_func;
+
+ return map;
+}
+
+
+/**
+ * Delete/free a keymap and all entries. The deleteFunc that was given at
+ * create time will be called for each entry.
+ * \param user user-provided pointer passed through to the delete callback
+ */
+void
+util_delete_keymap(struct keymap *map, void *user)
+{
+ util_keymap_remove_all(map, user);
+ cso_hash_delete(map->cso);
+ FREE(map);
+}
+
+
+static INLINE struct cso_hash_iter
+hash_table_find_iter(const struct keymap *map, const void *key,
+ unsigned key_hash)
+{
+ struct cso_hash_iter iter;
+ struct keymap_item *item;
+
+ iter = cso_hash_find(map->cso, key_hash);
+ while (!cso_hash_iter_is_null(iter)) {
+ item = (struct keymap_item *) cso_hash_iter_data(iter);
+ if (!memcmp(item->key, key, map->key_size))
+ break;
+ iter = cso_hash_iter_next(iter);
+ }
+
+ return iter;
+}
+
+
+static INLINE struct keymap_item *
+hash_table_find_item(const struct keymap *map, const void *key,
+ unsigned key_hash)
+{
+ struct cso_hash_iter iter = hash_table_find_iter(map, key, key_hash);
+ if (cso_hash_iter_is_null(iter)) {
+ return NULL;
+ }
+ else {
+ return hash_table_item(iter);
+ }
+}
+
+
+/**
+ * Insert a new key + data pointer into the table.
+ * Note: we create a copy of the key, but not the data!
+ * If the key is already present in the table, replace the existing
+ * entry (calling the delete callback on the previous entry).
+ * If the maximum capacity of the map is reached, an old entry
+ * will be deleted (the delete callback will be called).
+ */
+boolean
+util_keymap_insert(struct keymap *map, const void *key,
+ const void *data, void *user)
+{
+ unsigned key_hash;
+ struct keymap_item *item;
+ struct cso_hash_iter iter;
+
+ assert(map);
+ if (!map)
+ return FALSE;
+
+ key_hash = hash(key, map->key_size);
+
+ item = hash_table_find_item(map, key, key_hash);
+ if (item) {
+ /* call delete callback for old entry/item */
+ map->delete_func(map, item->key, item->value, user);
+ item->value = (void *) data;
+ return TRUE;
+ }
+
+ item = MALLOC_STRUCT(keymap_item);
+ if (!item)
+ return FALSE;
+
+ item->key = mem_dup(key, map->key_size);
+ item->value = (void *) data;
+
+ iter = cso_hash_insert(map->cso, key_hash, item);
+ if (cso_hash_iter_is_null(iter)) {
+ FREE(item);
+ return FALSE;
+ }
+
+ map->num_entries++;
+
+ return TRUE;
+}
+
+
+/**
+ * Look up a key in the map and return the associated data pointer.
+ */
+const void *
+util_keymap_lookup(const struct keymap *map, const void *key)
+{
+ unsigned key_hash;
+ struct keymap_item *item;
+
+ assert(map);
+ if (!map)
+ return NULL;
+
+ key_hash = hash(key, map->key_size);
+
+ item = hash_table_find_item(map, key, key_hash);
+ if (!item)
+ return NULL;
+
+ return item->value;
+}
+
+
+/**
+ * Remove an entry from the map.
+ * The delete callback will be called if the given key/entry is found.
+ * \param user passed to the delete callback as the last param.
+ */
+void
+util_keymap_remove(struct keymap *map, const void *key, void *user)
+{
+ unsigned key_hash;
+ struct cso_hash_iter iter;
+ struct keymap_item *item;
+
+ assert(map);
+ if (!map)
+ return;
+
+ key_hash = hash(key, map->key_size);
+
+ iter = hash_table_find_iter(map, key, key_hash);
+ if (cso_hash_iter_is_null(iter))
+ return;
+
+ item = hash_table_item(iter);
+ assert(item);
+ if (!item)
+ return;
+ map->delete_func(map, item->key, item->value, user);
+ FREE(item->key);
+ FREE(item);
+
+ map->num_entries--;
+
+ cso_hash_erase(map->cso, iter);
+}
+
+
+/**
+ * Remove all entries from the map, calling the delete callback for each.
+ * \param user passed to the delete callback as the last param.
+ */
+void
+util_keymap_remove_all(struct keymap *map, void *user)
+{
+ struct cso_hash_iter iter;
+ struct keymap_item *item;
+
+ assert(map);
+ if (!map)
+ return;
+
+ iter = cso_hash_first_node(map->cso);
+ while (!cso_hash_iter_is_null(iter)) {
+ item = (struct keymap_item *)
+ cso_hash_take(map->cso, cso_hash_iter_key(iter));
+ map->delete_func(map, item->key, item->value, user);
+ FREE(item->key);
+ FREE(item);
+ iter = cso_hash_first_node(map->cso);
+ }
+}
+
+
+extern void
+util_keymap_info(const struct keymap *map)
+{
+ debug_printf("Keymap %p: %u of max %u entries\n",
+ (void *) map, map->num_entries, map->max_entries);
+}
diff --git a/src/gallium/auxiliary/util/u_keymap.h b/src/gallium/auxiliary/util/u_keymap.h
new file mode 100644
index 0000000000..8d60a76fc3
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_keymap.h
@@ -0,0 +1,68 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef U_KEYMAP_H
+#define U_KEYMAP_H
+
+#include "pipe/p_compiler.h"
+
+
+/** opaque keymap type */
+struct keymap;
+
+
+/** Delete/callback function type */
+typedef void (*keymap_delete_func)(const struct keymap *map,
+ const void *key, void *data,
+ void *user);
+
+
+extern struct keymap *
+util_new_keymap(unsigned keySize, unsigned maxEntries,
+ keymap_delete_func deleteFunc);
+
+extern void
+util_delete_keymap(struct keymap *map, void *user);
+
+extern boolean
+util_keymap_insert(struct keymap *map, const void *key,
+ const void *data, void *user);
+
+extern const void *
+util_keymap_lookup(const struct keymap *map, const void *key);
+
+extern void
+util_keymap_remove(struct keymap *map, const void *key, void *user);
+
+extern void
+util_keymap_remove_all(struct keymap *map, void *user);
+
+extern void
+util_keymap_info(const struct keymap *map);
+
+
+#endif /* U_KEYMAP_H */
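Editor's note: a hedged usage sketch of the keymap, not from the patch. Keys are fixed-size blocks copied into the map; since the internal hash() walks the key as an array of unsigned ints, key structs should be a multiple of 4 bytes and fully initialized (padding included) so the memcmp()-based comparison is well defined. The tex_key struct and delete_entry callback are invented for illustration.

#include <stdio.h>
#include "util/u_keymap.h"

/* Invented fixed-size key: a multiple of 4 bytes, fully initialized. */
struct tex_key {
   unsigned width;
   unsigned height;
   unsigned format;
   unsigned pad;
};

static void delete_entry(const struct keymap *map,
                         const void *key, void *data, void *user)
{
   (void) map; (void) key; (void) user;
   printf("evicting entry %p\n", data);
}

static void keymap_example(void)
{
   static int cached_obj;
   struct tex_key key = { 256, 256, 1, 0 };
   struct keymap *map = util_new_keymap(sizeof key, ~0, delete_entry);

   if (!map)
      return;

   if (util_keymap_insert(map, &key, &cached_obj, NULL))
      printf("lookup: %p\n", util_keymap_lookup(map, &key));

   util_keymap_remove(map, &key, NULL);   /* delete_entry() is called */
   util_delete_keymap(map, NULL);
}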
diff --git a/src/gallium/auxiliary/util/u_linear.c b/src/gallium/auxiliary/util/u_linear.c
new file mode 100644
index 0000000000..a1dce3f5cf
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_linear.c
@@ -0,0 +1,101 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Functions for converting tiled data to linear and vice versa.
+ */
+
+
+#include "util/u_debug.h"
+#include "u_linear.h"
+
+void
+pipe_linear_to_tile(size_t src_stride, const void *src_ptr,
+ struct pipe_tile_info *t, void *dst_ptr)
+{
+ int x, y, z;
+ char *ptr;
+ size_t bytes = t->cols * t->block.size;
+ char *dst_ptr2 = (char *) dst_ptr;
+
+ assert(pipe_linear_check_tile(t));
+
+ /* let's write linearly to the tiled buffer */
+ for (y = 0; y < t->tiles_y; y++) {
+ for (x = 0; x < t->tiles_x; x++) {
+ /* this inner loop could be replaced with SSE magic */
+ ptr = (char*)src_ptr + src_stride * t->rows * y + bytes * x;
+ for (z = 0; z < t->rows; z++) {
+ memcpy(dst_ptr2, ptr, bytes);
+ dst_ptr2 += bytes;
+ ptr += src_stride;
+ }
+ }
+ }
+}
+
+void pipe_linear_from_tile(struct pipe_tile_info *t, const void *src_ptr,
+ size_t dst_stride, void *dst_ptr)
+{
+ int x, y, z;
+ char *ptr;
+ size_t bytes = t->cols * t->block.size;
+ const char *src_ptr2 = (const char *) src_ptr;
+
+ /* let's read linearly from the tiled buffer */
+ for (y = 0; y < t->tiles_y; y++) {
+ for (x = 0; x < t->tiles_x; x++) {
+ /* this inner loop could be replaced with SSE magic */
+ ptr = (char*)dst_ptr + dst_stride * t->rows * y + bytes * x;
+ for (z = 0; z < t->rows; z++) {
+ memcpy(ptr, src_ptr2, bytes);
+ src_ptr2 += bytes;
+ ptr += dst_stride;
+ }
+ }
+ }
+}
+
+void
+pipe_linear_fill_info(struct pipe_tile_info *t,
+ const struct pipe_format_block *block,
+ unsigned tile_width, unsigned tile_height,
+ unsigned tiles_x, unsigned tiles_y)
+{
+ t->block = *block;
+
+ t->tile.width = tile_width;
+ t->tile.height = tile_height;
+ t->cols = t->tile.width / t->block.width;
+ t->rows = t->tile.height / t->block.height;
+ t->tile.size = t->cols * t->rows * t->block.size;
+
+ t->tiles_x = tiles_x;
+ t->tiles_y = tiles_y;
+ t->stride = t->cols * t->tiles_x * t->block.size;
+ t->size = t->tiles_x * t->tiles_y * t->tile.size;
+}
diff --git a/src/gallium/auxiliary/util/u_linear.h b/src/gallium/auxiliary/util/u_linear.h
new file mode 100644
index 0000000000..b74308ffa3
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_linear.h
@@ -0,0 +1,92 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Functions for converting tiled data to linear and vice versa.
+ */
+
+
+#ifndef U_LINEAR_H
+#define U_LINEAR_H
+
+#include "pipe/p_format.h"
+
+struct pipe_tile_info
+{
+ unsigned size;
+ unsigned stride;
+
+ /* The number of tiles */
+ unsigned tiles_x;
+ unsigned tiles_y;
+
+ /* size of each tile expressed in blocks */
+ unsigned cols;
+ unsigned rows;
+
+ /* Describe the tile in pixels */
+ struct pipe_format_block tile;
+
+ /* Describe each block within the tile */
+ struct pipe_format_block block;
+};
+
+void pipe_linear_to_tile(size_t src_stride, const void *src_ptr,
+ struct pipe_tile_info *t, void *dst_ptr);
+
+void pipe_linear_from_tile(struct pipe_tile_info *t, const void *src_ptr,
+ size_t dst_stride, void *dst_ptr);
+
+/**
+ * Convenience function to fill out a pipe_tile_info struct.
+ * @param t info to fill out.
+ * @param block block info about pixel layout
+ * @param tile_width the width of the tile in pixels
+ * @param tile_height the height of the tile in pixels
+ * @param tiles_x number of tiles along the x axis
+ * @param tiles_y number of tiles along the y axis
+ */
+void pipe_linear_fill_info(struct pipe_tile_info *t,
+ const struct pipe_format_block *block,
+ unsigned tile_width, unsigned tile_height,
+ unsigned tiles_x, unsigned tiles_y);
+
+static INLINE boolean pipe_linear_check_tile(const struct pipe_tile_info *t)
+{
+ if (t->tile.size != t->block.size * t->cols * t->rows)
+ return FALSE;
+
+ if (t->stride != t->block.size * t->cols * t->tiles_x)
+ return FALSE;
+
+ if (t->size < t->stride * t->rows * t->tiles_y)
+ return FALSE;
+
+ return TRUE;
+}
+
+#endif /* U_LINEAR_H */
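Editor's note: a rough sketch, not from the patch, of re-tiling a small linear image with the helpers above. It assumes pipe_format_block carries per-block size/width/height, as pipe_linear_fill_info() uses; the image dimensions, stride and the tile_image helper name are illustrative only.

#include <stdlib.h>
#include "util/u_linear.h"

/* Illustrative only: re-tile a 128x128, 4-bytes-per-pixel linear image
 * (stride 512) into four 64x64 tiles. */
static void *tile_image(const void *linear, size_t linear_stride)
{
   struct pipe_format_block block;
   struct pipe_tile_info t;
   void *tiled;

   block.size = 4;     /* bytes per block */
   block.width = 1;    /* pixels per block */
   block.height = 1;

   pipe_linear_fill_info(&t, &block, 64, 64, 2, 2);   /* 2x2 tiles of 64x64 */

   tiled = malloc(t.size);                            /* 64 KiB here */
   if (tiled)
      pipe_linear_to_tile(linear_stride, linear, &t, tiled);

   return tiled;
}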
diff --git a/src/gallium/auxiliary/util/u_math.c b/src/gallium/auxiliary/util/u_math.c
new file mode 100644
index 0000000000..2811475fa0
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_math.c
@@ -0,0 +1,73 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+
+#include "util/u_math.h"
+
+
+/** 2^x, for x in [-1.0, 1.0) */
+float pow2_table[POW2_TABLE_SIZE];
+
+
+static void
+init_pow2_table(void)
+{
+ int i;
+ for (i = 0; i < POW2_TABLE_SIZE; i++)
+ pow2_table[i] = (float) pow(2.0, (i - POW2_TABLE_OFFSET) / POW2_TABLE_SCALE);
+}
+
+
+/** log2(x), for x in [1.0, 2.0) */
+float log2_table[LOG2_TABLE_SIZE];
+
+
+static void
+init_log2_table(void)
+{
+ unsigned i;
+ for (i = 0; i < LOG2_TABLE_SIZE; i++)
+ log2_table[i] = (float) log2(1.0 + i * (1.0 / LOG2_TABLE_SCALE));
+}
+
+
+/**
+ * One time init for math utilities.
+ */
+void
+util_init_math(void)
+{
+ static boolean initialized = FALSE;
+ if (!initialized) {
+ init_pow2_table();
+ init_log2_table();
+ initialized = TRUE;
+ }
+}
+
+
diff --git a/src/gallium/auxiliary/util/u_math.h b/src/gallium/auxiliary/util/u_math.h
new file mode 100644
index 0000000000..b12c97dfb4
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_math.h
@@ -0,0 +1,516 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * Math utilities and approximations for common math functions.
+ * Reduced precision is usually acceptable in shaders...
+ *
+ * "fast" is used in the names of functions which are low-precision,
+ * or at least lower-precision than the normal C lib functions.
+ */
+
+
+#ifndef U_MATH_H
+#define U_MATH_H
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#if defined(PIPE_SUBSYSTEM_WINDOWS_MINIPORT)
+__inline double ceil(double val)
+{
+ double ceil_val;
+
+ if ((val - (long) val) == 0) {
+ ceil_val = val;
+ }
+ else {
+ if (val > 0) {
+ ceil_val = (long) val + 1;
+ }
+ else {
+ ceil_val = (long) val;
+ }
+ }
+
+ return ceil_val;
+}
+
+#ifndef PIPE_SUBSYSTEM_WINDOWS_CE_OGL
+__inline double floor(double val)
+{
+ double floor_val;
+
+ if ((val - (long) val) == 0) {
+ floor_val = val;
+ }
+ else {
+ if (val > 0) {
+ floor_val = (long) val;
+ }
+ else {
+ floor_val = (long) val - 1;
+ }
+ }
+
+ return floor_val;
+}
+#endif
+
+#pragma function(pow)
+__inline double __cdecl pow(double val, double exponent)
+{
+ /* XXX */
+ assert(0);
+ return 0;
+}
+
+#pragma function(log)
+__inline double __cdecl log(double val)
+{
+ /* XXX */
+ assert(0);
+ return 0;
+}
+
+#pragma function(atan2)
+__inline double __cdecl atan2(double y, double x)
+{
+ /* XXX */
+ assert(0);
+ return 0;
+}
+#else
+#include <math.h>
+#include <stdarg.h>
+#endif
+
+
+#if defined(_MSC_VER)
+
+#if _MSC_VER < 1400 && !defined(__cplusplus) || defined(PIPE_SUBSYSTEM_WINDOWS_CE)
+
+static INLINE float cosf( float f )
+{
+ return (float) cos( (double) f );
+}
+
+static INLINE float sinf( float f )
+{
+ return (float) sin( (double) f );
+}
+
+static INLINE float ceilf( float f )
+{
+ return (float) ceil( (double) f );
+}
+
+static INLINE float floorf( float f )
+{
+ return (float) floor( (double) f );
+}
+
+static INLINE float powf( float f, float g )
+{
+ return (float) pow( (double) f, (double) g );
+}
+
+static INLINE float sqrtf( float f )
+{
+ return (float) sqrt( (double) f );
+}
+
+static INLINE float fabsf( float f )
+{
+ return (float) fabs( (double) f );
+}
+
+static INLINE float logf( float f )
+{
+ return (float) log( (double) f );
+}
+
+#else
+/* Work-around an extra semi-colon in VS 2005 logf definition */
+#ifdef logf
+#undef logf
+#define logf(x) ((float)log((double)(x)))
+#endif /* logf */
+#endif
+
+static INLINE double log2( double x )
+{
+ const double invln2 = 1.442695041;
+ return log( x ) * invln2;
+}
+
+#endif /* _MSC_VER */
+
+
+
+
+
+#define POW2_TABLE_SIZE_LOG2 9
+#define POW2_TABLE_SIZE (1 << POW2_TABLE_SIZE_LOG2)
+#define POW2_TABLE_OFFSET (POW2_TABLE_SIZE/2)
+#define POW2_TABLE_SCALE ((float)(POW2_TABLE_SIZE/2))
+extern float pow2_table[POW2_TABLE_SIZE];
+
+
+/**
+ * Initialize math module. This should be called before using any
+ * other functions in this module.
+ */
+extern void
+util_init_math(void);
+
+
+union fi {
+ float f;
+ int32_t i;
+ uint32_t ui;
+};
+
+
+/**
+ * Fast version of 2^x
+ * Identity: exp2(a + b) = exp2(a) * exp2(b)
+ * Let ipart = int(x)
+ * Let fpart = x - ipart;
+ * So, exp2(x) = exp2(ipart) * exp2(fpart)
+ * Compute exp2(ipart) by building the float exponent directly (same as 1 << ipart)
+ * Compute exp2(fpart) with lookup table.
+ */
+static INLINE float
+util_fast_exp2(float x)
+{
+ int32_t ipart;
+ float fpart, mpart;
+ union fi epart;
+
+ if(x > 129.00000f)
+ return 3.402823466e+38f;
+
+ if (x < -126.99999f)
+ return 0.0f;
+
+ ipart = (int32_t) x;
+ fpart = x - (float) ipart;
+
+ /* same as
+ * epart.f = (float) (1 << ipart)
+ * but faster and without integer overflow for ipart > 31
+ */
+ epart.i = (ipart + 127 ) << 23;
+
+ mpart = pow2_table[POW2_TABLE_OFFSET + (int)(fpart * POW2_TABLE_SCALE)];
+
+ return epart.f * mpart;
+}
+
+
+/**
+ * Fast approximation to exp(x).
+ */
+static INLINE float
+util_fast_exp(float x)
+{
+ const float k = 1.44269f; /* = log2(e) */
+ return util_fast_exp2(k * x);
+}
+
+
+#define LOG2_TABLE_SIZE_LOG2 16
+#define LOG2_TABLE_SCALE (1 << LOG2_TABLE_SIZE_LOG2)
+#define LOG2_TABLE_SIZE (LOG2_TABLE_SCALE + 1)
+extern float log2_table[LOG2_TABLE_SIZE];
+
+
+/**
+ * Fast approximation to log2(x).
+ */
+static INLINE float
+util_fast_log2(float x)
+{
+ union fi num;
+ float epart, mpart;
+ num.f = x;
+ epart = (float)(((num.i & 0x7f800000) >> 23) - 127);
+ /* mpart = log2_table[mantissa*LOG2_TABLE_SCALE + 0.5] */
+ mpart = log2_table[((num.i & 0x007fffff) + (1 << (22 - LOG2_TABLE_SIZE_LOG2))) >> (23 - LOG2_TABLE_SIZE_LOG2)];
+ return epart + mpart;
+}
+
+
+/**
+ * Fast approximation to x^y.
+ */
+static INLINE float
+util_fast_pow(float x, float y)
+{
+ return util_fast_exp2(util_fast_log2(x) * y);
+}
+
+
+/**
+ * Floor(x), returned as int.
+ */
+static INLINE int
+util_ifloor(float f)
+{
+ int ai, bi;
+ double af, bf;
+ union fi u;
+ af = (3 << 22) + 0.5 + (double) f;
+ bf = (3 << 22) + 0.5 - (double) f;
+ u.f = (float) af; ai = u.i;
+ u.f = (float) bf; bi = u.i;
+ return (ai - bi) >> 1;
+}
+
+
+/**
+ * Round float to nearest int.
+ */
+static INLINE int
+util_iround(float f)
+{
+#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86)
+ int r;
+ __asm__ ("fistpl %0" : "=m" (r) : "t" (f) : "st");
+ return r;
+#elif defined(PIPE_CC_MSVC) && defined(PIPE_ARCH_X86)
+ int r;
+ _asm {
+ fld f
+ fistp r
+ }
+ return r;
+#else
+ if (f >= 0.0f)
+ return (int) (f + 0.5f);
+ else
+ return (int) (f - 0.5f);
+#endif
+}
+
+
+
+/**
+ * Test if x is NaN or +/- infinity.
+ */
+static INLINE boolean
+util_is_inf_or_nan(float x)
+{
+ union fi tmp;
+ tmp.f = x;
+ return !(int)((unsigned int)((tmp.i & 0x7fffffff)-0x7f800000) >> 31);
+}
+
+
+/**
+ * Test whether x is a power of two.
+ */
+static INLINE boolean
+util_is_pot(unsigned x)
+{
+ return (x & (x - 1)) == 0;
+}
+
+
+/**
+ * Find first bit set in word. Least significant bit is 1.
+ * Return 0 if no bits set.
+ */
+#if defined(_MSC_VER) && _MSC_VER >= 1300
+static INLINE
+unsigned long ffs( unsigned long u )
+{
+ unsigned long i;
+ if (_BitScanForward(&i, u))
+ return i + 1;
+ else
+ return 0;
+}
+#elif defined(PIPE_CC_MSVC) && defined(PIPE_ARCH_X86)
+static INLINE
+unsigned ffs( unsigned u )
+{
+ unsigned i;
+
+ if (u == 0) {
+ return 0;
+ }
+
+ __asm bsf eax, [u]
+ __asm inc eax
+ __asm mov [i], eax
+
+ return i;
+}
+#elif defined(__MINGW32__)
+#define ffs __builtin_ffs
+#endif
+
+
+/* Could also binary search for the highest bit.
+ */
+static INLINE unsigned
+util_unsigned_logbase2(unsigned n)
+{
+ unsigned log2 = 0;
+ while (n >>= 1)
+ ++log2;
+ return log2;
+}
+
+
+/**
+ * Return float bits.
+ */
+static INLINE unsigned
+fui( float f )
+{
+ union fi fi;
+ fi.f = f;
+ return fi.ui;
+}
+
+
+/**
+ * Convert ubyte to float in [0, 1].
+ * XXX a 256-entry lookup table would be slightly faster.
+ */
+static INLINE float
+ubyte_to_float(ubyte ub)
+{
+ return (float) ub * (1.0f / 255.0f);
+}
+
+
+/**
+ * Convert float in [0,1] to ubyte in [0,255] with clamping.
+ */
+static INLINE ubyte
+float_to_ubyte(float f)
+{
+ const int ieee_0996 = 0x3f7f0000; /* 0.996 or so */
+ union fi tmp;
+
+ tmp.f = f;
+ if (tmp.i < 0) {
+ return (ubyte) 0;
+ }
+ else if (tmp.i >= ieee_0996) {
+ return (ubyte) 255;
+ }
+ else {
+ tmp.f = tmp.f * (255.0f/256.0f) + 32768.0f;
+ return (ubyte) tmp.i;
+ }
+}
+
+
+/**
+ * Calc log base 2
+ */
+static INLINE unsigned
+util_logbase2(unsigned n)
+{
+ unsigned log2 = 0;
+ while (n >>= 1)
+ ++log2;
+ return log2;
+}
+
+
+/**
+ * Clamp X to [MIN, MAX].
+ * This is a macro to allow float, int, uint, etc. types.
+ */
+#define CLAMP( X, MIN, MAX ) ( (X)<(MIN) ? (MIN) : ((X)>(MAX) ? (MAX) : (X)) )
+
+#define MIN2( A, B ) ( (A)<(B) ? (A) : (B) )
+#define MAX2( A, B ) ( (A)>(B) ? (A) : (B) )
+
+
+static INLINE int
+align(int value, int alignment)
+{
+ return (value + alignment - 1) & ~(alignment - 1);
+}
+
+static INLINE unsigned
+minify(unsigned value)
+{
+ return MAX2(1, value >> 1);
+}
+
+#ifndef COPY_4V
+#define COPY_4V( DST, SRC ) \
+do { \
+ (DST)[0] = (SRC)[0]; \
+ (DST)[1] = (SRC)[1]; \
+ (DST)[2] = (SRC)[2]; \
+ (DST)[3] = (SRC)[3]; \
+} while (0)
+#endif
+
+
+#ifndef COPY_4FV
+#define COPY_4FV( DST, SRC ) COPY_4V(DST, SRC)
+#endif
+
+
+#ifndef ASSIGN_4V
+#define ASSIGN_4V( DST, V0, V1, V2, V3 ) \
+do { \
+ (DST)[0] = (V0); \
+ (DST)[1] = (V1); \
+ (DST)[2] = (V2); \
+ (DST)[3] = (V3); \
+} while (0)
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_MATH_H */
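Editor's note: a small sanity-check sketch for the table-based approximations above, not part of the patch. The tables are only filled in by util_init_math(), so it must run first; exp2f()/log2f() are assumed to come from a C99 libm and are used purely for comparison.

#include <math.h>
#include <stdio.h>
#include "util/u_math.h"

static void fast_math_example(void)
{
   float x = 2.5f;

   /* The lookup tables are populated by util_init_math(); calling the
    * util_fast_* helpers before this yields garbage. */
   util_init_math();

   printf("exp2: fast=%f libm=%f\n", util_fast_exp2(x), exp2f(x));
   printf("log2: fast=%f libm=%f\n", util_fast_log2(x), log2f(x));
   printf("pow:  fast=%f libm=%f\n", util_fast_pow(x, 3.0f), powf(x, 3.0f));

   printf("iround(2.5f) = %d\n", util_iround(2.5f));
   printf("float_to_ubyte(0.5f) = %u\n", (unsigned) float_to_ubyte(0.5f));
}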
diff --git a/src/gallium/auxiliary/util/u_memory.h b/src/gallium/auxiliary/util/u_memory.h
new file mode 100644
index 0000000000..c3f8c91833
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_memory.h
@@ -0,0 +1,242 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * Memory functions
+ */
+
+
+#ifndef U_MEMORY_H
+#define U_MEMORY_H
+
+
+#include "util/u_pointer.h"
+#include "util/u_debug.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Define ENOMEM for WINCE */
+#if (_WIN32_WCE < 600)
+#ifndef ENOMEM
+#define ENOMEM 12
+#endif
+#endif
+
+
+#if defined(PIPE_OS_WINDOWS) && defined(DEBUG)
+
+/* memory debugging */
+
+#include "util/u_debug.h"
+
+#define MALLOC( _size ) \
+ debug_malloc( __FILE__, __LINE__, __FUNCTION__, _size )
+#define CALLOC( _count, _size ) \
+ debug_calloc(__FILE__, __LINE__, __FUNCTION__, _count, _size )
+#define FREE( _ptr ) \
+ debug_free( __FILE__, __LINE__, __FUNCTION__, _ptr )
+#define REALLOC( _ptr, _old_size, _size ) \
+ debug_realloc( __FILE__, __LINE__, __FUNCTION__, _ptr, _old_size, _size )
+
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY)
+
+void * __stdcall
+EngAllocMem(
+ unsigned long Flags,
+ unsigned long MemSize,
+ unsigned long Tag );
+
+void __stdcall
+EngFreeMem(
+ void *Mem );
+
+#define MALLOC( _size ) EngAllocMem( 0, _size, 'D3AG' )
+#define _FREE( _ptr ) EngFreeMem( _ptr )
+
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_MINIPORT)
+
+void *
+ExAllocatePool(
+ unsigned long PoolType,
+ size_t NumberOfBytes);
+
+void
+ExFreePool(void *P);
+
+#define MALLOC(_size) ExAllocatePool(0, _size)
+#define _FREE(_ptr) ExFreePool(_ptr)
+
+#else
+
+#define MALLOC( SIZE ) malloc( SIZE )
+#define CALLOC( COUNT, SIZE ) calloc( COUNT, SIZE )
+#define FREE( PTR ) free( PTR )
+
+static INLINE void *
+_REALLOC( void *old_ptr, unsigned old_size, unsigned new_size )
+{
+ (void) old_size;
+ return realloc(old_ptr, new_size);
+}
+#define REALLOC( a, b, c ) _REALLOC( a, b, c )
+#endif
+
+
+#ifndef CALLOC
+static INLINE void *
+CALLOC( unsigned count, unsigned size )
+{
+ void *ptr = MALLOC( count * size );
+ if( ptr ) {
+ memset( ptr, 0, count * size );
+ }
+ return ptr;
+}
+#endif /* !CALLOC */
+
+#ifndef FREE
+static INLINE void
+FREE( void *ptr )
+{
+ if( ptr ) {
+ _FREE( ptr );
+ }
+}
+#endif /* !FREE */
+
+#ifndef REALLOC
+static INLINE void *
+REALLOC( void *old_ptr, unsigned old_size, unsigned new_size )
+{
+ void *new_ptr = NULL;
+
+ if (new_size != 0) {
+ unsigned copy_size = old_size < new_size ? old_size : new_size;
+ new_ptr = MALLOC( new_size );
+ if (new_ptr && old_ptr && copy_size) {
+ memcpy( new_ptr, old_ptr, copy_size );
+ }
+ }
+
+ FREE( old_ptr );
+ return new_ptr;
+}
+#endif /* !REALLOC */
+
+
+#define MALLOC_STRUCT(T) (struct T *) MALLOC(sizeof(struct T))
+
+#define CALLOC_STRUCT(T) (struct T *) CALLOC(1, sizeof(struct T))
+
+#define CALLOC_VARIANT_LENGTH_STRUCT(T,more_size) ((struct T *) CALLOC(1, sizeof(struct T) + more_size))
+
+
+/**
+ * Return memory allocated with the given byte alignment
+ */
+static INLINE void *
+align_malloc(size_t bytes, uint alignment)
+{
+#if defined(HAVE_POSIX_MEMALIGN)
+ void *mem;
+ alignment = (alignment + (uint)sizeof(void*) - 1) & ~((uint)sizeof(void*) - 1);
+ if(posix_memalign(& mem, alignment, bytes) != 0)
+ return NULL;
+ return mem;
+#else
+ char *ptr, *buf;
+
+ assert( alignment > 0 );
+
+ ptr = (char *) MALLOC(bytes + alignment + sizeof(void *));
+ if (!ptr)
+ return NULL;
+
+ buf = (char *) align_pointer( ptr + sizeof(void *), alignment );
+ *(char **)(buf - sizeof(void *)) = ptr;
+
+ return buf;
+#endif /* defined(HAVE_POSIX_MEMALIGN) */
+}
+
+/**
+ * Free memory returned by align_malloc().
+ */
+static INLINE void
+align_free(void *ptr)
+{
+#if defined(HAVE_POSIX_MEMALIGN)
+ FREE(ptr);
+#else
+ if (ptr) {
+ void **cubbyHole = (void **) ((char *) ptr - sizeof(void *));
+ void *realAddr = *cubbyHole;
+ FREE(realAddr);
+ }
+#endif /* defined(HAVE_POSIX_MEMALIGN) */
+}
+
+
+/**
+ * Duplicate a block of memory.
+ */
+static INLINE void *
+mem_dup(const void *src, uint size)
+{
+ void *dup = MALLOC(size);
+ if (dup)
+ memcpy(dup, src, size);
+ return dup;
+}
+
+
+/**
+ * Number of elements in an array.
+ */
+#ifndef Elements
+#define Elements(x) (sizeof(x)/sizeof((x)[0]))
+#endif
+
+
+/**
+ * Offset of a field in a struct, in bytes.
+ */
+#define Offset(TYPE, MEMBER) ((unsigned)&(((TYPE *)NULL)->MEMBER))
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* U_MEMORY_H */
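Editor's note: a brief usage sketch of the allocation wrappers above, not from the patch. The vertex_buffer struct and vb_* helpers are invented for illustration; the point is that MALLOC/CALLOC/REALLOC must be paired with FREE, and align_malloc() with align_free(), since on some platforms these route through non-libc back ends.

#include "util/u_memory.h"

/* Invented container type, just to exercise the wrappers above. */
struct vertex_buffer {
   unsigned count;
   float *data;
};

static struct vertex_buffer *vb_create(unsigned count)
{
   struct vertex_buffer *vb = CALLOC_STRUCT(vertex_buffer);
   if (!vb)
      return NULL;

   vb->count = count;
   /* 4 floats per vertex, 16-byte aligned for SSE-style access */
   vb->data = (float *) align_malloc(count * 4 * sizeof(float), 16);
   if (!vb->data) {
      FREE(vb);
      return NULL;
   }
   return vb;
}

static void vb_destroy(struct vertex_buffer *vb)
{
   if (!vb)
      return;
   align_free(vb->data);   /* memory from align_malloc() needs align_free() */
   FREE(vb);               /* memory from MALLOC/CALLOC needs FREE() */
}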
diff --git a/src/gallium/auxiliary/util/u_mm.c b/src/gallium/auxiliary/util/u_mm.c
new file mode 100644
index 0000000000..4b75d4ba1d
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_mm.c
@@ -0,0 +1,289 @@
+/**************************************************************************
+ *
+ * Copyright (C) 1999 Wittawat Yamwong
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * WITTAWAT YAMWONG, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+
+#include "util/u_memory.h"
+#include "util/u_mm.h"
+
+
+void
+u_mmDumpMemInfo(const struct mem_block *heap)
+{
+ debug_printf("Memory heap %p:\n", (void *) heap);
+ if (heap == 0) {
+ debug_printf(" heap == 0\n");
+ }
+ else {
+ const struct mem_block *p;
+
+ for (p = heap->next; p != heap; p = p->next) {
+ debug_printf(" Offset:%08x, Size:%08x, %c%c\n", p->ofs, p->size,
+ p->free ? 'F':'.',
+ p->reserved ? 'R':'.');
+ }
+
+ debug_printf("\nFree list:\n");
+
+ for (p = heap->next_free; p != heap; p = p->next_free) {
+ debug_printf(" FREE Offset:%08x, Size:%08x, %c%c\n", p->ofs, p->size,
+ p->free ? 'F':'.',
+ p->reserved ? 'R':'.');
+ }
+
+ }
+ debug_printf("End of memory blocks\n");
+}
+
+
+struct mem_block *
+u_mmInit(int ofs, int size)
+{
+ struct mem_block *heap, *block;
+
+ if (size <= 0)
+ return NULL;
+
+ heap = CALLOC_STRUCT(mem_block);
+ if (!heap)
+ return NULL;
+
+ block = CALLOC_STRUCT(mem_block);
+ if (!block) {
+ FREE(heap);
+ return NULL;
+ }
+
+ heap->next = block;
+ heap->prev = block;
+ heap->next_free = block;
+ heap->prev_free = block;
+
+ block->heap = heap;
+ block->next = heap;
+ block->prev = heap;
+ block->next_free = heap;
+ block->prev_free = heap;
+
+ block->ofs = ofs;
+ block->size = size;
+ block->free = 1;
+
+ return heap;
+}
+
+
+static struct mem_block *
+SliceBlock(struct mem_block *p,
+ int startofs, int size,
+ int reserved, int alignment)
+{
+ struct mem_block *newblock;
+
+ /* break left [p, newblock, p->next], then p = newblock */
+ if (startofs > p->ofs) {
+ newblock = CALLOC_STRUCT(mem_block);
+ if (!newblock)
+ return NULL;
+ newblock->ofs = startofs;
+ newblock->size = p->size - (startofs - p->ofs);
+ newblock->free = 1;
+ newblock->heap = p->heap;
+
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+
+ newblock->next_free = p->next_free;
+ newblock->prev_free = p;
+ p->next_free->prev_free = newblock;
+ p->next_free = newblock;
+
+ p->size -= newblock->size;
+ p = newblock;
+ }
+
+ /* break right, also [p, newblock, p->next] */
+ if (size < p->size) {
+ newblock = CALLOC_STRUCT(mem_block);
+ if (!newblock)
+ return NULL;
+ newblock->ofs = startofs + size;
+ newblock->size = p->size - size;
+ newblock->free = 1;
+ newblock->heap = p->heap;
+
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+
+ newblock->next_free = p->next_free;
+ newblock->prev_free = p;
+ p->next_free->prev_free = newblock;
+ p->next_free = newblock;
+
+ p->size = size;
+ }
+
+ /* p = middle block */
+ p->free = 0;
+
+ /* Remove p from the free list:
+ */
+ p->next_free->prev_free = p->prev_free;
+ p->prev_free->next_free = p->next_free;
+
+ p->next_free = 0;
+ p->prev_free = 0;
+
+ p->reserved = reserved;
+ return p;
+}
+
+
+struct mem_block *
+u_mmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
+{
+ struct mem_block *p;
+ const int mask = (1 << align2)-1;
+ int startofs = 0;
+ int endofs;
+
+ assert(size >= 0);
+ assert(align2 >= 0);
+ assert(align2 <= 12); /* sanity check, 2^12 (4KB) enough? */
+
+ if (!heap || align2 < 0 || size <= 0)
+ return NULL;
+
+ for (p = heap->next_free; p != heap; p = p->next_free) {
+ assert(p->free);
+
+ startofs = (p->ofs + mask) & ~mask;
+ if ( startofs < startSearch ) {
+ startofs = startSearch;
+ }
+ endofs = startofs+size;
+ if (endofs <= (p->ofs+p->size))
+ break;
+ }
+
+ if (p == heap)
+ return NULL;
+
+ assert(p->free);
+ p = SliceBlock(p,startofs,size,0,mask+1);
+
+ return p;
+}
+
+
+struct mem_block *
+u_mmFindBlock(struct mem_block *heap, int start)
+{
+ struct mem_block *p;
+
+ for (p = heap->next; p != heap; p = p->next) {
+ if (p->ofs == start)
+ return p;
+ }
+
+ return NULL;
+}
+
+
+static INLINE int
+Join2Blocks(struct mem_block *p)
+{
+ /* XXX there should be some assertions here */
+
+ /* NOTE: heap->free == 0 */
+
+ if (p->free && p->next->free) {
+ struct mem_block *q = p->next;
+
+ assert(p->ofs + p->size == q->ofs);
+ p->size += q->size;
+
+ p->next = q->next;
+ q->next->prev = p;
+
+ q->next_free->prev_free = q->prev_free;
+ q->prev_free->next_free = q->next_free;
+
+ FREE(q);
+ return 1;
+ }
+ return 0;
+}
+
+int
+u_mmFreeMem(struct mem_block *b)
+{
+ if (!b)
+ return 0;
+
+ if (b->free) {
+ debug_printf("block already free\n");
+ return -1;
+ }
+ if (b->reserved) {
+ debug_printf("block is reserved\n");
+ return -1;
+ }
+
+ b->free = 1;
+ b->next_free = b->heap->next_free;
+ b->prev_free = b->heap;
+ b->next_free->prev_free = b;
+ b->prev_free->next_free = b;
+
+ Join2Blocks(b);
+ if (b->prev != b->heap)
+ Join2Blocks(b->prev);
+
+ return 0;
+}
+
+
+void
+u_mmDestroy(struct mem_block *heap)
+{
+ struct mem_block *p;
+
+ if (!heap)
+ return;
+
+ for (p = heap->next; p != heap; ) {
+ struct mem_block *next = p->next;
+ FREE(p);
+ p = next;
+ }
+
+ FREE(heap);
+}
diff --git a/src/gallium/auxiliary/util/u_mm.h b/src/gallium/auxiliary/util/u_mm.h
new file mode 100644
index 0000000000..6b158aae6e
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_mm.h
@@ -0,0 +1,91 @@
+/**************************************************************************
+ *
+ * Copyright (C) 1999 Wittawat Yamwong
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * KEITH WHITWELL, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * @file
+ * Memory manager code. Primarily used by device drivers to manage texture
+ * heaps, etc.
+ */
+
+
+#ifndef _U_MM_H_
+#define _U_MM_H_
+
+
+struct mem_block {
+ struct mem_block *next, *prev;
+ struct mem_block *next_free, *prev_free;
+ struct mem_block *heap;
+ int ofs,size;
+ unsigned int free:1;
+ unsigned int reserved:1;
+};
+
+
+
+/**
+ * input: start offset and total size of the heap, in bytes
+ * return: a heap pointer if OK, NULL if error
+ */
+extern struct mem_block *u_mmInit(int ofs, int size);
+
+/**
+ * Allocate 'size' bytes with 2^align2 byte alignment,
+ * restricting the search to free memory after 'startSearch'.
+ * Depth and back buffers should be in different 4MB banks
+ * to get better page hits, if possible.
+ * input: size = size of block
+ * align2 = 2^align2 bytes alignment
+ * startSearch = linear offset from start of heap to begin search
+ * return: pointer to the allocated block, 0 if error
+ */
+extern struct mem_block *u_mmAllocMem(struct mem_block *heap, int size, int align2,
+ int startSearch);
+
+/**
+ * Free an allocated block.
+ * input: pointer to a block
+ * return: 0 if OK, -1 if error
+ */
+extern int u_mmFreeMem(struct mem_block *b);
+
+/**
+ * Find the block that starts at the given offset.
+ * input: pointer to a heap, start offset
+ * return: pointer to the block, or NULL if not found
+ */
+extern struct mem_block *u_mmFindBlock(struct mem_block *heap, int start);
+
+/**
+ * Destroy the memory manager, freeing all of its blocks.
+ */
+extern void u_mmDestroy(struct mem_block *mmInit);
+
+/**
+ * For debugging purposes.
+ */
+extern void u_mmDumpMemInfo(const struct mem_block *mmInit);
+
+#endif
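A sketch of how the u_mm interface above is typically driven (illustrative only, not part of the patch; the 1 MB region, the offsets and the function name are made up):

#include "util/u_mm.h"

static void
mm_example(void)
{
   /* Manage a 1 MB region that starts at device offset 0. */
   struct mem_block *heap = u_mmInit(0, 1024 * 1024);
   struct mem_block *buf;

   if (!heap)
      return;

   /* 4096 bytes with 2^12 = 4096-byte alignment, searching from offset 0. */
   buf = u_mmAllocMem(heap, 4096, 12, 0);
   if (buf) {
      /* buf->ofs is the allocation's offset within the managed region. */
      u_mmFreeMem(buf);
   }

   u_mmDestroy(heap);
}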
diff --git a/src/gallium/auxiliary/util/u_network.c b/src/gallium/auxiliary/util/u_network.c
new file mode 100644
index 0000000000..bc4b758406
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_network.c
@@ -0,0 +1,188 @@
+
+#include "pipe/p_compiler.h"
+#include "util/u_network.h"
+#include "util/u_debug.h"
+
+#if defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+# include <winsock2.h>
+# include <windows.h>
+#elif defined(PIPE_OS_LINUX) || defined(PIPE_OS_HAIKU)
+# include <sys/socket.h>
+# include <netinet/in.h>
+# include <unistd.h>
+# include <fcntl.h>
+# include <netdb.h>
+#else
+# warning "No socket implementation"
+#endif
+
+boolean
+u_socket_init(void)
+{
+#if defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+ WORD wVersionRequested;
+ WSADATA wsaData;
+ int err;
+
+ /* Use the MAKEWORD(lowbyte, highbyte) macro declared in Windef.h */
+ wVersionRequested = MAKEWORD(1, 1);
+
+ err = WSAStartup(wVersionRequested, &wsaData);
+ if (err != 0) {
+ debug_printf("WSAStartup failed with error: %d\n", err);
+ return FALSE;
+ }
+ return TRUE;
+#elif defined(PIPE_HAVE_SOCKETS)
+ return TRUE;
+#else
+ return FALSE;
+#endif
+}
+
+void
+u_socket_stop(void)
+{
+#if defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+ WSACleanup();
+#endif
+}
+
+void
+u_socket_close(int s)
+{
+ if (s < 0)
+ return;
+
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_HAIKU)
+ shutdown(s, SHUT_RDWR);
+ close(s);
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+ shutdown(s, SD_BOTH);
+ closesocket(s);
+#else
+ assert(0);
+#endif
+}
+
+int u_socket_accept(int s)
+{
+#if defined(PIPE_HAVE_SOCKETS)
+ return accept(s, NULL, NULL);
+#else
+ return -1;
+#endif
+}
+
+int
+u_socket_send(int s, void *data, size_t size)
+{
+#if defined(PIPE_HAVE_SOCKETS)
+ return send(s, data, size, 0);
+#else
+ return -1;
+#endif
+}
+
+int
+u_socket_peek(int s, void *data, size_t size)
+{
+#if defined(PIPE_HAVE_SOCKETS)
+ return recv(s, data, size, MSG_PEEK);
+#else
+ return -1;
+#endif
+}
+
+int
+u_socket_recv(int s, void *data, size_t size)
+{
+#if defined(PIPE_HAVE_SOCKETS)
+ return recv(s, data, size, 0);
+#else
+ return -1;
+#endif
+}
+
+int
+u_socket_connect(const char *hostname, uint16_t port)
+{
+#if defined(PIPE_HAVE_SOCKETS)
+ int s;
+ struct sockaddr_in sa;
+ struct hostent *host = NULL;
+
+ memset(&sa, 0, sizeof(struct sockaddr_in));
+ host = gethostbyname(hostname);
+ if (!host)
+ return -1;
+
+ memcpy((char *)&sa.sin_addr,host->h_addr,host->h_length);
+ sa.sin_family= host->h_addrtype;
+ sa.sin_port = htons(port);
+
+ s = socket(host->h_addrtype, SOCK_STREAM, IPPROTO_TCP);
+ if (s < 0)
+ return -1;
+
+ if (connect(s, (struct sockaddr *)&sa, sizeof(sa))) {
+ u_socket_close(s);
+ return -1;
+ }
+
+ return s;
+#else
+ assert(0);
+ return -1;
+#endif
+}
+
+int
+u_socket_listen_on_port(uint16_t portnum)
+{
+#if defined(PIPE_HAVE_SOCKETS)
+ int s;
+ struct sockaddr_in sa;
+ memset(&sa, 0, sizeof(struct sockaddr_in));
+
+ sa.sin_family = AF_INET;
+ sa.sin_port = htons(portnum);
+
+ s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+ if (s < 0)
+ return -1;
+
+ if (bind(s, (struct sockaddr *)&sa, sizeof(struct sockaddr_in)) == -1) {
+ u_socket_close(s);
+ return -1;
+ }
+
+ listen(s, 0);
+
+ return s;
+#else
+ assert(0);
+ return -1;
+#endif
+}
+
+void
+u_socket_block(int s, boolean block)
+{
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_HAIKU)
+ int old = fcntl(s, F_GETFL, 0);
+ if (old == -1)
+ return;
+
+ /* set or clear O_NONBLOCK according to 'block' */
+ if (block)
+ fcntl(s, F_SETFL, old & ~O_NONBLOCK);
+ else
+ fcntl(s, F_SETFL, old | O_NONBLOCK);
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+ u_long iMode = block ? 0 : 1;
+ ioctlsocket(s, FIONBIO, &iMode);
+#else
+ assert(0);
+#endif
+}
diff --git a/src/gallium/auxiliary/util/u_network.h b/src/gallium/auxiliary/util/u_network.h
new file mode 100644
index 0000000000..8c778f492c
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_network.h
@@ -0,0 +1,24 @@
+
+#ifndef _U_NETWORK_H_
+#define _U_NETWORK_H_
+
+#include "pipe/p_compiler.h"
+
+#if defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+# define PIPE_HAVE_SOCKETS
+#elif defined(PIPE_OS_LINUX) || defined(PIPE_OS_HAIKU)
+# define PIPE_HAVE_SOCKETS
+#endif
+
+boolean u_socket_init(void);
+void u_socket_stop(void);
+void u_socket_close(int s);
+int u_socket_listen_on_port(uint16_t portnum);
+int u_socket_accept(int s);
+int u_socket_connect(const char *host, uint16_t port);
+int u_socket_send(int s, void *data, size_t size);
+int u_socket_peek(int s, void *data, size_t size);
+int u_socket_recv(int s, void *data, size_t size);
+void u_socket_block(int s, boolean block);
+
+#endif
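A minimal client-side sketch of the socket wrappers above (illustrative only, not part of the patch; the host name, the port and the function name are placeholders):

#include <string.h>
#include "util/u_network.h"

static void
net_example(void)
{
   char msg[] = "hello";
   int s;

   if (!u_socket_init())
      return;

   s = u_socket_connect("localhost", 13370);   /* hypothetical endpoint */
   if (s >= 0) {
      u_socket_send(s, msg, strlen(msg));
      u_socket_close(s);
   }

   u_socket_stop();
}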
diff --git a/src/gallium/auxiliary/util/u_pack_color.h b/src/gallium/auxiliary/util/u_pack_color.h
new file mode 100644
index 0000000000..eda883b3b9
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_pack_color.h
@@ -0,0 +1,502 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Functions to produce packed colors/Z from floats.
+ */
+
+
+#ifndef U_PACK_COLOR_H
+#define U_PACK_COLOR_H
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_format.h"
+#include "util/u_math.h"
+
+
+/**
+ * Pack ubyte R,G,B,A into dest pixel.
+ */
+static INLINE void
+util_pack_color_ub(ubyte r, ubyte g, ubyte b, ubyte a,
+ enum pipe_format format, void *dest)
+{
+ switch (format) {
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ {
+ uint *d = (uint *) dest;
+ *d = (r << 24) | (g << 16) | (b << 8) | a;
+ }
+ return;
+ case PIPE_FORMAT_R8G8B8X8_UNORM:
+ {
+ uint *d = (uint *) dest;
+ *d = (r << 24) | (g << 16) | (b << 8) | 0xff;
+ }
+ return;
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ {
+ uint *d = (uint *) dest;
+ *d = (a << 24) | (r << 16) | (g << 8) | b;
+ }
+ return;
+ case PIPE_FORMAT_X8R8G8B8_UNORM:
+ {
+ uint *d = (uint *) dest;
+ *d = (0xff << 24) | (r << 16) | (g << 8) | b;
+ }
+ return;
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ {
+ uint *d = (uint *) dest;
+ *d = (b << 24) | (g << 16) | (r << 8) | a;
+ }
+ return;
+ case PIPE_FORMAT_B8G8R8X8_UNORM:
+ {
+ uint *d = (uint *) dest;
+ *d = (b << 24) | (g << 16) | (r << 8) | 0xff;
+ }
+ return;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ {
+ ushort *d = (ushort *) dest;
+ *d = ((r & 0xf8) << 8) | ((g & 0xfc) << 3) | (b >> 3);
+ }
+ return;
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ {
+ ushort *d = (ushort *) dest;
+ *d = ((a & 0x80) << 8) | ((r & 0xf8) << 7) | ((g & 0xf8) << 2) | (b >> 3);
+ }
+ return;
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ {
+ ushort *d = (ushort *) dest;
+ *d = ((a & 0xf0) << 8) | ((r & 0xf0) << 4) | ((g & 0xf0) << 0) | (b >> 4);
+ }
+ return;
+ case PIPE_FORMAT_A8_UNORM:
+ {
+ ubyte *d = (ubyte *) dest;
+ *d = a;
+ }
+ return;
+ case PIPE_FORMAT_L8_UNORM:
+ case PIPE_FORMAT_I8_UNORM:
+ {
+ ubyte *d = (ubyte *) dest;
+ *d = r;
+ }
+ return;
+ case PIPE_FORMAT_R32G32B32A32_FLOAT:
+ {
+ float *d = (float *) dest;
+ d[0] = (float)r / 255.0f;
+ d[1] = (float)g / 255.0f;
+ d[2] = (float)b / 255.0f;
+ d[3] = (float)a / 255.0f;
+ }
+ return;
+ case PIPE_FORMAT_R32G32B32_FLOAT:
+ {
+ float *d = (float *) dest;
+ d[0] = (float)r / 255.0f;
+ d[1] = (float)g / 255.0f;
+ d[2] = (float)b / 255.0f;
+ }
+ return;
+
+ /* XXX lots more cases to add */
+ default:
+ debug_print_format("gallium: unhandled format in util_pack_color_ub()", format);
+ assert(0);
+ }
+}
+
+
+/**
+ * Unpack RGBA from a packed pixel, returning values as ubytes in [0,255].
+ */
+static INLINE void
+util_unpack_color_ub(enum pipe_format format, const void *src,
+ ubyte *r, ubyte *g, ubyte *b, ubyte *a)
+{
+ switch (format) {
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ {
+ uint p = ((const uint *) src)[0];
+ *r = (ubyte) ((p >> 24) & 0xff);
+ *g = (ubyte) ((p >> 16) & 0xff);
+ *b = (ubyte) ((p >> 8) & 0xff);
+ *a = (ubyte) ((p >> 0) & 0xff);
+ }
+ return;
+ case PIPE_FORMAT_R8G8B8X8_UNORM:
+ {
+ uint p = ((const uint *) src)[0];
+ *r = (ubyte) ((p >> 24) & 0xff);
+ *g = (ubyte) ((p >> 16) & 0xff);
+ *b = (ubyte) ((p >> 8) & 0xff);
+ *a = (ubyte) 0xff;
+ }
+ return;
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ {
+ uint p = ((const uint *) src)[0];
+ *r = (ubyte) ((p >> 16) & 0xff);
+ *g = (ubyte) ((p >> 8) & 0xff);
+ *b = (ubyte) ((p >> 0) & 0xff);
+ *a = (ubyte) ((p >> 24) & 0xff);
+ }
+ return;
+ case PIPE_FORMAT_X8R8G8B8_UNORM:
+ {
+ uint p = ((const uint *) src)[0];
+ *r = (ubyte) ((p >> 16) & 0xff);
+ *g = (ubyte) ((p >> 8) & 0xff);
+ *b = (ubyte) ((p >> 0) & 0xff);
+ *a = (ubyte) 0xff;
+ }
+ return;
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ {
+ uint p = ((const uint *) src)[0];
+ *r = (ubyte) ((p >> 8) & 0xff);
+ *g = (ubyte) ((p >> 16) & 0xff);
+ *b = (ubyte) ((p >> 24) & 0xff);
+ *a = (ubyte) ((p >> 0) & 0xff);
+ }
+ return;
+ case PIPE_FORMAT_B8G8R8X8_UNORM:
+ {
+ uint p = ((const uint *) src)[0];
+ *r = (ubyte) ((p >> 8) & 0xff);
+ *g = (ubyte) ((p >> 16) & 0xff);
+ *b = (ubyte) ((p >> 24) & 0xff);
+ *a = (ubyte) 0xff;
+ }
+ return;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ {
+ ushort p = ((const ushort *) src)[0];
+ *r = (ubyte) (((p >> 8) & 0xf8) | ((p >> 13) & 0x7));
+ *g = (ubyte) (((p >> 3) & 0xfc) | ((p >> 9) & 0x3));
+ *b = (ubyte) (((p << 3) & 0xf8) | ((p >> 2) & 0x7));
+ *a = (ubyte) 0xff;
+ }
+ return;
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ {
+ ushort p = ((const ushort *) src)[0];
+ *r = (ubyte) (((p >> 7) & 0xf8) | ((p >> 12) & 0x7));
+ *g = (ubyte) (((p >> 2) & 0xf8) | ((p >> 7) & 0x7));
+ *b = (ubyte) (((p << 3) & 0xf8) | ((p >> 2) & 0x7));
+ *a = (ubyte) (0xff * (p >> 15));
+ }
+ return;
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ {
+ ushort p = ((const ushort *) src)[0];
+ *r = (ubyte) (((p >> 4) & 0xf0) | ((p >> 8) & 0xf));
+ *g = (ubyte) (((p >> 0) & 0xf0) | ((p >> 4) & 0xf));
+ *b = (ubyte) (((p << 4) & 0xf0) | ((p >> 0) & 0xf));
+ *a = (ubyte) (((p >> 8) & 0xf0) | ((p >> 12) & 0xf));
+ }
+ return;
+ case PIPE_FORMAT_A8_UNORM:
+ {
+ ubyte p = ((const ubyte *) src)[0];
+ *r = *g = *b = (ubyte) 0xff;
+ *a = p;
+ }
+ return;
+ case PIPE_FORMAT_L8_UNORM:
+ {
+ ubyte p = ((const ubyte *) src)[0];
+ *r = *g = *b = p;
+ *a = (ubyte) 0xff;
+ }
+ return;
+ case PIPE_FORMAT_I8_UNORM:
+ {
+ ubyte p = ((const ubyte *) src)[0];
+ *r = *g = *b = *a = p;
+ }
+ return;
+ case PIPE_FORMAT_R32G32B32A32_FLOAT:
+ {
+ const float *p = (const float *) src;
+ *r = float_to_ubyte(p[0]);
+ *g = float_to_ubyte(p[1]);
+ *b = float_to_ubyte(p[2]);
+ *a = float_to_ubyte(p[3]);
+ }
+ return;
+ case PIPE_FORMAT_R32G32B32_FLOAT:
+ {
+ const float *p = (const float *) src;
+ *r = float_to_ubyte(p[0]);
+ *g = float_to_ubyte(p[1]);
+ *b = float_to_ubyte(p[2]);
+ *a = (ubyte) 0xff;
+ }
+ return;
+
+ case PIPE_FORMAT_R32G32_FLOAT:
+ {
+ const float *p = (const float *) src;
+ *r = float_to_ubyte(p[0]);
+ *g = float_to_ubyte(p[1]);
+ *b = *a = (ubyte) 0xff;
+ }
+ return;
+
+ case PIPE_FORMAT_R32_FLOAT:
+ {
+ const float *p = (const float *) src;
+ *r = float_to_ubyte(p[0]);
+ *g = *b = *a = (ubyte) 0xff;
+ }
+ return;
+
+ /* XXX lots more cases to add */
+ default:
+ debug_print_format("gallium: unhandled format in util_unpack_color_ub()",
+ format);
+ assert(0);
+ }
+}
+
+
+
+/**
+ * Note rgba outside [0,1] will be clamped for int pixel formats.
+ */
+static INLINE void
+util_pack_color(const float rgba[4], enum pipe_format format, void *dest)
+{
+ ubyte r, g, b, a;
+
+ if (pf_size_x(format) <= 8) {
+ /* format uses 8-bit components or less */
+ r = float_to_ubyte(rgba[0]);
+ g = float_to_ubyte(rgba[1]);
+ b = float_to_ubyte(rgba[2]);
+ a = float_to_ubyte(rgba[3]);
+ }
+
+ switch (format) {
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ {
+ uint *d = (uint *) dest;
+ *d = (r << 24) | (g << 16) | (b << 8) | a;
+ }
+ return;
+ case PIPE_FORMAT_R8G8B8X8_UNORM:
+ {
+ uint *d = (uint *) dest;
+ *d = (r << 24) | (g << 16) | (b << 8) | 0xff;
+ }
+ return;
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ {
+ uint *d = (uint *) dest;
+ *d = (a << 24) | (r << 16) | (g << 8) | b;
+ }
+ return;
+ case PIPE_FORMAT_X8R8G8B8_UNORM:
+ {
+ uint *d = (uint *) dest;
+ *d = (0xff << 24) | (r << 16) | (g << 8) | b;
+ }
+ return;
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ {
+ uint *d = (uint *) dest;
+ *d = (b << 24) | (g << 16) | (r << 8) | a;
+ }
+ return;
+ case PIPE_FORMAT_B8G8R8X8_UNORM:
+ {
+ uint *d = (uint *) dest;
+ *d = (b << 24) | (g << 16) | (r << 8) | 0xff;
+ }
+ return;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ {
+ ushort *d = (ushort *) dest;
+ *d = ((r & 0xf8) << 8) | ((g & 0xfc) << 3) | (b >> 3);
+ }
+ return;
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ {
+ ushort *d = (ushort *) dest;
+ *d = ((a & 0x80) << 8) | ((r & 0xf8) << 7) | ((g & 0xf8) << 2) | (b >> 3);
+ }
+ return;
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ {
+ ushort *d = (ushort *) dest;
+ *d = ((a & 0xf0) << 8) | ((r & 0xf0) << 4) | ((g & 0xf0) << 0) | (b >> 4);
+ }
+ return;
+ case PIPE_FORMAT_A8_UNORM:
+ {
+ ubyte *d = (ubyte *) dest;
+ *d = a;
+ }
+ return;
+ case PIPE_FORMAT_L8_UNORM:
+ case PIPE_FORMAT_I8_UNORM:
+ {
+ ubyte *d = (ubyte *) dest;
+ *d = r;
+ }
+ return;
+ case PIPE_FORMAT_R32G32B32A32_FLOAT:
+ {
+ float *d = (float *) dest;
+ d[0] = rgba[0];
+ d[1] = rgba[1];
+ d[2] = rgba[2];
+ d[3] = rgba[3];
+ }
+ return;
+ case PIPE_FORMAT_R32G32B32_FLOAT:
+ {
+ float *d = (float *) dest;
+ d[0] = rgba[0];
+ d[1] = rgba[1];
+ d[2] = rgba[2];
+ }
+ return;
+ /* XXX lots more cases to add */
+ default:
+ debug_print_format("gallium: unhandled format in util_pack_color()", format);
+ assert(0);
+ }
+}
+
+
+/**
+ * Note: it's assumed that z is in [0,1]
+ */
+static INLINE uint
+util_pack_z(enum pipe_format format, double z)
+{
+ if (z == 0.0)
+ return 0;
+
+ switch (format) {
+ case PIPE_FORMAT_Z16_UNORM:
+ if (z == 1.0)
+ return 0xffff;
+ return (uint) (z * 0xffff);
+ case PIPE_FORMAT_Z32_UNORM:
+ /* special-case to avoid overflow */
+ if (z == 1.0)
+ return 0xffffffff;
+ return (uint) (z * 0xffffffff);
+ case PIPE_FORMAT_S8Z24_UNORM:
+ case PIPE_FORMAT_X8Z24_UNORM:
+ if (z == 1.0)
+ return 0xffffff;
+ return (uint) (z * 0xffffff);
+ case PIPE_FORMAT_Z24S8_UNORM:
+ case PIPE_FORMAT_Z24X8_UNORM:
+ if (z == 1.0)
+ return 0xffffff00;
+ return ((uint) (z * 0xffffff)) << 8;
+ case PIPE_FORMAT_S8_UNORM:
+ /* this case can be reached via util_pack_z_stencil() */
+ return 0;
+ default:
+ debug_print_format("gallium: unhandled format in util_pack_z()", format);
+ assert(0);
+ return 0;
+ }
+}
+
+
+/**
+ * Pack Z and/or stencil values into a 32-bit value described by format.
+ * Note: it's assumed that z is in [0,1] and s in [0,255]
+ */
+static INLINE uint
+util_pack_z_stencil(enum pipe_format format, double z, uint s)
+{
+ unsigned packed = util_pack_z(format, z);
+
+ switch (format) {
+ case PIPE_FORMAT_S8Z24_UNORM:
+ packed |= s << 24;
+ break;
+ case PIPE_FORMAT_Z24S8_UNORM:
+ packed |= s;
+ break;
+ case PIPE_FORMAT_S8_UNORM:
+ packed |= s;
+ break;
+ default:
+ break;
+ }
+
+ return packed;
+}
+
+
+/**
+ * Pack 4 ubytes into a 4-byte word
+ */
+static INLINE unsigned
+pack_ub4(ubyte b0, ubyte b1, ubyte b2, ubyte b3)
+{
+ return ((((unsigned int)b0) << 0) |
+ (((unsigned int)b1) << 8) |
+ (((unsigned int)b2) << 16) |
+ (((unsigned int)b3) << 24));
+}
+
+
+/**
+ * Pack/convert 4 floats into one 4-byte word.
+ */
+static INLINE unsigned
+pack_ui32_float4(float a, float b, float c, float d)
+{
+ return pack_ub4( float_to_ubyte(a),
+ float_to_ubyte(b),
+ float_to_ubyte(c),
+ float_to_ubyte(d) );
+}
+
+
+
+#endif /* U_PACK_COLOR_H */
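A small sketch of the packing helpers above (illustrative only, not part of the patch; the color, depth and stencil values are arbitrary):

#include "util/u_pack_color.h"

static void
pack_example(void)
{
   uint pixel, zs;

   /* Pack opaque orange into a 32-bit A8R8G8B8 pixel. */
   util_pack_color_ub(0xff, 0x80, 0x00, 0xff,
                      PIPE_FORMAT_A8R8G8B8_UNORM, &pixel);

   /* Pack depth 0.5 plus stencil 0xff for a combined S8Z24 buffer. */
   zs = util_pack_z_stencil(PIPE_FORMAT_S8Z24_UNORM, 0.5, 0xff);

   (void) pixel;
   (void) zs;
}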
diff --git a/src/gallium/auxiliary/util/u_pointer.h b/src/gallium/auxiliary/util/u_pointer.h
new file mode 100644
index 0000000000..e1af9f11cb
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_pointer.h
@@ -0,0 +1,107 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef U_POINTER_H
+#define U_POINTER_H
+
+#include "pipe/p_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static INLINE intptr_t
+pointer_to_intptr( const void *p )
+{
+ union {
+ const void *p;
+ intptr_t i;
+ } pi;
+ pi.p = p;
+ return pi.i;
+}
+
+static INLINE void *
+intptr_to_pointer( intptr_t i )
+{
+ union {
+ void *p;
+ intptr_t i;
+ } pi;
+ pi.i = i;
+ return pi.p;
+}
+
+static INLINE uintptr_t
+pointer_to_uintptr( const void *ptr )
+{
+ union {
+ const void *p;
+ uintptr_t u;
+ } pu;
+ pu.p = ptr;
+ return pu.u;
+}
+
+static INLINE void *
+uintptr_to_pointer( uintptr_t u )
+{
+ union {
+ void *p;
+ uintptr_t u;
+ } pu;
+ pu.u = u;
+ return pu.p;
+}
+
+/**
+ * Return a pointer aligned to next multiple of N bytes.
+ */
+static INLINE void *
+align_pointer( const void *unaligned, uintptr_t alignment )
+{
+ uintptr_t aligned = (pointer_to_uintptr( unaligned ) + alignment - 1) & ~(alignment - 1);
+ return uintptr_to_pointer( aligned );
+}
+
+
+/**
+ * Return a pointer aligned to next multiple of 16 bytes.
+ */
+static INLINE void *
+align16( void *unaligned )
+{
+ return align_pointer( unaligned, 16 );
+}
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_POINTER_H */
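A tiny sketch of the alignment helpers above (illustrative only, not part of the patch; the scratch buffer and the function name are made up):

#include <assert.h>
#include "util/u_pointer.h"

static void
pointer_example(void)
{
   char scratch[64 + 16];   /* hypothetical local buffer */

   /* Round the address of 'scratch' up to the next 16-byte boundary. */
   void *aligned = align16(scratch);

   assert(pointer_to_uintptr(aligned) % 16 == 0);
   (void) aligned;
}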
diff --git a/src/gallium/auxiliary/util/u_prim.h b/src/gallium/auxiliary/util/u_prim.h
new file mode 100644
index 0000000000..a9b533eea7
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_prim.h
@@ -0,0 +1,138 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef U_PRIM_H
+#define U_PRIM_H
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "pipe/p_defines.h"
+
+static INLINE boolean u_validate_pipe_prim( unsigned pipe_prim, unsigned nr )
+{
+ boolean ok = TRUE;
+
+ switch (pipe_prim) {
+ case PIPE_PRIM_POINTS:
+ ok = (nr >= 1);
+ break;
+ case PIPE_PRIM_LINES:
+ ok = (nr >= 2);
+ break;
+ case PIPE_PRIM_LINE_STRIP:
+ case PIPE_PRIM_LINE_LOOP:
+ ok = (nr >= 2);
+ break;
+ case PIPE_PRIM_TRIANGLES:
+ ok = (nr >= 3);
+ break;
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ case PIPE_PRIM_TRIANGLE_FAN:
+ case PIPE_PRIM_POLYGON:
+ ok = (nr >= 3);
+ break;
+ case PIPE_PRIM_QUADS:
+ ok = (nr >= 4);
+ break;
+ case PIPE_PRIM_QUAD_STRIP:
+ ok = (nr >= 4);
+ break;
+ default:
+ ok = 0;
+ break;
+ }
+
+ return ok;
+}
+
+
+static INLINE boolean u_trim_pipe_prim( unsigned pipe_prim, unsigned *nr )
+{
+ boolean ok = TRUE;
+
+ switch (pipe_prim) {
+ case PIPE_PRIM_POINTS:
+ ok = (*nr >= 1);
+ break;
+ case PIPE_PRIM_LINES:
+ ok = (*nr >= 2);
+ *nr -= (*nr % 2);
+ break;
+ case PIPE_PRIM_LINE_STRIP:
+ case PIPE_PRIM_LINE_LOOP:
+ ok = (*nr >= 2);
+ break;
+ case PIPE_PRIM_TRIANGLES:
+ ok = (*nr >= 3);
+ *nr -= (*nr % 3);
+ break;
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ case PIPE_PRIM_TRIANGLE_FAN:
+ case PIPE_PRIM_POLYGON:
+ ok = (*nr >= 3);
+ break;
+ case PIPE_PRIM_QUADS:
+ ok = (*nr >= 4);
+ *nr -= (*nr % 4);
+ break;
+ case PIPE_PRIM_QUAD_STRIP:
+ ok = (*nr >= 4);
+ *nr -= (*nr % 2);
+ break;
+ default:
+ ok = 0;
+ break;
+ }
+
+ if (!ok)
+ *nr = 0;
+
+ return ok;
+}
+
+
+static INLINE unsigned u_reduced_prim( unsigned pipe_prim )
+{
+ switch (pipe_prim) {
+ case PIPE_PRIM_POINTS:
+ return PIPE_PRIM_POINTS;
+
+ case PIPE_PRIM_LINES:
+ case PIPE_PRIM_LINE_STRIP:
+ case PIPE_PRIM_LINE_LOOP:
+ return PIPE_PRIM_LINES;
+
+ default:
+ return PIPE_PRIM_TRIANGLES;
+ }
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_PRIM_H */
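A sketch of trimming a vertex count with u_trim_pipe_prim() before drawing (illustrative only, not part of the patch; the count of 8 is just an example):

#include "util/u_prim.h"

static unsigned
prim_example(void)
{
   unsigned nr = 8;   /* hypothetical incoming vertex count */

   /* 8 vertices cannot form whole triangles; trim down to 6 (two tris). */
   if (!u_trim_pipe_prim(PIPE_PRIM_TRIANGLES, &nr))
      return 0;   /* fewer than 3 vertices: nothing to draw */

   return nr;     /* 6 */
}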
diff --git a/src/gallium/auxiliary/util/u_rect.c b/src/gallium/auxiliary/util/u_rect.c
new file mode 100644
index 0000000000..9866b6fc8a
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_rect.c
@@ -0,0 +1,301 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Rectangle-related helper functions.
+ */
+
+
+#include "pipe/p_defines.h"
+#include "pipe/p_format.h"
+#include "pipe/p_context.h"
+#include "pipe/p_screen.h"
+#include "util/u_rect.h"
+
+
+/**
+ * Copy 2D rect from one place to another.
+ * Position and sizes are in pixels.
+ * src_stride may be negative to do a vertical flip of pixels from the source.
+ */
+void
+util_copy_rect(ubyte * dst,
+ const struct pipe_format_block *block,
+ unsigned dst_stride,
+ unsigned dst_x,
+ unsigned dst_y,
+ unsigned width,
+ unsigned height,
+ const ubyte * src,
+ int src_stride,
+ unsigned src_x,
+ int src_y)
+{
+ unsigned i;
+ int src_stride_pos = src_stride < 0 ? -src_stride : src_stride;
+
+ assert(block->size > 0);
+ assert(block->width > 0);
+ assert(block->height > 0);
+ assert(src_x >= 0);
+ assert(src_y >= 0);
+ assert(dst_x >= 0);
+ assert(dst_y >= 0);
+
+ dst_x /= block->width;
+ dst_y /= block->height;
+ width = (width + block->width - 1)/block->width;
+ height = (height + block->height - 1)/block->height;
+ src_x /= block->width;
+ src_y /= block->height;
+
+ dst += dst_x * block->size;
+ src += src_x * block->size;
+ dst += dst_y * dst_stride;
+ src += src_y * src_stride_pos;
+ width *= block->size;
+
+ if (width == dst_stride && width == src_stride)
+ memcpy(dst, src, height * width);
+ else {
+ for (i = 0; i < height; i++) {
+ memcpy(dst, src, width);
+ dst += dst_stride;
+ src += src_stride;
+ }
+ }
+}
+
+void
+util_fill_rect(ubyte * dst,
+ const struct pipe_format_block *block,
+ unsigned dst_stride,
+ unsigned dst_x,
+ unsigned dst_y,
+ unsigned width,
+ unsigned height,
+ uint32_t value)
+{
+ unsigned i, j;
+ unsigned width_size;
+
+ assert(block->size > 0);
+ assert(block->width > 0);
+ assert(block->height > 0);
+ assert(dst_x >= 0);
+ assert(dst_y >= 0);
+
+ dst_x /= block->width;
+ dst_y /= block->height;
+ width = (width + block->width - 1)/block->width;
+ height = (height + block->height - 1)/block->height;
+
+ dst += dst_x * block->size;
+ dst += dst_y * dst_stride;
+ width_size = width * block->size;
+
+ switch (block->size) {
+ case 1:
+ if(dst_stride == width_size)
+ memset(dst, (ubyte) value, height * width_size);
+ else {
+ for (i = 0; i < height; i++) {
+ memset(dst, (ubyte) value, width_size);
+ dst += dst_stride;
+ }
+ }
+ break;
+ case 2:
+ for (i = 0; i < height; i++) {
+ uint16_t *row = (uint16_t *)dst;
+ for (j = 0; j < width; j++)
+ *row++ = (uint16_t) value;
+ dst += dst_stride;
+ }
+ break;
+ case 4:
+ for (i = 0; i < height; i++) {
+ uint32_t *row = (uint32_t *)dst;
+ for (j = 0; j < width; j++)
+ *row++ = value;
+ dst += dst_stride;
+ }
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
+
+
+
+/**
+ * Fallback function for pipe->surface_copy().
+ * Note: (X,Y)=(0,0) is always the upper-left corner.
+ * If do_flip is set, flip the image vertically on its way from the src rect to the dst rect.
+ * XXX should probably put this in new u_surface.c file...
+ */
+void
+util_surface_copy(struct pipe_context *pipe,
+ boolean do_flip,
+ struct pipe_surface *dst,
+ unsigned dst_x, unsigned dst_y,
+ struct pipe_surface *src,
+ unsigned src_x, unsigned src_y,
+ unsigned w, unsigned h)
+{
+ struct pipe_screen *screen = pipe->screen;
+ struct pipe_transfer *src_trans, *dst_trans;
+ void *dst_map;
+ const void *src_map;
+
+ assert(src->texture && dst->texture);
+ if (!src->texture || !dst->texture)
+ return;
+ src_trans = screen->get_tex_transfer(screen,
+ src->texture,
+ src->face,
+ src->level,
+ src->zslice,
+ PIPE_TRANSFER_READ,
+ src_x, src_y, w, h);
+
+ dst_trans = screen->get_tex_transfer(screen,
+ dst->texture,
+ dst->face,
+ dst->level,
+ dst->zslice,
+ PIPE_TRANSFER_WRITE,
+ dst_x, dst_y, w, h);
+
+ assert(dst_trans->block.size == src_trans->block.size);
+ assert(dst_trans->block.width == src_trans->block.width);
+ assert(dst_trans->block.height == src_trans->block.height);
+
+ src_map = pipe->screen->transfer_map(screen, src_trans);
+ dst_map = pipe->screen->transfer_map(screen, dst_trans);
+
+ assert(src_map);
+ assert(dst_map);
+
+ if (src_map && dst_map) {
+ /* If do_flip, invert src_y position and pass negative src stride */
+ util_copy_rect(dst_map,
+ &dst_trans->block,
+ dst_trans->stride,
+ 0, 0,
+ w, h,
+ src_map,
+ do_flip ? -(int) src_trans->stride : src_trans->stride,
+ 0,
+ do_flip ? h - 1 : 0);
+ }
+
+ pipe->screen->transfer_unmap(pipe->screen, src_trans);
+ pipe->screen->transfer_unmap(pipe->screen, dst_trans);
+
+ screen->tex_transfer_destroy(src_trans);
+ screen->tex_transfer_destroy(dst_trans);
+}
+
+
+
+#define UBYTE_TO_USHORT(B) ((B) | ((B) << 8))
+
+
+/**
+ * Fallback for pipe->surface_fill() function.
+ * XXX should probably put this in new u_surface.c file...
+ */
+void
+util_surface_fill(struct pipe_context *pipe,
+ struct pipe_surface *dst,
+ unsigned dstx, unsigned dsty,
+ unsigned width, unsigned height, unsigned value)
+{
+ struct pipe_screen *screen = pipe->screen;
+ struct pipe_transfer *dst_trans;
+ void *dst_map;
+
+ assert(dst->texture);
+ if (!dst->texture)
+ return;
+ dst_trans = screen->get_tex_transfer(screen,
+ dst->texture,
+ dst->face,
+ dst->level,
+ dst->zslice,
+ PIPE_TRANSFER_WRITE,
+ dstx, dsty, width, height);
+
+ dst_map = pipe->screen->transfer_map(screen, dst_trans);
+
+ assert(dst_map);
+
+ if (dst_map) {
+ assert(dst_trans->stride > 0);
+
+ switch (dst_trans->block.size) {
+ case 1:
+ case 2:
+ case 4:
+ util_fill_rect(dst_map, &dst_trans->block, dst_trans->stride,
+ 0, 0, width, height, value);
+ break;
+ case 8:
+ {
+ /* expand the 4-byte clear value to an 8-byte value */
+ ushort *row = (ushort *) dst_map;
+ ushort val0 = UBYTE_TO_USHORT((value >> 0) & 0xff);
+ ushort val1 = UBYTE_TO_USHORT((value >> 8) & 0xff);
+ ushort val2 = UBYTE_TO_USHORT((value >> 16) & 0xff);
+ ushort val3 = UBYTE_TO_USHORT((value >> 24) & 0xff);
+ unsigned i, j;
+ val0 = (val0 << 8) | val0;
+ val1 = (val1 << 8) | val1;
+ val2 = (val2 << 8) | val2;
+ val3 = (val3 << 8) | val3;
+ for (i = 0; i < height; i++) {
+ for (j = 0; j < width; j++) {
+ row[j*4+0] = val0;
+ row[j*4+1] = val1;
+ row[j*4+2] = val2;
+ row[j*4+3] = val3;
+ }
+ row += dst_trans->stride/2;
+ }
+ }
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ }
+
+ pipe->screen->transfer_unmap(pipe->screen, dst_trans);
+ screen->tex_transfer_destroy(dst_trans);
+}
diff --git a/src/gallium/auxiliary/util/u_rect.h b/src/gallium/auxiliary/util/u_rect.h
new file mode 100644
index 0000000000..daa50834d3
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_rect.h
@@ -0,0 +1,72 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * Pipe copy/fill rect helpers.
+ */
+
+
+#ifndef U_RECT_H
+#define U_RECT_H
+
+
+#include "pipe/p_format.h"
+
+struct pipe_context;
+struct pipe_surface;
+
+
+extern void
+util_copy_rect(ubyte * dst, const struct pipe_format_block *block,
+ unsigned dst_stride, unsigned dst_x, unsigned dst_y,
+ unsigned width, unsigned height, const ubyte * src,
+ int src_stride, unsigned src_x, int src_y);
+
+extern void
+util_fill_rect(ubyte * dst, const struct pipe_format_block *block,
+ unsigned dst_stride, unsigned dst_x, unsigned dst_y,
+ unsigned width, unsigned height, uint32_t value);
+
+
+extern void
+util_surface_copy(struct pipe_context *pipe,
+ boolean do_flip,
+ struct pipe_surface *dst,
+ unsigned dst_x, unsigned dst_y,
+ struct pipe_surface *src,
+ unsigned src_x, unsigned src_y,
+ unsigned w, unsigned h);
+
+extern void
+util_surface_fill(struct pipe_context *pipe,
+ struct pipe_surface *dst,
+ unsigned dstx, unsigned dsty,
+ unsigned width, unsigned height, unsigned value);
+
+
+#endif /* U_RECT_H */
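A sketch of util_fill_rect() on a malloc'ed 64x64, 32 bits-per-pixel buffer (illustrative only, not part of the patch; the sizes and the packed clear value are arbitrary):

#include "pipe/p_format.h"
#include "util/u_memory.h"
#include "util/u_rect.h"

static void
rect_example(void)
{
   struct pipe_format_block block;
   unsigned stride = 64 * 4;                      /* 64 pixels, 4 bytes each */
   ubyte *pixels = (ubyte *) MALLOC(stride * 64); /* hypothetical scratch surface */

   if (!pixels)
      return;

   block.size = 4;     /* bytes per block: one 32-bit pixel */
   block.width = 1;
   block.height = 1;

   /* Fill a 16x16 region at (8,8) with a packed clear color. */
   util_fill_rect(pixels, &block, stride, 8, 8, 16, 16, 0xff00ff00);

   FREE(pixels);
}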
diff --git a/src/gallium/auxiliary/util/u_simple_list.h b/src/gallium/auxiliary/util/u_simple_list.h
new file mode 100644
index 0000000000..f5f43b0faa
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_simple_list.h
@@ -0,0 +1,197 @@
+/**
+ * \file u_simple_list.h
+ * Simple macros for type-safe, intrusive lists.
+ *
+ * Intended to work with a list sentinel which is created as an empty
+ * list. Insert & delete are O(1).
+ *
+ * \author
+ * (C) 1997, Keith Whitwell
+ */
+
+/*
+ * Mesa 3-D graphics library
+ * Version: 3.5
+ *
+ * Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _U_SIMPLE_LIST_H_
+#define _U_SIMPLE_LIST_H_
+
+/**
+ * Remove an element from list.
+ *
+ * \param elem element to remove.
+ */
+#define remove_from_list(elem) \
+do { \
+ (elem)->next->prev = (elem)->prev; \
+ (elem)->prev->next = (elem)->next; \
+} while (0)
+
+/**
+ * Insert an element to the list head.
+ *
+ * \param list list.
+ * \param elem element to insert.
+ */
+#define insert_at_head(list, elem) \
+do { \
+ (elem)->prev = list; \
+ (elem)->next = (list)->next; \
+ (list)->next->prev = elem; \
+ (list)->next = elem; \
+} while(0)
+
+/**
+ * Insert an element to the list tail.
+ *
+ * \param list list.
+ * \param elem element to insert.
+ */
+#define insert_at_tail(list, elem) \
+do { \
+ (elem)->next = list; \
+ (elem)->prev = (list)->prev; \
+ (list)->prev->next = elem; \
+ (list)->prev = elem; \
+} while(0)
+
+/**
+ * Move an element to the list head.
+ *
+ * \param list list.
+ * \param elem element to move.
+ */
+#define move_to_head(list, elem) \
+do { \
+ remove_from_list(elem); \
+ insert_at_head(list, elem); \
+} while (0)
+
+/**
+ * Move an element to the list tail.
+ *
+ * \param list list.
+ * \param elem element to move.
+ */
+#define move_to_tail(list, elem) \
+do { \
+ remove_from_list(elem); \
+ insert_at_tail(list, elem); \
+} while (0)
+
+/**
+ * Initialize a list to be empty.
+ *
+ * \param sentinel list (sentinel element).
+ */
+#define make_empty_list(sentinel) \
+do { \
+ (sentinel)->next = sentinel; \
+ (sentinel)->prev = sentinel; \
+} while (0)
+
+/**
+ * Get list first element.
+ *
+ * \param list list.
+ *
+ * \return pointer to first element.
+ */
+#define first_elem(list) ((list)->next)
+
+/**
+ * Get list last element.
+ *
+ * \param list list.
+ *
+ * \return pointer to last element.
+ */
+#define last_elem(list) ((list)->prev)
+
+/**
+ * Get next element.
+ *
+ * \param elem element.
+ *
+ * \return pointer to next element.
+ */
+#define next_elem(elem) ((elem)->next)
+
+/**
+ * Get previous element.
+ *
+ * \param elem element.
+ *
+ * \return pointer to previous element.
+ */
+#define prev_elem(elem) ((elem)->prev)
+
+/**
+ * Test whether element is at end of the list.
+ *
+ * \param list list.
+ * \param elem element.
+ *
+ * \return non-zero if element is at end of list, or zero otherwise.
+ */
+#define at_end(list, elem) ((elem) == (list))
+
+/**
+ * Test if a list is empty.
+ *
+ * \param list list.
+ *
+ * \return non-zero if list empty, or zero otherwise.
+ */
+#define is_empty_list(list) ((list)->next == (list))
+
+/**
+ * Walk through the elements of a list.
+ *
+ * \param ptr pointer to the current element.
+ * \param list list.
+ *
+ * \note It should be followed by a { } block or a single statement, as in a \c
+ * for loop.
+ */
+#define foreach(ptr, list) \
+ for( ptr=(list)->next ; ptr!=list ; ptr=(ptr)->next )
+
+/**
+ * Walk through the elements of a list.
+ *
+ * Same as #foreach but lets you unlink the current value during a list
+ * traversal. Useful for freeing a list, element by element.
+ *
+ * \param ptr pointer to the current element.
+ * \param t temporary pointer.
+ * \param list list.
+ *
+ * \note It should be followed by a { } block or a single statement, as in a \c
+ * for loop.
+ */
+#define foreach_s(ptr, t, list) \
+ for(ptr=(list)->next,t=(ptr)->next; list != ptr; ptr=t, t=(t)->next)
+
+#endif /* _U_SIMPLE_LIST_H_ */
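A sketch of the intrusive list macros above (illustrative only, not part of the patch; 'struct item' is hypothetical, the only requirement being next/prev pointers of its own type):

#include <assert.h>
#include "util/u_simple_list.h"

/* hypothetical list node */
struct item {
   struct item *next, *prev;   /* required by the list macros */
   int value;
};

static void
list_example(void)
{
   struct item head;            /* list sentinel */
   struct item a, b;
   struct item *p, *t;

   make_empty_list(&head);

   a.value = 1;
   b.value = 2;
   insert_at_tail(&head, &a);
   insert_at_tail(&head, &b);

   /* foreach_s allows unlinking the current element while walking. */
   foreach_s(p, t, &head) {
      remove_from_list(p);
   }

   assert(is_empty_list(&head));
}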
diff --git a/src/gallium/auxiliary/util/u_simple_screen.c b/src/gallium/auxiliary/util/u_simple_screen.c
new file mode 100644
index 0000000000..5238299015
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_simple_screen.c
@@ -0,0 +1,153 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "u_simple_screen.h"
+
+#include "pipe/p_screen.h"
+#include "pipe/p_state.h"
+#include "pipe/internal/p_winsys_screen.h"
+
+
+static struct pipe_buffer *
+pass_buffer_create(struct pipe_screen *screen,
+ unsigned alignment,
+ unsigned usage,
+ unsigned size)
+{
+ struct pipe_buffer *buffer =
+ screen->winsys->buffer_create(screen->winsys, alignment, usage, size);
+
+ buffer->screen = screen;
+
+ return buffer;
+}
+
+static struct pipe_buffer *
+pass_user_buffer_create(struct pipe_screen *screen,
+ void *ptr,
+ unsigned bytes)
+{
+ struct pipe_buffer *buffer =
+ screen->winsys->user_buffer_create(screen->winsys, ptr, bytes);
+
+ buffer->screen = screen;
+
+ return buffer;
+}
+
+static struct pipe_buffer *
+pass_surface_buffer_create(struct pipe_screen *screen,
+ unsigned width, unsigned height,
+ enum pipe_format format,
+ unsigned usage,
+ unsigned tex_usage,
+ unsigned *stride)
+{
+ struct pipe_buffer *buffer =
+ screen->winsys->surface_buffer_create(screen->winsys, width, height,
+ format, usage, tex_usage, stride);
+
+ buffer->screen = screen;
+
+ return buffer;
+}
+
+static void *
+pass_buffer_map(struct pipe_screen *screen,
+ struct pipe_buffer *buf,
+ unsigned usage)
+{
+ return screen->winsys->buffer_map(screen->winsys, buf, usage);
+}
+
+static void
+pass_buffer_unmap(struct pipe_screen *screen,
+ struct pipe_buffer *buf)
+{
+ screen->winsys->buffer_unmap(screen->winsys, buf);
+}
+
+static void
+pass_buffer_destroy(struct pipe_buffer *buf)
+{
+ buf->screen->winsys->buffer_destroy(buf);
+}
+
+
+static void
+pass_flush_frontbuffer(struct pipe_screen *screen,
+ struct pipe_surface *surf,
+ void *context_private)
+{
+ screen->winsys->flush_frontbuffer(screen->winsys, surf, context_private);
+}
+
+static void
+pass_fence_reference(struct pipe_screen *screen,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+ screen->winsys->fence_reference(screen->winsys, ptr, fence);
+}
+
+static int
+pass_fence_signalled(struct pipe_screen *screen,
+ struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ return screen->winsys->fence_signalled(screen->winsys, fence, flag);
+}
+
+static int
+pass_fence_finish(struct pipe_screen *screen,
+ struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ return screen->winsys->fence_finish(screen->winsys, fence, flag);
+}
+
+void
+u_simple_screen_init(struct pipe_screen *screen)
+{
+ screen->buffer_create = pass_buffer_create;
+ screen->user_buffer_create = pass_user_buffer_create;
+ screen->surface_buffer_create = pass_surface_buffer_create;
+
+ screen->buffer_map = pass_buffer_map;
+ screen->buffer_unmap = pass_buffer_unmap;
+ screen->buffer_destroy = pass_buffer_destroy;
+ screen->flush_frontbuffer = pass_flush_frontbuffer;
+ screen->fence_reference = pass_fence_reference;
+ screen->fence_signalled = pass_fence_signalled;
+ screen->fence_finish = pass_fence_finish;
+}
+
+const char *
+u_simple_screen_winsys_name(struct pipe_screen *screen)
+{
+ return screen->winsys->get_name(screen->winsys);
+}
diff --git a/src/gallium/auxiliary/util/u_simple_screen.h b/src/gallium/auxiliary/util/u_simple_screen.h
new file mode 100644
index 0000000000..6612a8a7c0
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_simple_screen.h
@@ -0,0 +1,47 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef U_SIMPLE_SCREEN_H
+#define U_SIMPLE_SCREEN_H
+
+struct pipe_screen;
+struct pipe_winsys;
+
+/**
+ * The following function initializes a simple passthrough screen.
+ *
+ * All the relevant screen function pointers will be forwarded to the
+ * winsys.
+ */
+void u_simple_screen_init(struct pipe_screen *screen);
+
+/**
+ * Returns the name of the winsys associated with this screen.
+ */
+const char* u_simple_screen_winsys_name(struct pipe_screen *screen);
+
+#endif
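A sketch of where a winsys-backed driver might call u_simple_screen_init() (illustrative only, not part of the patch; the function name is hypothetical and screen->winsys is assumed to be set up already):

#include "util/u_debug.h"
#include "util/u_simple_screen.h"

/* Called from a hypothetical driver's screen-creation path, after
 * screen->winsys has been initialized. */
static void
example_screen_setup(struct pipe_screen *screen)
{
   u_simple_screen_init(screen);   /* forward buffer/fence hooks to the winsys */
   debug_printf("using winsys: %s\n", u_simple_screen_winsys_name(screen));
}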
diff --git a/src/gallium/auxiliary/util/u_simple_shaders.c b/src/gallium/auxiliary/util/u_simple_shaders.c
new file mode 100644
index 0000000000..e5fc9b0cee
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_simple_shaders.c
@@ -0,0 +1,153 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Simple vertex/fragment shader generators.
+ *
+ * @author Brian Paul
+ */
+
+
+#include "pipe/p_context.h"
+#include "pipe/p_shader_tokens.h"
+#include "util/u_simple_shaders.h"
+#include "tgsi/tgsi_ureg.h"
+
+
+
+/**
+ * Make simple vertex pass-through shader.
+ */
+void *
+util_make_vertex_passthrough_shader(struct pipe_context *pipe,
+ uint num_attribs,
+ const uint *semantic_names,
+ const uint *semantic_indexes)
+
+{
+ struct ureg_program *ureg;
+ uint i;
+
+ ureg = ureg_create( TGSI_PROCESSOR_VERTEX );
+ if (ureg == NULL)
+ return NULL;
+
+ for (i = 0; i < num_attribs; i++) {
+ struct ureg_src src;
+ struct ureg_dst dst;
+
+ src = ureg_DECL_vs_input( ureg,
+ semantic_names[i],
+ semantic_indexes[i]);
+
+ dst = ureg_DECL_output( ureg,
+ semantic_names[i],
+ semantic_indexes[i]);
+
+ ureg_MOV( ureg, dst, src );
+ }
+
+ ureg_END( ureg );
+
+ return ureg_create_shader_and_destroy( ureg, pipe );
+}
+
+
+
+
+/**
+ * Make simple fragment texture shader:
+ *  TEX OUT[0], IN[0], SAMP[0], 2D;
+ *  END;
+ * Note: the writemask parameter is not applied by the ureg-based
+ * implementation below; the full result is written.
+ */
+void *
+util_make_fragment_tex_shader_writemask(struct pipe_context *pipe,
+ unsigned writemask )
+{
+ struct ureg_program *ureg;
+ struct ureg_src sampler;
+ struct ureg_src tex;
+ struct ureg_dst out;
+
+ ureg = ureg_create( TGSI_PROCESSOR_FRAGMENT );
+ if (ureg == NULL)
+ return NULL;
+
+ sampler = ureg_DECL_sampler( ureg, 0 );
+
+ tex = ureg_DECL_fs_input( ureg,
+ TGSI_SEMANTIC_GENERIC, 0,
+ TGSI_INTERPOLATE_PERSPECTIVE );
+
+ out = ureg_DECL_output( ureg,
+ TGSI_SEMANTIC_COLOR,
+ 0 );
+
+ ureg_TEX( ureg, out, TGSI_TEXTURE_2D, tex, sampler );
+ ureg_END( ureg );
+
+ return ureg_create_shader_and_destroy( ureg, pipe );
+}
+
+void *
+util_make_fragment_tex_shader(struct pipe_context *pipe )
+{
+ return util_make_fragment_tex_shader_writemask( pipe,
+ TGSI_WRITEMASK_XYZW );
+}
+
+
+
+/**
+ * Make simple fragment color pass-through shader.
+ */
+void *
+util_make_fragment_passthrough_shader(struct pipe_context *pipe)
+{
+ struct ureg_program *ureg;
+ struct ureg_src src;
+ struct ureg_dst dst;
+
+ ureg = ureg_create( TGSI_PROCESSOR_FRAGMENT );
+ if (ureg == NULL)
+ return NULL;
+
+ src = ureg_DECL_fs_input( ureg, TGSI_SEMANTIC_COLOR, 0,
+ TGSI_INTERPOLATE_PERSPECTIVE );
+
+ dst = ureg_DECL_output( ureg, TGSI_SEMANTIC_COLOR, 0 );
+
+ ureg_MOV( ureg, dst, src );
+ ureg_END( ureg );
+
+ return ureg_create_shader_and_destroy( ureg, pipe );
+}
+
+
diff --git a/src/gallium/auxiliary/util/u_simple_shaders.h b/src/gallium/auxiliary/util/u_simple_shaders.h
new file mode 100644
index 0000000000..d2e80d6eb4
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_simple_shaders.h
@@ -0,0 +1,69 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef U_SIMPLE_SHADERS_H
+#define U_SIMPLE_SHADERS_H
+
+
+#include "pipe/p_compiler.h"
+
+
+struct pipe_context;
+struct pipe_shader_state;
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+extern void *
+util_make_vertex_passthrough_shader(struct pipe_context *pipe,
+ uint num_attribs,
+ const uint *semantic_names,
+ const uint *semantic_indexes);
+
+
+extern void *
+util_make_fragment_tex_shader_writemask(struct pipe_context *pipe,
+ unsigned writemask );
+
+extern void *
+util_make_fragment_tex_shader(struct pipe_context *pipe);
+
+
+extern void *
+util_make_fragment_passthrough_shader(struct pipe_context *pipe);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif
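For orientation, a minimal usage sketch follows (illustrative only, not part of the patch above): it assumes a struct pipe_context supplied by the caller and the standard gallium bind_vs_state()/bind_fs_state() hooks; the helper name and the semantics arrays are made up for the example.

#include "pipe/p_context.h"
#include "pipe/p_shader_tokens.h"
#include "util/u_simple_shaders.h"

/* Hypothetical helper: build and bind a pass-through VS plus a textured FS,
 * e.g. for a blit path.  Returns FALSE if either shader could not be built. */
static boolean
setup_blit_shaders(struct pipe_context *pipe, void **vs, void **fs)
{
   const uint semantic_names[2] = { TGSI_SEMANTIC_POSITION,
                                    TGSI_SEMANTIC_GENERIC };
   const uint semantic_indexes[2] = { 0, 0 };

   /* Copy position and one texcoord unchanged through the vertex stage. */
   *vs = util_make_vertex_passthrough_shader(pipe, 2,
                                             semantic_names,
                                             semantic_indexes);
   /* Sample a 2D texture into the color output. */
   *fs = util_make_fragment_tex_shader(pipe);
   if (!*vs || !*fs)
      return FALSE;

   pipe->bind_vs_state(pipe, *vs);
   pipe->bind_fs_state(pipe, *fs);
   return TRUE;
}

The returned handles are driver CSOs, so they should eventually be released through the matching delete_vs_state()/delete_fs_state() hooks.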
diff --git a/src/gallium/auxiliary/util/u_snprintf.c b/src/gallium/auxiliary/util/u_snprintf.c
new file mode 100644
index 0000000000..65bdd0df2b
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_snprintf.c
@@ -0,0 +1,1497 @@
+/*
+ * Copyright (c) 1995 Patrick Powell.
+ *
+ * This code is based on code written by Patrick Powell <papowell@astart.com>.
+ * It may be used for any purpose as long as this notice remains intact on all
+ * source code distributions.
+ */
+
+/*
+ * Copyright (c) 2008 Holger Weiss.
+ *
+ * This version of the code is maintained by Holger Weiss <holger@jhweiss.de>.
+ * My changes to the code may freely be used, modified and/or redistributed for
+ * any purpose. It would be nice if additions and fixes to this file (including
+ * trivial code cleanups) would be sent back in order to let me include them in
+ * the version available at <http://www.jhweiss.de/software/snprintf.html>.
+ * However, this is not a requirement for using or redistributing (possibly
+ * modified) versions of this file, nor is leaving this notice intact mandatory.
+ */
+
+/*
+ * History
+ *
+ * 2008-01-20 Holger Weiss <holger@jhweiss.de> for C99-snprintf 1.1:
+ *
+ * Fixed the detection of infinite floating point values on IRIX (and
+ * possibly other systems) and applied another few minor cleanups.
+ *
+ * 2008-01-06 Holger Weiss <holger@jhweiss.de> for C99-snprintf 1.0:
+ *
+ * Added a lot of new features, fixed many bugs, and incorporated various
+ * improvements done by Andrew Tridgell <tridge@samba.org>, Russ Allbery
+ * <rra@stanford.edu>, Hrvoje Niksic <hniksic@xemacs.org>, Damien Miller
+ * <djm@mindrot.org>, and others for the Samba, INN, Wget, and OpenSSH
+ * projects. The additions include: support the "e", "E", "g", "G", and
+ * "F" conversion specifiers (and use conversion style "f" or "F" for the
+ * still unsupported "a" and "A" specifiers); support the "hh", "ll", "j",
+ * "t", and "z" length modifiers; support the "#" flag and the (non-C99)
+ * "'" flag; use localeconv(3) (if available) to get both the current
+ * locale's decimal point character and the separator between groups of
+ * digits; fix the handling of various corner cases of field width and
+ * precision specifications; fix various floating point conversion bugs;
+ * handle infinite and NaN floating point values; don't attempt to write to
+ * the output buffer (which may be NULL) if a size of zero was specified;
+ * check for integer overflow of the field width, precision, and return
+ * values and during the floating point conversion; use the OUTCHAR() macro
+ * instead of a function for better performance; provide asprintf(3) and
+ * vasprintf(3) functions; add new test cases. The replacement functions
+ * have been renamed to use an "rpl_" prefix, the function calls in the
+ * main project (and in this file) must be redefined accordingly for each
+ * replacement function which is needed (by using Autoconf or other means).
+ * Various other minor improvements have been applied and the coding style
+ * was cleaned up for consistency.
+ *
+ * 2007-07-23 Holger Weiss <holger@jhweiss.de> for Mutt 1.5.13:
+ *
+ * C99 compliant snprintf(3) and vsnprintf(3) functions return the number
+ * of characters that would have been written to a sufficiently sized
+ * buffer (excluding the '\0'). The original code simply returned the
+ * length of the resulting output string, so that's been fixed.
+ *
+ * 1998-03-05 Michael Elkins <me@mutt.org> for Mutt 0.90.8:
+ *
+ * The original code assumed that both snprintf(3) and vsnprintf(3) were
+ * missing. Some systems only have snprintf(3) but not vsnprintf(3), so
+ * the code is now broken down under HAVE_SNPRINTF and HAVE_VSNPRINTF.
+ *
+ * 1998-01-27 Thomas Roessler <roessler@does-not-exist.org> for Mutt 0.89i:
+ *
+ * The PGP code was using unsigned hexadecimal formats. Unfortunately,
+ * unsigned formats simply didn't work.
+ *
+ * 1997-10-22 Brandon Long <blong@fiction.net> for Mutt 0.87.1:
+ *
+ * Ok, added some minimal floating point support, which means this probably
+ * requires libm on most operating systems. Don't yet support the exponent
+ * (e,E) and sigfig (g,G). Also, fmtint() was pretty badly broken, it just
+ * wasn't being exercised in ways which showed it, so that's been fixed.
+ * Also, formatted the code to Mutt conventions, and removed dead code left
+ * over from the original. Also, there is now a builtin-test, run with:
+ * gcc -DTEST_SNPRINTF -o snprintf snprintf.c -lm && ./snprintf
+ *
+ * 1996-09-15 Brandon Long <blong@fiction.net> for Mutt 0.43:
+ *
+ * This was ugly. It is still ugly. I opted out of floating point
+ * numbers, but the formatter understands just about everything from the
+ * normal C string format, at least as far as I can tell from the Solaris
+ * 2.5 printf(3S) man page.
+ */
+
+/*
+ * ToDo
+ *
+ * - Add wide character support.
+ * - Add support for "%a" and "%A" conversions.
+ * - Create test routines which predefine the expected results. Our test cases
+ * usually expose bugs in system implementations rather than in ours :-)
+ */
+
+/*
+ * Usage
+ *
+ * 1) The following preprocessor macros should be defined to 1 if the feature or
+ * file in question is available on the target system (by using Autoconf or
+ * other means), though basic functionality should be available as long as
+ * HAVE_STDARG_H and HAVE_STDLIB_H are defined correctly:
+ *
+ * HAVE_VSNPRINTF
+ * HAVE_SNPRINTF
+ * HAVE_VASPRINTF
+ * HAVE_ASPRINTF
+ * HAVE_STDARG_H
+ * HAVE_STDDEF_H
+ * HAVE_STDINT_H
+ * HAVE_STDLIB_H
+ * HAVE_INTTYPES_H
+ * HAVE_LOCALE_H
+ * HAVE_LOCALECONV
+ * HAVE_LCONV_DECIMAL_POINT
+ * HAVE_LCONV_THOUSANDS_SEP
+ * HAVE_LONG_DOUBLE
+ * HAVE_LONG_LONG_INT
+ * HAVE_UNSIGNED_LONG_LONG_INT
+ * HAVE_INTMAX_T
+ * HAVE_UINTMAX_T
+ * HAVE_UINTPTR_T
+ * HAVE_PTRDIFF_T
+ * HAVE_VA_COPY
+ * HAVE___VA_COPY
+ *
+ * 2) The calls to the functions which should be replaced must be redefined
+ * throughout the project files (by using Autoconf or other means):
+ *
+ * #define vsnprintf rpl_vsnprintf
+ * #define snprintf rpl_snprintf
+ * #define vasprintf rpl_vasprintf
+ * #define asprintf rpl_asprintf
+ *
+ * 3) The required replacement functions should be declared in some header file
+ * included throughout the project files:
+ *
+ * #if HAVE_CONFIG_H
+ * #include <config.h>
+ * #endif
+ * #if HAVE_STDARG_H
+ * #include <stdarg.h>
+ * #if !HAVE_VSNPRINTF
+ * int rpl_vsnprintf(char *, size_t, const char *, va_list);
+ * #endif
+ * #if !HAVE_SNPRINTF
+ * int rpl_snprintf(char *, size_t, const char *, ...);
+ * #endif
+ * #if !HAVE_VASPRINTF
+ * int rpl_vasprintf(char **, const char *, va_list);
+ * #endif
+ * #if !HAVE_ASPRINTF
+ * int rpl_asprintf(char **, const char *, ...);
+ * #endif
+ * #endif
+ *
+ * Autoconf macros for handling step 1 and step 2 are available at
+ * <http://www.jhweiss.de/software/snprintf.html>.
+ */
+
+#include "pipe/p_config.h"
+
+#if HAVE_CONFIG_H
+#include <config.h>
+#else
+#ifdef WIN32
+#define vsnprintf util_vsnprintf
+#define snprintf util_snprintf
+#define HAVE_VSNPRINTF 0
+#define HAVE_SNPRINTF 0
+#define HAVE_VASPRINTF 1 /* not needed */
+#define HAVE_ASPRINTF 1 /* not needed */
+#define HAVE_STDARG_H 1
+#define HAVE_STDDEF_H 1
+#define HAVE_STDINT_H 0
+#define HAVE_STDLIB_H 1
+#define HAVE_INTTYPES_H 0
+#define HAVE_LOCALE_H 0
+#define HAVE_LOCALECONV 0
+#define HAVE_LCONV_DECIMAL_POINT 0
+#define HAVE_LCONV_THOUSANDS_SEP 0
+#define HAVE_LONG_DOUBLE 0
+#define HAVE_LONG_LONG_INT 1
+#define HAVE_UNSIGNED_LONG_LONG_INT 1
+#define HAVE_INTMAX_T 0
+#define HAVE_UINTMAX_T 0
+#define HAVE_UINTPTR_T 1
+#define HAVE_PTRDIFF_T 1
+#define HAVE_VA_COPY 0
+#define HAVE___VA_COPY 0
+#else
+#define HAVE_VSNPRINTF 1
+#define HAVE_SNPRINTF 1
+#define HAVE_VASPRINTF 1
+#define HAVE_ASPRINTF 1
+#endif
+#endif /* HAVE_CONFIG_H */
+
+#if !HAVE_SNPRINTF || !HAVE_VSNPRINTF || !HAVE_ASPRINTF || !HAVE_VASPRINTF
+#include <stdio.h> /* For NULL, size_t, vsnprintf(3), and vasprintf(3). */
+#ifdef VA_START
+#undef VA_START
+#endif /* defined(VA_START) */
+#ifdef VA_SHIFT
+#undef VA_SHIFT
+#endif /* defined(VA_SHIFT) */
+#if HAVE_STDARG_H
+#include <stdarg.h>
+#define VA_START(ap, last) va_start(ap, last)
+#define VA_SHIFT(ap, value, type) /* No-op for ANSI C. */
+#else /* Assume <varargs.h> is available. */
+#include <varargs.h>
+#define VA_START(ap, last) va_start(ap) /* "last" is ignored. */
+#define VA_SHIFT(ap, value, type) value = va_arg(ap, type)
+#endif /* HAVE_STDARG_H */
+
+#if !HAVE_VASPRINTF
+#if HAVE_STDLIB_H
+#include <stdlib.h> /* For malloc(3). */
+#endif /* HAVE_STDLIB_H */
+#ifdef VA_COPY
+#undef VA_COPY
+#endif /* defined(VA_COPY) */
+#ifdef VA_END_COPY
+#undef VA_END_COPY
+#endif /* defined(VA_END_COPY) */
+#if HAVE_VA_COPY
+#define VA_COPY(dest, src) va_copy(dest, src)
+#define VA_END_COPY(ap) va_end(ap)
+#elif HAVE___VA_COPY
+#define VA_COPY(dest, src) __va_copy(dest, src)
+#define VA_END_COPY(ap) va_end(ap)
+#else
+#define VA_COPY(dest, src) (void)mymemcpy(&dest, &src, sizeof(va_list))
+#define VA_END_COPY(ap) /* No-op. */
+#define NEED_MYMEMCPY 1
+static void *mymemcpy(void *, void *, size_t);
+#endif /* HAVE_VA_COPY */
+#endif /* !HAVE_VASPRINTF */
+
+#if !HAVE_VSNPRINTF
+#include <limits.h> /* For *_MAX. */
+#if HAVE_INTTYPES_H
+#include <inttypes.h> /* For intmax_t (if not defined in <stdint.h>). */
+#endif /* HAVE_INTTYPES_H */
+#if HAVE_LOCALE_H
+#include <locale.h> /* For localeconv(3). */
+#endif /* HAVE_LOCALE_H */
+#if HAVE_STDDEF_H
+#include <stddef.h> /* For ptrdiff_t. */
+#endif /* HAVE_STDDEF_H */
+#if HAVE_STDINT_H
+#include <stdint.h> /* For intmax_t. */
+#endif /* HAVE_STDINT_H */
+
+/* Support for unsigned long long int. We may also need ULLONG_MAX. */
+#ifndef ULONG_MAX /* We may need ULONG_MAX as a fallback. */
+#ifdef UINT_MAX
+#define ULONG_MAX UINT_MAX
+#else
+#define ULONG_MAX INT_MAX
+#endif /* defined(UINT_MAX) */
+#endif /* !defined(ULONG_MAX) */
+#ifdef ULLONG
+#undef ULLONG
+#endif /* defined(ULLONG) */
+#if HAVE_UNSIGNED_LONG_LONG_INT
+#define ULLONG unsigned long long int
+#ifndef ULLONG_MAX
+#define ULLONG_MAX ULONG_MAX
+#endif /* !defined(ULLONG_MAX) */
+#else
+#define ULLONG unsigned long int
+#ifdef ULLONG_MAX
+#undef ULLONG_MAX
+#endif /* defined(ULLONG_MAX) */
+#define ULLONG_MAX ULONG_MAX
+#endif /* HAVE_UNSIGNED_LONG_LONG_INT */
+
+/* Support for uintmax_t. We also need UINTMAX_MAX. */
+#ifdef UINTMAX_T
+#undef UINTMAX_T
+#endif /* defined(UINTMAX_T) */
+#if HAVE_UINTMAX_T || defined(uintmax_t)
+#define UINTMAX_T uintmax_t
+#ifndef UINTMAX_MAX
+#define UINTMAX_MAX ULLONG_MAX
+#endif /* !defined(UINTMAX_MAX) */
+#else
+#define UINTMAX_T ULLONG
+#ifdef UINTMAX_MAX
+#undef UINTMAX_MAX
+#endif /* defined(UINTMAX_MAX) */
+#define UINTMAX_MAX ULLONG_MAX
+#endif /* HAVE_UINTMAX_T || defined(uintmax_t) */
+
+/* Support for long double. */
+#ifndef LDOUBLE
+#if HAVE_LONG_DOUBLE
+#define LDOUBLE long double
+#else
+#define LDOUBLE double
+#endif /* HAVE_LONG_DOUBLE */
+#endif /* !defined(LDOUBLE) */
+
+/* Support for long long int. */
+#ifndef LLONG
+#if HAVE_LONG_LONG_INT
+#define LLONG long long int
+#else
+#define LLONG long int
+#endif /* HAVE_LONG_LONG_INT */
+#endif /* !defined(LLONG) */
+
+/* Support for intmax_t. */
+#ifndef INTMAX_T
+#if HAVE_INTMAX_T || defined(intmax_t)
+#define INTMAX_T intmax_t
+#else
+#define INTMAX_T LLONG
+#endif /* HAVE_INTMAX_T || defined(intmax_t) */
+#endif /* !defined(INTMAX_T) */
+
+/* Support for uintptr_t. */
+#ifndef UINTPTR_T
+#if HAVE_UINTPTR_T || defined(uintptr_t)
+#define UINTPTR_T uintptr_t
+#else
+#define UINTPTR_T unsigned long int
+#endif /* HAVE_UINTPTR_T || defined(uintptr_t) */
+#endif /* !defined(UINTPTR_T) */
+
+/* WinCE 5.0 does not define uintptr_t. */
+#if defined(_WIN32_WCE) && (_WIN32_WCE < 600)
+#ifdef UINTPTR_T
+#undef UINTPTR_T
+#endif
+#define UINTPTR_T unsigned long int
+#endif
+
+
+/* Support for ptrdiff_t. */
+#ifndef PTRDIFF_T
+#if HAVE_PTRDIFF_T || defined(ptrdiff_t)
+#define PTRDIFF_T ptrdiff_t
+#else
+#define PTRDIFF_T long int
+#endif /* HAVE_PTRDIFF_T || defined(ptrdiff_t) */
+#endif /* !defined(PTRDIFF_T) */
+
+/*
+ * We need an unsigned integer type corresponding to ptrdiff_t (cf. C99:
+ * 7.19.6.1, 7). However, we'll simply use PTRDIFF_T and convert it to an
+ * unsigned type if necessary. This should work just fine in practice.
+ */
+#ifndef UPTRDIFF_T
+#define UPTRDIFF_T PTRDIFF_T
+#endif /* !defined(UPTRDIFF_T) */
+
+/*
+ * We need a signed integer type corresponding to size_t (cf. C99: 7.19.6.1, 7).
+ * However, we'll simply use size_t and convert it to a signed type if
+ * necessary. This should work just fine in practice.
+ */
+#ifndef SSIZE_T
+#define SSIZE_T size_t
+#endif /* !defined(SSIZE_T) */
+
+/* Either ERANGE or E2BIG should be available everywhere. */
+#ifndef ERANGE
+#define ERANGE E2BIG
+#endif /* !defined(ERANGE) */
+#ifndef EOVERFLOW
+#define EOVERFLOW ERANGE
+#endif /* !defined(EOVERFLOW) */
+
+/*
+ * Buffer size to hold the octal string representation of UINT128_MAX without
+ * nul-termination ("3777777777777777777777777777777777777777777").
+ */
+#ifdef MAX_CONVERT_LENGTH
+#undef MAX_CONVERT_LENGTH
+#endif /* defined(MAX_CONVERT_LENGTH) */
+#define MAX_CONVERT_LENGTH 43
+
+/* Format read states. */
+#define PRINT_S_DEFAULT 0
+#define PRINT_S_FLAGS 1
+#define PRINT_S_WIDTH 2
+#define PRINT_S_DOT 3
+#define PRINT_S_PRECISION 4
+#define PRINT_S_MOD 5
+#define PRINT_S_CONV 6
+
+/* Format flags. */
+#define PRINT_F_MINUS (1 << 0)
+#define PRINT_F_PLUS (1 << 1)
+#define PRINT_F_SPACE (1 << 2)
+#define PRINT_F_NUM (1 << 3)
+#define PRINT_F_ZERO (1 << 4)
+#define PRINT_F_QUOTE (1 << 5)
+#define PRINT_F_UP (1 << 6)
+#define PRINT_F_UNSIGNED (1 << 7)
+#define PRINT_F_TYPE_G (1 << 8)
+#define PRINT_F_TYPE_E (1 << 9)
+
+/* Conversion flags. */
+#define PRINT_C_CHAR 1
+#define PRINT_C_SHORT 2
+#define PRINT_C_LONG 3
+#define PRINT_C_LLONG 4
+#define PRINT_C_LDOUBLE 5
+#define PRINT_C_SIZE 6
+#define PRINT_C_PTRDIFF 7
+#define PRINT_C_INTMAX 8
+
+#ifndef MAX
+#define MAX(x, y) ((x >= y) ? x : y)
+#endif /* !defined(MAX) */
+#ifndef CHARTOINT
+#define CHARTOINT(ch) (ch - '0')
+#endif /* !defined(CHARTOINT) */
+#ifndef ISDIGIT
+#define ISDIGIT(ch) ('0' <= (unsigned char)ch && (unsigned char)ch <= '9')
+#endif /* !defined(ISDIGIT) */
+#ifndef ISNAN
+#define ISNAN(x) (x != x)
+#endif /* !defined(ISNAN) */
+#ifndef ISINF
+#define ISINF(x) (x != 0.0 && x + x == x)
+#endif /* !defined(ISINF) */
+
+#ifdef OUTCHAR
+#undef OUTCHAR
+#endif /* defined(OUTCHAR) */
+#define OUTCHAR(str, len, size, ch) \
+do { \
+ if (len + 1 < size) \
+ str[len] = ch; \
+ (len)++; \
+} while (/* CONSTCOND */ 0)
+
+static void fmtstr(char *, size_t *, size_t, const char *, int, int, int);
+static void fmtint(char *, size_t *, size_t, INTMAX_T, int, int, int, int);
+static void fmtflt(char *, size_t *, size_t, LDOUBLE, int, int, int, int *);
+static void printsep(char *, size_t *, size_t);
+static int getnumsep(int);
+static int getexponent(LDOUBLE);
+static int convert(UINTMAX_T, char *, size_t, int, int);
+static UINTMAX_T cast(LDOUBLE);
+static UINTMAX_T myround(LDOUBLE);
+static LDOUBLE mypow10(int);
+
+int
+util_vsnprintf(char *str, size_t size, const char *format, va_list args)
+{
+ LDOUBLE fvalue;
+ INTMAX_T value;
+ unsigned char cvalue;
+ const char *strvalue;
+ INTMAX_T *intmaxptr;
+ PTRDIFF_T *ptrdiffptr;
+ SSIZE_T *sizeptr;
+ LLONG *llongptr;
+ long int *longptr;
+ int *intptr;
+ short int *shortptr;
+ signed char *charptr;
+ size_t len = 0;
+ int overflow = 0;
+ int base = 0;
+ int cflags = 0;
+ int flags = 0;
+ int width = 0;
+ int precision = -1;
+ int state = PRINT_S_DEFAULT;
+ char ch = *format++;
+
+ /*
+ * C99 says: "If `n' is zero, nothing is written, and `s' may be a null
+ * pointer." (7.19.6.5, 2) We're forgiving and allow a NULL pointer
+ * even if a size larger than zero was specified. At least NetBSD's
+ * snprintf(3) does the same, as well as other versions of this file.
+ * (Though some of these versions will write to a non-NULL buffer even
+ * if a size of zero was specified, which violates the standard.)
+ */
+ if (str == NULL && size != 0)
+ size = 0;
+
+ while (ch != '\0')
+ switch (state) {
+ case PRINT_S_DEFAULT:
+ if (ch == '%')
+ state = PRINT_S_FLAGS;
+ else
+ OUTCHAR(str, len, size, ch);
+ ch = *format++;
+ break;
+ case PRINT_S_FLAGS:
+ switch (ch) {
+ case '-':
+ flags |= PRINT_F_MINUS;
+ ch = *format++;
+ break;
+ case '+':
+ flags |= PRINT_F_PLUS;
+ ch = *format++;
+ break;
+ case ' ':
+ flags |= PRINT_F_SPACE;
+ ch = *format++;
+ break;
+ case '#':
+ flags |= PRINT_F_NUM;
+ ch = *format++;
+ break;
+ case '0':
+ flags |= PRINT_F_ZERO;
+ ch = *format++;
+ break;
+ case '\'': /* SUSv2 flag (not in C99). */
+ flags |= PRINT_F_QUOTE;
+ ch = *format++;
+ break;
+ default:
+ state = PRINT_S_WIDTH;
+ break;
+ }
+ break;
+ case PRINT_S_WIDTH:
+ if (ISDIGIT(ch)) {
+ ch = CHARTOINT(ch);
+ if (width > (INT_MAX - ch) / 10) {
+ overflow = 1;
+ goto out;
+ }
+ width = 10 * width + ch;
+ ch = *format++;
+ } else if (ch == '*') {
+ /*
+ * C99 says: "A negative field width argument is
+ * taken as a `-' flag followed by a positive
+ * field width." (7.19.6.1, 5)
+ */
+ if ((width = va_arg(args, int)) < 0) {
+ flags |= PRINT_F_MINUS;
+ width = -width;
+ }
+ ch = *format++;
+ state = PRINT_S_DOT;
+ } else
+ state = PRINT_S_DOT;
+ break;
+ case PRINT_S_DOT:
+ if (ch == '.') {
+ state = PRINT_S_PRECISION;
+ ch = *format++;
+ } else
+ state = PRINT_S_MOD;
+ break;
+ case PRINT_S_PRECISION:
+ if (precision == -1)
+ precision = 0;
+ if (ISDIGIT(ch)) {
+ ch = CHARTOINT(ch);
+ if (precision > (INT_MAX - ch) / 10) {
+ overflow = 1;
+ goto out;
+ }
+ precision = 10 * precision + ch;
+ ch = *format++;
+ } else if (ch == '*') {
+ /*
+ * C99 says: "A negative precision argument is
+ * taken as if the precision were omitted."
+ * (7.19.6.1, 5)
+ */
+ if ((precision = va_arg(args, int)) < 0)
+ precision = -1;
+ ch = *format++;
+ state = PRINT_S_MOD;
+ } else
+ state = PRINT_S_MOD;
+ break;
+ case PRINT_S_MOD:
+ switch (ch) {
+ case 'h':
+ ch = *format++;
+ if (ch == 'h') { /* It's a char. */
+ ch = *format++;
+ cflags = PRINT_C_CHAR;
+ } else
+ cflags = PRINT_C_SHORT;
+ break;
+ case 'l':
+ ch = *format++;
+ if (ch == 'l') { /* It's a long long. */
+ ch = *format++;
+ cflags = PRINT_C_LLONG;
+ } else
+ cflags = PRINT_C_LONG;
+ break;
+ case 'L':
+ cflags = PRINT_C_LDOUBLE;
+ ch = *format++;
+ break;
+ case 'j':
+ cflags = PRINT_C_INTMAX;
+ ch = *format++;
+ break;
+ case 't':
+ cflags = PRINT_C_PTRDIFF;
+ ch = *format++;
+ break;
+ case 'z':
+ cflags = PRINT_C_SIZE;
+ ch = *format++;
+ break;
+ }
+ state = PRINT_S_CONV;
+ break;
+ case PRINT_S_CONV:
+ switch (ch) {
+ case 'd':
+ /* FALLTHROUGH */
+ case 'i':
+ switch (cflags) {
+ case PRINT_C_CHAR:
+ value = (signed char)va_arg(args, int);
+ break;
+ case PRINT_C_SHORT:
+ value = (short int)va_arg(args, int);
+ break;
+ case PRINT_C_LONG:
+ value = va_arg(args, long int);
+ break;
+ case PRINT_C_LLONG:
+ value = va_arg(args, LLONG);
+ break;
+ case PRINT_C_SIZE:
+ value = va_arg(args, SSIZE_T);
+ break;
+ case PRINT_C_INTMAX:
+ value = va_arg(args, INTMAX_T);
+ break;
+ case PRINT_C_PTRDIFF:
+ value = va_arg(args, PTRDIFF_T);
+ break;
+ default:
+ value = va_arg(args, int);
+ break;
+ }
+ fmtint(str, &len, size, value, 10, width,
+ precision, flags);
+ break;
+ case 'X':
+ flags |= PRINT_F_UP;
+ /* FALLTHROUGH */
+ case 'x':
+ base = 16;
+ /* FALLTHROUGH */
+ case 'o':
+ if (base == 0)
+ base = 8;
+ /* FALLTHROUGH */
+ case 'u':
+ if (base == 0)
+ base = 10;
+ flags |= PRINT_F_UNSIGNED;
+ switch (cflags) {
+ case PRINT_C_CHAR:
+ value = (unsigned char)va_arg(args,
+ unsigned int);
+ break;
+ case PRINT_C_SHORT:
+ value = (unsigned short int)va_arg(args,
+ unsigned int);
+ break;
+ case PRINT_C_LONG:
+ value = va_arg(args, unsigned long int);
+ break;
+ case PRINT_C_LLONG:
+ value = va_arg(args, ULLONG);
+ break;
+ case PRINT_C_SIZE:
+ value = va_arg(args, size_t);
+ break;
+ case PRINT_C_INTMAX:
+ value = va_arg(args, UINTMAX_T);
+ break;
+ case PRINT_C_PTRDIFF:
+ value = va_arg(args, UPTRDIFF_T);
+ break;
+ default:
+ value = va_arg(args, unsigned int);
+ break;
+ }
+ fmtint(str, &len, size, value, base, width,
+ precision, flags);
+ break;
+ case 'A':
+ /* Not yet supported, we'll use "%F". */
+ /* FALLTHROUGH */
+ case 'F':
+ flags |= PRINT_F_UP;
+ case 'a':
+ /* Not yet supported, we'll use "%f". */
+ /* FALLTHROUGH */
+ case 'f':
+ if (cflags == PRINT_C_LDOUBLE)
+ fvalue = va_arg(args, LDOUBLE);
+ else
+ fvalue = va_arg(args, double);
+ fmtflt(str, &len, size, fvalue, width,
+ precision, flags, &overflow);
+ if (overflow)
+ goto out;
+ break;
+ case 'E':
+ flags |= PRINT_F_UP;
+ /* FALLTHROUGH */
+ case 'e':
+ flags |= PRINT_F_TYPE_E;
+ if (cflags == PRINT_C_LDOUBLE)
+ fvalue = va_arg(args, LDOUBLE);
+ else
+ fvalue = va_arg(args, double);
+ fmtflt(str, &len, size, fvalue, width,
+ precision, flags, &overflow);
+ if (overflow)
+ goto out;
+ break;
+ case 'G':
+ flags |= PRINT_F_UP;
+ /* FALLTHROUGH */
+ case 'g':
+ flags |= PRINT_F_TYPE_G;
+ if (cflags == PRINT_C_LDOUBLE)
+ fvalue = va_arg(args, LDOUBLE);
+ else
+ fvalue = va_arg(args, double);
+ /*
+ * If the precision is zero, it is treated as
+ * one (cf. C99: 7.19.6.1, 8).
+ */
+ if (precision == 0)
+ precision = 1;
+ fmtflt(str, &len, size, fvalue, width,
+ precision, flags, &overflow);
+ if (overflow)
+ goto out;
+ break;
+ case 'c':
+ cvalue = (unsigned char)va_arg(args, int);
+ OUTCHAR(str, len, size, cvalue);
+ break;
+ case 's':
+ strvalue = va_arg(args, char *);
+ fmtstr(str, &len, size, strvalue, width,
+ precision, flags);
+ break;
+ case 'p':
+ /*
+ * C99 says: "The value of the pointer is
+ * converted to a sequence of printing
+ * characters, in an implementation-defined
+ * manner." (C99: 7.19.6.1, 8)
+ */
+ if ((strvalue = va_arg(args, void *)) == NULL)
+ /*
+ * We use the glibc format. BSD prints
+ * "0x0", SysV "0".
+ */
+ fmtstr(str, &len, size, "(nil)", width,
+ -1, flags);
+ else {
+ /*
+ * We use the BSD/glibc format. SysV
+ * omits the "0x" prefix (which we emit
+ * using the PRINT_F_NUM flag).
+ */
+ flags |= PRINT_F_NUM;
+ flags |= PRINT_F_UNSIGNED;
+ fmtint(str, &len, size,
+ (UINTPTR_T)strvalue, 16, width,
+ precision, flags);
+ }
+ break;
+ case 'n':
+ switch (cflags) {
+ case PRINT_C_CHAR:
+ charptr = va_arg(args, signed char *);
+ *charptr = (signed char)len;
+ break;
+ case PRINT_C_SHORT:
+ shortptr = va_arg(args, short int *);
+ *shortptr = (short int)len;
+ break;
+ case PRINT_C_LONG:
+ longptr = va_arg(args, long int *);
+ *longptr = (long int)len;
+ break;
+ case PRINT_C_LLONG:
+ llongptr = va_arg(args, LLONG *);
+ *llongptr = (LLONG)len;
+ break;
+ case PRINT_C_SIZE:
+ /*
+ * C99 says that with the "z" length
+ * modifier, "a following `n' conversion
+ * specifier applies to a pointer to a
+ * signed integer type corresponding to
+ * size_t argument." (7.19.6.1, 7)
+ */
+ sizeptr = va_arg(args, SSIZE_T *);
+ *sizeptr = len;
+ break;
+ case PRINT_C_INTMAX:
+ intmaxptr = va_arg(args, INTMAX_T *);
+ *intmaxptr = len;
+ break;
+ case PRINT_C_PTRDIFF:
+ ptrdiffptr = va_arg(args, PTRDIFF_T *);
+ *ptrdiffptr = len;
+ break;
+ default:
+ intptr = va_arg(args, int *);
+ *intptr = (int)len;
+ break;
+ }
+ break;
+ case '%': /* Print a "%" character verbatim. */
+ OUTCHAR(str, len, size, ch);
+ break;
+ default: /* Skip other characters. */
+ break;
+ }
+ ch = *format++;
+ state = PRINT_S_DEFAULT;
+ base = cflags = flags = width = 0;
+ precision = -1;
+ break;
+ }
+out:
+ if (len < size)
+ str[len] = '\0';
+ else if (size > 0)
+ str[size - 1] = '\0';
+
+ if (overflow || len >= INT_MAX) {
+ return -1;
+ }
+ return (int)len;
+}
+
+static void
+fmtstr(char *str, size_t *len, size_t size, const char *value, int width,
+ int precision, int flags)
+{
+ int padlen, strln; /* Amount to pad. */
+ int noprecision = (precision == -1);
+
+ if (value == NULL) /* We're forgiving. */
+ value = "(null)";
+
+ /* If a precision was specified, don't read the string past it. */
+ for (strln = 0; value[strln] != '\0' &&
+ (noprecision || strln < precision); strln++)
+ continue;
+
+ if ((padlen = width - strln) < 0)
+ padlen = 0;
+ if (flags & PRINT_F_MINUS) /* Left justify. */
+ padlen = -padlen;
+
+ while (padlen > 0) { /* Leading spaces. */
+ OUTCHAR(str, *len, size, ' ');
+ padlen--;
+ }
+ while (*value != '\0' && (noprecision || precision-- > 0)) {
+ OUTCHAR(str, *len, size, *value);
+ value++;
+ }
+ while (padlen < 0) { /* Trailing spaces. */
+ OUTCHAR(str, *len, size, ' ');
+ padlen++;
+ }
+}
+
+static void
+fmtint(char *str, size_t *len, size_t size, INTMAX_T value, int base, int width,
+ int precision, int flags)
+{
+ UINTMAX_T uvalue;
+ char iconvert[MAX_CONVERT_LENGTH];
+ char sign = 0;
+ char hexprefix = 0;
+ int spadlen = 0; /* Amount to space pad. */
+ int zpadlen = 0; /* Amount to zero pad. */
+ int pos;
+ int separators = (flags & PRINT_F_QUOTE);
+ int noprecision = (precision == -1);
+
+ if (flags & PRINT_F_UNSIGNED)
+ uvalue = value;
+ else {
+ uvalue = (value >= 0) ? value : -value;
+ if (value < 0)
+ sign = '-';
+ else if (flags & PRINT_F_PLUS) /* Do a sign. */
+ sign = '+';
+ else if (flags & PRINT_F_SPACE)
+ sign = ' ';
+ }
+
+ pos = convert(uvalue, iconvert, sizeof(iconvert), base,
+ flags & PRINT_F_UP);
+
+ if (flags & PRINT_F_NUM && uvalue != 0) {
+ /*
+ * C99 says: "The result is converted to an `alternative form'.
+ * For `o' conversion, it increases the precision, if and only
+ * if necessary, to force the first digit of the result to be a
+ * zero (if the value and precision are both 0, a single 0 is
+ * printed). For `x' (or `X') conversion, a nonzero result has
+ * `0x' (or `0X') prefixed to it." (7.19.6.1, 6)
+ */
+ switch (base) {
+ case 8:
+ if (precision <= pos)
+ precision = pos + 1;
+ break;
+ case 16:
+ hexprefix = (flags & PRINT_F_UP) ? 'X' : 'x';
+ break;
+ }
+ }
+
+ if (separators) /* Get the number of group separators we'll print. */
+ separators = getnumsep(pos);
+
+ zpadlen = precision - pos - separators;
+ spadlen = width /* Minimum field width. */
+ - separators /* Number of separators. */
+ - MAX(precision, pos) /* Number of integer digits. */
+ - ((sign != 0) ? 1 : 0) /* Will we print a sign? */
+ - ((hexprefix != 0) ? 2 : 0); /* Will we print a prefix? */
+
+ if (zpadlen < 0)
+ zpadlen = 0;
+ if (spadlen < 0)
+ spadlen = 0;
+
+ /*
+ * C99 says: "If the `0' and `-' flags both appear, the `0' flag is
+ * ignored. For `d', `i', `o', `u', `x', and `X' conversions, if a
+ * precision is specified, the `0' flag is ignored." (7.19.6.1, 6)
+ */
+ if (flags & PRINT_F_MINUS) /* Left justify. */
+ spadlen = -spadlen;
+ else if (flags & PRINT_F_ZERO && noprecision) {
+ zpadlen += spadlen;
+ spadlen = 0;
+ }
+ while (spadlen > 0) { /* Leading spaces. */
+ OUTCHAR(str, *len, size, ' ');
+ spadlen--;
+ }
+ if (sign != 0) /* Sign. */
+ OUTCHAR(str, *len, size, sign);
+ if (hexprefix != 0) { /* A "0x" or "0X" prefix. */
+ OUTCHAR(str, *len, size, '0');
+ OUTCHAR(str, *len, size, hexprefix);
+ }
+ while (zpadlen > 0) { /* Leading zeros. */
+ OUTCHAR(str, *len, size, '0');
+ zpadlen--;
+ }
+ while (pos > 0) { /* The actual digits. */
+ pos--;
+ OUTCHAR(str, *len, size, iconvert[pos]);
+ if (separators > 0 && pos > 0 && pos % 3 == 0)
+ printsep(str, len, size);
+ }
+ while (spadlen < 0) { /* Trailing spaces. */
+ OUTCHAR(str, *len, size, ' ');
+ spadlen++;
+ }
+}
+
+static void
+fmtflt(char *str, size_t *len, size_t size, LDOUBLE fvalue, int width,
+ int precision, int flags, int *overflow)
+{
+ LDOUBLE ufvalue;
+ UINTMAX_T intpart;
+ UINTMAX_T fracpart;
+ UINTMAX_T mask;
+ const char *infnan = NULL;
+ char iconvert[MAX_CONVERT_LENGTH];
+ char fconvert[MAX_CONVERT_LENGTH];
+ char econvert[4]; /* "e-12" (without nul-termination). */
+ char esign = 0;
+ char sign = 0;
+ int leadfraczeros = 0;
+ int exponent = 0;
+ int emitpoint = 0;
+ int omitzeros = 0;
+ int omitcount = 0;
+ int padlen = 0;
+ int epos = 0;
+ int fpos = 0;
+ int ipos = 0;
+ int separators = (flags & PRINT_F_QUOTE);
+ int estyle = (flags & PRINT_F_TYPE_E);
+#if HAVE_LOCALECONV && HAVE_LCONV_DECIMAL_POINT
+ struct lconv *lc = localeconv();
+#endif /* HAVE_LOCALECONV && HAVE_LCONV_DECIMAL_POINT */
+
+ /*
+ * AIX' man page says the default is 0, but C99 and at least Solaris'
+ * and NetBSD's man pages say the default is 6, and sprintf(3) on AIX
+ * defaults to 6.
+ */
+ if (precision == -1)
+ precision = 6;
+
+ if (fvalue < 0.0)
+ sign = '-';
+ else if (flags & PRINT_F_PLUS) /* Do a sign. */
+ sign = '+';
+ else if (flags & PRINT_F_SPACE)
+ sign = ' ';
+
+ if (ISNAN(fvalue))
+ infnan = (flags & PRINT_F_UP) ? "NAN" : "nan";
+ else if (ISINF(fvalue))
+ infnan = (flags & PRINT_F_UP) ? "INF" : "inf";
+
+ if (infnan != NULL) {
+ if (sign != 0)
+ iconvert[ipos++] = sign;
+ while (*infnan != '\0')
+ iconvert[ipos++] = *infnan++;
+ fmtstr(str, len, size, iconvert, width, ipos, flags);
+ return;
+ }
+
+ /* "%e" (or "%E") or "%g" (or "%G") conversion. */
+ if (flags & PRINT_F_TYPE_E || flags & PRINT_F_TYPE_G) {
+ if (flags & PRINT_F_TYPE_G) {
+ /*
+ * For "%g" (and "%G") conversions, the precision
+ * specifies the number of significant digits, which
+ * includes the digits in the integer part. The
+ * conversion will or will not be using "e-style" (like
+ * "%e" or "%E" conversions) depending on the precision
+ * and on the exponent. However, the exponent can be
+ * affected by rounding the converted value, so we'll
+ * leave this decision for later. Until then, we'll
+ * assume that we're going to do an "e-style" conversion
+ * (in order to get the exponent calculated). For
+ * "e-style", the precision must be decremented by one.
+ */
+ precision--;
+ /*
+ * For "%g" (and "%G") conversions, trailing zeros are
+ * removed from the fractional portion of the result
+ * unless the "#" flag was specified.
+ */
+ if (!(flags & PRINT_F_NUM))
+ omitzeros = 1;
+ }
+ exponent = getexponent(fvalue);
+ estyle = 1;
+ }
+
+again:
+ /*
+ * Sorry, we only support 9, 19, or 38 digits (that is, the number of
+ * digits of the 32-bit, the 64-bit, or the 128-bit UINTMAX_MAX value
+ * minus one) past the decimal point due to our conversion method.
+ */
+ switch (sizeof(UINTMAX_T)) {
+ case 16:
+ if (precision > 38)
+ precision = 38;
+ break;
+ case 8:
+ if (precision > 19)
+ precision = 19;
+ break;
+ default:
+ if (precision > 9)
+ precision = 9;
+ break;
+ }
+
+ ufvalue = (fvalue >= 0.0) ? fvalue : -fvalue;
+ if (estyle) /* We want exactly one integer digit. */
+ ufvalue /= mypow10(exponent);
+
+ if ((intpart = cast(ufvalue)) == UINTMAX_MAX) {
+ *overflow = 1;
+ return;
+ }
+
+ /*
+ * Factor of ten with the number of digits needed for the fractional
+ * part. For example, if the precision is 3, the mask will be 1000.
+ */
+#if defined(PIPE_SUBSYSTEM_WINDOWS_MINIPORT)
+ mask = (unsigned long)mypow10(precision);
+#else
+ mask = (UINTMAX_T)mypow10(precision);
+#endif
+ /*
+ * We "cheat" by converting the fractional part to integer by
+ * multiplying by a factor of ten.
+ */
+ if ((fracpart = myround(mask * (ufvalue - intpart))) >= mask) {
+ /*
+ * For example, ufvalue = 2.99962, intpart = 2, and mask = 1000
+ * (because precision = 3). Now, myround(1000 * 0.99962) will
+ * return 1000. So, the integer part must be incremented by one
+ * and the fractional part must be set to zero.
+ */
+ intpart++;
+ fracpart = 0;
+ if (estyle && intpart == 10) {
+ /*
+ * The value was rounded up to ten, but we only want one
+ * integer digit if using "e-style". So, the integer
+ * part must be set to one and the exponent must be
+ * incremented by one.
+ */
+ intpart = 1;
+ exponent++;
+ }
+ }
+
+ /*
+ * Now that we know the real exponent, we can check whether or not to
+ * use "e-style" for "%g" (and "%G") conversions. If we don't need
+ * "e-style", the precision must be adjusted and the integer and
+ * fractional parts must be recalculated from the original value.
+ *
+ * C99 says: "Let P equal the precision if nonzero, 6 if the precision
+ * is omitted, or 1 if the precision is zero. Then, if a conversion
+ * with style `E' would have an exponent of X:
+ *
+ * - if P > X >= -4, the conversion is with style `f' (or `F') and
+ * precision P - (X + 1).
+ *
+ * - otherwise, the conversion is with style `e' (or `E') and precision
+ * P - 1." (7.19.6.1, 8)
+ *
+ * Note that we had decremented the precision by one.
+ */
+ if (flags & PRINT_F_TYPE_G && estyle &&
+ precision + 1 > exponent && exponent >= -4) {
+ precision -= exponent;
+ estyle = 0;
+ goto again;
+ }
+
+ if (estyle) {
+ if (exponent < 0) {
+ exponent = -exponent;
+ esign = '-';
+ } else
+ esign = '+';
+
+ /*
+ * Convert the exponent. The sizeof(econvert) is 4. So, the
+ * econvert buffer can hold e.g. "e+99" and "e-99". We don't
+ * support an exponent which contains more than two digits.
+ * Therefore, the following stores are safe.
+ */
+ epos = convert(exponent, econvert, 2, 10, 0);
+ /*
+ * C99 says: "The exponent always contains at least two digits,
+ * and only as many more digits as necessary to represent the
+ * exponent." (7.19.6.1, 8)
+ */
+ if (epos == 1)
+ econvert[epos++] = '0';
+ econvert[epos++] = esign;
+ econvert[epos++] = (flags & PRINT_F_UP) ? 'E' : 'e';
+ }
+
+ /* Convert the integer part and the fractional part. */
+ ipos = convert(intpart, iconvert, sizeof(iconvert), 10, 0);
+ if (fracpart != 0) /* convert() would return 1 if fracpart == 0. */
+ fpos = convert(fracpart, fconvert, sizeof(fconvert), 10, 0);
+
+ leadfraczeros = precision - fpos;
+
+ if (omitzeros) {
+ if (fpos > 0) /* Omit trailing fractional part zeros. */
+ while (omitcount < fpos && fconvert[omitcount] == '0')
+ omitcount++;
+ else { /* The fractional part is zero, omit it completely. */
+ omitcount = precision;
+ leadfraczeros = 0;
+ }
+ precision -= omitcount;
+ }
+
+ /*
+ * Print a decimal point if either the fractional part is non-zero
+ * and/or the "#" flag was specified.
+ */
+ if (precision > 0 || flags & PRINT_F_NUM)
+ emitpoint = 1;
+ if (separators) /* Get the number of group separators we'll print. */
+ separators = getnumsep(ipos);
+
+ padlen = width /* Minimum field width. */
+ - ipos /* Number of integer digits. */
+ - epos /* Number of exponent characters. */
+ - precision /* Number of fractional digits. */
+ - separators /* Number of group separators. */
+ - (emitpoint ? 1 : 0) /* Will we print a decimal point? */
+ - ((sign != 0) ? 1 : 0); /* Will we print a sign character? */
+
+ if (padlen < 0)
+ padlen = 0;
+
+ /*
+ * C99 says: "If the `0' and `-' flags both appear, the `0' flag is
+ * ignored." (7.19.6.1, 6)
+ */
+ if (flags & PRINT_F_MINUS) /* Left justify. */
+ padlen = -padlen;
+ else if (flags & PRINT_F_ZERO && padlen > 0) {
+ if (sign != 0) { /* Sign. */
+ OUTCHAR(str, *len, size, sign);
+ sign = 0;
+ }
+ while (padlen > 0) { /* Leading zeros. */
+ OUTCHAR(str, *len, size, '0');
+ padlen--;
+ }
+ }
+ while (padlen > 0) { /* Leading spaces. */
+ OUTCHAR(str, *len, size, ' ');
+ padlen--;
+ }
+ if (sign != 0) /* Sign. */
+ OUTCHAR(str, *len, size, sign);
+ while (ipos > 0) { /* Integer part. */
+ ipos--;
+ OUTCHAR(str, *len, size, iconvert[ipos]);
+ if (separators > 0 && ipos > 0 && ipos % 3 == 0)
+ printsep(str, len, size);
+ }
+ if (emitpoint) { /* Decimal point. */
+#if HAVE_LOCALECONV && HAVE_LCONV_DECIMAL_POINT
+ if (lc->decimal_point != NULL && *lc->decimal_point != '\0')
+ OUTCHAR(str, *len, size, *lc->decimal_point);
+ else /* We'll always print some decimal point character. */
+#endif /* HAVE_LOCALECONV && HAVE_LCONV_DECIMAL_POINT */
+ OUTCHAR(str, *len, size, '.');
+ }
+ while (leadfraczeros > 0) { /* Leading fractional part zeros. */
+ OUTCHAR(str, *len, size, '0');
+ leadfraczeros--;
+ }
+ while (fpos > omitcount) { /* The remaining fractional part. */
+ fpos--;
+ OUTCHAR(str, *len, size, fconvert[fpos]);
+ }
+ while (epos > 0) { /* Exponent. */
+ epos--;
+ OUTCHAR(str, *len, size, econvert[epos]);
+ }
+ while (padlen < 0) { /* Trailing spaces. */
+ OUTCHAR(str, *len, size, ' ');
+ padlen++;
+ }
+}
+
+static void
+printsep(char *str, size_t *len, size_t size)
+{
+#if HAVE_LOCALECONV && HAVE_LCONV_THOUSANDS_SEP
+ struct lconv *lc = localeconv();
+ int i;
+
+ if (lc->thousands_sep != NULL)
+ for (i = 0; lc->thousands_sep[i] != '\0'; i++)
+ OUTCHAR(str, *len, size, lc->thousands_sep[i]);
+ else
+#endif /* HAVE_LOCALECONV && HAVE_LCONV_THOUSANDS_SEP */
+ OUTCHAR(str, *len, size, ',');
+}
+
+static int
+getnumsep(int digits)
+{
+ int separators = (digits - ((digits % 3 == 0) ? 1 : 0)) / 3;
+#if HAVE_LOCALECONV && HAVE_LCONV_THOUSANDS_SEP
+ int strln;
+ struct lconv *lc = localeconv();
+
+ /* We support an arbitrary separator length (including zero). */
+ if (lc->thousands_sep != NULL) {
+ for (strln = 0; lc->thousands_sep[strln] != '\0'; strln++)
+ continue;
+ separators *= strln;
+ }
+#endif /* HAVE_LOCALECONV && HAVE_LCONV_THOUSANDS_SEP */
+ return separators;
+}
+
+static int
+getexponent(LDOUBLE value)
+{
+ LDOUBLE tmp = (value >= 0.0) ? value : -value;
+ int exponent = 0;
+
+ /*
+ * We check for 99 > exponent > -99 in order to work around possible
+ * endless loops which could happen (at least) in the second loop (at
+ * least) if we're called with an infinite value. However, we checked
+ * for infinity before calling this function using our ISINF() macro, so
+ * this might be somewhat paranoid.
+ */
+ while (tmp < 1.0 && tmp > 0.0 && --exponent > -99)
+ tmp *= 10;
+ while (tmp >= 10.0 && ++exponent < 99)
+ tmp /= 10;
+
+ return exponent;
+}
+
+static int
+convert(UINTMAX_T value, char *buf, size_t size, int base, int caps)
+{
+ const char *digits = caps ? "0123456789ABCDEF" : "0123456789abcdef";
+ size_t pos = 0;
+
+ /* We return an unterminated buffer with the digits in reverse order. */
+ do {
+ buf[pos++] = digits[value % base];
+ value /= base;
+ } while (value != 0 && pos < size);
+
+ return (int)pos;
+}
+
+static UINTMAX_T
+cast(LDOUBLE value)
+{
+ UINTMAX_T result;
+
+ /*
+ * We check for ">=" and not for ">" because if UINTMAX_MAX cannot be
+ * represented exactly as an LDOUBLE value (but is less than LDBL_MAX),
+ * it may be increased to the nearest higher representable value for the
+ * comparison (cf. C99: 6.3.1.4, 2). It might then equal the LDOUBLE
+ * value although converting the latter to UINTMAX_T would overflow.
+ */
+ if (value >= UINTMAX_MAX)
+ return UINTMAX_MAX;
+
+#if defined(PIPE_SUBSYSTEM_WINDOWS_MINIPORT)
+ result = (unsigned long)value;
+#else
+ result = (UINTMAX_T)value;
+#endif
+ /*
+ * At least on NetBSD/sparc64 3.0.2 and 4.99.30, casting long double to
+ * an integer type converts e.g. 1.9 to 2 instead of 1 (which violates
+ * the standard). Sigh.
+ */
+ return (result <= value) ? result : result - 1;
+}
+
+static UINTMAX_T
+myround(LDOUBLE value)
+{
+ UINTMAX_T intpart = cast(value);
+
+ return ((value -= intpart) < 0.5) ? intpart : intpart + 1;
+}
+
+static LDOUBLE
+mypow10(int exponent)
+{
+ LDOUBLE result = 1;
+
+ while (exponent > 0) {
+ result *= 10;
+ exponent--;
+ }
+ while (exponent < 0) {
+ result /= 10;
+ exponent++;
+ }
+ return result;
+}
+#endif /* !HAVE_VSNPRINTF */
+
+#if !HAVE_VASPRINTF
+#if NEED_MYMEMCPY
+void *
+mymemcpy(void *dst, void *src, size_t len)
+{
+ const char *from = src;
+ char *to = dst;
+
+ /* No need for optimization, we use this only to replace va_copy(3). */
+ while (len-- > 0)
+ *to++ = *from++;
+ return dst;
+}
+#endif /* NEED_MYMEMCPY */
+
+int
+util_vasprintf(char **ret, const char *format, va_list ap)
+{
+ size_t size;
+ int len;
+ va_list aq;
+
+ VA_COPY(aq, ap);
+ len = vsnprintf(NULL, 0, format, aq);
+ VA_END_COPY(aq);
+ if (len < 0 || (*ret = malloc(size = len + 1)) == NULL)
+ return -1;
+ return vsnprintf(*ret, size, format, ap);
+}
+#endif /* !HAVE_VASPRINTF */
+
+#if !HAVE_SNPRINTF
+#if HAVE_STDARG_H
+int
+util_snprintf(char *str, size_t size, const char *format, ...)
+#else
+int
+util_snprintf(va_alist) va_dcl
+#endif /* HAVE_STDARG_H */
+{
+#if !HAVE_STDARG_H
+ char *str;
+ size_t size;
+ char *format;
+#endif /* HAVE_STDARG_H */
+ va_list ap;
+ int len;
+
+ VA_START(ap, format);
+ VA_SHIFT(ap, str, char *);
+ VA_SHIFT(ap, size, size_t);
+ VA_SHIFT(ap, format, const char *);
+ len = vsnprintf(str, size, format, ap);
+ va_end(ap);
+ return len;
+}
+#endif /* !HAVE_SNPRINTF */
+
+#if !HAVE_ASPRINTF
+#if HAVE_STDARG_H
+int
+util_asprintf(char **ret, const char *format, ...)
+#else
+int
+util_asprintf(va_alist) va_dcl
+#endif /* HAVE_STDARG_H */
+{
+#if !HAVE_STDARG_H
+ char **ret;
+ char *format;
+#endif /* HAVE_STDARG_H */
+ va_list ap;
+ int len;
+
+ VA_START(ap, format);
+ VA_SHIFT(ap, ret, char **);
+ VA_SHIFT(ap, format, const char *);
+ len = vasprintf(ret, format, ap);
+ va_end(ap);
+ return len;
+}
+#endif /* !HAVE_ASPRINTF */
+#else /* Dummy declaration to avoid empty translation unit warnings. */
+int main(void);
+#endif /* !HAVE_SNPRINTF || !HAVE_VSNPRINTF || !HAVE_ASPRINTF || [...] */
+
+
+/* vim: set joinspaces textwidth=80: */
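A short usage sketch for the wrappers (illustrative only; the helper below is hypothetical, and it assumes the non-Windows branch of u_string.h maps these names onto the C library, which is outside this hunk). The implementation follows C99, so the return value is the length the complete output would have had, which makes truncation easy to detect:

#include "util/u_string.h"

/* Hypothetical helper: format "WxH" into a caller-provided buffer and
 * report whether the result fit, using the C99 return-value convention. */
static boolean
format_size(char *buf, size_t bufsize, int width, int height)
{
   int len = util_snprintf(buf, bufsize, "%dx%d", width, height);

   return (len >= 0 && (size_t)len < bufsize) ? TRUE : FALSE;
}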
diff --git a/src/gallium/auxiliary/util/u_sse.h b/src/gallium/auxiliary/util/u_sse.h
new file mode 100644
index 0000000000..e2a8491e62
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_sse.h
@@ -0,0 +1,77 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * SSE intrinsics portability header.
+ *
+ * Although the SSE intrinsics are supported by all modern x86 and x86-64
+ * compilers, some intrinsics are missing from some implementations
+ * (especially older MSVC versions). This header abstracts that away.
+ */
+
+#ifndef U_SSE_H_
+#define U_SSE_H_
+
+#include "pipe/p_config.h"
+
+#if defined(PIPE_ARCH_SSE)
+
+#include <xmmintrin.h>
+#include <emmintrin.h>
+
+
+/* MSVC before VC8 does not support the _mm_castxxx_yyy */
+#if defined(_MSC_VER) && _MSC_VER < 1500
+
+union __declspec(align(16)) m128_types {
+ __m128 m128;
+ __m128i m128i;
+ __m128d m128d;
+};
+
+static __inline __m128
+_mm_castsi128_ps(__m128i a)
+{
+ union m128_types u;
+ u.m128i = a;
+ return u.m128;
+}
+
+static __inline __m128i
+_mm_castps_si128(__m128 a)
+{
+ union m128_types u;
+ u.m128 = a;
+ return u.m128i;
+}
+
+#endif /* defined(_MSC_VER) && _MSC_VER < 1500 */
+
+#endif /* PIPE_ARCH_SSE */
+
+#endif /* U_SSE_H_ */
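A small sketch of why the cast wrappers matter (illustrative only, assuming PIPE_ARCH_SSE is defined and SSE2 code generation is enabled): bit-level operations on packed floats need a pure reinterpretation between __m128 and __m128i, which is exactly what the fallbacks above provide on old MSVC.

#include "pipe/p_compiler.h"
#include "util/u_sse.h"

#if defined(PIPE_ARCH_SSE)
/* Clear the sign bit of four packed floats: reinterpret as integers,
 * mask, and reinterpret back.  No int<->float conversion takes place. */
static INLINE __m128
abs4_ps(__m128 x)
{
   const __m128i sign_mask = _mm_set1_epi32(0x7fffffff);
   __m128i bits = _mm_castps_si128(x);
   bits = _mm_and_si128(bits, sign_mask);
   return _mm_castsi128_ps(bits);
}
#endif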
diff --git a/src/gallium/auxiliary/util/u_stream.h b/src/gallium/auxiliary/util/u_stream.h
new file mode 100644
index 0000000000..a9d0f0121a
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_stream.h
@@ -0,0 +1,61 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Cross-platform sequential access stream abstraction.
+ */
+
+#ifndef U_STREAM_H
+#define U_STREAM_H
+
+
+#include "pipe/p_compiler.h"
+
+
+struct util_stream;
+
+
+/**
+ * Create a stream.
+ * @param filename relative or absolute path (an absolute path is recommended on Windows)
+ * @param max_size maximum file size in bytes (0 for a growable stream).
+ */
+struct util_stream *
+util_stream_create(const char *filename, size_t max_size);
+
+boolean
+util_stream_write(struct util_stream *stream, const void *data, size_t size);
+
+void
+util_stream_flush(struct util_stream *stream);
+
+void
+util_stream_close(struct util_stream *stream);
+
+
+#endif /* U_STREAM_H */
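The intended calling sequence is create, write, flush, close. A minimal sketch (illustrative only; the helper name is made up and error handling is reduced to an early return):

#include "util/u_stream.h"

/* Hypothetical dump helper built on the stream API above. */
static void
dump_blob(const char *filename, const void *data, size_t size)
{
   /* Passing 0 asks for a growable stream with no preset maximum size. */
   struct util_stream *stream = util_stream_create(filename, 0);

   if (!stream)
      return;

   if (util_stream_write(stream, data, size))
      util_stream_flush(stream);

   util_stream_close(stream);
}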
diff --git a/src/gallium/auxiliary/util/u_stream_stdc.c b/src/gallium/auxiliary/util/u_stream_stdc.c
new file mode 100644
index 0000000000..5cd05b2904
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_stream_stdc.c
@@ -0,0 +1,106 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Stream implementation based on the Standard C Library.
+ */
+
+#include "pipe/p_config.h"
+
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_SUBSYSTEM_WINDOWS_USER) || defined(PIPE_OS_SOLARIS) || defined(PIPE_OS_HAIKU)
+
+#include <stdio.h>
+
+#include "util/u_memory.h"
+
+#include "u_stream.h"
+
+
+struct util_stream
+{
+ FILE *file;
+};
+
+
+struct util_stream *
+util_stream_create(const char *filename, size_t max_size)
+{
+ struct util_stream *stream;
+
+ (void)max_size;
+
+ stream = CALLOC_STRUCT(util_stream);
+ if(!stream)
+ goto error1;
+
+ stream->file = fopen(filename, "w");
+ if(!stream->file)
+ goto error2;
+
+ return stream;
+
+error2:
+ FREE(stream);
+error1:
+ return NULL;
+}
+
+
+boolean
+util_stream_write(struct util_stream *stream, const void *data, size_t size)
+{
+ if(!stream)
+ return FALSE;
+
+ return fwrite(data, size, 1, stream->file) == 1 ? TRUE : FALSE;
+}
+
+
+void
+util_stream_flush(struct util_stream *stream)
+{
+ if(!stream)
+ return;
+
+ fflush(stream->file);
+}
+
+
+void
+util_stream_close(struct util_stream *stream)
+{
+ if(!stream)
+ return;
+
+ fclose(stream->file);
+
+ FREE(stream);
+}
+
+
+#endif
diff --git a/src/gallium/auxiliary/util/u_stream_wd.c b/src/gallium/auxiliary/util/u_stream_wd.c
new file mode 100644
index 0000000000..864489e775
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_stream_wd.c
@@ -0,0 +1,224 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Stream implementation for the Windows Display driver.
+ */
+
+#include "pipe/p_config.h"
+
+#if defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY)
+
+#include <windows.h>
+#include <winddi.h>
+
+#include "util/u_memory.h"
+#include "util/u_string.h"
+
+#include "u_stream.h"
+
+
+#define MAP_FILE_SIZE (4*1024*1024)
+
+
+struct util_stream
+{
+ char filename[MAX_PATH + 1];
+ WCHAR wFileName[MAX_PATH + 1];
+ boolean growable;
+ size_t map_size;
+ ULONG_PTR iFile;
+ char *pMap;
+ size_t written;
+ unsigned suffix;
+};
+
+
+static INLINE boolean
+util_stream_map(struct util_stream *stream)
+{
+ ULONG BytesInUnicodeString;
+ static char filename[MAX_PATH + 1];
+ unsigned filename_len;
+
+ if(stream->growable)
+ filename_len = util_snprintf(filename,
+ sizeof(filename),
+ "%s.%04x",
+ stream->filename,
+ stream->suffix++);
+ else
+ filename_len = util_snprintf(filename,
+ sizeof(filename),
+ "%s",
+ stream->filename);
+
+ EngMultiByteToUnicodeN(
+ stream->wFileName,
+ sizeof(stream->wFileName),
+ &BytesInUnicodeString,
+ filename,
+ filename_len);
+
+ stream->pMap = EngMapFile(stream->wFileName, stream->map_size, &stream->iFile);
+ if(!stream->pMap)
+ return FALSE;
+
+ memset(stream->pMap, 0, stream->map_size);
+ stream->written = 0;
+
+ return TRUE;
+}
+
+
+static INLINE void
+util_stream_unmap(struct util_stream *stream)
+{
+ EngUnmapFile(stream->iFile);
+ if(stream->written < stream->map_size) {
+ /* Truncate file size */
+ stream->pMap = EngMapFile(stream->wFileName, stream->written, &stream->iFile);
+ if(stream->pMap)
+ EngUnmapFile(stream->iFile);
+ }
+
+ stream->pMap = NULL;
+}
+
+
+static INLINE void
+util_stream_full_qualified_filename(char *dst, size_t size, const char *src)
+{
+ boolean need_drive, need_root;
+
+ if((('A' <= src[0] && src[0] <= 'Z') || ('a' <= src[0] && src[0] <= 'z')) && src[1] == ':') {
+ need_drive = FALSE;
+ need_root = src[2] == '\\' ? FALSE : TRUE;
+ }
+ else {
+ need_drive = TRUE;
+ need_root = src[0] == '\\' ? FALSE : TRUE;
+ }
+
+ util_snprintf(dst, size,
+ "\\??\\%s%s%s",
+ need_drive ? "C:" : "",
+ need_root ? "\\" : "",
+ src);
+}
+
+
+struct util_stream *
+util_stream_create(const char *filename, size_t max_size)
+{
+ struct util_stream *stream;
+
+ stream = CALLOC_STRUCT(util_stream);
+ if(!stream)
+ goto error1;
+
+ util_stream_full_qualified_filename(stream->filename,
+ sizeof(stream->filename),
+ filename);
+
+ if(max_size) {
+ stream->growable = FALSE;
+ stream->map_size = max_size;
+ }
+ else {
+ stream->growable = TRUE;
+ stream->map_size = MAP_FILE_SIZE;
+ }
+
+ if(!util_stream_map(stream))
+ goto error2;
+
+ return stream;
+
+error2:
+ FREE(stream);
+error1:
+ return NULL;
+}
+
+
+static INLINE void
+util_stream_copy(struct util_stream *stream, const char *data, size_t size)
+{
+ assert(stream->written + size <= stream->map_size);
+ memcpy(stream->pMap + stream->written, data, size);
+ stream->written += size;
+}
+
+
+boolean
+util_stream_write(struct util_stream *stream, const void *data, size_t size)
+{
+ if(!stream)
+ return FALSE;
+
+ if(!stream->pMap)
+ return FALSE;
+
+ while(stream->written + size > stream->map_size) {
+ size_t step = stream->map_size - stream->written;
+ util_stream_copy(stream, data, step);
+ data = (const char *)data + step;
+ size -= step;
+
+ util_stream_unmap(stream);
+ if(!stream->growable || !util_stream_map(stream))
+ return FALSE;
+ }
+
+ util_stream_copy(stream, data, size);
+
+ return TRUE;
+}
+
+
+void
+util_stream_flush(struct util_stream *stream)
+{
+ (void)stream;
+}
+
+
+void
+util_stream_close(struct util_stream *stream)
+{
+ if(!stream)
+ return;
+
+ util_stream_unmap(stream);
+
+ FREE(stream);
+}
+
+
+#endif
diff --git a/src/gallium/auxiliary/util/u_string.h b/src/gallium/auxiliary/util/u_string.h
new file mode 100644
index 0000000000..cc7992d739
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_string.h
@@ -0,0 +1,220 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Platform-independent functions for string manipulation.
+ *
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef U_STRING_H_
+#define U_STRING_H_
+
+#if !defined(WIN32) && !defined(XF86_LIBC_H)
+#include <stdio.h>
+#endif
+#include <stddef.h>
+#include <stdarg.h>
+
+#include "pipe/p_compiler.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#ifdef WIN32
+
+int util_vsnprintf(char *, size_t, const char *, va_list);
+int util_snprintf(char *str, size_t size, const char *format, ...);
+
+static INLINE void
+util_vsprintf(char *str, const char *format, va_list ap)
+{
+ util_vsnprintf(str, (size_t)-1, format, ap);
+}
+
+static INLINE void
+util_sprintf(char *str, const char *format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+ util_vsnprintf(str, (size_t)-1, format, ap);
+ va_end(ap);
+}
+
+static INLINE char *
+util_strchr(const char *s, char c)
+{
+ while(*s) {
+ if(*s == c)
+ return (char *)s;
+ ++s;
+ }
+ return NULL;
+}
+
+static INLINE char*
+util_strncat(char *dst, const char *src, size_t n)
+{
+ char *p = dst + strlen(dst);
+ const char *q = src;
+ size_t i;
+
+ for (i = 0; i < n && *q != '\0'; ++i)
+ *p++ = *q++;
+ *p = '\0';
+
+ return dst;
+}
+
+static INLINE int
+util_strcmp(const char *s1, const char *s2)
+{
+ unsigned char u1, u2;
+
+ while (1) {
+ u1 = (unsigned char) *s1++;
+ u2 = (unsigned char) *s2++;
+ if (u1 != u2)
+ return u1 - u2;
+ if (u1 == '\0')
+ return 0;
+ }
+ return 0;
+}
+
+static INLINE int
+util_strncmp(const char *s1, const char *s2, size_t n)
+{
+ unsigned char u1, u2;
+
+ while (n-- > 0) {
+ u1 = (unsigned char) *s1++;
+ u2 = (unsigned char) *s2++;
+ if (u1 != u2)
+ return u1 - u2;
+ if (u1 == '\0')
+ return 0;
+ }
+ return 0;
+}
+
+static INLINE char *
+util_strstr(const char *haystack, const char *needle)
+{
+ const char *p = haystack;
+ size_t len = strlen(needle);
+
+ for (; (p = util_strchr(p, *needle)) != 0; p++) {
+ if (util_strncmp(p, needle, len) == 0) {
+ return (char *)p;
+ }
+ }
+ return NULL;
+}
+
+static INLINE void *
+util_memmove(void *dest, const void *src, size_t n)
+{
+ char *p = (char *)dest;
+ const char *q = (const char *)src;
+ if (dest < src) {
+ while (n--)
+ *p++ = *q++;
+ }
+ else
+ {
+ p += n;
+ q += n;
+ while (n--)
+ *--p = *--q;
+ }
+ return dest;
+}
+
+
+#else
+
+#define util_vsnprintf vsnprintf
+#define util_snprintf snprintf
+#define util_vsprintf vsprintf
+#define util_sprintf sprintf
+#define util_strchr strchr
+#define util_strcmp strcmp
+#define util_strncmp strncmp
+#define util_strncat strncat
+#define util_strstr strstr
+#define util_memmove memmove
+
+#endif
+
+
+/**
+ * Printable string buffer
+ */
+struct util_strbuf
+{
+ char *str;
+ char *ptr;
+ size_t left;
+};
+
+
+static INLINE void
+util_strbuf_init(struct util_strbuf *sbuf, char *str, size_t size)
+{
+ sbuf->str = str;
+ sbuf->str[0] = 0;
+ sbuf->ptr = sbuf->str;
+ sbuf->left = size;
+}
+
+
+static INLINE void
+util_strbuf_printf(struct util_strbuf *sbuf, const char *format, ...)
+{
+ if(sbuf->left > 1) {
+ size_t written;
+ va_list ap;
+ va_start(ap, format);
+ written = util_vsnprintf(sbuf->ptr, sbuf->left, format, ap);
+ va_end(ap);
+ sbuf->ptr += written;
+ sbuf->left -= written;
+ }
+}
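+
+
+/*
+ * Example (illustrative; msg, width and height are placeholder names):
+ *
+ *    char msg[128];
+ *    struct util_strbuf sbuf;
+ *    util_strbuf_init(&sbuf, msg, sizeof(msg));
+ *    util_strbuf_printf(&sbuf, "size = %ux%u", width, height);
+ *    // msg now holds the bounded, NUL-terminated result
+ */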
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_STRING_H_ */
diff --git a/src/gallium/auxiliary/util/u_surface.c b/src/gallium/auxiliary/util/u_surface.c
new file mode 100644
index 0000000000..85e443204e
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_surface.c
@@ -0,0 +1,113 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Surface utility functions.
+ *
+ * @author Brian Paul
+ */
+
+
+#include "pipe/p_screen.h"
+#include "pipe/p_state.h"
+#include "pipe/p_defines.h"
+
+#include "util/u_surface.h"
+
+
+/**
+ * Helper to quickly create an RGBA rendering surface of a certain size.
+ * \param textureOut returns the new texture
+ * \param surfaceOut returns the new surface
+ * \return TRUE for success, FALSE if failure
+ */
+boolean
+util_create_rgba_surface(struct pipe_screen *screen,
+ uint width, uint height,
+ struct pipe_texture **textureOut,
+ struct pipe_surface **surfaceOut)
+{
+ static const enum pipe_format rgbaFormats[] = {
+ PIPE_FORMAT_A8R8G8B8_UNORM,
+ PIPE_FORMAT_B8G8R8A8_UNORM,
+ PIPE_FORMAT_R8G8B8A8_UNORM,
+ PIPE_FORMAT_NONE
+ };
+ const uint target = PIPE_TEXTURE_2D;
+ const uint usage = PIPE_TEXTURE_USAGE_RENDER_TARGET;
+ enum pipe_format format = PIPE_FORMAT_NONE;
+ struct pipe_texture templ;
+ uint i;
+
+ /* Choose surface format */
+ for (i = 0; rgbaFormats[i]; i++) {
+ if (screen->is_format_supported(screen, rgbaFormats[i],
+ target, usage, 0)) {
+ format = rgbaFormats[i];
+ break;
+ }
+ }
+ if (format == PIPE_FORMAT_NONE)
+ return FALSE; /* unable to get an rgba format!?! */
+
+ /* create texture */
+ memset(&templ, 0, sizeof(templ));
+ templ.target = target;
+ templ.format = format;
+ templ.last_level = 0;
+ templ.width[0] = width;
+ templ.height[0] = height;
+ templ.depth[0] = 1;
+ pf_get_block(format, &templ.block);
+ templ.tex_usage = usage;
+
+ *textureOut = screen->texture_create(screen, &templ);
+ if (!*textureOut)
+ return FALSE;
+
+ /* create surface / view into texture */
+ *surfaceOut = screen->get_tex_surface(screen, *textureOut, 0, 0, 0, PIPE_BUFFER_USAGE_GPU_WRITE);
+ if (!*surfaceOut) {
+ pipe_texture_reference(textureOut, NULL);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+
+/**
+ * Release the surface and texture from util_create_rgba_surface().
+ */
+void
+util_destroy_rgba_surface(struct pipe_texture *texture,
+ struct pipe_surface *surface)
+{
+ pipe_surface_reference(&surface, NULL);
+ pipe_texture_reference(&texture, NULL);
+}
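+
+
+/*
+ * Example (illustrative; "screen" is assumed to be a valid pipe_screen):
+ *
+ *    struct pipe_texture *tex = NULL;
+ *    struct pipe_surface *surf = NULL;
+ *    if (util_create_rgba_surface(screen, 256, 256, &tex, &surf)) {
+ *       // ... render to surf ...
+ *       util_destroy_rgba_surface(tex, surf);
+ *    }
+ */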
+
diff --git a/src/gallium/auxiliary/util/u_surface.h b/src/gallium/auxiliary/util/u_surface.h
new file mode 100644
index 0000000000..ce84ed7ad0
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_surface.h
@@ -0,0 +1,69 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef U_SURFACE_H
+#define U_SURFACE_H
+
+
+#include "pipe/p_compiler.h"
+
+
+struct pipe_screen;
+struct pipe_texture;
+struct pipe_surface;
+
+
+/**
+ * Are s1 and s2 the same surface?
+ * Surfaces are basically views into textures so check if the two surfaces
+ * name the same part of the same texture.
+ */
+static INLINE boolean
+util_same_surface(const struct pipe_surface *s1, const struct pipe_surface *s2)
+{
+ return (s1->texture == s2->texture &&
+ s1->face == s2->face &&
+ s1->level == s2->level &&
+ s1->zslice == s2->zslice);
+}
+
+
+
+
+extern boolean
+util_create_rgba_surface(struct pipe_screen *screen,
+ uint width, uint height,
+ struct pipe_texture **textureOut,
+ struct pipe_surface **surfaceOut);
+
+
+extern void
+util_destroy_rgba_surface(struct pipe_texture *texture,
+ struct pipe_surface *surface);
+
+
+#endif /* U_SURFACE_H */
diff --git a/src/gallium/auxiliary/util/u_tile.c b/src/gallium/auxiliary/util/u_tile.c
new file mode 100644
index 0000000000..0d6489c26e
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_tile.c
@@ -0,0 +1,1526 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * RGBA/float tile get/put functions.
+ * Usable both by drivers and state trackers.
+ */
+
+
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "util/u_rect.h"
+#include "util/u_tile.h"
+
+
+/**
+ * Move raw block of pixels from transfer object to user memory.
+ */
+void
+pipe_get_tile_raw(struct pipe_transfer *pt,
+ uint x, uint y, uint w, uint h,
+ void *dst, int dst_stride)
+{
+ struct pipe_screen *screen = pt->texture->screen;
+ const void *src;
+
+ if (dst_stride == 0)
+ dst_stride = pf_get_nblocksx(&pt->block, w) * pt->block.size;
+
+ if (pipe_clip_tile(x, y, &w, &h, pt))
+ return;
+
+ src = screen->transfer_map(screen, pt);
+ assert(src);
+ if(!src)
+ return;
+
+ util_copy_rect(dst, &pt->block, dst_stride, 0, 0, w, h, src, pt->stride, x, y);
+
+ screen->transfer_unmap(screen, pt);
+}
+
+
+/**
+ * Move raw block of pixels from user memory to transfer object.
+ */
+void
+pipe_put_tile_raw(struct pipe_transfer *pt,
+ uint x, uint y, uint w, uint h,
+ const void *src, int src_stride)
+{
+ struct pipe_screen *screen = pt->texture->screen;
+ void *dst;
+
+ if (src_stride == 0)
+ src_stride = pf_get_nblocksx(&pt->block, w) * pt->block.size;
+
+ if (pipe_clip_tile(x, y, &w, &h, pt))
+ return;
+
+ dst = screen->transfer_map(screen, pt);
+ assert(dst);
+ if(!dst)
+ return;
+
+ util_copy_rect(dst, &pt->block, pt->stride, x, y, w, h, src, src_stride, 0, 0);
+
+ screen->transfer_unmap(screen, pt);
+}
+
+
+
+
+/** Convert a short in [-32768,32767] to a float in [-1.0,1.0] */
+#define SHORT_TO_FLOAT(S) ((2.0F * (S) + 1.0F) * (1.0F/65535.0F))
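+/* e.g. SHORT_TO_FLOAT(32767) == 1.0F and SHORT_TO_FLOAT(-32768) == -1.0F;
+ * the +1.0F bias keeps the mapping symmetric around zero. */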
+
+#define UNCLAMPED_FLOAT_TO_SHORT(us, f) \
+ us = ( (short) ( CLAMP((f), -1.0, 1.0) * 32767.0F) )
+
+
+
+/*** PIPE_FORMAT_A8R8G8B8_UNORM ***/
+
+static void
+a8r8g8b8_get_tile_rgba(const unsigned *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ const unsigned pixel = *src++;
+ pRow[0] = ubyte_to_float((pixel >> 16) & 0xff);
+ pRow[1] = ubyte_to_float((pixel >> 8) & 0xff);
+ pRow[2] = ubyte_to_float((pixel >> 0) & 0xff);
+ pRow[3] = ubyte_to_float((pixel >> 24) & 0xff);
+ }
+ p += dst_stride;
+ }
+}
+
+
+static void
+a8r8g8b8_put_tile_rgba(unsigned *dst,
+ unsigned w, unsigned h,
+ const float *p,
+ unsigned src_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ const float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ unsigned r, g, b, a;
+ r = float_to_ubyte(pRow[0]);
+ g = float_to_ubyte(pRow[1]);
+ b = float_to_ubyte(pRow[2]);
+ a = float_to_ubyte(pRow[3]);
+ *dst++ = (a << 24) | (r << 16) | (g << 8) | b;
+ }
+ p += src_stride;
+ }
+}
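+
+
+/*
+ * Worked example: the packed A8R8G8B8 value 0x80ff0000 unpacks above to
+ * roughly (r, g, b, a) = (1.0, 0.0, 0.0, 0.5), and packing that tuple back
+ * yields 0x80ff0000 again.
+ */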
+
+
+/*** PIPE_FORMAT_X8R8G8B8_UNORM ***/
+
+static void
+x8r8g8b8_get_tile_rgba(const unsigned *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ const unsigned pixel = *src++;
+ pRow[0] = ubyte_to_float((pixel >> 16) & 0xff);
+ pRow[1] = ubyte_to_float((pixel >> 8) & 0xff);
+ pRow[2] = ubyte_to_float((pixel >> 0) & 0xff);
+ pRow[3] = 1.0F;
+ }
+ p += dst_stride;
+ }
+}
+
+
+static void
+x8r8g8b8_put_tile_rgba(unsigned *dst,
+ unsigned w, unsigned h,
+ const float *p,
+ unsigned src_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ const float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ unsigned r, g, b;
+ r = float_to_ubyte(pRow[0]);
+ g = float_to_ubyte(pRow[1]);
+ b = float_to_ubyte(pRow[2]);
+ *dst++ = (0xff << 24) | (r << 16) | (g << 8) | b;
+ }
+ p += src_stride;
+ }
+}
+
+
+/*** PIPE_FORMAT_B8G8R8A8_UNORM ***/
+
+static void
+b8g8r8a8_get_tile_rgba(const unsigned *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ const unsigned pixel = *src++;
+ pRow[0] = ubyte_to_float((pixel >> 8) & 0xff);
+ pRow[1] = ubyte_to_float((pixel >> 16) & 0xff);
+ pRow[2] = ubyte_to_float((pixel >> 24) & 0xff);
+ pRow[3] = ubyte_to_float((pixel >> 0) & 0xff);
+ }
+ p += dst_stride;
+ }
+}
+
+
+static void
+b8g8r8a8_put_tile_rgba(unsigned *dst,
+ unsigned w, unsigned h,
+ const float *p,
+ unsigned src_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ const float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ unsigned r, g, b, a;
+ r = float_to_ubyte(pRow[0]);
+ g = float_to_ubyte(pRow[1]);
+ b = float_to_ubyte(pRow[2]);
+ a = float_to_ubyte(pRow[3]);
+ *dst++ = (b << 24) | (g << 16) | (r << 8) | a;
+ }
+ p += src_stride;
+ }
+}
+
+
+/*** PIPE_FORMAT_A1R5G5B5_UNORM ***/
+
+static void
+a1r5g5b5_get_tile_rgba(const ushort *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ const ushort pixel = *src++;
+ pRow[0] = ((pixel >> 10) & 0x1f) * (1.0f / 31.0f);
+ pRow[1] = ((pixel >> 5) & 0x1f) * (1.0f / 31.0f);
+ pRow[2] = ((pixel ) & 0x1f) * (1.0f / 31.0f);
+ pRow[3] = ((pixel >> 15) ) * 1.0f;
+ }
+ p += dst_stride;
+ }
+}
+
+
+static void
+a1r5g5b5_put_tile_rgba(ushort *dst,
+ unsigned w, unsigned h,
+ const float *p,
+ unsigned src_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ const float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ unsigned r, g, b, a;
+ r = float_to_ubyte(pRow[0]);
+ g = float_to_ubyte(pRow[1]);
+ b = float_to_ubyte(pRow[2]);
+ a = float_to_ubyte(pRow[3]);
+ r = r >> 3; /* 5 bits */
+ g = g >> 3; /* 5 bits */
+ b = b >> 3; /* 5 bits */
+ a = a >> 7; /* 1 bit */
+ *dst++ = (a << 15) | (r << 10) | (g << 5) | b;
+ }
+ p += src_stride;
+ }
+}
+
+
+/*** PIPE_FORMAT_A4R4G4B4_UNORM ***/
+
+static void
+a4r4g4b4_get_tile_rgba(const ushort *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ const ushort pixel = *src++;
+ pRow[0] = ((pixel >> 8) & 0xf) * (1.0f / 15.0f);
+ pRow[1] = ((pixel >> 4) & 0xf) * (1.0f / 15.0f);
+ pRow[2] = ((pixel ) & 0xf) * (1.0f / 15.0f);
+ pRow[3] = ((pixel >> 12) ) * (1.0f / 15.0f);
+ }
+ p += dst_stride;
+ }
+}
+
+
+static void
+a4r4g4b4_put_tile_rgba(ushort *dst,
+ unsigned w, unsigned h,
+ const float *p,
+ unsigned src_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ const float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ unsigned r, g, b, a;
+ r = float_to_ubyte(pRow[0]);
+ g = float_to_ubyte(pRow[1]);
+ b = float_to_ubyte(pRow[2]);
+ a = float_to_ubyte(pRow[3]);
+ r >>= 4;
+ g >>= 4;
+ b >>= 4;
+ a >>= 4;
+         *dst++ = (a << 12) | (r << 8) | (g << 4) | b;
+ }
+ p += src_stride;
+ }
+}
+
+
+/*** PIPE_FORMAT_R5G6B5_UNORM ***/
+
+static void
+r5g6b5_get_tile_rgba(const ushort *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ const ushort pixel = *src++;
+ pRow[0] = ((pixel >> 11) & 0x1f) * (1.0f / 31.0f);
+ pRow[1] = ((pixel >> 5) & 0x3f) * (1.0f / 63.0f);
+ pRow[2] = ((pixel ) & 0x1f) * (1.0f / 31.0f);
+ pRow[3] = 1.0f;
+ }
+ p += dst_stride;
+ }
+}
+
+
+static void
+r5g6b5_put_tile_rgba(ushort *dst,
+ unsigned w, unsigned h,
+ const float *p,
+ unsigned src_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ const float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ uint r = (uint) (CLAMP(pRow[0], 0.0, 1.0) * 31.0);
+ uint g = (uint) (CLAMP(pRow[1], 0.0, 1.0) * 63.0);
+ uint b = (uint) (CLAMP(pRow[2], 0.0, 1.0) * 31.0);
+ *dst++ = (r << 11) | (g << 5) | (b);
+ }
+ p += src_stride;
+ }
+}
+
+
+
+/*** PIPE_FORMAT_R8G8B8_UNORM ***/
+
+static void
+r8g8b8_get_tile_rgba(const ubyte *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ pRow[0] = ubyte_to_float(src[0]);
+ pRow[1] = ubyte_to_float(src[1]);
+ pRow[2] = ubyte_to_float(src[2]);
+ pRow[3] = 1.0f;
+ src += 3;
+ }
+ p += dst_stride;
+ }
+}
+
+
+static void
+r8g8b8_put_tile_rgba(ubyte *dst,
+ unsigned w, unsigned h,
+ const float *p,
+ unsigned src_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ const float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ dst[0] = float_to_ubyte(pRow[0]);
+ dst[1] = float_to_ubyte(pRow[1]);
+ dst[2] = float_to_ubyte(pRow[2]);
+ dst += 3;
+ }
+ p += src_stride;
+ }
+}
+
+
+
+/*** PIPE_FORMAT_Z16_UNORM ***/
+
+/**
+ * Return each Z value as four floats in [0,1].
+ */
+static void
+z16_get_tile_rgba(const ushort *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ const float scale = 1.0f / 65535.0f;
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ pRow[0] =
+ pRow[1] =
+ pRow[2] =
+ pRow[3] = *src++ * scale;
+ }
+ p += dst_stride;
+ }
+}
+
+
+
+
+/*** PIPE_FORMAT_L8_UNORM ***/
+
+static void
+l8_get_tile_rgba(const ubyte *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, src++, pRow += 4) {
+ pRow[0] =
+ pRow[1] =
+ pRow[2] = ubyte_to_float(*src);
+ pRow[3] = 1.0;
+ }
+ p += dst_stride;
+ }
+}
+
+
+static void
+l8_put_tile_rgba(ubyte *dst,
+ unsigned w, unsigned h,
+ const float *p,
+ unsigned src_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ const float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ unsigned r;
+ r = float_to_ubyte(pRow[0]);
+ *dst++ = (ubyte) r;
+ }
+ p += src_stride;
+ }
+}
+
+
+
+/*** PIPE_FORMAT_A8_UNORM ***/
+
+static void
+a8_get_tile_rgba(const ubyte *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, src++, pRow += 4) {
+ pRow[0] =
+ pRow[1] =
+ pRow[2] = 0.0;
+ pRow[3] = ubyte_to_float(*src);
+ }
+ p += dst_stride;
+ }
+}
+
+
+static void
+a8_put_tile_rgba(ubyte *dst,
+ unsigned w, unsigned h,
+ const float *p,
+ unsigned src_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ const float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ unsigned a;
+ a = float_to_ubyte(pRow[3]);
+ *dst++ = (ubyte) a;
+ }
+ p += src_stride;
+ }
+}
+
+
+
+/*** PIPE_FORMAT_R16_SNORM ***/
+
+static void
+r16_get_tile_rgba(const short *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, src++, pRow += 4) {
+ pRow[0] = SHORT_TO_FLOAT(src[0]);
+ pRow[1] =
+ pRow[2] = 0.0;
+ pRow[3] = 1.0;
+ }
+ p += dst_stride;
+ }
+}
+
+
+static void
+r16_put_tile_rgba(short *dst,
+ unsigned w, unsigned h,
+ const float *p,
+ unsigned src_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ const float *pRow = p;
+ for (j = 0; j < w; j++, dst++, pRow += 4) {
+ UNCLAMPED_FLOAT_TO_SHORT(dst[0], pRow[0]);
+ }
+ p += src_stride;
+ }
+}
+
+
+/*** PIPE_FORMAT_R16G16B16A16_SNORM ***/
+
+static void
+r16g16b16a16_get_tile_rgba(const short *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, src += 4, pRow += 4) {
+ pRow[0] = SHORT_TO_FLOAT(src[0]);
+ pRow[1] = SHORT_TO_FLOAT(src[1]);
+ pRow[2] = SHORT_TO_FLOAT(src[2]);
+ pRow[3] = SHORT_TO_FLOAT(src[3]);
+ }
+ p += dst_stride;
+ }
+}
+
+
+static void
+r16g16b16a16_put_tile_rgba(short *dst,
+ unsigned w, unsigned h,
+ const float *p,
+ unsigned src_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ const float *pRow = p;
+ for (j = 0; j < w; j++, dst += 4, pRow += 4) {
+ UNCLAMPED_FLOAT_TO_SHORT(dst[0], pRow[0]);
+ UNCLAMPED_FLOAT_TO_SHORT(dst[1], pRow[1]);
+ UNCLAMPED_FLOAT_TO_SHORT(dst[2], pRow[2]);
+ UNCLAMPED_FLOAT_TO_SHORT(dst[3], pRow[3]);
+ }
+ p += src_stride;
+ }
+}
+
+
+/*** PIPE_FORMAT_R8G8B8A8_SRGB ***/
+
+/**
+ * Convert an 8-bit sRGB value from non-linear space to a
+ * linear RGB value in [0, 1].
+ * Implemented with a 256-entry lookup table.
+ */
+static INLINE float
+srgb_to_linear(ubyte cs8)
+{
+ static float table[256];
+ static boolean tableReady = FALSE;
+ if (!tableReady) {
+ /* compute lookup table now */
+ uint i;
+ for (i = 0; i < 256; i++) {
+ const float cs = ubyte_to_float(i);
+ if (cs <= 0.04045) {
+ table[i] = cs / 12.92f;
+ }
+ else {
+ table[i] = (float) powf((cs + 0.055) / 1.055, 2.4);
+ }
+ }
+ tableReady = TRUE;
+ }
+ return table[cs8];
+}
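+
+
+/*
+ * For reference, srgb_to_linear() above and linear_to_srgb() below implement
+ * the standard sRGB transfer functions (cs = non-linear, cl = linear, both in
+ * [0,1]):
+ *
+ *    decode:  cl = cs / 12.92                   if cs <= 0.04045
+ *             cl = ((cs + 0.055) / 1.055)^2.4   otherwise
+ *
+ *    encode:  cs = 12.92 * cl                   if cl < 0.0031308
+ *             cs = 1.055 * cl^(1/2.4) - 0.055   otherwise
+ */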
+
+
+/**
+ * Convert linear float in [0,1] to an srgb ubyte value in [0,255].
+ * XXX this hasn't been tested (render to srgb surface).
+ * XXX this needs optimization.
+ */
+static INLINE ubyte
+linear_to_srgb(float cl)
+{
+ if (cl >= 1.0F)
+ return 255;
+ else if (cl >= 0.0031308F)
+ return float_to_ubyte(1.055F * powf(cl, 0.41666F) - 0.055F);
+ else if (cl > 0.0F)
+ return float_to_ubyte(12.92F * cl);
+ else
+      return 0;
+}
+
+
+static void
+a8r8g8b8_srgb_get_tile_rgba(const unsigned *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ const unsigned pixel = *src++;
+ pRow[0] = srgb_to_linear((pixel >> 16) & 0xff);
+ pRow[1] = srgb_to_linear((pixel >> 8) & 0xff);
+ pRow[2] = srgb_to_linear((pixel >> 0) & 0xff);
+ pRow[3] = ubyte_to_float((pixel >> 24) & 0xff);
+ }
+ p += dst_stride;
+ }
+}
+
+static void
+a8r8g8b8_srgb_put_tile_rgba(unsigned *dst,
+ unsigned w, unsigned h,
+ const float *p,
+ unsigned src_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ const float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ unsigned r, g, b, a;
+ r = linear_to_srgb(pRow[0]);
+ g = linear_to_srgb(pRow[1]);
+ b = linear_to_srgb(pRow[2]);
+ a = float_to_ubyte(pRow[3]);
+ *dst++ = (a << 24) | (r << 16) | (g << 8) | b;
+ }
+ p += src_stride;
+ }
+}
+
+
+/*** PIPE_FORMAT_A8L8_SRGB ***/
+
+static void
+a8l8_srgb_get_tile_rgba(const ushort *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+         const ushort pixel = *src++;
+         pRow[0] =
+         pRow[1] =
+         pRow[2] = srgb_to_linear(pixel & 0xff);
+         pRow[3] = ubyte_to_float(pixel >> 8);
+ }
+ p += dst_stride;
+ }
+}
+
+static void
+a8l8_srgb_put_tile_rgba(ushort *dst,
+ unsigned w, unsigned h,
+ const float *p,
+ unsigned src_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ const float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ unsigned r, a;
+ r = linear_to_srgb(pRow[0]);
+ a = float_to_ubyte(pRow[3]);
+ *dst++ = (a << 8) | r;
+ }
+ p += src_stride;
+ }
+}
+
+
+/*** PIPE_FORMAT_L8_SRGB ***/
+
+static void
+l8_srgb_get_tile_rgba(const ubyte *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, src++, pRow += 4) {
+ pRow[0] =
+ pRow[1] =
+ pRow[2] = srgb_to_linear(*src);
+ pRow[3] = 1.0;
+ }
+ p += dst_stride;
+ }
+}
+
+static void
+l8_srgb_put_tile_rgba(ubyte *dst,
+ unsigned w, unsigned h,
+ const float *p,
+ unsigned src_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ const float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ unsigned r;
+ r = linear_to_srgb(pRow[0]);
+ *dst++ = (ubyte) r;
+ }
+ p += src_stride;
+ }
+}
+
+
+/*** PIPE_FORMAT_I8_UNORM ***/
+
+static void
+i8_get_tile_rgba(const ubyte *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, src++, pRow += 4) {
+ pRow[0] =
+ pRow[1] =
+ pRow[2] =
+ pRow[3] = ubyte_to_float(*src);
+ }
+ p += dst_stride;
+ }
+}
+
+
+static void
+i8_put_tile_rgba(ubyte *dst,
+ unsigned w, unsigned h,
+ const float *p,
+ unsigned src_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ const float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ unsigned r;
+ r = float_to_ubyte(pRow[0]);
+ *dst++ = (ubyte) r;
+ }
+ p += src_stride;
+ }
+}
+
+
+/*** PIPE_FORMAT_A8L8_UNORM ***/
+
+static void
+a8l8_get_tile_rgba(const ushort *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+         const ushort pixel = *src++;
+         pRow[0] =
+         pRow[1] =
+         pRow[2] = ubyte_to_float(pixel & 0xff);
+         pRow[3] = ubyte_to_float(pixel >> 8);
+ }
+ p += dst_stride;
+ }
+}
+
+
+static void
+a8l8_put_tile_rgba(ushort *dst,
+ unsigned w, unsigned h,
+ const float *p,
+ unsigned src_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ const float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ unsigned r, a;
+ r = float_to_ubyte(pRow[0]);
+ a = float_to_ubyte(pRow[3]);
+ *dst++ = (a << 8) | r;
+ }
+ p += src_stride;
+ }
+}
+
+
+
+
+/*** PIPE_FORMAT_Z32_UNORM ***/
+
+/**
+ * Return each Z value as four floats in [0,1].
+ */
+static void
+z32_get_tile_rgba(const unsigned *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ const double scale = 1.0 / (double) 0xffffffff;
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ pRow[0] =
+ pRow[1] =
+ pRow[2] =
+ pRow[3] = (float) (*src++ * scale);
+ }
+ p += dst_stride;
+ }
+}
+
+
+/*** PIPE_FORMAT_S8Z24_UNORM ***/
+
+/**
+ * Return the Z component as four floats in [0,1]. The stencil part is ignored.
+ */
+static void
+s8z24_get_tile_rgba(const unsigned *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ const double scale = 1.0 / ((1 << 24) - 1);
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ pRow[0] =
+ pRow[1] =
+ pRow[2] =
+ pRow[3] = (float) (scale * (*src++ & 0xffffff));
+ }
+ p += dst_stride;
+ }
+}
+
+
+/*** PIPE_FORMAT_Z24S8_UNORM ***/
+
+/**
+ * Return the Z component as four floats in [0,1]. The stencil part is ignored.
+ */
+static void
+z24s8_get_tile_rgba(const unsigned *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ const double scale = 1.0 / ((1 << 24) - 1);
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ pRow[0] =
+ pRow[1] =
+ pRow[2] =
+ pRow[3] = (float) (scale * (*src++ >> 8));
+ }
+ p += dst_stride;
+ }
+}
+
+
+/*** PIPE_FORMAT_Z32_FLOAT ***/
+
+/**
+ * Return each Z value as four floats in [0,1].
+ */
+static void
+z32f_get_tile_rgba(const float *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ pRow[0] =
+ pRow[1] =
+ pRow[2] =
+ pRow[3] = *src++;
+ }
+ p += dst_stride;
+ }
+}
+
+
+/*** PIPE_FORMAT_YCBCR / PIPE_FORMAT_YCBCR_REV ***/
+
+/**
+ * Convert YCbCr (or YCrCb) to RGBA, using the video-range ITU-R BT.601
+ * coefficients hard-coded below.
+ */
+static void
+ycbcr_get_tile_rgba(const ushort *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride,
+ boolean rev)
+{
+ const float scale = 1.0f / 255.0f;
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ /* do two texels at a time */
+ for (j = 0; j < (w & ~1); j += 2, src += 2) {
+ const ushort t0 = src[0];
+ const ushort t1 = src[1];
+ const ubyte y0 = (t0 >> 8) & 0xff; /* luminance */
+ const ubyte y1 = (t1 >> 8) & 0xff; /* luminance */
+ ubyte cb, cr;
+ float r, g, b;
+
+ if (rev) {
+ cb = t1 & 0xff; /* chroma U */
+ cr = t0 & 0xff; /* chroma V */
+ }
+ else {
+ cb = t0 & 0xff; /* chroma U */
+ cr = t1 & 0xff; /* chroma V */
+ }
+
+ /* even pixel: y0,cr,cb */
+ r = 1.164f * (y0-16) + 1.596f * (cr-128);
+ g = 1.164f * (y0-16) - 0.813f * (cr-128) - 0.391f * (cb-128);
+ b = 1.164f * (y0-16) + 2.018f * (cb-128);
+ pRow[0] = r * scale;
+ pRow[1] = g * scale;
+ pRow[2] = b * scale;
+ pRow[3] = 1.0f;
+ pRow += 4;
+
+ /* odd pixel: use y1,cr,cb */
+ r = 1.164f * (y1-16) + 1.596f * (cr-128);
+ g = 1.164f * (y1-16) - 0.813f * (cr-128) - 0.391f * (cb-128);
+ b = 1.164f * (y1-16) + 2.018f * (cb-128);
+ pRow[0] = r * scale;
+ pRow[1] = g * scale;
+ pRow[2] = b * scale;
+ pRow[3] = 1.0f;
+ pRow += 4;
+
+ }
+ /* do the last texel */
+ if (w & 1) {
+ const ushort t0 = src[0];
+ const ushort t1 = src[1];
+ const ubyte y0 = (t0 >> 8) & 0xff; /* luminance */
+ ubyte cb, cr;
+ float r, g, b;
+
+ if (rev) {
+ cb = t1 & 0xff; /* chroma U */
+ cr = t0 & 0xff; /* chroma V */
+ }
+ else {
+ cb = t0 & 0xff; /* chroma U */
+ cr = t1 & 0xff; /* chroma V */
+ }
+
+ /* even pixel: y0,cr,cb */
+ r = 1.164f * (y0-16) + 1.596f * (cr-128);
+ g = 1.164f * (y0-16) - 0.813f * (cr-128) - 0.391f * (cb-128);
+ b = 1.164f * (y0-16) + 2.018f * (cb-128);
+ pRow[0] = r * scale;
+ pRow[1] = g * scale;
+ pRow[2] = b * scale;
+ pRow[3] = 1.0f;
+ pRow += 4;
+ }
+ p += dst_stride;
+ }
+}
+
+
+static void
+fake_get_tile_rgba(const ushort *src,
+ unsigned w, unsigned h,
+ float *p,
+ unsigned dst_stride)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ float *pRow = p;
+ for (j = 0; j < w; j++, pRow += 4) {
+ pRow[0] =
+ pRow[1] =
+ pRow[2] =
+ pRow[3] = (i ^ j) & 1 ? 1.0f : 0.0f;
+ }
+ p += dst_stride;
+ }
+}
+
+
+void
+pipe_tile_raw_to_rgba(enum pipe_format format,
+ void *src,
+ uint w, uint h,
+ float *dst, unsigned dst_stride)
+{
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ a8r8g8b8_get_tile_rgba((unsigned *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_X8R8G8B8_UNORM:
+ x8r8g8b8_get_tile_rgba((unsigned *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ b8g8r8a8_get_tile_rgba((unsigned *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ a1r5g5b5_get_tile_rgba((ushort *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ a4r4g4b4_get_tile_rgba((ushort *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ r5g6b5_get_tile_rgba((ushort *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_R8G8B8_UNORM:
+ r8g8b8_get_tile_rgba((ubyte *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_L8_UNORM:
+ l8_get_tile_rgba((ubyte *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_A8_UNORM:
+ a8_get_tile_rgba((ubyte *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_I8_UNORM:
+ i8_get_tile_rgba((ubyte *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_A8L8_UNORM:
+ a8l8_get_tile_rgba((ushort *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_R16_SNORM:
+ r16_get_tile_rgba((short *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_R16G16B16A16_SNORM:
+ r16g16b16a16_get_tile_rgba((short *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_A8R8G8B8_SRGB:
+ a8r8g8b8_srgb_get_tile_rgba((unsigned *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_A8L8_SRGB:
+ a8l8_srgb_get_tile_rgba((ushort *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_L8_SRGB:
+ l8_srgb_get_tile_rgba((ubyte *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_Z16_UNORM:
+ z16_get_tile_rgba((ushort *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_Z32_UNORM:
+ z32_get_tile_rgba((unsigned *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_S8Z24_UNORM:
+ case PIPE_FORMAT_X8Z24_UNORM:
+ s8z24_get_tile_rgba((unsigned *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_Z24S8_UNORM:
+ case PIPE_FORMAT_Z24X8_UNORM:
+ z24s8_get_tile_rgba((unsigned *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_Z32_FLOAT:
+ z32f_get_tile_rgba((float *) src, w, h, dst, dst_stride);
+ break;
+ case PIPE_FORMAT_YCBCR:
+ ycbcr_get_tile_rgba((ushort *) src, w, h, dst, dst_stride, FALSE);
+ break;
+ case PIPE_FORMAT_YCBCR_REV:
+ ycbcr_get_tile_rgba((ushort *) src, w, h, dst, dst_stride, TRUE);
+ break;
+ default:
+ debug_printf("%s: unsupported format %s\n", __FUNCTION__, pf_name(format));
+ fake_get_tile_rgba(src, w, h, dst, dst_stride);
+ }
+}
+
+
+void
+pipe_get_tile_rgba(struct pipe_transfer *pt,
+ uint x, uint y, uint w, uint h,
+ float *p)
+{
+ unsigned dst_stride = w * 4;
+ void *packed;
+
+ if (pipe_clip_tile(x, y, &w, &h, pt))
+ return;
+
+ packed = MALLOC(pf_get_nblocks(&pt->block, w, h) * pt->block.size);
+
+ if (!packed)
+ return;
+
+ if(pt->format == PIPE_FORMAT_YCBCR || pt->format == PIPE_FORMAT_YCBCR_REV)
+ assert((x & 1) == 0);
+
+ pipe_get_tile_raw(pt, x, y, w, h, packed, 0);
+
+ pipe_tile_raw_to_rgba(pt->format, packed, w, h, p, dst_stride);
+
+ FREE(packed);
+}
+
+
+void
+pipe_put_tile_rgba(struct pipe_transfer *pt,
+ uint x, uint y, uint w, uint h,
+ const float *p)
+{
+ unsigned src_stride = w * 4;
+ void *packed;
+
+ if (pipe_clip_tile(x, y, &w, &h, pt))
+ return;
+
+ packed = MALLOC(pf_get_nblocks(&pt->block, w, h) * pt->block.size);
+
+ if (!packed)
+ return;
+
+ switch (pt->format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ a8r8g8b8_put_tile_rgba((unsigned *) packed, w, h, p, src_stride);
+ break;
+ case PIPE_FORMAT_X8R8G8B8_UNORM:
+ x8r8g8b8_put_tile_rgba((unsigned *) packed, w, h, p, src_stride);
+ break;
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ b8g8r8a8_put_tile_rgba((unsigned *) packed, w, h, p, src_stride);
+ break;
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ a1r5g5b5_put_tile_rgba((ushort *) packed, w, h, p, src_stride);
+ break;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ r5g6b5_put_tile_rgba((ushort *) packed, w, h, p, src_stride);
+ break;
+ case PIPE_FORMAT_R8G8B8_UNORM:
+ r8g8b8_put_tile_rgba((ubyte *) packed, w, h, p, src_stride);
+ break;
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ assert(0);
+ break;
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ a4r4g4b4_put_tile_rgba((ushort *) packed, w, h, p, src_stride);
+ break;
+ case PIPE_FORMAT_L8_UNORM:
+ l8_put_tile_rgba((ubyte *) packed, w, h, p, src_stride);
+ break;
+ case PIPE_FORMAT_A8_UNORM:
+ a8_put_tile_rgba((ubyte *) packed, w, h, p, src_stride);
+ break;
+ case PIPE_FORMAT_I8_UNORM:
+ i8_put_tile_rgba((ubyte *) packed, w, h, p, src_stride);
+ break;
+ case PIPE_FORMAT_A8L8_UNORM:
+ a8l8_put_tile_rgba((ushort *) packed, w, h, p, src_stride);
+ break;
+ case PIPE_FORMAT_R16_SNORM:
+ r16_put_tile_rgba((short *) packed, w, h, p, src_stride);
+ break;
+ case PIPE_FORMAT_R16G16B16A16_SNORM:
+ r16g16b16a16_put_tile_rgba((short *) packed, w, h, p, src_stride);
+ break;
+ case PIPE_FORMAT_A8R8G8B8_SRGB:
+ a8r8g8b8_srgb_put_tile_rgba((unsigned *) packed, w, h, p, src_stride);
+ break;
+ case PIPE_FORMAT_A8L8_SRGB:
+ a8l8_srgb_put_tile_rgba((ushort *) packed, w, h, p, src_stride);
+ break;
+ case PIPE_FORMAT_L8_SRGB:
+ l8_srgb_put_tile_rgba((ubyte *) packed, w, h, p, src_stride);
+ break;
+ case PIPE_FORMAT_Z16_UNORM:
+ /*z16_put_tile_rgba((ushort *) packed, w, h, p, src_stride);*/
+ break;
+ case PIPE_FORMAT_Z32_UNORM:
+ /*z32_put_tile_rgba((unsigned *) packed, w, h, p, src_stride);*/
+ break;
+ case PIPE_FORMAT_S8Z24_UNORM:
+ case PIPE_FORMAT_X8Z24_UNORM:
+ /*s8z24_put_tile_rgba((unsigned *) packed, w, h, p, src_stride);*/
+ break;
+ case PIPE_FORMAT_Z24S8_UNORM:
+ case PIPE_FORMAT_Z24X8_UNORM:
+ /*z24s8_put_tile_rgba((unsigned *) packed, w, h, p, src_stride);*/
+ break;
+ default:
+ debug_printf("%s: unsupported format %s\n", __FUNCTION__, pf_name(pt->format));
+ }
+
+ pipe_put_tile_raw(pt, x, y, w, h, packed, 0);
+
+ FREE(packed);
+}
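+
+
+/*
+ * Example (illustrative; pt is an existing pipe_transfer and x, y, w, h, i
+ * are placeholders): read a tile as floats, modify it, and write it back.
+ *
+ *    float *rgba = MALLOC(w * h * 4 * sizeof(float));
+ *    if (rgba) {
+ *       pipe_get_tile_rgba(pt, x, y, w, h, rgba);
+ *       for (i = 0; i < w * h; i++)
+ *          rgba[i * 4 + 3] = 1.0f;   // e.g. force alpha to 1.0
+ *       pipe_put_tile_rgba(pt, x, y, w, h, rgba);
+ *       FREE(rgba);
+ *    }
+ */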
+
+
+/**
+ * Get a block of Z values, converted to 32-bit range.
+ */
+void
+pipe_get_tile_z(struct pipe_transfer *pt,
+ uint x, uint y, uint w, uint h,
+ uint *z)
+{
+ struct pipe_screen *screen = pt->texture->screen;
+ const uint dstStride = w;
+ ubyte *map;
+ uint *pDest = z;
+ uint i, j;
+
+ if (pipe_clip_tile(x, y, &w, &h, pt))
+ return;
+
+ map = (ubyte *)screen->transfer_map(screen, pt);
+ if (!map) {
+ assert(0);
+ return;
+ }
+
+ switch (pt->format) {
+ case PIPE_FORMAT_Z32_UNORM:
+ {
+ const uint *ptrc
+ = (const uint *)(map + y * pt->stride + x*4);
+ for (i = 0; i < h; i++) {
+ memcpy(pDest, ptrc, 4 * w);
+ pDest += dstStride;
+ ptrc += pt->stride/4;
+ }
+ }
+ break;
+ case PIPE_FORMAT_S8Z24_UNORM:
+ case PIPE_FORMAT_X8Z24_UNORM:
+ {
+ const uint *ptrc
+ = (const uint *)(map + y * pt->stride + x*4);
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+               /* convert 24-bit Z to 32-bit Z: shift up and replicate the
+                * top byte so that 0xffffff maps to 0xffffffff */
+               pDest[j] = (ptrc[j] << 8) | ((ptrc[j] >> 16) & 0xff);
+ }
+ pDest += dstStride;
+ ptrc += pt->stride/4;
+ }
+ }
+ break;
+ case PIPE_FORMAT_Z24S8_UNORM:
+ case PIPE_FORMAT_Z24X8_UNORM:
+ {
+ const uint *ptrc
+ = (const uint *)(map + y * pt->stride + x*4);
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ /* convert 24-bit Z to 32-bit Z */
+ pDest[j] = (ptrc[j] & 0xffffff00) | ((ptrc[j] >> 24) & 0xff);
+ }
+ pDest += dstStride;
+ ptrc += pt->stride/4;
+ }
+ }
+ break;
+ case PIPE_FORMAT_Z16_UNORM:
+ {
+ const ushort *ptrc
+ = (const ushort *)(map + y * pt->stride + x*2);
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ /* convert 16-bit Z to 32-bit Z */
+ pDest[j] = (ptrc[j] << 16) | ptrc[j];
+ }
+ pDest += dstStride;
+ ptrc += pt->stride/2;
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ screen->transfer_unmap(screen, pt);
+}
+
+
+void
+pipe_put_tile_z(struct pipe_transfer *pt,
+ uint x, uint y, uint w, uint h,
+ const uint *zSrc)
+{
+ struct pipe_screen *screen = pt->texture->screen;
+ const uint srcStride = w;
+ const uint *ptrc = zSrc;
+ ubyte *map;
+ uint i, j;
+
+ if (pipe_clip_tile(x, y, &w, &h, pt))
+ return;
+
+ map = (ubyte *)screen->transfer_map(screen, pt);
+ if (!map) {
+ assert(0);
+ return;
+ }
+
+ switch (pt->format) {
+ case PIPE_FORMAT_Z32_UNORM:
+ {
+ uint *pDest = (uint *) (map + y * pt->stride + x*4);
+ for (i = 0; i < h; i++) {
+ memcpy(pDest, ptrc, 4 * w);
+ pDest += pt->stride/4;
+ ptrc += srcStride;
+ }
+ }
+ break;
+ case PIPE_FORMAT_S8Z24_UNORM:
+ {
+ uint *pDest = (uint *) (map + y * pt->stride + x*4);
+ assert(pt->usage == PIPE_TRANSFER_READ_WRITE);
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ /* convert 32-bit Z to 24-bit Z, preserve stencil */
+ pDest[j] = (pDest[j] & 0xff000000) | ptrc[j] >> 8;
+ }
+ pDest += pt->stride/4;
+ ptrc += srcStride;
+ }
+ }
+ break;
+ case PIPE_FORMAT_X8Z24_UNORM:
+ {
+ uint *pDest = (uint *) (map + y * pt->stride + x*4);
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ /* convert 32-bit Z to 24-bit Z (0 stencil) */
+ pDest[j] = ptrc[j] >> 8;
+ }
+ pDest += pt->stride/4;
+ ptrc += srcStride;
+ }
+ }
+ break;
+ case PIPE_FORMAT_Z24S8_UNORM:
+ {
+ uint *pDest = (uint *) (map + y * pt->stride + x*4);
+ assert(pt->usage == PIPE_TRANSFER_READ_WRITE);
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ /* convert 32-bit Z to 24-bit Z, preserve stencil */
+ pDest[j] = (pDest[j] & 0xff) | (ptrc[j] & 0xffffff00);
+ }
+ pDest += pt->stride/4;
+ ptrc += srcStride;
+ }
+ }
+ break;
+ case PIPE_FORMAT_Z24X8_UNORM:
+ {
+ uint *pDest = (uint *) (map + y * pt->stride + x*4);
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ /* convert 32-bit Z to 24-bit Z (0 stencil) */
+ pDest[j] = ptrc[j] & 0xffffff00;
+ }
+ pDest += pt->stride/4;
+ ptrc += srcStride;
+ }
+ }
+ break;
+ case PIPE_FORMAT_Z16_UNORM:
+ {
+ ushort *pDest = (ushort *) (map + y * pt->stride + x*2);
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ /* convert 32-bit Z to 16-bit Z */
+ pDest[j] = ptrc[j] >> 16;
+ }
+ pDest += pt->stride/2;
+ ptrc += srcStride;
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ screen->transfer_unmap(screen, pt);
+}
+
+
diff --git a/src/gallium/auxiliary/util/u_tile.h b/src/gallium/auxiliary/util/u_tile.h
new file mode 100644
index 0000000000..1453af38b8
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_tile.h
@@ -0,0 +1,101 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef P_TILE_H
+#define P_TILE_H
+
+#include "pipe/p_compiler.h"
+
+struct pipe_transfer;
+
+
+/**
+ * Clip tile against transfer dims.
+ * \return TRUE if tile is totally clipped, FALSE otherwise
+ */
+static INLINE boolean
+pipe_clip_tile(uint x, uint y, uint *w, uint *h, const struct pipe_transfer *pt)
+{
+ if (x >= pt->width)
+ return TRUE;
+ if (y >= pt->height)
+ return TRUE;
+ if (x + *w > pt->width)
+ *w = pt->width - x;
+ if (y + *h > pt->height)
+ *h = pt->height - y;
+ return FALSE;
+}
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void
+pipe_get_tile_raw(struct pipe_transfer *pt,
+ uint x, uint y, uint w, uint h,
+ void *p, int dst_stride);
+
+void
+pipe_put_tile_raw(struct pipe_transfer *pt,
+ uint x, uint y, uint w, uint h,
+ const void *p, int src_stride);
+
+
+void
+pipe_get_tile_rgba(struct pipe_transfer *pt,
+ uint x, uint y, uint w, uint h,
+ float *p);
+
+void
+pipe_put_tile_rgba(struct pipe_transfer *pt,
+ uint x, uint y, uint w, uint h,
+ const float *p);
+
+
+void
+pipe_get_tile_z(struct pipe_transfer *pt,
+ uint x, uint y, uint w, uint h,
+ uint *z);
+
+void
+pipe_put_tile_z(struct pipe_transfer *pt,
+ uint x, uint y, uint w, uint h,
+ const uint *z);
+
+void
+pipe_tile_raw_to_rgba(enum pipe_format format,
+ void *src,
+ uint w, uint h,
+ float *dst, unsigned dst_stride);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/gallium/auxiliary/util/u_time.c b/src/gallium/auxiliary/util/u_time.c
new file mode 100644
index 0000000000..b958a98635
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_time.c
@@ -0,0 +1,225 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * OS-independent time-manipulation functions.
+ *
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_config.h"
+
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) || defined(PIPE_OS_APPLE) || defined(PIPE_OS_HAIKU)
+#include <sys/time.h>
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY)
+#include <windows.h>
+#include <winddi.h>
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_MINIPORT)
+#include <windows.h>
+extern VOID KeQuerySystemTime(PLARGE_INTEGER);
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_USER) || defined(PIPE_SUBSYSTEM_WINDOWS_CE)
+#include <windows.h>
+#else
+#error Unsupported OS
+#endif
+
+#include "util/u_time.h"
+
+
+#if defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY) || defined(PIPE_SUBSYSTEM_WINDOWS_USER) || defined(PIPE_SUBSYSTEM_WINDOWS_CE)
+
+static int64_t frequency = 0;
+
+static INLINE void
+util_time_get_frequency(void)
+{
+ if(!frequency) {
+#if defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY)
+ LONGLONG temp;
+ EngQueryPerformanceFrequency(&temp);
+ frequency = temp;
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_USER) || defined(PIPE_SUBSYSTEM_WINDOWS_CE)
+ LARGE_INTEGER temp;
+ QueryPerformanceFrequency(&temp);
+ frequency = temp.QuadPart;
+#endif
+ }
+}
+#endif
+
+
+void
+util_time_get(struct util_time *t)
+{
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) || defined(PIPE_OS_APPLE) || defined(PIPE_OS_HAIKU)
+ gettimeofday(&t->tv, NULL);
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY)
+ LONGLONG temp;
+ EngQueryPerformanceCounter(&temp);
+ t->counter = temp;
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_MINIPORT)
+   /* Updated every 10 milliseconds, measured in units of 100 nanoseconds.
+ * http://msdn.microsoft.com/en-us/library/ms801642.aspx */
+ LARGE_INTEGER temp;
+ KeQuerySystemTime(&temp);
+ t->counter = temp.QuadPart;
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_USER) || defined(PIPE_SUBSYSTEM_WINDOWS_CE)
+ LARGE_INTEGER temp;
+ QueryPerformanceCounter(&temp);
+ t->counter = temp.QuadPart;
+#endif
+}
+
+
+void
+util_time_add(const struct util_time *t1,
+ int64_t usecs,
+ struct util_time *t2)
+{
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) || defined(PIPE_OS_APPLE) || defined(PIPE_OS_HAIKU)
+ t2->tv.tv_sec = t1->tv.tv_sec + usecs / 1000000;
+ t2->tv.tv_usec = t1->tv.tv_usec + usecs % 1000000;
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY) || defined(PIPE_SUBSYSTEM_WINDOWS_USER) || defined(PIPE_SUBSYSTEM_WINDOWS_CE)
+ util_time_get_frequency();
+ t2->counter = t1->counter + (usecs * frequency + INT64_C(999999))/INT64_C(1000000);
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_MINIPORT)
+   /* 1 tick = 100 nanoseconds. */
+ t2->counter = t1->counter + usecs * 10;
+#else
+   /* Unreachable: every supported platform is handled by one of the branches
+    * above (see the #error check at the top of this file). */
+   LARGE_INTEGER temp;
+   LONGLONG freq;
+   freq = temp.QuadPart;
+   t2->counter = t1->counter + (usecs * freq)/1000000L;
+#endif
+}
+
+
+int64_t
+util_time_diff(const struct util_time *t1,
+ const struct util_time *t2)
+{
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) || defined(PIPE_OS_APPLE) || defined(PIPE_OS_HAIKU)
+ return (t2->tv.tv_usec - t1->tv.tv_usec) +
+ (t2->tv.tv_sec - t1->tv.tv_sec)*1000000;
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY) || defined(PIPE_SUBSYSTEM_WINDOWS_USER) || defined(PIPE_SUBSYSTEM_WINDOWS_CE)
+ util_time_get_frequency();
+ return (t2->counter - t1->counter)*INT64_C(1000000)/frequency;
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_MINIPORT)
+ return (t2->counter - t1->counter)/10;
+#endif
+}
+
+
+
+uint64_t
+util_time_micros( void )
+{
+ struct util_time t1;
+
+ util_time_get(&t1);
+
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) || defined(PIPE_OS_APPLE) || defined(PIPE_OS_HAIKU)
+ return t1.tv.tv_usec + t1.tv.tv_sec*1000000LL;
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY) || defined(PIPE_SUBSYSTEM_WINDOWS_USER) || defined(PIPE_SUBSYSTEM_WINDOWS_CE)
+ util_time_get_frequency();
+ return t1.counter*INT64_C(1000000)/frequency;
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_MINIPORT)
+ return t1.counter/10;
+#endif
+}
+
+
+
+/**
+ * Compare two time values.
+ *
+ * Not publicly available because it does not take wrap-arounds into account.
+ * Use util_time_timeout instead.
+ */
+static INLINE int
+util_time_compare(const struct util_time *t1,
+ const struct util_time *t2)
+{
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) || defined(PIPE_OS_APPLE) || defined(PIPE_OS_HAIKU)
+ if (t1->tv.tv_sec < t2->tv.tv_sec)
+ return -1;
+ else if(t1->tv.tv_sec > t2->tv.tv_sec)
+ return 1;
+ else if (t1->tv.tv_usec < t2->tv.tv_usec)
+ return -1;
+ else if(t1->tv.tv_usec > t2->tv.tv_usec)
+ return 1;
+ else
+ return 0;
+#elif defined(PIPE_OS_WINDOWS)
+ if (t1->counter < t2->counter)
+ return -1;
+ else if(t1->counter > t2->counter)
+ return 1;
+ else
+ return 0;
+#endif
+}
+
+
+boolean
+util_time_timeout(const struct util_time *start,
+ const struct util_time *end,
+ const struct util_time *curr)
+{
+   if(util_time_compare(start, end) <= 0)
+      /* no wrap-around: still running while start <= curr < end */
+      return !(util_time_compare(start, curr) <= 0 && util_time_compare(curr, end) < 0);
+   else
+      /* end wrapped around past start: still running while curr >= start or curr < end */
+      return !(util_time_compare(start, curr) <= 0 || util_time_compare(curr, end) < 0);
+}
+
+
+#if defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY)
+void util_time_sleep(unsigned usecs)
+{
+ LONGLONG start, curr, end;
+
+ EngQueryPerformanceCounter(&start);
+
+ if(!frequency)
+ EngQueryPerformanceFrequency(&frequency);
+
+ end = start + (usecs * frequency + 999999LL)/1000000LL;
+
+ do {
+ EngQueryPerformanceCounter(&curr);
+   } while((start <= curr && curr < end) ||
+           (end < start && (curr < end || start <= curr)));
+}
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+void util_time_sleep(unsigned usecs)
+{
+ Sleep((usecs + 999)/ 1000);
+}
+#endif
diff --git a/src/gallium/auxiliary/util/u_time.h b/src/gallium/auxiliary/util/u_time.h
new file mode 100644
index 0000000000..a6189a247b
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_time.h
@@ -0,0 +1,109 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * OS-independent time-manipulation functions.
+ *
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef U_TIME_H_
+#define U_TIME_H_
+
+
+#include "pipe/p_config.h"
+
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) || defined(PIPE_OS_APPLE)
+#include <time.h> /* timeval */
+#include <unistd.h> /* usleep */
+#endif
+
+#if defined(PIPE_OS_HAIKU)
+#include <sys/time.h> /* timeval */
+#include <unistd.h>
+#endif
+
+#include "pipe/p_compiler.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Time abstraction.
+ *
+ * Do not access this structure directly. Use the provided functions instead.
+ */
+struct util_time
+{
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) || defined(PIPE_OS_APPLE) || defined(PIPE_OS_HAIKU)
+ struct timeval tv;
+#else
+ int64_t counter;
+#endif
+};
+
+
+void
+util_time_get(struct util_time *t);
+
+void
+util_time_add(const struct util_time *t1,
+ int64_t usecs,
+ struct util_time *t2);
+
+uint64_t
+util_time_micros( void );
+
+int64_t
+util_time_diff(const struct util_time *t1,
+ const struct util_time *t2);
+
+/**
+ * Returns non-zero when the timeout expires.
+ */
+boolean
+util_time_timeout(const struct util_time *start,
+ const struct util_time *end,
+ const struct util_time *curr);
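+
+/*
+ * Minimal usage sketch (illustrative only, not part of this header):
+ * busy-wait until a deadline 100000 microseconds after "start".
+ *
+ *    struct util_time start, end, curr;
+ *    util_time_get(&start);
+ *    util_time_add(&start, 100 * 1000, &end);
+ *    do {
+ *       util_time_get(&curr);
+ *    } while (!util_time_timeout(&start, &end, &curr));
+ */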
+
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) || defined(PIPE_OS_APPLE) || defined(PIPE_OS_HAIKU)
+#define util_time_sleep usleep
+#else
+void
+util_time_sleep(unsigned usecs);
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_TIME_H_ */
diff --git a/src/gallium/auxiliary/util/u_timed_winsys.c b/src/gallium/auxiliary/util/u_timed_winsys.c
new file mode 100644
index 0000000000..178acdca4d
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_timed_winsys.c
@@ -0,0 +1,318 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ */
+
+#include "pipe/p_state.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "u_timed_winsys.h"
+#include "util/u_memory.h"
+#include "util/u_time.h"
+
+
+struct timed_winsys {
+ struct pipe_winsys base;
+ struct pipe_winsys *backend;
+ uint64_t last_dump;
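+ /* Per-wrapped-function accumulators, indexed by the "idx" argument
+ * passed to time_finish().
+ */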
+ struct {
+ const char *name_key;
+ double total;
+ unsigned calls;
+ } funcs[13];
+};
+
+
+static struct timed_winsys *timed_winsys( struct pipe_winsys *winsys )
+{
+ return (struct timed_winsys *)winsys;
+}
+
+
+static uint64_t time_start( void )
+{
+ return util_time_micros();
+}
+
+
+static void time_display( struct pipe_winsys *winsys )
+{
+ struct timed_winsys *tws = timed_winsys(winsys);
+ unsigned i;
+ double overall = 0;
+
+ for (i = 0; i < Elements(tws->funcs); i++) {
+ if (tws->funcs[i].name_key) {
+ debug_printf("*** %-25s %5.3fms (%d calls, avg %.3fms)\n",
+ tws->funcs[i].name_key,
+ tws->funcs[i].total,
+ tws->funcs[i].calls,
+ tws->funcs[i].total / tws->funcs[i].calls);
+ overall += tws->funcs[i].total;
+ tws->funcs[i].calls = 0;
+ tws->funcs[i].total = 0;
+ }
+ }
+
+ debug_printf("*** %-25s %5.3fms\n",
+ "OVERALL WINSYS",
+ overall);
+}
+
+static void time_finish( struct pipe_winsys *winsys,
+ long long startval,
+ unsigned idx,
+ const char *name )
+{
+ struct timed_winsys *tws = timed_winsys(winsys);
+ uint64_t endval = util_time_micros();
+ double elapsed = (endval - startval)/1000.0;
+
+ if (endval - startval > 1000LL)
+ debug_printf("*** %s %.3f\n", name, elapsed );
+
+ assert( tws->funcs[idx].name_key == name ||
+ tws->funcs[idx].name_key == NULL);
+
+ tws->funcs[idx].name_key = name;
+ tws->funcs[idx].total += elapsed;
+ tws->funcs[idx].calls++;
+
+ if (endval - tws->last_dump > 10LL * 1000LL * 1000LL) {
+ time_display( winsys );
+ tws->last_dump = endval;
+ }
+}
+
+
+/* Pipe has no concept of pools, but the psb driver passes a flag that
+ * can be mapped onto pools in the backend.
+ */
+static struct pipe_buffer *
+timed_buffer_create(struct pipe_winsys *winsys,
+ unsigned alignment,
+ unsigned usage,
+ unsigned size )
+{
+ struct pipe_winsys *backend = timed_winsys(winsys)->backend;
+ uint64_t start = time_start();
+
+ struct pipe_buffer *buf =
+ backend->buffer_create( backend, alignment, usage, size );
+
+ time_finish(winsys, start, 0, __FUNCTION__);
+
+ return buf;
+}
+
+
+
+
+static struct pipe_buffer *
+timed_user_buffer_create(struct pipe_winsys *winsys,
+ void *data,
+ unsigned bytes)
+{
+ struct pipe_winsys *backend = timed_winsys(winsys)->backend;
+ uint64_t start = time_start();
+
+ struct pipe_buffer *buf = backend->user_buffer_create( backend, data, bytes );
+
+ time_finish(winsys, start, 1, __FUNCTION__);
+
+ return buf;
+}
+
+
+static void *
+timed_buffer_map(struct pipe_winsys *winsys,
+ struct pipe_buffer *buf,
+ unsigned flags)
+{
+ struct pipe_winsys *backend = timed_winsys(winsys)->backend;
+ uint64_t start = time_start();
+
+ void *map = backend->buffer_map( backend, buf, flags );
+
+ time_finish(winsys, start, 2, __FUNCTION__);
+
+ return map;
+}
+
+
+static void
+timed_buffer_unmap(struct pipe_winsys *winsys,
+ struct pipe_buffer *buf)
+{
+ struct pipe_winsys *backend = timed_winsys(winsys)->backend;
+ uint64_t start = time_start();
+
+ backend->buffer_unmap( backend, buf );
+
+ time_finish(winsys, start, 3, __FUNCTION__);
+}
+
+
+static void
+timed_buffer_destroy(struct pipe_buffer *buf)
+{
+ struct pipe_winsys *winsys = buf->screen->winsys;
+ struct pipe_winsys *backend = timed_winsys(winsys)->backend;
+ uint64_t start = time_start();
+
+ backend->buffer_destroy( buf );
+
+ time_finish(winsys, start, 4, __FUNCTION__);
+}
+
+
+static void
+timed_flush_frontbuffer( struct pipe_winsys *winsys,
+ struct pipe_surface *surf,
+ void *context_private)
+{
+ struct pipe_winsys *backend = timed_winsys(winsys)->backend;
+ uint64_t start = time_start();
+
+ backend->flush_frontbuffer( backend, surf, context_private );
+
+ time_finish(winsys, start, 5, __FUNCTION__);
+}
+
+
+
+
+static struct pipe_buffer *
+timed_surface_buffer_create(struct pipe_winsys *winsys,
+ unsigned width, unsigned height,
+ enum pipe_format format,
+ unsigned usage,
+ unsigned tex_usage,
+ unsigned *stride)
+{
+ struct pipe_winsys *backend = timed_winsys(winsys)->backend;
+ uint64_t start = time_start();
+
+ struct pipe_buffer *ret = backend->surface_buffer_create( backend, width, height,
+ format, usage, tex_usage, stride );
+
+ time_finish(winsys, start, 7, __FUNCTION__);
+
+ return ret;
+}
+
+
+static const char *
+timed_get_name( struct pipe_winsys *winsys )
+{
+ struct pipe_winsys *backend = timed_winsys(winsys)->backend;
+ uint64_t start = time_start();
+
+ const char *ret = backend->get_name( backend );
+
+ time_finish(winsys, start, 9, __FUNCTION__);
+
+ return ret;
+}
+
+static void
+timed_fence_reference(struct pipe_winsys *winsys,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+ struct pipe_winsys *backend = timed_winsys(winsys)->backend;
+ uint64_t start = time_start();
+
+ backend->fence_reference( backend, ptr, fence );
+
+ time_finish(winsys, start, 10, __FUNCTION__);
+}
+
+
+static int
+timed_fence_signalled( struct pipe_winsys *winsys,
+ struct pipe_fence_handle *fence,
+ unsigned flag )
+{
+ struct pipe_winsys *backend = timed_winsys(winsys)->backend;
+ uint64_t start = time_start();
+
+ int ret = backend->fence_signalled( backend, fence, flag );
+
+ time_finish(winsys, start, 11, __FUNCTION__);
+
+ return ret;
+}
+
+static int
+timed_fence_finish( struct pipe_winsys *winsys,
+ struct pipe_fence_handle *fence,
+ unsigned flag )
+{
+ struct pipe_winsys *backend = timed_winsys(winsys)->backend;
+ uint64_t start = time_start();
+
+ int ret = backend->fence_finish( backend, fence, flag );
+
+ time_finish(winsys, start, 12, __FUNCTION__);
+
+ return ret;
+}
+
+static void
+timed_winsys_destroy( struct pipe_winsys *winsys )
+{
+ struct pipe_winsys *backend = timed_winsys(winsys)->backend;
+ backend->destroy( backend );
+ FREE(winsys);
+}
+
+
+
+struct pipe_winsys *u_timed_winsys_create( struct pipe_winsys *backend )
+{
+ struct timed_winsys *ws = CALLOC_STRUCT(timed_winsys);
+
+ ws->base.user_buffer_create = timed_user_buffer_create;
+ ws->base.buffer_map = timed_buffer_map;
+ ws->base.buffer_unmap = timed_buffer_unmap;
+ ws->base.buffer_destroy = timed_buffer_destroy;
+ ws->base.buffer_create = timed_buffer_create;
+ ws->base.surface_buffer_create = timed_surface_buffer_create;
+ ws->base.flush_frontbuffer = timed_flush_frontbuffer;
+ ws->base.get_name = timed_get_name;
+ ws->base.fence_reference = timed_fence_reference;
+ ws->base.fence_signalled = timed_fence_signalled;
+ ws->base.fence_finish = timed_fence_finish;
+ ws->base.destroy = timed_winsys_destroy;
+
+ ws->backend = backend;
+
+ return &ws->base;
+}
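+
+/*
+ * Usage sketch (illustrative; the winsys constructor names below are
+ * hypothetical): a driver that wants per-call profiling wraps its real
+ * winsys once at startup and hands the wrapper to the rest of the stack.
+ *
+ *    struct pipe_winsys *real_ws = native_winsys_create();      (hypothetical)
+ *    struct pipe_winsys *ws = u_timed_winsys_create(real_ws);
+ *    ... create the screen/context with "ws" instead of "real_ws" ...
+ */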
+
diff --git a/src/gallium/auxiliary/util/u_timed_winsys.h b/src/gallium/auxiliary/util/u_timed_winsys.h
new file mode 100644
index 0000000000..542365112d
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_timed_winsys.h
@@ -0,0 +1,41 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ */
+
+
+#ifndef U_TIMED_WINSYS_H
+#define U_TIMED_WINSYS_H
+
+
+struct pipe_winsys;
+struct pipe_winsys *u_timed_winsys_create( struct pipe_winsys *backend );
+
+
+#endif
diff --git a/src/gallium/auxiliary/util/u_upload_mgr.c b/src/gallium/auxiliary/util/u_upload_mgr.c
new file mode 100644
index 0000000000..eb635c9f14
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_upload_mgr.c
@@ -0,0 +1,230 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Helper utility for uploading user buffers & other data, and
+ * coalescing small buffers into larger ones.
+ */
+
+#include "pipe/p_error.h"
+#include "pipe/p_inlines.h"
+#include "pipe/p_screen.h"
+#include "util/u_memory.h"
+#include "util/u_math.h"
+
+#include "u_upload_mgr.h"
+
+
+struct u_upload_mgr {
+ struct pipe_screen *screen;
+
+ unsigned default_size;
+ unsigned alignment;
+ unsigned usage;
+
+ /* The active buffer:
+ */
+ struct pipe_buffer *buffer;
+ unsigned size;
+ unsigned offset;
+};
+
+
+struct u_upload_mgr *u_upload_create( struct pipe_screen *screen,
+ unsigned default_size,
+ unsigned alignment,
+ unsigned usage )
+{
+ struct u_upload_mgr *upload = CALLOC_STRUCT( u_upload_mgr );
+
+ if (upload == NULL)
+    return NULL;
+
+ upload->default_size = default_size;
+ upload->screen = screen;
+ upload->alignment = alignment;
+ upload->usage = usage;
+ upload->buffer = NULL;
+
+ return upload;
+}
+
+
+static INLINE enum pipe_error
+my_buffer_write(struct pipe_screen *screen,
+ struct pipe_buffer *buf,
+ unsigned offset, unsigned size, unsigned dirty_size,
+ const void *data)
+{
+ uint8_t *map;
+
+ assert(offset < buf->size);
+ assert(offset + size <= buf->size);
+ assert(dirty_size >= size);
+ assert(size);
+
+ map = pipe_buffer_map_range(screen, buf, offset, size,
+ PIPE_BUFFER_USAGE_CPU_WRITE |
+ PIPE_BUFFER_USAGE_FLUSH_EXPLICIT);
+ if (map == NULL)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ memcpy(map + offset, data, size);
+ pipe_buffer_flush_mapped_range(screen, buf, offset, dirty_size);
+ pipe_buffer_unmap(screen, buf);
+
+ return PIPE_OK;
+}
+
+/* Release old buffer.
+ *
+ * This must usually be called prior to firing the command stream
+ * which references the upload buffer, as many memory managers will
+ * cause subsequent maps of a fired buffer to wait.
+ *
+ * This could be improved by changing pipe_buffer_write to use the
+ * DONT_WAIT bit, but for now it's easiest just to grab a new buffer.
+ */
+void u_upload_flush( struct u_upload_mgr *upload )
+{
+ pipe_buffer_reference( &upload->buffer, NULL );
+ upload->size = 0;
+}
+
+
+void u_upload_destroy( struct u_upload_mgr *upload )
+{
+ u_upload_flush( upload );
+ FREE( upload );
+}
+
+
+static enum pipe_error
+u_upload_alloc_buffer( struct u_upload_mgr *upload,
+ unsigned min_size )
+{
+ unsigned size;
+
+ /* Release old buffer, if present:
+ */
+ u_upload_flush( upload );
+
+ /* Allocate a new one:
+ */
+ size = align(MAX2(upload->default_size, min_size), 4096);
+
+ upload->buffer = pipe_buffer_create( upload->screen,
+ upload->alignment,
+ upload->usage | PIPE_BUFFER_USAGE_CPU_WRITE,
+ size );
+ if (upload->buffer == NULL)
+ goto fail;
+
+ upload->size = size;
+
+ upload->offset = 0;
+ return PIPE_OK;
+
+fail:
+ if (upload->buffer)
+ pipe_buffer_reference( &upload->buffer, NULL );
+
+ return PIPE_ERROR_OUT_OF_MEMORY;
+}
+
+
+enum pipe_error u_upload_data( struct u_upload_mgr *upload,
+ unsigned size,
+ const void *data,
+ unsigned *out_offset,
+ struct pipe_buffer **outbuf )
+{
+ unsigned alloc_size = align( size, upload->alignment );
+ enum pipe_error ret = PIPE_OK;
+
+ if (upload->offset + alloc_size > upload->size) {
+ ret = u_upload_alloc_buffer( upload, alloc_size );
+ if (ret)
+ return ret;
+ }
+
+ /* Copy the data, using map_range if available:
+ */
+ ret = my_buffer_write( upload->screen,
+ upload->buffer,
+ upload->offset,
+ size,
+ alloc_size,
+ data );
+ if (ret)
+ return ret;
+
+ /* Emit the return values:
+ */
+ pipe_buffer_reference( outbuf, upload->buffer );
+ *out_offset = upload->offset;
+ upload->offset += alloc_size;
+ return PIPE_OK;
+}
+
+
+/* As above, but upload the full contents of a buffer. Useful for
+ * uploading user buffers; it avoids generating an explosion of GPU
+ * buffers when an app does lots of small vertex buffer renders or
+ * DrawElements calls.
+ */
+enum pipe_error u_upload_buffer( struct u_upload_mgr *upload,
+ unsigned offset,
+ unsigned size,
+ struct pipe_buffer *inbuf,
+ unsigned *out_offset,
+ struct pipe_buffer **outbuf )
+{
+ enum pipe_error ret = PIPE_OK;
+ const char *map = NULL;
+
+ map = (const char *)pipe_buffer_map(
+ upload->screen, inbuf, PIPE_BUFFER_USAGE_CPU_READ );
+
+ if (map == NULL) {
+ ret = PIPE_ERROR_OUT_OF_MEMORY;
+ goto done;
+ }
+
+ if (0)
+ debug_printf("upload ptr %p ofs %d sz %d\n", map, offset, size);
+
+ ret = u_upload_data( upload,
+ size,
+ map + offset,
+ out_offset,
+ outbuf );
+ if (ret)
+ goto done;
+
+done:
+ if (map)
+ pipe_buffer_unmap( upload->screen, inbuf );
+
+ return ret;
+}
diff --git a/src/gallium/auxiliary/util/u_upload_mgr.h b/src/gallium/auxiliary/util/u_upload_mgr.h
new file mode 100644
index 0000000000..745b5834af
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_upload_mgr.h
@@ -0,0 +1,75 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Helper utility for uploading user buffers & other data, and
+ * coalescing small buffers into larger ones.
+ */
+
+#ifndef U_UPLOAD_MGR_H
+#define U_UPLOAD_MGR_H
+
+struct pipe_screen;
+struct pipe_buffer;
+struct u_upload_mgr;
+
+
+struct u_upload_mgr *u_upload_create( struct pipe_screen *screen,
+ unsigned default_size,
+ unsigned alignment,
+ unsigned usage );
+
+void u_upload_destroy( struct u_upload_mgr *upload );
+
+/* Unmap and release old buffer.
+ *
+ * This must usually be called prior to firing the command stream
+ * which references the upload buffer, as many memory managers either
+ * don't like firing a mapped buffer or cause subsequent maps of a
+ * fired buffer to wait. For now, it's easiest just to grab a new
+ * buffer.
+ */
+void u_upload_flush( struct u_upload_mgr *upload );
+
+
+enum pipe_error u_upload_data( struct u_upload_mgr *upload,
+ unsigned size,
+ const void *data,
+ unsigned *out_offset,
+ struct pipe_buffer **outbuf );
+
+
+enum pipe_error u_upload_buffer( struct u_upload_mgr *upload,
+ unsigned offset,
+ unsigned size,
+ struct pipe_buffer *inbuf,
+ unsigned *out_offset,
+ struct pipe_buffer **outbuf );
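+
+/*
+ * Typical usage sketch (illustrative; the buffer usage flag is an
+ * assumption, not mandated by this header):
+ *
+ *    struct u_upload_mgr *up =
+ *       u_upload_create(screen, 64 * 1024, 16, PIPE_BUFFER_USAGE_VERTEX);
+ *
+ *    unsigned offset;
+ *    struct pipe_buffer *buf = NULL;
+ *    if (u_upload_data(up, size, data, &offset, &buf) == PIPE_OK) {
+ *       ... emit draw commands reading "buf" at "offset" ...
+ *    }
+ *
+ *    u_upload_flush(up);      -- release the buffer before firing the command stream
+ *    ...
+ *    u_upload_destroy(up);
+ */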
+
+
+
+#endif
+
diff --git a/src/gallium/drivers/Makefile b/src/gallium/drivers/Makefile
new file mode 100644
index 0000000000..9fe9b2c11d
--- /dev/null
+++ b/src/gallium/drivers/Makefile
@@ -0,0 +1,12 @@
+# src/gallium/drivers/Makefile
+TOP = ../../..
+include $(TOP)/configs/current
+
+SUBDIRS = $(GALLIUM_DRIVERS_DIRS)
+
+default install clean:
+ @for dir in $(SUBDIRS) ; do \
+ if [ -d $$dir ] ; then \
+ (cd $$dir && $(MAKE) $@) || exit 1; \
+ fi \
+ done
diff --git a/src/gallium/drivers/cell/Makefile b/src/gallium/drivers/cell/Makefile
new file mode 100644
index 0000000000..47aef7b05f
--- /dev/null
+++ b/src/gallium/drivers/cell/Makefile
@@ -0,0 +1,12 @@
+# Cell Gallium driver Makefile
+
+
+default:
+ ( cd spu ; make )
+ ( cd ppu ; make )
+
+
+
+clean:
+ ( cd spu ; make clean )
+ ( cd ppu ; make clean )
diff --git a/src/gallium/drivers/cell/common.h b/src/gallium/drivers/cell/common.h
new file mode 100644
index 0000000000..d5f5c7bbba
--- /dev/null
+++ b/src/gallium/drivers/cell/common.h
@@ -0,0 +1,376 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Types and tokens which are common to the SPU and PPU code.
+ */
+
+
+#ifndef CELL_COMMON_H
+#define CELL_COMMON_H
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_format.h"
+#include "pipe/p_state.h"
+
+
+/** The standard assert macro doesn't seem to work reliably */
+#define ASSERT(x) \
+ if (!(x)) { \
+ ubyte *p = NULL; \
+ fprintf(stderr, "%s:%d: %s(): assertion %s failed.\n", \
+ __FILE__, __LINE__, __FUNCTION__, #x); \
+ *p = 0; \
+ exit(1); \
+ }
+
+
+
+#define JOIN(x, y) JOIN_AGAIN(x, y)
+#define JOIN_AGAIN(x, y) x ## y
+
+#define STATIC_ASSERT(e) \
+{typedef char JOIN(assertion_failed_at_line_, __LINE__) [(e) ? 1 : -1];}
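+/* Example use (as in cell_batch.c):
+ *    STATIC_ASSERT(sizeof(struct cell_command_fence) % 16 == 0);
+ */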
+
+
+
+/** for sanity checking */
+#define ASSERT_ALIGN16(ptr) \
+ ASSERT((((unsigned long) (ptr)) & 0xf) == 0);
+
+
+/** round up value to next multiple of 4 */
+#define ROUNDUP4(k) (((k) + 0x3) & ~0x3)
+
+/** round up value to next multiple of 8 */
+#define ROUNDUP8(k) (((k) + 0x7) & ~0x7)
+
+/** round up value to next multiple of 16 */
+#define ROUNDUP16(k) (((k) + 0xf) & ~0xf)
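+/* e.g. ROUNDUP4(6) == 8, ROUNDUP8(20) == 24, ROUNDUP16(20) == 32 */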
+
+
+#define CELL_MAX_SPUS 8
+
+#define CELL_MAX_SAMPLERS 4
+#define CELL_MAX_TEXTURE_LEVELS 12 /* 2k x 2k */
+#define CELL_MAX_CONSTANTS 32 /**< number of float[4] constants */
+#define CELL_MAX_WIDTH 1024 /**< max framebuffer width */
+#define CELL_MAX_HEIGHT 1024 /**< max framebuffer height */
+
+#define TILE_SIZE 32
+
+
+/**
+ * The low byte of a mailbox word contains the command opcode.
+ * Remaining higher bytes are command specific.
+ */
+#define CELL_CMD_OPCODE_MASK 0xff
+
+#define CELL_CMD_EXIT 1
+#define CELL_CMD_CLEAR_SURFACE 2
+#define CELL_CMD_FINISH 3
+#define CELL_CMD_RENDER 4
+#define CELL_CMD_BATCH 5
+#define CELL_CMD_RELEASE_VERTS 6
+#define CELL_CMD_STATE_FRAMEBUFFER 10
+#define CELL_CMD_STATE_FRAGMENT_OPS 11
+#define CELL_CMD_STATE_SAMPLER 12
+#define CELL_CMD_STATE_TEXTURE 13
+#define CELL_CMD_STATE_VERTEX_INFO 14
+#define CELL_CMD_STATE_VIEWPORT 15
+#define CELL_CMD_STATE_UNIFORMS 16
+#define CELL_CMD_STATE_VS_ARRAY_INFO 17
+#define CELL_CMD_STATE_BIND_VS 18
+#define CELL_CMD_STATE_FRAGMENT_PROGRAM 19
+#define CELL_CMD_STATE_ATTRIB_FETCH 20
+#define CELL_CMD_STATE_FS_CONSTANTS 21
+#define CELL_CMD_STATE_RASTERIZER 22
+#define CELL_CMD_VS_EXECUTE 23
+#define CELL_CMD_FLUSH_BUFFER_RANGE 24
+#define CELL_CMD_FENCE 25
+
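+/*
+ * Example encoding (this is what cell_batch_flush() builds): a BATCH
+ * command carries the buffer index in bits 8..15 and the byte count in
+ * the higher bits, and the SPU recovers the opcode with the mask above:
+ *
+ *    cmd_word = CELL_CMD_BATCH | (buf << 8) | (size << 16);
+ *    opcode   = cmd_word & CELL_CMD_OPCODE_MASK;
+ */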
+
+/** Command/batch buffers */
+#define CELL_NUM_BUFFERS 4
+#define CELL_BUFFER_SIZE (4*1024) /**< 16KB would be the max */
+
+#define CELL_BUFFER_STATUS_FREE 10
+#define CELL_BUFFER_STATUS_USED 20
+
+/** Debug flags */
+#define CELL_DEBUG_CHECKER (1 << 0)
+#define CELL_DEBUG_ASM (1 << 1)
+#define CELL_DEBUG_SYNC (1 << 2)
+#define CELL_DEBUG_FRAGMENT_OPS (1 << 3)
+#define CELL_DEBUG_FRAGMENT_OP_FALLBACK (1 << 4)
+#define CELL_DEBUG_CMD (1 << 5)
+#define CELL_DEBUG_CACHE (1 << 6)
+
+#define CELL_FENCE_IDLE 0
+#define CELL_FENCE_EMITTED 1
+#define CELL_FENCE_SIGNALLED 2
+
+#define CELL_FACING_FRONT 0
+#define CELL_FACING_BACK 1
+
+struct cell_fence
+{
+ /** There's a 16-byte status qword per SPU */
+ volatile uint status[CELL_MAX_SPUS][4];
+};
+
+#ifdef __SPU__
+typedef vector unsigned int opcode_t;
+#else
+typedef unsigned int opcode_t[4];
+#endif
+
+/**
+ * Fence command sent to SPUs. In response, the SPUs will write
+ * CELL_FENCE_SIGNALLED back to the fence status word in main memory.
+ */
+struct cell_command_fence
+{
+ opcode_t opcode; /**< CELL_CMD_FENCE */
+ struct cell_fence *fence;
+ uint32_t pad_[3];
+};
+
+
+/**
+ * Command to specify per-fragment operations state and generated code.
+ * Note that this is a variable-length structure, allocated with as
+ * much memory as needed to hold the generated code; the "code"
+ * field *must* be the last field in the structure. Also, the entire
+ * length of the structure (including the variable-length code field) must be
+ * a multiple of 8 bytes; we require that this structure itself be
+ * a multiple of 8 bytes, and that the generated code also be a multiple
+ * of 8 bytes.
+ *
+ * Also note that the dsa, blend, blend_color fields are really only needed
+ * for the fallback/C per-pixel code. They're not used when we generate
+ * dynamic SPU fragment code (which is the normal case), and will eventually
+ * be removed from this structure.
+ */
+struct cell_command_fragment_ops
+{
+ opcode_t opcode; /**< CELL_CMD_STATE_FRAGMENT_OPS */
+
+ /* Fields for the fallback case */
+ struct pipe_depth_stencil_alpha_state dsa;
+ struct pipe_blend_state blend;
+ struct pipe_blend_color blend_color;
+
+ /* Fields for the generated SPU code */
+ unsigned total_code_size;
+ unsigned front_code_index;
+ unsigned back_code_index;
+ /* this field has variable length, and must be the last field in
+ * the structure
+ */
+ unsigned code[0];
+};
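+
+/*
+ * Illustrative sizing only (the actual allocation is done elsewhere in the
+ * driver): with code[] as the trailing variable-length member, the total
+ * size is the fixed part plus the generated code, padded to the required
+ * multiple of 8 bytes:
+ *
+ *    bytes = ROUNDUP8(sizeof(struct cell_command_fragment_ops) + total_code_size);
+ */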
+
+
+/** Max instructions for fragment programs */
+#define SPU_MAX_FRAGMENT_PROGRAM_INSTS 512
+
+/**
+ * Command to send a fragment program to SPUs.
+ */
+struct cell_command_fragment_program
+{
+ opcode_t opcode; /**< CELL_CMD_STATE_FRAGMENT_PROGRAM */
+ uint num_inst; /**< Number of instructions */
+ uint32_t pad[3];
+ unsigned code[SPU_MAX_FRAGMENT_PROGRAM_INSTS];
+};
+
+
+/**
+ * Tell SPUs about the framebuffer size, location
+ */
+struct cell_command_framebuffer
+{
+ opcode_t opcode; /**< CELL_CMD_STATE_FRAMEBUFFER */
+ int width, height;
+ void *color_start, *depth_start;
+ enum pipe_format color_format, depth_format;
+ uint32_t pad_[2];
+};
+
+
+/**
+ * Tell SPUs about rasterizer state.
+ */
+struct cell_command_rasterizer
+{
+ opcode_t opcode; /**< CELL_CMD_STATE_RASTERIZER */
+ struct pipe_rasterizer_state rasterizer;
+};
+
+
+/**
+ * Clear framebuffer to the given value/color.
+ */
+struct cell_command_clear_surface
+{
+ opcode_t opcode; /**< CELL_CMD_CLEAR_SURFACE */
+ uint surface; /**< Temporary: 0=color, 1=Z */
+ uint value;
+ uint32_t pad[2];
+};
+
+
+/**
+ * Array info used by the vertex shader's vertex puller.
+ */
+struct cell_array_info
+{
+ uint64_t base; /**< Base address of the 0th element. */
+ uint attr; /**< Attribute that this state is for. */
+ uint pitch; /**< Byte pitch from one entry to the next. */
+ uint size;
+ uint function_offset;
+};
+
+
+struct cell_attribute_fetch_code
+{
+ uint64_t base;
+ uint size;
+};
+
+
+struct cell_buffer_range
+{
+ uint64_t base;
+ unsigned size;
+};
+
+
+struct cell_shader_info
+{
+ uint64_t declarations;
+ uint64_t instructions;
+ uint64_t immediates;
+
+ unsigned num_outputs;
+ unsigned num_declarations;
+ unsigned num_instructions;
+ unsigned num_immediates;
+};
+
+
+#define SPU_VERTS_PER_BATCH 64
+struct cell_command_vs
+{
+ opcode_t opcode; /**< CELL_CMD_VS_EXECUTE */
+ uint64_t vOut[SPU_VERTS_PER_BATCH];
+ unsigned num_elts;
+ unsigned elts[SPU_VERTS_PER_BATCH];
+ float plane[12][4];
+ unsigned nr_planes;
+ unsigned nr_attrs;
+};
+
+
+struct cell_command_render
+{
+ opcode_t opcode; /**< CELL_CMD_RENDER */
+ uint prim_type; /**< PIPE_PRIM_x */
+ uint num_verts;
+ uint vertex_size; /**< bytes per vertex */
+ uint num_indexes;
+ uint vertex_buf; /**< which cell->buffer[] contains the vertex data */
+ float xmin, ymin, xmax, ymax; /* XXX another dummy field */
+ uint min_index;
+ boolean inline_verts;
+ uint32_t pad_[1];
+};
+
+
+struct cell_command_release_verts
+{
+ opcode_t opcode; /**< CELL_CMD_RELEASE_VERTS */
+ uint vertex_buf; /**< in [0, CELL_NUM_BUFFERS-1] */
+ uint32_t pad_[3];
+};
+
+
+struct cell_command_sampler
+{
+ opcode_t opcode; /**< CELL_CMD_STATE_SAMPLER */
+ uint unit;
+ struct pipe_sampler_state state;
+ uint32_t pad_[2];
+};
+
+
+struct cell_command_texture
+{
+ opcode_t opcode; /**< CELL_CMD_STATE_TEXTURE */
+ uint target; /**< PIPE_TEXTURE_x */
+ uint unit;
+ void *start[CELL_MAX_TEXTURE_LEVELS]; /**< Address in main memory */
+ ushort width[CELL_MAX_TEXTURE_LEVELS];
+ ushort height[CELL_MAX_TEXTURE_LEVELS];
+ ushort depth[CELL_MAX_TEXTURE_LEVELS];
+};
+
+
+#define MAX_SPU_FUNCTIONS 12
+/**
+ * Used to tell the PPU about the address of particular functions in the
+ * SPU's address space.
+ */
+struct cell_spu_function_info
+{
+ uint num;
+ char names[MAX_SPU_FUNCTIONS][16];
+ uint addrs[MAX_SPU_FUNCTIONS];
+ char pad[12]; /**< Pad struct to multiple of 16 bytes (256 currently) */
+};
+
+
+/** This is the object passed to spe_create_thread() */
+struct cell_init_info
+{
+ unsigned id;
+ unsigned num_spus;
+ unsigned debug_flags; /**< mask of CELL_DEBUG_x flags */
+ float inv_timebase; /**< 1.0/timebase, for perf measurement */
+
+ /** Buffers for command batches, vertex/index data */
+ ubyte *buffers[CELL_NUM_BUFFERS];
+ uint *buffer_status; /**< points at cell_context->buffer_status */
+
+ struct cell_spu_function_info *spu_functions;
+} ALIGN16_ATTRIB;
+
+
+#endif /* CELL_COMMON_H */
diff --git a/src/gallium/drivers/cell/ppu/Makefile b/src/gallium/drivers/cell/ppu/Makefile
new file mode 100644
index 0000000000..c92f8e5cba
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/Makefile
@@ -0,0 +1,86 @@
+# Gallium3D Cell driver: PPU code
+
+# This makefile builds the libcell.a library which gets pulled into
+# the main libGL.so library
+
+
+TOP = ../../../../..
+include $(TOP)/configs/current
+
+
+# This is the "top-level" cell PPU driver code, will get pulled into libGL.so
+# by the winsys Makefile.
+CELL_LIB = ../libcell.a
+
+
+# This is the SPU code. We'd like to be able to put this into the libcell.a
+# archive with the PPU code, but nesting .a libs doesn't seem to work.
+# So, it's pulled into libGL.so in gallium/winsys/xlib/Makefile
+SPU_CODE_MODULE = ../spu/g3d_spu.a
+
+
+SOURCES = \
+ cell_batch.c \
+ cell_clear.c \
+ cell_context.c \
+ cell_draw_arrays.c \
+ cell_fence.c \
+ cell_flush.c \
+ cell_gen_fragment.c \
+ cell_gen_fp.c \
+ cell_state_derived.c \
+ cell_state_emit.c \
+ cell_state_shader.c \
+ cell_pipe_state.c \
+ cell_screen.c \
+ cell_state_vertex.c \
+ cell_spu.c \
+ cell_surface.c \
+ cell_texture.c \
+ cell_vbuf.c \
+ cell_vertex_fetch.c \
+ cell_vertex_shader.c
+
+
+OBJECTS = $(SOURCES:.c=.o)
+
+INCLUDE_DIRS = \
+ -I$(TOP)/src/mesa \
+ -I$(TOP)/src/gallium/include \
+ -I$(TOP)/src/gallium/auxiliary \
+ -I$(TOP)/src/gallium/drivers
+
+.c.o:
+ $(CC) -c $(INCLUDE_DIRS) $(CFLAGS) $< -o $@
+
+
+.c.s:
+ $(CC) -S $(INCLUDE_DIRS) $(CFLAGS) $< -o $@
+
+
+default: $(CELL_LIB)
+
+
+$(CELL_LIB): $(OBJECTS) $(SPU_CODE_MODULE)
+# ar -ru $(CELL_LIB) $(OBJECTS) $(SPU_CODE_MODULE) # doesn't work
+ ar -ru $(CELL_LIB) $(OBJECTS)
+
+#$(PROG): $(PPU_OBJECTS)
+# $(CC) -o $(PROG) $(PPU_OBJECTS) $(SPU_CODE_MODULE) $(PPU_LFLAGS)
+
+
+
+clean:
+ rm -f *.o *~ $(CELL_LIB)
+
+
+
+depend: $(SOURCES)
+ rm -f depend
+ touch depend
+ $(MKDEP) $(MKDEP_OPTIONS) $(INCLUDE_DIRS) $(SOURCES) 2> /dev/null
+
+include depend
+
+
+
diff --git a/src/gallium/drivers/cell/ppu/cell_batch.c b/src/gallium/drivers/cell/ppu/cell_batch.c
new file mode 100644
index 0000000000..fe144f8b84
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_batch.c
@@ -0,0 +1,260 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "cell_context.h"
+#include "cell_batch.h"
+#include "cell_fence.h"
+#include "cell_spu.h"
+
+
+
+/**
+ * Search the buffer pool for an empty/free buffer and return its index.
+ * Buffers are used for storing vertex data, state and commands which
+ * will be sent to the SPUs.
+ * If no empty buffers are available, wait for one.
+ * \return buffer index in [0, CELL_NUM_BUFFERS-1]
+ */
+uint
+cell_get_empty_buffer(struct cell_context *cell)
+{
+ static uint prev_buffer = 0;
+ uint buf = (prev_buffer + 1) % CELL_NUM_BUFFERS;
+ uint tries = 0;
+
+ /* Find a buffer that's marked as free by all SPUs */
+ while (1) {
+ uint spu, num_free = 0;
+
+ for (spu = 0; spu < cell->num_spus; spu++) {
+ if (cell->buffer_status[spu][buf][0] == CELL_BUFFER_STATUS_FREE) {
+ num_free++;
+
+ if (num_free == cell->num_spus) {
+ /* found a free buffer, now mark status as used */
+ for (spu = 0; spu < cell->num_spus; spu++) {
+ cell->buffer_status[spu][buf][0] = CELL_BUFFER_STATUS_USED;
+ }
+ /*
+ printf("PPU: ALLOC BUFFER %u, %u tries\n", buf, tries);
+ */
+ prev_buffer = buf;
+
+ /* release tex buffer associated w/ prev use of this batch buf */
+ cell_free_fenced_buffers(cell, &cell->fenced_buffers[buf]);
+
+ return buf;
+ }
+ }
+ else {
+ break;
+ }
+ }
+
+ /* try next buf */
+ buf = (buf + 1) % CELL_NUM_BUFFERS;
+
+ tries++;
+ if (tries == 100) {
+ /*
+ printf("PPU WAITING for buffer...\n");
+ */
+ }
+ }
+}
+
+
+/**
+ * Append a fence command to the current batch buffer.
+ * Note that we're sure there's always room for this because of the
+ * adjusted size check in cell_batch_free_space().
+ */
+static void
+emit_fence(struct cell_context *cell)
+{
+ const uint batch = cell->cur_batch;
+ const uint size = cell->buffer_size[batch];
+ struct cell_command_fence *fence_cmd;
+ struct cell_fence *fence = &cell->fenced_buffers[batch].fence;
+ uint i;
+
+ /* set fence status to emitted, not yet signalled */
+ for (i = 0; i < cell->num_spus; i++) {
+ fence->status[i][0] = CELL_FENCE_EMITTED;
+ }
+
+ STATIC_ASSERT(sizeof(struct cell_command_fence) % 16 == 0);
+ ASSERT(size % 16 == 0);
+ ASSERT(size + sizeof(struct cell_command_fence) <= CELL_BUFFER_SIZE);
+
+ fence_cmd = (struct cell_command_fence *) (cell->buffer[batch] + size);
+ fence_cmd->opcode[0] = CELL_CMD_FENCE;
+ fence_cmd->fence = fence;
+
+ /* update batch buffer size */
+ cell->buffer_size[batch] = size + sizeof(struct cell_command_fence);
+}
+
+
+/**
+ * Flush the current batch buffer to the SPUs.
+ * An empty buffer will be found and set as the new current batch buffer
+ * for subsequent commands/data.
+ */
+void
+cell_batch_flush(struct cell_context *cell)
+{
+ static boolean flushing = FALSE;
+ uint batch = cell->cur_batch;
+ uint size = cell->buffer_size[batch];
+ uint spu, cmd_word;
+
+ assert(!flushing);
+
+ if (size == 0)
+ return;
+
+ /* Before we use this batch buffer, make sure any fenced texture buffers
+ * are released.
+ */
+ if (cell->fenced_buffers[batch].head) {
+ emit_fence(cell);
+ size = cell->buffer_size[batch];
+ }
+
+ flushing = TRUE;
+
+ assert(batch < CELL_NUM_BUFFERS);
+
+ /*
+ printf("cell_batch_dispatch: buf %u at %p, size %u\n",
+ batch, &cell->buffer[batch][0], size);
+ */
+
+ /*
+ * Build "BATCH" command and send to all SPUs.
+ */
+ cmd_word = CELL_CMD_BATCH | (batch << 8) | (size << 16);
+
+ for (spu = 0; spu < cell->num_spus; spu++) {
+ assert(cell->buffer_status[spu][batch][0] == CELL_BUFFER_STATUS_USED);
+ send_mbox_message(cell_global.spe_contexts[spu], cmd_word);
+ }
+
+ /* When the SPUs are done copying the buffer into their locals stores
+ * they'll write a BUFFER_STATUS_FREE message into the buffer_status[]
+ * array indicating that the PPU can re-use the buffer.
+ */
+
+ batch = cell_get_empty_buffer(cell);
+
+ cell->buffer_size[batch] = 0; /* empty */
+ cell->cur_batch = batch;
+
+ flushing = FALSE;
+}
+
+
+/**
+ * Return the number of bytes free in the current batch buffer.
+ */
+uint
+cell_batch_free_space(const struct cell_context *cell)
+{
+ uint free = CELL_BUFFER_SIZE - cell->buffer_size[cell->cur_batch];
+ free -= sizeof(struct cell_command_fence);
+ return free;
+}
+
+
+/**
+ * Allocate space in the current batch buffer for 'bytes' space.
+ * Bytes must be a multiple of 16 bytes. Allocation will be 16 byte aligned.
+ * \return address in batch buffer to put data
+ */
+void *
+cell_batch_alloc16(struct cell_context *cell, uint bytes)
+{
+ void *pos;
+ uint size;
+
+ ASSERT(bytes % 16 == 0);
+ ASSERT(bytes <= CELL_BUFFER_SIZE);
+ ASSERT(cell->cur_batch >= 0);
+
+#ifdef ASSERT
+ {
+ uint spu;
+ for (spu = 0; spu < cell->num_spus; spu++) {
+ ASSERT(cell->buffer_status[spu][cell->cur_batch][0]
+ == CELL_BUFFER_STATUS_USED);
+ }
+ }
+#endif
+
+ size = cell->buffer_size[cell->cur_batch];
+
+ if (bytes > cell_batch_free_space(cell)) {
+ cell_batch_flush(cell);
+ size = 0;
+ }
+
+ ASSERT(size % 16 == 0);
+ ASSERT(size + bytes <= CELL_BUFFER_SIZE);
+
+ pos = (void *) (cell->buffer[cell->cur_batch] + size);
+
+ cell->buffer_size[cell->cur_batch] = size + bytes;
+
+ return pos;
+}
+
+
+/**
+ * One-time init of batch buffers.
+ */
+void
+cell_init_batch_buffers(struct cell_context *cell)
+{
+ uint spu, buf;
+
+ /* init command, vertex/index buffer info */
+ for (buf = 0; buf < CELL_NUM_BUFFERS; buf++) {
+ cell->buffer_size[buf] = 0;
+
+ /* init batch buffer status values,
+ * mark 0th buffer as used, rest as free.
+ */
+ for (spu = 0; spu < cell->num_spus; spu++) {
+ if (buf == 0)
+ cell->buffer_status[spu][buf][0] = CELL_BUFFER_STATUS_USED;
+ else
+ cell->buffer_status[spu][buf][0] = CELL_BUFFER_STATUS_FREE;
+ }
+ }
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_batch.h b/src/gallium/drivers/cell/ppu/cell_batch.h
new file mode 100644
index 0000000000..290136031a
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_batch.h
@@ -0,0 +1,54 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef CELL_BATCH_H
+#define CELL_BATCH_H
+
+#include "pipe/p_compiler.h"
+
+
+struct cell_context;
+
+
+extern uint
+cell_get_empty_buffer(struct cell_context *cell);
+
+extern void
+cell_batch_flush(struct cell_context *cell);
+
+extern uint
+cell_batch_free_space(const struct cell_context *cell);
+
+extern void *
+cell_batch_alloc16(struct cell_context *cell, uint bytes);
+
+extern void
+cell_init_batch_buffers(struct cell_context *cell);
+
+
+#endif /* CELL_BATCH_H */
diff --git a/src/gallium/drivers/cell/ppu/cell_clear.c b/src/gallium/drivers/cell/ppu/cell_clear.c
new file mode 100644
index 0000000000..79ad687ea9
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_clear.c
@@ -0,0 +1,92 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Authors
+ * Brian Paul
+ */
+
+#include <stdio.h>
+#include <assert.h>
+#include <stdint.h>
+#include "pipe/p_inlines.h"
+#include "util/u_memory.h"
+#include "util/u_pack_color.h"
+#include "cell/common.h"
+#include "cell_clear.h"
+#include "cell_context.h"
+#include "cell_batch.h"
+#include "cell_flush.h"
+#include "cell_spu.h"
+#include "cell_state.h"
+
+
+/**
+ * Called via pipe->clear()
+ */
+void
+cell_clear(struct pipe_context *pipe, unsigned buffers, const float *rgba,
+ double depth, unsigned stencil)
+{
+ struct cell_context *cell = cell_context(pipe);
+
+ if (cell->dirty)
+ cell_update_derived(cell);
+
+ if (buffers & PIPE_CLEAR_COLOR) {
+ uint surfIndex = 0;
+ uint clearValue;
+
+ util_pack_color(rgba, cell->framebuffer.cbufs[0]->format, &clearValue);
+
+ /* Build a CLEAR command and place it in the current batch buffer */
+ STATIC_ASSERT(sizeof(struct cell_command_clear_surface) % 16 == 0);
+ struct cell_command_clear_surface *clr
+ = (struct cell_command_clear_surface *)
+ cell_batch_alloc16(cell, sizeof(*clr));
+ clr->opcode[0] = CELL_CMD_CLEAR_SURFACE;
+ clr->surface = surfIndex;
+ clr->value = clearValue;
+ }
+
+ if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
+ uint surfIndex = 1;
+ uint clearValue;
+
+ clearValue = util_pack_z_stencil(cell->framebuffer.zsbuf->format,
+ depth, stencil);
+
+ /* Build a CLEAR command and place it in the current batch buffer */
+ STATIC_ASSERT(sizeof(struct cell_command_clear_surface) % 16 == 0);
+ struct cell_command_clear_surface *clr
+ = (struct cell_command_clear_surface *)
+ cell_batch_alloc16(cell, sizeof(*clr));
+ clr->opcode[0] = CELL_CMD_CLEAR_SURFACE;
+ clr->surface = surfIndex;
+ clr->value = clearValue;
+ }
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_clear.h b/src/gallium/drivers/cell/ppu/cell_clear.h
new file mode 100644
index 0000000000..08e091adfd
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_clear.h
@@ -0,0 +1,41 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef CELL_CLEAR_H
+#define CELL_CLEAR_H
+
+
+struct pipe_context;
+
+
+extern void
+cell_clear(struct pipe_context *pipe, unsigned buffers, const float *rgba,
+ double depth, unsigned stencil);
+
+
+#endif /* CELL_CLEAR_H */
diff --git a/src/gallium/drivers/cell/ppu/cell_context.c b/src/gallium/drivers/cell/ppu/cell_context.c
new file mode 100644
index 0000000000..ebb7a7acc4
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_context.c
@@ -0,0 +1,208 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Authors
+ * Brian Paul
+ */
+
+
+#include <stdio.h>
+
+#include "pipe/p_defines.h"
+#include "pipe/p_format.h"
+#include "util/u_memory.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_screen.h"
+
+#include "draw/draw_context.h"
+#include "draw/draw_private.h"
+
+#include "cell/common.h"
+#include "cell_batch.h"
+#include "cell_clear.h"
+#include "cell_context.h"
+#include "cell_draw_arrays.h"
+#include "cell_fence.h"
+#include "cell_flush.h"
+#include "cell_state.h"
+#include "cell_surface.h"
+#include "cell_spu.h"
+#include "cell_pipe_state.h"
+#include "cell_texture.h"
+#include "cell_vbuf.h"
+
+
+
+static void
+cell_destroy_context( struct pipe_context *pipe )
+{
+ struct cell_context *cell = cell_context(pipe);
+
+ util_delete_keymap(cell->fragment_ops_cache, NULL);
+
+ cell_spu_exit(cell);
+
+ align_free(cell);
+}
+
+
+static struct draw_context *
+cell_draw_create(struct cell_context *cell)
+{
+ struct draw_context *draw = draw_create();
+
+#if 0 /* broken */
+ if (getenv("GALLIUM_CELL_VS")) {
+ /* plug in SPU-based vertex transformation code */
+ draw->shader_queue_flush = cell_vertex_shader_queue_flush;
+ draw->driver_private = cell;
+ }
+#endif
+
+ return draw;
+}
+
+
+static const struct debug_named_value cell_debug_flags[] = {
+ {"checker", CELL_DEBUG_CHECKER},/**< modulate tile clear color by SPU ID */
+ {"asm", CELL_DEBUG_ASM}, /**< dump SPU asm code */
+ {"sync", CELL_DEBUG_SYNC}, /**< SPUs do synchronous DMA */
+ {"fragops", CELL_DEBUG_FRAGMENT_OPS}, /**< SPUs emit fragment ops debug messages*/
+ {"fragopfallback", CELL_DEBUG_FRAGMENT_OP_FALLBACK}, /**< SPUs use reference implementation for fragment ops*/
+ {"cmd", CELL_DEBUG_CMD}, /**< SPUs dump command buffer info */
+ {"cache", CELL_DEBUG_CACHE}, /**< report texture cache stats on exit */
+ {NULL, 0}
+};
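+
+/* Example (assuming the usual comma-separated debug_get_flags_option()
+ * syntax): CELL_DEBUG=checker,sync enables the "checker" and "sync"
+ * flags above.
+ */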
+
+static unsigned int
+cell_is_texture_referenced( struct pipe_context *pipe,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+static unsigned int
+cell_is_buffer_referenced( struct pipe_context *pipe,
+ struct pipe_buffer *buf)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+struct pipe_context *
+cell_create_context(struct pipe_screen *screen,
+ struct cell_winsys *cws)
+{
+ struct cell_context *cell;
+ uint i;
+
+ /* some fields need to be 16-byte aligned, so align the whole object */
+ cell = (struct cell_context*) align_malloc(sizeof(struct cell_context), 16);
+ if (!cell)
+ return NULL;
+
+ memset(cell, 0, sizeof(*cell));
+
+ cell->winsys = cws;
+ cell->pipe.winsys = screen->winsys;
+ cell->pipe.screen = screen;
+ cell->pipe.destroy = cell_destroy_context;
+
+ cell->pipe.clear = cell_clear;
+ cell->pipe.flush = cell_flush;
+
+ cell->pipe.is_texture_referenced = cell_is_texture_referenced;
+ cell->pipe.is_buffer_referenced = cell_is_buffer_referenced;
+
+#if 0
+ cell->pipe.begin_query = cell_begin_query;
+ cell->pipe.end_query = cell_end_query;
+ cell->pipe.wait_query = cell_wait_query;
+#endif
+
+ cell_init_draw_functions(cell);
+ cell_init_state_functions(cell);
+ cell_init_shader_functions(cell);
+ cell_init_surface_functions(cell);
+ cell_init_vertex_functions(cell);
+
+ cell->draw = cell_draw_create(cell);
+
+ /* Create cache of fragment ops generated code */
+ cell->fragment_ops_cache =
+ util_new_keymap(sizeof(struct cell_fragment_ops_key), ~0, NULL);
+
+ cell_init_vbuf(cell);
+
+ draw_set_rasterize_stage(cell->draw, cell->vbuf);
+
+ /* convert all points/lines to tris for the time being */
+ draw_wide_point_threshold(cell->draw, 0.0);
+ draw_wide_line_threshold(cell->draw, 0.0);
+
+ /* get env vars or read config file to get debug flags */
+ cell->debug_flags = debug_get_flags_option("CELL_DEBUG",
+ cell_debug_flags,
+ 0 );
+
+ for (i = 0; i < CELL_NUM_BUFFERS; i++)
+ cell_fence_init(&cell->fenced_buffers[i].fence);
+
+
+ /*
+ * SPU stuff
+ */
+ /* This call only works with SDK 3.0. Anyone still using 2.1??? */
+ cell->num_cells = spe_cpu_info_get(SPE_COUNT_PHYSICAL_CPU_NODES, -1);
+ cell->num_spus = spe_cpu_info_get(SPE_COUNT_USABLE_SPES, -1);
+ if (cell->debug_flags) {
+ printf("Cell: found %d Cell(s) with %u SPUs\n",
+ cell->num_cells, cell->num_spus);
+ }
+ if (getenv("CELL_NUM_SPUS")) {
+ cell->num_spus = atoi(getenv("CELL_NUM_SPUS"));
+ assert(cell->num_spus > 0);
+ }
+
+ cell_start_spus(cell);
+
+ cell_init_batch_buffers(cell);
+
+ /* make sure SPU initializations are done before proceeding */
+ cell_flush_int(cell, CELL_FLUSH_WAIT);
+
+ return &cell->pipe;
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_context.h b/src/gallium/drivers/cell/ppu/cell_context.h
new file mode 100644
index 0000000000..5c3188e7f9
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_context.h
@@ -0,0 +1,202 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef CELL_CONTEXT_H
+#define CELL_CONTEXT_H
+
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "draw/draw_vertex.h"
+#include "draw/draw_vbuf.h"
+#include "cell_winsys.h"
+#include "cell/common.h"
+#include "rtasm/rtasm_ppc_spe.h"
+#include "tgsi/tgsi_scan.h"
+#include "util/u_keymap.h"
+
+
+struct cell_vbuf_render;
+
+
+/**
+ * Cell vertex shader state, subclass of pipe_shader_state.
+ */
+struct cell_vertex_shader_state
+{
+ struct pipe_shader_state shader;
+ struct tgsi_shader_info info;
+ void *draw_data;
+};
+
+
+/**
+ * Cell fragment shader state, subclass of pipe_shader_state.
+ */
+struct cell_fragment_shader_state
+{
+ struct pipe_shader_state shader;
+ struct tgsi_shader_info info;
+ struct spe_function code;
+ void *data;
+};
+
+
+/**
+ * Key for mapping per-fragment state to cached SPU machine code.
+ * keymap(cell_fragment_ops_key) => cell_command_fragment_ops
+ */
+struct cell_fragment_ops_key
+{
+ struct pipe_blend_state blend;
+ struct pipe_blend_color blend_color;
+ struct pipe_depth_stencil_alpha_state dsa;
+ enum pipe_format color_format;
+ enum pipe_format zs_format;
+};
+
+
+struct cell_buffer_node;
+
+/**
+ * Fenced buffer list. List of buffers which can be unreferenced after
+ * the fence has been executed/signalled.
+ */
+struct cell_buffer_list
+{
+ struct cell_fence fence ALIGN16_ATTRIB;
+ struct cell_buffer_node *head;
+};
+
+
+/**
+ * Per-context state, subclass of pipe_context.
+ */
+struct cell_context
+{
+ struct pipe_context pipe;
+
+ struct cell_winsys *winsys;
+
+ const struct pipe_blend_state *blend;
+ const struct pipe_sampler_state *sampler[PIPE_MAX_SAMPLERS];
+ uint num_samplers;
+ const struct pipe_depth_stencil_alpha_state *depth_stencil;
+ const struct pipe_rasterizer_state *rasterizer;
+ const struct cell_vertex_shader_state *vs;
+ const struct cell_fragment_shader_state *fs;
+
+ struct spe_function logic_op;
+
+ struct pipe_blend_color blend_color;
+ struct pipe_clip_state clip;
+ struct pipe_constant_buffer constants[2];
+ struct pipe_framebuffer_state framebuffer;
+ struct pipe_poly_stipple poly_stipple;
+ struct pipe_scissor_state scissor;
+ struct cell_texture *texture[PIPE_MAX_SAMPLERS];
+ uint num_textures;
+ struct pipe_viewport_state viewport;
+ struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
+ uint num_vertex_buffers;
+ struct pipe_vertex_element vertex_element[PIPE_MAX_ATTRIBS];
+ uint num_vertex_elements;
+
+ ubyte *cbuf_map[PIPE_MAX_COLOR_BUFS];
+ ubyte *zsbuf_map;
+
+ uint dirty;
+ uint dirty_textures; /* bitmask of texture units */
+ uint dirty_samplers; /* bitmask of sampler units */
+
+ /** Cache of code generated for per-fragment ops */
+ struct keymap *fragment_ops_cache;
+
+ /** The primitive drawing context */
+ struct draw_context *draw;
+ struct draw_stage *render_stage;
+
+ /** For post-transformed vertex buffering: */
+ struct cell_vbuf_render *vbuf_render;
+ struct draw_stage *vbuf;
+
+ struct vertex_info vertex_info;
+
+ /** Mapped constant buffers */
+ void *mapped_constants[PIPE_SHADER_TYPES];
+
+ struct cell_spu_function_info spu_functions ALIGN16_ATTRIB;
+
+ uint num_cells, num_spus;
+
+ /** Buffers for command batches, vertex/index data */
+ uint buffer_size[CELL_NUM_BUFFERS];
+ ubyte buffer[CELL_NUM_BUFFERS][CELL_BUFFER_SIZE] ALIGN16_ATTRIB;
+
+ int cur_batch; /**< which buffer is being filled w/ commands */
+
+ /** [4] to ensure 16-byte alignment for each status word */
+ uint buffer_status[CELL_MAX_SPUS][CELL_NUM_BUFFERS][4] ALIGN16_ATTRIB;
+
+
+ /** Associated with each command/batch buffer is a list of pipe_buffers
+ * that are fenced. When the last command in a buffer is executed, the
+ * fence will be signalled, indicating that any pipe_buffers preceding
+ * that fence can be unreferenced (and probably freed).
+ */
+ struct cell_buffer_list fenced_buffers[CELL_NUM_BUFFERS];
+
+
+ struct spe_function attrib_fetch;
+ unsigned attrib_fetch_offsets[PIPE_MAX_ATTRIBS];
+
+ unsigned debug_flags;
+};
+
+
+
+
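+/**
+ * Convenience cast: driver entry points receive a pipe_context pointer and
+ * recover the Cell context with, for example,
+ *    struct cell_context *cell = cell_context(pipe);
+ * as done in cell_draw_arrays.c below.
+ */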
+static INLINE struct cell_context *
+cell_context(struct pipe_context *pipe)
+{
+ return (struct cell_context *) pipe;
+}
+
+
+extern struct pipe_context *
+cell_create_context(struct pipe_screen *screen, struct cell_winsys *cws);
+
+extern void
+cell_vertex_shader_queue_flush(struct draw_context *draw);
+
+
+/* XXX find a better home for this */
+extern void cell_update_vertex_fetch(struct draw_context *draw);
+
+
+#endif /* CELL_CONTEXT_H */
diff --git a/src/gallium/drivers/cell/ppu/cell_draw_arrays.c b/src/gallium/drivers/cell/ppu/cell_draw_arrays.c
new file mode 100644
index 0000000000..644496db40
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_draw_arrays.c
@@ -0,0 +1,191 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Author:
+ * Brian Paul
+ * Keith Whitwell
+ */
+
+
+#include "pipe/p_defines.h"
+#include "pipe/p_context.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_inlines.h"
+
+#include "cell_context.h"
+#include "cell_draw_arrays.h"
+#include "cell_state.h"
+#include "cell_flush.h"
+
+#include "draw/draw_context.h"
+
+
+
+static void
+cell_map_constant_buffers(struct cell_context *sp)
+{
+ struct pipe_winsys *ws = sp->pipe.winsys;
+ uint i;
+ for (i = 0; i < 2; i++) {
+ if (sp->constants[i].buffer && sp->constants[i].buffer->size) {
+ sp->mapped_constants[i] = ws->buffer_map(ws, sp->constants[i].buffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ cell_flush_buffer_range(sp, sp->mapped_constants[i],
+ sp->constants[i].buffer->size);
+ }
+ }
+
+ draw_set_mapped_constant_buffer(sp->draw,
+ sp->mapped_constants[PIPE_SHADER_VERTEX],
+ sp->constants[PIPE_SHADER_VERTEX].buffer->size);
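+ /* Note: only the vertex shader constants are handed to the draw module
+ * here; the fragment shader constants are presumably consumed by the SPU
+ * fragment code instead.
+ */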
+}
+
+static void
+cell_unmap_constant_buffers(struct cell_context *sp)
+{
+ struct pipe_winsys *ws = sp->pipe.winsys;
+ uint i;
+ for (i = 0; i < 2; i++) {
+ if (sp->constants[i].buffer && sp->constants[i].buffer->size)
+ ws->buffer_unmap(ws, sp->constants[i].buffer);
+ sp->mapped_constants[i] = NULL;
+ }
+}
+
+
+
+/**
+ * Draw vertex arrays, with optional indexing.
+ * Basically, map the vertex buffers (and drawing surfaces), then hand off
+ * the drawing to the 'draw' module.
+ *
+ * XXX should the element buffer be specified/bound with a separate function?
+ */
+static boolean
+cell_draw_range_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned min_index,
+ unsigned max_index,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct cell_context *sp = cell_context(pipe);
+ struct draw_context *draw = sp->draw;
+ unsigned i;
+
+ if (sp->dirty)
+ cell_update_derived( sp );
+
+#if 0
+ cell_map_surfaces(sp);
+#endif
+ cell_map_constant_buffers(sp);
+
+ /*
+ * Map vertex buffers
+ */
+ for (i = 0; i < sp->num_vertex_buffers; i++) {
+ void *buf = pipe_buffer_map(pipe->screen,
+ sp->vertex_buffer[i].buffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ cell_flush_buffer_range(sp, buf, sp->vertex_buffer[i].buffer->size);
+ draw_set_mapped_vertex_buffer(draw, i, buf);
+ }
+ /* Map index buffer, if present */
+ if (indexBuffer) {
+ void *mapped_indexes = pipe_buffer_map(pipe->screen,
+ indexBuffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ draw_set_mapped_element_buffer(draw, indexSize, mapped_indexes);
+ }
+ else {
+ /* no index/element buffer */
+ draw_set_mapped_element_buffer(draw, 0, NULL);
+ }
+
+
+ /* draw! */
+ draw_arrays(draw, mode, start, count);
+
+ /*
+ * unmap vertex/index buffers - will cause draw module to flush
+ */
+ for (i = 0; i < sp->num_vertex_buffers; i++) {
+ draw_set_mapped_vertex_buffer(draw, i, NULL);
+ pipe_buffer_unmap(pipe->screen, sp->vertex_buffer[i].buffer);
+ }
+ if (indexBuffer) {
+ draw_set_mapped_element_buffer(draw, 0, NULL);
+ pipe_buffer_unmap(pipe->screen, indexBuffer);
+ }
+
+ /* Note: leave drawing surfaces mapped */
+ cell_unmap_constant_buffers(sp);
+
+ return TRUE;
+}
+
+
+static boolean
+cell_draw_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned mode, unsigned start, unsigned count)
+{
+ return cell_draw_range_elements( pipe, indexBuffer,
+ indexSize,
+ 0, 0xffffffff,
+ mode, start, count );
+}
+
+
+static boolean
+cell_draw_arrays(struct pipe_context *pipe, unsigned mode,
+ unsigned start, unsigned count)
+{
+ return cell_draw_elements(pipe, NULL, 0, mode, start, count);
+}
+
+
+static void
+cell_set_edgeflags(struct pipe_context *pipe, const unsigned *edgeflags)
+{
+ struct cell_context *cell = cell_context(pipe);
+ draw_set_edgeflags(cell->draw, edgeflags);
+}
+
+
+
+void
+cell_init_draw_functions(struct cell_context *cell)
+{
+ cell->pipe.draw_arrays = cell_draw_arrays;
+ cell->pipe.draw_elements = cell_draw_elements;
+ cell->pipe.draw_range_elements = cell_draw_range_elements;
+ cell->pipe.set_edgeflags = cell_set_edgeflags;
+}
+
diff --git a/src/gallium/drivers/cell/ppu/cell_draw_arrays.h b/src/gallium/drivers/cell/ppu/cell_draw_arrays.h
new file mode 100644
index 0000000000..148873aa67
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_draw_arrays.h
@@ -0,0 +1,36 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef CELL_DRAW_ARRAYS_H
+#define CELL_DRAW_ARRAYS_H
+
+
+extern void
+cell_init_draw_functions(struct cell_context *cell);
+
+
+#endif /* CELL_DRAW_ARRAYS_H */
diff --git a/src/gallium/drivers/cell/ppu/cell_fence.c b/src/gallium/drivers/cell/ppu/cell_fence.c
new file mode 100644
index 0000000000..13125a9fa3
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_fence.c
@@ -0,0 +1,164 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <unistd.h>
+#include "util/u_memory.h"
+#include "pipe/p_inlines.h"
+#include "cell_context.h"
+#include "cell_batch.h"
+#include "cell_fence.h"
+#include "cell_texture.h"
+
+
+void
+cell_fence_init(struct cell_fence *fence)
+{
+ uint i;
+ ASSERT_ALIGN16(fence->status);
+ for (i = 0; i < CELL_MAX_SPUS; i++) {
+ fence->status[i][0] = CELL_FENCE_IDLE;
+ }
+}
+
+
+boolean
+cell_fence_signalled(const struct cell_context *cell,
+ const struct cell_fence *fence)
+{
+ uint i;
+ for (i = 0; i < cell->num_spus; i++) {
+ if (fence->status[i][0] != CELL_FENCE_SIGNALLED)
+ return FALSE;
+ /*assert(fence->status[i][0] == CELL_FENCE_EMITTED);*/
+ }
+ return TRUE;
+}
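+
+/* Each SPU is expected to write CELL_FENCE_SIGNALLED into its own
+ * 16-byte-aligned status word (presumably when it reaches the fence
+ * command in the batch); cell_fence_finish() below simply polls for that.
+ */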
+
+
+void
+cell_fence_finish(const struct cell_context *cell,
+ const struct cell_fence *fence)
+{
+ while (!cell_fence_signalled(cell, fence)) {
+ usleep(10);
+ }
+
+#ifdef DEBUG
+ {
+ uint i;
+ for (i = 0; i < cell->num_spus; i++) {
+ assert(fence->status[i][0] == CELL_FENCE_SIGNALLED);
+ }
+ }
+#endif
+}
+
+
+
+
+struct cell_buffer_node
+{
+ struct pipe_buffer *buffer;
+ struct cell_buffer_node *next;
+};
+
+
+static void
+cell_add_buffer_to_list(struct cell_context *cell,
+ struct cell_buffer_list *list,
+ struct pipe_buffer *buffer)
+{
+ struct pipe_screen *ps = cell->pipe.screen;
+ struct cell_buffer_node *node = CALLOC_STRUCT(cell_buffer_node);
+ /* create new list node which references the buffer, insert at head */
+ if (node) {
+ pipe_buffer_reference(&node->buffer, buffer);
+ node->next = list->head;
+ list->head = node;
+ }
+}
+
+
+/**
+ * Wait for completion of the given fence, then unreference any buffers
+ * on the list.
+ * This typically unrefs/frees texture buffers after any rendering which uses
+ * them has completed.
+ */
+void
+cell_free_fenced_buffers(struct cell_context *cell,
+ struct cell_buffer_list *list)
+{
+ if (list->head) {
+ struct pipe_screen *ps = cell->pipe.screen;
+ struct cell_buffer_node *node;
+
+ cell_fence_finish(cell, &list->fence);
+
+ /* traverse the list, unreferencing buffers, freeing nodes */
+ node = list->head;
+ while (node) {
+ struct cell_buffer_node *next = node->next;
+ assert(node->buffer);
+ pipe_buffer_unmap(ps, node->buffer);
+#if 0
+ printf("Unref buffer %p\n", node->buffer);
+ if (node->buffer->reference.count == 1)
+ printf(" Delete!\n");
+#endif
+ pipe_buffer_reference(&node->buffer, NULL);
+ FREE(node);
+ node = next;
+ }
+ list->head = NULL;
+ }
+}
+
+
+/**
+ * This should be called for each render command.
+ * Any texture buffers that are currently bound will be added to a fenced
+ * list to be freed later when the fence is executed/signalled.
+ */
+void
+cell_add_fenced_textures(struct cell_context *cell)
+{
+ struct cell_buffer_list *list = &cell->fenced_buffers[cell->cur_batch];
+ uint i;
+
+ for (i = 0; i < cell->num_textures; i++) {
+ struct cell_texture *ct = cell->texture[i];
+ if (ct) {
+#if 0
+ printf("Adding texture %p buffer %p to list\n",
+ ct, ct->tiled_buffer[level]);
+#endif
+ if (ct->buffer)
+ cell_add_buffer_to_list(cell, list, ct->buffer);
+ }
+ }
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_fence.h b/src/gallium/drivers/cell/ppu/cell_fence.h
new file mode 100644
index 0000000000..536b4ba411
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_fence.h
@@ -0,0 +1,57 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef CELL_FENCE_H
+#define CELL_FENCE_H
+
+
+extern void
+cell_fence_init(struct cell_fence *fence);
+
+
+extern boolean
+cell_fence_signalled(const struct cell_context *cell,
+ const struct cell_fence *fence);
+
+
+extern void
+cell_fence_finish(const struct cell_context *cell,
+ const struct cell_fence *fence);
+
+
+
+extern void
+cell_free_fenced_buffers(struct cell_context *cell,
+ struct cell_buffer_list *list);
+
+
+extern void
+cell_add_fenced_textures(struct cell_context *cell);
+
+
+#endif /* CELL_FENCE_H */
diff --git a/src/gallium/drivers/cell/ppu/cell_flush.c b/src/gallium/drivers/cell/ppu/cell_flush.c
new file mode 100644
index 0000000000..8275c9dc9c
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_flush.c
@@ -0,0 +1,112 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "cell_context.h"
+#include "cell_batch.h"
+#include "cell_flush.h"
+#include "cell_spu.h"
+#include "cell_render.h"
+#include "draw/draw_context.h"
+
+
+/**
+ * Called via pipe->flush()
+ */
+void
+cell_flush(struct pipe_context *pipe, unsigned flags,
+ struct pipe_fence_handle **fence)
+{
+ struct cell_context *cell = cell_context(pipe);
+
+ if (fence) {
+ *fence = NULL;
+ /* XXX: Implement real fencing */
+ flags |= CELL_FLUSH_WAIT;
+ }
+
+ if (flags & (PIPE_FLUSH_SWAPBUFFERS | PIPE_FLUSH_RENDER_CACHE))
+ flags |= CELL_FLUSH_WAIT;
+
+ draw_flush( cell->draw );
+ cell_flush_int(cell, flags);
+}
+
+
+/**
+ * Cell internal flush function. Send the current batch buffer to all SPUs.
+ * If flags & CELL_FLUSH_WAIT, do not return until the SPUs are idle.
+ * \param flags  either CELL_FLUSH_WAIT or zero
+ */
+void
+cell_flush_int(struct cell_context *cell, unsigned flags)
+{
+ static boolean flushing = FALSE; /* recursion catcher */
+ uint i;
+
+ ASSERT(!flushing);
+ flushing = TRUE;
+
+ if (flags & CELL_FLUSH_WAIT) {
+ STATIC_ASSERT(sizeof(opcode_t) % 16 == 0);
+ opcode_t *cmd = (opcode_t*) cell_batch_alloc16(cell, sizeof(opcode_t));
+ *cmd[0] = CELL_CMD_FINISH;
+ }
+
+ cell_batch_flush(cell);
+
+#if 0
+ /* Send CMD_FINISH to all SPUs */
+ for (i = 0; i < cell->num_spus; i++) {
+ send_mbox_message(cell_global.spe_contexts[i], CELL_CMD_FINISH);
+ }
+#endif
+
+ if (flags & CELL_FLUSH_WAIT) {
+ /* Wait for ack */
+ for (i = 0; i < cell->num_spus; i++) {
+ uint k = wait_mbox_message(cell_global.spe_contexts[i]);
+ assert(k == CELL_CMD_FINISH);
+ }
+ }
+
+ flushing = FALSE;
+}
+
+
+void
+cell_flush_buffer_range(struct cell_context *cell, void *ptr,
+ unsigned size)
+{
+ STATIC_ASSERT((sizeof(opcode_t) + sizeof(struct cell_buffer_range)) % 16 == 0);
+ uint32_t *batch = (uint32_t*)cell_batch_alloc16(cell,
+ sizeof(opcode_t) + sizeof(struct cell_buffer_range));
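+ /* Batch layout for this command: batch[0..3] hold the 16-byte opcode
+ * quadword (batch[1..3] appear to be padding), and the cell_buffer_range
+ * payload immediately follows at batch[4].
+ */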
+ struct cell_buffer_range *br = (struct cell_buffer_range *) &batch[4];
+ batch[0] = CELL_CMD_FLUSH_BUFFER_RANGE;
+ br->base = (uintptr_t) ptr;
+ br->size = size;
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_flush.h b/src/gallium/drivers/cell/ppu/cell_flush.h
new file mode 100644
index 0000000000..509ae6239a
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_flush.h
@@ -0,0 +1,45 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef CELL_FLUSH
+#define CELL_FLUSH
+
+#define CELL_FLUSH_WAIT 0x80000000
+
+extern void
+cell_flush(struct pipe_context *pipe, unsigned flags,
+ struct pipe_fence_handle **fence);
+
+extern void
+cell_flush_int(struct cell_context *cell, unsigned flags);
+
+extern void
+cell_flush_buffer_range(struct cell_context *cell, void *ptr,
+ unsigned size);
+
+#endif
diff --git a/src/gallium/drivers/cell/ppu/cell_gen_fp.c b/src/gallium/drivers/cell/ppu/cell_gen_fp.c
new file mode 100644
index 0000000000..312621fd53
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_gen_fp.c
@@ -0,0 +1,2046 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ * Copyright 2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+
+/**
+ * Generate SPU fragment program/shader code.
+ *
+ * Note that we generate SOA-style code here. So each TGSI instruction
+ * operates on four pixels (and is translated into four SPU instructions,
+ * generally speaking).
+ *
+ * \author Brian Paul
+ */
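+
+/*
+ * For example (illustrative only), a TGSI instruction such as
+ *    ADD TEMP[0].xyz, IN[0], IN[1]
+ * is emitted as one spe_fa() per enabled channel, where each SPE register
+ * holds that channel's value for four fragments (see emit_binop() and
+ * FOR_EACH_ENABLED_CHANNEL below).
+ */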
+
+#include <math.h>
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_util.h"
+#include "tgsi/tgsi_exec.h"
+#include "tgsi/tgsi_dump.h"
+#include "rtasm/rtasm_ppc_spe.h"
+#include "util/u_memory.h"
+#include "cell_context.h"
+#include "cell_gen_fp.h"
+
+
+#define MAX_TEMPS 16
+#define MAX_IMMED 8
+
+#define CHAN_X 0
+#define CHAN_Y 1
+#define CHAN_Z 2
+#define CHAN_W 3
+
+/**
+ * Context needed during code generation.
+ */
+struct codegen
+{
+ struct cell_context *cell;
+ int inputs_reg; /**< 1st function parameter */
+ int outputs_reg; /**< 2nd function parameter */
+ int constants_reg; /**< 3rd function parameter */
+ int temp_regs[MAX_TEMPS][4]; /**< maps TGSI temps to SPE registers */
+ int imm_regs[MAX_IMMED][4]; /**< maps TGSI immediates to SPE registers */
+
+ int num_imm; /**< number of immediates */
+
+ int one_reg; /**< register containing {1.0, 1.0, 1.0, 1.0} */
+
+ int addr_reg; /**< address register, integer values */
+
+ /** Per-instruction temps / intermediate temps */
+ int num_itemps;
+ int itemps[12];
+
+ /** Current IF/ELSE/ENDIF nesting level */
+ int if_nesting;
+ /** Current BGNLOOP/ENDLOOP nesting level */
+ int loop_nesting;
+ /** Location of start of current loop */
+ int loop_start;
+
+ /** Index of if/conditional mask register */
+ int cond_mask_reg;
+ /** Index of loop mask register */
+ int loop_mask_reg;
+
+ /** Index of master execution mask register */
+ int exec_mask_reg;
+
+ /** KIL mask: indicates which fragments have been killed */
+ int kill_mask_reg;
+
+ int frame_size; /**< Stack frame size, in words */
+
+ struct spe_function *f;
+ boolean error;
+};
+
+
+/**
+ * Allocate an intermediate temporary register.
+ */
+static int
+get_itemp(struct codegen *gen)
+{
+ int t = spe_allocate_available_register(gen->f);
+ assert(gen->num_itemps < Elements(gen->itemps));
+ gen->itemps[gen->num_itemps++] = t;
+ return t;
+}
+
+/**
+ * Free all intermediate temporary registers. To be called after each
+ * instruction has been emitted.
+ */
+static void
+free_itemps(struct codegen *gen)
+{
+ int i;
+ for (i = 0; i < gen->num_itemps; i++) {
+ spe_release_register(gen->f, gen->itemps[i]);
+ }
+ gen->num_itemps = 0;
+}
+
+
+/**
+ * Return index of an SPE register containing {1.0, 1.0, 1.0, 1.0}.
+ * The register is allocated and initialized upon the first call.
+ */
+static int
+get_const_one_reg(struct codegen *gen)
+{
+ if (gen->one_reg <= 0) {
+ gen->one_reg = spe_allocate_available_register(gen->f);
+
+ spe_indent(gen->f, 4);
+ spe_comment(gen->f, -4, "init constant reg = 1.0:");
+
+ /* one = {1.0, 1.0, 1.0, 1.0} */
+ spe_load_float(gen->f, gen->one_reg, 1.0f);
+
+ spe_indent(gen->f, -4);
+ }
+
+ return gen->one_reg;
+}
+
+
+/**
+ * Return index of the address register.
+ * Used for indirect register loads/stores.
+ */
+static int
+get_address_reg(struct codegen *gen)
+{
+ if (gen->addr_reg <= 0) {
+ gen->addr_reg = spe_allocate_available_register(gen->f);
+
+ spe_indent(gen->f, 4);
+ spe_comment(gen->f, -4, "init address reg = 0:");
+
+ /* init addr = {0, 0, 0, 0} */
+ spe_zero(gen->f, gen->addr_reg);
+
+ spe_indent(gen->f, -4);
+ }
+
+ return gen->addr_reg;
+}
+
+
+/**
+ * Return index of the master execution mask.
+ * The register is allocated and initialized upon the first call.
+ *
+ * The master execution mask controls which pixels in a quad are
+ * modified, according to surrounding conditionals, loops, etc.
+ */
+static int
+get_exec_mask_reg(struct codegen *gen)
+{
+ if (gen->exec_mask_reg <= 0) {
+ gen->exec_mask_reg = spe_allocate_available_register(gen->f);
+
+ /* XXX this may not be needed */
+ spe_comment(gen->f, 0*-4, "initialize master execution mask = ~0");
+ spe_load_int(gen->f, gen->exec_mask_reg, ~0);
+ }
+
+ return gen->exec_mask_reg;
+}
+
+
+/** Return index of the conditional (if/else) execution mask register */
+static int
+get_cond_mask_reg(struct codegen *gen)
+{
+ if (gen->cond_mask_reg <= 0) {
+ gen->cond_mask_reg = spe_allocate_available_register(gen->f);
+ }
+
+ return gen->cond_mask_reg;
+}
+
+
+/** Return index of the loop execution mask register */
+static int
+get_loop_mask_reg(struct codegen *gen)
+{
+ if (gen->loop_mask_reg <= 0) {
+ gen->loop_mask_reg = spe_allocate_available_register(gen->f);
+ }
+
+ return gen->loop_mask_reg;
+}
+
+
+
+static boolean
+is_register_src(struct codegen *gen, int channel,
+ const struct tgsi_full_src_register *src)
+{
+ int swizzle = tgsi_util_get_full_src_register_extswizzle(src, channel);
+ int sign_op = tgsi_util_get_full_src_register_sign_mode(src, channel);
+
+ if (swizzle > TGSI_SWIZZLE_W || sign_op != TGSI_UTIL_SIGN_KEEP) {
+ return FALSE;
+ }
+ if (src->SrcRegister.File == TGSI_FILE_TEMPORARY ||
+ src->SrcRegister.File == TGSI_FILE_IMMEDIATE) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
+static boolean
+is_memory_dst(struct codegen *gen, int channel,
+ const struct tgsi_full_dst_register *dst)
+{
+ if (dst->DstRegister.File == TGSI_FILE_OUTPUT) {
+ return TRUE;
+ }
+ else {
+ return FALSE;
+ }
+}
+
+
+/**
+ * Return the index of the SPU temporary containing the named TGSI
+ * source register. If the TGSI register is a TGSI_FILE_TEMPORARY we
+ * just return the corresponding SPE register. If the TGSI register
+ * is TGSI_FILE_INPUT/CONSTANT/IMMEDIATE we allocate a new SPE register
+ * and emit an SPE load instruction.
+ */
+static int
+get_src_reg(struct codegen *gen,
+ int channel,
+ const struct tgsi_full_src_register *src)
+{
+ int reg = -1;
+ int swizzle = tgsi_util_get_full_src_register_extswizzle(src, channel);
+ boolean reg_is_itemp = FALSE;
+ uint sign_op;
+
+ assert(swizzle >= TGSI_SWIZZLE_X);
+ assert(swizzle <= TGSI_EXTSWIZZLE_ONE);
+
+ if (swizzle == TGSI_EXTSWIZZLE_ONE) {
+ /* use the shared constant-1.0 register */
+ reg = get_const_one_reg(gen);
+ }
+ else if (swizzle == TGSI_EXTSWIZZLE_ZERO) {
+ /* generate constant zero by xor'ing a temp register with itself */
+ reg = get_itemp(gen);
+ spe_xor(gen->f, reg, reg, reg);
+ }
+ else {
+ int index = src->SrcRegister.Index;
+
+ assert(swizzle < 4);
+
+ if (src->SrcRegister.Indirect) {
+ /* XXX unfinished */
+ }
+
+ switch (src->SrcRegister.File) {
+ case TGSI_FILE_TEMPORARY:
+ reg = gen->temp_regs[index][swizzle];
+ break;
+ case TGSI_FILE_INPUT:
+ {
+ /* offset is measured in quadwords, not bytes */
+ int offset = index * 4 + swizzle;
+ reg = get_itemp(gen);
+ reg_is_itemp = TRUE;
+ /* Load: reg = memory[(machine_reg) + offset] */
+ spe_lqd(gen->f, reg, gen->inputs_reg, offset * 16);
+ }
+ break;
+ case TGSI_FILE_IMMEDIATE:
+ reg = gen->imm_regs[index][swizzle];
+ break;
+ case TGSI_FILE_CONSTANT:
+ {
+ /* offset is measured in quadwords, not bytes */
+ int offset = index * 4 + swizzle;
+ reg = get_itemp(gen);
+ reg_is_itemp = TRUE;
+ /* Load: reg = memory[(machine_reg) + offset] */
+ spe_lqd(gen->f, reg, gen->constants_reg, offset * 16);
+ }
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ /*
+ * Handle absolute value, negate or set-negative of src register.
+ */
+ sign_op = tgsi_util_get_full_src_register_sign_mode(src, channel);
+ if (sign_op != TGSI_UTIL_SIGN_KEEP) {
+ /*
+ * All sign ops are done by manipulating bit 31, the IEEE float sign bit.
+ */
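+ /* clearing bit 31 gives |x|, setting it gives -|x|, toggling it negates */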
+ const int bit31mask_reg = get_itemp(gen);
+ int result_reg;
+
+ if (reg_is_itemp) {
+ /* re-use 'reg' for the result */
+ result_reg = reg;
+ }
+ else {
+ /* alloc a new reg for the result */
+ result_reg = get_itemp(gen);
+ }
+
+ /* mask with bit 31 set, the rest cleared */
+ spe_load_uint(gen->f, bit31mask_reg, (1 << 31));
+
+ if (sign_op == TGSI_UTIL_SIGN_CLEAR) {
+ spe_andc(gen->f, result_reg, reg, bit31mask_reg);
+ }
+ else if (sign_op == TGSI_UTIL_SIGN_SET) {
+ spe_or(gen->f, result_reg, reg, bit31mask_reg); /* force the sign bit on */
+ }
+ else {
+ assert(sign_op == TGSI_UTIL_SIGN_TOGGLE);
+ spe_xor(gen->f, result_reg, reg, bit31mask_reg);
+ }
+
+ reg = result_reg;
+ }
+
+ return reg;
+}
+
+
+/**
+ * Return the index of an SPE register to use for the given TGSI register.
+ * If the TGSI register is TGSI_FILE_TEMPORARY, the index of the
+ * corresponding SPE register is returned. If the TGSI register is
+ * TGSI_FILE_OUTPUT we allocate an intermediate temporary register.
+ * See store_dest_reg() below...
+ */
+static int
+get_dst_reg(struct codegen *gen,
+ int channel,
+ const struct tgsi_full_dst_register *dest)
+{
+ int reg = -1;
+
+ switch (dest->DstRegister.File) {
+ case TGSI_FILE_TEMPORARY:
+ if (gen->if_nesting > 0 || gen->loop_nesting > 0)
+ reg = get_itemp(gen);
+ else
+ reg = gen->temp_regs[dest->DstRegister.Index][channel];
+ break;
+ case TGSI_FILE_OUTPUT:
+ reg = get_itemp(gen);
+ break;
+ default:
+ assert(0);
+ }
+
+ return reg;
+}
+
+
+/**
+ * When a TGSI instruction is writing to an output register, this
+ * function emits the SPE store instruction to store the value_reg.
+ * \param value_reg the SPE register containing the value to store.
+ * This would have been returned by get_dst_reg().
+ */
+static void
+store_dest_reg(struct codegen *gen,
+ int value_reg, int channel,
+ const struct tgsi_full_dst_register *dest)
+{
+ /*
+ * XXX need to implement dst reg clamping/saturation
+ */
+#if 0
+ switch (inst->Instruction.Saturate) {
+ case TGSI_SAT_NONE:
+ break;
+ case TGSI_SAT_ZERO_ONE:
+ break;
+ case TGSI_SAT_MINUS_PLUS_ONE:
+ break;
+ default:
+ assert( 0 );
+ }
+#endif
+
+ switch (dest->DstRegister.File) {
+ case TGSI_FILE_TEMPORARY:
+ if (gen->if_nesting > 0 || gen->loop_nesting > 0) {
+ int d_reg = gen->temp_regs[dest->DstRegister.Index][channel];
+ int exec_reg = get_exec_mask_reg(gen);
+ /* Mix d with new value according to exec mask:
+ * d[i] = mask_reg[i] ? value_reg : d_reg
+ */
+ spe_selb(gen->f, d_reg, d_reg, value_reg, exec_reg);
+ }
+ else {
+ /* we're not inside a condition or loop: do nothing special */
+
+ }
+ break;
+ case TGSI_FILE_OUTPUT:
+ {
+ /* offset is measured in quadwords, not bytes */
+ int offset = dest->DstRegister.Index * 4 + channel;
+ if (gen->if_nesting > 0 || gen->loop_nesting > 0) {
+ int exec_reg = get_exec_mask_reg(gen);
+ int curval_reg = get_itemp(gen);
+ /* First read the current value from memory:
+ * Load: curval = memory[(machine_reg) + offset]
+ */
+ spe_lqd(gen->f, curval_reg, gen->outputs_reg, offset * 16);
+ /* Mix curval with newvalue according to exec mask:
+ * d[i] = mask_reg[i] ? value_reg : d_reg
+ */
+ spe_selb(gen->f, curval_reg, curval_reg, value_reg, exec_reg);
+ /* Store: memory[(machine_reg) + offset] = curval */
+ spe_stqd(gen->f, curval_reg, gen->outputs_reg, offset * 16);
+ }
+ else {
+ /* Store: memory[(machine_reg) + offset] = reg */
+ spe_stqd(gen->f, value_reg, gen->outputs_reg, offset * 16);
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+
+
+static void
+emit_prologue(struct codegen *gen)
+{
+ gen->frame_size = 1024; /* XXX temporary, should be dynamic */
+
+ spe_comment(gen->f, 0, "Function prologue:");
+
+ /* save $lr on stack # stqd $lr,16($sp) */
+ spe_stqd(gen->f, SPE_REG_RA, SPE_REG_SP, 16);
+
+ if (gen->frame_size >= 512) {
+ /* offset is too large for ai instruction */
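+ /* (the SPE "ai" add-immediate field is a signed 10-bit value, so it can
+ * only encode offsets of roughly -512..511) */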
+ int offset_reg = spe_allocate_available_register(gen->f);
+ int sp_reg = spe_allocate_available_register(gen->f);
+ /* offset = -framesize */
+ spe_load_int(gen->f, offset_reg, -gen->frame_size);
+ /* sp = $sp */
+ spe_move(gen->f, sp_reg, SPE_REG_SP);
+ /* $sp = $sp + offset_reg */
+ spe_a(gen->f, SPE_REG_SP, SPE_REG_SP, offset_reg);
+ /* save $sp in stack frame */
+ spe_stqd(gen->f, sp_reg, SPE_REG_SP, 0);
+ /* clean up */
+ spe_release_register(gen->f, offset_reg);
+ spe_release_register(gen->f, sp_reg);
+ }
+ else {
+ /* save stack pointer # stqd $sp,-frameSize($sp) */
+ spe_stqd(gen->f, SPE_REG_SP, SPE_REG_SP, -gen->frame_size);
+
+ /* adjust stack pointer # ai $sp,$sp,-frameSize */
+ spe_ai(gen->f, SPE_REG_SP, SPE_REG_SP, -gen->frame_size);
+ }
+}
+
+
+static void
+emit_epilogue(struct codegen *gen)
+{
+ const int return_reg = 3;
+
+ spe_comment(gen->f, 0, "Function epilogue:");
+
+ spe_comment(gen->f, 0, "return the killed mask");
+ if (gen->kill_mask_reg > 0) {
+ /* shader called KIL, return the "alive" mask */
+ spe_move(gen->f, return_reg, gen->kill_mask_reg);
+ }
+ else {
+ /* return {0,0,0,0} */
+ spe_load_uint(gen->f, return_reg, 0);
+ }
+
+ spe_comment(gen->f, 0, "restore stack and return");
+ if (gen->frame_size >= 512) {
+ /* offset is too large for ai instruction */
+ int offset_reg = spe_allocate_available_register(gen->f);
+ /* offset = framesize */
+ spe_load_int(gen->f, offset_reg, gen->frame_size);
+ /* $sp = $sp + offset */
+ spe_a(gen->f, SPE_REG_SP, SPE_REG_SP, offset_reg);
+ /* clean up */
+ spe_release_register(gen->f, offset_reg);
+ }
+ else {
+ /* restore stack pointer # ai $sp,$sp,frameSize */
+ spe_ai(gen->f, SPE_REG_SP, SPE_REG_SP, gen->frame_size);
+ }
+
+ /* restore $lr # lqd $lr,16($sp) */
+ spe_lqd(gen->f, SPE_REG_RA, SPE_REG_SP, 16);
+
+ /* return from function call */
+ spe_bi(gen->f, SPE_REG_RA, 0, 0);
+}
+
+
+#define FOR_EACH_ENABLED_CHANNEL(inst, ch) \
+ for (ch = 0; ch < 4; ch++) \
+ if (inst->FullDstRegisters[0].DstRegister.WriteMask & (1 << ch))
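+
+/* For example, a destination write mask of .xz runs the loop body for
+ * ch = 0 (X) and ch = 2 (Z) only.
+ */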
+
+
+static boolean
+emit_ARL(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int ch = 0, src_reg, addr_reg;
+
+ src_reg = get_src_reg(gen, ch, &inst->FullSrcRegisters[0]);
+ addr_reg = get_address_reg(gen);
+
+ /* convert float to int */
+ spe_cflts(gen->f, addr_reg, src_reg, 0);
+
+ free_itemps(gen);
+
+ return TRUE;
+}
+
+
+static boolean
+emit_MOV(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int ch, src_reg[4], dst_reg[4];
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ src_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[0]);
+ dst_reg[ch] = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+ }
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ if (is_register_src(gen, ch, &inst->FullSrcRegisters[0]) &&
+ is_memory_dst(gen, ch, &inst->FullDstRegisters[0])) {
+ /* special-case: register to memory store */
+ store_dest_reg(gen, src_reg[ch], ch, &inst->FullDstRegisters[0]);
+ }
+ else {
+ spe_move(gen->f, dst_reg[ch], src_reg[ch]);
+ store_dest_reg(gen, dst_reg[ch], ch, &inst->FullDstRegisters[0]);
+ }
+ }
+
+ free_itemps(gen);
+
+ return TRUE;
+}
+
+/**
+ * Emit binary operation
+ */
+static boolean
+emit_binop(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int ch, s1_reg[4], s2_reg[4], d_reg[4];
+
+ /* Loop over Red/Green/Blue/Alpha channels, fetch src operands */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ s1_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[0]);
+ s2_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[1]);
+ d_reg[ch] = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+ }
+
+ /* Loop over Red/Green/Blue/Alpha channels, do the op, store results */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ /* Emit the actual SPE instruction: d = s1 <op> s2 */
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_ADD:
+ spe_fa(gen->f, d_reg[ch], s1_reg[ch], s2_reg[ch]);
+ break;
+ case TGSI_OPCODE_SUB:
+ spe_fs(gen->f, d_reg[ch], s1_reg[ch], s2_reg[ch]);
+ break;
+ case TGSI_OPCODE_MUL:
+ spe_fm(gen->f, d_reg[ch], s1_reg[ch], s2_reg[ch]);
+ break;
+ default:
+ ;
+ }
+ }
+
+ /* Store the result (a no-op for TGSI_FILE_TEMPORARY dests) */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ store_dest_reg(gen, d_reg[ch], ch, &inst->FullDstRegisters[0]);
+ }
+
+ /* Free any intermediate temps we allocated */
+ free_itemps(gen);
+
+ return TRUE;
+}
+
+
+/**
+ * Emit multiply add. See emit_binop() for comments.
+ */
+static boolean
+emit_MAD(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int ch, s1_reg[4], s2_reg[4], s3_reg[4], d_reg[4];
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ s1_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[0]);
+ s2_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[1]);
+ s3_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[2]);
+ d_reg[ch] = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+ }
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_fma(gen->f, d_reg[ch], s1_reg[ch], s2_reg[ch], s3_reg[ch]);
+ }
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ store_dest_reg(gen, d_reg[ch], ch, &inst->FullDstRegisters[0]);
+ }
+ free_itemps(gen);
+ return TRUE;
+}
+
+
+/**
+ * Emit linear interpolate. See emit_binop() for comments.
+ */
+static boolean
+emit_LERP(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int ch, s1_reg[4], s2_reg[4], s3_reg[4], d_reg[4], tmp_reg[4];
+
+ /* setup/get src/dst/temp regs */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ s1_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[0]);
+ s2_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[1]);
+ s3_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[2]);
+ d_reg[ch] = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+ tmp_reg[ch] = get_itemp(gen);
+ }
+
+ /* d = s3 + s1(s2 - s3) */
+ /* do all subtracts, then all fma, then all stores to better pipeline */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_fs(gen->f, tmp_reg[ch], s2_reg[ch], s3_reg[ch]);
+ }
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_fma(gen->f, d_reg[ch], tmp_reg[ch], s1_reg[ch], s3_reg[ch]);
+ }
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ store_dest_reg(gen, d_reg[ch], ch, &inst->FullDstRegisters[0]);
+ }
+ free_itemps(gen);
+ return TRUE;
+}
+
+
+
+/**
+ * Emit reciprocal or recip sqrt.
+ */
+static boolean
+emit_RCP_RSQ(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int ch, s1_reg[4], d_reg[4], tmp_reg[4];
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ s1_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[0]);
+ d_reg[ch] = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+ tmp_reg[ch] = get_itemp(gen);
+ }
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ if (inst->Instruction.Opcode == TGSI_OPCODE_RCP) {
+ /* tmp = 1/s1 */
+ spe_frest(gen->f, tmp_reg[ch], s1_reg[ch]);
+ }
+ else {
+ /* tmp = 1/sqrt(s1) */
+ spe_frsqest(gen->f, tmp_reg[ch], s1_reg[ch]);
+ }
+ }
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ /* d = float_interp(s1, tmp) */
+ spe_fi(gen->f, d_reg[ch], s1_reg[ch], tmp_reg[ch]);
+ }
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ store_dest_reg(gen, d_reg[ch], ch, &inst->FullDstRegisters[0]);
+ }
+
+ free_itemps(gen);
+ return TRUE;
+}
+
+
+/**
+ * Emit absolute value. See emit_binop() for comments.
+ */
+static boolean
+emit_ABS(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int ch, s1_reg[4], d_reg[4];
+ const int bit31mask_reg = get_itemp(gen);
+
+ /* mask with bit 31 set, the rest cleared */
+ spe_load_uint(gen->f, bit31mask_reg, (1 << 31));
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ s1_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[0]);
+ d_reg[ch] = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+ }
+
+ /* d = sign bit cleared in s1 */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_andc(gen->f, d_reg[ch], s1_reg[ch], bit31mask_reg);
+ }
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ store_dest_reg(gen, d_reg[ch], ch, &inst->FullDstRegisters[0]);
+ }
+
+ free_itemps(gen);
+ return TRUE;
+}
+
+/**
+ * Emit 3-component dot product. See emit_binop() for comments.
+ */
+static boolean
+emit_DP3(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int ch;
+ int s1x_reg, s1y_reg, s1z_reg;
+ int s2x_reg, s2y_reg, s2z_reg;
+ int t0_reg = get_itemp(gen), t1_reg = get_itemp(gen);
+
+ s1x_reg = get_src_reg(gen, CHAN_X, &inst->FullSrcRegisters[0]);
+ s2x_reg = get_src_reg(gen, CHAN_X, &inst->FullSrcRegisters[1]);
+ s1y_reg = get_src_reg(gen, CHAN_Y, &inst->FullSrcRegisters[0]);
+ s2y_reg = get_src_reg(gen, CHAN_Y, &inst->FullSrcRegisters[1]);
+ s1z_reg = get_src_reg(gen, CHAN_Z, &inst->FullSrcRegisters[0]);
+ s2z_reg = get_src_reg(gen, CHAN_Z, &inst->FullSrcRegisters[1]);
+
+ /* t0 = x0 * x1 */
+ spe_fm(gen->f, t0_reg, s1x_reg, s2x_reg);
+
+ /* t1 = y0 * y1 */
+ spe_fm(gen->f, t1_reg, s1y_reg, s2y_reg);
+
+ /* t0 = z0 * z1 + t0 */
+ spe_fma(gen->f, t0_reg, s1z_reg, s2z_reg, t0_reg);
+
+ /* t0 = t0 + t1 */
+ spe_fa(gen->f, t0_reg, t0_reg, t1_reg);
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ int d_reg = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+ spe_move(gen->f, d_reg, t0_reg);
+ store_dest_reg(gen, d_reg, ch, &inst->FullDstRegisters[0]);
+ }
+
+ free_itemps(gen);
+ return TRUE;
+}
+
+/**
+ * Emit 4-component dot product. See emit_binop() for comments.
+ */
+static boolean
+emit_DP4(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int ch;
+ int s0x_reg, s0y_reg, s0z_reg, s0w_reg;
+ int s1x_reg, s1y_reg, s1z_reg, s1w_reg;
+ int t0_reg = get_itemp(gen), t1_reg = get_itemp(gen);
+
+ s0x_reg = get_src_reg(gen, CHAN_X, &inst->FullSrcRegisters[0]);
+ s1x_reg = get_src_reg(gen, CHAN_X, &inst->FullSrcRegisters[1]);
+ s0y_reg = get_src_reg(gen, CHAN_Y, &inst->FullSrcRegisters[0]);
+ s1y_reg = get_src_reg(gen, CHAN_Y, &inst->FullSrcRegisters[1]);
+ s0z_reg = get_src_reg(gen, CHAN_Z, &inst->FullSrcRegisters[0]);
+ s1z_reg = get_src_reg(gen, CHAN_Z, &inst->FullSrcRegisters[1]);
+ s0w_reg = get_src_reg(gen, CHAN_W, &inst->FullSrcRegisters[0]);
+ s1w_reg = get_src_reg(gen, CHAN_W, &inst->FullSrcRegisters[1]);
+
+ /* t0 = x0 * x1 */
+ spe_fm(gen->f, t0_reg, s0x_reg, s1x_reg);
+
+ /* t1 = y0 * y1 */
+ spe_fm(gen->f, t1_reg, s0y_reg, s1y_reg);
+
+ /* t0 = z0 * z1 + t0 */
+ spe_fma(gen->f, t0_reg, s0z_reg, s1z_reg, t0_reg);
+
+ /* t1 = w0 * w1 + t1 */
+ spe_fma(gen->f, t1_reg, s0w_reg, s1w_reg, t1_reg);
+
+ /* t0 = t0 + t1 */
+ spe_fa(gen->f, t0_reg, t0_reg, t1_reg);
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ int d_reg = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+ spe_move(gen->f, d_reg, t0_reg);
+ store_dest_reg(gen, d_reg, ch, &inst->FullDstRegisters[0]);
+ }
+
+ free_itemps(gen);
+ return TRUE;
+}
+
+/**
+ * Emit homogeneous dot product. See emit_binop() for comments.
+ */
+static boolean
+emit_DPH(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ /* XXX rewrite this function to look more like DP3/DP4 */
+ int ch;
+ int s1_reg = get_src_reg(gen, CHAN_X, &inst->FullSrcRegisters[0]);
+ int s2_reg = get_src_reg(gen, CHAN_X, &inst->FullSrcRegisters[1]);
+ int tmp_reg = get_itemp(gen);
+
+ /* t = x0 * x1 */
+ spe_fm(gen->f, tmp_reg, s1_reg, s2_reg);
+
+ s1_reg = get_src_reg(gen, CHAN_Y, &inst->FullSrcRegisters[0]);
+ s2_reg = get_src_reg(gen, CHAN_Y, &inst->FullSrcRegisters[1]);
+ /* t = y0 * y1 + t */
+ spe_fma(gen->f, tmp_reg, s1_reg, s2_reg, tmp_reg);
+
+ s1_reg = get_src_reg(gen, CHAN_Z, &inst->FullSrcRegisters[0]);
+ s2_reg = get_src_reg(gen, CHAN_Z, &inst->FullSrcRegisters[1]);
+ /* t = z0 * z1 + t */
+ spe_fma(gen->f, tmp_reg, s1_reg, s2_reg, tmp_reg);
+
+ s2_reg = get_src_reg(gen, CHAN_W, &inst->FullSrcRegisters[1]);
+ /* t = w1 + t */
+ spe_fa(gen->f, tmp_reg, s2_reg, tmp_reg);
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ int d_reg = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+ spe_move(gen->f, d_reg, tmp_reg);
+ store_dest_reg(gen, tmp_reg, ch, &inst->FullDstRegisters[0]);
+ }
+
+ free_itemps(gen);
+ return TRUE;
+}
+
+/**
+ * Emit 3-component vector normalize.
+ */
+static boolean
+emit_NRM3(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int ch;
+ int src_reg[3];
+ int t0_reg = get_itemp(gen), t1_reg = get_itemp(gen);
+
+ src_reg[0] = get_src_reg(gen, CHAN_X, &inst->FullSrcRegisters[0]);
+ src_reg[1] = get_src_reg(gen, CHAN_Y, &inst->FullSrcRegisters[0]);
+ src_reg[2] = get_src_reg(gen, CHAN_Z, &inst->FullSrcRegisters[0]);
+
+ /* t0 = x * x */
+ spe_fm(gen->f, t0_reg, src_reg[0], src_reg[0]);
+
+ /* t1 = y * y */
+ spe_fm(gen->f, t1_reg, src_reg[1], src_reg[1]);
+
+ /* t0 = z * z + t0 */
+ spe_fma(gen->f, t0_reg, src_reg[2], src_reg[2], t0_reg);
+
+ /* t0 = t0 + t1 */
+ spe_fa(gen->f, t0_reg, t0_reg, t1_reg);
+
+ /* t1 = 1.0 / sqrt(t0) */
+ spe_frsqest(gen->f, t1_reg, t0_reg);
+ spe_fi(gen->f, t1_reg, t0_reg, t1_reg);
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ int d_reg = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+ /* dst = src[ch] * t1 */
+ spe_fm(gen->f, d_reg, src_reg[ch], t1_reg);
+ store_dest_reg(gen, d_reg, ch, &inst->FullDstRegisters[0]);
+ }
+
+ free_itemps(gen);
+ return TRUE;
+}
+
+
+/**
+ * Emit cross product. See emit_binop() for comments.
+ */
+static boolean
+emit_XPD(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int s1_reg = get_src_reg(gen, CHAN_Z, &inst->FullSrcRegisters[0]);
+ int s2_reg = get_src_reg(gen, CHAN_Y, &inst->FullSrcRegisters[1]);
+ int tmp_reg = get_itemp(gen);
+
+ /* t = z0 * y1 */
+ spe_fm(gen->f, tmp_reg, s1_reg, s2_reg);
+
+ s1_reg = get_src_reg(gen, CHAN_Y, &inst->FullSrcRegisters[0]);
+ s2_reg = get_src_reg(gen, CHAN_Z, &inst->FullSrcRegisters[1]);
+ /* t = y0 * z1 - t */
+ spe_fms(gen->f, tmp_reg, s1_reg, s2_reg, tmp_reg);
+
+ if (inst->FullDstRegisters[0].DstRegister.WriteMask & (1 << CHAN_X)) {
+ store_dest_reg(gen, tmp_reg, CHAN_X, &inst->FullDstRegisters[0]);
+ }
+
+ s1_reg = get_src_reg(gen, CHAN_X, &inst->FullSrcRegisters[0]);
+ s2_reg = get_src_reg(gen, CHAN_Z, &inst->FullSrcRegisters[1]);
+ /* t = x0 * z1 */
+ spe_fm(gen->f, tmp_reg, s1_reg, s2_reg);
+
+ s1_reg = get_src_reg(gen, CHAN_Z, &inst->FullSrcRegisters[0]);
+ s2_reg = get_src_reg(gen, CHAN_X, &inst->FullSrcRegisters[1]);
+ /* t = z0 * x1 - t */
+ spe_fms(gen->f, tmp_reg, s1_reg, s2_reg, tmp_reg);
+
+ if (inst->FullDstRegisters[0].DstRegister.WriteMask & (1 << CHAN_Y)) {
+ store_dest_reg(gen, tmp_reg, CHAN_Y, &inst->FullDstRegisters[0]);
+ }
+
+ s1_reg = get_src_reg(gen, CHAN_Y, &inst->FullSrcRegisters[0]);
+ s2_reg = get_src_reg(gen, CHAN_X, &inst->FullSrcRegisters[1]);
+ /* t = y0 * x1 */
+ spe_fm(gen->f, tmp_reg, s1_reg, s2_reg);
+
+ s1_reg = get_src_reg(gen, CHAN_X, &inst->FullSrcRegisters[0]);
+ s2_reg = get_src_reg(gen, CHAN_Y, &inst->FullSrcRegisters[1]);
+ /* t = x0 * y1 - t */
+ spe_fms(gen->f, tmp_reg, s1_reg, s2_reg, tmp_reg);
+
+ if (inst->FullDstRegisters[0].DstRegister.WriteMask & (1 << CHAN_Z)) {
+ store_dest_reg(gen, tmp_reg, CHAN_Z, &inst->FullDstRegisters[0]);
+ }
+
+ free_itemps(gen);
+ return TRUE;
+}
+
+
+/**
+ * Emit a comparison (SLT/SGT/SLE/SGE/SEQ/SNE) instruction.
+ * Note that the SPE fcgt instruction produces 0x0 and 0xffffffff as
+ * the result but OpenGL/TGSI needs 0.0 and 1.0 results.
+ * We can easily convert 0x0/0xffffffff to 0.0/1.0 with a bitwise AND.
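+ * (1.0f has the bit pattern 0x3f800000, so 0xffffffff & 0x3f800000 = 1.0
+ * and 0x00000000 & 0x3f800000 = 0.0.)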
+ */
+static boolean
+emit_inequality(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int ch, s1_reg[4], s2_reg[4], d_reg[4], one_reg;
+ boolean complement = FALSE;
+
+ one_reg = get_const_one_reg(gen);
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ s1_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[0]);
+ s2_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[1]);
+ d_reg[ch] = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+ }
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_SGT:
+ spe_fcgt(gen->f, d_reg[ch], s1_reg[ch], s2_reg[ch]);
+ break;
+ case TGSI_OPCODE_SLT:
+ spe_fcgt(gen->f, d_reg[ch], s2_reg[ch], s1_reg[ch]);
+ break;
+ case TGSI_OPCODE_SGE:
+ spe_fcgt(gen->f, d_reg[ch], s2_reg[ch], s1_reg[ch]);
+ complement = TRUE;
+ break;
+ case TGSI_OPCODE_SLE:
+ spe_fcgt(gen->f, d_reg[ch], s1_reg[ch], s2_reg[ch]);
+ complement = TRUE;
+ break;
+ case TGSI_OPCODE_SEQ:
+ spe_fceq(gen->f, d_reg[ch], s1_reg[ch], s2_reg[ch]);
+ break;
+ case TGSI_OPCODE_SNE:
+ spe_fceq(gen->f, d_reg[ch], s1_reg[ch], s2_reg[ch]);
+ complement = TRUE;
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ /* convert d from 0x0/0xffffffff to 0.0/1.0 */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ /* d = one_reg & d  (or one_reg & ~d for the complemented comparisons) */
+ if (complement)
+ spe_andc(gen->f, d_reg[ch], one_reg, d_reg[ch]);
+ else
+ spe_and(gen->f, d_reg[ch], one_reg, d_reg[ch]);
+ }
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ store_dest_reg(gen, d_reg[ch], ch, &inst->FullDstRegisters[0]);
+ }
+
+ free_itemps(gen);
+ return TRUE;
+}
+
+
+/**
+ * Emit compare.
+ */
+static boolean
+emit_CMP(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int ch;
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ int s1_reg = get_src_reg(gen, ch, &inst->FullSrcRegisters[0]);
+ int s2_reg = get_src_reg(gen, ch, &inst->FullSrcRegisters[1]);
+ int s3_reg = get_src_reg(gen, ch, &inst->FullSrcRegisters[2]);
+ int d_reg = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+ int zero_reg = get_itemp(gen);
+
+ spe_zero(gen->f, zero_reg);
+
+ /* d = (s1 < 0) ? s2 : s3 */
+ spe_fcgt(gen->f, d_reg, zero_reg, s1_reg);
+ spe_selb(gen->f, d_reg, s3_reg, s2_reg, d_reg);
+
+ store_dest_reg(gen, d_reg, ch, &inst->FullDstRegisters[0]);
+ free_itemps(gen);
+ }
+
+ return TRUE;
+}
+
+/**
+ * Emit trunc.
+ * Convert the float to a signed int (truncating toward zero), then
+ * convert the signed int back to float.
+ */
+static boolean
+emit_TRUNC(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int ch, s1_reg[4], d_reg[4];
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ s1_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[0]);
+ d_reg[ch] = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+ }
+
+ /* Convert float to int */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_cflts(gen->f, d_reg[ch], s1_reg[ch], 0);
+ }
+
+ /* Convert int to float */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_csflt(gen->f, d_reg[ch], d_reg[ch], 0);
+ }
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ store_dest_reg(gen, d_reg[ch], ch, &inst->FullDstRegisters[0]);
+ }
+
+ free_itemps(gen);
+ return TRUE;
+}
+
+
+/**
+ * Emit floor.
+ * If the value is negative, subtract 1.0, then convert the float to a
+ * signed int (truncating) and convert the signed int back to float.
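+ * For example, floor(-1.3): -1.3 - 1.0 = -2.3, which truncates to -2
+ * and converts back to -2.0.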
+ */
+static boolean
+emit_FLR(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int ch, s1_reg[4], d_reg[4], tmp_reg[4], zero_reg, one_reg;
+
+ zero_reg = get_itemp(gen);
+ spe_zero(gen->f, zero_reg);
+ one_reg = get_const_one_reg(gen);
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ s1_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[0]);
+ d_reg[ch] = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+ tmp_reg[ch] = get_itemp(gen);
+ }
+
+ /* If negative, subtract 1.0 */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_fcgt(gen->f, tmp_reg[ch], zero_reg, s1_reg[ch]);
+ }
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_selb(gen->f, tmp_reg[ch], zero_reg, one_reg, tmp_reg[ch]);
+ }
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_fs(gen->f, tmp_reg[ch], s1_reg[ch], tmp_reg[ch]);
+ }
+
+ /* Convert float to int */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_cflts(gen->f, tmp_reg[ch], tmp_reg[ch], 0);
+ }
+
+ /* Convert int to float */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_csflt(gen->f, d_reg[ch], tmp_reg[ch], 0);
+ }
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ store_dest_reg(gen, d_reg[ch], ch, &inst->FullDstRegisters[0]);
+ }
+
+ free_itemps(gen);
+ return TRUE;
+}
+
+
+/**
+ * Compute frac = Input - FLR(Input)
+ */
+static boolean
+emit_FRC(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int ch, s1_reg[4], d_reg[4], tmp_reg[4], zero_reg, one_reg;
+
+ zero_reg = get_itemp(gen);
+ spe_zero(gen->f, zero_reg);
+ one_reg = get_const_one_reg(gen);
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ s1_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[0]);
+ d_reg[ch] = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+ tmp_reg[ch] = get_itemp(gen);
+ }
+
+ /* If negative, subtract 1.0 */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_fcgt(gen->f, tmp_reg[ch], zero_reg, s1_reg[ch]);
+ }
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_selb(gen->f, tmp_reg[ch], zero_reg, one_reg, tmp_reg[ch]);
+ }
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_fs(gen->f, tmp_reg[ch], s1_reg[ch], tmp_reg[ch]);
+ }
+
+ /* Convert float to int */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_cflts(gen->f, tmp_reg[ch], tmp_reg[ch], 0);
+ }
+
+ /* Convert int to float */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_csflt(gen->f, tmp_reg[ch], tmp_reg[ch], 0);
+ }
+
+ /* d = s1 - FLR(s1) */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_fs(gen->f, d_reg[ch], s1_reg[ch], tmp_reg[ch]);
+ }
+
+ /* store result */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ store_dest_reg(gen, d_reg[ch], ch, &inst->FullDstRegisters[0]);
+ }
+
+ free_itemps(gen);
+ return TRUE;
+}
+
+
+#if 0
+static void
+print_functions(struct cell_context *cell)
+{
+ struct cell_spu_function_info *funcs = &cell->spu_functions;
+ uint i;
+ for (i = 0; i < funcs->num; i++) {
+ printf("SPU func %u: %s at %u\n",
+ i, funcs->names[i], funcs->addrs[i]);
+ }
+}
+#endif
+
+
+static uint
+lookup_function(struct cell_context *cell, const char *funcname)
+{
+ const struct cell_spu_function_info *funcs = &cell->spu_functions;
+ uint i, addr = 0;
+ for (i = 0; i < funcs->num; i++) {
+ if (strcmp(funcs->names[i], funcname) == 0) {
+ addr = funcs->addrs[i];
+ }
+ }
+ assert(addr && "spu function not found");
+   return addr / 4;   /* discard 2 LSBs: convert byte address to word address */
+}
+
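+/* Example (illustrative numbers): an SPU function at byte address 0x1230
+ * yields 0x1230 / 4 = 0x48c, the word-granular value that is later
+ * passed to spe_brasl() in emit_function_call() and emit_TEX().
+ */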
+
+/**
+ * Emit code to call a SPU function.
+ * Used to implement instructions like SIN/COS/POW/TEX/etc.
+ * If scalar, only the X components of the src regs are used, and the
+ * result is replicated across the dest register's XYZW components.
+ */
+static boolean
+emit_function_call(struct codegen *gen,
+ const struct tgsi_full_instruction *inst,
+ char *funcname, uint num_args, boolean scalar)
+{
+ const uint addr = lookup_function(gen->cell, funcname);
+ char comment[100];
+ int s_regs[3];
+ int func_called = FALSE;
+ uint a, ch;
+ int retval_reg = -1;
+
+ assert(num_args <= 3);
+
+ snprintf(comment, sizeof(comment), "CALL %s:", funcname);
+ spe_comment(gen->f, -4, comment);
+
+ if (scalar) {
+ for (a = 0; a < num_args; a++) {
+ s_regs[a] = get_src_reg(gen, CHAN_X, &inst->FullSrcRegisters[a]);
+ }
+ /* we'll call the function, put the return value in this register,
+ * then replicate it across all write-enabled components in d_reg.
+ */
+ retval_reg = spe_allocate_available_register(gen->f);
+ }
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ int d_reg;
+ ubyte usedRegs[SPE_NUM_REGS];
+ uint i, numUsed;
+
+ if (!scalar) {
+ for (a = 0; a < num_args; a++) {
+ s_regs[a] = get_src_reg(gen, ch, &inst->FullSrcRegisters[a]);
+ }
+ }
+
+ d_reg = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+
+ if (!scalar || !func_called) {
+ /* for a scalar function, we'll really only call the function once */
+
+ numUsed = spe_get_registers_used(gen->f, usedRegs);
+ assert(numUsed < gen->frame_size / 16 - 2);
+
+ /* save registers to stack */
+ for (i = 0; i < numUsed; i++) {
+ uint reg = usedRegs[i];
+ int offset = 2 + i;
+ spe_stqd(gen->f, reg, SPE_REG_SP, 16 * offset);
+ }
+
+ /* setup function arguments */
+ for (a = 0; a < num_args; a++) {
+ spe_move(gen->f, 3 + a, s_regs[a]);
+ }
+
+ /* branch to function, save return addr */
+ spe_brasl(gen->f, SPE_REG_RA, addr);
+
+ /* save function's return value */
+ if (scalar)
+ spe_move(gen->f, retval_reg, 3);
+ else
+ spe_move(gen->f, d_reg, 3);
+
+ /* restore registers from stack */
+ for (i = 0; i < numUsed; i++) {
+ uint reg = usedRegs[i];
+ if (reg != d_reg && reg != retval_reg) {
+ int offset = 2 + i;
+ spe_lqd(gen->f, reg, SPE_REG_SP, 16 * offset);
+ }
+ }
+
+ func_called = TRUE;
+ }
+
+ if (scalar) {
+ spe_move(gen->f, d_reg, retval_reg);
+ }
+
+ store_dest_reg(gen, d_reg, ch, &inst->FullDstRegisters[0]);
+ free_itemps(gen);
+ }
+
+ if (scalar) {
+ spe_release_register(gen->f, retval_reg);
+ }
+
+ return TRUE;
+}
+
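+/* Summary of the call sequence emitted above (as implemented here, not an
+ * official ABI description): arguments are moved into registers $3..$5,
+ * live registers are spilled to 16-byte stack slots at SP + 16*(2+i),
+ * spe_brasl() branches to the callee and records the return address in
+ * SPE_REG_RA, and the callee's result is read back from register $3.
+ */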
+
+static boolean
+emit_TEX(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ const uint target = inst->InstructionExtTexture.Texture;
+ const uint unit = inst->FullSrcRegisters[1].SrcRegister.Index;
+ uint addr;
+ int ch;
+ int coord_regs[4], d_regs[4];
+
+ switch (target) {
+ case TGSI_TEXTURE_1D:
+ case TGSI_TEXTURE_2D:
+ addr = lookup_function(gen->cell, "spu_tex_2d");
+ break;
+ case TGSI_TEXTURE_3D:
+ addr = lookup_function(gen->cell, "spu_tex_3d");
+ break;
+ case TGSI_TEXTURE_CUBE:
+ addr = lookup_function(gen->cell, "spu_tex_cube");
+ break;
+ default:
+ ASSERT(0 && "unsupported texture target");
+ return FALSE;
+ }
+
+ assert(inst->FullSrcRegisters[1].SrcRegister.File == TGSI_FILE_SAMPLER);
+
+ spe_comment(gen->f, -4, "CALL tex:");
+
+ /* get src/dst reg info */
+ for (ch = 0; ch < 4; ch++) {
+ coord_regs[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[0]);
+ d_regs[ch] = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+ }
+
+ {
+ ubyte usedRegs[SPE_NUM_REGS];
+ uint i, numUsed;
+
+ numUsed = spe_get_registers_used(gen->f, usedRegs);
+ assert(numUsed < gen->frame_size / 16 - 2);
+
+ /* save registers to stack */
+ for (i = 0; i < numUsed; i++) {
+ uint reg = usedRegs[i];
+ int offset = 2 + i;
+ spe_stqd(gen->f, reg, SPE_REG_SP, 16 * offset);
+ }
+
+ /* setup function arguments (XXX depends on target) */
+ for (i = 0; i < 4; i++) {
+ spe_move(gen->f, 3 + i, coord_regs[i]);
+ }
+ spe_load_uint(gen->f, 7, unit); /* sampler unit */
+
+ /* branch to function, save return addr */
+ spe_brasl(gen->f, SPE_REG_RA, addr);
+
+ /* save function's return values (four pixel's colors) */
+ for (i = 0; i < 4; i++) {
+ spe_move(gen->f, d_regs[i], 3 + i);
+ }
+
+ /* restore registers from stack */
+ for (i = 0; i < numUsed; i++) {
+ uint reg = usedRegs[i];
+ if (reg != d_regs[0] &&
+ reg != d_regs[1] &&
+ reg != d_regs[2] &&
+ reg != d_regs[3]) {
+ int offset = 2 + i;
+ spe_lqd(gen->f, reg, SPE_REG_SP, 16 * offset);
+ }
+ }
+ }
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ store_dest_reg(gen, d_regs[ch], ch, &inst->FullDstRegisters[0]);
+ free_itemps(gen);
+ }
+
+ return TRUE;
+}
+
+
+/**
+ * KILL if any of src reg values are less than zero.
+ */
+static boolean
+emit_KIL(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int ch;
+ int s_regs[4], kil_reg = -1, cmp_reg, zero_reg;
+
+ spe_comment(gen->f, -4, "CALL kil:");
+
+ /* zero = {0,0,0,0} */
+ zero_reg = get_itemp(gen);
+ spe_zero(gen->f, zero_reg);
+
+ cmp_reg = get_itemp(gen);
+
+ /* get src regs */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ s_regs[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[0]);
+ }
+
+ /* test if any src regs are < 0 */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ if (kil_reg >= 0) {
+         /* cmp = (0 > src) ? ~0 : 0 */
+ spe_fcgt(gen->f, cmp_reg, zero_reg, s_regs[ch]);
+ /* kil = kil | cmp */
+ spe_or(gen->f, kil_reg, kil_reg, cmp_reg);
+ }
+ else {
+ kil_reg = get_itemp(gen);
+         /* kil = (0 > src) ? ~0 : 0 */
+ spe_fcgt(gen->f, kil_reg, zero_reg, s_regs[ch]);
+ }
+ }
+
+ if (gen->if_nesting || gen->loop_nesting) {
+ /* may have been a conditional kil */
+ spe_and(gen->f, kil_reg, kil_reg, gen->exec_mask_reg);
+ }
+
+ /* allocate the kill mask reg if needed */
+ if (gen->kill_mask_reg <= 0) {
+ gen->kill_mask_reg = spe_allocate_available_register(gen->f);
+ spe_move(gen->f, gen->kill_mask_reg, kil_reg);
+ }
+ else {
+ spe_or(gen->f, gen->kill_mask_reg, gen->kill_mask_reg, kil_reg);
+ }
+
+ free_itemps(gen);
+
+ return TRUE;
+}
+
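+/* Hypothetical scalar sketch (illustration only) of the per-channel mask
+ * update above, for one source value 'src' and execution mask 'exec'
+ * (all ones when outside any IF/LOOP):
+ *
+ *    kil  = (0.0f > src) ? ~0u : 0u;   // fcgt
+ *    kil &= exec;                      // only kill live fragments
+ *    kill_mask |= kil;                 // accumulate across KIL instructions
+ */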
+
+
+/**
+ * Emit min or max.
+ */
+static boolean
+emit_MIN_MAX(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int ch, s0_reg[4], s1_reg[4], d_reg[4], tmp_reg[4];
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ s0_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[0]);
+ s1_reg[ch] = get_src_reg(gen, ch, &inst->FullSrcRegisters[1]);
+ d_reg[ch] = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+ tmp_reg[ch] = get_itemp(gen);
+ }
+
+   /* MAX: d = (s0 > s1) ? s0 : s1;  MIN: d = (s1 > s0) ? s0 : s1 */
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ if (inst->Instruction.Opcode == TGSI_OPCODE_MAX)
+ spe_fcgt(gen->f, tmp_reg[ch], s0_reg[ch], s1_reg[ch]);
+ else
+ spe_fcgt(gen->f, tmp_reg[ch], s1_reg[ch], s0_reg[ch]);
+ }
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ spe_selb(gen->f, d_reg[ch], s1_reg[ch], s0_reg[ch], tmp_reg[ch]);
+ }
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ store_dest_reg(gen, d_reg[ch], ch, &inst->FullDstRegisters[0]);
+ }
+
+ free_itemps(gen);
+ return TRUE;
+}
+
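+/* Illustration (hypothetical scalar form) of the compare+select pair
+ * above; selb takes bits from its second value where the mask bit is 1
+ * and from its first value where it is 0:
+ *
+ *    MAX: mask = (s0 > s1) ? ~0u : 0u;   d = mask ? s0 : s1;
+ *    MIN: mask = (s1 > s0) ? ~0u : 0u;   d = mask ? s0 : s1;
+ */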
+
+/**
+ * Emit code to update the execution mask.
+ * This needs to be done whenever the execution status of a conditional
+ * or loop is changed.
+ */
+static void
+emit_update_exec_mask(struct codegen *gen)
+{
+ const int exec_reg = get_exec_mask_reg(gen);
+ const int cond_reg = gen->cond_mask_reg;
+ const int loop_reg = gen->loop_mask_reg;
+
+ spe_comment(gen->f, 0, "Update master execution mask");
+
+ if (gen->if_nesting > 0 && gen->loop_nesting > 0) {
+ /* exec_mask = cond_mask & loop_mask */
+ assert(cond_reg > 0);
+ assert(loop_reg > 0);
+ spe_and(gen->f, exec_reg, cond_reg, loop_reg);
+ }
+ else if (gen->if_nesting > 0) {
+ assert(cond_reg > 0);
+ spe_move(gen->f, exec_reg, cond_reg);
+ }
+ else if (gen->loop_nesting > 0) {
+ assert(loop_reg > 0);
+ spe_move(gen->f, exec_reg, loop_reg);
+ }
+ else {
+ spe_load_int(gen->f, exec_reg, ~0x0);
+ }
+}
+
+
+static boolean
+emit_IF(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ const int channel = 0;
+ int cond_reg;
+
+ cond_reg = get_cond_mask_reg(gen);
+
+ /* XXX push cond exec mask */
+
+ spe_comment(gen->f, 0, "init conditional exec mask = ~0:");
+ spe_load_int(gen->f, cond_reg, ~0);
+
+ /* update conditional execution mask with the predicate register */
+ int tmp_reg = get_itemp(gen);
+ int s1_reg = get_src_reg(gen, channel, &inst->FullSrcRegisters[0]);
+
+ /* tmp = (s1_reg == 0) */
+ spe_ceqi(gen->f, tmp_reg, s1_reg, 0);
+ /* tmp = !tmp */
+ spe_complement(gen->f, tmp_reg, tmp_reg);
+ /* cond_mask = cond_mask & tmp */
+ spe_and(gen->f, cond_reg, cond_reg, tmp_reg);
+
+ gen->if_nesting++;
+
+ /* update the master execution mask */
+ emit_update_exec_mask(gen);
+
+ free_itemps(gen);
+
+ return TRUE;
+}
+
+
+static boolean
+emit_ELSE(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ const int cond_reg = get_cond_mask_reg(gen);
+
+ spe_comment(gen->f, 0, "cond exec mask = !cond exec mask");
+ spe_complement(gen->f, cond_reg, cond_reg);
+ emit_update_exec_mask(gen);
+
+ return TRUE;
+}
+
+
+static boolean
+emit_ENDIF(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ /* XXX todo: pop cond exec mask */
+
+ gen->if_nesting--;
+
+ emit_update_exec_mask(gen);
+
+ return TRUE;
+}
+
+
+static boolean
+emit_BGNLOOP(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ int exec_reg, loop_reg;
+
+ exec_reg = get_exec_mask_reg(gen);
+ loop_reg = get_loop_mask_reg(gen);
+
+ /* XXX push loop_exec mask */
+
+ spe_comment(gen->f, 0*-4, "initialize loop exec mask = ~0");
+ spe_load_int(gen->f, loop_reg, ~0x0);
+
+ gen->loop_nesting++;
+ gen->loop_start = spe_code_size(gen->f); /* in bytes */
+
+ return TRUE;
+}
+
+
+static boolean
+emit_ENDLOOP(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ const int loop_reg = get_loop_mask_reg(gen);
+ const int tmp_reg = get_itemp(gen);
+ int offset;
+
+   /* tmp_reg = loop[0] | loop[1] | loop[2] | loop[3] */
+ spe_orx(gen->f, tmp_reg, loop_reg);
+
+ offset = gen->loop_start - spe_code_size(gen->f); /* in bytes */
+
+ /* branch back to top of loop if tmp_reg != 0 */
+ spe_brnz(gen->f, tmp_reg, offset / 4);
+
+ /* XXX pop loop_exec mask */
+
+ gen->loop_nesting--;
+
+ emit_update_exec_mask(gen);
+
+ return TRUE;
+}
+
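+/* Worked example (illustrative numbers): if the loop body began at byte
+ * offset 64 and the code size at the brnz is 160 bytes, then
+ * offset = 64 - 160 = -96 bytes, and offset / 4 = -24 instruction words
+ * is passed to spe_brnz() (SPU instructions are 4 bytes each).
+ */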
+
+static boolean
+emit_BRK(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ const int exec_reg = get_exec_mask_reg(gen);
+ const int loop_reg = get_loop_mask_reg(gen);
+
+ assert(gen->loop_nesting > 0);
+
+ spe_comment(gen->f, 0, "loop exec mask &= ~master exec mask");
+ spe_andc(gen->f, loop_reg, loop_reg, exec_reg);
+
+ emit_update_exec_mask(gen);
+
+ return TRUE;
+}
+
+
+static boolean
+emit_CONT(struct codegen *gen, const struct tgsi_full_instruction *inst)
+{
+ assert(gen->loop_nesting > 0);
+
+ return TRUE;
+}
+
+
+static boolean
+emit_DDX_DDY(struct codegen *gen, const struct tgsi_full_instruction *inst,
+ boolean ddx)
+{
+ int ch;
+
+ FOR_EACH_ENABLED_CHANNEL(inst, ch) {
+ int s_reg = get_src_reg(gen, ch, &inst->FullSrcRegisters[0]);
+ int d_reg = get_dst_reg(gen, ch, &inst->FullDstRegisters[0]);
+
+ int t1_reg = get_itemp(gen);
+ int t2_reg = get_itemp(gen);
+
+ spe_splat_word(gen->f, t1_reg, s_reg, 0); /* upper-left pixel */
+ if (ddx) {
+ spe_splat_word(gen->f, t2_reg, s_reg, 1); /* upper-right pixel */
+ }
+ else {
+ spe_splat_word(gen->f, t2_reg, s_reg, 2); /* lower-left pixel */
+ }
+ spe_fs(gen->f, d_reg, t2_reg, t1_reg);
+
+ free_itemps(gen);
+ }
+
+ return TRUE;
+}
+
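+/* Quad layout assumed by emit_DDX_DDY() above (word slot within each
+ * register = pixel within the 2x2 quad):
+ *
+ *    word 0: upper-left     word 1: upper-right
+ *    word 2: lower-left     word 3: (presumably lower-right; unused here)
+ *
+ * so DDX = upper-right - upper-left and DDY = lower-left - upper-left,
+ * with the single difference replicated across all four pixels.
+ */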
+
+
+
+/**
+ * Emit END instruction.
+ * We just return from the shader function at this point.
+ *
+ * Note that there may be more code after this that would be
+ * called by TGSI_OPCODE_CALL.
+ */
+static boolean
+emit_END(struct codegen *gen)
+{
+ emit_epilogue(gen);
+ return TRUE;
+}
+
+
+/**
+ * Emit code for the given instruction. Just a big switch stmt.
+ */
+static boolean
+emit_instruction(struct codegen *gen,
+ const struct tgsi_full_instruction *inst)
+{
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_ARL:
+ return emit_ARL(gen, inst);
+ case TGSI_OPCODE_MOV:
+ case TGSI_OPCODE_SWZ:
+ return emit_MOV(gen, inst);
+ case TGSI_OPCODE_ADD:
+ case TGSI_OPCODE_SUB:
+ case TGSI_OPCODE_MUL:
+ return emit_binop(gen, inst);
+ case TGSI_OPCODE_MAD:
+ return emit_MAD(gen, inst);
+ case TGSI_OPCODE_LERP:
+ return emit_LRP(gen, inst);
+ case TGSI_OPCODE_DP3:
+ return emit_DP3(gen, inst);
+ case TGSI_OPCODE_DP4:
+ return emit_DP4(gen, inst);
+ case TGSI_OPCODE_DPH:
+ return emit_DPH(gen, inst);
+ case TGSI_OPCODE_NRM:
+ return emit_NRM3(gen, inst);
+ case TGSI_OPCODE_XPD:
+ return emit_XPD(gen, inst);
+ case TGSI_OPCODE_RCP:
+ case TGSI_OPCODE_RSQ:
+ return emit_RCP_RSQ(gen, inst);
+ case TGSI_OPCODE_ABS:
+ return emit_ABS(gen, inst);
+ case TGSI_OPCODE_SGT:
+ case TGSI_OPCODE_SLT:
+ case TGSI_OPCODE_SGE:
+ case TGSI_OPCODE_SLE:
+ case TGSI_OPCODE_SEQ:
+ case TGSI_OPCODE_SNE:
+ return emit_inequality(gen, inst);
+ case TGSI_OPCODE_CMP:
+ return emit_CMP(gen, inst);
+ case TGSI_OPCODE_MIN:
+ case TGSI_OPCODE_MAX:
+ return emit_MIN_MAX(gen, inst);
+ case TGSI_OPCODE_TRUNC:
+ return emit_TRUNC(gen, inst);
+ case TGSI_OPCODE_FLR:
+ return emit_FLR(gen, inst);
+ case TGSI_OPCODE_FRC:
+ return emit_FRC(gen, inst);
+ case TGSI_OPCODE_END:
+ return emit_END(gen);
+
+ case TGSI_OPCODE_COS:
+ return emit_function_call(gen, inst, "spu_cos", 1, TRUE);
+ case TGSI_OPCODE_SIN:
+ return emit_function_call(gen, inst, "spu_sin", 1, TRUE);
+ case TGSI_OPCODE_POW:
+ return emit_function_call(gen, inst, "spu_pow", 2, TRUE);
+ case TGSI_OPCODE_EX2:
+ return emit_function_call(gen, inst, "spu_exp2", 1, TRUE);
+ case TGSI_OPCODE_LG2:
+ return emit_function_call(gen, inst, "spu_log2", 1, TRUE);
+ case TGSI_OPCODE_TEX:
+ /* fall-through for now */
+ case TGSI_OPCODE_TXD:
+ /* fall-through for now */
+ case TGSI_OPCODE_TXB:
+ /* fall-through for now */
+ case TGSI_OPCODE_TXL:
+ /* fall-through for now */
+ case TGSI_OPCODE_TXP:
+ return emit_TEX(gen, inst);
+ case TGSI_OPCODE_KIL:
+ return emit_KIL(gen, inst);
+
+ case TGSI_OPCODE_IF:
+ return emit_IF(gen, inst);
+ case TGSI_OPCODE_ELSE:
+ return emit_ELSE(gen, inst);
+ case TGSI_OPCODE_ENDIF:
+ return emit_ENDIF(gen, inst);
+
+ case TGSI_OPCODE_BGNLOOP:
+ return emit_BGNLOOP(gen, inst);
+ case TGSI_OPCODE_ENDLOOP:
+ return emit_ENDLOOP(gen, inst);
+ case TGSI_OPCODE_BRK:
+ return emit_BRK(gen, inst);
+ case TGSI_OPCODE_CONT:
+ return emit_CONT(gen, inst);
+
+ case TGSI_OPCODE_DDX:
+ return emit_DDX_DDY(gen, inst, TRUE);
+ case TGSI_OPCODE_DDY:
+ return emit_DDX_DDY(gen, inst, FALSE);
+
+ /* XXX lots more cases to do... */
+
+ default:
+ fprintf(stderr, "Cell: unimplemented TGSI instruction %d!\n",
+ inst->Instruction.Opcode);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+
+
+/**
+ * Emit code for a TGSI immediate value (vector of four floats).
+ * This involves register allocation and initialization.
+ * XXX the initialization should be done by a "prepare" stage, not
+ * once per quad at execution time!
+ */
+static boolean
+emit_immediate(struct codegen *gen, const struct tgsi_full_immediate *immed)
+{
+ int ch;
+
+ assert(gen->num_imm < MAX_TEMPS);
+
+ for (ch = 0; ch < 4; ch++) {
+ float val = immed->u[ch].Float;
+
+ if (ch > 0 && val == immed->u[ch - 1].Float) {
+ /* re-use previous register */
+ gen->imm_regs[gen->num_imm][ch] = gen->imm_regs[gen->num_imm][ch - 1];
+ }
+ else {
+ char str[100];
+ int reg = spe_allocate_available_register(gen->f);
+
+ if (reg < 0)
+ return FALSE;
+
+ sprintf(str, "init $%d = %f", reg, val);
+ spe_comment(gen->f, 0, str);
+
+ /* update immediate map */
+ gen->imm_regs[gen->num_imm][ch] = reg;
+
+ /* emit initializer instruction */
+ spe_load_float(gen->f, reg, val);
+ }
+ }
+
+ gen->num_imm++;
+
+ return TRUE;
+}
+
+
+
+/**
+ * Emit "code" for a TGSI declaration.
+ * We only care about TGSI TEMPORARY register declarations at this time.
+ * For each TGSI TEMPORARY we allocate four SPE registers.
+ */
+static boolean
+emit_declaration(struct cell_context *cell,
+ struct codegen *gen, const struct tgsi_full_declaration *decl)
+{
+ int i, ch;
+
+ switch (decl->Declaration.File) {
+ case TGSI_FILE_TEMPORARY:
+ for (i = decl->DeclarationRange.First;
+ i <= decl->DeclarationRange.Last;
+ i++) {
+ assert(i < MAX_TEMPS);
+ for (ch = 0; ch < 4; ch++) {
+ gen->temp_regs[i][ch] = spe_allocate_available_register(gen->f);
+ if (gen->temp_regs[i][ch] < 0)
+ return FALSE; /* out of regs */
+ }
+
+ /* XXX if we run out of SPE registers, we need to spill
+ * to SPU memory. someday...
+ */
+
+ {
+ char buf[100];
+ sprintf(buf, "TGSI temp[%d] maps to SPU regs [$%d $%d $%d $%d]", i,
+ gen->temp_regs[i][0], gen->temp_regs[i][1],
+ gen->temp_regs[i][2], gen->temp_regs[i][3]);
+ spe_comment(gen->f, 0, buf);
+ }
+ }
+ break;
+ default:
+ ; /* ignore */
+ }
+
+ return TRUE;
+}
+
+
+
+/**
+ * Translate TGSI shader code to SPE instructions. This is done when
+ * the state tracker gives us a new shader (via pipe->create_fs_state()).
+ *
+ * \param cell the rendering context (in)
+ * \param tokens the TGSI shader (in)
+ * \param f the generated function (out)
+ */
+boolean
+cell_gen_fragment_program(struct cell_context *cell,
+ const struct tgsi_token *tokens,
+ struct spe_function *f)
+{
+ struct tgsi_parse_context parse;
+ struct codegen gen;
+ uint ic = 0;
+
+ memset(&gen, 0, sizeof(gen));
+ gen.cell = cell;
+ gen.f = f;
+
+ /* For SPE function calls: reg $3 = first param, $4 = second param, etc. */
+ gen.inputs_reg = 3; /* pointer to inputs array */
+ gen.outputs_reg = 4; /* pointer to outputs array */
+ gen.constants_reg = 5; /* pointer to constants array */
+
+ spe_init_func(f, SPU_MAX_FRAGMENT_PROGRAM_INSTS * SPE_INST_SIZE);
+ spe_allocate_register(f, gen.inputs_reg);
+ spe_allocate_register(f, gen.outputs_reg);
+ spe_allocate_register(f, gen.constants_reg);
+
+ if (cell->debug_flags & CELL_DEBUG_ASM) {
+ spe_print_code(f, TRUE);
+ spe_indent(f, 2*8);
+ printf("Begin %s\n", __FUNCTION__);
+ tgsi_dump(tokens, 0);
+ }
+
+ tgsi_parse_init(&parse, tokens);
+
+ emit_prologue(&gen);
+
+ while (!tgsi_parse_end_of_tokens(&parse) && !gen.error) {
+ tgsi_parse_token(&parse);
+
+ switch (parse.FullToken.Token.Type) {
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ if (f->print) {
+ _debug_printf(" # ");
+ tgsi_dump_immediate(&parse.FullToken.FullImmediate);
+ }
+ if (!emit_immediate(&gen, &parse.FullToken.FullImmediate))
+ gen.error = TRUE;
+ break;
+
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ if (f->print) {
+ _debug_printf(" # ");
+ tgsi_dump_declaration(&parse.FullToken.FullDeclaration);
+ }
+ if (!emit_declaration(cell, &gen, &parse.FullToken.FullDeclaration))
+ gen.error = TRUE;
+ break;
+
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ if (f->print) {
+ _debug_printf(" # ");
+ ic++;
+ tgsi_dump_instruction(&parse.FullToken.FullInstruction, ic);
+ }
+ if (!emit_instruction(&gen, &parse.FullToken.FullInstruction))
+ gen.error = TRUE;
+ break;
+
+ default:
+ assert(0);
+ }
+ }
+
+ if (gen.error) {
+ /* terminate the SPE code */
+ return emit_END(&gen);
+ }
+
+ if (cell->debug_flags & CELL_DEBUG_ASM) {
+ printf("cell_gen_fragment_program nr instructions: %d\n", f->num_inst);
+ printf("End %s\n", __FUNCTION__);
+ }
+
+ tgsi_parse_free( &parse );
+
+ return !gen.error;
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_gen_fp.h b/src/gallium/drivers/cell/ppu/cell_gen_fp.h
new file mode 100644
index 0000000000..99faea7046
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_gen_fp.h
@@ -0,0 +1,42 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+
+#ifndef CELL_GEN_FP_H
+#define CELL_GEN_FP_H
+
+
+
+extern boolean
+cell_gen_fragment_program(struct cell_context *cell,
+ const struct tgsi_token *tokens,
+ struct spe_function *f);
+
+
+#endif /* CELL_GEN_FP_H */
+
diff --git a/src/gallium/drivers/cell/ppu/cell_gen_fragment.c b/src/gallium/drivers/cell/ppu/cell_gen_fragment.c
new file mode 100644
index 0000000000..66d4b3b6a3
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_gen_fragment.c
@@ -0,0 +1,2181 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Generate SPU per-fragment code (actually per-quad code).
+ * \author Brian Paul
+ * \author Bob Ellison
+ */
+
+
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "rtasm/rtasm_ppc_spe.h"
+#include "cell_context.h"
+#include "cell_gen_fragment.h"
+
+
+
+/** Do extra optimizations? */
+#define OPTIMIZATIONS 1
+
+
+/**
+ * Generate SPE code to perform Z/depth testing.
+ *
+ * \param dsa Gallium depth/stencil/alpha state to gen code for
+ * \param f SPE function to append instruction onto.
+ * \param mask_reg register containing quad/pixel "alive" mask (in/out)
+ * \param ifragZ_reg register containing integer fragment Z values (in)
+ * \param ifbZ_reg register containing integer frame buffer Z values (in/out)
+ * \param zmask_reg register containing result of Z test/comparison (out)
+ *
+ * Returns TRUE if the Z-buffer needs to be updated.
+ */
+static boolean
+gen_depth_test(struct spe_function *f,
+ const struct pipe_depth_stencil_alpha_state *dsa,
+ int mask_reg, int ifragZ_reg, int ifbZ_reg, int zmask_reg)
+{
+ /* NOTE: we use clgt below, not cgt, because we want to compare _unsigned_
+ * quantities. This only makes a difference for 32-bit Z values though.
+ */
+ ASSERT(dsa->depth.enabled);
+
+ switch (dsa->depth.func) {
+ case PIPE_FUNC_EQUAL:
+      /* zmask = (ifragZ == ifbZ) */
+ spe_ceq(f, zmask_reg, ifragZ_reg, ifbZ_reg);
+ /* mask = (mask & zmask) */
+ spe_and(f, mask_reg, mask_reg, zmask_reg);
+ break;
+
+ case PIPE_FUNC_NOTEQUAL:
+      /* zmask = (ifragZ == ifbZ) */
+ spe_ceq(f, zmask_reg, ifragZ_reg, ifbZ_reg);
+ /* mask = (mask & ~zmask) */
+ spe_andc(f, mask_reg, mask_reg, zmask_reg);
+ break;
+
+ case PIPE_FUNC_GREATER:
+      /* zmask = (ifragZ > ifbZ) */
+ spe_clgt(f, zmask_reg, ifragZ_reg, ifbZ_reg);
+ /* mask = (mask & zmask) */
+ spe_and(f, mask_reg, mask_reg, zmask_reg);
+ break;
+
+ case PIPE_FUNC_LESS:
+      /* zmask = (ifbZ > ifragZ) */
+ spe_clgt(f, zmask_reg, ifbZ_reg, ifragZ_reg);
+ /* mask = (mask & zmask) */
+ spe_and(f, mask_reg, mask_reg, zmask_reg);
+ break;
+
+ case PIPE_FUNC_LEQUAL:
+      /* zmask = (ifragZ > ifbZ) */
+ spe_clgt(f, zmask_reg, ifragZ_reg, ifbZ_reg);
+ /* mask = (mask & ~zmask) */
+ spe_andc(f, mask_reg, mask_reg, zmask_reg);
+ break;
+
+ case PIPE_FUNC_GEQUAL:
+      /* zmask = (ifbZ > ifragZ) */
+ spe_clgt(f, zmask_reg, ifbZ_reg, ifragZ_reg);
+ /* mask = (mask & ~zmask) */
+ spe_andc(f, mask_reg, mask_reg, zmask_reg);
+ break;
+
+ case PIPE_FUNC_NEVER:
+ spe_il(f, mask_reg, 0); /* mask = {0,0,0,0} */
+ spe_move(f, zmask_reg, mask_reg); /* zmask = mask */
+ break;
+
+ case PIPE_FUNC_ALWAYS:
+ /* mask unchanged */
+ spe_il(f, zmask_reg, ~0); /* zmask = {~0,~0,~0,~0} */
+ break;
+
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ if (dsa->depth.writemask) {
+ /*
+ * If (ztest passed) {
+ * framebufferZ = fragmentZ;
+ * }
+ * OR,
+       * framebufferZ = ztest_passed ? fragmentZ : framebufferZ;
+ */
+ spe_selb(f, ifbZ_reg, ifbZ_reg, ifragZ_reg, mask_reg);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
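+/* Scalar sketch (illustration only) of how the "inverse" comparisons are
+ * built above from clgt plus andc, e.g. for PIPE_FUNC_LEQUAL:
+ *
+ *    zmask = (fragZ > fbZ) ? ~0u : 0u;   // clgt
+ *    mask &= ~zmask;                     // andc: pass where fragZ <= fbZ
+ */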
+
+/**
+ * Generate SPE code to perform alpha testing.
+ *
+ * \param dsa Gallium depth/stencil/alpha state to gen code for
+ * \param f SPE function to append instruction onto.
+ * \param mask_reg register containing quad/pixel "alive" mask (in/out)
+ * \param fragA_reg register containing four fragment alpha values (in)
+ */
+static void
+gen_alpha_test(const struct pipe_depth_stencil_alpha_state *dsa,
+ struct spe_function *f, int mask_reg, int fragA_reg)
+{
+ int ref_reg = spe_allocate_available_register(f);
+ int amask_reg = spe_allocate_available_register(f);
+
+ ASSERT(dsa->alpha.enabled);
+
+ if ((dsa->alpha.func != PIPE_FUNC_NEVER) &&
+ (dsa->alpha.func != PIPE_FUNC_ALWAYS)) {
+ /* load/splat the alpha reference float value */
+ spe_load_float(f, ref_reg, dsa->alpha.ref_value);
+ }
+
+ /* emit code to do the alpha comparison, updating 'mask' */
+ switch (dsa->alpha.func) {
+ case PIPE_FUNC_EQUAL:
+ /* amask = (fragA == ref) */
+ spe_fceq(f, amask_reg, fragA_reg, ref_reg);
+ /* mask = (mask & amask) */
+ spe_and(f, mask_reg, mask_reg, amask_reg);
+ break;
+
+ case PIPE_FUNC_NOTEQUAL:
+ /* amask = (fragA == ref) */
+ spe_fceq(f, amask_reg, fragA_reg, ref_reg);
+ /* mask = (mask & ~amask) */
+ spe_andc(f, mask_reg, mask_reg, amask_reg);
+ break;
+
+ case PIPE_FUNC_GREATER:
+ /* amask = (fragA > ref) */
+ spe_fcgt(f, amask_reg, fragA_reg, ref_reg);
+ /* mask = (mask & amask) */
+ spe_and(f, mask_reg, mask_reg, amask_reg);
+ break;
+
+ case PIPE_FUNC_LESS:
+ /* amask = (ref > fragA) */
+ spe_fcgt(f, amask_reg, ref_reg, fragA_reg);
+ /* mask = (mask & amask) */
+ spe_and(f, mask_reg, mask_reg, amask_reg);
+ break;
+
+ case PIPE_FUNC_LEQUAL:
+ /* amask = (fragA > ref) */
+ spe_fcgt(f, amask_reg, fragA_reg, ref_reg);
+ /* mask = (mask & ~amask) */
+ spe_andc(f, mask_reg, mask_reg, amask_reg);
+ break;
+
+ case PIPE_FUNC_GEQUAL:
+ /* amask = (ref > fragA) */
+ spe_fcgt(f, amask_reg, ref_reg, fragA_reg);
+ /* mask = (mask & ~amask) */
+ spe_andc(f, mask_reg, mask_reg, amask_reg);
+ break;
+
+ case PIPE_FUNC_NEVER:
+ spe_il(f, mask_reg, 0); /* mask = [0,0,0,0] */
+ break;
+
+ case PIPE_FUNC_ALWAYS:
+ /* no-op, mask unchanged */
+ break;
+
+ default:
+ ASSERT(0);
+ break;
+ }
+
+#if OPTIMIZATIONS
+ /* if mask == {0,0,0,0} we're all done, return */
+ {
+ /* re-use amask reg here */
+ int tmp_reg = amask_reg;
+ /* tmp[0] = (mask[0] | mask[1] | mask[2] | mask[3]) */
+ spe_orx(f, tmp_reg, mask_reg);
+ /* if tmp[0] == 0 then return from function call */
+ spe_biz(f, tmp_reg, SPE_REG_RA, 0, 0);
+ }
+#endif
+
+ spe_release_register(f, ref_reg);
+ spe_release_register(f, amask_reg);
+}
+
+
+/**
+ * This pair of functions is used inline to allocate and deallocate
+ * optional constant registers. Once a constant is discovered to be
+ * needed, we will likely need it again, so we don't want to deallocate
+ * it and have to allocate and load it again unnecessarily.
+ */
+static INLINE void
+setup_optional_register(struct spe_function *f,
+ int *r)
+{
+ if (*r < 0)
+ *r = spe_allocate_available_register(f);
+}
+
+static INLINE void
+release_optional_register(struct spe_function *f,
+ int r)
+{
+ if (r >= 0)
+ spe_release_register(f, r);
+}
+
+static INLINE void
+setup_const_register(struct spe_function *f,
+ int *r,
+ float value)
+{
+ if (*r >= 0)
+ return;
+ setup_optional_register(f, r);
+ spe_load_float(f, *r, value);
+}
+
+static INLINE void
+release_const_register(struct spe_function *f,
+ int r)
+{
+ release_optional_register(f, r);
+}
+
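+/* Typical usage of the helpers above (hypothetical sketch): start with
+ * the register handle set to -1, load it lazily on first use, and
+ * release it once at the end:
+ *
+ *    int one_reg = -1;
+ *    ...
+ *    setup_const_register(f, &one_reg, 1.0f);   // first call: alloc + load
+ *    setup_const_register(f, &one_reg, 1.0f);   // later calls: no-op
+ *    ...
+ *    release_const_register(f, one_reg);        // safe even if never used
+ */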
+
+
+/**
+ * Unpack/convert framebuffer colors from four 32-bit packed colors
+ * (fbRGBA) to four float RGBA vectors (fbR, fbG, fbB, fbA).
+ * Each 8-bit color component is expanded into a float in [0.0, 1.0].
+ */
+static void
+unpack_colors(struct spe_function *f,
+ enum pipe_format color_format,
+ int fbRGBA_reg,
+ int fbR_reg, int fbG_reg, int fbB_reg, int fbA_reg)
+{
+ int mask0_reg = spe_allocate_available_register(f);
+ int mask1_reg = spe_allocate_available_register(f);
+ int mask2_reg = spe_allocate_available_register(f);
+ int mask3_reg = spe_allocate_available_register(f);
+
+ spe_load_int(f, mask0_reg, 0xff);
+ spe_load_int(f, mask1_reg, 0xff00);
+ spe_load_int(f, mask2_reg, 0xff0000);
+ spe_load_int(f, mask3_reg, 0xff000000);
+
+ spe_comment(f, 0, "Unpack framebuffer colors, convert to floats");
+
+ switch (color_format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ /* fbB = fbRGBA & mask */
+ spe_and(f, fbB_reg, fbRGBA_reg, mask0_reg);
+
+ /* fbG = fbRGBA & mask */
+ spe_and(f, fbG_reg, fbRGBA_reg, mask1_reg);
+
+ /* fbR = fbRGBA & mask */
+ spe_and(f, fbR_reg, fbRGBA_reg, mask2_reg);
+
+ /* fbA = fbRGBA & mask */
+ spe_and(f, fbA_reg, fbRGBA_reg, mask3_reg);
+
+ /* fbG = fbG >> 8 */
+ spe_roti(f, fbG_reg, fbG_reg, -8);
+
+ /* fbR = fbR >> 16 */
+ spe_roti(f, fbR_reg, fbR_reg, -16);
+
+ /* fbA = fbA >> 24 */
+ spe_roti(f, fbA_reg, fbA_reg, -24);
+ break;
+
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ /* fbA = fbRGBA & mask */
+ spe_and(f, fbA_reg, fbRGBA_reg, mask0_reg);
+
+ /* fbR = fbRGBA & mask */
+ spe_and(f, fbR_reg, fbRGBA_reg, mask1_reg);
+
+ /* fbG = fbRGBA & mask */
+ spe_and(f, fbG_reg, fbRGBA_reg, mask2_reg);
+
+ /* fbB = fbRGBA & mask */
+ spe_and(f, fbB_reg, fbRGBA_reg, mask3_reg);
+
+ /* fbR = fbR >> 8 */
+ spe_roti(f, fbR_reg, fbR_reg, -8);
+
+ /* fbG = fbG >> 16 */
+ spe_roti(f, fbG_reg, fbG_reg, -16);
+
+ /* fbB = fbB >> 24 */
+ spe_roti(f, fbB_reg, fbB_reg, -24);
+ break;
+
+ default:
+ ASSERT(0);
+ }
+
+ /* convert int[4] in [0,255] to float[4] in [0.0, 1.0] */
+ spe_cuflt(f, fbR_reg, fbR_reg, 8);
+ spe_cuflt(f, fbG_reg, fbG_reg, 8);
+ spe_cuflt(f, fbB_reg, fbB_reg, 8);
+ spe_cuflt(f, fbA_reg, fbA_reg, 8);
+
+ spe_release_register(f, mask0_reg);
+ spe_release_register(f, mask1_reg);
+ spe_release_register(f, mask2_reg);
+ spe_release_register(f, mask3_reg);
+}
+
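+/* Worked example (illustrative value): for PIPE_FORMAT_A8R8G8B8_UNORM the
+ * packed pixel 0x80ff4000 unpacks above to
+ *
+ *    A = 0x80 -> 128/256 = 0.5      R = 0xff -> 255/256 ~= 0.996
+ *    G = 0x40 ->  64/256 = 0.25     B = 0x00 -> 0.0
+ *
+ * assuming the scale-8 cuflt divides the 8-bit value by 256, as the
+ * conversion comment above indicates.
+ */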
+
+/**
+ * Generate SPE code to implement the given blend mode for a quad of pixels.
+ * \param f SPE function to append instruction onto.
+ * \param fragR_reg register with fragment red values (float) (in/out)
+ * \param fragG_reg register with fragment green values (float) (in/out)
+ * \param fragB_reg register with fragment blue values (float) (in/out)
+ * \param fragA_reg register with fragment alpha values (float) (in/out)
+ * \param fbRGBA_reg register with packed framebuffer colors (integer) (in)
+ */
+static void
+gen_blend(const struct pipe_blend_state *blend,
+ const struct pipe_blend_color *blend_color,
+ struct spe_function *f,
+ enum pipe_format color_format,
+ int fragR_reg, int fragG_reg, int fragB_reg, int fragA_reg,
+ int fbRGBA_reg)
+{
+ int term1R_reg = spe_allocate_available_register(f);
+ int term1G_reg = spe_allocate_available_register(f);
+ int term1B_reg = spe_allocate_available_register(f);
+ int term1A_reg = spe_allocate_available_register(f);
+
+ int term2R_reg = spe_allocate_available_register(f);
+ int term2G_reg = spe_allocate_available_register(f);
+ int term2B_reg = spe_allocate_available_register(f);
+ int term2A_reg = spe_allocate_available_register(f);
+
+ int fbR_reg = spe_allocate_available_register(f);
+ int fbG_reg = spe_allocate_available_register(f);
+ int fbB_reg = spe_allocate_available_register(f);
+ int fbA_reg = spe_allocate_available_register(f);
+
+ int tmp_reg = spe_allocate_available_register(f);
+
+ /* Optional constant registers we might or might not end up using;
+ * if we do use them, make sure we only allocate them once by
+ * keeping a flag on each one.
+ */
+ int one_reg = -1;
+ int constR_reg = -1, constG_reg = -1, constB_reg = -1, constA_reg = -1;
+
+ ASSERT(blend->blend_enable);
+
+ /* packed RGBA -> float colors */
+ unpack_colors(f, color_format, fbRGBA_reg,
+ fbR_reg, fbG_reg, fbB_reg, fbA_reg);
+
+ /*
+ * Compute Src RGB terms. We're actually looking for the value
+ * of (the appropriate RGB factors) * (the incoming source RGB color),
+ * because in some cases (like PIPE_BLENDFACTOR_ONE and
+ * PIPE_BLENDFACTOR_ZERO) we can avoid doing unnecessary math.
+ */
+ switch (blend->rgb_src_factor) {
+ case PIPE_BLENDFACTOR_ONE:
+ /* factors = (1,1,1), so term = (R,G,B) */
+ spe_move(f, term1R_reg, fragR_reg);
+ spe_move(f, term1G_reg, fragG_reg);
+ spe_move(f, term1B_reg, fragB_reg);
+ break;
+ case PIPE_BLENDFACTOR_ZERO:
+ /* factors = (0,0,0), so term = (0,0,0) */
+ spe_load_float(f, term1R_reg, 0.0f);
+ spe_load_float(f, term1G_reg, 0.0f);
+ spe_load_float(f, term1B_reg, 0.0f);
+ break;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ /* factors = (R,G,B), so term = (R*R, G*G, B*B) */
+ spe_fm(f, term1R_reg, fragR_reg, fragR_reg);
+ spe_fm(f, term1G_reg, fragG_reg, fragG_reg);
+ spe_fm(f, term1B_reg, fragB_reg, fragB_reg);
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ /* factors = (A,A,A), so term = (R*A, G*A, B*A) */
+ spe_fm(f, term1R_reg, fragR_reg, fragA_reg);
+ spe_fm(f, term1G_reg, fragG_reg, fragA_reg);
+ spe_fm(f, term1B_reg, fragB_reg, fragA_reg);
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ /* factors = (1-R,1-G,1-B), so term = (R*(1-R), G*(1-G), B*(1-B))
+ * or in other words term = (R-R*R, G-G*G, B-B*B)
+ * fnms(a,b,c,d) computes a = d - b*c
+ */
+ spe_fnms(f, term1R_reg, fragR_reg, fragR_reg, fragR_reg);
+ spe_fnms(f, term1G_reg, fragG_reg, fragG_reg, fragG_reg);
+ spe_fnms(f, term1B_reg, fragB_reg, fragB_reg, fragB_reg);
+ break;
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ /* factors = (Rfb,Gfb,Bfb), so term = (R*Rfb, G*Gfb, B*Bfb) */
+ spe_fm(f, term1R_reg, fragR_reg, fbR_reg);
+ spe_fm(f, term1G_reg, fragG_reg, fbG_reg);
+ spe_fm(f, term1B_reg, fragB_reg, fbB_reg);
+ break;
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ /* factors = (1-Rfb,1-Gfb,1-Bfb), so term = (R*(1-Rfb),G*(1-Gfb),B*(1-Bfb))
+ * or term = (R-R*Rfb, G-G*Gfb, B-B*Bfb)
+ * fnms(a,b,c,d) computes a = d - b*c
+ */
+ spe_fnms(f, term1R_reg, fragR_reg, fbR_reg, fragR_reg);
+ spe_fnms(f, term1G_reg, fragG_reg, fbG_reg, fragG_reg);
+ spe_fnms(f, term1B_reg, fragB_reg, fbB_reg, fragB_reg);
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ /* factors = (1-A,1-A,1-A), so term = (R*(1-A),G*(1-A),B*(1-A))
+ * or term = (R-R*A,G-G*A,B-B*A)
+ * fnms(a,b,c,d) computes a = d - b*c
+ */
+ spe_fnms(f, term1R_reg, fragR_reg, fragA_reg, fragR_reg);
+ spe_fnms(f, term1G_reg, fragG_reg, fragA_reg, fragG_reg);
+ spe_fnms(f, term1B_reg, fragB_reg, fragA_reg, fragB_reg);
+ break;
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ /* factors = (Afb, Afb, Afb), so term = (R*Afb, G*Afb, B*Afb) */
+ spe_fm(f, term1R_reg, fragR_reg, fbA_reg);
+ spe_fm(f, term1G_reg, fragG_reg, fbA_reg);
+ spe_fm(f, term1B_reg, fragB_reg, fbA_reg);
+ break;
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ /* factors = (1-Afb, 1-Afb, 1-Afb), so term = (R*(1-Afb),G*(1-Afb),B*(1-Afb))
+       * or term = (R-R*Afb,G-G*Afb,B-B*Afb)
+ * fnms(a,b,c,d) computes a = d - b*c
+ */
+ spe_fnms(f, term1R_reg, fragR_reg, fbA_reg, fragR_reg);
+ spe_fnms(f, term1G_reg, fragG_reg, fbA_reg, fragG_reg);
+ spe_fnms(f, term1B_reg, fragB_reg, fbA_reg, fragB_reg);
+ break;
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ /* We need the optional constant color registers */
+ setup_const_register(f, &constR_reg, blend_color->color[0]);
+ setup_const_register(f, &constG_reg, blend_color->color[1]);
+ setup_const_register(f, &constB_reg, blend_color->color[2]);
+ /* now, factor = (Rc,Gc,Bc), so term = (R*Rc,G*Gc,B*Bc) */
+ spe_fm(f, term1R_reg, fragR_reg, constR_reg);
+ spe_fm(f, term1G_reg, fragG_reg, constG_reg);
+ spe_fm(f, term1B_reg, fragB_reg, constB_reg);
+ break;
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ /* we'll need the optional constant alpha register */
+ setup_const_register(f, &constA_reg, blend_color->color[3]);
+ /* factor = (Ac,Ac,Ac), so term = (R*Ac,G*Ac,B*Ac) */
+ spe_fm(f, term1R_reg, fragR_reg, constA_reg);
+ spe_fm(f, term1G_reg, fragG_reg, constA_reg);
+ spe_fm(f, term1B_reg, fragB_reg, constA_reg);
+ break;
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ /* We need the optional constant color registers */
+ setup_const_register(f, &constR_reg, blend_color->color[0]);
+ setup_const_register(f, &constG_reg, blend_color->color[1]);
+ setup_const_register(f, &constB_reg, blend_color->color[2]);
+ /* factor = (1-Rc,1-Gc,1-Bc), so term = (R*(1-Rc),G*(1-Gc),B*(1-Bc))
+ * or term = (R-R*Rc, G-G*Gc, B-B*Bc)
+ * fnms(a,b,c,d) computes a = d - b*c
+ */
+ spe_fnms(f, term1R_reg, fragR_reg, constR_reg, fragR_reg);
+ spe_fnms(f, term1G_reg, fragG_reg, constG_reg, fragG_reg);
+ spe_fnms(f, term1B_reg, fragB_reg, constB_reg, fragB_reg);
+ break;
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+      /* We need the optional constant alpha register */
+      setup_const_register(f, &constA_reg, blend_color->color[3]);
+ /* factor = (1-Ac,1-Ac,1-Ac), so term = (R*(1-Ac),G*(1-Ac),B*(1-Ac))
+ * or term = (R-R*Ac,G-G*Ac,B-B*Ac)
+ * fnms(a,b,c,d) computes a = d - b*c
+ */
+ spe_fnms(f, term1R_reg, fragR_reg, constA_reg, fragR_reg);
+ spe_fnms(f, term1G_reg, fragG_reg, constA_reg, fragG_reg);
+ spe_fnms(f, term1B_reg, fragB_reg, constA_reg, fragB_reg);
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ /* We'll need the optional {1,1,1,1} register */
+ setup_const_register(f, &one_reg, 1.0f);
+ /* factor = (min(A,1-Afb),min(A,1-Afb),min(A,1-Afb)), so
+ * term = (R*min(A,1-Afb), G*min(A,1-Afb), B*min(A,1-Afb))
+ * We could expand the term (as a*min(b,c) == min(a*b,a*c)
+ * as long as a is positive), but then we'd have to do three
+ * spe_float_min() functions instead of one, so this is simpler.
+ */
+ /* tmp = 1 - Afb */
+ spe_fs(f, tmp_reg, one_reg, fbA_reg);
+ /* tmp = min(A,tmp) */
+ spe_float_min(f, tmp_reg, fragA_reg, tmp_reg);
+ /* term = R*tmp */
+ spe_fm(f, term1R_reg, fragR_reg, tmp_reg);
+ spe_fm(f, term1G_reg, fragG_reg, tmp_reg);
+ spe_fm(f, term1B_reg, fragB_reg, tmp_reg);
+ break;
+
+ /* These are special D3D cases involving a second color output
+ * from the fragment shader. I'm not sure we can support them
+ * yet... XXX
+ */
+ case PIPE_BLENDFACTOR_SRC1_COLOR:
+ case PIPE_BLENDFACTOR_SRC1_ALPHA:
+ case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
+ case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
+
+ default:
+ ASSERT(0);
+ }
+
+ /*
+ * Compute Src Alpha term. Like the above, we're looking for
+ * the full term A*factor, not just the factor itself, because
+ * in many cases we can avoid doing unnecessary multiplies.
+ */
+ switch (blend->alpha_src_factor) {
+ case PIPE_BLENDFACTOR_ZERO:
+ /* factor = 0, so term = 0 */
+ spe_load_float(f, term1A_reg, 0.0f);
+ break;
+
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE: /* fall through */
+ case PIPE_BLENDFACTOR_ONE:
+ /* factor = 1, so term = A */
+ spe_move(f, term1A_reg, fragA_reg);
+ break;
+
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ /* factor = A, so term = A*A */
+ spe_fm(f, term1A_reg, fragA_reg, fragA_reg);
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ spe_fm(f, term1A_reg, fragA_reg, fragA_reg);
+ break;
+
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA: /* fall through */
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ /* factor = 1-A, so term = A*(1-A) = A-A*A */
+ /* fnms(a,b,c,d) computes a = d - b*c */
+ spe_fnms(f, term1A_reg, fragA_reg, fragA_reg, fragA_reg);
+ break;
+
+ case PIPE_BLENDFACTOR_DST_ALPHA: /* fall through */
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ /* factor = Afb, so term = A*Afb */
+ spe_fm(f, term1A_reg, fragA_reg, fbA_reg);
+ break;
+
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA: /* fall through */
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ /* factor = 1-Afb, so term = A*(1-Afb) = A - A*Afb */
+ /* fnms(a,b,c,d) computes a = d - b*c */
+ spe_fnms(f, term1A_reg, fragA_reg, fbA_reg, fragA_reg);
+ break;
+
+ case PIPE_BLENDFACTOR_CONST_ALPHA: /* fall through */
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ /* We need the optional constA_reg register */
+ setup_const_register(f, &constA_reg, blend_color->color[3]);
+ /* factor = Ac, so term = A*Ac */
+ spe_fm(f, term1A_reg, fragA_reg, constA_reg);
+ break;
+
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA: /* fall through */
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ /* We need the optional constA_reg register */
+ setup_const_register(f, &constA_reg, blend_color->color[3]);
+ /* factor = 1-Ac, so term = A*(1-Ac) = A-A*Ac */
+ /* fnms(a,b,c,d) computes a = d - b*c */
+ spe_fnms(f, term1A_reg, fragA_reg, constA_reg, fragA_reg);
+ break;
+
+ /* These are special D3D cases involving a second color output
+ * from the fragment shader. I'm not sure we can support them
+ * yet... XXX
+ */
+ case PIPE_BLENDFACTOR_SRC1_COLOR:
+ case PIPE_BLENDFACTOR_SRC1_ALPHA:
+ case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
+ case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
+ default:
+ ASSERT(0);
+ }
+
+ /*
+ * Compute Dest RGB term. Like the above, we're looking for
+ * the full term (Rfb,Gfb,Bfb)*(factor), not just the factor itself, because
+ * in many cases we can avoid doing unnecessary multiplies.
+ */
+ switch (blend->rgb_dst_factor) {
+ case PIPE_BLENDFACTOR_ONE:
+ /* factors = (1,1,1), so term = (Rfb,Gfb,Bfb) */
+ spe_move(f, term2R_reg, fbR_reg);
+ spe_move(f, term2G_reg, fbG_reg);
+ spe_move(f, term2B_reg, fbB_reg);
+ break;
+ case PIPE_BLENDFACTOR_ZERO:
+      /* factors = (0,0,0), so term = (0,0,0) */
+ spe_load_float(f, term2R_reg, 0.0f);
+ spe_load_float(f, term2G_reg, 0.0f);
+ spe_load_float(f, term2B_reg, 0.0f);
+ break;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ /* factors = (R,G,B), so term = (R*Rfb, G*Gfb, B*Bfb) */
+ spe_fm(f, term2R_reg, fbR_reg, fragR_reg);
+ spe_fm(f, term2G_reg, fbG_reg, fragG_reg);
+ spe_fm(f, term2B_reg, fbB_reg, fragB_reg);
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ /* factors = (1-R,1-G,1-B), so term = (Rfb*(1-R), Gfb*(1-G), Bfb*(1-B))
+ * or in other words term = (Rfb-Rfb*R, Gfb-Gfb*G, Bfb-Bfb*B)
+ * fnms(a,b,c,d) computes a = d - b*c
+ */
+ spe_fnms(f, term2R_reg, fragR_reg, fbR_reg, fbR_reg);
+ spe_fnms(f, term2G_reg, fragG_reg, fbG_reg, fbG_reg);
+ spe_fnms(f, term2B_reg, fragB_reg, fbB_reg, fbB_reg);
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ /* factors = (A,A,A), so term = (Rfb*A, Gfb*A, Bfb*A) */
+ spe_fm(f, term2R_reg, fbR_reg, fragA_reg);
+ spe_fm(f, term2G_reg, fbG_reg, fragA_reg);
+ spe_fm(f, term2B_reg, fbB_reg, fragA_reg);
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ /* factors = (1-A,1-A,1-A) so term = (Rfb-Rfb*A,Gfb-Gfb*A,Bfb-Bfb*A) */
+ /* fnms(a,b,c,d) computes a = d - b*c */
+ spe_fnms(f, term2R_reg, fbR_reg, fragA_reg, fbR_reg);
+ spe_fnms(f, term2G_reg, fbG_reg, fragA_reg, fbG_reg);
+ spe_fnms(f, term2B_reg, fbB_reg, fragA_reg, fbB_reg);
+ break;
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ /* factors = (Rfb,Gfb,Bfb), so term = (Rfb*Rfb, Gfb*Gfb, Bfb*Bfb) */
+ spe_fm(f, term2R_reg, fbR_reg, fbR_reg);
+ spe_fm(f, term2G_reg, fbG_reg, fbG_reg);
+ spe_fm(f, term2B_reg, fbB_reg, fbB_reg);
+ break;
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ /* factors = (1-Rfb,1-Gfb,1-Bfb), so term = (Rfb*(1-Rfb),Gfb*(1-Gfb),Bfb*(1-Bfb))
+ * or term = (Rfb-Rfb*Rfb, Gfb-Gfb*Gfb, Bfb-Bfb*Bfb)
+ * fnms(a,b,c,d) computes a = d - b*c
+ */
+ spe_fnms(f, term2R_reg, fbR_reg, fbR_reg, fbR_reg);
+ spe_fnms(f, term2G_reg, fbG_reg, fbG_reg, fbG_reg);
+ spe_fnms(f, term2B_reg, fbB_reg, fbB_reg, fbB_reg);
+ break;
+
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ /* factors = (Afb, Afb, Afb), so term = (Rfb*Afb, Gfb*Afb, Bfb*Afb) */
+ spe_fm(f, term2R_reg, fbR_reg, fbA_reg);
+ spe_fm(f, term2G_reg, fbG_reg, fbA_reg);
+ spe_fm(f, term2B_reg, fbB_reg, fbA_reg);
+ break;
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ /* factors = (1-Afb, 1-Afb, 1-Afb), so term = (Rfb*(1-Afb),Gfb*(1-Afb),Bfb*(1-Afb))
+ * or term = (Rfb-Rfb*Afb,Gfb-Gfb*Afb,Bfb-Bfb*Afb)
+ * fnms(a,b,c,d) computes a = d - b*c
+ */
+ spe_fnms(f, term2R_reg, fbR_reg, fbA_reg, fbR_reg);
+ spe_fnms(f, term2G_reg, fbG_reg, fbA_reg, fbG_reg);
+ spe_fnms(f, term2B_reg, fbB_reg, fbA_reg, fbB_reg);
+ break;
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ /* We need the optional constant color registers */
+ setup_const_register(f, &constR_reg, blend_color->color[0]);
+ setup_const_register(f, &constG_reg, blend_color->color[1]);
+ setup_const_register(f, &constB_reg, blend_color->color[2]);
+ /* now, factor = (Rc,Gc,Bc), so term = (Rfb*Rc,Gfb*Gc,Bfb*Bc) */
+ spe_fm(f, term2R_reg, fbR_reg, constR_reg);
+ spe_fm(f, term2G_reg, fbG_reg, constG_reg);
+ spe_fm(f, term2B_reg, fbB_reg, constB_reg);
+ break;
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ /* we'll need the optional constant alpha register */
+ setup_const_register(f, &constA_reg, blend_color->color[3]);
+ /* factor = (Ac,Ac,Ac), so term = (Rfb*Ac,Gfb*Ac,Bfb*Ac) */
+ spe_fm(f, term2R_reg, fbR_reg, constA_reg);
+ spe_fm(f, term2G_reg, fbG_reg, constA_reg);
+ spe_fm(f, term2B_reg, fbB_reg, constA_reg);
+ break;
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ /* We need the optional constant color registers */
+ setup_const_register(f, &constR_reg, blend_color->color[0]);
+ setup_const_register(f, &constG_reg, blend_color->color[1]);
+ setup_const_register(f, &constB_reg, blend_color->color[2]);
+ /* factor = (1-Rc,1-Gc,1-Bc), so term = (Rfb*(1-Rc),Gfb*(1-Gc),Bfb*(1-Bc))
+ * or term = (Rfb-Rfb*Rc, Gfb-Gfb*Gc, Bfb-Bfb*Bc)
+ * fnms(a,b,c,d) computes a = d - b*c
+ */
+ spe_fnms(f, term2R_reg, fbR_reg, constR_reg, fbR_reg);
+ spe_fnms(f, term2G_reg, fbG_reg, constG_reg, fbG_reg);
+ spe_fnms(f, term2B_reg, fbB_reg, constB_reg, fbB_reg);
+ break;
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+      /* We need the optional constant alpha register */
+      setup_const_register(f, &constA_reg, blend_color->color[3]);
+ /* factor = (1-Ac,1-Ac,1-Ac), so term = (Rfb*(1-Ac),Gfb*(1-Ac),Bfb*(1-Ac))
+ * or term = (Rfb-Rfb*Ac,Gfb-Gfb*Ac,Bfb-Bfb*Ac)
+ * fnms(a,b,c,d) computes a = d - b*c
+ */
+ spe_fnms(f, term2R_reg, fbR_reg, constA_reg, fbR_reg);
+ spe_fnms(f, term2G_reg, fbG_reg, constA_reg, fbG_reg);
+ spe_fnms(f, term2B_reg, fbB_reg, constA_reg, fbB_reg);
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE: /* not supported for dest RGB */
+ ASSERT(0);
+ break;
+
+ /* These are special D3D cases involving a second color output
+ * from the fragment shader. I'm not sure we can support them
+ * yet... XXX
+ */
+ case PIPE_BLENDFACTOR_SRC1_COLOR:
+ case PIPE_BLENDFACTOR_SRC1_ALPHA:
+ case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
+ case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
+
+ default:
+ ASSERT(0);
+ }
+
+ /*
+ * Compute Dest Alpha term. Like the above, we're looking for
+ * the full term Afb*factor, not just the factor itself, because
+ * in many cases we can avoid doing unnecessary multiplies.
+ */
+ switch (blend->alpha_dst_factor) {
+ case PIPE_BLENDFACTOR_ONE:
+ /* factor = 1, so term = Afb */
+ spe_move(f, term2A_reg, fbA_reg);
+ break;
+ case PIPE_BLENDFACTOR_ZERO:
+ /* factor = 0, so term = 0 */
+ spe_load_float(f, term2A_reg, 0.0f);
+ break;
+
+ case PIPE_BLENDFACTOR_SRC_ALPHA: /* fall through */
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ /* factor = A, so term = Afb*A */
+ spe_fm(f, term2A_reg, fbA_reg, fragA_reg);
+ break;
+
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA: /* fall through */
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ /* factor = 1-A, so term = Afb*(1-A) = Afb-Afb*A */
+ /* fnms(a,b,c,d) computes a = d - b*c */
+ spe_fnms(f, term2A_reg, fbA_reg, fragA_reg, fbA_reg);
+ break;
+
+ case PIPE_BLENDFACTOR_DST_ALPHA: /* fall through */
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ /* factor = Afb, so term = Afb*Afb */
+ spe_fm(f, term2A_reg, fbA_reg, fbA_reg);
+ break;
+
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA: /* fall through */
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ /* factor = 1-Afb, so term = Afb*(1-Afb) = Afb - Afb*Afb */
+ /* fnms(a,b,c,d) computes a = d - b*c */
+ spe_fnms(f, term2A_reg, fbA_reg, fbA_reg, fbA_reg);
+ break;
+
+ case PIPE_BLENDFACTOR_CONST_ALPHA: /* fall through */
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ /* We need the optional constA_reg register */
+ setup_const_register(f, &constA_reg, blend_color->color[3]);
+ /* factor = Ac, so term = Afb*Ac */
+ spe_fm(f, term2A_reg, fbA_reg, constA_reg);
+ break;
+
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA: /* fall through */
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ /* We need the optional constA_reg register */
+ setup_const_register(f, &constA_reg, blend_color->color[3]);
+ /* factor = 1-Ac, so term = Afb*(1-Ac) = Afb-Afb*Ac */
+ /* fnms(a,b,c,d) computes a = d - b*c */
+ spe_fnms(f, term2A_reg, fbA_reg, constA_reg, fbA_reg);
+ break;
+
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE: /* not supported for dest alpha */
+ ASSERT(0);
+ break;
+
+ /* These are special D3D cases involving a second color output
+ * from the fragment shader. I'm not sure we can support them
+ * yet... XXX
+ */
+ case PIPE_BLENDFACTOR_SRC1_COLOR:
+ case PIPE_BLENDFACTOR_SRC1_ALPHA:
+ case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
+ case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
+ default:
+ ASSERT(0);
+ }
+
+ /*
+ * Combine Src/Dest RGB terms as per the blend equation.
+ */
+ switch (blend->rgb_func) {
+ case PIPE_BLEND_ADD:
+ spe_fa(f, fragR_reg, term1R_reg, term2R_reg);
+ spe_fa(f, fragG_reg, term1G_reg, term2G_reg);
+ spe_fa(f, fragB_reg, term1B_reg, term2B_reg);
+ break;
+ case PIPE_BLEND_SUBTRACT:
+ spe_fs(f, fragR_reg, term1R_reg, term2R_reg);
+ spe_fs(f, fragG_reg, term1G_reg, term2G_reg);
+ spe_fs(f, fragB_reg, term1B_reg, term2B_reg);
+ break;
+ case PIPE_BLEND_REVERSE_SUBTRACT:
+ spe_fs(f, fragR_reg, term2R_reg, term1R_reg);
+ spe_fs(f, fragG_reg, term2G_reg, term1G_reg);
+ spe_fs(f, fragB_reg, term2B_reg, term1B_reg);
+ break;
+ case PIPE_BLEND_MIN:
+ spe_float_min(f, fragR_reg, term1R_reg, term2R_reg);
+ spe_float_min(f, fragG_reg, term1G_reg, term2G_reg);
+ spe_float_min(f, fragB_reg, term1B_reg, term2B_reg);
+ break;
+ case PIPE_BLEND_MAX:
+ spe_float_max(f, fragR_reg, term1R_reg, term2R_reg);
+ spe_float_max(f, fragG_reg, term1G_reg, term2G_reg);
+ spe_float_max(f, fragB_reg, term1B_reg, term2B_reg);
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ /*
+ * Combine Src/Dest A term
+ */
+ switch (blend->alpha_func) {
+ case PIPE_BLEND_ADD:
+ spe_fa(f, fragA_reg, term1A_reg, term2A_reg);
+ break;
+ case PIPE_BLEND_SUBTRACT:
+ spe_fs(f, fragA_reg, term1A_reg, term2A_reg);
+ break;
+ case PIPE_BLEND_REVERSE_SUBTRACT:
+ spe_fs(f, fragA_reg, term2A_reg, term1A_reg);
+ break;
+ case PIPE_BLEND_MIN:
+ spe_float_min(f, fragA_reg, term1A_reg, term2A_reg);
+ break;
+ case PIPE_BLEND_MAX:
+ spe_float_max(f, fragA_reg, term1A_reg, term2A_reg);
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ spe_release_register(f, term1R_reg);
+ spe_release_register(f, term1G_reg);
+ spe_release_register(f, term1B_reg);
+ spe_release_register(f, term1A_reg);
+
+ spe_release_register(f, term2R_reg);
+ spe_release_register(f, term2G_reg);
+ spe_release_register(f, term2B_reg);
+ spe_release_register(f, term2A_reg);
+
+ spe_release_register(f, fbR_reg);
+ spe_release_register(f, fbG_reg);
+ spe_release_register(f, fbB_reg);
+ spe_release_register(f, fbA_reg);
+
+ spe_release_register(f, tmp_reg);
+
+ /* Free any optional registers that actually got used */
+ release_const_register(f, one_reg);
+ release_const_register(f, constR_reg);
+ release_const_register(f, constG_reg);
+ release_const_register(f, constB_reg);
+ release_const_register(f, constA_reg);
+}
+
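+/* Scalar reference (hypothetical helper, illustration only) for the
+ * fnms-based factor terms used throughout gen_blend() above: since
+ * fnms(a,b,c,d) computes a = d - b*c, a term like R*(1-A) is emitted as
+ * one fnms(term, R, A, R) = R - R*A instead of a subtract plus multiply.
+ */
+#if 0
+static float
+blend_term_inv_src_alpha(float r, float a)
+{
+   /* equals r * (1.0f - a), computed as r - r*a */
+   return r - r * a;
+}
+#endif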
+
+static void
+gen_logicop(const struct pipe_blend_state *blend,
+ struct spe_function *f,
+ int fragRGBA_reg, int fbRGBA_reg)
+{
+ /* We've got four 32-bit RGBA packed pixels in each of
+ * fragRGBA_reg and fbRGBA_reg, not sets of floating-point
+ * reds, greens, blues, and alphas.
+    */
+ ASSERT(blend->logicop_enable);
+
+ switch(blend->logicop_func) {
+ case PIPE_LOGICOP_CLEAR: /* 0 */
+ spe_zero(f, fragRGBA_reg);
+ break;
+ case PIPE_LOGICOP_NOR: /* ~(s | d) */
+ spe_nor(f, fragRGBA_reg, fragRGBA_reg, fbRGBA_reg);
+ break;
+ case PIPE_LOGICOP_AND_INVERTED: /* ~s & d */
+ /* andc R, A, B computes R = A & ~B */
+ spe_andc(f, fragRGBA_reg, fbRGBA_reg, fragRGBA_reg);
+ break;
+ case PIPE_LOGICOP_COPY_INVERTED: /* ~s */
+ spe_complement(f, fragRGBA_reg, fragRGBA_reg);
+ break;
+ case PIPE_LOGICOP_AND_REVERSE: /* s & ~d */
+ /* andc R, A, B computes R = A & ~B */
+ spe_andc(f, fragRGBA_reg, fragRGBA_reg, fbRGBA_reg);
+ break;
+ case PIPE_LOGICOP_INVERT: /* ~d */
+ /* Note that (A nor A) == ~(A|A) == ~A */
+ spe_nor(f, fragRGBA_reg, fbRGBA_reg, fbRGBA_reg);
+ break;
+ case PIPE_LOGICOP_XOR: /* s ^ d */
+ spe_xor(f, fragRGBA_reg, fragRGBA_reg, fbRGBA_reg);
+ break;
+ case PIPE_LOGICOP_NAND: /* ~(s & d) */
+ spe_nand(f, fragRGBA_reg, fragRGBA_reg, fbRGBA_reg);
+ break;
+ case PIPE_LOGICOP_AND: /* s & d */
+ spe_and(f, fragRGBA_reg, fragRGBA_reg, fbRGBA_reg);
+ break;
+ case PIPE_LOGICOP_EQUIV: /* ~(s ^ d) */
+ spe_xor(f, fragRGBA_reg, fragRGBA_reg, fbRGBA_reg);
+ spe_complement(f, fragRGBA_reg, fragRGBA_reg);
+ break;
+ case PIPE_LOGICOP_NOOP: /* d */
+ spe_move(f, fragRGBA_reg, fbRGBA_reg);
+ break;
+ case PIPE_LOGICOP_OR_INVERTED: /* ~s | d */
+ /* orc R, A, B computes R = A | ~B */
+ spe_orc(f, fragRGBA_reg, fbRGBA_reg, fragRGBA_reg);
+ break;
+ case PIPE_LOGICOP_COPY: /* s */
+ break;
+ case PIPE_LOGICOP_OR_REVERSE: /* s | ~d */
+ /* orc R, A, B computes R = A | ~B */
+ spe_orc(f, fragRGBA_reg, fragRGBA_reg, fbRGBA_reg);
+ break;
+ case PIPE_LOGICOP_OR: /* s | d */
+ spe_or(f, fragRGBA_reg, fragRGBA_reg, fbRGBA_reg);
+ break;
+ case PIPE_LOGICOP_SET: /* 1 */
+ spe_load_int(f, fragRGBA_reg, 0xffffffff);
+ break;
+ default:
+ ASSERT(0);
+ }
+}
+
+
+/**
+ * Generate code to pack a quad of float colors into four 32-bit integers.
+ *
+ * \param f SPE function to append instruction onto.
+ * \param color_format the dest color packing format
+ * \param r_reg register containing four red values (in/clobbered)
+ * \param g_reg register containing four green values (in/clobbered)
+ * \param b_reg register containing four blue values (in/clobbered)
+ * \param a_reg register containing four alpha values (in/clobbered)
+ * \param rgba_reg register to store the packed RGBA colors (out)
+ */
+static void
+gen_pack_colors(struct spe_function *f,
+ enum pipe_format color_format,
+ int r_reg, int g_reg, int b_reg, int a_reg,
+ int rgba_reg)
+{
+ int rg_reg = spe_allocate_available_register(f);
+ int ba_reg = spe_allocate_available_register(f);
+
+ /* Convert float[4] in [0.0,1.0] to int[4] in [0,~0], with clamping */
+ spe_cfltu(f, r_reg, r_reg, 32);
+ spe_cfltu(f, g_reg, g_reg, 32);
+ spe_cfltu(f, b_reg, b_reg, 32);
+ spe_cfltu(f, a_reg, a_reg, 32);
+
+ /* Shift the most significant bytes to the least significant positions.
+ * I.e.: reg = reg >> 24
+ */
+ spe_rotmi(f, r_reg, r_reg, -24);
+ spe_rotmi(f, g_reg, g_reg, -24);
+ spe_rotmi(f, b_reg, b_reg, -24);
+ spe_rotmi(f, a_reg, a_reg, -24);
+
+ /* Shift the color bytes according to the surface format */
+ if (color_format == PIPE_FORMAT_A8R8G8B8_UNORM) {
+ spe_roti(f, g_reg, g_reg, 8); /* green <<= 8 */
+ spe_roti(f, r_reg, r_reg, 16); /* red <<= 16 */
+ spe_roti(f, a_reg, a_reg, 24); /* alpha <<= 24 */
+ }
+ else if (color_format == PIPE_FORMAT_B8G8R8A8_UNORM) {
+ spe_roti(f, r_reg, r_reg, 8); /* red <<= 8 */
+ spe_roti(f, g_reg, g_reg, 16); /* green <<= 16 */
+ spe_roti(f, b_reg, b_reg, 24); /* blue <<= 24 */
+ }
+ else {
+ ASSERT(0);
+ }
+
+ /* Merge red, green, blue, alpha registers to make packed RGBA colors.
+ * Eg: after shifting according to color_format we might have:
+ * R = {0x00ff0000, 0x00110000, 0x00220000, 0x00330000}
+ * G = {0x0000ff00, 0x00004400, 0x00005500, 0x00006600}
+ * B = {0x000000ff, 0x00000077, 0x00000088, 0x00000099}
+ * A = {0xff000000, 0xaa000000, 0xbb000000, 0xcc000000}
+ * OR-ing all those together gives us four packed colors:
+ * RGBA = {0xffffffff, 0xaa114477, 0xbb225588, 0xcc336699}
+ */
+ spe_or(f, rg_reg, r_reg, g_reg);
+ spe_or(f, ba_reg, a_reg, b_reg);
+ spe_or(f, rgba_reg, rg_reg, ba_reg);
+
+ spe_release_register(f, rg_reg);
+ spe_release_register(f, ba_reg);
+}
+
+
+static void
+gen_colormask(struct spe_function *f,
+ uint colormask,
+ enum pipe_format color_format,
+ int fragRGBA_reg, int fbRGBA_reg)
+{
+ /* We've got four 32-bit RGBA packed pixels in each of
+ * fragRGBA_reg and fbRGBA_reg, not sets of floating-point
+ * reds, greens, blues, and alphas. Further, the pixels
+ * are packed according to the given color format, not
+ * necessarily RGBA...
+ */
+ uint r_mask;
+ uint g_mask;
+ uint b_mask;
+ uint a_mask;
+
+ /* Calculate exactly where the bits for any particular color
+ * end up, so we can mask them correctly.
+ */
+ switch(color_format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ /* ARGB */
+ a_mask = 0xff000000;
+ r_mask = 0x00ff0000;
+ g_mask = 0x0000ff00;
+ b_mask = 0x000000ff;
+ break;
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ /* BGRA */
+ b_mask = 0xff000000;
+ g_mask = 0x00ff0000;
+ r_mask = 0x0000ff00;
+ a_mask = 0x000000ff;
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ /* For each R, G, B, and A component we're supposed to mask out,
+ * clear its bits. Then our mask operation later will work
+ * as expected.
+ */
+ if (!(colormask & PIPE_MASK_R)) {
+ r_mask = 0;
+ }
+ if (!(colormask & PIPE_MASK_G)) {
+ g_mask = 0;
+ }
+ if (!(colormask & PIPE_MASK_B)) {
+ b_mask = 0;
+ }
+ if (!(colormask & PIPE_MASK_A)) {
+ a_mask = 0;
+ }
+
+ /* Get a temporary register to hold the mask that will be applied
+ * to the fragment
+ */
+ int colormask_reg = spe_allocate_available_register(f);
+
+ /* The actual mask we're going to use is an OR of the remaining R, G, B,
+ * and A masks. Load the result value into our temporary register.
+ */
+ spe_load_uint(f, colormask_reg, r_mask | g_mask | b_mask | a_mask);
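+ /* E.g. colormask = PIPE_MASK_R | PIPE_MASK_A with an A8R8G8B8 surface
+ * loads 0xffff0000: red and alpha will come from the fragment colors,
+ * green and blue from the framebuffer.
+ */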
+
+ /* Use the mask register to select between the fragment color
+ * values and the frame buffer color values. Wherever the
+ * mask has a 0 bit, the current frame buffer color should override
+ * the fragment color. Wherever the mask has a 1 bit, the
+ * fragment color should persevere. The Select Bits (selb rt, rA, rB, rM)
+ * instruction will select bits from its first operand rA wherever the
+ * mask bits rM are 0, and from its second operand rB wherever the
+ * mask bits rM are 1. That means that the frame buffer color is the
+ * first operand, and the fragment color the second.
+ */
+ spe_selb(f, fragRGBA_reg, fbRGBA_reg, fragRGBA_reg, colormask_reg);
+
+ /* Release the temporary register and we're done */
+ spe_release_register(f, colormask_reg);
+}
+
+
+/**
+ * This function is annoyingly similar to gen_depth_test(), above, except
+ * that instead of comparing two varying values (i.e. fragment and buffer),
+ * we're comparing a varying value with a static value. As such, we have
+ * access to the Compare Immediate instructions where we don't in
+ * gen_depth_test(); that access is the main difference between the two.
+ *
+ * There's some added complexity if there's a non-trivial state->valuemask
+ * value; then the stencil and reference values both must be masked.
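+ * (For example, with valuemask 0x0f and ref_value 0x13, an EQUAL test
+ * becomes (s & 0x0f) == 0x03.)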
+ *
+ * The return value in the stencil_pass_reg is a bitmask of valid
+ * fragments that also passed the stencil test. The bitmask of valid
+ * fragments that failed would be found in
+ * (fragment_mask_reg & ~stencil_pass_reg).
+ */
+static void
+gen_stencil_test(struct spe_function *f,
+ const struct pipe_stencil_state *state,
+ uint stencil_max_value,
+ int fragment_mask_reg,
+ int fbS_reg,
+ int stencil_pass_reg)
+{
+ /* Generate code that puts the set of passing fragments into the
+ * stencil_pass_reg register, taking into account whether each fragment
+ * was active to begin with.
+ */
+ switch (state->func) {
+ case PIPE_FUNC_EQUAL:
+ if (state->valuemask == stencil_max_value) {
+ /* stencil_pass = fragment_mask & (s == reference) */
+ spe_compare_equal_uint(f, stencil_pass_reg, fbS_reg, state->ref_value);
+ spe_and(f, stencil_pass_reg, fragment_mask_reg, stencil_pass_reg);
+ }
+ else {
+ /* stencil_pass = fragment_mask & ((s&mask) == (reference&mask)) */
+ int tmp_masked_stencil = spe_allocate_available_register(f);
+ spe_and_uint(f, tmp_masked_stencil, fbS_reg, state->valuemask);
+ spe_compare_equal_uint(f, stencil_pass_reg, tmp_masked_stencil,
+ state->valuemask & state->ref_value);
+ spe_and(f, stencil_pass_reg, fragment_mask_reg, stencil_pass_reg);
+ spe_release_register(f, tmp_masked_stencil);
+ }
+ break;
+
+ case PIPE_FUNC_NOTEQUAL:
+ if (state->valuemask == stencil_max_value) {
+ /* stencil_pass = fragment_mask & ~(s == reference) */
+ spe_compare_equal_uint(f, stencil_pass_reg, fbS_reg, state->ref_value);
+ spe_andc(f, stencil_pass_reg, fragment_mask_reg, stencil_pass_reg);
+ }
+ else {
+ /* stencil_pass = fragment_mask & ~((s&mask) == (reference&mask)) */
+ int tmp_masked_stencil = spe_allocate_available_register(f);
+ spe_and_uint(f, tmp_masked_stencil, fbS_reg, state->valuemask);
+ spe_compare_equal_uint(f, stencil_pass_reg, tmp_masked_stencil,
+ state->valuemask & state->ref_value);
+ spe_andc(f, stencil_pass_reg, fragment_mask_reg, stencil_pass_reg);
+ spe_release_register(f, tmp_masked_stencil);
+ }
+ break;
+
+ case PIPE_FUNC_LESS:
+ if (state->valuemask == stencil_max_value) {
+ /* stencil_pass = fragment_mask & (reference < s) */
+ spe_compare_greater_uint(f, stencil_pass_reg, fbS_reg, state->ref_value);
+ spe_and(f, stencil_pass_reg, fragment_mask_reg, stencil_pass_reg);
+ }
+ else {
+ /* stencil_pass = fragment_mask & ((reference&mask) < (s & mask)) */
+ int tmp_masked_stencil = spe_allocate_available_register(f);
+ spe_and_uint(f, tmp_masked_stencil, fbS_reg, state->valuemask);
+ spe_compare_greater_uint(f, stencil_pass_reg, tmp_masked_stencil,
+ state->valuemask & state->ref_value);
+ spe_and(f, stencil_pass_reg, fragment_mask_reg, stencil_pass_reg);
+ spe_release_register(f, tmp_masked_stencil);
+ }
+ break;
+
+ case PIPE_FUNC_GREATER:
+ if (state->valuemask == stencil_max_value) {
+ /* stencil_pass = fragment_mask & (reference > s) */
+ /* There's no convenient Compare Less Than Immediate instruction, so
+ * we'll have to do this one the harder way, by loading a register and
+ * comparing directly. Compare Logical Greater Than Word (clgt)
+ * treats its operands as unsigned - no sign extension.
+ */
+ int tmp_reg = spe_allocate_available_register(f);
+ spe_load_uint(f, tmp_reg, state->ref_value);
+ spe_clgt(f, stencil_pass_reg, tmp_reg, fbS_reg);
+ spe_and(f, stencil_pass_reg, fragment_mask_reg, stencil_pass_reg);
+ spe_release_register(f, tmp_reg);
+ }
+ else {
+ /* stencil_pass = fragment_mask & ((reference&mask) > (s&mask)) */
+ int tmp_reg = spe_allocate_available_register(f);
+ int tmp_masked_stencil = spe_allocate_available_register(f);
+ spe_load_uint(f, tmp_reg, state->valuemask & state->ref_value);
+ spe_and_uint(f, tmp_masked_stencil, fbS_reg, state->valuemask);
+ spe_clgt(f, stencil_pass_reg, tmp_reg, tmp_masked_stencil);
+ spe_and(f, stencil_pass_reg, fragment_mask_reg, stencil_pass_reg);
+ spe_release_register(f, tmp_reg);
+ spe_release_register(f, tmp_masked_stencil);
+ }
+ break;
+
+ case PIPE_FUNC_GEQUAL:
+ if (state->valuemask == stencil_max_value) {
+ /* stencil_pass = fragment_mask & (reference >= s)
+ * = fragment_mask & ~(s > reference) */
+ spe_compare_greater_uint(f, stencil_pass_reg, fbS_reg,
+ state->ref_value);
+ spe_andc(f, stencil_pass_reg, fragment_mask_reg, stencil_pass_reg);
+ }
+ else {
+ /* stencil_pass = fragment_mask & ~((s&mask) > (reference&mask)) */
+ int tmp_masked_stencil = spe_allocate_available_register(f);
+ spe_and_uint(f, tmp_masked_stencil, fbS_reg, state->valuemask);
+ spe_compare_greater_uint(f, stencil_pass_reg, tmp_masked_stencil,
+ state->valuemask & state->ref_value);
+ spe_andc(f, stencil_pass_reg, fragment_mask_reg, stencil_pass_reg);
+ spe_release_register(f, tmp_masked_stencil);
+ }
+ break;
+
+ case PIPE_FUNC_LEQUAL:
+ if (state->valuemask == stencil_max_value) {
+ /* stencil_pass = fragment_mask & (reference <= s)
+ * = fragment_mask & ~(reference > s) */
+ /* As above, we have to do this by loading a register */
+ int tmp_reg = spe_allocate_available_register(f);
+ spe_load_uint(f, tmp_reg, state->ref_value);
+ spe_clgt(f, stencil_pass_reg, tmp_reg, fbS_reg);
+ spe_andc(f, stencil_pass_reg, fragment_mask_reg, stencil_pass_reg);
+ spe_release_register(f, tmp_reg);
+ }
+ else {
+ /* stencil_pass = fragment_mask & ~((reference&mask) > (s&mask)) */
+ int tmp_reg = spe_allocate_available_register(f);
+ int tmp_masked_stencil = spe_allocate_available_register(f);
+ spe_load_uint(f, tmp_reg, state->ref_value & state->valuemask);
+ spe_and_uint(f, tmp_masked_stencil, fbS_reg, state->valuemask);
+ spe_clgt(f, stencil_pass_reg, tmp_reg, tmp_masked_stencil);
+ spe_andc(f, stencil_pass_reg, fragment_mask_reg, stencil_pass_reg);
+ spe_release_register(f, tmp_reg);
+ spe_release_register(f, tmp_masked_stencil);
+ }
+ break;
+
+ case PIPE_FUNC_NEVER:
+ /* stencil_pass = fragment_mask & 0 = 0 */
+ spe_load_uint(f, stencil_pass_reg, 0);
+ break;
+
+ case PIPE_FUNC_ALWAYS:
+ /* stencil_pass = fragment_mask & 1 = fragment_mask */
+ spe_move(f, stencil_pass_reg, fragment_mask_reg);
+ break;
+ }
+
+ /* The fragments that passed the stencil test are now in stencil_pass_reg.
+ * The fragments that failed would be (fragment_mask_reg & ~stencil_pass_reg).
+ */
+}
+
+
+/**
+ * This function generates code that calculates a set of new stencil values
+ * given the earlier values and the operation to apply. It does not
+ * apply any tests. It is intended to be called up to 3 times
+ * (for the stencil fail operation, for the stencil pass-z fail operation,
+ * and for the stencil pass-z pass operation) to collect up to three
+ * possible sets of values, and for the caller to combine them based
+ * on the result of the tests.
+ *
+ * stencil_max_value should be (2^n - 1) where n is the number of bits
+ * in the stencil buffer - in other words, it should be usable as a mask.
+ */
+static void
+gen_stencil_values(struct spe_function *f,
+ uint stencil_op,
+ uint stencil_ref_value,
+ uint stencil_max_value,
+ int fbS_reg,
+ int newS_reg)
+{
+ /* The code below assumes that newS_reg and fbS_reg are not the same
+ * register; if they can be, the calculations below will have to use
+ * an additional temporary register. For now, mark the assumption
+ * with an assertion that will fail if they are the same.
+ */
+ ASSERT(fbS_reg != newS_reg);
+
+ /* The code also assumes that the stencil_max_value is of the form
+ * 2^n-1 and can therefore be used as a mask for the valid bits in
+ * addition to a maximum. Make sure this is the case as well.
+ * The clever math below exploits the fact that incrementing a
+ * binary number serves to flip all the bits of a number starting at
+ * the LSB and continuing to (and including) the first zero bit
+ * found. That means that a number and its increment will always
+ * have at least one bit in common (the high order bit, if nothing
+ * else) *unless* the number is zero, *or* the number is of a form
+ * consisting of some number of 1s in the low-order bits followed
+ * by nothing but 0s in the high-order bits. The latter case
+ * implies it's of the form 2^n-1.
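+ * E.g. 0xff + 1 = 0x100 and (0x100 & 0xff) == 0, while 0xfe + 1 = 0xff
+ * and (0xff & 0xfe) != 0.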
+ */
+ ASSERT(stencil_max_value > 0 && ((stencil_max_value + 1) & stencil_max_value) == 0);
+
+ switch(stencil_op) {
+ case PIPE_STENCIL_OP_KEEP:
+ /* newS = S */
+ spe_move(f, newS_reg, fbS_reg);
+ break;
+
+ case PIPE_STENCIL_OP_ZERO:
+ /* newS = 0 */
+ spe_zero(f, newS_reg);
+ break;
+
+ case PIPE_STENCIL_OP_REPLACE:
+ /* newS = stencil reference value */
+ spe_load_uint(f, newS_reg, stencil_ref_value);
+ break;
+
+ case PIPE_STENCIL_OP_INCR: {
+ /* newS = (s == max ? max : s + 1) */
+ int equals_reg = spe_allocate_available_register(f);
+
+ spe_compare_equal_uint(f, equals_reg, fbS_reg, stencil_max_value);
+ /* Add Word Immediate computes rT = rA + 10-bit signed immediate */
+ spe_ai(f, newS_reg, fbS_reg, 1);
+ /* Select from the current value or the new value based on the equality test */
+ spe_selb(f, newS_reg, newS_reg, fbS_reg, equals_reg);
+
+ spe_release_register(f, equals_reg);
+ break;
+ }
+ case PIPE_STENCIL_OP_DECR: {
+ /* newS = (s == 0 ? 0 : s - 1) */
+ int equals_reg = spe_allocate_available_register(f);
+
+ spe_compare_equal_uint(f, equals_reg, fbS_reg, 0);
+ /* Add Word Immediate with a (-1) value works */
+ spe_ai(f, newS_reg, fbS_reg, -1);
+ /* Select from the current value or the new value based on the equality test */
+ spe_selb(f, newS_reg, newS_reg, fbS_reg, equals_reg);
+
+ spe_release_register(f, equals_reg);
+ break;
+ }
+ case PIPE_STENCIL_OP_INCR_WRAP:
+ /* newS = (s == max ? 0 : s + 1), but since max is 2^n-1, we can
+ * do a normal add and mask off the correct bits
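+ * (e.g. with max = 0xff, a stored value of 0xff becomes 0x100 & 0xff = 0)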
+ */
+ spe_ai(f, newS_reg, fbS_reg, 1);
+ spe_and_uint(f, newS_reg, newS_reg, stencil_max_value);
+ break;
+
+ case PIPE_STENCIL_OP_DECR_WRAP:
+ /* newS = (s == 0 ? max : s - 1), but we'll pull the same mask trick as above */
+ spe_ai(f, newS_reg, fbS_reg, -1);
+ spe_and_uint(f, newS_reg, newS_reg, stencil_max_value);
+ break;
+
+ case PIPE_STENCIL_OP_INVERT:
+ /* newS = ~s. We take advantage of the mask/max value to invert only
+ * the valid bits for the field so we don't have to do an extra "and".
+ */
+ spe_xor_uint(f, newS_reg, fbS_reg, stencil_max_value);
+ break;
+
+ default:
+ ASSERT(0);
+ }
+}
+
+
+/**
+ * This function generates code to get all the necessary possible
+ * stencil values. For each of the output registers (fail_reg,
+ * zfail_reg, and zpass_reg), it either allocates a new register
+ * and calculates a new set of values based on the stencil operation,
+ * or it reuses a register allocation and calculation done for an
+ * earlier (matching) operation, or it reuses the fbS_reg register
+ * (if the stencil operation is KEEP, which doesn't change the
+ * stencil buffer).
+ *
+ * Since this function allocates a variable number of registers,
+ * to avoid incurring complex logic to free them, they should
+ * be allocated after a spe_allocate_register_set() call
+ * and released by the corresponding spe_release_register_set() call.
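+ * For example, if fail_op, zfail_op and zpass_op are all
+ * PIPE_STENCIL_OP_REPLACE, only one new register is allocated and it is
+ * returned in all three output pointers.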
+ */
+static void
+gen_get_stencil_values(struct spe_function *f,
+ const struct pipe_stencil_state *stencil,
+ const uint depth_enabled,
+ int fbS_reg,
+ int *fail_reg,
+ int *zfail_reg,
+ int *zpass_reg)
+{
+ uint zfail_op;
+
+ /* Stenciling had better be enabled here */
+ ASSERT(stencil->enabled);
+
+ /* If the depth test is not enabled, it is treated as though it always
+ * passes, which means that the zfail_op is not considered - a
+ * failing stencil test triggers the fail_op, and a passing one
+ * triggers the zpass_op.
+ *
+ * As an optimization, override calculation of the zfail_op values
+ * if they aren't going to be used. By setting the value of
+ * the operation to PIPE_STENCIL_OP_KEEP, its value will be assumed
+ * to match the incoming stencil values, and no calculation will
+ * be done.
+ */
+ if (depth_enabled) {
+ zfail_op = stencil->zfail_op;
+ }
+ else {
+ zfail_op = PIPE_STENCIL_OP_KEEP;
+ }
+
+ /* One-sided or front-facing stencil */
+ if (stencil->fail_op == PIPE_STENCIL_OP_KEEP) {
+ *fail_reg = fbS_reg;
+ }
+ else {
+ *fail_reg = spe_allocate_available_register(f);
+ gen_stencil_values(f, stencil->fail_op, stencil->ref_value,
+ 0xff, fbS_reg, *fail_reg);
+ }
+
+ /* Check the possibly overridden value, not the structure value */
+ if (zfail_op == PIPE_STENCIL_OP_KEEP) {
+ *zfail_reg = fbS_reg;
+ }
+ else if (zfail_op == stencil->fail_op) {
+ *zfail_reg = *fail_reg;
+ }
+ else {
+ *zfail_reg = spe_allocate_available_register(f);
+ gen_stencil_values(f, zfail_op, stencil->ref_value,
+ 0xff, fbS_reg, *zfail_reg);
+ }
+
+ if (stencil->zpass_op == PIPE_STENCIL_OP_KEEP) {
+ *zpass_reg = fbS_reg;
+ }
+ else if (stencil->zpass_op == stencil->fail_op) {
+ *zpass_reg = *fail_reg;
+ }
+ else if (stencil->zpass_op == zfail_op) {
+ *zpass_reg = *zfail_reg;
+ }
+ else {
+ *zpass_reg = spe_allocate_available_register(f);
+ gen_stencil_values(f, stencil->zpass_op, stencil->ref_value,
+ 0xff, fbS_reg, *zpass_reg);
+ }
+}
+
+/**
+ * Note that fbZ_reg may *not* be set on entry, if in fact
+ * the depth test is not enabled. This function must not use
+ * the register if depth is not enabled.
+ */
+static boolean
+gen_stencil_depth_test(struct spe_function *f,
+ const struct pipe_depth_stencil_alpha_state *dsa,
+ const uint facing,
+ const int mask_reg, const int fragZ_reg,
+ const int fbZ_reg, const int fbS_reg)
+{
+ /* True if we've generated code that could require writeback to the
+ * depth and/or stencil buffers
+ */
+ boolean modified_buffers = FALSE;
+
+ boolean need_to_calculate_stencil_values;
+ boolean need_to_writemask_stencil_values;
+
+ const struct pipe_stencil_state *stencil;
+
+ /* Registers. We may or may not actually allocate these, depending
+ * on whether the state values indicate that we need them.
+ */
+ int stencil_pass_reg, stencil_fail_reg;
+ int stencil_fail_values, stencil_pass_depth_fail_values, stencil_pass_depth_pass_values;
+ int stencil_writemask_reg;
+ int zmask_reg;
+ int newS_reg;
+
+ /* Stenciling is quite complex: up to six different configurable stencil
+ * operations/calculations can be required (three each for front-facing
+ * and back-facing fragments). Many of those operations will likely
+ * be identical, so there's good reason to try to avoid calculating
+ * the same values more than once (which unfortunately makes the code less
+ * straightforward).
+ *
+ * To make register management easier, we start a new
+ * register set; we can release all the registers in the set at
+ * once, and avoid having to keep track of exactly which registers
+ * we allocate. We can still allocate and free registers as
+ * desired (if we know we no longer need a register), but we don't
+ * have to take on the complexity of tracking the more difficult
+ * register usage scenarios.
+ */
+ spe_comment(f, 0, "Allocating stencil register set");
+ spe_allocate_register_set(f);
+
+ /* The facing we're given is the fragment facing; it doesn't
+ * exactly match the stencil facing. If stencil is enabled,
+ * but two-sided stencil is *not* enabled, we use the same
+ * stencil settings for both front- and back-facing fragments.
+ * We only use the "back-facing" stencil for backfacing fragments
+ * if two-sided stenciling is enabled.
+ */
+ if (facing == CELL_FACING_BACK && dsa->stencil[1].enabled) {
+ stencil = &dsa->stencil[1];
+ }
+ else {
+ stencil = &dsa->stencil[0];
+ }
+
+ /* Calculate the writemask. If the writemask is trivial (either
+ * all 0s, meaning that we don't need to calculate any stencil values
+ * because they're not going to change the stencil anyway, or all 1s,
+ * meaning that we have to calculate the stencil values but do not
+ * need to mask them), we can avoid generating code. Don't forget
+ * that we need to consider backfacing stencil, if enabled.
+ *
+ * Note that if the backface stencil is *not* enabled, the backface
+ * stencil will have the same values as the frontface stencil.
+ */
+ if (stencil->fail_op == PIPE_STENCIL_OP_KEEP &&
+ stencil->zfail_op == PIPE_STENCIL_OP_KEEP &&
+ stencil->zpass_op == PIPE_STENCIL_OP_KEEP) {
+ need_to_calculate_stencil_values = FALSE;
+ need_to_writemask_stencil_values = FALSE;
+ }
+ else if (stencil->writemask == 0x0) {
+ /* All changes are writemasked out, so no need to calculate
+ * what those changes might be, and no need to write anything back.
+ */
+ need_to_calculate_stencil_values = FALSE;
+ need_to_writemask_stencil_values = FALSE;
+ }
+ else if (stencil->writemask == 0xff) {
+ /* Still trivial, but a little less so. We need to write the stencil
+ * values, but we don't need to mask them.
+ */
+ need_to_calculate_stencil_values = TRUE;
+ need_to_writemask_stencil_values = FALSE;
+ }
+ else {
+ /* The general case: calculate, mask, and write */
+ need_to_calculate_stencil_values = TRUE;
+ need_to_writemask_stencil_values = TRUE;
+
+ /* While we're here, generate code that calculates what the
+ * writemask should be. If backface stenciling is enabled,
+ * and the backface writemask is not the same as the frontface
+ * writemask, we'll have to generate code that merges the
+ * two masks into a single effective mask based on fragment facing.
+ */
+ spe_comment(f, 0, "Computing stencil writemask");
+ stencil_writemask_reg = spe_allocate_available_register(f);
+ spe_load_uint(f, stencil_writemask_reg, dsa->stencil[facing].writemask);
+ }
+
+ /* At least one-sided stenciling must be on. Generate code that
+ * runs the stencil test on the basic/front-facing stencil, leaving
+ * the mask of passing stencil bits in stencil_pass_reg. This mask will
+ * be used both to mask the set of active pixels, and also to
+ * determine how the stencil buffer changes.
+ *
+ * This test will *not* change the value in mask_reg (because we don't
+ * yet know whether to apply the two-sided stencil or one-sided stencil).
+ */
+ spe_comment(f, 0, "Running basic stencil test");
+ stencil_pass_reg = spe_allocate_available_register(f);
+ gen_stencil_test(f, stencil, 0xff, mask_reg, fbS_reg, stencil_pass_reg);
+
+ /* Generate code that, given the mask of valid fragments and the
+ * mask of valid fragments that passed the stencil test, computes
+ * the mask of valid fragments that failed the stencil test. We
+ * have to do this before we run a depth test (because the
+ * depth test should not be performed on fragments that failed the
+ * stencil test, and because the depth test will update the
+ * mask of valid fragments based on the results of the depth test).
+ */
+ spe_comment(f, 0, "Computing stencil fail mask and updating fragment mask");
+ stencil_fail_reg = spe_allocate_available_register(f);
+ spe_andc(f, stencil_fail_reg, mask_reg, stencil_pass_reg);
+ /* Now remove the stenciled-out pixels from the valid fragment mask,
+ * so we can later use the valid fragment mask in the depth test.
+ */
+ spe_and(f, mask_reg, mask_reg, stencil_pass_reg);
+
+ /* We may not need to calculate stencil values, if the writemask is off */
+ if (need_to_calculate_stencil_values) {
+ /* Generate code that calculates exactly which stencil values we need,
+ * without calculating the same value twice (say, if two different
+ * stencil ops have the same value). This code will work for one-sided
+ * and two-sided stenciling (so that we take into account that operations
+ * may match between front and back stencils), and will also take into
+ * account whether the depth test is enabled (if the depth test is off,
+ * we don't need any of the zfail results, because the depth test always
+ * is considered to pass if it is disabled). Any register value that
+ * does not need to be calculated will come back with the same value
+ * that's in fbS_reg.
+ *
+ * This function will allocate a variable number of registers that
+ * will be released as part of the register set.
+ */
+ spe_comment(f, 0, facing == CELL_FACING_FRONT
+ ? "Computing front-facing stencil values"
+ : "Computing back-facing stencil values");
+ gen_get_stencil_values(f, stencil, dsa->depth.enabled, fbS_reg,
+ &stencil_fail_values, &stencil_pass_depth_fail_values,
+ &stencil_pass_depth_pass_values);
+ }
+
+ /* We now have all the stencil values we need. We also need
+ * the results of the depth test to figure out which
+ * stencil values will become the new stencil values. (Even if
+ * we aren't actually calculating stencil values, we need to apply
+ * the depth test if it's enabled.)
+ *
+ * The code generated by gen_depth_test() returns the results of the
+ * test in the given register, but also alters the mask_reg based
+ * on the results of the test.
+ */
+ if (dsa->depth.enabled) {
+ spe_comment(f, 0, "Running stencil depth test");
+ zmask_reg = spe_allocate_available_register(f);
+ modified_buffers |= gen_depth_test(f, dsa, mask_reg, fragZ_reg,
+ fbZ_reg, zmask_reg);
+ }
+
+ if (need_to_calculate_stencil_values) {
+
+ /* If we need to writemask the stencil values before going into
+ * the stencil buffer, we'll have to use a new register to
+ * hold the new values. If not, we can just keep using the
+ * current register.
+ */
+ if (need_to_writemask_stencil_values) {
+ newS_reg = spe_allocate_available_register(f);
+ spe_comment(f, 0, "Saving current stencil values for writemasking");
+ spe_move(f, newS_reg, fbS_reg);
+ }
+ else {
+ newS_reg = fbS_reg;
+ }
+
+ /* Merge in the selected stencil fail values */
+ if (stencil_fail_values != fbS_reg) {
+ spe_comment(f, 0, "Loading stencil fail values");
+ spe_selb(f, newS_reg, newS_reg, stencil_fail_values, stencil_fail_reg);
+ modified_buffers = TRUE;
+ }
+
+ /* Same for the stencil pass/depth fail values. If this calculation
+ * is not needed (say, if depth test is off), then the
+ * stencil_pass_depth_fail_values register will be equal to fbS_reg
+ * and we'll skip the calculation.
+ */
+ if (stencil_pass_depth_fail_values != fbS_reg) {
+ /* We don't actually have a stencil pass/depth fail mask yet.
+ * Calculate it here from the stencil passing mask and the
+ * depth passing mask. Note that zmask_reg *must* have been
+ * set above if we're here.
+ */
+ int stencil_pass_depth_fail_mask =
+ spe_allocate_available_register(f);
+
+ spe_comment(f, 0, "Loading stencil pass/depth fail values");
+ spe_andc(f, stencil_pass_depth_fail_mask, stencil_pass_reg, zmask_reg);
+
+ spe_selb(f, newS_reg, newS_reg, stencil_pass_depth_fail_values,
+ stencil_pass_depth_fail_mask);
+
+ spe_release_register(f, stencil_pass_depth_fail_mask);
+ modified_buffers = TRUE;
+ }
+
+ /* Same for the stencil pass/depth pass mask. Note that we
+ * *can* get here with zmask_reg being unset (if the depth
+ * test is off but the stencil test is on). In this case,
+ * we assume the depth test passes, and don't need to mask
+ * the stencil pass mask with the Z mask.
+ */
+ if (stencil_pass_depth_pass_values != fbS_reg) {
+ if (dsa->depth.enabled) {
+ int stencil_pass_depth_pass_mask = spe_allocate_available_register(f);
+ /* We'll need a separate register */
+ spe_comment(f, 0, "Loading stencil pass/depth pass values");
+ spe_and(f, stencil_pass_depth_pass_mask, stencil_pass_reg, zmask_reg);
+ spe_selb(f, newS_reg, newS_reg, stencil_pass_depth_pass_values, stencil_pass_depth_pass_mask);
+ spe_release_register(f, stencil_pass_depth_pass_mask);
+ }
+ else {
+ /* We can use the same stencil-pass register */
+ spe_comment(f, 0, "Loading stencil pass values");
+ spe_selb(f, newS_reg, newS_reg, stencil_pass_depth_pass_values, stencil_pass_reg);
+ }
+ modified_buffers = TRUE;
+ }
+
+ /* Almost done. If we need to writemask, do it now, leaving the
+ * results in the fbS_reg register passed in. If we don't need
+ * to writemask, then the results are *already* in the fbS_reg,
+ * so there's nothing more to do.
+ */
+
+ if (need_to_writemask_stencil_values && modified_buffers) {
+ /* The Select Bytes command makes a fine writemask. Where
+ * the mask is 0, the first (original) values are retained,
+ * effectively masking out changes. Where the mask is 1, the
+ * second (new) values are retained, incorporating changes.
+ */
+ spe_comment(f, 0, "Writemasking new stencil values");
+ spe_selb(f, fbS_reg, fbS_reg, newS_reg, stencil_writemask_reg);
+ }
+
+ } /* done calculating stencil values */
+
+ /* The stencil and/or depth values have been applied, and the
+ * mask_reg, fbS_reg, and fbZ_reg values have been updated.
+ * We're all done, except that we've allocated a fair number
+ * of registers that we didn't bother tracking. Release all
+ * those registers as part of the register set, and go home.
+ */
+ spe_comment(f, 0, "Releasing stencil register set");
+ spe_release_register_set(f);
+
+ /* Return TRUE if we could have modified the stencil and/or
+ * depth buffers.
+ */
+ return modified_buffers;
+}
+
+
+/**
+ * Generate depth and/or stencil test code.
+ * \param cell the rendering context
+ * \param dsa depth/stencil/alpha state
+ * \param f spe function to emit
+ * \param facing either CELL_FACING_FRONT or CELL_FACING_BACK
+ * \param mask_reg register containing the pixel alive/dead mask
+ * \param depth_tile_reg register containing address of z/stencil tile
+ * \param quad_offset_reg offset to quad from start of tile
+ * \param fragZ_reg register containing fragment Z values
+ */
+static void
+gen_depth_stencil(struct cell_context *cell,
+ const struct pipe_depth_stencil_alpha_state *dsa,
+ struct spe_function *f,
+ uint facing,
+ int mask_reg,
+ int depth_tile_reg,
+ int quad_offset_reg,
+ int fragZ_reg)
+
+{
+ const enum pipe_format zs_format = cell->framebuffer.zsbuf->format;
+ boolean write_depth_stencil;
+
+ /* framebuffer's combined z/stencil values register */
+ int fbZS_reg = spe_allocate_available_register(f);
+
+ /* Framebuffer Z values register */
+ int fbZ_reg = spe_allocate_available_register(f);
+
+ /* Framebuffer stencil values register (may not be used) */
+ int fbS_reg = spe_allocate_available_register(f);
+
+ /* 24-bit mask register (may not be used) */
+ int zmask_reg = spe_allocate_available_register(f);
+
+ /**
+ * The following code:
+ * 1. fetch quad of packed Z/S values from the framebuffer tile.
+ * 2. extract the separate Z and S values from the packed values
+ * 3. convert fragment Z values from float in [0,1] to 32/24/16-bit ints
+ *
+ * The instructions for doing this are interleaved for better performance.
+ */
+ spe_comment(f, 0, "Fetch Z/stencil quad from tile");
+
+ switch(zs_format) {
+ case PIPE_FORMAT_S8Z24_UNORM: /* fall through */
+ case PIPE_FORMAT_X8Z24_UNORM:
+ /* prepare mask to extract Z vals from ZS vals */
+ spe_load_uint(f, zmask_reg, 0x00ffffff);
+
+ /* convert fragment Z from [0,1] to 32-bit ints */
+ spe_cfltu(f, fragZ_reg, fragZ_reg, 32);
+
+ /* Load: fbZS_reg = memory[depth_tile_reg + offset_reg] */
+ spe_lqx(f, fbZS_reg, depth_tile_reg, quad_offset_reg);
+
+ /* right shift 32-bit fragment Z to 24 bits */
+ spe_rotmi(f, fragZ_reg, fragZ_reg, -8);
+
+ /* extract 24-bit Z values from ZS values by masking */
+ spe_and(f, fbZ_reg, fbZS_reg, zmask_reg);
+
+ /* extract 8-bit stencil values by shifting */
+ spe_rotmi(f, fbS_reg, fbZS_reg, -24);
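+ /* e.g. packed ZS value 0xab123456 yields fbZ = 0x00123456, fbS = 0x000000ab */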
+ break;
+
+ case PIPE_FORMAT_Z24S8_UNORM: /* fall through */
+ case PIPE_FORMAT_Z24X8_UNORM:
+ /* convert fragment Z from [0,1] to 32-bit ints */
+ spe_cfltu(f, fragZ_reg, fragZ_reg, 32);
+
+ /* Load: fbZS_reg = memory[depth_tile_reg + offset_reg] */
+ spe_lqx(f, fbZS_reg, depth_tile_reg, quad_offset_reg);
+
+ /* right shift 32-bit fragment Z to 24 bits */
+ spe_rotmi(f, fragZ_reg, fragZ_reg, -8);
+
+ /* extract 24-bit Z values from ZS values by shifting */
+ spe_rotmi(f, fbZ_reg, fbZS_reg, -8);
+
+ /* extract 8-bit stencil values by masking */
+ spe_and_uint(f, fbS_reg, fbZS_reg, 0x000000ff);
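+ /* e.g. packed ZS value 0x123456ab yields fbZ = 0x00123456, fbS = 0x000000ab */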
+ break;
+
+ case PIPE_FORMAT_Z32_UNORM:
+ /* Load: fbZ_reg = memory[depth_tile_reg + offset_reg] */
+ spe_lqx(f, fbZ_reg, depth_tile_reg, quad_offset_reg);
+
+ /* convert fragment Z from [0,1] to 32-bit ints */
+ spe_cfltu(f, fragZ_reg, fragZ_reg, 32);
+
+ /* No stencil, so can't do anything there */
+ break;
+
+ case PIPE_FORMAT_Z16_UNORM:
+ /* XXX This code for 16bpp Z is broken! */
+
+ /* Load: fbZS_reg = memory[depth_tile_reg + offset_reg] */
+ spe_lqx(f, fbZS_reg, depth_tile_reg, quad_offset_reg);
+
+ /* Copy over 4 32-bit values */
+ spe_move(f, fbZ_reg, fbZS_reg);
+
+ /* convert Z from [0,1] to 16-bit ints */
+ spe_cfltu(f, fragZ_reg, fragZ_reg, 32);
+ spe_rotmi(f, fragZ_reg, fragZ_reg, -16);
+ /* No stencil */
+ break;
+
+ default:
+ ASSERT(0); /* invalid format */
+ }
+
+ /* If stencil is enabled, use the stencil-specific code
+ * generator to generate both the stencil and depth (if needed)
+ * tests. Otherwise, if only depth is enabled, generate
+ * a quick depth test. The test generators themselves will
+ * report back whether the depth/stencil buffer has to be
+ * written back.
+ */
+ if (dsa->stencil[0].enabled) {
+ /* This will perform the stencil and depth tests, and update
+ * the mask_reg, fbZ_reg, and fbS_reg as required by the
+ * tests.
+ */
+ ASSERT(fbS_reg >= 0);
+ spe_comment(f, 0, "Perform stencil test");
+
+ /* Note that fbZ_reg may not be set on entry, if stenciling
+ * is enabled but there's no Z-buffer. The
+ * gen_stencil_depth_test() function must ignore the
+ * fbZ_reg register if depth is not enabled.
+ */
+ write_depth_stencil = gen_stencil_depth_test(f, dsa, facing,
+ mask_reg, fragZ_reg,
+ fbZ_reg, fbS_reg);
+ }
+ else if (dsa->depth.enabled) {
+ int zmask_reg = spe_allocate_available_register(f);
+ ASSERT(fbZ_reg >= 0);
+ spe_comment(f, 0, "Perform depth test");
+ write_depth_stencil = gen_depth_test(f, dsa, mask_reg, fragZ_reg,
+ fbZ_reg, zmask_reg);
+ spe_release_register(f, zmask_reg);
+ }
+ else {
+ write_depth_stencil = FALSE;
+ }
+
+ if (write_depth_stencil) {
+ /* Merge latest Z and Stencil values into fbZS_reg.
+ * fbZ_reg has four Z vals in bits [23..0] or bits [15..0].
+ * fbS_reg has four 8-bit stencil values in bits [7..0].
+ */
+ spe_comment(f, 0, "Store quad's depth/stencil values in tile");
+ if (zs_format == PIPE_FORMAT_S8Z24_UNORM ||
+ zs_format == PIPE_FORMAT_X8Z24_UNORM) {
+ spe_shli(f, fbS_reg, fbS_reg, 24); /* fbS = fbS << 24 */
+ spe_or(f, fbZS_reg, fbS_reg, fbZ_reg); /* fbZS = fbS | fbZ */
+ }
+ else if (zs_format == PIPE_FORMAT_Z24S8_UNORM ||
+ zs_format == PIPE_FORMAT_Z24X8_UNORM) {
+ spe_shli(f, fbZ_reg, fbZ_reg, 8); /* fbZ = fbZ << 8 */
+ spe_or(f, fbZS_reg, fbS_reg, fbZ_reg); /* fbZS = fbS | fbZ */
+ }
+ else if (zs_format == PIPE_FORMAT_Z32_UNORM) {
+ spe_move(f, fbZS_reg, fbZ_reg); /* fbZS = fbZ */
+ }
+ else if (zs_format == PIPE_FORMAT_Z16_UNORM) {
+ spe_move(f, fbZS_reg, fbZ_reg); /* fbZS = fbZ */
+ }
+ else if (zs_format == PIPE_FORMAT_S8_UNORM) {
+ ASSERT(0); /* XXX to do */
+ }
+ else {
+ ASSERT(0); /* bad zs_format */
+ }
+
+ /* Store: memory[depth_tile_reg + quad_offset_reg] = fbZS */
+ spe_stqx(f, fbZS_reg, depth_tile_reg, quad_offset_reg);
+ }
+
+ /* Don't need these any more */
+ spe_release_register(f, fbZS_reg);
+ spe_release_register(f, fbZ_reg);
+ spe_release_register(f, fbS_reg);
+ spe_release_register(f, zmask_reg);
+}
+
+
+
+/**
+ * Generate SPE code to implement the fragment operations (alpha test,
+ * depth test, stencil test, blending, colormask, and final
+ * framebuffer write) as specified by the current context state.
+ *
+ * Logically, this code will be called after running the fragment
+ * shader. But under some circumstances we could run some of this
+ * code before the fragment shader to cull fragments/quads that are
+ * totally occluded/discarded.
+ *
+ * XXX we only support PIPE_FORMAT_Z24S8_UNORM z/stencil buffer right now.
+ *
+ * See the spu_default_fragment_ops() function to see how the per-fragment
+ * operations would be done with ordinary C code.
+ * The code we generate here, though, has no branches, is SIMD, etc., and
+ * should be much faster.
+ *
+ * \param cell the rendering context (in)
+ * \param facing whether the generated code is for front-facing or
+ * back-facing fragments
+ * \param f the generated function (in/out); on input, the function
+ * must already have been initialized. On exit, the fragment ops
+ * will have been appended to whatever instructions were already
+ * in the generated function.
+ */
+void
+cell_gen_fragment_function(struct cell_context *cell,
+ const uint facing,
+ struct spe_function *f)
+{
+ const struct pipe_depth_stencil_alpha_state *dsa = cell->depth_stencil;
+ const struct pipe_blend_state *blend = cell->blend;
+ const struct pipe_blend_color *blend_color = &cell->blend_color;
+ const enum pipe_format color_format = cell->framebuffer.cbufs[0]->format;
+
+ /* For SPE function calls: reg $3 = first param, $4 = second param, etc. */
+ const int x_reg = 3; /* uint */
+ const int y_reg = 4; /* uint */
+ const int color_tile_reg = 5; /* tile_t * */
+ const int depth_tile_reg = 6; /* tile_t * */
+ const int fragZ_reg = 7; /* vector float */
+ const int fragR_reg = 8; /* vector float */
+ const int fragG_reg = 9; /* vector float */
+ const int fragB_reg = 10; /* vector float */
+ const int fragA_reg = 11; /* vector float */
+ const int mask_reg = 12; /* vector uint */
+
+ ASSERT(facing == CELL_FACING_FRONT || facing == CELL_FACING_BACK);
+
+ /* offset of quad from start of tile
+ * XXX assuming 4-byte pixels for color AND Z/stencil!!!!
+ */
+ int quad_offset_reg;
+
+ int fbRGBA_reg; /**< framebuffer's RGBA colors for quad */
+
+ if (cell->debug_flags & CELL_DEBUG_ASM) {
+ spe_print_code(f, TRUE);
+ spe_indent(f, 8);
+ spe_comment(f, -4, facing == CELL_FACING_FRONT
+ ? "Begin front-facing per-fragment ops"
+ : "Begin back-facing per-fragment ops");
+ }
+
+ spe_allocate_register(f, x_reg);
+ spe_allocate_register(f, y_reg);
+ spe_allocate_register(f, color_tile_reg);
+ spe_allocate_register(f, depth_tile_reg);
+ spe_allocate_register(f, fragZ_reg);
+ spe_allocate_register(f, fragR_reg);
+ spe_allocate_register(f, fragG_reg);
+ spe_allocate_register(f, fragB_reg);
+ spe_allocate_register(f, fragA_reg);
+ spe_allocate_register(f, mask_reg);
+
+ quad_offset_reg = spe_allocate_available_register(f);
+ fbRGBA_reg = spe_allocate_available_register(f);
+
+ /* compute offset of quad from start of tile, in bytes */
+ {
+ int x2_reg = spe_allocate_available_register(f);
+ int y2_reg = spe_allocate_available_register(f);
+
+ ASSERT(TILE_SIZE == 32);
+
+ spe_comment(f, 0, "Compute quad offset within tile");
+ spe_rotmi(f, y2_reg, y_reg, -1); /* y2 = y / 2 */
+ spe_rotmi(f, x2_reg, x_reg, -1); /* x2 = x / 2 */
+ spe_shli(f, y2_reg, y2_reg, 4); /* y2 *= 16 */
+ spe_a(f, quad_offset_reg, y2_reg, x2_reg); /* offset = y2 + x2 */
+ spe_shli(f, quad_offset_reg, quad_offset_reg, 4); /* offset *= 16 */
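+ /* i.e. offset = ((y/2) * 16 + x/2) * 16; e.g. the quad at x=4, y=2 is 288 bytes into the tile */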
+
+ spe_release_register(f, x2_reg);
+ spe_release_register(f, y2_reg);
+ }
+
+ /* Generate the alpha test, if needed. */
+ if (dsa->alpha.enabled) {
+ gen_alpha_test(dsa, f, mask_reg, fragA_reg);
+ }
+
+ /* generate depth and/or stencil test code */
+ if (dsa->depth.enabled || dsa->stencil[0].enabled) {
+ gen_depth_stencil(cell, dsa, f,
+ facing,
+ mask_reg,
+ depth_tile_reg,
+ quad_offset_reg,
+ fragZ_reg);
+ }
+
+ /* Get framebuffer quad/colors. We'll need these for blending,
+ * color masking, and to obey the quad/pixel mask.
+ * Load: fbRGBA_reg = memory[color_tile + quad_offset]
+ * Note: if mask={~0,~0,~0,~0} and we're not blending or colormasking
+ * we could skip this load.
+ */
+ spe_comment(f, 0, "Fetch quad colors from tile");
+ spe_lqx(f, fbRGBA_reg, color_tile_reg, quad_offset_reg);
+
+ if (blend->blend_enable) {
+ spe_comment(f, 0, "Perform blending");
+ gen_blend(blend, blend_color, f, color_format,
+ fragR_reg, fragG_reg, fragB_reg, fragA_reg, fbRGBA_reg);
+ }
+
+ /*
+ * Write fragment colors to framebuffer/tile.
+ * This involves converting the fragment colors from float[4] to the
+ * tile's specific format and obeying the quad/pixel mask.
+ */
+ {
+ int rgba_reg = spe_allocate_available_register(f);
+
+ /* Pack four float colors as four 32-bit int colors */
+ spe_comment(f, 0, "Convert float quad colors to packed int framebuffer colors");
+ gen_pack_colors(f, color_format,
+ fragR_reg, fragG_reg, fragB_reg, fragA_reg,
+ rgba_reg);
+
+ if (blend->logicop_enable) {
+ spe_comment(f, 0, "Compute logic op");
+ gen_logicop(blend, f, rgba_reg, fbRGBA_reg);
+ }
+
+ if (blend->colormask != PIPE_MASK_RGBA) {
+ spe_comment(f, 0, "Compute color mask");
+ gen_colormask(f, blend->colormask, color_format, rgba_reg, fbRGBA_reg);
+ }
+
+ /* Mix fragment colors with framebuffer colors using the quad/pixel mask:
+ * if (mask[i])
+ * rgba[i] = rgba[i];
+ * else
+ * rgba[i] = framebuffer[i];
+ */
+ spe_selb(f, rgba_reg, fbRGBA_reg, rgba_reg, mask_reg);
+
+ /* Store updated quad in tile:
+ * memory[color_tile + quad_offset] = rgba_reg;
+ */
+ spe_comment(f, 0, "Store quad colors into color tile");
+ spe_stqx(f, rgba_reg, color_tile_reg, quad_offset_reg);
+
+ spe_release_register(f, rgba_reg);
+ }
+
+ //printf("gen_fragment_ops nr instructions: %u\n", f->num_inst);
+
+ spe_bi(f, SPE_REG_RA, 0, 0); /* return from function call */
+
+ spe_release_register(f, fbRGBA_reg);
+ spe_release_register(f, quad_offset_reg);
+
+ if (cell->debug_flags & CELL_DEBUG_ASM) {
+ char buffer[1024];
+ sprintf(buffer, "End %s-facing per-fragment ops: %d instructions",
+ facing == CELL_FACING_FRONT ? "front" : "back", f->num_inst);
+ spe_comment(f, -4, buffer);
+ }
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_gen_fragment.h b/src/gallium/drivers/cell/ppu/cell_gen_fragment.h
new file mode 100644
index 0000000000..21b35d1faf
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_gen_fragment.h
@@ -0,0 +1,38 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef CELL_GEN_FRAGMENT_H
+#define CELL_GEN_FRAGMENT_H
+
+
+extern void
+cell_gen_fragment_function(struct cell_context *cell, const uint facing, struct spe_function *f);
+
+
+#endif /* CELL_GEN_FRAGMENT_H */
+
diff --git a/src/gallium/drivers/cell/ppu/cell_pipe_state.c b/src/gallium/drivers/cell/ppu/cell_pipe_state.c
new file mode 100644
index 0000000000..ccd0fef6e8
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_pipe_state.c
@@ -0,0 +1,407 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Brian Paul
+ */
+
+#include "util/u_memory.h"
+#include "pipe/p_inlines.h"
+#include "draw/draw_context.h"
+#include "cell_context.h"
+#include "cell_flush.h"
+#include "cell_pipe_state.h"
+#include "cell_state.h"
+#include "cell_texture.h"
+
+
+
+static void *
+cell_create_blend_state(struct pipe_context *pipe,
+ const struct pipe_blend_state *blend)
+{
+ return mem_dup(blend, sizeof(*blend));
+}
+
+
+static void
+cell_bind_blend_state(struct pipe_context *pipe, void *blend)
+{
+ struct cell_context *cell = cell_context(pipe);
+
+ draw_flush(cell->draw);
+
+ cell->blend = (struct pipe_blend_state *) blend;
+ cell->dirty |= CELL_NEW_BLEND;
+}
+
+
+static void
+cell_delete_blend_state(struct pipe_context *pipe, void *blend)
+{
+ FREE(blend);
+}
+
+
+static void
+cell_set_blend_color(struct pipe_context *pipe,
+ const struct pipe_blend_color *blend_color)
+{
+ struct cell_context *cell = cell_context(pipe);
+
+ draw_flush(cell->draw);
+
+ cell->blend_color = *blend_color;
+
+ cell->dirty |= CELL_NEW_BLEND;
+}
+
+
+
+
+static void *
+cell_create_depth_stencil_alpha_state(struct pipe_context *pipe,
+ const struct pipe_depth_stencil_alpha_state *dsa)
+{
+ return mem_dup(dsa, sizeof(*dsa));
+}
+
+
+static void
+cell_bind_depth_stencil_alpha_state(struct pipe_context *pipe,
+ void *dsa)
+{
+ struct cell_context *cell = cell_context(pipe);
+
+ draw_flush(cell->draw);
+
+ cell->depth_stencil = (struct pipe_depth_stencil_alpha_state *) dsa;
+ cell->dirty |= CELL_NEW_DEPTH_STENCIL;
+}
+
+
+static void
+cell_delete_depth_stencil_alpha_state(struct pipe_context *pipe, void *dsa)
+{
+ FREE(dsa);
+}
+
+
+static void
+cell_set_clip_state(struct pipe_context *pipe,
+ const struct pipe_clip_state *clip)
+{
+ struct cell_context *cell = cell_context(pipe);
+
+ /* pass the clip state to the draw module */
+ draw_set_clip_state(cell->draw, clip);
+}
+
+
+
+/* Called when driver state tracker notices changes to the viewport
+ * matrix:
+ */
+static void
+cell_set_viewport_state( struct pipe_context *pipe,
+ const struct pipe_viewport_state *viewport )
+{
+ struct cell_context *cell = cell_context(pipe);
+
+ cell->viewport = *viewport; /* struct copy */
+ cell->dirty |= CELL_NEW_VIEWPORT;
+
+ /* pass the viewport info to the draw module */
+ draw_set_viewport_state(cell->draw, viewport);
+
+ /* Using tnl/ and vf/ modules is temporary while getting started.
+ * Full pipe will have vertex shader, vertex fetch of its own.
+ */
+}
+
+
+static void
+cell_set_scissor_state( struct pipe_context *pipe,
+ const struct pipe_scissor_state *scissor )
+{
+ struct cell_context *cell = cell_context(pipe);
+
+ memcpy( &cell->scissor, scissor, sizeof(*scissor) );
+ cell->dirty |= CELL_NEW_SCISSOR;
+}
+
+
+static void
+cell_set_polygon_stipple( struct pipe_context *pipe,
+ const struct pipe_poly_stipple *stipple )
+{
+ struct cell_context *cell = cell_context(pipe);
+
+ memcpy( &cell->poly_stipple, stipple, sizeof(*stipple) );
+ cell->dirty |= CELL_NEW_STIPPLE;
+}
+
+
+
+static void *
+cell_create_rasterizer_state(struct pipe_context *pipe,
+ const struct pipe_rasterizer_state *rasterizer)
+{
+ return mem_dup(rasterizer, sizeof(*rasterizer));
+}
+
+
+static void
+cell_bind_rasterizer_state(struct pipe_context *pipe, void *rast)
+{
+ struct pipe_rasterizer_state *rasterizer =
+ (struct pipe_rasterizer_state *) rast;
+ struct cell_context *cell = cell_context(pipe);
+
+ /* pass-through to draw module */
+ draw_set_rasterizer_state(cell->draw, rasterizer);
+
+ cell->rasterizer = rasterizer;
+
+ cell->dirty |= CELL_NEW_RASTERIZER;
+}
+
+
+static void
+cell_delete_rasterizer_state(struct pipe_context *pipe, void *rasterizer)
+{
+ FREE(rasterizer);
+}
+
+
+
+static void *
+cell_create_sampler_state(struct pipe_context *pipe,
+ const struct pipe_sampler_state *sampler)
+{
+ return mem_dup(sampler, sizeof(*sampler));
+}
+
+
+static void
+cell_bind_sampler_states(struct pipe_context *pipe,
+ unsigned num, void **samplers)
+{
+ struct cell_context *cell = cell_context(pipe);
+ uint i, changed = 0x0;
+
+ assert(num <= CELL_MAX_SAMPLERS);
+
+ draw_flush(cell->draw);
+
+ for (i = 0; i < CELL_MAX_SAMPLERS; i++) {
+ struct pipe_sampler_state *new_samp = i < num ? samplers[i] : NULL;
+ if (cell->sampler[i] != new_samp) {
+ cell->sampler[i] = new_samp;
+ changed |= (1 << i);
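+ /* remember which sampler slots changed; e.g. replacing only sampler 2 sets bit 2 (changed = 0x4) */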
+ }
+ }
+
+ if (changed) {
+ cell->dirty |= CELL_NEW_SAMPLER;
+ cell->dirty_samplers |= changed;
+ }
+}
+
+
+static void
+cell_delete_sampler_state(struct pipe_context *pipe,
+ void *sampler)
+{
+ FREE( sampler );
+}
+
+
+
+static void
+cell_set_sampler_textures(struct pipe_context *pipe,
+ unsigned num, struct pipe_texture **texture)
+{
+ struct cell_context *cell = cell_context(pipe);
+ uint i, changed = 0x0;
+
+ assert(num <= CELL_MAX_SAMPLERS);
+
+ for (i = 0; i < CELL_MAX_SAMPLERS; i++) {
+ struct cell_texture *new_tex = cell_texture(i < num ? texture[i] : NULL);
+ struct cell_texture *old_tex = cell->texture[i];
+ if (old_tex != new_tex) {
+
+ pipe_texture_reference((struct pipe_texture **) &cell->texture[i],
+ (struct pipe_texture *) new_tex);
+
+ changed |= (1 << i);
+ }
+ }
+
+ cell->num_textures = num;
+
+ if (changed) {
+ cell->dirty |= CELL_NEW_TEXTURE;
+ cell->dirty_textures |= changed;
+ }
+}
+
+
+/**
+ * Map color and z/stencil framebuffer surfaces.
+ */
+static void
+cell_map_surfaces(struct cell_context *cell)
+{
+ struct pipe_screen *screen = cell->pipe.screen;
+ uint i;
+
+ for (i = 0; i < 1; i++) {
+ struct pipe_surface *ps = cell->framebuffer.cbufs[i];
+ if (ps) {
+ struct cell_texture *ct = cell_texture(ps->texture);
+ cell->cbuf_map[i] = screen->buffer_map(screen,
+ ct->buffer,
+ (PIPE_BUFFER_USAGE_GPU_READ |
+ PIPE_BUFFER_USAGE_GPU_WRITE));
+ }
+ }
+
+ {
+ struct pipe_surface *ps = cell->framebuffer.zsbuf;
+ if (ps) {
+ struct cell_texture *ct = cell_texture(ps->texture);
+ cell->zsbuf_map = screen->buffer_map(screen,
+ ct->buffer,
+ (PIPE_BUFFER_USAGE_GPU_READ |
+ PIPE_BUFFER_USAGE_GPU_WRITE));
+ }
+ }
+}
+
+
+/**
+ * Unmap color and z/stencil framebuffer surfaces.
+ */
+static void
+cell_unmap_surfaces(struct cell_context *cell)
+{
+ struct pipe_screen *screen = cell->pipe.screen;
+ uint i;
+
+ for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
+ struct pipe_surface *ps = cell->framebuffer.cbufs[i];
+ if (ps && cell->cbuf_map[i]) {
+ struct cell_texture *ct = cell_texture(ps->texture);
+ assert(ps->texture);
+ assert(ct->buffer);
+
+ screen->buffer_unmap(screen, ct->buffer);
+ cell->cbuf_map[i] = NULL;
+ }
+ }
+
+ {
+ struct pipe_surface *ps = cell->framebuffer.zsbuf;
+ if (ps && cell->zsbuf_map) {
+ struct cell_texture *ct = cell_texture(ps->texture);
+ screen->buffer_unmap(screen, ct->buffer);
+ cell->zsbuf_map = NULL;
+ }
+ }
+}
+
+
+static void
+cell_set_framebuffer_state(struct pipe_context *pipe,
+ const struct pipe_framebuffer_state *fb)
+{
+ struct cell_context *cell = cell_context(pipe);
+
+ if (1 /*memcmp(&cell->framebuffer, fb, sizeof(*fb))*/) {
+ uint i;
+
+ /* unmap old surfaces */
+ cell_unmap_surfaces(cell);
+
+ /* Finish any pending rendering to the current surface before
+ * installing a new surface!
+ */
+ cell_flush_int(cell, CELL_FLUSH_WAIT);
+
+ /* update my state
+ * (this is also where old surfaces will finally get freed)
+ */
+ cell->framebuffer.width = fb->width;
+ cell->framebuffer.height = fb->height;
+ cell->framebuffer.nr_cbufs = fb->nr_cbufs;
+ for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
+ pipe_surface_reference(&cell->framebuffer.cbufs[i], fb->cbufs[i]);
+ }
+ pipe_surface_reference(&cell->framebuffer.zsbuf, fb->zsbuf);
+
+ /* map new surfaces */
+ cell_map_surfaces(cell);
+
+ cell->dirty |= CELL_NEW_FRAMEBUFFER;
+ }
+}
+
+
+
+void
+cell_init_state_functions(struct cell_context *cell)
+{
+ cell->pipe.create_blend_state = cell_create_blend_state;
+ cell->pipe.bind_blend_state = cell_bind_blend_state;
+ cell->pipe.delete_blend_state = cell_delete_blend_state;
+
+ cell->pipe.create_sampler_state = cell_create_sampler_state;
+ cell->pipe.bind_sampler_states = cell_bind_sampler_states;
+ cell->pipe.delete_sampler_state = cell_delete_sampler_state;
+
+ cell->pipe.set_sampler_textures = cell_set_sampler_textures;
+
+ cell->pipe.create_depth_stencil_alpha_state = cell_create_depth_stencil_alpha_state;
+ cell->pipe.bind_depth_stencil_alpha_state = cell_bind_depth_stencil_alpha_state;
+ cell->pipe.delete_depth_stencil_alpha_state = cell_delete_depth_stencil_alpha_state;
+
+ cell->pipe.create_rasterizer_state = cell_create_rasterizer_state;
+ cell->pipe.bind_rasterizer_state = cell_bind_rasterizer_state;
+ cell->pipe.delete_rasterizer_state = cell_delete_rasterizer_state;
+
+ cell->pipe.set_blend_color = cell_set_blend_color;
+ cell->pipe.set_clip_state = cell_set_clip_state;
+
+ cell->pipe.set_framebuffer_state = cell_set_framebuffer_state;
+
+ cell->pipe.set_polygon_stipple = cell_set_polygon_stipple;
+ cell->pipe.set_scissor_state = cell_set_scissor_state;
+ cell->pipe.set_viewport_state = cell_set_viewport_state;
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_pipe_state.h b/src/gallium/drivers/cell/ppu/cell_pipe_state.h
new file mode 100644
index 0000000000..1889bd52ff
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_pipe_state.h
@@ -0,0 +1,39 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef CELL_PIPE_STATE_H
+#define CELL_PIPE_STATE_H
+
+
+struct cell_context;
+
+extern void
+cell_init_state_functions(struct cell_context *cell);
+
+
+#endif /* CELL_PIPE_STATE_H */
diff --git a/src/gallium/drivers/cell/ppu/cell_render.c b/src/gallium/drivers/cell/ppu/cell_render.c
new file mode 100644
index 0000000000..79cb8df82f
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_render.c
@@ -0,0 +1,211 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \brief Last stage of 'draw' pipeline: send tris to SPUs.
+ * \author Brian Paul
+ */
+
+#include "cell_context.h"
+#include "cell_render.h"
+#include "cell_spu.h"
+#include "util/u_memory.h"
+#include "draw/draw_private.h"
+
+
+struct render_stage {
+ struct draw_stage stage; /**< This must be first (base class) */
+
+ struct cell_context *cell;
+};
+
+
+static INLINE struct render_stage *
+render_stage(struct draw_stage *stage)
+{
+ return (struct render_stage *) stage;
+}
+
+
+static void render_begin( struct draw_stage *stage )
+{
+#if 0
+ struct render_stage *render = render_stage(stage);
+ struct cell_context *sp = render->cell;
+ const struct pipe_shader_state *fs = &render->cell->fs->shader;
+ render->quad.nr_attrs = render->cell->nr_frag_attrs;
+
+ render->firstFpInput = fs->input_semantic_name[0];
+
+ sp->quad.first->begin(sp->quad.first);
+#endif
+}
+
+
+static void render_end( struct draw_stage *stage )
+{
+}
+
+
+static void reset_stipple_counter( struct draw_stage *stage )
+{
+ struct render_stage *render = render_stage(stage);
+ /*render->cell->line_stipple_counter = 0;*/
+}
+
+
+static void
+render_point(struct draw_stage *stage, struct prim_header *prim)
+{
+}
+
+
+static void
+render_line(struct draw_stage *stage, struct prim_header *prim)
+{
+}
+
+
+/** Write a vertex into the prim buffer */
+static void
+save_vertex(struct cell_prim_buffer *buf, uint pos,
+ const struct vertex_header *vert)
+{
+ uint attr, j;
+
+ for (attr = 0; attr < 2; attr++) {
+ for (j = 0; j < 4; j++) {
+ buf->vertex[pos][attr][j] = vert->data[attr][j];
+ }
+ }
+
+ /* update bounding box */
+ if (vert->data[0][0] < buf->xmin)
+ buf->xmin = vert->data[0][0];
+ if (vert->data[0][0] > buf->xmax)
+ buf->xmax = vert->data[0][0];
+ if (vert->data[0][1] < buf->ymin)
+ buf->ymin = vert->data[0][1];
+ if (vert->data[0][1] > buf->ymax)
+ buf->ymax = vert->data[0][1];
+}
+
+
+static void
+render_tri(struct draw_stage *stage, struct prim_header *prim)
+{
+ struct render_stage *rs = render_stage(stage);
+ struct cell_context *cell = rs->cell;
+ struct cell_prim_buffer *buf = &cell->prim_buffer;
+ uint i;
+
+ if (buf->num_verts + 3 > CELL_MAX_VERTS) {
+ cell_flush_prim_buffer(cell);
+ }
+
+ i = buf->num_verts;
+ assert(i+3 <= CELL_MAX_VERTS);
+ save_vertex(buf, i+0, prim->v[0]);
+ save_vertex(buf, i+1, prim->v[1]);
+ save_vertex(buf, i+2, prim->v[2]);
+ buf->num_verts += 3;
+}
+
+
+/**
+ * Send a RENDER command to all SPUs to have them render the prims
+ * in the current prim_buffer.
+ */
+void
+cell_flush_prim_buffer(struct cell_context *cell)
+{
+ uint i;
+
+ if (cell->prim_buffer.num_verts == 0)
+ return;
+
+ for (i = 0; i < cell->num_spus; i++) {
+ struct cell_command_render *render = &cell_global.command[i].render;
+ render->prim_type = PIPE_PRIM_TRIANGLES;
+ render->num_verts = cell->prim_buffer.num_verts;
+ render->front_winding = cell->rasterizer->front_winding;
+ render->vertex_size = cell->vertex_info->size * 4;
+ render->xmin = cell->prim_buffer.xmin;
+ render->ymin = cell->prim_buffer.ymin;
+ render->xmax = cell->prim_buffer.xmax;
+ render->ymax = cell->prim_buffer.ymax;
+ render->vertex_data = &cell->prim_buffer.vertex;
+ ASSERT_ALIGN16(render->vertex_data);
+ send_mbox_message(cell_global.spe_contexts[i], CELL_CMD_RENDER);
+ }
+
+ cell->prim_buffer.num_verts = 0;
+
+ cell->prim_buffer.xmin = 1e100;
+ cell->prim_buffer.ymin = 1e100;
+ cell->prim_buffer.xmax = -1e100;
+ cell->prim_buffer.ymax = -1e100;
+
+ /* XXX temporary, need to double-buffer the prim buffer until we get
+ * a real command buffer/list system.
+ */
+ cell_flush(&cell->pipe, 0x0);
+}
+
+
+
+static void render_destroy( struct draw_stage *stage )
+{
+ FREE( stage );
+}
+
+
+/**
+ * Create a new draw/render stage. This will be plugged into the
+ * draw module as the last pipeline stage.
+ */
+struct draw_stage *cell_draw_render_stage( struct cell_context *cell )
+{
+ struct render_stage *render = CALLOC_STRUCT(render_stage);
+
+ render->cell = cell;
+ render->stage.draw = cell->draw;
+ render->stage.begin = render_begin;
+ render->stage.point = render_point;
+ render->stage.line = render_line;
+ render->stage.tri = render_tri;
+ render->stage.end = render_end;
+ render->stage.reset_stipple_counter = reset_stipple_counter;
+ render->stage.destroy = render_destroy;
+
+ /*
+ render->quad.coef = render->coef;
+ render->quad.posCoef = &render->posCoef;
+ */
+
+ return &render->stage;
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_render.h b/src/gallium/drivers/cell/ppu/cell_render.h
new file mode 100644
index 0000000000..826dcbafeb
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_render.h
@@ -0,0 +1,39 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef CELL_RENDER_H
+#define CELL_RENDER_H
+
+struct cell_context;
+struct draw_stage;
+
+extern void
+cell_flush_prim_buffer(struct cell_context *cell);
+
+extern struct draw_stage *cell_draw_render_stage( struct cell_context *cell );
+
+#endif /* CELL_RENDER_H */
diff --git a/src/gallium/drivers/cell/ppu/cell_screen.c b/src/gallium/drivers/cell/ppu/cell_screen.c
new file mode 100644
index 0000000000..bd48ce7005
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_screen.c
@@ -0,0 +1,178 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "util/u_memory.h"
+#include "util/u_simple_screen.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_screen.h"
+
+#include "cell/common.h"
+#include "cell_screen.h"
+#include "cell_texture.h"
+#include "cell_winsys.h"
+
+
+static const char *
+cell_get_vendor(struct pipe_screen *screen)
+{
+ return "Tungsten Graphics, Inc.";
+}
+
+
+static const char *
+cell_get_name(struct pipe_screen *screen)
+{
+ return "Cell";
+}
+
+
+static int
+cell_get_param(struct pipe_screen *screen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_TEXTURE_IMAGE_UNITS:
+ return CELL_MAX_SAMPLERS;
+ case PIPE_CAP_NPOT_TEXTURES:
+ return 1;
+ case PIPE_CAP_TWO_SIDED_STENCIL:
+ return 1;
+ case PIPE_CAP_GLSL:
+ return 1;
+ case PIPE_CAP_S3TC:
+ return 0;
+ case PIPE_CAP_ANISOTROPIC_FILTER:
+ return 0;
+ case PIPE_CAP_POINT_SPRITE:
+ return 1;
+ case PIPE_CAP_MAX_RENDER_TARGETS:
+ return 1;
+ case PIPE_CAP_OCCLUSION_QUERY:
+ return 1;
+ case PIPE_CAP_TEXTURE_SHADOW_MAP:
+ return 10;
+ case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
+ return CELL_MAX_TEXTURE_LEVELS;
+ case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
+ return 8; /* max 128x128x128 */
+ case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
+ return CELL_MAX_TEXTURE_LEVELS;
+ case PIPE_CAP_TEXTURE_MIRROR_REPEAT:
+ return 1; /* XXX not really true */
+ case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
+ return 0; /* XXX to do */
+ case PIPE_CAP_TGSI_CONT_SUPPORTED:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+
+static float
+cell_get_paramf(struct pipe_screen *screen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_LINE_WIDTH:
+ /* fall-through */
+ case PIPE_CAP_MAX_LINE_WIDTH_AA:
+ return 255.0; /* arbitrary */
+
+ case PIPE_CAP_MAX_POINT_WIDTH:
+ /* fall-through */
+ case PIPE_CAP_MAX_POINT_WIDTH_AA:
+ return 255.0; /* arbitrary */
+
+ case PIPE_CAP_MAX_TEXTURE_ANISOTROPY:
+ return 0.0;
+
+ case PIPE_CAP_MAX_TEXTURE_LOD_BIAS:
+ return 16.0; /* arbitrary */
+
+ default:
+ return 0;
+ }
+}
+
+
+static boolean
+cell_is_format_supported( struct pipe_screen *screen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned tex_usage,
+ unsigned geom_flags )
+{
+ /* cell supports most formats, XXX for now anyway */
+ if (format == PIPE_FORMAT_DXT5_RGBA ||
+ format == PIPE_FORMAT_R8G8B8A8_SRGB)
+ return FALSE;
+ else
+ return TRUE;
+}
+
+
+static void
+cell_destroy_screen( struct pipe_screen *screen )
+{
+ struct pipe_winsys *winsys = screen->winsys;
+
+ if(winsys->destroy)
+ winsys->destroy(winsys);
+
+ FREE(screen);
+}
+
+
+/**
+ * Create a new pipe_screen object.
+ * Note: we're not presently subclassing pipe_screen (no cell_screen) but
+ * that would be the place to put SPU thread/context info...
+ */
+struct pipe_screen *
+cell_create_screen(struct pipe_winsys *winsys)
+{
+ struct pipe_screen *screen = CALLOC_STRUCT(pipe_screen);
+
+ if (!screen)
+ return NULL;
+
+ screen->winsys = winsys;
+
+ screen->destroy = cell_destroy_screen;
+
+ screen->get_name = cell_get_name;
+ screen->get_vendor = cell_get_vendor;
+ screen->get_param = cell_get_param;
+ screen->get_paramf = cell_get_paramf;
+ screen->is_format_supported = cell_is_format_supported;
+
+ cell_init_screen_texture_funcs(screen);
+ u_simple_screen_init(screen);
+
+ return screen;
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_screen.h b/src/gallium/drivers/cell/ppu/cell_screen.h
new file mode 100644
index 0000000000..c7e15889d6
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_screen.h
@@ -0,0 +1,41 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef CELL_SCREEN_H
+#define CELL_SCREEN_H
+
+
+struct pipe_screen;
+struct pipe_winsys;
+
+
+extern struct pipe_screen *
+cell_create_screen(struct pipe_winsys *winsys);
+
+
+#endif /* CELL_SCREEN_H */
diff --git a/src/gallium/drivers/cell/ppu/cell_spu.c b/src/gallium/drivers/cell/ppu/cell_spu.c
new file mode 100644
index 0000000000..28e5e6d706
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_spu.c
@@ -0,0 +1,219 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * Utility/wrappers for communicating with the SPUs.
+ */
+
+
+#include <pthread.h>
+
+#include "cell_spu.h"
+#include "pipe/p_format.h"
+#include "pipe/p_state.h"
+#include "util/u_memory.h"
+#include "cell/common.h"
+
+
+/*
+helpful headers:
+/opt/ibm/cell-sdk/prototype/src/include/ppu/cbe_mfc.h
+*/
+
+
+/**
+ * Cell/SPU info that's not per-context.
+ */
+struct cell_global_info cell_global;
+
+
+/**
+ * Scan /proc/cpuinfo to determine the timebase for the system.
+ * This is used by the SPUs to convert 'decrementer' ticks to seconds.
+ * There may be a better way to get this value...
+ */
+static unsigned
+get_timebase(void)
+{
+ FILE *f = fopen("/proc/cpuinfo", "r");
+ unsigned timebase = 0;
+
+ assert(f);
+ while (!feof(f)) {
+ char line[80];
+ if (!fgets(line, sizeof(line), f))
+ break;
+ if (strncmp(line, "timebase", 8) == 0) {
+ char *colon = strchr(line, ':');
+ if (colon) {
+ timebase = atoi(colon + 2);
+ break;
+ }
+ }
+ }
+ fclose(f);
+
+ return timebase;
+}
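+
+/* Worked example (numbers assumed, not taken from this code): if
+ * /proc/cpuinfo reports "timebase : 79800000", i.e. 79.8M decrementer
+ * ticks per second, cell_start_spus() below passes
+ * inv_timebase = 1000.0f / 79800000 to each SPU, so multiplying an
+ * elapsed tick count by that factor yields milliseconds.
+ */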
+
+
+/**
+ * Write a 1-word message to the given SPE mailbox.
+ */
+void
+send_mbox_message(spe_context_ptr_t ctx, unsigned int msg)
+{
+ spe_in_mbox_write(ctx, &msg, 1, SPE_MBOX_ALL_BLOCKING);
+}
+
+
+/**
+ * Wait for a 1-word message to arrive in given mailbox.
+ */
+uint
+wait_mbox_message(spe_context_ptr_t ctx)
+{
+ do {
+ unsigned data;
+ int count = spe_out_mbox_read(ctx, &data, 1);
+
+ if (count == 1) {
+ return data;
+ }
+
+ if (count < 0) {
+ /* error */ ;
+ }
+ } while (1);
+}
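+
+/* Usage sketch (not compiled into the driver; the helper name is
+ * illustrative and 'cmd' stands in for one of the CELL_CMD_* codes from
+ * cell/common.h): a synchronous PPU -> SPU round-trip built from the two
+ * mailbox wrappers above.
+ */
+#if 0
+static uint
+broadcast_and_wait(struct cell_context *cell, uint cmd)
+{
+ uint i, reply = 0;
+ /* post the command word to each SPU's inbound mailbox */
+ for (i = 0; i < cell->num_spus; i++) {
+ send_mbox_message(cell_global.spe_contexts[i], cmd);
+ }
+ /* block until every SPU acknowledges via its outbound mailbox */
+ for (i = 0; i < cell->num_spus; i++) {
+ reply = wait_mbox_message(cell_global.spe_contexts[i]);
+ }
+ return reply;
+}
+#endif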
+
+
+/**
+ * Called by pthread_create() to spawn an SPU thread.
+ */
+static void *
+cell_thread_function(void *arg)
+{
+ struct cell_init_info *init = (struct cell_init_info *) arg;
+ unsigned entry = SPE_DEFAULT_ENTRY;
+
+ ASSERT_ALIGN16(init);
+
+ if (spe_context_run(cell_global.spe_contexts[init->id], &entry, 0,
+ init, NULL, NULL) < 0) {
+ fprintf(stderr, "spe_context_run() failed\n");
+ exit(1);
+ }
+
+ pthread_exit(NULL);
+}
+
+
+/**
+ * Create the SPU threads. This is done once during driver initialization.
+ * This involves setting up the "init" message which is sent to each SPU.
+ * The init message specifies an SPU id, total number of SPUs, location
+ * and number of batch buffers, etc.
+ */
+void
+cell_start_spus(struct cell_context *cell)
+{
+ static boolean one_time_init = FALSE;
+ uint i, j;
+ uint timebase = get_timebase();
+
+ if (one_time_init) {
+ fprintf(stderr, "PPU: Multiple rendering contexts not yet supported "
+ "on Cell.\n");
+ abort();
+ }
+
+ one_time_init = TRUE;
+
+ assert(cell->num_spus <= CELL_MAX_SPUS);
+
+ ASSERT_ALIGN16(&cell_global.inits[0]);
+ ASSERT_ALIGN16(&cell_global.inits[1]);
+
+ /*
+ * Initialize the global 'inits' structure for each SPU.
+ * A pointer to the init struct will be passed to each SPU.
+ * The SPUs will then each grab their init info with mfc_get().
+ */
+ for (i = 0; i < cell->num_spus; i++) {
+ cell_global.inits[i].id = i;
+ cell_global.inits[i].num_spus = cell->num_spus;
+ cell_global.inits[i].debug_flags = cell->debug_flags;
+ cell_global.inits[i].inv_timebase = 1000.0f / timebase;
+
+ for (j = 0; j < CELL_NUM_BUFFERS; j++) {
+ cell_global.inits[i].buffers[j] = cell->buffer[j];
+ }
+ cell_global.inits[i].buffer_status = &cell->buffer_status[0][0][0];
+
+ cell_global.inits[i].spu_functions = &cell->spu_functions;
+
+ cell_global.spe_contexts[i] = spe_context_create(0, NULL);
+ if (!cell_global.spe_contexts[i]) {
+ fprintf(stderr, "spe_context_create() failed\n");
+ exit(1);
+ }
+
+ if (spe_program_load(cell_global.spe_contexts[i], &g3d_spu)) {
+ fprintf(stderr, "spe_program_load() failed\n");
+ exit(1);
+ }
+
+ pthread_create(&cell_global.spe_threads[i], /* returned thread handle */
+ NULL, /* pthread attribs */
+ &cell_thread_function, /* start routine */
+ &cell_global.inits[i]); /* thread argument */
+ }
+}
+
+
+/**
+ * Tell all the SPUs to stop/exit.
+ * This is done when the driver's exiting / cleaning up.
+ */
+void
+cell_spu_exit(struct cell_context *cell)
+{
+ uint i;
+
+ for (i = 0; i < cell->num_spus; i++) {
+ send_mbox_message(cell_global.spe_contexts[i], CELL_CMD_EXIT);
+ }
+
+ /* wait for threads to exit */
+ for (i = 0; i < cell->num_spus; i++) {
+ void *value;
+ pthread_join(cell_global.spe_threads[i], &value);
+ cell_global.spe_threads[i] = 0;
+ cell_global.spe_contexts[i] = 0;
+ }
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_spu.h b/src/gallium/drivers/cell/ppu/cell_spu.h
new file mode 100644
index 0000000000..c93958a9ed
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_spu.h
@@ -0,0 +1,79 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef CELL_SPU
+#define CELL_SPU
+
+
+#include <libspe2.h>
+#include <pthread.h>
+#include "cell/common.h"
+
+#include "cell_context.h"
+
+
+/**
+ * Global vars, for now anyway.
+ */
+struct cell_global_info
+{
+ /**
+ * SPU/SPE handles, etc
+ */
+ spe_context_ptr_t spe_contexts[CELL_MAX_SPUS];
+ pthread_t spe_threads[CELL_MAX_SPUS];
+
+ /**
+ * Data sent to SPUs at start-up
+ */
+ struct cell_init_info inits[CELL_MAX_SPUS];
+};
+
+
+extern struct cell_global_info cell_global;
+
+
+/** This is the handle for the actual SPE code */
+extern spe_program_handle_t g3d_spu;
+
+
+extern void
+send_mbox_message(spe_context_ptr_t ctx, unsigned int msg);
+
+extern uint
+wait_mbox_message(spe_context_ptr_t ctx);
+
+
+extern void
+cell_start_spus(struct cell_context *cell);
+
+
+extern void
+cell_spu_exit(struct cell_context *cell);
+
+
+#endif /* CELL_SPU */
diff --git a/src/gallium/drivers/cell/ppu/cell_state.h b/src/gallium/drivers/cell/ppu/cell_state.h
new file mode 100644
index 0000000000..b193170f9c
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_state.h
@@ -0,0 +1,65 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef CELL_STATE_H
+#define CELL_STATE_H
+
+
+#define CELL_NEW_VIEWPORT 0x1
+#define CELL_NEW_RASTERIZER 0x2
+#define CELL_NEW_FS 0x4
+#define CELL_NEW_BLEND 0x8
+#define CELL_NEW_CLIP 0x10
+#define CELL_NEW_SCISSOR 0x20
+#define CELL_NEW_STIPPLE 0x40
+#define CELL_NEW_FRAMEBUFFER 0x80
+#define CELL_NEW_ALPHA_TEST 0x100
+#define CELL_NEW_DEPTH_STENCIL 0x200
+#define CELL_NEW_SAMPLER 0x400
+#define CELL_NEW_TEXTURE 0x800
+#define CELL_NEW_VERTEX 0x1000
+#define CELL_NEW_VS 0x2000
+#define CELL_NEW_VS_CONSTANTS 0x4000
+#define CELL_NEW_FS_CONSTANTS 0x8000
+#define CELL_NEW_VERTEX_INFO 0x10000
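+
+/* These bits are OR'd into cell_context::dirty by the state-binding
+ * functions and consumed by cell_update_derived(), which re-derives any
+ * dependent state, calls cell_emit_state() to send the corresponding
+ * commands to the SPUs, and then clears the mask. A bind function would
+ * typically do (sketch):
+ *
+ * cell->rasterizer = rasterizer;
+ * cell->dirty |= CELL_NEW_RASTERIZER;
+ */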
+
+
+extern void
+cell_update_derived( struct cell_context *cell );
+
+
+extern void
+cell_init_shader_functions(struct cell_context *cell);
+
+
+extern void
+cell_init_vertex_functions(struct cell_context *cell);
+
+
+#endif /* CELL_STATE_H */
+
diff --git a/src/gallium/drivers/cell/ppu/cell_state_derived.c b/src/gallium/drivers/cell/ppu/cell_state_derived.c
new file mode 100644
index 0000000000..efc4f78364
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_state_derived.c
@@ -0,0 +1,170 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_memory.h"
+#include "pipe/p_shader_tokens.h"
+#include "draw/draw_context.h"
+#include "draw/draw_vertex.h"
+#include "cell_context.h"
+#include "cell_batch.h"
+#include "cell_state.h"
+#include "cell_state_emit.h"
+
+
+/**
+ * Determine how to map vertex program outputs to fragment program inputs.
+ * Basically, this will be used when computing the triangle interpolation
+ * coefficients from the post-transform vertex attributes.
+ */
+static void
+calculate_vertex_layout( struct cell_context *cell )
+{
+ const struct cell_fragment_shader_state *fs = cell->fs;
+ const enum interp_mode colorInterp
+ = cell->rasterizer->flatshade ? INTERP_CONSTANT : INTERP_LINEAR;
+ struct vertex_info *vinfo = &cell->vertex_info;
+ uint i;
+ int src;
+
+#if 0
+ if (cell->vbuf) {
+ /* if using the post-transform vertex buffer, tell draw_vbuf to
+ * simply emit the whole post-xform vertex as-is:
+ */
+ struct vertex_info *vinfo_vbuf = &cell->vertex_info_vbuf;
+ vinfo_vbuf->num_attribs = 0;
+ draw_emit_vertex_attr(vinfo_vbuf, EMIT_ALL, INTERP_NONE, 0);
+ vinfo_vbuf->size = 4 * vs->num_outputs + sizeof(struct vertex_header)/4;
+ }
+#endif
+
+ /* reset vinfo */
+ vinfo->num_attribs = 0;
+
+ /* we always want to emit vertex pos */
+ src = draw_find_vs_output(cell->draw, TGSI_SEMANTIC_POSITION, 0);
+ assert(src >= 0);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_POS, src);
+
+
+ /*
+ * Loop over fragment shader inputs, searching for the matching output
+ * from the vertex shader.
+ */
+ for (i = 0; i < fs->info.num_inputs; i++) {
+ switch (fs->info.input_semantic_name[i]) {
+ case TGSI_SEMANTIC_POSITION:
+ /* already done above */
+ break;
+
+ case TGSI_SEMANTIC_COLOR:
+ src = draw_find_vs_output(cell->draw, TGSI_SEMANTIC_COLOR,
+ fs->info.input_semantic_index[i]);
+ assert(src >= 0);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, colorInterp, src);
+ break;
+
+ case TGSI_SEMANTIC_FOG:
+ src = draw_find_vs_output(cell->draw, TGSI_SEMANTIC_FOG, 0);
+#if 1
+ if (src < 0) /* XXX temp hack, try demos/fogcoord.c with this */
+ src = 0;
+#endif
+ assert(src >= 0);
+ draw_emit_vertex_attr(vinfo, EMIT_1F, INTERP_PERSPECTIVE, src);
+ break;
+
+ case TGSI_SEMANTIC_GENERIC:
+ /* this includes texcoords and varying vars */
+ src = draw_find_vs_output(cell->draw, TGSI_SEMANTIC_GENERIC,
+ fs->info.input_semantic_index[i]);
+ assert(src >= 0);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_PERSPECTIVE, src);
+ break;
+
+ default:
+ assert(0);
+ }
+ }
+
+ draw_compute_vertex_size(vinfo);
+
+ /* XXX only signal this if format really changes */
+ cell->dirty |= CELL_NEW_VERTEX_INFO;
+}
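+
+/* Worked example (hypothetical shaders): a fragment shader whose inputs
+ * are POSITION, COLOR[0] and GENERIC[0] makes the loop above emit three
+ * attributes -- position as EMIT_4F/INTERP_POS, the color as EMIT_4F with
+ * constant or linear interpolation depending on flatshade, and the
+ * generic/texcoord as EMIT_4F/INTERP_PERSPECTIVE -- each sourced from the
+ * matching vertex shader output found by draw_find_vs_output().
+ */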
+
+
+#if 0
+/**
+ * Recompute cliprect from scissor bounds, scissor enable and surface size.
+ */
+static void
+compute_cliprect(struct cell_context *sp)
+{
+ uint surfWidth = sp->framebuffer.width;
+ uint surfHeight = sp->framebuffer.height;
+
+ if (sp->rasterizer->scissor) {
+ /* clip to scissor rect */
+ sp->cliprect.minx = MAX2(sp->scissor.minx, 0);
+ sp->cliprect.miny = MAX2(sp->scissor.miny, 0);
+ sp->cliprect.maxx = MIN2(sp->scissor.maxx, surfWidth);
+ sp->cliprect.maxy = MIN2(sp->scissor.maxy, surfHeight);
+ }
+ else {
+ /* clip to surface bounds */
+ sp->cliprect.minx = 0;
+ sp->cliprect.miny = 0;
+ sp->cliprect.maxx = surfWidth;
+ sp->cliprect.maxy = surfHeight;
+ }
+}
+#endif
+
+
+
+/**
+ * Update derived state, send current state to SPUs prior to rendering.
+ */
+void cell_update_derived( struct cell_context *cell )
+{
+ if (cell->dirty & (CELL_NEW_RASTERIZER |
+ CELL_NEW_FS |
+ CELL_NEW_VS))
+ calculate_vertex_layout( cell );
+
+#if 0
+ if (cell->dirty & (CELL_NEW_SCISSOR |
+ CELL_NEW_DEPTH_STENCIL_ALPHA |
+ CELL_NEW_FRAMEBUFFER))
+ compute_cliprect(cell);
+#endif
+
+ cell_emit_state(cell);
+
+ cell->dirty = 0;
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_state_emit.c b/src/gallium/drivers/cell/ppu/cell_state_emit.c
new file mode 100644
index 0000000000..9479c0898f
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_state_emit.c
@@ -0,0 +1,344 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "pipe/p_inlines.h"
+#include "util/u_memory.h"
+#include "cell_context.h"
+#include "cell_gen_fragment.h"
+#include "cell_state.h"
+#include "cell_state_emit.h"
+#include "cell_batch.h"
+#include "cell_texture.h"
+#include "draw/draw_context.h"
+#include "draw/draw_private.h"
+
+
+/**
+ * Find/create a cell_command_fragment_ops object corresponding to the
+ * current blend/stencil/z/colormask/etc. state.
+ */
+static struct cell_command_fragment_ops *
+lookup_fragment_ops(struct cell_context *cell)
+{
+ struct cell_fragment_ops_key key;
+ struct cell_command_fragment_ops *ops;
+
+ /*
+ * Build key
+ */
+ memset(&key, 0, sizeof(key));
+ key.blend = *cell->blend;
+ key.blend_color = cell->blend_color;
+ key.dsa = *cell->depth_stencil;
+
+ if (cell->framebuffer.cbufs[0])
+ key.color_format = cell->framebuffer.cbufs[0]->format;
+ else
+ key.color_format = PIPE_FORMAT_NONE;
+
+ if (cell->framebuffer.zsbuf)
+ key.zs_format = cell->framebuffer.zsbuf->format;
+ else
+ key.zs_format = PIPE_FORMAT_NONE;
+
+ /*
+ * Look up key in cache.
+ */
+ ops = (struct cell_command_fragment_ops *)
+ util_keymap_lookup(cell->fragment_ops_cache, &key);
+
+ /*
+ * If not found, create/save new fragment ops command.
+ */
+ if (!ops) {
+ struct spe_function spe_code_front, spe_code_back;
+ unsigned int facing_dependent, total_code_size;
+
+ if (0)
+ debug_printf("**** Create New Fragment Ops\n");
+
+ /* Prepare the buffer that will hold the generated code. The
+ * "0" passed in for the size means that the SPE code will
+ * use a default size.
+ */
+ spe_init_func(&spe_code_front, 0);
+ spe_init_func(&spe_code_back, 0);
+
+ /* Generate new code. Always generate new code for both front-facing
+ * and back-facing fragments, even if it's the same code in both
+ * cases.
+ */
+ cell_gen_fragment_function(cell, CELL_FACING_FRONT, &spe_code_front);
+ cell_gen_fragment_function(cell, CELL_FACING_BACK, &spe_code_back);
+
+ /* Make sure the code is a multiple of 8 bytes long; this is
+ * required to ensure that the dual pipe instruction alignment
+ * is correct. It's also important for the SPU unpacking,
+ * which assumes 8-byte boundaries.
+ */
+ unsigned int front_code_size = spe_code_size(&spe_code_front);
+ while (front_code_size % 8 != 0) {
+ spe_lnop(&spe_code_front);
+ front_code_size = spe_code_size(&spe_code_front);
+ }
+ unsigned int back_code_size = spe_code_size(&spe_code_back);
+ while (back_code_size % 8 != 0) {
+ spe_lnop(&spe_code_back);
+ back_code_size = spe_code_size(&spe_code_back);
+ }
+
+ /* Determine whether the code we generated is facing-dependent, by
+ * determining whether the generated code is different for the front-
+ * and back-facing fragments.
+ */
+ if (front_code_size == back_code_size && memcmp(spe_code_front.store, spe_code_back.store, front_code_size) == 0) {
+ /* Code is identical; only need one copy. */
+ facing_dependent = 0;
+ total_code_size = front_code_size;
+ }
+ else {
+ /* Code is different for front-facing and back-facing fragments.
+ * Need to send both copies.
+ */
+ facing_dependent = 1;
+ total_code_size = front_code_size + back_code_size;
+ }
+
+ /* alloc new fragment ops command. Note that this structure
+ * has variant length based on the total code size required.
+ */
+ ops = CALLOC_VARIANT_LENGTH_STRUCT(cell_command_fragment_ops, total_code_size);
+ /* populate the new cell_command_fragment_ops object */
+ ops->opcode[0] = CELL_CMD_STATE_FRAGMENT_OPS;
+ ops->total_code_size = total_code_size;
+ ops->front_code_index = 0;
+ memcpy(ops->code, spe_code_front.store, front_code_size);
+ if (facing_dependent) {
+ /* We have separate front- and back-facing code. Append the
+ * back-facing code to the buffer. Be careful because the code
+ * size is in bytes, but the buffer is of unsigned elements.
+ */
+ ops->back_code_index = front_code_size / sizeof(spe_code_front.store[0]);
+ memcpy(ops->code + ops->back_code_index, spe_code_back.store, back_code_size);
+ }
+ else {
+ /* Use the same code for front- and back-facing fragments */
+ ops->back_code_index = ops->front_code_index;
+ }
+
+ /* Set the fields for the fallback case. Note that these fields
+ * (and the whole fallback case) will eventually go away.
+ */
+ ops->dsa = *cell->depth_stencil;
+ ops->blend = *cell->blend;
+ ops->blend_color = cell->blend_color;
+
+ /* insert cell_command_fragment_ops object into keymap/cache */
+ util_keymap_insert(cell->fragment_ops_cache, &key, ops, NULL);
+
+ /* release rtasm buffer */
+ spe_release_func(&spe_code_front);
+ spe_release_func(&spe_code_back);
+ }
+ else {
+ if (0)
+ debug_printf("**** Re-use Fragment Ops\n");
+ }
+
+ return ops;
+}
+
+
+
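+/**
+ * Insert a simple state-update command into the current batch buffer:
+ * the command code is written to the leading opcode slot, the state data
+ * is copied in after it, and the whole entry is rounded up to a 16-byte
+ * multiple for the SPUs.
+ */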
+static void
+emit_state_cmd(struct cell_context *cell, uint cmd,
+ const void *state, uint state_size)
+{
+ uint32_t *dst = (uint32_t *)
+ cell_batch_alloc16(cell, ROUNDUP16(sizeof(opcode_t) + state_size));
+ *dst = cmd;
+ memcpy(dst + 4, state, state_size);
+}
+
+
+/**
+ * For state marked as 'dirty', construct a state-update command block
+ * and insert it into the current batch buffer.
+ */
+void
+cell_emit_state(struct cell_context *cell)
+{
+ if (cell->dirty & CELL_NEW_FRAMEBUFFER) {
+ struct pipe_surface *cbuf = cell->framebuffer.cbufs[0];
+ struct pipe_surface *zbuf = cell->framebuffer.zsbuf;
+ STATIC_ASSERT(sizeof(struct cell_command_framebuffer) % 16 == 0);
+ struct cell_command_framebuffer *fb
+ = cell_batch_alloc16(cell, sizeof(*fb));
+ fb->opcode[0] = CELL_CMD_STATE_FRAMEBUFFER;
+ fb->color_start = cell->cbuf_map[0];
+ fb->color_format = cbuf->format;
+ fb->depth_start = cell->zsbuf_map;
+ fb->depth_format = zbuf ? zbuf->format : PIPE_FORMAT_NONE;
+ fb->width = cell->framebuffer.width;
+ fb->height = cell->framebuffer.height;
+#if 0
+ printf("EMIT color format %s\n", pf_name(fb->color_format));
+ printf("EMIT depth format %s\n", pf_name(fb->depth_format));
+#endif
+ }
+
+ if (cell->dirty & (CELL_NEW_RASTERIZER)) {
+ STATIC_ASSERT(sizeof(struct cell_command_rasterizer) % 16 == 0);
+ struct cell_command_rasterizer *rast =
+ cell_batch_alloc16(cell, sizeof(*rast));
+ rast->opcode[0] = CELL_CMD_STATE_RASTERIZER;
+ rast->rasterizer = *cell->rasterizer;
+ }
+
+ if (cell->dirty & (CELL_NEW_FS)) {
+ /* Send new fragment program to SPUs */
+ STATIC_ASSERT(sizeof(struct cell_command_fragment_program) % 16 == 0);
+ struct cell_command_fragment_program *fp
+ = cell_batch_alloc16(cell, sizeof(*fp));
+ fp->opcode[0] = CELL_CMD_STATE_FRAGMENT_PROGRAM;
+ fp->num_inst = cell->fs->code.num_inst;
+ memcpy(&fp->code, cell->fs->code.store,
+ SPU_MAX_FRAGMENT_PROGRAM_INSTS * SPE_INST_SIZE);
+ if (0) {
+ int i;
+ printf("PPU Emit CELL_CMD_STATE_FRAGMENT_PROGRAM:\n");
+ for (i = 0; i < fp->num_inst; i++) {
+ printf(" %3d: 0x%08x\n", i, fp->code[i]);
+ }
+ }
+ }
+
+ if (cell->dirty & (CELL_NEW_FS_CONSTANTS)) {
+ const uint shader = PIPE_SHADER_FRAGMENT;
+ const uint num_const = cell->constants[shader].buffer->size / sizeof(float);
+ uint i, j;
+ float *buf = cell_batch_alloc16(cell, ROUNDUP16(32 + num_const * sizeof(float)));
+ uint32_t *ibuf = (uint32_t *) buf;
+ const float *constants = pipe_buffer_map(cell->pipe.screen,
+ cell->constants[shader].buffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
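+ /* Payload layout: command code in word 0, constant count in word 4,
+ * constant values starting at word 8 (32 bytes in), matching the
+ * ROUNDUP16(32 + ...) allocation above.
+ */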
+ ibuf[0] = CELL_CMD_STATE_FS_CONSTANTS;
+ ibuf[4] = num_const;
+ j = 8;
+ for (i = 0; i < num_const; i++) {
+ buf[j++] = constants[i];
+ }
+ pipe_buffer_unmap(cell->pipe.screen, cell->constants[shader].buffer);
+ }
+
+ if (cell->dirty & (CELL_NEW_FRAMEBUFFER |
+ CELL_NEW_DEPTH_STENCIL |
+ CELL_NEW_BLEND)) {
+ struct cell_command_fragment_ops *fops, *fops_cmd;
+ /* Note that cell_command_fragment_ops is a variant-sized record */
+ fops = lookup_fragment_ops(cell);
+ fops_cmd = cell_batch_alloc16(cell, ROUNDUP16(sizeof(*fops_cmd) + fops->total_code_size));
+ memcpy(fops_cmd, fops, sizeof(*fops) + fops->total_code_size);
+ }
+
+ if (cell->dirty & CELL_NEW_SAMPLER) {
+ uint i;
+ for (i = 0; i < CELL_MAX_SAMPLERS; i++) {
+ if (cell->dirty_samplers & (1 << i)) {
+ if (cell->sampler[i]) {
+ STATIC_ASSERT(sizeof(struct cell_command_sampler) % 16 == 0);
+ struct cell_command_sampler *sampler
+ = cell_batch_alloc16(cell, sizeof(*sampler));
+ sampler->opcode[0] = CELL_CMD_STATE_SAMPLER;
+ sampler->unit = i;
+ sampler->state = *cell->sampler[i];
+ }
+ }
+ }
+ cell->dirty_samplers = 0x0;
+ }
+
+ if (cell->dirty & CELL_NEW_TEXTURE) {
+ uint i;
+ for (i = 0;i < CELL_MAX_SAMPLERS; i++) {
+ if (cell->dirty_textures & (1 << i)) {
+ STATIC_ASSERT(sizeof(struct cell_command_texture) % 16 == 0);
+ struct cell_command_texture *texture =
+ (struct cell_command_texture *)
+ cell_batch_alloc16(cell, sizeof(*texture));
+
+ texture->opcode[0] = CELL_CMD_STATE_TEXTURE;
+ texture->unit = i;
+ if (cell->texture[i]) {
+ struct cell_texture *ct = cell->texture[i];
+ uint level;
+ for (level = 0; level < CELL_MAX_TEXTURE_LEVELS; level++) {
+ texture->start[level] = (ct->mapped +
+ ct->level_offset[level]);
+ texture->width[level] = ct->base.width[level];
+ texture->height[level] = ct->base.height[level];
+ texture->depth[level] = ct->base.depth[level];
+ }
+ texture->target = ct->base.target;
+ }
+ else {
+ uint level;
+ for (level = 0; level < CELL_MAX_TEXTURE_LEVELS; level++) {
+ texture->start[level] = NULL;
+ texture->width[level] = 0;
+ texture->height[level] = 0;
+ texture->depth[level] = 0;
+ }
+ texture->target = 0;
+ }
+ }
+ }
+ cell->dirty_textures = 0x0;
+ }
+
+ if (cell->dirty & CELL_NEW_VERTEX_INFO) {
+ emit_state_cmd(cell, CELL_CMD_STATE_VERTEX_INFO,
+ &cell->vertex_info, sizeof(struct vertex_info));
+ }
+
+#if 0
+ if (cell->dirty & CELL_NEW_VS) {
+ const struct draw_context *const draw = cell->draw;
+ struct cell_shader_info info;
+
+ info.num_outputs = draw_num_vs_outputs(draw);
+ info.declarations = (uintptr_t) draw->vs.machine.Declarations;
+ info.num_declarations = draw->vs.machine.NumDeclarations;
+ info.instructions = (uintptr_t) draw->vs.machine.Instructions;
+ info.num_instructions = draw->vs.machine.NumInstructions;
+ info.immediates = (uintptr_t) draw->vs.machine.Imms;
+ info.num_immediates = draw->vs.machine.ImmLimit / 4;
+
+ emit_state_cmd(cell, CELL_CMD_STATE_BIND_VS, &info, sizeof(info));
+ }
+#endif
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_state_emit.h b/src/gallium/drivers/cell/ppu/cell_state_emit.h
new file mode 100644
index 0000000000..59f8affe8d
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_state_emit.h
@@ -0,0 +1,36 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef CELL_STATE_EMIT_H
+#define CELL_STATE_EMIT_H
+
+
+extern void
+cell_emit_state(struct cell_context *cell);
+
+
+#endif /* CELL_STATE_EMIT_H */
diff --git a/src/gallium/drivers/cell/ppu/cell_state_per_fragment.c b/src/gallium/drivers/cell/ppu/cell_state_per_fragment.c
new file mode 100644
index 0000000000..d97c22b2ef
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_state_per_fragment.c
@@ -0,0 +1,1430 @@
+/*
+ * (C) Copyright IBM Corporation 2008
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file
+ * Generate code to perform all per-fragment operations.
+ *
+ * Code generated by these functions performs alpha, depth, and stencil
+ * testing as well as alpha blending.
+ *
+ * \note
+ * Occlusion query is not supported, but this is the right place to add that
+ * support.
+ *
+ * \author Ian Romanick <idr@us.ibm.com>
+ */
+
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+
+#include "cell_context.h"
+
+#include "rtasm/rtasm_ppc_spe.h"
+
+
+/**
+ * Generate code to perform alpha testing.
+ *
+ * The code generated by this function uses the register specified by
+ * \c mask as both an input and an output.
+ *
+ * \param dsa Current alpha-test state
+ * \param f Function to which code should be appended
+ * \param mask Index of register containing active fragment mask
+ * \param alphas Index of register containing per-fragment alpha values
+ *
+ * \note Emits a maximum of 6 instructions.
+ */
+static void
+emit_alpha_test(struct pipe_depth_stencil_alpha_state *dsa,
+ struct spe_function *f, int mask, int alphas)
+{
+ /* If the alpha function is either NEVER or ALWAYS, there is no need to
+ * load the reference value into a register. ALWAYS is a fairly common
+ * case, and this optimization saves 2 instructions.
+ */
+ if (dsa->alpha.enabled
+ && (dsa->alpha.func != PIPE_FUNC_NEVER)
+ && (dsa->alpha.func != PIPE_FUNC_ALWAYS)) {
+ int ref = spe_allocate_available_register(f);
+ int tmp_a = spe_allocate_available_register(f);
+ int tmp_b = spe_allocate_available_register(f);
+ union {
+ float f;
+ unsigned u;
+ } ref_val;
+ boolean complement = FALSE;
+
+ ref_val.f = dsa->alpha.ref;
+
+ spe_il(f, ref, ref_val.u & 0x0000ffff);
+ spe_ilh(f, ref, ref_val.u >> 16);
+
+ switch (dsa->alpha.func) {
+ case PIPE_FUNC_NOTEQUAL:
+ complement = TRUE;
+ /* FALLTHROUGH */
+
+ case PIPE_FUNC_EQUAL:
+ spe_fceq(f, tmp_a, ref, alphas);
+ break;
+
+ case PIPE_FUNC_LEQUAL:
+ complement = TRUE;
+ /* FALLTHROUGH */
+
+ case PIPE_FUNC_GREATER:
+ spe_fcgt(f, tmp_a, ref, alphas);
+ break;
+
+ case PIPE_FUNC_LESS:
+ complement = TRUE;
+ /* FALLTHROUGH */
+
+ case PIPE_FUNC_GEQUAL:
+ spe_fcgt(f, tmp_a, ref, alphas);
+ spe_fceq(f, tmp_b, ref, alphas);
+ spe_or(f, tmp_a, tmp_b, tmp_a);
+ break;
+
+ case PIPE_FUNC_ALWAYS:
+ case PIPE_FUNC_NEVER:
+ default:
+ assert(0);
+ break;
+ }
+
+ if (complement) {
+ spe_andc(f, mask, mask, tmp_a);
+ } else {
+ spe_and(f, mask, mask, tmp_a);
+ }
+
+ spe_release_register(f, ref);
+ spe_release_register(f, tmp_a);
+ spe_release_register(f, tmp_b);
+ } else if (dsa->alpha.enabled && (dsa->alpha.func == PIPE_FUNC_NEVER)) {
+ spe_il(f, mask, 0);
+ }
+}
+
+
+/**
+ * Generate code to perform Z testing. Four Z values are tested at once.
+ * \param dsa Current depth-test state
+ * \param f Function to which code should be appended
+ * \param mask Index of register to contain depth-pass mask
+ * \param stored Index of register containing values from depth buffer
+ * \param calculated Index of register containing per-fragment depth values
+ *
+ * \return
+ * If the calculated depth comparison mask is the actual mask, \c FALSE is
+ * returned. If the calculated depth comparison mask is the complement of
+ * the actual mask, \c TRUE is returned.
+ *
+ * \note Emits a maximum of 3 instructions.
+ */
+static boolean
+emit_depth_test(struct pipe_depth_stencil_alpha_state *dsa,
+ struct spe_function *f, int mask, int stored, int calculated)
+{
+ unsigned func = (dsa->depth.enabled)
+ ? dsa->depth.func : PIPE_FUNC_ALWAYS;
+ int tmp = spe_allocate_available_register(f);
+ boolean complement = FALSE;
+
+ switch (func) {
+ case PIPE_FUNC_NEVER:
+ spe_il(f, mask, 0);
+ break;
+
+ case PIPE_FUNC_NOTEQUAL:
+ complement = TRUE;
+ /* FALLTHROUGH */
+ case PIPE_FUNC_EQUAL:
+ spe_ceq(f, mask, calculated, stored);
+ break;
+
+ case PIPE_FUNC_LEQUAL:
+ complement = TRUE;
+ /* FALLTHROUGH */
+ case PIPE_FUNC_GREATER:
+ spe_clgt(f, mask, calculated, stored);
+ break;
+
+ case PIPE_FUNC_LESS:
+ complement = TRUE;
+ /* FALLTHROUGH */
+ case PIPE_FUNC_GEQUAL:
+ spe_clgt(f, mask, calculated, stored);
+ spe_ceq(f, tmp, calculated, stored);
+ spe_or(f, mask, mask, tmp);
+ break;
+
+ case PIPE_FUNC_ALWAYS:
+ spe_il(f, mask, ~0);
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+
+ spe_release_register(f, tmp);
+ return complement;
+}
+
+
+/**
+ * Generate code to apply the stencil operation (after testing).
+ * \note Emits a maximum of 5 instructions.
+ *
+ * \warning
+ * Since \c out and \c in might be the same register, this routine cannot
+ * generate code that uses \c out as a temporary.
+ */
+static void
+emit_stencil_op(struct spe_function *f,
+ int out, int in, int mask, unsigned op, unsigned ref)
+{
+ const int clamp = spe_allocate_available_register(f);
+ const int clamp_mask = spe_allocate_available_register(f);
+ const int result = spe_allocate_available_register(f);
+
+ switch(op) {
+ case PIPE_STENCIL_OP_KEEP:
+ assert(0);
+ case PIPE_STENCIL_OP_ZERO:
+ spe_il(f, result, 0);
+ break;
+ case PIPE_STENCIL_OP_REPLACE:
+ spe_il(f, result, ref);
+ break;
+ case PIPE_STENCIL_OP_INCR:
+ /* clamp = [0xff, 0xff, 0xff, 0xff] */
+ spe_il(f, clamp, 0x0ff);
+ /* result[i] = in[i] + 1 */
+ spe_ai(f, result, in, 1);
+ /* clamp_mask[i] = (result[i] > 0xff) */
+ spe_clgti(f, clamp_mask, result, 0x0ff);
+ /* result[i] = clamp_mask[i] ? clamp[i] : result[i] */
+ spe_selb(f, result, result, clamp, clamp_mask);
+ break;
+ case PIPE_STENCIL_OP_DECR:
+ spe_il(f, clamp, 0);
+ spe_ai(f, result, in, -1);
+
+ /* If "(s-1) < 0" in signed arithmetic, then "(s-1) > MAX" in unsigned
+ * arithmetic.
+ */
+ spe_clgti(f, clamp_mask, result, 0x0ff);
+ spe_selb(f, result, result, clamp, clamp_mask);
+ break;
+ case PIPE_STENCIL_OP_INCR_WRAP:
+ spe_ai(f, result, in, 1);
+ break;
+ case PIPE_STENCIL_OP_DECR_WRAP:
+ spe_ai(f, result, in, -1);
+ break;
+ case PIPE_STENCIL_OP_INVERT:
+ spe_nor(f, result, in, in);
+ break;
+ default:
+ assert(0);
+ }
+
+ spe_selb(f, out, in, result, mask);
+
+ spe_release_register(f, result);
+ spe_release_register(f, clamp_mask);
+ spe_release_register(f, clamp);
+}
+
+
+/**
+ * Generate code to do stencil test. Four pixels are tested at once.
+ * \param dsa Depth / stencil test state
+ * \param face 0 for front face, 1 for back face
+ * \param f Function to append instructions to
+ * \param mask Register containing mask of fragments passing the
+ * alpha test
+ * \param depth_mask Register containing mask of fragments passing the
+ * depth test
+ * \param depth_complement Is \c depth_mask the complement of the actual mask?
+ * \param stencil Register containing values from stencil buffer
+ * \param depth_pass Register to store mask of fragments passing stencil test
+ * and depth test
+ *
+ * \note
+ * Emits a maximum of 10 + (3 * 5) = 25 instructions.
+ */
+static int
+emit_stencil_test(struct pipe_depth_stencil_alpha_state *dsa,
+ unsigned face,
+ struct spe_function *f,
+ int mask,
+ int depth_mask,
+ boolean depth_complement,
+ int stencil,
+ int depth_pass)
+{
+ int stencil_fail = spe_allocate_available_register(f);
+ int depth_fail = spe_allocate_available_register(f);
+ int stencil_mask = spe_allocate_available_register(f);
+ int stencil_pass = spe_allocate_available_register(f);
+ int face_stencil = spe_allocate_available_register(f);
+ int stencil_src = stencil;
+ const unsigned ref = (dsa->stencil[face].ref_value
+ & dsa->stencil[face].valuemask);
+ boolean complement = FALSE;
+ int stored;
+ int tmp = spe_allocate_available_register(f);
+
+
+ if ((dsa->stencil[face].func != PIPE_FUNC_NEVER)
+ && (dsa->stencil[face].func != PIPE_FUNC_ALWAYS)
+ && (dsa->stencil[face].valuemask != 0x0ff)) {
+ stored = spe_allocate_available_register(f);
+ spe_andi(f, stored, stencil, dsa->stencil[face].valuemask);
+ } else {
+ stored = stencil;
+ }
+
+
+ switch (dsa->stencil[face].func) {
+ case PIPE_FUNC_NEVER:
+ spe_il(f, stencil_mask, 0); /* stencil_mask[0..3] = [0,0,0,0] */
+ break;
+
+ case PIPE_FUNC_NOTEQUAL:
+ complement = TRUE;
+ /* FALLTHROUGH */
+ case PIPE_FUNC_EQUAL:
+ /* stencil_mask[i] = (stored[i] == ref) */
+ spe_ceqi(f, stencil_mask, stored, ref);
+ break;
+
+ case PIPE_FUNC_LEQUAL:
+ complement = TRUE;
+ /* FALLTHROUGH */
+ case PIPE_FUNC_GREATER:
+ /* stencil_mask[i] = (stored[i] > ref) */
+ spe_clgti(f, stencil_mask, stored, ref);
+ break;
+
+ case PIPE_FUNC_LESS:
+ complement = TRUE;
+ /* FALLTHROUGH */
+ case PIPE_FUNC_GEQUAL:
+ /* stencil_mask[i] = (stored[i] > ref) */
+ spe_clgti(f, stencil_mask, stored, ref);
+ /* tmp[i] = (stored[i] == ref) */
+ spe_ceqi(f, tmp, stored, ref);
+ /* stencil_mask[i] = stencil_mask[i] | tmp[i] */
+ spe_or(f, stencil_mask, stencil_mask, tmp);
+ break;
+
+ case PIPE_FUNC_ALWAYS:
+ /* See comment below. */
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+
+ if (stored != stencil) {
+ spe_release_register(f, stored);
+ }
+ spe_release_register(f, tmp);
+
+
+ /* ALWAYS is a very common stencil-test, so some effort is applied to
+ * optimize that case. The stencil-pass mask is the same as the input
+ * fragment mask. This makes the stencil-test (above) a no-op, and the
+ * input fragment mask can simply be reused as the stencil-pass mask.
+ */
+ if (dsa->stencil[face].func == PIPE_FUNC_ALWAYS) {
+ spe_release_register(f, stencil_pass);
+ stencil_pass = mask;
+ } else {
+ if (complement) {
+ spe_andc(f, stencil_pass, mask, stencil_mask);
+ } else {
+ spe_and(f, stencil_pass, mask, stencil_mask);
+ }
+ }
+
+ if (depth_complement) {
+ spe_andc(f, depth_pass, stencil_pass, depth_mask);
+ } else {
+ spe_and(f, depth_pass, stencil_pass, depth_mask);
+ }
+
+
+ /* Conditionally emit code to update the stencil value under various
+ * conditions. Note that there is no need to generate code under the
+ * following circumstances:
+ *
+ * - Stencil write mask is zero.
+ * - For stencil-fail if the stencil test is ALWAYS
+ * - For depth-fail if the stencil test is NEVER
+ * - For depth-pass if the stencil test is NEVER
+ * - Any of the 3 conditions if the operation is KEEP
+ */
+ if (dsa->stencil[face].writemask != 0) {
+ if ((dsa->stencil[face].func != PIPE_FUNC_ALWAYS)
+ && (dsa->stencil[face].fail_op != PIPE_STENCIL_OP_KEEP)) {
+ if (complement) {
+ spe_and(f, stencil_fail, mask, stencil_mask);
+ } else {
+ spe_andc(f, stencil_fail, mask, stencil_mask);
+ }
+
+ emit_stencil_op(f, face_stencil, stencil_src, stencil_fail,
+ dsa->stencil[face].fail_op,
+ dsa->stencil[face].ref_value);
+
+ stencil_src = face_stencil;
+ }
+
+ if ((dsa->stencil[face].func != PIPE_FUNC_NEVER)
+ && (dsa->stencil[face].zfail_op != PIPE_STENCIL_OP_KEEP)) {
+ if (depth_complement) {
+ spe_and(f, depth_fail, stencil_pass, depth_mask);
+ } else {
+ spe_andc(f, depth_fail, stencil_pass, depth_mask);
+ }
+
+ emit_stencil_op(f, face_stencil, stencil_src, depth_fail,
+ dsa->stencil[face].zfail_op,
+ dsa->stencil[face].ref_value);
+ stencil_src = face_stencil;
+ }
+
+ if ((dsa->stencil[face].func != PIPE_FUNC_NEVER)
+ && (dsa->stencil[face].zpass_op != PIPE_STENCIL_OP_KEEP)) {
+ emit_stencil_op(f, face_stencil, stencil_src, depth_pass,
+ dsa->stencil[face].zpass_op,
+ dsa->stencil[face].ref_value);
+ stencil_src = face_stencil;
+ }
+ }
+
+ spe_release_register(f, stencil_fail);
+ spe_release_register(f, depth_fail);
+ spe_release_register(f, stencil_mask);
+ if (stencil_pass != mask) {
+ spe_release_register(f, stencil_pass);
+ }
+
+ /* If all of the stencil operations were KEEP or the stencil write mask was
+ * zero, "stencil_src" will still be set to "stencil". In this case
+ * release the "face_stencil" register. Otherwise apply the stencil write
+ * mask to select bits from the calculated stencil value and the previous
+ * stencil value.
+ */
+ if (stencil_src == stencil) {
+ spe_release_register(f, face_stencil);
+ } else if (dsa->stencil[face].writemask != 0x0ff) {
+ int tmp = spe_allocate_available_register(f);
+
+ spe_il(f, tmp, dsa->stencil[face].writemask);
+ spe_selb(f, stencil_src, stencil, stencil_src, tmp);
+
+ spe_release_register(f, tmp);
+ }
+
+ return stencil_src;
+}
+
+
+void
+cell_generate_depth_stencil_test(struct cell_depth_stencil_alpha_state *cdsa)
+{
+ struct pipe_depth_stencil_alpha_state *const dsa = &cdsa->base;
+ struct spe_function *const f = &cdsa->code;
+
+ /* This code generates a maximum of 6 (alpha test) + 3 (depth test)
+ * + 25 (front stencil) + 25 (back stencil) + 4 = 63 instructions. Round
+ * up to 64 to make it a happy power-of-two.
+ */
+ spe_init_func(f, SPE_INST_SIZE * 64);
+
+
+ /* Allocate registers for the function's input parameters. Cleverly (and
+ * clever code is usually dangerous, but I couldn't resist) the generated
+ * function returns a structure. Returned structures start with register
+ * 3, and the structure fields are ordered to match up exactly with the
+ * input parameters.
+ */
+ int mask = spe_allocate_register(f, 3);
+ int depth = spe_allocate_register(f, 4);
+ int stencil = spe_allocate_register(f, 5);
+ int zvals = spe_allocate_register(f, 6);
+ int frag_a = spe_allocate_register(f, 7);
+ int facing = spe_allocate_register(f, 8);
+
+ int depth_mask = spe_allocate_available_register(f);
+
+ boolean depth_complement;
+
+
+ emit_alpha_test(dsa, f, mask, frag_a);
+
+ depth_complement = emit_depth_test(dsa, f, depth_mask, depth, zvals);
+
+ if (dsa->stencil[0].enabled) {
+ const int front_depth_pass = spe_allocate_available_register(f);
+ int front_stencil = emit_stencil_test(dsa, 0, f, mask,
+ depth_mask, depth_complement,
+ stencil, front_depth_pass);
+
+ if (dsa->stencil[1].enabled) {
+ const int back_depth_pass = spe_allocate_available_register(f);
+ int back_stencil = emit_stencil_test(dsa, 1, f, mask,
+ depth_mask, depth_complement,
+ stencil, back_depth_pass);
+
+ /* If the front facing stencil value and the back facing stencil
+ * value are stored in the same register, there is no need to select
+ * a value based on the facing. This can happen if the stencil value
+ * was not modified due to the write masks being zero, the stencil
+ * operations being KEEP, etc.
+ */
+ if (front_stencil != back_stencil) {
+ spe_selb(f, stencil, back_stencil, front_stencil, facing);
+ }
+
+ if (back_stencil != stencil) {
+ spe_release_register(f, back_stencil);
+ }
+
+ if (front_stencil != stencil) {
+ spe_release_register(f, front_stencil);
+ }
+
+ spe_selb(f, mask, back_depth_pass, front_depth_pass, facing);
+
+ spe_release_register(f, back_depth_pass);
+ } else {
+ if (front_stencil != stencil) {
+ spe_or(f, stencil, front_stencil, front_stencil);
+ spe_release_register(f, front_stencil);
+ }
+ spe_or(f, mask, front_depth_pass, front_depth_pass);
+ }
+
+ spe_release_register(f, front_depth_pass);
+ } else if (dsa->depth.enabled) {
+ if (depth_complement) {
+ spe_andc(f, mask, mask, depth_mask);
+ } else {
+ spe_and(f, mask, mask, depth_mask);
+ }
+ }
+
+ if (dsa->depth.writemask) {
+ spe_selb(f, depth, depth, zvals, mask);
+ }
+
+ spe_bi(f, 0, 0, 0); /* return from function call */
+
+
+#if 0
+ {
+ const uint32_t *p = f->store;
+ unsigned i;
+
+ printf("# alpha (%sabled)\n",
+ (dsa->alpha.enabled) ? "en" : "dis");
+ printf("# func: %u\n", dsa->alpha.func);
+ printf("# ref: %.2f\n", dsa->alpha.ref);
+
+ printf("# depth (%sabled)\n",
+ (dsa->depth.enabled) ? "en" : "dis");
+ printf("# func: %u\n", dsa->depth.func);
+
+ for (i = 0; i < 2; i++) {
+ printf("# %s stencil (%sabled)\n",
+ (i == 0) ? "front" : "back",
+ (dsa->stencil[i].enabled) ? "en" : "dis");
+
+ printf("# func: %u\n", dsa->stencil[i].func);
+ printf("# op (sf, zf, zp): %u %u %u\n",
+ dsa->stencil[i].fail_op,
+ dsa->stencil[i].zfail_op,
+ dsa->stencil[i].zpass_op);
+ printf("# ref value / value mask / write mask: %02x %02x %02x\n",
+ dsa->stencil[i].ref_value,
+ dsa->stencil[i].valuemask,
+ dsa->stencil[i].writemask);
+ }
+
+ printf("\t.text\n");
+ for (/* empty */; p < f->csr; p++) {
+ printf("\t.long\t0x%04x\n", *p);
+ }
+ fflush(stdout);
+ }
+#endif
+}
+
+
+/**
+ * \note Emits a maximum of 3 instructions
+ */
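+/* The return value is the register holding the requested factor, or -1 for
+ * factors (ONE, ZERO, SRC_ALPHA_SATURATE) that need no computed value and
+ * are handled specially by the blend calculation.
+ */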
+static int
+emit_alpha_factor_calculation(struct spe_function *f,
+ unsigned factor,
+ int src_alpha, int dst_alpha, int const_alpha)
+{
+ int factor_reg;
+ int tmp;
+
+
+ switch (factor) {
+ case PIPE_BLENDFACTOR_ONE:
+ factor_reg = -1;
+ break;
+
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ factor_reg = spe_allocate_available_register(f);
+
+ spe_or(f, factor_reg, src_alpha, src_alpha);
+ break;
+
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ factor_reg = dst_alpha;
+ break;
+
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ factor_reg = -1;
+ break;
+
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ factor_reg = spe_allocate_available_register(f);
+
+ tmp = spe_allocate_available_register(f);
+ spe_il(f, tmp, 1);
+ spe_cuflt(f, tmp, tmp, 0);
+ spe_fs(f, factor_reg, tmp, const_alpha);
+ spe_release_register(f, tmp);
+ break;
+
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ factor_reg = const_alpha;
+ break;
+
+ case PIPE_BLENDFACTOR_ZERO:
+ factor_reg = -1;
+ break;
+
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ tmp = spe_allocate_available_register(f);
+ factor_reg = spe_allocate_available_register(f);
+
+ spe_il(f, tmp, 1);
+ spe_cuflt(f, tmp, tmp, 0);
+ spe_fs(f, factor_reg, tmp, src_alpha);
+
+ spe_release_register(f, tmp);
+ break;
+
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ tmp = spe_allocate_available_register(f);
+ factor_reg = spe_allocate_available_register(f);
+
+ spe_il(f, tmp, 1);
+ spe_cuflt(f, tmp, tmp, 0);
+ spe_fs(f, factor_reg, tmp, dst_alpha);
+
+ spe_release_register(f, tmp);
+ break;
+
+ case PIPE_BLENDFACTOR_SRC1_ALPHA:
+ case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
+ default:
+ assert(0);
+ factor_reg = -1;
+ break;
+ }
+
+ return factor_reg;
+}
+
+
+/**
+ * \note Emits a maximum of 6 instructions
+ */
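+/* On return, factor[i] is -1 for components whose factor is ONE or ZERO, or
+ * which are masked off; those cases are handled by the blend calculation.
+ */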
+static void
+emit_color_factor_calculation(struct spe_function *f,
+ unsigned sF, unsigned mask,
+ const int *src,
+ const int *dst,
+ const int *const_color,
+ int *factor)
+{
+ int tmp;
+ unsigned i;
+
+
+ factor[0] = -1;
+ factor[1] = -1;
+ factor[2] = -1;
+ factor[3] = -1;
+
+ switch (sF) {
+ case PIPE_BLENDFACTOR_ONE:
+ break;
+
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ for (i = 0; i < 3; ++i) {
+ if ((mask & (1U << i)) != 0) {
+ factor[i] = spe_allocate_available_register(f);
+ spe_or(f, factor[i], src[i], src[i]);
+ }
+ }
+ break;
+
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ factor[0] = spe_allocate_available_register(f);
+ factor[1] = factor[0];
+ factor[2] = factor[0];
+
+ spe_or(f, factor[0], src[3], src[3]);
+ break;
+
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ factor[0] = dst[3];
+ factor[1] = dst[3];
+ factor[2] = dst[3];
+ break;
+
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ factor[0] = dst[0];
+ factor[1] = dst[1];
+ factor[2] = dst[2];
+ break;
+
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ tmp = spe_allocate_available_register(f);
+ factor[0] = spe_allocate_available_register(f);
+ factor[1] = factor[0];
+ factor[2] = factor[0];
+
+ /* Alpha saturate means min(As, 1-Ad).
+ */
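+ /* Build 1.0f (il + cuflt), form 1 - Ad (fs), compare it against As (fcgt)
+  * and use the resulting mask with selb to pick between the two values.
+  */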
+ spe_il(f, tmp, 1);
+ spe_cuflt(f, tmp, tmp, 0);
+ spe_fs(f, tmp, tmp, dst[3]);
+ spe_fcgt(f, factor[0], tmp, src[3]);
+ spe_selb(f, factor[0], src[3], tmp, factor[0]);
+
+ spe_release_register(f, tmp);
+ break;
+
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ tmp = spe_allocate_available_register(f);
+ spe_il(f, tmp, 1);
+ spe_cuflt(f, tmp, tmp, 0);
+
+ for (i = 0; i < 3; i++) {
+ factor[i] = spe_allocate_available_register(f);
+
+ spe_fs(f, factor[i], tmp, const_color[i]);
+ }
+ spe_release_register(f, tmp);
+ break;
+
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ for (i = 0; i < 3; i++) {
+ factor[i] = const_color[i];
+ }
+ break;
+
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ factor[0] = spe_allocate_available_register(f);
+ factor[1] = factor[0];
+ factor[2] = factor[0];
+
+ tmp = spe_allocate_available_register(f);
+ spe_il(f, tmp, 1);
+ spe_cuflt(f, tmp, tmp, 0);
+ spe_fs(f, factor[0], tmp, const_color[3]);
+ spe_release_register(f, tmp);
+ break;
+
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ factor[0] = const_color[3];
+ factor[1] = factor[0];
+ factor[2] = factor[0];
+ break;
+
+ case PIPE_BLENDFACTOR_ZERO:
+ break;
+
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ tmp = spe_allocate_available_register(f);
+
+ spe_il(f, tmp, 1);
+ spe_cuflt(f, tmp, tmp, 0);
+
+ for (i = 0; i < 3; ++i) {
+ if ((mask & (1U << i)) != 0) {
+ factor[i] = spe_allocate_available_register(f);
+ spe_fs(f, factor[i], tmp, src[i]);
+ }
+ }
+
+ spe_release_register(f, tmp);
+ break;
+
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ tmp = spe_allocate_available_register(f);
+ factor[0] = spe_allocate_available_register(f);
+ factor[1] = factor[0];
+ factor[2] = factor[0];
+
+ spe_il(f, tmp, 1);
+ spe_cuflt(f, tmp, tmp, 0);
+ spe_fs(f, factor[0], tmp, src[3]);
+
+ spe_release_register(f, tmp);
+ break;
+
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ tmp = spe_allocate_available_register(f);
+ factor[0] = spe_allocate_available_register(f);
+ factor[1] = factor[0];
+ factor[2] = factor[0];
+
+ spe_il(f, tmp, 1);
+ spe_cuflt(f, tmp, tmp, 0);
+ spe_fs(f, factor[0], tmp, dst[3]);
+
+ spe_release_register(f, tmp);
+ break;
+
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ tmp = spe_allocate_available_register(f);
+
+ spe_il(f, tmp, 1);
+ spe_cuflt(f, tmp, tmp, 0);
+
+ for (i = 0; i < 3; ++i) {
+ if ((mask & (1U << i)) != 0) {
+ factor[i] = spe_allocate_available_register(f);
+ spe_fs(f, factor[i], tmp, dst[i]);
+ }
+ }
+
+ spe_release_register(f, tmp);
+ break;
+
+ case PIPE_BLENDFACTOR_SRC1_COLOR:
+ case PIPE_BLENDFACTOR_SRC1_ALPHA:
+ case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
+ case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
+ default:
+ assert(0);
+ }
+}
+
+
+static void
+emit_blend_calculation(struct spe_function *f,
+ unsigned func, unsigned sF, unsigned dF,
+ int src, int src_factor, int dst, int dst_factor)
+{
+ int tmp = spe_allocate_available_register(f);
+
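+ /* The blended result is written back into "src": src = src * src_factor
+  * OP dst * dst_factor, with the ONE and ZERO factor combinations
+  * special-cased to avoid unnecessary multiplies.
+  */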
+ switch (func) {
+ case PIPE_BLEND_ADD:
+ if (sF == PIPE_BLENDFACTOR_ONE) {
+ if (dF == PIPE_BLENDFACTOR_ZERO) {
+ /* Do nothing. */
+ } else if (dF == PIPE_BLENDFACTOR_ONE) {
+ spe_fa(f, src, src, dst);
+ }
+ } else if (sF == PIPE_BLENDFACTOR_ZERO) {
+ if (dF == PIPE_BLENDFACTOR_ZERO) {
+ spe_il(f, src, 0);
+ } else if (dF == PIPE_BLENDFACTOR_ONE) {
+ spe_or(f, src, dst, dst);
+ } else {
+ spe_fm(f, src, dst, dst_factor);
+ }
+ } else if (dF == PIPE_BLENDFACTOR_ZERO) {
+ spe_fm(f, src, src, src_factor);
+ } else {
+ spe_fm(f, tmp, dst, dst_factor);
+ spe_fma(f, src, src, src_factor, tmp);
+ }
+ break;
+
+ case PIPE_BLEND_SUBTRACT:
+ if (sF == PIPE_BLENDFACTOR_ONE) {
+ if (dF == PIPE_BLENDFACTOR_ZERO) {
+ /* Do nothing. */
+ } else if (dF == PIPE_BLENDFACTOR_ONE) {
+ spe_fs(f, src, src, dst);
+ }
+ } else if (sF == PIPE_BLENDFACTOR_ZERO) {
+ if (dF == PIPE_BLENDFACTOR_ZERO) {
+ spe_il(f, src, 0);
+ } else if (dF == PIPE_BLENDFACTOR_ONE) {
+ spe_il(f, tmp, 0);
+ spe_fs(f, src, tmp, dst);
+ } else {
+ spe_fm(f, src, dst, dst_factor);
+ }
+ } else if (dF == PIPE_BLENDFACTOR_ZERO) {
+ spe_fm(f, src, src, src_factor);
+ } else {
+ spe_fm(f, tmp, dst, dst_factor);
+ spe_fms(f, src, src, src_factor, tmp);
+ }
+ break;
+
+ case PIPE_BLEND_REVERSE_SUBTRACT:
+ if (sF == PIPE_BLENDFACTOR_ONE) {
+ if (dF == PIPE_BLENDFACTOR_ZERO) {
+ spe_il(f, tmp, 0);
+ spe_fs(f, src, tmp, src);
+ } else if (dF == PIPE_BLENDFACTOR_ONE) {
+ spe_fs(f, src, dst, src);
+ }
+ } else if (sF == PIPE_BLENDFACTOR_ZERO) {
+ if (dF == PIPE_BLENDFACTOR_ZERO) {
+ spe_il(f, src, 0);
+ } else if (dF == PIPE_BLENDFACTOR_ONE) {
+ spe_or(f, src, dst, dst);
+ } else {
+ spe_fm(f, src, dst, dst_factor);
+ }
+ } else if (dF == PIPE_BLENDFACTOR_ZERO) {
+ spe_fm(f, src, src, src_factor);
+ } else {
+ spe_fm(f, tmp, src, src_factor);
+ spe_fms(f, src, dst, dst_factor, tmp);
+ }
+ break;
+
+ case PIPE_BLEND_MIN:
+ spe_cgt(f, tmp, src, dst);
+ spe_selb(f, src, src, dst, tmp);
+ break;
+
+ case PIPE_BLEND_MAX:
+ spe_cgt(f, tmp, src, dst);
+ spe_selb(f, src, dst, src, tmp);
+ break;
+
+ default:
+ assert(0);
+ }
+
+ spe_release_register(f, tmp);
+}
+
+
+/**
+ * Generate code to perform alpha blending on the SPE
+ */
+void
+cell_generate_alpha_blend(struct cell_blend_state *cb)
+{
+ struct pipe_blend_state *const b = &cb->base;
+ struct spe_function *const f = &cb->code;
+
+ /* This code generates a maximum of 3 (source alpha factor)
+ * + 3 (destination alpha factor) + (3 * 6) (source color factor)
+ * + (3 * 6) (destination color factor) + (4 * 2) (blend equation)
+ * + 4 (fragment mask) + 1 (return) = 55 instructions. Round up to 64 to
+ * make it a happy power-of-two.
+ */
+ spe_init_func(f, SPE_INST_SIZE * 64);
+
+
+ const int frag[4] = {
+ spe_allocate_register(f, 3),
+ spe_allocate_register(f, 4),
+ spe_allocate_register(f, 5),
+ spe_allocate_register(f, 6),
+ };
+ const int pixel[4] = {
+ spe_allocate_register(f, 7),
+ spe_allocate_register(f, 8),
+ spe_allocate_register(f, 9),
+ spe_allocate_register(f, 10),
+ };
+ const int const_color[4] = {
+ spe_allocate_register(f, 11),
+ spe_allocate_register(f, 12),
+ spe_allocate_register(f, 13),
+ spe_allocate_register(f, 14),
+ };
+ unsigned func[4];
+ unsigned sF[4];
+ unsigned dF[4];
+ unsigned i;
+ int src_factor[4];
+ int dst_factor[4];
+
+
+ /* Does the selected blend mode make use of the source / destination
+ * color (RGB) blend factors?
+ */
+ boolean need_color_factor = b->blend_enable
+ && (b->rgb_func != PIPE_BLEND_MIN)
+ && (b->rgb_func != PIPE_BLEND_MAX);
+
+ /* Does the selected blend mode make use of the source / destination
+ * alpha blend factors?
+ */
+ boolean need_alpha_factor = b->blend_enable
+ && (b->alpha_func != PIPE_BLEND_MIN)
+ && (b->alpha_func != PIPE_BLEND_MAX);
+
+
+ if (b->blend_enable) {
+ sF[0] = b->rgb_src_factor;
+ sF[1] = sF[0];
+ sF[2] = sF[0];
+ switch (b->alpha_src_factor & 0x0f) {
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ sF[3] = PIPE_BLENDFACTOR_ONE;
+ break;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ case PIPE_BLENDFACTOR_SRC1_COLOR:
+ sF[3] = b->alpha_src_factor + 1;
+ break;
+ default:
+ sF[3] = b->alpha_src_factor;
+ }
+
+ dF[0] = b->rgb_dst_factor;
+ dF[1] = dF[0];
+ dF[2] = dF[0];
+ switch (b->alpha_dst_factor & 0x0f) {
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ case PIPE_BLENDFACTOR_SRC1_COLOR:
+ dF[3] = b->alpha_dst_factor + 1;
+ break;
+ default:
+ dF[3] = b->alpha_dst_factor;
+ }
+
+ func[0] = b->rgb_func;
+ func[1] = func[0];
+ func[2] = func[0];
+ func[3] = b->alpha_func;
+ } else {
+ sF[0] = PIPE_BLENDFACTOR_ONE;
+ sF[1] = PIPE_BLENDFACTOR_ONE;
+ sF[2] = PIPE_BLENDFACTOR_ONE;
+ sF[3] = PIPE_BLENDFACTOR_ONE;
+ dF[0] = PIPE_BLENDFACTOR_ZERO;
+ dF[1] = PIPE_BLENDFACTOR_ZERO;
+ dF[2] = PIPE_BLENDFACTOR_ZERO;
+ dF[3] = PIPE_BLENDFACTOR_ZERO;
+
+ func[0] = PIPE_BLEND_ADD;
+ func[1] = PIPE_BLEND_ADD;
+ func[2] = PIPE_BLEND_ADD;
+ func[3] = PIPE_BLEND_ADD;
+ }
+
+
+ /* If alpha writing is enabled and the alpha blend mode requires use of
+ * the alpha factor, calculate the alpha factor.
+ */
+ if (((b->colormask & 8) != 0) && need_alpha_factor) {
+ src_factor[3] = emit_alpha_factor_calculation(f, sF[3], const_color[3],
+ frag[3], pixel[3]);
+
+ /* If the alpha destination blend factor is the same as the alpha source
+ * blend factor, re-use the previously calculated value.
+ */
+ dst_factor[3] = (dF[3] == sF[3])
+ ? src_factor[3]
+ : emit_alpha_factor_calculation(f, dF[3], const_color[3],
+ frag[3], pixel[3]);
+ }
+
+
+ if (sF[0] == sF[3]) {
+ src_factor[0] = src_factor[3];
+ src_factor[1] = src_factor[3];
+ src_factor[2] = src_factor[3];
+ } else if (sF[0] == dF[3]) {
+ src_factor[0] = dst_factor[3];
+ src_factor[1] = dst_factor[3];
+ src_factor[2] = dst_factor[3];
+ } else if (need_color_factor) {
+ emit_color_factor_calculation(f,
+ b->rgb_src_factor,
+ b->colormask,
+ frag, pixel, const_color, src_factor);
+ }
+
+
+ if (dF[0] == sF[3]) {
+ dst_factor[0] = src_factor[3];
+ dst_factor[1] = src_factor[3];
+ dst_factor[2] = src_factor[3];
+ } else if (dF[0] == dF[3]) {
+ dst_factor[0] = dst_factor[3];
+ dst_factor[1] = dst_factor[3];
+ dst_factor[2] = dst_factor[3];
+ } else if (dF[0] == sF[0]) {
+ dst_factor[0] = src_factor[0];
+ dst_factor[1] = src_factor[1];
+ dst_factor[2] = src_factor[2];
+ } else if (need_color_factor) {
+ emit_color_factor_calculation(f,
+ b->rgb_dst_factor,
+ b->colormask,
+ frag, pixel, const_color, dst_factor);
+ }
+
+
+
+ for (i = 0; i < 4; ++i) {
+ if ((b->colormask & (1U << i)) != 0) {
+ emit_blend_calculation(f,
+ func[i], sF[i], dF[i],
+ frag[i], src_factor[i],
+ pixel[i], dst_factor[i]);
+ }
+ }
+
+ spe_bi(f, 0, 0, 0);
+
+#if 0
+ {
+ const uint32_t *p = f->store;
+
+ printf("# %u instructions\n", f->csr - f->store);
+ printf("# blend (%sabled)\n",
+ (cb->base.blend_enable) ? "en" : "dis");
+ printf("# RGB func / sf / df: %u %u %u\n",
+ cb->base.rgb_func,
+ cb->base.rgb_src_factor,
+ cb->base.rgb_dst_factor);
+ printf("# ALP func / sf / df: %u %u %u\n",
+ cb->base.alpha_func,
+ cb->base.alpha_src_factor,
+ cb->base.alpha_dst_factor);
+
+ printf("\t.text\n");
+ for (/* empty */; p < f->csr; p++) {
+ printf("\t.long\t0x%04x\n", *p);
+ }
+ fflush(stdout);
+ }
+#endif
+}
+
+
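+/**
+ * Compute the PC-relative offset, in 32-bit instruction words, from the
+ * current code position to the 16-byte-aligned address of \c d.  Used with
+ * spe_lqr() to load quadwords from a function's embedded data area.
+ */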
+static int
+PC_OFFSET(const struct spe_function *f, const void *d)
+{
+ const intptr_t pc = (intptr_t) &f->store[f->num_inst];
+ const intptr_t ea = ~0x0f & (intptr_t) d;
+
+ return (ea - pc) >> 2;
+}
+
+
+/**
+ * Generate code to perform color conversion and logic op
+ *
+ * \bug
+ * The code generated by this function should also perform dithering.
+ *
+ * \bug
+ * The code generated by this function should also perform color-write
+ * masking.
+ *
+ * \bug
+ * Only two framebuffer formats are supported at this time.
+ */
+void
+cell_generate_logic_op(struct spe_function *f,
+ const struct pipe_blend_state *blend,
+ struct pipe_surface *surf)
+{
+ const unsigned logic_op = (blend->logicop_enable)
+ ? blend->logicop_func : PIPE_LOGICOP_COPY;
+
+ /* This code generates a maximum of 37 instructions. An additional 32
+ * bytes (equiv. to 8 instructions) are needed for data storage. Round up
+ * to 64 to make it a happy power-of-two.
+ */
+ spe_init_func(f, SPE_INST_SIZE * 64);
+
+
+ /* Pixel colors in framebuffer format in AoS layout.
+ */
+ const int pixel[4] = {
+ spe_allocate_register(f, 3),
+ spe_allocate_register(f, 4),
+ spe_allocate_register(f, 5),
+ spe_allocate_register(f, 6),
+ };
+
+ /* Fragment colors stored as floats in SoA layout.
+ */
+ const int frag[4] = {
+ spe_allocate_register(f, 7),
+ spe_allocate_register(f, 8),
+ spe_allocate_register(f, 9),
+ spe_allocate_register(f, 10),
+ };
+
+ const int mask = spe_allocate_register(f, 11);
+
+
+ /* Short-circuit the noop and invert cases.
+ */
+ if ((logic_op == PIPE_LOGICOP_NOOP) || (blend->colormask == 0)) {
+ spe_bi(f, 0, 0, 0);
+ return;
+ } else if (logic_op == PIPE_LOGICOP_INVERT) {
+ spe_nor(f, pixel[0], pixel[0], pixel[0]);
+ spe_nor(f, pixel[1], pixel[1], pixel[1]);
+ spe_nor(f, pixel[2], pixel[2], pixel[2]);
+ spe_nor(f, pixel[3], pixel[3], pixel[3]);
+ spe_bi(f, 0, 0, 0);
+ return;
+ }
+
+
+ const int tmp[4] = {
+ spe_allocate_available_register(f),
+ spe_allocate_available_register(f),
+ spe_allocate_available_register(f),
+ spe_allocate_available_register(f),
+ };
+
+ const int shuf_xpose_hi = spe_allocate_available_register(f);
+ const int shuf_xpose_lo = spe_allocate_available_register(f);
+ const int shuf_color = spe_allocate_available_register(f);
+
+
+ /* Pointer to the beginning of the function's private data area.
+ */
+ uint32_t *const data = ((uint32_t *) f->store) + (64 - 8);
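+ /* This is the last 8 words (32 bytes) of the 64-instruction buffer
+  * allocated by spe_init_func() above; the shuffle patterns written here
+  * are fetched PC-relative with spe_lqr() / PC_OFFSET().
+  */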
+
+
+ /* Convert fragment colors to framebuffer format in AoS layout.
+ */
+ switch (surf->format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ data[0] = 0x00010203;
+ data[1] = 0x10111213;
+ data[2] = 0x04050607;
+ data[3] = 0x14151617;
+ data[4] = 0x0c000408;
+ data[5] = 0x80808080;
+ data[6] = 0x80808080;
+ data[7] = 0x80808080;
+ break;
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ data[0] = 0x03020100;
+ data[1] = 0x13121110;
+ data[2] = 0x07060504;
+ data[3] = 0x17161514;
+ data[4] = 0x0804000c;
+ data[5] = 0x80808080;
+ data[6] = 0x80808080;
+ data[7] = 0x80808080;
+ break;
+ default:
+ fprintf(stderr, "CELL: Bad pixel format in cell_generate_logic_op()\n");
+ ASSERT(0);
+ }
+
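+ /* Load the transpose and color shuffle patterns from the data area.  The
+  * low-half transpose pattern is the high-half pattern with 8 added to
+  * every byte (the replicated 0x0808 constant).
+  */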
+ spe_ilh(f, tmp[0], 0x0808);
+ spe_lqr(f, shuf_xpose_hi, PC_OFFSET(f, data+0));
+ spe_lqr(f, shuf_color, PC_OFFSET(f, data+4));
+ spe_a(f, shuf_xpose_lo, shuf_xpose_hi, tmp[0]);
+
+ spe_shufb(f, tmp[0], frag[0], frag[2], shuf_xpose_hi);
+ spe_shufb(f, tmp[1], frag[0], frag[2], shuf_xpose_lo);
+ spe_shufb(f, tmp[2], frag[1], frag[3], shuf_xpose_hi);
+ spe_shufb(f, tmp[3], frag[1], frag[3], shuf_xpose_lo);
+
+ spe_shufb(f, frag[0], tmp[0], tmp[2], shuf_xpose_hi);
+ spe_shufb(f, frag[1], tmp[0], tmp[2], shuf_xpose_lo);
+ spe_shufb(f, frag[2], tmp[1], tmp[3], shuf_xpose_hi);
+ spe_shufb(f, frag[3], tmp[1], tmp[3], shuf_xpose_lo);
+
+ spe_cfltu(f, frag[0], frag[0], 32);
+ spe_cfltu(f, frag[1], frag[1], 32);
+ spe_cfltu(f, frag[2], frag[2], 32);
+ spe_cfltu(f, frag[3], frag[3], 32);
+
+ spe_shufb(f, frag[0], frag[0], pixel[0], shuf_color);
+ spe_shufb(f, frag[1], frag[1], pixel[1], shuf_color);
+ spe_shufb(f, frag[2], frag[2], pixel[2], shuf_color);
+ spe_shufb(f, frag[3], frag[3], pixel[3], shuf_color);
+
+
+ /* If logic op is enabled, perform the requested logical operation on the
+ * converted fragment colors and the pixel colors.
+ */
+ switch (logic_op) {
+ case PIPE_LOGICOP_CLEAR:
+ spe_il(f, frag[0], 0);
+ spe_il(f, frag[1], 0);
+ spe_il(f, frag[2], 0);
+ spe_il(f, frag[3], 0);
+ break;
+ case PIPE_LOGICOP_NOR:
+ spe_nor(f, frag[0], frag[0], pixel[0]);
+ spe_nor(f, frag[1], frag[1], pixel[1]);
+ spe_nor(f, frag[2], frag[2], pixel[2]);
+ spe_nor(f, frag[3], frag[3], pixel[3]);
+ break;
+ case PIPE_LOGICOP_AND_INVERTED:
+ spe_andc(f, frag[0], pixel[0], frag[0]);
+ spe_andc(f, frag[1], pixel[1], frag[1]);
+ spe_andc(f, frag[2], pixel[2], frag[2]);
+ spe_andc(f, frag[3], pixel[3], frag[3]);
+ break;
+ case PIPE_LOGICOP_COPY_INVERTED:
+ spe_nor(f, frag[0], frag[0], frag[0]);
+ spe_nor(f, frag[1], frag[1], frag[1]);
+ spe_nor(f, frag[2], frag[2], frag[2]);
+ spe_nor(f, frag[3], frag[3], frag[3]);
+ break;
+ case PIPE_LOGICOP_AND_REVERSE:
+ spe_andc(f, frag[0], frag[0], pixel[0]);
+ spe_andc(f, frag[1], frag[1], pixel[1]);
+ spe_andc(f, frag[2], frag[2], pixel[2]);
+ spe_andc(f, frag[3], frag[3], pixel[3]);
+ break;
+ case PIPE_LOGICOP_XOR:
+ spe_xor(f, frag[0], frag[0], pixel[0]);
+ spe_xor(f, frag[1], frag[1], pixel[1]);
+ spe_xor(f, frag[2], frag[2], pixel[2]);
+ spe_xor(f, frag[3], frag[3], pixel[3]);
+ break;
+ case PIPE_LOGICOP_NAND:
+ spe_nand(f, frag[0], frag[0], pixel[0]);
+ spe_nand(f, frag[1], frag[1], pixel[1]);
+ spe_nand(f, frag[2], frag[2], pixel[2]);
+ spe_nand(f, frag[3], frag[3], pixel[3]);
+ break;
+ case PIPE_LOGICOP_AND:
+ spe_and(f, frag[0], frag[0], pixel[0]);
+ spe_and(f, frag[1], frag[1], pixel[1]);
+ spe_and(f, frag[2], frag[2], pixel[2]);
+ spe_and(f, frag[3], frag[3], pixel[3]);
+ break;
+ case PIPE_LOGICOP_EQUIV:
+ spe_eqv(f, frag[0], frag[0], pixel[0]);
+ spe_eqv(f, frag[1], frag[1], pixel[1]);
+ spe_eqv(f, frag[2], frag[2], pixel[2]);
+ spe_eqv(f, frag[3], frag[3], pixel[3]);
+ break;
+ case PIPE_LOGICOP_OR_INVERTED:
+ spe_orc(f, frag[0], pixel[0], frag[0]);
+ spe_orc(f, frag[1], pixel[1], frag[1]);
+ spe_orc(f, frag[2], pixel[2], frag[2]);
+ spe_orc(f, frag[3], pixel[3], frag[3]);
+ break;
+ case PIPE_LOGICOP_COPY:
+ break;
+ case PIPE_LOGICOP_OR_REVERSE:
+ spe_orc(f, frag[0], frag[0], pixel[0]);
+ spe_orc(f, frag[1], frag[1], pixel[1]);
+ spe_orc(f, frag[2], frag[2], pixel[2]);
+ spe_orc(f, frag[3], frag[3], pixel[3]);
+ break;
+ case PIPE_LOGICOP_OR:
+ spe_or(f, frag[0], frag[0], pixel[0]);
+ spe_or(f, frag[1], frag[1], pixel[1]);
+ spe_or(f, frag[2], frag[2], pixel[2]);
+ spe_or(f, frag[3], frag[3], pixel[3]);
+ break;
+ case PIPE_LOGICOP_SET:
+ spe_il(f, frag[0], ~0);
+ spe_il(f, frag[1], ~0);
+ spe_il(f, frag[2], ~0);
+ spe_il(f, frag[3], ~0);
+ break;
+
+ /* These two cases are short-circuited above.
+ */
+ case PIPE_LOGICOP_INVERT:
+ case PIPE_LOGICOP_NOOP:
+ default:
+ assert(0);
+ }
+
+
+ /* Apply fragment mask.
+ */
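+ /* The ilh/shufb pairs replicate one byte of each 32-bit fragment-mask
+  * word (bytes 0x00, 0x04, 0x08, 0x0c) across a whole quadword, assuming
+  * the mask words are either 0 or ~0, so the spe_selb() calls below take
+  * the new fragment color where the mask is set and keep the existing
+  * pixel otherwise.
+  */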
+ spe_ilh(f, tmp[0], 0x0000);
+ spe_ilh(f, tmp[1], 0x0404);
+ spe_ilh(f, tmp[2], 0x0808);
+ spe_ilh(f, tmp[3], 0x0c0c);
+
+ spe_shufb(f, tmp[0], mask, mask, tmp[0]);
+ spe_shufb(f, tmp[1], mask, mask, tmp[1]);
+ spe_shufb(f, tmp[2], mask, mask, tmp[2]);
+ spe_shufb(f, tmp[3], mask, mask, tmp[3]);
+
+ spe_selb(f, pixel[0], pixel[0], frag[0], tmp[0]);
+ spe_selb(f, pixel[1], pixel[1], frag[1], tmp[1]);
+ spe_selb(f, pixel[2], pixel[2], frag[2], tmp[2]);
+ spe_selb(f, pixel[3], pixel[3], frag[3], tmp[3]);
+
+ spe_bi(f, 0, 0, 0);
+
+#if 0
+ {
+ const uint32_t *p = f->store;
+ unsigned i;
+
+ printf("# %u instructions\n", f->csr - f->store);
+
+ printf("\t.text\n");
+ for (i = 0; i < 64; i++) {
+ printf("\t.long\t0x%04x\n", p[i]);
+ }
+ fflush(stdout);
+ }
+#endif
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_state_per_fragment.h b/src/gallium/drivers/cell/ppu/cell_state_per_fragment.h
new file mode 100644
index 0000000000..a8267a5133
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_state_per_fragment.h
@@ -0,0 +1,39 @@
+/*
+ * (C) Copyright IBM Corporation 2008
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef CELL_STATE_PER_FRAGMENT_H
+#define CELL_STATE_PER_FRAGMENT_H
+
+extern void
+cell_generate_depth_stencil_test(struct cell_depth_stencil_alpha_state *cdsa);
+
+extern void
+cell_generate_alpha_blend(struct cell_blend_state *cb);
+
+extern void
+cell_generate_logic_op(struct spe_function *f,
+ const struct pipe_blend_state *blend,
+ struct pipe_surface *surf);
+
+#endif /* CELL_STATE_PER_FRAGMENT_H */
diff --git a/src/gallium/drivers/cell/ppu/cell_state_shader.c b/src/gallium/drivers/cell/ppu/cell_state_shader.c
new file mode 100644
index 0000000000..6568c784fe
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_state_shader.c
@@ -0,0 +1,217 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "pipe/p_inlines.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "draw/draw_context.h"
+#include "tgsi/tgsi_parse.h"
+
+#include "cell_context.h"
+#include "cell_state.h"
+#include "cell_gen_fp.h"
+
+
+/** cast wrapper */
+static INLINE struct cell_fragment_shader_state *
+cell_fragment_shader_state(void *shader)
+{
+ return (struct cell_fragment_shader_state *) shader;
+}
+
+
+/** cast wrapper */
+static INLINE struct cell_vertex_shader_state *
+cell_vertex_shader_state(void *shader)
+{
+ return (struct cell_vertex_shader_state *) shader;
+}
+
+
+/**
+ * Create fragment shader state.
+ * Called via pipe->create_fs_state()
+ */
+static void *
+cell_create_fs_state(struct pipe_context *pipe,
+ const struct pipe_shader_state *templ)
+{
+ struct cell_context *cell = cell_context(pipe);
+ struct cell_fragment_shader_state *cfs;
+
+ cfs = CALLOC_STRUCT(cell_fragment_shader_state);
+ if (!cfs)
+ return NULL;
+
+ cfs->shader.tokens = tgsi_dup_tokens(templ->tokens);
+ if (!cfs->shader.tokens) {
+ FREE(cfs);
+ return NULL;
+ }
+
+ tgsi_scan_shader(templ->tokens, &cfs->info);
+
+ cell_gen_fragment_program(cell, cfs->shader.tokens, &cfs->code);
+
+ return cfs;
+}
+
+
+/**
+ * Called via pipe->bind_fs_state()
+ */
+static void
+cell_bind_fs_state(struct pipe_context *pipe, void *fs)
+{
+ struct cell_context *cell = cell_context(pipe);
+
+ cell->fs = cell_fragment_shader_state(fs);
+
+ cell->dirty |= CELL_NEW_FS;
+}
+
+
+/**
+ * Called via pipe->delete_fs_state()
+ */
+static void
+cell_delete_fs_state(struct pipe_context *pipe, void *fs)
+{
+ struct cell_fragment_shader_state *cfs = cell_fragment_shader_state(fs);
+
+ spe_release_func(&cfs->code);
+
+ FREE((void *) cfs->shader.tokens);
+ FREE(cfs);
+}
+
+
+/**
+ * Create vertex shader state.
+ * Called via pipe->create_vs_state()
+ */
+static void *
+cell_create_vs_state(struct pipe_context *pipe,
+ const struct pipe_shader_state *templ)
+{
+ struct cell_context *cell = cell_context(pipe);
+ struct cell_vertex_shader_state *cvs;
+
+ cvs = CALLOC_STRUCT(cell_vertex_shader_state);
+ if (!cvs)
+ return NULL;
+
+ cvs->shader.tokens = tgsi_dup_tokens(templ->tokens);
+ if (!cvs->shader.tokens) {
+ FREE(cvs);
+ return NULL;
+ }
+
+ tgsi_scan_shader(templ->tokens, &cvs->info);
+
+ cvs->draw_data = draw_create_vertex_shader(cell->draw, &cvs->shader);
+ if (cvs->draw_data == NULL) {
+ FREE( (void *) cvs->shader.tokens );
+ FREE( cvs );
+ return NULL;
+ }
+
+ return cvs;
+}
+
+
+/**
+ * Called via pipe->bind_vs_state()
+ */
+static void
+cell_bind_vs_state(struct pipe_context *pipe, void *vs)
+{
+ struct cell_context *cell = cell_context(pipe);
+
+ cell->vs = cell_vertex_shader_state(vs);
+
+ draw_bind_vertex_shader(cell->draw,
+ (cell->vs ? cell->vs->draw_data : NULL));
+
+ cell->dirty |= CELL_NEW_VS;
+}
+
+
+/**
+ * Called via pipe->delete_vs_state()
+ */
+static void
+cell_delete_vs_state(struct pipe_context *pipe, void *vs)
+{
+ struct cell_context *cell = cell_context(pipe);
+ struct cell_vertex_shader_state *cvs = cell_vertex_shader_state(vs);
+
+ draw_delete_vertex_shader(cell->draw, cvs->draw_data);
+ FREE( (void *) cvs->shader.tokens );
+ FREE( cvs );
+}
+
+
+/**
+ * Called via pipe->set_constant_buffer()
+ */
+static void
+cell_set_constant_buffer(struct pipe_context *pipe,
+ uint shader, uint index,
+ const struct pipe_constant_buffer *buf)
+{
+ struct cell_context *cell = cell_context(pipe);
+
+ assert(shader < PIPE_SHADER_TYPES);
+ assert(index == 0);
+
+ draw_flush(cell->draw);
+
+ /* note: reference counting */
+ pipe_buffer_reference(&cell->constants[shader].buffer, buf->buffer);
+
+ if (shader == PIPE_SHADER_VERTEX)
+ cell->dirty |= CELL_NEW_VS_CONSTANTS;
+ else if (shader == PIPE_SHADER_FRAGMENT)
+ cell->dirty |= CELL_NEW_FS_CONSTANTS;
+}
+
+
+void
+cell_init_shader_functions(struct cell_context *cell)
+{
+ cell->pipe.create_fs_state = cell_create_fs_state;
+ cell->pipe.bind_fs_state = cell_bind_fs_state;
+ cell->pipe.delete_fs_state = cell_delete_fs_state;
+
+ cell->pipe.create_vs_state = cell_create_vs_state;
+ cell->pipe.bind_vs_state = cell_bind_vs_state;
+ cell->pipe.delete_vs_state = cell_delete_vs_state;
+
+ cell->pipe.set_constant_buffer = cell_set_constant_buffer;
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_state_vertex.c b/src/gallium/drivers/cell/ppu/cell_state_vertex.c
new file mode 100644
index 0000000000..fbe55c8472
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_state_vertex.c
@@ -0,0 +1,79 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "cell_context.h"
+#include "cell_state.h"
+
+#include "draw/draw_context.h"
+
+
+static void
+cell_set_vertex_elements(struct pipe_context *pipe,
+ unsigned count,
+ const struct pipe_vertex_element *elements)
+{
+ struct cell_context *cell = cell_context(pipe);
+
+ assert(count <= PIPE_MAX_ATTRIBS);
+
+ memcpy(cell->vertex_element, elements, count * sizeof(elements[0]));
+ cell->num_vertex_elements = count;
+
+ cell->dirty |= CELL_NEW_VERTEX;
+
+ draw_set_vertex_elements(cell->draw, count, elements);
+}
+
+
+static void
+cell_set_vertex_buffers(struct pipe_context *pipe,
+ unsigned count,
+ const struct pipe_vertex_buffer *buffers)
+{
+ struct cell_context *cell = cell_context(pipe);
+
+ assert(count <= PIPE_MAX_ATTRIBS);
+
+ memcpy(cell->vertex_buffer, buffers, count * sizeof(buffers[0]));
+ cell->num_vertex_buffers = count;
+
+ cell->dirty |= CELL_NEW_VERTEX;
+
+ draw_set_vertex_buffers(cell->draw, count, buffers);
+}
+
+
+void
+cell_init_vertex_functions(struct cell_context *cell)
+{
+ cell->pipe.set_vertex_buffers = cell_set_vertex_buffers;
+ cell->pipe.set_vertex_elements = cell_set_vertex_elements;
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_surface.c b/src/gallium/drivers/cell/ppu/cell_surface.c
new file mode 100644
index 0000000000..ffb8595d82
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_surface.c
@@ -0,0 +1,50 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_rect.h"
+#include "cell_context.h"
+#include "cell_surface.h"
+
+
+static void
+cell_surface_copy(struct pipe_context *pipe,
+ struct pipe_surface *dest, unsigned destx, unsigned desty,
+ struct pipe_surface *src, unsigned srcx, unsigned srcy,
+ unsigned width, unsigned height)
+{
+ util_surface_copy(pipe, FALSE,
+ dest, destx, desty,
+ src, srcx, srcy,
+ width, height);
+}
+
+void
+cell_init_surface_functions(struct cell_context *cell)
+{
+ cell->pipe.surface_copy = cell_surface_copy;
+ cell->pipe.surface_fill = util_surface_fill;
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_surface.h b/src/gallium/drivers/cell/ppu/cell_surface.h
new file mode 100644
index 0000000000..9e58f32944
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_surface.h
@@ -0,0 +1,42 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef CELL_SURFACE_H
+#define CELL_SURFACE_H
+
+
+struct cell_context;
+
+
+extern void
+cell_init_surface_functions(struct cell_context *cell);
+
+
+#endif /* CELL_SURFACE_H */
diff --git a/src/gallium/drivers/cell/ppu/cell_texture.c b/src/gallium/drivers/cell/ppu/cell_texture.c
new file mode 100644
index 0000000000..6a63a0e6ce
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_texture.c
@@ -0,0 +1,497 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Michel Dänzer <michel@tungstengraphics.com>
+ * Brian Paul
+ */
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+#include "cell_context.h"
+#include "cell_state.h"
+#include "cell_texture.h"
+
+
+
+static void
+cell_texture_layout(struct cell_texture *ct)
+{
+ struct pipe_texture *pt = &ct->base;
+ unsigned level;
+ unsigned width = pt->width[0];
+ unsigned height = pt->height[0];
+ unsigned depth = pt->depth[0];
+
+ ct->buffer_size = 0;
+
+ for (level = 0; level <= pt->last_level; level++) {
+ unsigned size;
+ unsigned w_tile, h_tile;
+
+ assert(level < CELL_MAX_TEXTURE_LEVELS);
+
+ /* width, height, rounded up to tile size */
+ w_tile = align(width, TILE_SIZE);
+ h_tile = align(height, TILE_SIZE);
+
+ pt->width[level] = width;
+ pt->height[level] = height;
+ pt->depth[level] = depth;
+ pt->nblocksx[level] = pf_get_nblocksx(&pt->block, w_tile);
+ pt->nblocksy[level] = pf_get_nblocksy(&pt->block, h_tile);
+
+ ct->stride[level] = pt->nblocksx[level] * pt->block.size;
+
+ ct->level_offset[level] = ct->buffer_size;
+
+ size = pt->nblocksx[level] * pt->nblocksy[level] * pt->block.size;
+ if (pt->target == PIPE_TEXTURE_CUBE)
+ size *= 6;
+ else
+ size *= depth;
+
+ ct->buffer_size += size;
+
+ width = minify(width);
+ height = minify(height);
+ depth = minify(depth);
+ }
+}
+
+
+static struct pipe_texture *
+cell_texture_create(struct pipe_screen *screen,
+ const struct pipe_texture *templat)
+{
+ struct cell_texture *ct = CALLOC_STRUCT(cell_texture);
+ if (!ct)
+ return NULL;
+
+ ct->base = *templat;
+ pipe_reference_init(&ct->base.reference, 1);
+ ct->base.screen = screen;
+
+ cell_texture_layout(ct);
+
+ ct->buffer = screen->buffer_create(screen, 32, PIPE_BUFFER_USAGE_PIXEL,
+ ct->buffer_size);
+
+ if (!ct->buffer) {
+ FREE(ct);
+ return NULL;
+ }
+
+ return &ct->base;
+}
+
+
+static void
+cell_texture_destroy(struct pipe_texture *pt)
+{
+ struct cell_texture *ct = cell_texture(pt);
+
+ if (ct->mapped) {
+ pipe_buffer_unmap(ct->buffer->screen, ct->buffer);
+ ct->mapped = NULL;
+ }
+
+ pipe_buffer_reference(&ct->buffer, NULL);
+
+ FREE(ct);
+}
+
+
+
+/**
+ * Convert image from linear layout to tiled layout. 4-byte pixels.
+ */
+static void
+twiddle_image_uint(uint w, uint h, uint tile_size, uint *dst,
+ uint src_stride, const uint *src)
+{
+ const uint tile_size2 = tile_size * tile_size;
+ const uint h_t = (h + tile_size - 1) / tile_size;
+ const uint w_t = (w + tile_size - 1) / tile_size;
+
+ uint it, jt; /* tile counters */
+ uint i, j; /* intra-tile counters */
+
+ src_stride /= 4; /* convert from bytes to pixels */
+
+ /* loop over dest tiles */
+ for (it = 0; it < h_t; it++) {
+ for (jt = 0; jt < w_t; jt++) {
+ /* start of dest tile: */
+ uint *tdst = dst + (it * w_t + jt) * tile_size2;
+
+ /* compute size of this tile (may be smaller than tile_size) */
+ /* XXX note: a compiler bug was found here. That's why the code
+ * looks as it does.
+ */
+ uint tile_width = w - jt * tile_size;
+ tile_width = MIN2(tile_width, tile_size);
+ uint tile_height = h - it * tile_size;
+ tile_height = MIN2(tile_height, tile_size);
+
+ /* loop over texels in the tile */
+ for (i = 0; i < tile_height; i++) {
+ for (j = 0; j < tile_width; j++) {
+ const uint srci = it * tile_size + i;
+ const uint srcj = jt * tile_size + j;
+ ASSERT(srci < h);
+ ASSERT(srcj < w);
+ tdst[i * tile_size + j] = src[srci * src_stride + srcj];
+ }
+ }
+ }
+ }
+}
+
+
+/**
+ * For Cell. Basically, rearrange the pixels/quads from this layout:
+ * +--+--+--+--+
+ * |p0|p1|p2|p3|....
+ * +--+--+--+--+
+ *
+ * to this layout:
+ * +--+--+
+ * |p0|p1|....
+ * +--+--+
+ * |p2|p3|
+ * +--+--+
+ */
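+/* For example, the first four input quads tileIn[0..3] end up at
+ * tileOut[0], tileOut[1], tileOut[TILE_SIZE] and tileOut[TILE_SIZE + 1],
+ * i.e. as a 2x2 block in the output tile.
+ */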
+static void
+twiddle_tile(const uint *tileIn, uint *tileOut)
+{
+ int y, x;
+
+ for (y = 0; y < TILE_SIZE; y+=2) {
+ for (x = 0; x < TILE_SIZE; x+=2) {
+ int k = 4 * (y/2 * TILE_SIZE/2 + x/2);
+ tileOut[y * TILE_SIZE + (x + 0)] = tileIn[k];
+ tileOut[y * TILE_SIZE + (x + 1)] = tileIn[k+1];
+ tileOut[(y + 1) * TILE_SIZE + (x + 0)] = tileIn[k+2];
+ tileOut[(y + 1) * TILE_SIZE + (x + 1)] = tileIn[k+3];
+ }
+ }
+}
+
+
+/**
+ * Convert image from tiled layout to linear layout. 4-byte pixels.
+ */
+static void
+untwiddle_image_uint(uint w, uint h, uint tile_size, uint *dst,
+ uint dst_stride, const uint *src)
+{
+ const uint tile_size2 = tile_size * tile_size;
+ const uint h_t = (h + tile_size - 1) / tile_size;
+ const uint w_t = (w + tile_size - 1) / tile_size;
+ uint *tile_buf;
+ uint it, jt; /* tile counters */
+ uint i, j; /* intra-tile counters */
+
+ dst_stride /= 4; /* convert from bytes to pixels */
+
+ tile_buf = align_malloc(tile_size * tile_size * 4, 16);
+
+ /* loop over src tiles */
+ for (it = 0; it < h_t; it++) {
+ for (jt = 0; jt < w_t; jt++) {
+ /* start of src tile: */
+ const uint *tsrc = src + (it * w_t + jt) * tile_size2;
+
+ twiddle_tile(tsrc, tile_buf);
+ tsrc = tile_buf;
+
+ /* compute size of this tile (may be smaller than tile_size) */
+ /* XXX note: a compiler bug was found here. That's why the code
+ * looks as it does.
+ */
+ uint tile_width = w - jt * tile_size;
+ tile_width = MIN2(tile_width, tile_size);
+ uint tile_height = h - it * tile_size;
+ tile_height = MIN2(tile_height, tile_size);
+
+ /* loop over texels in the tile */
+ for (i = 0; i < tile_height; i++) {
+ for (j = 0; j < tile_width; j++) {
+ uint dsti = it * tile_size + i;
+ uint dstj = jt * tile_size + j;
+ ASSERT(dsti < h);
+ ASSERT(dstj < w);
+ dst[dsti * dst_stride + dstj] = tsrc[i * tile_size + j];
+ }
+ }
+ }
+ }
+
+ align_free(tile_buf);
+}
+
+
+static struct pipe_surface *
+cell_get_tex_surface(struct pipe_screen *screen,
+ struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ unsigned usage)
+{
+ struct cell_texture *ct = cell_texture(pt);
+ struct pipe_surface *ps;
+
+ ps = CALLOC_STRUCT(pipe_surface);
+ if (ps) {
+ pipe_reference_init(&ps->reference, 1);
+ pipe_texture_reference(&ps->texture, pt);
+ ps->format = pt->format;
+ ps->width = pt->width[level];
+ ps->height = pt->height[level];
+ ps->offset = ct->level_offset[level];
+ /* XXX may need to override usage flags (see sp_texture.c) */
+ ps->usage = usage;
+ ps->face = face;
+ ps->level = level;
+ ps->zslice = zslice;
+
+ if (pt->target == PIPE_TEXTURE_CUBE) {
+ ps->offset += face * pt->nblocksy[level] * ct->stride[level];
+ }
+ else if (pt->target == PIPE_TEXTURE_3D) {
+ ps->offset += zslice * pt->nblocksy[level] * ct->stride[level];
+ }
+ else {
+ assert(face == 0);
+ assert(zslice == 0);
+ }
+ }
+ return ps;
+}
+
+
+static void
+cell_tex_surface_destroy(struct pipe_surface *surf)
+{
+ pipe_texture_reference(&surf->texture, NULL);
+ FREE(surf);
+}
+
+
+/**
+ * Create new pipe_transfer object.
+ * This is used by the user to put tex data into a texture (and get it
+ * back out for glGetTexImage).
+ */
+static struct pipe_transfer *
+cell_get_tex_transfer(struct pipe_screen *screen,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level, unsigned zslice,
+ enum pipe_transfer_usage usage,
+ unsigned x, unsigned y, unsigned w, unsigned h)
+{
+ struct cell_texture *ct = cell_texture(texture);
+ struct cell_transfer *ctrans;
+
+ assert(texture);
+ assert(level <= texture->last_level);
+
+ ctrans = CALLOC_STRUCT(cell_transfer);
+ if (ctrans) {
+ struct pipe_transfer *pt = &ctrans->base;
+ pipe_texture_reference(&pt->texture, texture);
+ pt->format = texture->format;
+ pt->block = texture->block;
+ pt->x = x;
+ pt->y = y;
+ pt->width = w;
+ pt->height = h;
+ pt->nblocksx = texture->nblocksx[level];
+ pt->nblocksy = texture->nblocksy[level];
+ pt->stride = ct->stride[level];
+ pt->usage = usage;
+ pt->face = face;
+ pt->level = level;
+ pt->zslice = zslice;
+
+ ctrans->offset = ct->level_offset[level];
+
+ if (texture->target == PIPE_TEXTURE_CUBE) {
+ ctrans->offset += face * pt->nblocksy * pt->stride;
+ }
+ else if (texture->target == PIPE_TEXTURE_3D) {
+ ctrans->offset += zslice * pt->nblocksy * pt->stride;
+ }
+ else {
+ assert(face == 0);
+ assert(zslice == 0);
+ }
+ return pt;
+ }
+ return NULL;
+}
+
+
+static void
+cell_tex_transfer_destroy(struct pipe_transfer *t)
+{
+ struct cell_transfer *transfer = cell_transfer(t);
+ /* Effectively do the texture_update work here - if texture images
+ * needed post-processing to put them into hardware layout, this is
+ * where it would happen. For cell, nothing to do.
+ */
+ assert (transfer->base.texture);
+ pipe_texture_reference(&transfer->base.texture, NULL);
+ FREE(transfer);
+}
+
+
+/**
+ * Return pointer to texture image data in linear layout.
+ */
+static void *
+cell_transfer_map(struct pipe_screen *screen, struct pipe_transfer *transfer)
+{
+ struct cell_transfer *ctrans = cell_transfer(transfer);
+ struct pipe_texture *pt = transfer->texture;
+ struct cell_texture *ct = cell_texture(pt);
+ const uint level = ctrans->base.level;
+ const uint texWidth = pt->width[level];
+ const uint texHeight = pt->height[level];
+ const uint stride = ct->stride[level];
+ unsigned flags = 0x0;
+ unsigned size;
+
+ assert(transfer->texture);
+
+ if (transfer->usage != PIPE_TRANSFER_READ) {
+ flags |= PIPE_BUFFER_USAGE_CPU_WRITE;
+ }
+
+ if (transfer->usage != PIPE_TRANSFER_WRITE) {
+ flags |= PIPE_BUFFER_USAGE_CPU_READ;
+ }
+
+ if (!ct->mapped) {
+ /* map now */
+ ct->mapped = pipe_buffer_map(screen, ct->buffer, flags);
+ }
+
+ /*
+ * Create a buffer of ordinary memory for the linear texture.
+ * This is the memory that the user will read/write.
+ */
+ size = pt->nblocksx[level] * pt->nblocksy[level] * pt->block.size;
+
+ ctrans->map = align_malloc(size, 16);
+ if (!ctrans->map)
+ return NULL; /* out of memory */
+
+ if (transfer->usage == PIPE_TRANSFER_READ ||
+ transfer->usage == PIPE_TRANSFER_READ_WRITE) {
+ /* need to untwiddle the texture to make a linear version */
+ const uint bpp = pf_get_size(ct->base.format);
+ if (bpp == 4) {
+ const uint *src = (uint *) (ct->mapped + ctrans->offset);
+ uint *dst = ctrans->map;
+ untwiddle_image_uint(texWidth, texHeight, TILE_SIZE,
+ dst, stride, src);
+ }
+ else {
+ // xxx fix
+ }
+ }
+
+ return ctrans->map;
+}
+
+
+/**
+ * Called when user is done reading/writing texture data.
+ * If new data was written, this is where we convert the linear data
+ * to tiled data.
+ */
+static void
+cell_transfer_unmap(struct pipe_screen *screen,
+ struct pipe_transfer *transfer)
+{
+ struct cell_transfer *ctrans = cell_transfer(transfer);
+ struct pipe_texture *pt = transfer->texture;
+ struct cell_texture *ct = cell_texture(pt);
+ const uint level = ctrans->base.level;
+ const uint texWidth = pt->width[level];
+ const uint texHeight = pt->height[level];
+ const uint stride = ct->stride[level];
+
+ if (!ct->mapped) {
+ /* map now */
+ ct->mapped = pipe_buffer_map(screen, ct->buffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ }
+
+ if (transfer->usage == PIPE_TRANSFER_WRITE ||
+ transfer->usage == PIPE_TRANSFER_READ_WRITE) {
+ /* The user wrote new texture data into the mapped buffer.
+ * We need to convert the new linear data into the twiddled/tiled format.
+ */
+ const uint bpp = pf_get_size(ct->base.format);
+ if (bpp == 4) {
+ const uint *src = ctrans->map;
+ uint *dst = (uint *) (ct->mapped + ctrans->offset);
+ twiddle_image_uint(texWidth, texHeight, TILE_SIZE, dst, stride, src);
+ }
+ else {
+ // xxx fix
+ }
+ }
+
+ align_free(ctrans->map);
+ ctrans->map = NULL;
+}
+
+
+void
+cell_init_screen_texture_funcs(struct pipe_screen *screen)
+{
+ screen->texture_create = cell_texture_create;
+ screen->texture_destroy = cell_texture_destroy;
+
+ screen->get_tex_surface = cell_get_tex_surface;
+ screen->tex_surface_destroy = cell_tex_surface_destroy;
+
+ screen->get_tex_transfer = cell_get_tex_transfer;
+ screen->tex_transfer_destroy = cell_tex_transfer_destroy;
+
+ screen->transfer_map = cell_transfer_map;
+ screen->transfer_unmap = cell_transfer_unmap;
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_texture.h b/src/gallium/drivers/cell/ppu/cell_texture.h
new file mode 100644
index 0000000000..3ffc0bfdb5
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_texture.h
@@ -0,0 +1,86 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef CELL_TEXTURE_H
+#define CELL_TEXTURE_H
+
+
+struct cell_context;
+struct pipe_texture;
+
+
+/**
+ * Subclass of pipe_texture
+ */
+struct cell_texture
+{
+ struct pipe_texture base;
+
+ unsigned long level_offset[CELL_MAX_TEXTURE_LEVELS];
+ unsigned long stride[CELL_MAX_TEXTURE_LEVELS];
+
+ /** The tiled texture data is held in this buffer */
+ struct pipe_buffer *buffer;
+ unsigned long buffer_size;
+
+ /** The buffer above, mapped. This is the memory from which the
+ * SPUs will fetch texels. This texture data is in the tiled layout.
+ */
+ ubyte *mapped;
+};
+
+
+struct cell_transfer
+{
+ struct pipe_transfer base;
+
+ unsigned long offset;
+ void *map;
+};
+
+
+/** cast wrapper */
+static INLINE struct cell_texture *
+cell_texture(struct pipe_texture *pt)
+{
+ return (struct cell_texture *) pt;
+}
+
+
+/** cast wrapper */
+static INLINE struct cell_transfer *
+cell_transfer(struct pipe_transfer *pt)
+{
+ return (struct cell_transfer *) pt;
+}
+
+
+extern void
+cell_init_screen_texture_funcs(struct pipe_screen *screen);
+
+
+#endif /* CELL_TEXTURE_H */
diff --git a/src/gallium/drivers/cell/ppu/cell_vbuf.c b/src/gallium/drivers/cell/ppu/cell_vbuf.c
new file mode 100644
index 0000000000..cfaffb52a8
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_vbuf.c
@@ -0,0 +1,332 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Vertex buffer code. The draw module transforms vertices to window
+ * coords, etc. and emits the vertices into a buffer supplied by this module.
+ * When a vertex buffer is full, or we flush, we'll send the vertex data
+ * to the SPUs.
+ *
+ * Authors
+ * Brian Paul
+ */
+
+
+#include "cell_batch.h"
+#include "cell_context.h"
+#include "cell_fence.h"
+#include "cell_flush.h"
+#include "cell_spu.h"
+#include "cell_vbuf.h"
+#include "draw/draw_vbuf.h"
+#include "util/u_memory.h"
+
+
+/** Allow vertex data to be inlined after RENDER command */
+#define ALLOW_INLINE_VERTS 1
+
+
+/**
+ * Subclass of vbuf_render because we need a cell_context pointer in
+ * a few places.
+ */
+struct cell_vbuf_render
+{
+ struct vbuf_render base;
+ struct cell_context *cell;
+ uint prim; /**< PIPE_PRIM_x */
+ uint vertex_size; /**< in bytes */
+ void *vertex_buffer; /**< just for debug, really */
+ uint vertex_buf; /**< in [0, CELL_NUM_BUFFERS-1] */
+ uint vertex_buffer_size; /**< size in bytes */
+};
+
+
+/** cast wrapper */
+static struct cell_vbuf_render *
+cell_vbuf_render(struct vbuf_render *vbr)
+{
+ return (struct cell_vbuf_render *) vbr;
+}
+
+
+
+static const struct vertex_info *
+cell_vbuf_get_vertex_info(struct vbuf_render *vbr)
+{
+ struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
+ return &cvbr->cell->vertex_info;
+}
+
+
+static boolean
+cell_vbuf_allocate_vertices(struct vbuf_render *vbr,
+ ushort vertex_size, ushort nr_vertices)
+{
+ struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
+ unsigned size = vertex_size * nr_vertices;
+ /*printf("Alloc verts %u * %u\n", vertex_size, nr_vertices);*/
+
+ assert(cvbr->vertex_buf == ~0);
+ cvbr->vertex_buf = cell_get_empty_buffer(cvbr->cell);
+ cvbr->vertex_buffer = cvbr->cell->buffer[cvbr->vertex_buf];
+ cvbr->vertex_buffer_size = size;
+ cvbr->vertex_size = vertex_size;
+
+ return cvbr->vertex_buffer != NULL;
+}
+
+
+static void
+cell_vbuf_release_vertices(struct vbuf_render *vbr)
+{
+ struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
+ struct cell_context *cell = cvbr->cell;
+
+ /*
+ printf("%s vertex_buf = %u count = %u\n",
+ __FUNCTION__, cvbr->vertex_buf, vertices_used);
+ */
+
+ /* Make sure texture buffers aren't released until we're done rendering
+ * with them.
+ */
+ cell_add_fenced_textures(cell);
+
+ /* Tell SPUs they can release the vert buf */
+ if (cvbr->vertex_buf != ~0U) {
+ STATIC_ASSERT(sizeof(struct cell_command_release_verts) % 16 == 0);
+ struct cell_command_release_verts *release
+ = (struct cell_command_release_verts *)
+ cell_batch_alloc16(cell, sizeof(struct cell_command_release_verts));
+ release->opcode[0] = CELL_CMD_RELEASE_VERTS;
+ release->vertex_buf = cvbr->vertex_buf;
+ }
+
+ cvbr->vertex_buf = ~0;
+ cell_flush_int(cell, 0x0);
+
+ cvbr->vertex_buffer = NULL;
+}
+
+
+static void *
+cell_vbuf_map_vertices(struct vbuf_render *vbr)
+{
+ struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
+ return cvbr->vertex_buffer;
+}
+
+
+static void
+cell_vbuf_unmap_vertices(struct vbuf_render *vbr,
+ ushort min_index,
+ ushort max_index )
+{
+ struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
+ assert( cvbr->vertex_buffer_size >= (max_index+1) * cvbr->vertex_size );
+ /* do nothing */
+}
+
+
+
+static boolean
+cell_vbuf_set_primitive(struct vbuf_render *vbr, unsigned prim)
+{
+ struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
+ cvbr->prim = prim;
+ /*printf("cell_set_prim %u\n", prim);*/
+ return TRUE;
+}
+
+
+static void
+cell_vbuf_draw(struct vbuf_render *vbr,
+ const ushort *indices,
+ uint nr_indices)
+{
+ struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
+ struct cell_context *cell = cvbr->cell;
+ float xmin, ymin, xmax, ymax;
+ uint i;
+ uint nr_vertices = 0, min_index = ~0;
+ const void *vertices = cvbr->vertex_buffer;
+ const uint vertex_size = cvbr->vertex_size;
+
+ for (i = 0; i < nr_indices; i++) {
+ if (indices[i] > nr_vertices)
+ nr_vertices = indices[i];
+ if (indices[i] < min_index)
+ min_index = indices[i];
+ }
+ nr_vertices++;
+
+#if 0
+ /*if (min_index > 0)*/
+ printf("%s min_index = %u\n", __FUNCTION__, min_index);
+#endif
+
+#if 0
+ printf("cell_vbuf_draw() nr_indices = %u nr_verts = %u\n",
+ nr_indices, nr_vertices);
+ printf(" ");
+ for (i = 0; i < nr_indices; i += 3) {
+ printf("%u %u %u, ", indices[i+0], indices[i+1], indices[i+2]);
+ }
+ printf("\n");
+#elif 0
+ printf("cell_vbuf_draw() nr_indices = %u nr_verts = %u indexes = [%u %u %u ...]\n",
+ nr_indices, nr_vertices,
+ indices[0], indices[1], indices[2]);
+ printf("ind space = %u, vert space = %u, space = %u\n",
+ nr_indices * 2,
+ nr_vertices * 4 * cell->vertex_info.size,
+ cell_batch_free_space(cell));
+#endif
+
+ /* compute x/y bounding box */
+ xmin = ymin = 1e50;
+ xmax = ymax = -1e50;
+ for (i = min_index; i < nr_vertices; i++) {
+ const float *v = (float *) ((ubyte *) vertices + i * vertex_size);
+ if (v[0] < xmin)
+ xmin = v[0];
+ if (v[0] > xmax)
+ xmax = v[0];
+ if (v[1] < ymin)
+ ymin = v[1];
+ if (v[1] > ymax)
+ ymax = v[1];
+ }
+#if 0
+ printf("PPU Bounds %g, %g .. %g, %g\n", xmin, ymin, xmax, ymax);
+ fflush(stdout);
+#endif
+
+ if (cvbr->prim != PIPE_PRIM_TRIANGLES)
+ return; /* only render tris for now */
+
+ /* build/insert batch RENDER command */
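+ /* Rough layout of the batch data built below (a sketch inferred from this
+  * code, not a wire-format spec):
+  *   [cell_command_render][indices, padded to 16 bytes][optional inline verts]
+  */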
+ {
+ const uint index_bytes = ROUNDUP16(nr_indices * 2);
+ const uint vertex_bytes = ROUNDUP16(nr_vertices * 4 * cell->vertex_info.size);
+ STATIC_ASSERT(sizeof(struct cell_command_render) % 16 == 0);
+ const uint batch_size = sizeof(struct cell_command_render) + index_bytes;
+
+ struct cell_command_render *render
+ = (struct cell_command_render *)
+ cell_batch_alloc16(cell, batch_size);
+
+ render->opcode[0] = CELL_CMD_RENDER;
+ render->prim_type = cvbr->prim;
+
+ render->num_indexes = nr_indices;
+ render->min_index = min_index;
+
+ /* append indices after render command */
+ memcpy(render + 1, indices, nr_indices * 2);
+
+ /* if there's room, append vertices after the indices, else leave
+ * vertices in the original/separate buffer.
+ */
+ render->vertex_size = 4 * cell->vertex_info.size;
+ render->num_verts = nr_vertices;
+ if (ALLOW_INLINE_VERTS &&
+ min_index == 0 &&
+ vertex_bytes + 16 <= cell_batch_free_space(cell)) {
+ /* vertex data inlined, after indices, at 16-byte boundary */
+ void *dst = cell_batch_alloc16(cell, vertex_bytes);
+ memcpy(dst, vertices, vertex_bytes);
+ render->inline_verts = TRUE;
+ render->vertex_buf = ~0;
+ }
+ else {
+ /* vertex data in separate buffer */
+ render->inline_verts = FALSE;
+ ASSERT(cvbr->vertex_buf >= 0);
+ render->vertex_buf = cvbr->vertex_buf;
+ }
+
+ render->xmin = xmin;
+ render->ymin = ymin;
+ render->xmax = xmax;
+ render->ymax = ymax;
+ }
+
+#if 0
+ /* helpful for debug */
+ cell_flush_int(cell, CELL_FLUSH_WAIT);
+#endif
+}
+
+
+static void
+cell_vbuf_destroy(struct vbuf_render *vbr)
+{
+ struct cell_vbuf_render *cvbr = cell_vbuf_render(vbr);
+ cvbr->cell->vbuf_render = NULL;
+ FREE(cvbr);
+}
+
+
+/**
+ * Initialize the post-transform vertex buffer information for the given
+ * context.
+ */
+void
+cell_init_vbuf(struct cell_context *cell)
+{
+ assert(cell->draw);
+
+ cell->vbuf_render = CALLOC_STRUCT(cell_vbuf_render);
+
+ /* The max number of indexes is what can fit into a batch buffer,
+ * minus the render and release-verts commands.
+ */
+ cell->vbuf_render->base.max_indices
+ = (CELL_BUFFER_SIZE
+ - sizeof(struct cell_command_render)
+ - sizeof(struct cell_command_release_verts))
+ / sizeof(ushort);
+ cell->vbuf_render->base.max_vertex_buffer_bytes = CELL_BUFFER_SIZE;
+
+ cell->vbuf_render->base.get_vertex_info = cell_vbuf_get_vertex_info;
+ cell->vbuf_render->base.allocate_vertices = cell_vbuf_allocate_vertices;
+ cell->vbuf_render->base.map_vertices = cell_vbuf_map_vertices;
+ cell->vbuf_render->base.unmap_vertices = cell_vbuf_unmap_vertices;
+ cell->vbuf_render->base.set_primitive = cell_vbuf_set_primitive;
+ cell->vbuf_render->base.draw = cell_vbuf_draw;
+ cell->vbuf_render->base.release_vertices = cell_vbuf_release_vertices;
+ cell->vbuf_render->base.destroy = cell_vbuf_destroy;
+
+ cell->vbuf_render->cell = cell;
+#if 1
+ cell->vbuf_render->vertex_buf = ~0;
+#endif
+
+ cell->vbuf = draw_vbuf_stage(cell->draw, &cell->vbuf_render->base);
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_vbuf.h b/src/gallium/drivers/cell/ppu/cell_vbuf.h
new file mode 100644
index 0000000000..d265cbf770
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_vbuf.h
@@ -0,0 +1,38 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef CELL_VBUF_H
+#define CELL_VBUF_H
+
+
+struct cell_context;
+
+extern void
+cell_init_vbuf(struct cell_context *cell);
+
+
+#endif /* CELL_VBUF_H */
diff --git a/src/gallium/drivers/cell/ppu/cell_vertex_fetch.c b/src/gallium/drivers/cell/ppu/cell_vertex_fetch.c
new file mode 100644
index 0000000000..9cba537d9e
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_vertex_fetch.c
@@ -0,0 +1,346 @@
+/*
+ * (C) Copyright IBM Corporation 2008
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <inttypes.h>
+#include "pipe/p_defines.h"
+#include "pipe/p_context.h"
+#include "pipe/p_format.h"
+
+#include "../auxiliary/draw/draw_context.h"
+#include "../auxiliary/draw/draw_private.h"
+
+#include "cell_context.h"
+#include "rtasm/rtasm_ppc_spe.h"
+
+
+/**
+ * Emit a 4x4 matrix transpose operation
+ *
+ * \param p Function that the transpose operation is to be appended to
+ * \param row0 Register containing row 0 of the source matrix
+ * \param row1 Register containing row 1 of the source matrix
+ * \param row2 Register containing row 2 of the source matrix
+ * \param row3 Register containing row 3 of the source matrix
+ * \param dest_ptr Register containing the address of the destination matrix
+ * \param shuf_ptr Register containing the address of the shuffled data
+ * \param count Number of columns actually written to the destination
+ *
+ * \note
+ * This function assumes that the registers named by \c row0, \c row1,
+ * \c row2, and \c row3 are scratch and can be modified by the generated code.
+ * Furthermore, these registers will be released, via calls to
+ * \c release_register, by this function.
+ *
+ * \note
+ * This function requires that four temporary registers be available on entry.
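+ *
+ * \par
+ * Scalar model of the transpose the generated code performs (illustrative
+ * only):
+ * \code
+ *    for (i = 0; i < count; i++)
+ *       for (j = 0; j < 4; j++)
+ *          dest[i][j] = row[j][i];
+ * \endcode
+ * The SPE code instead builds each output column with two rounds of shufb
+ * merges using the shuffle patterns loaded from \c shuf_ptr.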
+ */
+static void
+emit_matrix_transpose(struct spe_function *p,
+ unsigned row0, unsigned row1, unsigned row2,
+ unsigned row3, unsigned dest_ptr,
+ unsigned shuf_ptr, unsigned count)
+{
+ int shuf_hi = spe_allocate_available_register(p);
+ int shuf_lo = spe_allocate_available_register(p);
+ int t1 = spe_allocate_available_register(p);
+ int t2 = spe_allocate_available_register(p);
+ int t3;
+ int t4;
+ int col0;
+ int col1;
+ int col2;
+ int col3;
+
+
+ spe_lqd(p, shuf_hi, shuf_ptr, 3*16);
+ spe_lqd(p, shuf_lo, shuf_ptr, 4*16);
+ spe_shufb(p, t1, row0, row2, shuf_hi);
+ spe_shufb(p, t2, row0, row2, shuf_lo);
+
+
+ /* row0 and row2 are now no longer needed. Re-use those registers as
+ * temporaries.
+ */
+ t3 = row0;
+ t4 = row2;
+
+ spe_shufb(p, t3, row1, row3, shuf_hi);
+ spe_shufb(p, t4, row1, row3, shuf_lo);
+
+
+ /* row1 and row3 are now no longer needed. Re-use those registers as
+ * temporaries.
+ */
+ col0 = row1;
+ col1 = row3;
+
+ spe_shufb(p, col0, t1, t3, shuf_hi);
+ if (count > 1) {
+ spe_shufb(p, col1, t1, t3, shuf_lo);
+ }
+
+ /* t1 and t3 are now no longer needed. Re-use those registers as
+ * temporaries.
+ */
+ col2 = t1;
+ col3 = t3;
+
+ if (count > 2) {
+ spe_shufb(p, col2, t2, t4, shuf_hi);
+ }
+
+ if (count > 3) {
+ spe_shufb(p, col3, t2, t4, shuf_lo);
+ }
+
+
+ /* Store the results. Remember that the stqd instruction is encoded using
+ * the qword offset (stand-alone assemblers do the byte-offset to
+ * qword-offset conversion for you), so the byte-offset needs to be divided by
+ * 16.
+ */
+ switch (count) {
+ case 4:
+ spe_stqd(p, col3, dest_ptr, 3 * 16);
+ case 3:
+ spe_stqd(p, col2, dest_ptr, 2 * 16);
+ case 2:
+ spe_stqd(p, col1, dest_ptr, 1 * 16);
+ case 1:
+ spe_stqd(p, col0, dest_ptr, 0 * 16);
+ }
+
+
+ /* Release all of the temporary registers used.
+ */
+ spe_release_register(p, col0);
+ spe_release_register(p, col1);
+ spe_release_register(p, col2);
+ spe_release_register(p, col3);
+ spe_release_register(p, shuf_hi);
+ spe_release_register(p, shuf_lo);
+ spe_release_register(p, t2);
+ spe_release_register(p, t4);
+}
+
+
+#if 0
+/* This appears to not be used currently */
+static void
+emit_fetch(struct spe_function *p,
+ unsigned in_ptr, unsigned *offset,
+ unsigned out_ptr, unsigned shuf_ptr,
+ enum pipe_format format)
+{
+ const unsigned count = (pf_size_x(format) != 0) + (pf_size_y(format) != 0)
+ + (pf_size_z(format) != 0) + (pf_size_w(format) != 0);
+ const unsigned type = pf_type(format);
+ const unsigned bytes = pf_size_x(format);
+
+ int v0 = spe_allocate_available_register(p);
+ int v1 = spe_allocate_available_register(p);
+ int v2 = spe_allocate_available_register(p);
+ int v3 = spe_allocate_available_register(p);
+ int tmp = spe_allocate_available_register(p);
+ int float_zero = -1;
+ int float_one = -1;
+ float scale_signed = 0.0;
+ float scale_unsigned = 0.0;
+
+ spe_lqd(p, v0, in_ptr, (0 + offset[0]) * 16);
+ spe_lqd(p, v1, in_ptr, (1 + offset[0]) * 16);
+ spe_lqd(p, v2, in_ptr, (2 + offset[0]) * 16);
+ spe_lqd(p, v3, in_ptr, (3 + offset[0]) * 16);
+ offset[0] += 4;
+
+ switch (bytes) {
+ case 1:
+ scale_signed = 1.0f / 127.0f;
+ scale_unsigned = 1.0f / 255.0f;
+ spe_lqd(p, tmp, shuf_ptr, 1 * 16);
+ spe_shufb(p, v0, v0, v0, tmp);
+ spe_shufb(p, v1, v1, v1, tmp);
+ spe_shufb(p, v2, v2, v2, tmp);
+ spe_shufb(p, v3, v3, v3, tmp);
+ break;
+ case 2:
+ scale_signed = 1.0f / 32767.0f;
+ scale_unsigned = 1.0f / 65535.0f;
+ spe_lqd(p, tmp, shuf_ptr, 2 * 16);
+ spe_shufb(p, v0, v0, v0, tmp);
+ spe_shufb(p, v1, v1, v1, tmp);
+ spe_shufb(p, v2, v2, v2, tmp);
+ spe_shufb(p, v3, v3, v3, tmp);
+ break;
+ case 4:
+ scale_signed = 1.0f / 2147483647.0f;
+ scale_unsigned = 1.0f / 4294967295.0f;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ switch (type) {
+ case PIPE_FORMAT_TYPE_FLOAT:
+ break;
+ case PIPE_FORMAT_TYPE_UNORM:
+ spe_ilhu(p, tmp, ((unsigned) scale_unsigned) >> 16);
+ spe_iohl(p, tmp, ((unsigned) scale_unsigned) & 0x0ffff);
+ spe_cuflt(p, v0, v0, 0);
+ spe_fm(p, v0, v0, tmp);
+ break;
+ case PIPE_FORMAT_TYPE_SNORM:
+ spe_ilhu(p, tmp, ((unsigned) scale_signed) >> 16);
+ spe_iohl(p, tmp, ((unsigned) scale_signed) & 0x0ffff);
+ spe_csflt(p, v0, v0, 0);
+ spe_fm(p, v0, v0, tmp);
+ break;
+ case PIPE_FORMAT_TYPE_USCALED:
+ spe_cuflt(p, v0, v0, 0);
+ break;
+ case PIPE_FORMAT_TYPE_SSCALED:
+ spe_csflt(p, v0, v0, 0);
+ break;
+ }
+
+
+ if (count < 4) {
+ float_one = spe_allocate_available_register(p);
+ spe_il(p, float_one, 1);
+ spe_cuflt(p, float_one, float_one, 0);
+
+ if (count < 3) {
+ float_zero = spe_allocate_available_register(p);
+ spe_il(p, float_zero, 0);
+ }
+ }
+
+ spe_release_register(p, tmp);
+
+ emit_matrix_transpose(p, v0, v1, v2, v3, out_ptr, shuf_ptr, count);
+
+ switch (count) {
+ case 1:
+ spe_stqd(p, float_zero, out_ptr, 1 * 16);
+ case 2:
+ spe_stqd(p, float_zero, out_ptr, 2 * 16);
+ case 3:
+ spe_stqd(p, float_one, out_ptr, 3 * 16);
+ }
+
+ if (float_zero != -1) {
+ spe_release_register(p, float_zero);
+ }
+
+ if (float_one != -1) {
+ spe_release_register(p, float_one);
+ }
+}
+#endif
+
+
+void cell_update_vertex_fetch(struct draw_context *draw)
+{
+#if 0
+ struct cell_context *const cell =
+ (struct cell_context *) draw->driver_private;
+ struct spe_function *p = &cell->attrib_fetch;
+ unsigned function_index[PIPE_MAX_ATTRIBS];
+ unsigned unique_attr_formats;
+ int out_ptr;
+ int in_ptr;
+ int shuf_ptr;
+ unsigned i;
+ unsigned j;
+
+
+ /* Determine how many unique input attribute formats there are. At the
+ * same time, store the index of the lowest numbered attribute that has
+ * the same format as any non-unique format.
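+ *
+ * For example (illustrative formats): {A, B, A} yields
+ * function_index = {0, 1, 0} and unique_attr_formats = 2, so attribute 2
+ * reuses the fetch routine generated for attribute 0.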
+ */
+ unique_attr_formats = 1;
+ function_index[0] = 0;
+ for (i = 1; i < draw->vertex_fetch.nr_attrs; i++) {
+ const enum pipe_format curr_fmt = draw->vertex_element[i].src_format;
+
+ for (j = 0; j < i; j++) {
+ if (curr_fmt == draw->vertex_element[j].src_format) {
+ break;
+ }
+ }
+
+ if (j == i) {
+ unique_attr_formats++;
+ }
+
+ function_index[i] = j;
+ }
+
+
+ /* Each fetch function can be a maximum of 34 instructions (note: this is
+ * actually a slight over-estimate).
+ */
+ spe_init_func(p, 34 * SPE_INST_SIZE * unique_attr_formats);
+
+
+ /* Allocate registers for the function's input parameters.
+ */
+ out_ptr = spe_allocate_register(p, 3);
+ in_ptr = spe_allocate_register(p, 4);
+ shuf_ptr = spe_allocate_register(p, 5);
+
+
+ /* Generate code for the individual attribute fetch functions.
+ */
+ for (i = 0; i < draw->vertex_fetch.nr_attrs; i++) {
+ unsigned offset;
+
+ if (function_index[i] == i) {
+ cell->attrib_fetch_offsets[i] = (unsigned) ((void *) p->csr
+ - (void *) p->store);
+
+ offset = 0;
+ emit_fetch(p, in_ptr, &offset, out_ptr, shuf_ptr,
+ draw->vertex_element[i].src_format);
+ spe_bi(p, 0, 0, 0);
+
+ /* Round up to the next 16-byte boundary.
+ */
+ if ((((unsigned) p->store) & 0x0f) != 0) {
+ const unsigned align = ((unsigned) p->store) & 0x0f;
+ p->store = (uint32_t *) (((void *) p->store) + align);
+ }
+ } else {
+ /* Use the same function entry-point as a previously seen attribute
+ * with the same format.
+ */
+ cell->attrib_fetch_offsets[i] =
+ cell->attrib_fetch_offsets[function_index[i]];
+ }
+ }
+#else
+ assert(0);
+#endif
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_vertex_shader.c b/src/gallium/drivers/cell/ppu/cell_vertex_shader.c
new file mode 100644
index 0000000000..403cf6d50f
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_vertex_shader.c
@@ -0,0 +1,146 @@
+/*
+ * (C) Copyright IBM Corporation 2008
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file cell_vertex_shader.c
+ * Vertex shader interface routines for Cell.
+ *
+ * \author Ian Romanick <idr@us.ibm.com>
+ */
+
+#include "pipe/p_defines.h"
+#include "pipe/p_context.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "util/u_math.h"
+
+#include "cell_context.h"
+#include "cell_draw_arrays.h"
+#include "cell_flush.h"
+#include "cell_spu.h"
+#include "cell_batch.h"
+
+#include "cell/common.h"
+#include "draw/draw_context.h"
+#include "draw/draw_private.h"
+
+/**
+ * Run the vertex shader on all vertices in the vertex queue.
+ * Called by the draw module when the vertex cache needs to be flushed.
+ */
+void
+cell_vertex_shader_queue_flush(struct draw_context *draw)
+{
+#if 0
+ struct cell_context *const cell =
+ (struct cell_context *) draw->driver_private;
+ struct cell_command_vs *const vs = &cell_global.command[0].vs;
+ uint64_t *batch;
+ struct cell_array_info *array_info;
+ unsigned i, j;
+ struct cell_attribute_fetch_code *cf;
+
+ assert(draw->vs.queue_nr != 0);
+
+ /* XXX: do this on statechange:
+ */
+ draw_update_vertex_fetch(draw);
+ cell_update_vertex_fetch(draw);
+
+
+ batch = cell_batch_alloc(cell, sizeof(batch[0]) + sizeof(*cf));
+ batch[0] = CELL_CMD_STATE_ATTRIB_FETCH;
+ cf = (struct cell_attribute_fetch_code *) (&batch[1]);
+ cf->base = (uint64_t) cell->attrib_fetch.store;
+ cf->size = ROUNDUP16((unsigned)((void *) cell->attrib_fetch.csr
+ - (void *) cell->attrib_fetch.store));
+
+
+ for (i = 0; i < draw->vertex_fetch.nr_attrs; i++) {
+ const enum pipe_format format = draw->vertex_element[i].src_format;
+ const unsigned count = ((pf_size_x(format) != 0)
+ + (pf_size_y(format) != 0)
+ + (pf_size_z(format) != 0)
+ + (pf_size_w(format) != 0));
+ const unsigned size = pf_size_x(format) * count;
+
+ batch = cell_batch_alloc(cell, sizeof(batch[0]) + sizeof(*array_info));
+
+ batch[0] = CELL_CMD_STATE_VS_ARRAY_INFO;
+
+ array_info = (struct cell_array_info *) &batch[1];
+ assert(draw->vertex_fetch.src_ptr[i] != NULL);
+ array_info->base = (uintptr_t) draw->vertex_fetch.src_ptr[i];
+ array_info->attr = i;
+ array_info->pitch = draw->vertex_fetch.pitch[i];
+ array_info->size = size;
+ array_info->function_offset = cell->attrib_fetch_offsets[i];
+ }
+
+ batch = cell_batch_alloc(cell, sizeof(batch[0])
+ + sizeof(struct pipe_viewport_state));
+ batch[0] = CELL_CMD_STATE_VIEWPORT;
+ (void) memcpy(&batch[1], &draw->viewport,
+ sizeof(struct pipe_viewport_state));
+
+ {
+ uint64_t uniforms = (uintptr_t) draw->user.constants;
+
+ batch = cell_batch_alloc(cell, 2 *sizeof(batch[0]));
+ batch[0] = CELL_CMD_STATE_UNIFORMS;
+ batch[1] = uniforms;
+ }
+
+ cell_batch_flush(cell);
+
+ vs->opcode = CELL_CMD_VS_EXECUTE;
+ vs->nr_attrs = draw->vertex_fetch.nr_attrs;
+
+ (void) memcpy(vs->plane, draw->plane, sizeof(draw->plane));
+ vs->nr_planes = draw->nr_planes;
+
+ for (i = 0; i < draw->vs.queue_nr; i += SPU_VERTS_PER_BATCH) {
+ const unsigned n = MIN2(SPU_VERTS_PER_BATCH, draw->vs.queue_nr - i);
+
+ for (j = 0; j < n; j++) {
+ vs->elts[j] = draw->vs.queue[i + j].elt;
+ vs->vOut[j] = (uintptr_t) draw->vs.queue[i + j].vertex;
+ }
+
+ for (/* empty */; j < SPU_VERTS_PER_BATCH; j++) {
+ vs->elts[j] = vs->elts[0];
+ vs->vOut[j] = (uintptr_t) draw->vs.queue[i + j].vertex;
+ }
+
+ vs->num_elts = n;
+ send_mbox_message(cell_global.spe_contexts[0], CELL_CMD_VS_EXECUTE);
+
+ cell_flush_int(cell, CELL_FLUSH_WAIT);
+ }
+
+ draw->vs.post_nr = draw->vs.queue_nr;
+ draw->vs.queue_nr = 0;
+#else
+ assert(0);
+#endif
+}
diff --git a/src/gallium/drivers/cell/ppu/cell_winsys.h b/src/gallium/drivers/cell/ppu/cell_winsys.h
new file mode 100644
index 0000000000..ae2af5696b
--- /dev/null
+++ b/src/gallium/drivers/cell/ppu/cell_winsys.h
@@ -0,0 +1,50 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef CELL_WINSYS_H
+#define CELL_WINSYS_H
+
+#include "pipe/p_compiler.h"
+
+
+/**
+ * Very simple winsys at this time.
+ * Will probably eventually add SPU control info.
+ */
+struct cell_winsys
+{
+ uint preferredFormat;
+};
+
+
+extern struct cell_winsys *
+cell_get_winsys(uint format);
+
+
+
+#endif
diff --git a/src/gallium/drivers/cell/spu/.gitignore b/src/gallium/drivers/cell/spu/.gitignore
new file mode 100644
index 0000000000..2be9a2d324
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/.gitignore
@@ -0,0 +1 @@
+g3d_spu
diff --git a/src/gallium/drivers/cell/spu/Makefile b/src/gallium/drivers/cell/spu/Makefile
new file mode 100644
index 0000000000..3cc52301da
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/Makefile
@@ -0,0 +1,83 @@
+# Gallium3D Cell driver: SPU code
+
+# This makefile builds the g3d_spu.a file that's linked into the
+# PPU code/library.
+
+
+TOP = ../../../../..
+include $(TOP)/configs/current
+
+
+PROG = g3d
+
+PROG_SPU = $(PROG)_spu
+PROG_SPU_A = $(PROG)_spu.a
+PROG_SPU_EMBED_O = $(PROG)_spu-embed.o
+
+
+SOURCES = \
+ spu_command.c \
+ spu_dcache.c \
+ spu_funcs.c \
+ spu_main.c \
+ spu_per_fragment_op.c \
+ spu_render.c \
+ spu_texture.c \
+ spu_tile.c \
+ spu_tri.c
+
+OLD_SOURCES = \
+ spu_exec.c \
+ spu_util.c \
+ spu_vertex_fetch.c \
+ spu_vertex_shader.c
+
+
+SPU_OBJECTS = $(SOURCES:.c=.o)
+
+SPU_ASM_OUT = $(SOURCES:.c=.s)
+
+
+INCLUDE_DIRS = \
+ -I$(TOP)/src/mesa \
+ -I$(TOP)/src/gallium/include \
+ -I$(TOP)/src/gallium/auxiliary \
+ -I$(TOP)/src/gallium/drivers
+
+
+.c.o:
+ $(SPU_CC) $(SPU_CFLAGS) -c $<
+
+.c.s:
+ $(SPU_CC) $(SPU_CFLAGS) -O3 -S $<
+
+
+# The .a file will be linked into the main/PPU executable
+default: $(PROG_SPU_A)
+
+$(PROG_SPU_A): $(PROG_SPU_EMBED_O)
+ $(SPU_AR) $(SPU_AR_FLAGS) $(PROG_SPU_A) $(PROG_SPU_EMBED_O)
+
+$(PROG_SPU_EMBED_O): $(PROG_SPU)
+ $(SPU_EMBED) $(SPU_EMBED_FLAGS) $(PROG_SPU) $(PROG_SPU) $(PROG_SPU_EMBED_O)
+
+$(PROG_SPU): $(SPU_OBJECTS)
+ $(SPU_CC) -o $(PROG_SPU) $(SPU_OBJECTS) $(SPU_LFLAGS)
+
+
+
+asmfiles: $(SPU_ASM_OUT)
+
+
+clean:
+ rm -f *~ *.o *.a *.d *.s $(PROG_SPU)
+
+
+
+depend: $(SOURCES)
+ rm -f depend
+ touch depend
+ $(MKDEP) $(MKDEP_OPTIONS) $(INCLUDE_DIRS) $(SOURCES) 2> /dev/null
+
+include depend
+
diff --git a/src/gallium/drivers/cell/spu/spu_colorpack.h b/src/gallium/drivers/cell/spu/spu_colorpack.h
new file mode 100644
index 0000000000..d7ce005524
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_colorpack.h
@@ -0,0 +1,145 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+
+#ifndef SPU_COLORPACK_H
+#define SPU_COLORPACK_H
+
+
+#include <transpose_matrix4x4.h>
+#include <spu_intrinsics.h>
+
+
+static INLINE unsigned int
+spu_pack_R8G8B8A8(vector float rgba)
+{
+ vector unsigned int out = spu_convtu(rgba, 32);
+
+ out = spu_shuffle(out, out, ((vector unsigned char) {
+ 0, 4, 8, 12, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }) );
+
+ return spu_extract(out, 0);
+}
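+
+/* Example (assuming the usual big-endian SPU byte ordering and saturating
+ * float-to-unsigned conversion in spu_convtu): packing the color
+ * (1.0, 0.0, 0.0, 1.0) with spu_pack_R8G8B8A8() yields 0xff0000ff.
+ */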
+
+
+static INLINE unsigned int
+spu_pack_A8R8G8B8(vector float rgba)
+{
+ vector unsigned int out = spu_convtu(rgba, 32);
+ out = spu_shuffle(out, out, ((vector unsigned char) {
+ 12, 0, 4, 8, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0}) );
+ return spu_extract(out, 0);
+}
+
+
+static INLINE unsigned int
+spu_pack_B8G8R8A8(vector float rgba)
+{
+ vector unsigned int out = spu_convtu(rgba, 32);
+ out = spu_shuffle(out, out, ((vector unsigned char) {
+ 8, 4, 0, 12, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0}) );
+ return spu_extract(out, 0);
+}
+
+
+static INLINE unsigned int
+spu_pack_color_shuffle(vector float rgba, vector unsigned char shuffle)
+{
+ vector unsigned int out = spu_convtu(rgba, 32);
+ out = spu_shuffle(out, out, shuffle);
+ return spu_extract(out, 0);
+}
+
+
+static INLINE vector float
+spu_unpack_B8G8R8A8(uint color)
+{
+ vector unsigned int color_u4 = spu_splats(color);
+ color_u4 = spu_shuffle(color_u4, color_u4,
+ ((vector unsigned char) {
+ 2, 2, 2, 2,
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ 3, 3, 3, 3}) );
+ return spu_convtf(color_u4, 32);
+}
+
+
+static INLINE vector float
+spu_unpack_A8R8G8B8(uint color)
+{
+ vector unsigned int color_u4 = spu_splats(color);
+ color_u4 = spu_shuffle(color_u4, color_u4,
+ ((vector unsigned char) {
+ 1, 1, 1, 1,
+ 2, 2, 2, 2,
+ 3, 3, 3, 3,
+ 0, 0, 0, 0}) );
+ return spu_convtf(color_u4, 32);
+}
+
+
+/**
+ * \param color_in - array of 32-bit packed ARGB colors
+ * \param color_out - returns float colors in RRRR, GGGG, BBBB, AAAA order
+ */
+static INLINE void
+spu_unpack_A8R8G8B8_transpose4(const vector unsigned int color_in[4],
+ vector float color_out[4])
+{
+ vector unsigned int c0;
+
+ c0 = spu_shuffle(color_in[0], color_in[0],
+ ((vector unsigned char) {
+ 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 0, 0, 0, 0}) );
+ color_out[0] = spu_convtf(c0, 32);
+
+ c0 = spu_shuffle(color_in[1], color_in[1],
+ ((vector unsigned char) {
+ 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 0, 0, 0, 0}) );
+ color_out[1] = spu_convtf(c0, 32);
+
+ c0 = spu_shuffle(color_in[2], color_in[2],
+ ((vector unsigned char) {
+ 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 0, 0, 0, 0}) );
+ color_out[2] = spu_convtf(c0, 32);
+
+ c0 = spu_shuffle(color_in[3], color_in[3],
+ ((vector unsigned char) {
+ 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 0, 0, 0, 0}) );
+ color_out[3] = spu_convtf(c0, 32);
+
+ _transpose_matrix4x4(color_out, color_out);
+}
+
+
+
+#endif /* SPU_COLORPACK_H */
diff --git a/src/gallium/drivers/cell/spu/spu_command.c b/src/gallium/drivers/cell/spu/spu_command.c
new file mode 100644
index 0000000000..5c0179d954
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_command.c
@@ -0,0 +1,815 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * SPU command processing code
+ */
+
+
+#include <stdio.h>
+#include <libmisc.h>
+
+#include "pipe/p_defines.h"
+
+#include "spu_command.h"
+#include "spu_main.h"
+#include "spu_render.h"
+#include "spu_per_fragment_op.h"
+#include "spu_texture.h"
+#include "spu_tile.h"
+#include "spu_vertex_shader.h"
+#include "spu_dcache.h"
+#include "cell/common.h"
+
+
+struct spu_vs_context draw;
+
+
+/**
+ * Buffers containing dynamically generated SPU code:
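+ * The figure of 136 bytes per attribute appears to correspond to the
+ * 34-instruction (34 * 4 byte) per-attribute upper bound used when the PPU
+ * generates the fetch code (see cell_vertex_fetch.c); treat it as an
+ * estimate rather than a hard architectural limit.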
+ */
+static unsigned char attribute_fetch_code_buffer[136 * PIPE_MAX_ATTRIBS]
+ ALIGN16_ATTRIB;
+
+
+
+static INLINE int
+align(int value, int alignment)
+{
+ return (value + alignment - 1) & ~(alignment - 1);
+}
+
+
+
+/**
+ * Tell the PPU that this SPU has finished copying a buffer to
+ * local store and that it may be reused by the PPU.
+ * This is done by writing a 16-byte batch-buffer-status block back into
+ * main memory (in cell_context->buffer_status[]).
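+ * Each buffer appears to have a 16-byte (four-uint) status entry per SPU,
+ * which is why the index below is scaled by four.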
+ */
+static void
+release_buffer(uint buffer)
+{
+ /* Evidently, using less than a 16-byte status doesn't work reliably */
+ static const vector unsigned int status = {CELL_BUFFER_STATUS_FREE,
+ CELL_BUFFER_STATUS_FREE,
+ CELL_BUFFER_STATUS_FREE,
+ CELL_BUFFER_STATUS_FREE};
+ const uint index = 4 * (spu.init.id * CELL_NUM_BUFFERS + buffer);
+ uint *dst = spu.init.buffer_status + index;
+
+ ASSERT(buffer < CELL_NUM_BUFFERS);
+
+ mfc_put((void *) &status, /* src in local memory */
+ (unsigned int) dst, /* dst in main memory */
+ sizeof(status), /* size */
+ TAG_MISC, /* tag is unimportant */
+ 0, /* tid */
+ 0 /* rid */);
+}
+
+
+/**
+ * Write CELL_FENCE_SIGNALLED back to the fence status qword in main memory.
+ * There's a qword of status per SPU.
+ */
+static void
+cmd_fence(struct cell_command_fence *fence_cmd)
+{
+ static const vector unsigned int status = {CELL_FENCE_SIGNALLED,
+ CELL_FENCE_SIGNALLED,
+ CELL_FENCE_SIGNALLED,
+ CELL_FENCE_SIGNALLED};
+ uint *dst = (uint *) fence_cmd->fence;
+ dst += 4 * spu.init.id; /* main store/memory address, not local store */
+ ASSERT_ALIGN16(dst);
+ mfc_put((void *) &status, /* src in local memory */
+ (unsigned int) dst, /* dst in main memory */
+ sizeof(status), /* size */
+ TAG_FENCE, /* tag */
+ 0, /* tid */
+ 0 /* rid */);
+}
+
+
+static void
+cmd_clear_surface(const struct cell_command_clear_surface *clear)
+{
+ D_PRINTF(CELL_DEBUG_CMD, "CLEAR SURF %u to 0x%08x\n", clear->surface, clear->value);
+
+ if (clear->surface == 0) {
+ spu.fb.color_clear_value = clear->value;
+ if (spu.init.debug_flags & CELL_DEBUG_CHECKER) {
+ uint x = (spu.init.id << 4) | (spu.init.id << 12) |
+ (spu.init.id << 20) | (spu.init.id << 28);
+ spu.fb.color_clear_value ^= x;
+ }
+ }
+ else {
+ spu.fb.depth_clear_value = clear->value;
+ }
+
+#define CLEAR_OPT 1
+#if CLEAR_OPT
+
+ /* Simply set all tiles' status to CLEAR.
+ * When we actually begin rendering into a tile, we'll initialize it to
+ * the clear value. If any tiles go untouched during the frame,
+ * really_clear_tiles() will set them to the clear value.
+ */
+ if (clear->surface == 0) {
+ memset(spu.ctile_status, TILE_STATUS_CLEAR, sizeof(spu.ctile_status));
+ }
+ else {
+ memset(spu.ztile_status, TILE_STATUS_CLEAR, sizeof(spu.ztile_status));
+ }
+
+#else
+
+ /*
+ * This path clears the whole framebuffer to the clear color right now.
+ */
+
+ /*
+ printf("SPU: %s num=%d w=%d h=%d\n",
+ __FUNCTION__, num_tiles, spu.fb.width_tiles, spu.fb.height_tiles);
+ */
+
+ /* init a single tile to the clear value */
+ if (clear->surface == 0) {
+ clear_c_tile(&spu.ctile);
+ }
+ else {
+ clear_z_tile(&spu.ztile);
+ }
+
+ /* walk over my tiles, writing the 'clear' tile's data */
+ {
+ const uint num_tiles = spu.fb.width_tiles * spu.fb.height_tiles;
+ uint i;
+ for (i = spu.init.id; i < num_tiles; i += spu.init.num_spus) {
+ uint tx = i % spu.fb.width_tiles;
+ uint ty = i / spu.fb.width_tiles;
+ if (clear->surface == 0)
+ put_tile(tx, ty, &spu.ctile, TAG_SURFACE_CLEAR, 0);
+ else
+ put_tile(tx, ty, &spu.ztile, TAG_SURFACE_CLEAR, 1);
+ }
+ }
+
+ if (spu.init.debug_flags & CELL_DEBUG_SYNC) {
+ wait_on_mask(1 << TAG_SURFACE_CLEAR);
+ }
+
+#endif /* CLEAR_OPT */
+
+ D_PRINTF(CELL_DEBUG_CMD, "CLEAR SURF done\n");
+}
+
+
+static void
+cmd_release_verts(const struct cell_command_release_verts *release)
+{
+ D_PRINTF(CELL_DEBUG_CMD, "RELEASE VERTS %u\n", release->vertex_buf);
+ ASSERT(release->vertex_buf != ~0U);
+ release_buffer(release->vertex_buf);
+}
+
+
+/**
+ * Process a CELL_CMD_STATE_FRAGMENT_OPS command.
+ * This involves installing new fragment ops SPU code.
+ * If this function is never called, we'll use a regular C fallback function
+ * for fragment processing.
+ */
+static void
+cmd_state_fragment_ops(const struct cell_command_fragment_ops *fops)
+{
+ D_PRINTF(CELL_DEBUG_CMD, "CMD_STATE_FRAGMENT_OPS\n");
+
+ /* Copy state info (for fallback case only - this will eventually
+ * go away when the fallback case goes away)
+ */
+ memcpy(&spu.depth_stencil_alpha, &fops->dsa, sizeof(fops->dsa));
+ memcpy(&spu.blend, &fops->blend, sizeof(fops->blend));
+ memcpy(&spu.blend_color, &fops->blend_color, sizeof(fops->blend_color));
+
+ /* Make sure the SPU knows which buffers it's expected to read when
+ * it's told to pull tiles.
+ */
+ spu.read_depth_stencil = (spu.depth_stencil_alpha.depth.enabled || spu.depth_stencil_alpha.stencil[0].enabled);
+
+ /* If we're forcing the fallback code to be used (for debug purposes),
+ * install that. Otherwise install the incoming SPU code.
+ */
+ if ((spu.init.debug_flags & CELL_DEBUG_FRAGMENT_OP_FALLBACK) != 0) {
+ static unsigned int warned = 0;
+ if (!warned) {
+ fprintf(stderr, "Cell Warning: using fallback per-fragment code\n");
+ warned = 1;
+ }
+ /* The following two lines aren't really necessary if you
+ * know the debug flags won't change during a run, and if you
+ * know that the function pointers are initialized correctly.
+ * We set them here to allow a person to change the debug
+ * flags during a run (from inside a debugger).
+ */
+ spu.fragment_ops[CELL_FACING_FRONT] = spu_fallback_fragment_ops;
+ spu.fragment_ops[CELL_FACING_BACK] = spu_fallback_fragment_ops;
+ return;
+ }
+
+ /* Make sure the SPU code buffer is large enough to hold the incoming code.
+ * Note that we *don't* use align_malloc() and align_free(), because
+ * those utility functions are *not* available in SPU code.
+ */
+ if (spu.fragment_ops_code_size < fops->total_code_size) {
+ if (spu.fragment_ops_code != NULL) {
+ free(spu.fragment_ops_code);
+ }
+ spu.fragment_ops_code_size = fops->total_code_size;
+ spu.fragment_ops_code = malloc(fops->total_code_size);
+ if (spu.fragment_ops_code == NULL) {
+ /* Whoops. */
+ fprintf(stderr, "CELL Warning: failed to allocate fragment ops code (%d bytes) - using fallback\n", fops->total_code_size);
+ spu.fragment_ops_code = NULL;
+ spu.fragment_ops_code_size = 0;
+ spu.fragment_ops[CELL_FACING_FRONT] = spu_fallback_fragment_ops;
+ spu.fragment_ops[CELL_FACING_BACK] = spu_fallback_fragment_ops;
+ return;
+ }
+ }
+
+ /* Copy the SPU code from the command buffer to the spu buffer */
+ memcpy(spu.fragment_ops_code, fops->code, fops->total_code_size);
+
+ /* Set the pointers for the front-facing and back-facing fragments
+ * to the specified offsets within the code. Note that if the
+ * front-facing and back-facing code are the same, they'll have
+ * the same offset.
+ */
+ spu.fragment_ops[CELL_FACING_FRONT] = (spu_fragment_ops_func) &spu.fragment_ops_code[fops->front_code_index];
+ spu.fragment_ops[CELL_FACING_BACK] = (spu_fragment_ops_func) &spu.fragment_ops_code[fops->back_code_index];
+}
+
+static void
+cmd_state_fragment_program(const struct cell_command_fragment_program *fp)
+{
+ D_PRINTF(CELL_DEBUG_CMD, "CMD_STATE_FRAGMENT_PROGRAM\n");
+ /* Copy SPU code from batch buffer to spu buffer */
+ memcpy(spu.fragment_program_code, fp->code,
+ SPU_MAX_FRAGMENT_PROGRAM_INSTS * 4);
+#if 01
+ /* Point function pointer at new code */
+ spu.fragment_program = (spu_fragment_program_func)spu.fragment_program_code;
+#endif
+}
+
+
+static uint
+cmd_state_fs_constants(const qword *buffer, uint pos)
+{
+ const uint num_const = spu_extract((vector unsigned int)buffer[pos+1], 0);
+ const float *constants = (const float *) &buffer[pos+2];
+ uint i;
+
+ D_PRINTF(CELL_DEBUG_CMD, "CMD_STATE_FS_CONSTANTS (%u)\n", num_const);
+
+ /* Expand each float to float[4] for SOA execution */
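+ /* e.g. a constant 0.5f is stored as the vector {0.5f, 0.5f, 0.5f, 0.5f},
+  * letting the SOA fragment code apply one constant to four fragments at
+  * once.
+  */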
+ for (i = 0; i < num_const; i++) {
+ D_PRINTF(CELL_DEBUG_CMD, " const[%u] = %f\n", i, constants[i]);
+ spu.constants[i] = spu_splats(constants[i]);
+ }
+
+ /* return new buffer pos (in 16-byte words) */
+ return pos + 2 + (ROUNDUP16(num_const * sizeof(float)) / 16);
+}
+
+
+static void
+cmd_state_framebuffer(const struct cell_command_framebuffer *cmd)
+{
+ D_PRINTF(CELL_DEBUG_CMD, "FRAMEBUFFER: %d x %d at %p, cformat 0x%x zformat 0x%x\n",
+ cmd->width,
+ cmd->height,
+ cmd->color_start,
+ cmd->color_format,
+ cmd->depth_format);
+
+ ASSERT_ALIGN16(cmd->color_start);
+ ASSERT_ALIGN16(cmd->depth_start);
+
+ spu.fb.color_start = cmd->color_start;
+ spu.fb.depth_start = cmd->depth_start;
+ spu.fb.color_format = cmd->color_format;
+ spu.fb.depth_format = cmd->depth_format;
+ spu.fb.width = cmd->width;
+ spu.fb.height = cmd->height;
+ spu.fb.width_tiles = (spu.fb.width + TILE_SIZE - 1) / TILE_SIZE;
+ spu.fb.height_tiles = (spu.fb.height + TILE_SIZE - 1) / TILE_SIZE;
+
+ switch (spu.fb.depth_format) {
+ case PIPE_FORMAT_Z32_UNORM:
+ spu.fb.zsize = 4;
+ spu.fb.zscale = (float) 0xffffffffu;
+ break;
+ case PIPE_FORMAT_Z24S8_UNORM:
+ case PIPE_FORMAT_S8Z24_UNORM:
+ case PIPE_FORMAT_Z24X8_UNORM:
+ case PIPE_FORMAT_X8Z24_UNORM:
+ spu.fb.zsize = 4;
+ spu.fb.zscale = (float) 0x00ffffffu;
+ break;
+ case PIPE_FORMAT_Z16_UNORM:
+ spu.fb.zsize = 2;
+ spu.fb.zscale = (float) 0xffffu;
+ break;
+ default:
+ spu.fb.zsize = 0;
+ break;
+ }
+}
+
+
+/**
+ * The texture mask_s/t and scale_s/t fields depend on the texture size and
+ * sampler wrap modes.
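+ *
+ * For example, a 256-texel axis with PIPE_TEX_WRAP_REPEAT gets mask 0xff
+ * (width - 1, so the sampling code can presumably wrap integer texel coords
+ * with a simple AND; power-of-two sizes assumed), while other wrap modes get
+ * an all-ones mask that leaves the coordinates untouched.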
+ */
+static void
+update_tex_masks(struct spu_texture *texture,
+ const struct pipe_sampler_state *sampler)
+{
+ uint i;
+
+ for (i = 0; i < CELL_MAX_TEXTURE_LEVELS; i++) {
+ int width = texture->level[i].width;
+ int height = texture->level[i].height;
+
+ if (sampler->wrap_s == PIPE_TEX_WRAP_REPEAT)
+ texture->level[i].mask_s = spu_splats(width - 1);
+ else
+ texture->level[i].mask_s = spu_splats(~0);
+
+ if (sampler->wrap_t == PIPE_TEX_WRAP_REPEAT)
+ texture->level[i].mask_t = spu_splats(height - 1);
+ else
+ texture->level[i].mask_t = spu_splats(~0);
+
+ if (sampler->normalized_coords) {
+ texture->level[i].scale_s = spu_splats((float) width);
+ texture->level[i].scale_t = spu_splats((float) height);
+ }
+ else {
+ texture->level[i].scale_s = spu_splats(1.0f);
+ texture->level[i].scale_t = spu_splats(1.0f);
+ }
+ }
+}
+
+
+static void
+cmd_state_sampler(const struct cell_command_sampler *sampler)
+{
+ uint unit = sampler->unit;
+
+ D_PRINTF(CELL_DEBUG_CMD, "SAMPLER [%u]\n", unit);
+
+ spu.sampler[unit] = sampler->state;
+
+ switch (spu.sampler[unit].min_img_filter) {
+ case PIPE_TEX_FILTER_LINEAR:
+ spu.min_sample_texture_2d[unit] = sample_texture_2d_bilinear;
+ break;
+ case PIPE_TEX_FILTER_ANISO:
+ /* fall-through, for now */
+ case PIPE_TEX_FILTER_NEAREST:
+ spu.min_sample_texture_2d[unit] = sample_texture_2d_nearest;
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ switch (spu.sampler[sampler->unit].mag_img_filter) {
+ case PIPE_TEX_FILTER_LINEAR:
+ spu.mag_sample_texture_2d[unit] = sample_texture_2d_bilinear;
+ break;
+ case PIPE_TEX_FILTER_ANISO:
+ /* fall-through, for now */
+ case PIPE_TEX_FILTER_NEAREST:
+ spu.mag_sample_texture_2d[unit] = sample_texture_2d_nearest;
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ switch (spu.sampler[sampler->unit].min_mip_filter) {
+ case PIPE_TEX_MIPFILTER_NEAREST:
+ case PIPE_TEX_MIPFILTER_LINEAR:
+ spu.sample_texture_2d[unit] = sample_texture_2d_lod;
+ break;
+ case PIPE_TEX_MIPFILTER_NONE:
+ spu.sample_texture_2d[unit] = spu.mag_sample_texture_2d[unit];
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ update_tex_masks(&spu.texture[unit], &spu.sampler[unit]);
+}
+
+
+static void
+cmd_state_texture(const struct cell_command_texture *texture)
+{
+ const uint unit = texture->unit;
+ uint i;
+
+ D_PRINTF(CELL_DEBUG_CMD, "TEXTURE [%u]\n", texture->unit);
+
+ spu.texture[unit].max_level = 0;
+ spu.texture[unit].target = texture->target;
+
+ for (i = 0; i < CELL_MAX_TEXTURE_LEVELS; i++) {
+ uint width = texture->width[i];
+ uint height = texture->height[i];
+ uint depth = texture->depth[i];
+
+ D_PRINTF(CELL_DEBUG_CMD, " LEVEL %u: at %p size[0] %u x %u\n", i,
+ texture->start[i], texture->width[i], texture->height[i]);
+
+ spu.texture[unit].level[i].start = texture->start[i];
+ spu.texture[unit].level[i].width = width;
+ spu.texture[unit].level[i].height = height;
+ spu.texture[unit].level[i].depth = depth;
+
+ spu.texture[unit].level[i].tiles_per_row =
+ (width + TILE_SIZE - 1) / TILE_SIZE;
+
+ spu.texture[unit].level[i].bytes_per_image =
+ 4 * align(width, TILE_SIZE) * align(height, TILE_SIZE) * depth;
+
+ spu.texture[unit].level[i].max_s = spu_splats((int) width - 1);
+ spu.texture[unit].level[i].max_t = spu_splats((int) height - 1);
+
+ if (texture->start[i])
+ spu.texture[unit].max_level = i;
+ }
+
+ update_tex_masks(&spu.texture[unit], &spu.sampler[unit]);
+}
+
+
+static void
+cmd_state_vertex_info(const struct vertex_info *vinfo)
+{
+ D_PRINTF(CELL_DEBUG_CMD, "VERTEX_INFO num_attribs=%u\n", vinfo->num_attribs);
+ ASSERT(vinfo->num_attribs >= 1);
+ ASSERT(vinfo->num_attribs <= 8);
+ memcpy(&spu.vertex_info, vinfo, sizeof(*vinfo));
+}
+
+
+static void
+cmd_state_vs_array_info(const struct cell_array_info *vs_info)
+{
+ const unsigned attr = vs_info->attr;
+
+ ASSERT(attr < PIPE_MAX_ATTRIBS);
+ draw.vertex_fetch.src_ptr[attr] = vs_info->base;
+ draw.vertex_fetch.pitch[attr] = vs_info->pitch;
+ draw.vertex_fetch.size[attr] = vs_info->size;
+ draw.vertex_fetch.code_offset[attr] = vs_info->function_offset;
+ draw.vertex_fetch.dirty = 1;
+}
+
+
+static void
+cmd_state_attrib_fetch(const struct cell_attribute_fetch_code *code)
+{
+ mfc_get(attribute_fetch_code_buffer,
+ (unsigned int) code->base, /* src */
+ code->size,
+ TAG_BATCH_BUFFER,
+ 0, /* tid */
+ 0 /* rid */);
+ wait_on_mask(1 << TAG_BATCH_BUFFER);
+
+ draw.vertex_fetch.code = attribute_fetch_code_buffer;
+}
+
+
+static void
+cmd_finish(void)
+{
+ D_PRINTF(CELL_DEBUG_CMD, "FINISH\n");
+ really_clear_tiles(0);
+ /* wait for all outstanding DMAs to finish */
+ mfc_write_tag_mask(~0);
+ mfc_read_tag_status_all();
+ /* send mbox message to PPU */
+ spu_write_out_mbox(CELL_CMD_FINISH);
+}
+
+
+/**
+ * Execute a batch of commands which was sent to us by the PPU.
+ * See the cell_emit_state.c code for where the commands come from.
+ *
+ * The opcode param encodes the location of the buffer and its size.
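+ *
+ * Inferred layout of the opcode word (from the shifts used below):
+ *   bits  0..7   command opcode (CELL_CMD_BATCH)
+ *   bits  8..15  batch buffer index
+ *   bits 16..31  batch size in bytes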
+ */
+static void
+cmd_batch(uint opcode)
+{
+ const uint buf = (opcode >> 8) & 0xff;
+ uint size = (opcode >> 16);
+ qword buffer[CELL_BUFFER_SIZE / 16] ALIGN16_ATTRIB;
+ const unsigned usize = ROUNDUP16(size) / sizeof(buffer[0]);
+ uint pos;
+
+ D_PRINTF(CELL_DEBUG_CMD, "BATCH buffer %u, len %u, from %p\n",
+ buf, size, spu.init.buffers[buf]);
+
+ ASSERT((opcode & CELL_CMD_OPCODE_MASK) == CELL_CMD_BATCH);
+
+ ASSERT_ALIGN16(spu.init.buffers[buf]);
+
+ size = ROUNDUP16(size);
+
+ ASSERT_ALIGN16(spu.init.buffers[buf]);
+
+ mfc_get(buffer, /* dest */
+ (unsigned int) spu.init.buffers[buf], /* src */
+ size,
+ TAG_BATCH_BUFFER,
+ 0, /* tid */
+ 0 /* rid */);
+ wait_on_mask(1 << TAG_BATCH_BUFFER);
+
+ /* Tell PPU we're done copying the buffer to local store */
+ D_PRINTF(CELL_DEBUG_CMD, "release batch buf %u\n", buf);
+ release_buffer(buf);
+
+ /*
+ * Loop over commands in the batch buffer
+ */
+ for (pos = 0; pos < usize; /* no incr */) {
+ switch (si_to_uint(buffer[pos])) {
+ /*
+ * rendering commands
+ */
+ case CELL_CMD_CLEAR_SURFACE:
+ {
+ struct cell_command_clear_surface *clr
+ = (struct cell_command_clear_surface *) &buffer[pos];
+ cmd_clear_surface(clr);
+ pos += sizeof(*clr) / 16;
+ }
+ break;
+ case CELL_CMD_RENDER:
+ {
+ struct cell_command_render *render
+ = (struct cell_command_render *) &buffer[pos];
+ uint pos_incr;
+ cmd_render(render, &pos_incr);
+ pos += ((pos_incr+1)&~1) / 2; // should 'fix' cmd_render return
+ }
+ break;
+ /*
+ * state-update commands
+ */
+ case CELL_CMD_STATE_FRAMEBUFFER:
+ {
+ struct cell_command_framebuffer *fb
+ = (struct cell_command_framebuffer *) &buffer[pos];
+ cmd_state_framebuffer(fb);
+ pos += sizeof(*fb) / 16;
+ }
+ break;
+ case CELL_CMD_STATE_FRAGMENT_OPS:
+ {
+ struct cell_command_fragment_ops *fops
+ = (struct cell_command_fragment_ops *) &buffer[pos];
+ cmd_state_fragment_ops(fops);
+ /* This is a variant-sized command */
+ pos += ROUNDUP16(sizeof(*fops) + fops->total_code_size) / 16;
+ }
+ break;
+ case CELL_CMD_STATE_FRAGMENT_PROGRAM:
+ {
+ struct cell_command_fragment_program *fp
+ = (struct cell_command_fragment_program *) &buffer[pos];
+ cmd_state_fragment_program(fp);
+ pos += sizeof(*fp) / 16;
+ }
+ break;
+ case CELL_CMD_STATE_FS_CONSTANTS:
+ pos = cmd_state_fs_constants(buffer, pos);
+ break;
+ case CELL_CMD_STATE_RASTERIZER:
+ {
+ struct cell_command_rasterizer *rast =
+ (struct cell_command_rasterizer *) &buffer[pos];
+ spu.rasterizer = rast->rasterizer;
+ pos += sizeof(*rast) / 16;
+ }
+ break;
+ case CELL_CMD_STATE_SAMPLER:
+ {
+ struct cell_command_sampler *sampler
+ = (struct cell_command_sampler *) &buffer[pos];
+ cmd_state_sampler(sampler);
+ pos += sizeof(*sampler) / 16;
+ }
+ break;
+ case CELL_CMD_STATE_TEXTURE:
+ {
+ struct cell_command_texture *texture
+ = (struct cell_command_texture *) &buffer[pos];
+ cmd_state_texture(texture);
+ pos += sizeof(*texture) / 16;
+ }
+ break;
+ case CELL_CMD_STATE_VERTEX_INFO:
+ cmd_state_vertex_info((struct vertex_info *) &buffer[pos+1]);
+ pos += 1 + ROUNDUP16(sizeof(struct vertex_info)) / 16;
+ break;
+ case CELL_CMD_STATE_VIEWPORT:
+ (void) memcpy(& draw.viewport, &buffer[pos+1],
+ sizeof(struct pipe_viewport_state));
+ pos += 1 + ROUNDUP16(sizeof(struct pipe_viewport_state)) / 16;
+ break;
+ case CELL_CMD_STATE_UNIFORMS:
+ draw.constants = (const float (*)[4]) (uintptr_t)spu_extract((vector unsigned int)buffer[pos+1],0);
+ pos += 2;
+ break;
+ case CELL_CMD_STATE_VS_ARRAY_INFO:
+ cmd_state_vs_array_info((struct cell_array_info *) &buffer[pos+1]);
+ pos += 1 + ROUNDUP16(sizeof(struct cell_array_info)) / 16;
+ break;
+ case CELL_CMD_STATE_BIND_VS:
+#if 0
+ spu_bind_vertex_shader(&draw,
+ (struct cell_shader_info *) &buffer[pos+1]);
+#endif
+ pos += 1 + ROUNDUP16(sizeof(struct cell_shader_info)) / 16;
+ break;
+ case CELL_CMD_STATE_ATTRIB_FETCH:
+ cmd_state_attrib_fetch((struct cell_attribute_fetch_code *)
+ &buffer[pos+1]);
+ pos += 1 + ROUNDUP16(sizeof(struct cell_attribute_fetch_code)) / 16;
+ break;
+ /*
+ * misc commands
+ */
+ case CELL_CMD_FINISH:
+ cmd_finish();
+ pos += 1;
+ break;
+ case CELL_CMD_FENCE:
+ {
+ struct cell_command_fence *fence_cmd =
+ (struct cell_command_fence *) &buffer[pos];
+ cmd_fence(fence_cmd);
+ pos += sizeof(*fence_cmd) / 16;
+ }
+ break;
+ case CELL_CMD_RELEASE_VERTS:
+ {
+ struct cell_command_release_verts *release
+ = (struct cell_command_release_verts *) &buffer[pos];
+ cmd_release_verts(release);
+ pos += sizeof(*release) / 16;
+ }
+ break;
+ case CELL_CMD_FLUSH_BUFFER_RANGE: {
+ struct cell_buffer_range *br = (struct cell_buffer_range *)
+ &buffer[pos+1];
+
+ spu_dcache_mark_dirty((unsigned) br->base, br->size);
+ pos += 1 + ROUNDUP16(sizeof(struct cell_buffer_range)) / 16;
+ break;
+ }
+ default:
+ printf("SPU %u: bad opcode: 0x%x\n", spu.init.id, si_to_uint(buffer[pos]));
+ ASSERT(0);
+ break;
+ }
+ }
+
+ D_PRINTF(CELL_DEBUG_CMD, "BATCH complete\n");
+}
+
+
+#define PERF 0
+
+
+/**
+ * Main loop for SPEs: Get a command, execute it, repeat.
+ */
+void
+command_loop(void)
+{
+ int exitFlag = 0;
+ uint t0, t1;
+
+ D_PRINTF(CELL_DEBUG_CMD, "Enter command loop\n");
+
+ while (!exitFlag) {
+ unsigned opcode;
+
+ D_PRINTF(CELL_DEBUG_CMD, "Wait for cmd...\n");
+
+ if (PERF)
+ spu_write_decrementer(~0);
+
+ /* read/wait from mailbox */
+ opcode = (unsigned int) spu_read_in_mbox();
+ D_PRINTF(CELL_DEBUG_CMD, "got cmd 0x%x\n", opcode);
+
+ if (PERF)
+ t0 = spu_read_decrementer();
+
+ switch (opcode & CELL_CMD_OPCODE_MASK) {
+ case CELL_CMD_EXIT:
+ D_PRINTF(CELL_DEBUG_CMD, "EXIT\n");
+ exitFlag = 1;
+ break;
+ case CELL_CMD_VS_EXECUTE:
+#if 0
+ spu_execute_vertex_shader(&draw, &cmd.vs);
+#endif
+ break;
+ case CELL_CMD_BATCH:
+ cmd_batch(opcode);
+ break;
+ default:
+ printf("Bad opcode 0x%x!\n", opcode & CELL_CMD_OPCODE_MASK);
+ }
+
+ if (PERF) {
+ t1 = spu_read_decrementer();
+ printf("wait mbox time: %gms batch time: %gms\n",
+ (~0u - t0) * spu.init.inv_timebase,
+ (t0 - t1) * spu.init.inv_timebase);
+ }
+ }
+
+ D_PRINTF(CELL_DEBUG_CMD, "Exit command loop\n");
+
+ if (spu.init.debug_flags & CELL_DEBUG_CACHE)
+ spu_dcache_report();
+}
+
+/* Initialize this module; we manage the fragment ops buffer here. */
+void
+spu_command_init(void)
+{
+ /* Install default/fallback fragment processing function.
+ * This will normally be overridden by a code-gen'd function
+ * unless CELL_FORCE_FRAGMENT_OPS_FALLBACK is set.
+ */
+ spu.fragment_ops[CELL_FACING_FRONT] = spu_fallback_fragment_ops;
+ spu.fragment_ops[CELL_FACING_BACK] = spu_fallback_fragment_ops;
+
+ /* Set up the basic empty buffer for code-gen'ed fragment ops */
+ spu.fragment_ops_code = NULL;
+ spu.fragment_ops_code_size = 0;
+}
+
+void
+spu_command_close(void)
+{
+ /* Deallocate the code-gen buffer for fragment ops, and reset the
+ * fragment ops functions to their initial setting (just to leave
+ * things in a good state).
+ */
+ if (spu.fragment_ops_code != NULL) {
+ free(spu.fragment_ops_code);
+ }
+ spu_command_init();
+}
diff --git a/src/gallium/drivers/cell/spu/spu_command.h b/src/gallium/drivers/cell/spu/spu_command.h
new file mode 100644
index 0000000000..83dcdade28
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_command.h
@@ -0,0 +1,35 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+extern void
+command_loop(void);
+
+extern void
+spu_command_init(void);
+
+extern void
+spu_command_close(void);
diff --git a/src/gallium/drivers/cell/spu/spu_dcache.c b/src/gallium/drivers/cell/spu/spu_dcache.c
new file mode 100644
index 0000000000..a6d67634fd
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_dcache.c
@@ -0,0 +1,145 @@
+/*
+ * (C) Copyright IBM Corporation 2008
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "cell/common.h"
+#include "spu_main.h"
+#include "spu_dcache.h"
+
+#define CACHELINE_LOG2SIZE 7
+#define LINE_SIZE (1U << 7)
+#define ALIGN_MASK (~(LINE_SIZE - 1))
+
+#define CACHE_NAME data
+#define CACHED_TYPE qword
+#define CACHE_TYPE CACHE_TYPE_RO
+#define CACHE_SET_TAGID(set) (((set) & 0x03) + TAG_DCACHE0)
+#define CACHE_LOG2NNWAY 2
+#define CACHE_LOG2NSETS 6
+#ifdef DEBUG
+#define CACHE_STATS 1
+#endif
+#include <cache-api.h>
+
+/* Yes folks, this is ugly.
+ */
+#undef CACHE_NWAY
+#undef CACHE_NSETS
+#define CACHE_NAME data
+#define CACHE_NWAY 4
+#define CACHE_NSETS (1U << 6)
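+
+/* With the parameters above, the read-only data cache is 4-way set
+ * associative with 64 sets of 128-byte lines:
+ *
+ *    CACHE_NWAY * CACHE_NSETS * LINE_SIZE = 4 * 64 * 128 bytes = 32 KB
+ *
+ * of cached data held in the SPU local store.
+ */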
+
+
+/**
+ * Fetch an arbitrary number of bytes from an unaligned address
+ *
+ * \param dst Destination data buffer
+ * \param ea Main memory effective address of source data
+ * \param size Number of bytes to read
+ *
+ * \warning
+ * As is hinted by the type of the \c dst pointer, this function writes
+ * multiples of 16 bytes.
+ */
+void
+spu_dcache_fetch_unaligned(qword *dst, unsigned ea, unsigned size)
+{
+ const int shift = ea & 0x0f;
+ const unsigned read_size = ROUNDUP16(size + shift);
+ const unsigned last_read = ROUNDUP16(ea + size);
+ const qword *const last_write = dst + (ROUNDUP16(size) / 16);
+ unsigned i;
+
+
+ if (shift == 0) {
+ /* Data is already aligned. Fetch directly into the destination buffer.
+ */
+ for (i = 0; i < size; i += 16) {
+ *(dst++) = cache_rd(data, ea + i);
+ }
+ } else {
+ qword hi;
+
+
+ /* Please exercise extreme caution when modifying this code. This code
+ * must not read past the end of the page containing the source data,
+ * and it must not write more than ((size + 15) / 16) qwords to the
+ * destination buffer.
+ */
+ ea &= ~0x0f;
+ hi = cache_rd(data, ea);
+ for (i = 16; i < read_size; i += 16) {
+ qword lo = cache_rd(data, ea + i);
+
+ *(dst++) = si_or((qword) spu_slqwbyte(hi, shift),
+ (qword) spu_rlmaskqwbyte(lo, shift - 16));
+ hi = lo;
+ }
+
+ if (dst != last_write) {
+ *(dst++) = si_or((qword) spu_slqwbyte(hi, shift), si_il(0));
+ }
+ }
+
+ ASSERT((ea + i) == last_read);
+ ASSERT(dst == last_write);
+}
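+
+/* Typical usage (sketch only, mirroring the pattern that
+ * spu_exec_machine_run() uses later in this series): given a main-memory
+ * effective address ea, fetch the structure stored there into a 16-byte
+ * aligned local buffer rounded up to a whole number of qwords.
+ */
+#if 0
+   union {
+      struct cell_buffer_range range;
+      qword buffer[ROUNDUP16(sizeof(struct cell_buffer_range)) / 16];
+   } r ALIGN16_ATTRIB;
+
+   spu_dcache_fetch_unaligned(r.buffer, ea, sizeof(r.range));
+   /* r.range now holds a local copy of the structure at effective addr ea */
+#endif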
+
+
+/**
+ * Notify the cache that a range of main memory may have been modified
+ */
+void
+spu_dcache_mark_dirty(unsigned ea, unsigned size)
+{
+ unsigned i;
+ const unsigned aligned_start = (ea & ALIGN_MASK);
+ const unsigned aligned_end = (ea + size + (LINE_SIZE - 1))
+ & ALIGN_MASK;
+
+
+ for (i = 0; i < (CACHE_NWAY * CACHE_NSETS); i++) {
+ const unsigned entry = __cache_dir[i];
+ const unsigned addr = entry & ~0x0f;
+
+ __cache_dir[i] = ((addr >= aligned_start) && (addr < aligned_end))
+ ? (entry & ~CACHELINE_VALID) : entry;
+ }
+}
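+
+/* Example of the rounding above with LINE_SIZE = 128: marking ea = 0x1234,
+ * size = 0x20 dirty gives aligned_start = 0x1200 and aligned_end = 0x1280,
+ * so only directory entries whose line address is 0x1200 are invalidated.
+ */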
+
+
+/**
+ * Print cache utilization report
+ */
+void
+spu_dcache_report(void)
+{
+#ifdef CACHE_STATS
+ if (spu.init.id == 0) {
+ printf("SPU 0: Texture cache report:\n");
+ cache_pr_stats(data);
+ }
+#endif
+}
+
+
diff --git a/src/gallium/drivers/cell/spu/spu_dcache.h b/src/gallium/drivers/cell/spu/spu_dcache.h
new file mode 100644
index 0000000000..39a19eb31b
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_dcache.h
@@ -0,0 +1,37 @@
+/*
+ * (C) Copyright IBM Corporation 2008
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * AUTHORS, COPYRIGHT HOLDERS, AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SPU_DCACHE_H
+#define SPU_DCACHE_H
+
+extern void
+spu_dcache_fetch_unaligned(qword *dst, unsigned ea, unsigned size);
+
+extern void
+spu_dcache_mark_dirty(unsigned ea, unsigned size);
+
+extern void
+spu_dcache_report(void);
+
+#endif /* SPU_DCACHE_H */
diff --git a/src/gallium/drivers/cell/spu/spu_exec.c b/src/gallium/drivers/cell/spu/spu_exec.c
new file mode 100644
index 0000000000..0eaae2e451
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_exec.c
@@ -0,0 +1,1907 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * TGSI interpreter/executor.
+ *
+ * Flow control information:
+ *
+ * Since we operate on 'quads' (4 pixels or 4 vertices in parallel)
+ * flow control statements (IF/ELSE/ENDIF, LOOP/ENDLOOP) require special
+ * care since a condition may be true for some quad components but false
+ * for other components.
+ *
+ * We basically execute all statements (even if they're in the part of
+ * an IF/ELSE clause that's "not taken") and use a special mask to
+ * control writing to destination registers. This is the ExecMask.
+ * See store_dest().
+ *
+ * The ExecMask is computed from three other masks (CondMask, LoopMask and
+ * ContMask) which are controlled by the flow control instructions (namely:
+ * IF/ELSE/ENDIF, LOOP/ENDLOOP and CONT).
+ *
+ *
+ * Authors:
+ * Michal Krol
+ * Brian Paul
+ */
+
+#include <transpose_matrix4x4.h>
+#include <simdmath/ceilf4.h>
+#include <simdmath/cosf4.h>
+#include <simdmath/divf4.h>
+#include <simdmath/floorf4.h>
+#include <simdmath/log2f4.h>
+#include <simdmath/powf4.h>
+#include <simdmath/sinf4.h>
+#include <simdmath/sqrtf4.h>
+#include <simdmath/truncf4.h>
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_state.h"
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_util.h"
+#include "spu_exec.h"
+#include "spu_main.h"
+#include "spu_vertex_shader.h"
+#include "spu_dcache.h"
+#include "cell/common.h"
+
+#define TILE_TOP_LEFT 0
+#define TILE_TOP_RIGHT 1
+#define TILE_BOTTOM_LEFT 2
+#define TILE_BOTTOM_RIGHT 3
+
+/*
+ * Shorthand locations of various utility registers (_I = Index, _C = Channel)
+ */
+#define TEMP_0_I TGSI_EXEC_TEMP_00000000_I
+#define TEMP_0_C TGSI_EXEC_TEMP_00000000_C
+#define TEMP_7F_I TGSI_EXEC_TEMP_7FFFFFFF_I
+#define TEMP_7F_C TGSI_EXEC_TEMP_7FFFFFFF_C
+#define TEMP_80_I TGSI_EXEC_TEMP_80000000_I
+#define TEMP_80_C TGSI_EXEC_TEMP_80000000_C
+#define TEMP_FF_I TGSI_EXEC_TEMP_FFFFFFFF_I
+#define TEMP_FF_C TGSI_EXEC_TEMP_FFFFFFFF_C
+#define TEMP_1_I TGSI_EXEC_TEMP_ONE_I
+#define TEMP_1_C TGSI_EXEC_TEMP_ONE_C
+#define TEMP_2_I TGSI_EXEC_TEMP_TWO_I
+#define TEMP_2_C TGSI_EXEC_TEMP_TWO_C
+#define TEMP_128_I TGSI_EXEC_TEMP_128_I
+#define TEMP_128_C TGSI_EXEC_TEMP_128_C
+#define TEMP_M128_I TGSI_EXEC_TEMP_MINUS_128_I
+#define TEMP_M128_C TGSI_EXEC_TEMP_MINUS_128_C
+#define TEMP_KILMASK_I TGSI_EXEC_TEMP_KILMASK_I
+#define TEMP_KILMASK_C TGSI_EXEC_TEMP_KILMASK_C
+#define TEMP_OUTPUT_I TGSI_EXEC_TEMP_OUTPUT_I
+#define TEMP_OUTPUT_C TGSI_EXEC_TEMP_OUTPUT_C
+#define TEMP_PRIMITIVE_I TGSI_EXEC_TEMP_PRIMITIVE_I
+#define TEMP_PRIMITIVE_C TGSI_EXEC_TEMP_PRIMITIVE_C
+#define TEMP_R0 TGSI_EXEC_TEMP_R0
+
+#define FOR_EACH_CHANNEL(CHAN)\
+ for (CHAN = 0; CHAN < 4; CHAN++)
+
+#define IS_CHANNEL_ENABLED(INST, CHAN)\
+ ((INST).FullDstRegisters[0].DstRegister.WriteMask & (1 << (CHAN)))
+
+#define IS_CHANNEL_ENABLED2(INST, CHAN)\
+ ((INST).FullDstRegisters[1].DstRegister.WriteMask & (1 << (CHAN)))
+
+#define FOR_EACH_ENABLED_CHANNEL(INST, CHAN)\
+ FOR_EACH_CHANNEL( CHAN )\
+ if (IS_CHANNEL_ENABLED( INST, CHAN ))
+
+#define FOR_EACH_ENABLED_CHANNEL2(INST, CHAN)\
+ FOR_EACH_CHANNEL( CHAN )\
+ if (IS_CHANNEL_ENABLED2( INST, CHAN ))
+
+
+/** The execution mask depends on the conditional mask and the loop mask */
+#define UPDATE_EXEC_MASK(MACH) \
+ MACH->ExecMask = MACH->CondMask & MACH->LoopMask & MACH->ContMask & MACH->FuncMask
+
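+/* Example: if an IF statement disabled quad components 1 and 3
+ * (CondMask = 0x5) while LoopMask, ContMask and FuncMask are all 0xf,
+ * then ExecMask = 0x5 and store_dest() only writes components 0 and 2.
+ */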
+
+#define CHAN_X 0
+#define CHAN_Y 1
+#define CHAN_Z 2
+#define CHAN_W 3
+
+
+
+/**
+ * Initialize machine state by expanding tokens to full instructions,
+ * allocating temporary storage, setting up constants, etc.
+ * After this, we can call spu_exec_machine_run() many times.
+ */
+void
+spu_exec_machine_init(struct spu_exec_machine *mach,
+ uint numSamplers,
+ struct spu_sampler *samplers,
+ unsigned processor)
+{
+ const qword zero = si_il(0);
+ const qword not_zero = si_il(~0);
+
+ (void) numSamplers;
+ mach->Samplers = samplers;
+ mach->Processor = processor;
+ mach->Addrs = &mach->Temps[TGSI_EXEC_NUM_TEMPS];
+
+ /* Setup constants. */
+ mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q = zero;
+ mach->Temps[TEMP_FF_I].xyzw[TEMP_FF_C].q = not_zero;
+ mach->Temps[TEMP_7F_I].xyzw[TEMP_7F_C].q = si_shli(not_zero, -1);
+ mach->Temps[TEMP_80_I].xyzw[TEMP_80_C].q = si_shli(not_zero, 31);
+
+ mach->Temps[TEMP_1_I].xyzw[TEMP_1_C].q = (qword) spu_splats(1.0f);
+ mach->Temps[TEMP_2_I].xyzw[TEMP_2_C].q = (qword) spu_splats(2.0f);
+ mach->Temps[TEMP_128_I].xyzw[TEMP_128_C].q = (qword) spu_splats(128.0f);
+ mach->Temps[TEMP_M128_I].xyzw[TEMP_M128_C].q = (qword) spu_splats(-128.0f);
+}
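+
+/* Rough usage sketch (the real callers live elsewhere in the Cell SPU
+ * code; the variable names below are illustrative only):
+ *
+ *    struct spu_exec_machine machine ALIGN16_ATTRIB;
+ *
+ *    spu_exec_machine_init(&machine, 1, &sampler, TGSI_PROCESSOR_VERTEX);
+ *    machine.Instructions    = instructions;
+ *    machine.NumInstructions = num_instructions;
+ *    machine.Declarations    = declarations;
+ *    machine.NumDeclarations = num_declarations;
+ *    machine.Consts  = constants;
+ *    machine.Inputs  = inputs;
+ *    machine.Outputs = outputs;
+ *    spu_exec_machine_run(&machine);
+ */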
+
+
+static INLINE qword
+micro_abs(qword src)
+{
+   /* clear the sign bit: shift it out to the left, then shift back right
+    * with zero fill
+    */
+   return si_rotmi(si_shli(src, 1), -1);
+}
+
+static INLINE qword
+micro_ceil(qword src)
+{
+ return (qword) _ceilf4((vec_float4) src);
+}
+
+static INLINE qword
+micro_cos(qword src)
+{
+ return (qword) _cosf4((vec_float4) src);
+}
+
+static const qword br_shuf = {
+ TILE_BOTTOM_RIGHT + 0, TILE_BOTTOM_RIGHT + 1,
+ TILE_BOTTOM_RIGHT + 2, TILE_BOTTOM_RIGHT + 3,
+ TILE_BOTTOM_RIGHT + 0, TILE_BOTTOM_RIGHT + 1,
+ TILE_BOTTOM_RIGHT + 2, TILE_BOTTOM_RIGHT + 3,
+ TILE_BOTTOM_RIGHT + 0, TILE_BOTTOM_RIGHT + 1,
+ TILE_BOTTOM_RIGHT + 2, TILE_BOTTOM_RIGHT + 3,
+ TILE_BOTTOM_RIGHT + 0, TILE_BOTTOM_RIGHT + 1,
+ TILE_BOTTOM_RIGHT + 2, TILE_BOTTOM_RIGHT + 3,
+};
+
+static const qword bl_shuf = {
+ TILE_BOTTOM_LEFT + 0, TILE_BOTTOM_LEFT + 1,
+ TILE_BOTTOM_LEFT + 2, TILE_BOTTOM_LEFT + 3,
+ TILE_BOTTOM_LEFT + 0, TILE_BOTTOM_LEFT + 1,
+ TILE_BOTTOM_LEFT + 2, TILE_BOTTOM_LEFT + 3,
+ TILE_BOTTOM_LEFT + 0, TILE_BOTTOM_LEFT + 1,
+ TILE_BOTTOM_LEFT + 2, TILE_BOTTOM_LEFT + 3,
+ TILE_BOTTOM_LEFT + 0, TILE_BOTTOM_LEFT + 1,
+ TILE_BOTTOM_LEFT + 2, TILE_BOTTOM_LEFT + 3,
+};
+
+static const qword tl_shuf = {
+ TILE_TOP_LEFT + 0, TILE_TOP_LEFT + 1,
+ TILE_TOP_LEFT + 2, TILE_TOP_LEFT + 3,
+ TILE_TOP_LEFT + 0, TILE_TOP_LEFT + 1,
+ TILE_TOP_LEFT + 2, TILE_TOP_LEFT + 3,
+ TILE_TOP_LEFT + 0, TILE_TOP_LEFT + 1,
+ TILE_TOP_LEFT + 2, TILE_TOP_LEFT + 3,
+ TILE_TOP_LEFT + 0, TILE_TOP_LEFT + 1,
+ TILE_TOP_LEFT + 2, TILE_TOP_LEFT + 3,
+};
+
+static qword
+micro_ddx(qword src)
+{
+ qword bottom_right = si_shufb(src, src, br_shuf);
+ qword bottom_left = si_shufb(src, src, bl_shuf);
+
+ return si_fs(bottom_right, bottom_left);
+}
+
+static qword
+micro_ddy(qword src)
+{
+ qword top_left = si_shufb(src, src, tl_shuf);
+ qword bottom_left = si_shufb(src, src, bl_shuf);
+
+ return si_fs(top_left, bottom_left);
+}
+
+static INLINE qword
+micro_div(qword src0, qword src1)
+{
+ return (qword) _divf4((vec_float4) src0, (vec_float4) src1);
+}
+
+static qword
+micro_flr(qword src)
+{
+ return (qword) _floorf4((vec_float4) src);
+}
+
+static qword
+micro_frc(qword src)
+{
+ return si_fs(src, (qword) _floorf4((vec_float4) src));
+}
+
+static INLINE qword
+micro_ge(qword src0, qword src1)
+{
+ return si_or(si_fceq(src0, src1), si_fcgt(src0, src1));
+}
+
+static qword
+micro_lg2(qword src)
+{
+ return (qword) _log2f4((vec_float4) src);
+}
+
+static INLINE qword
+micro_lt(qword src0, qword src1)
+{
+ const qword tmp = si_or(si_fceq(src0, src1), si_fcgt(src0, src1));
+
+ return si_xori(tmp, 0xff);
+}
+
+static INLINE qword
+micro_max(qword src0, qword src1)
+{
+ return si_selb(src1, src0, si_fcgt(src0, src1));
+}
+
+static INLINE qword
+micro_min(qword src0, qword src1)
+{
+ return si_selb(src0, src1, si_fcgt(src0, src1));
+}
+
+static qword
+micro_neg(qword src)
+{
+ return si_xor(src, (qword) spu_splats(0x80000000));
+}
+
+static qword
+micro_set_sign(qword src)
+{
+ return si_or(src, (qword) spu_splats(0x80000000));
+}
+
+static qword
+micro_pow(qword src0, qword src1)
+{
+ return (qword) _powf4((vec_float4) src0, (vec_float4) src1);
+}
+
+static qword
+micro_rnd(qword src)
+{
+ const qword half = (qword) spu_splats(0.5f);
+
+ /* May be able to use _roundf4. There may be some difference, though.
+ */
+ return (qword) _floorf4((vec_float4) si_fa(src, half));
+}
+
+static INLINE qword
+micro_ishr(qword src0, qword src1)
+{
+ return si_rotma(src0, si_sfi(src1, 0));
+}
+
+static qword
+micro_trunc(qword src)
+{
+ return (qword) _truncf4((vec_float4) src);
+}
+
+static qword
+micro_sin(qword src)
+{
+ return (qword) _sinf4((vec_float4) src);
+}
+
+static INLINE qword
+micro_sqrt(qword src)
+{
+ return (qword) _sqrtf4((vec_float4) src);
+}
+
+static void
+fetch_src_file_channel(
+ const struct spu_exec_machine *mach,
+ const uint file,
+ const uint swizzle,
+ const union spu_exec_channel *index,
+ union spu_exec_channel *chan )
+{
+ switch( swizzle ) {
+ case TGSI_EXTSWIZZLE_X:
+ case TGSI_EXTSWIZZLE_Y:
+ case TGSI_EXTSWIZZLE_Z:
+ case TGSI_EXTSWIZZLE_W:
+ switch( file ) {
+ case TGSI_FILE_CONSTANT: {
+ unsigned i;
+
+ for (i = 0; i < 4; i++) {
+ const float *ptr = mach->Consts[index->i[i]];
+ float tmp[4];
+
+ spu_dcache_fetch_unaligned((qword *) tmp,
+ (uintptr_t)(ptr + swizzle),
+ sizeof(float));
+
+ chan->f[i] = tmp[0];
+ }
+ break;
+ }
+
+ case TGSI_FILE_INPUT:
+ chan->u[0] = mach->Inputs[index->i[0]].xyzw[swizzle].u[0];
+ chan->u[1] = mach->Inputs[index->i[1]].xyzw[swizzle].u[1];
+ chan->u[2] = mach->Inputs[index->i[2]].xyzw[swizzle].u[2];
+ chan->u[3] = mach->Inputs[index->i[3]].xyzw[swizzle].u[3];
+ break;
+
+ case TGSI_FILE_TEMPORARY:
+ chan->u[0] = mach->Temps[index->i[0]].xyzw[swizzle].u[0];
+ chan->u[1] = mach->Temps[index->i[1]].xyzw[swizzle].u[1];
+ chan->u[2] = mach->Temps[index->i[2]].xyzw[swizzle].u[2];
+ chan->u[3] = mach->Temps[index->i[3]].xyzw[swizzle].u[3];
+ break;
+
+ case TGSI_FILE_IMMEDIATE:
+ ASSERT( index->i[0] < (int) mach->ImmLimit );
+ ASSERT( index->i[1] < (int) mach->ImmLimit );
+ ASSERT( index->i[2] < (int) mach->ImmLimit );
+ ASSERT( index->i[3] < (int) mach->ImmLimit );
+
+ chan->f[0] = mach->Imms[index->i[0]][swizzle];
+ chan->f[1] = mach->Imms[index->i[1]][swizzle];
+ chan->f[2] = mach->Imms[index->i[2]][swizzle];
+ chan->f[3] = mach->Imms[index->i[3]][swizzle];
+ break;
+
+ case TGSI_FILE_ADDRESS:
+ chan->u[0] = mach->Addrs[index->i[0]].xyzw[swizzle].u[0];
+ chan->u[1] = mach->Addrs[index->i[1]].xyzw[swizzle].u[1];
+ chan->u[2] = mach->Addrs[index->i[2]].xyzw[swizzle].u[2];
+ chan->u[3] = mach->Addrs[index->i[3]].xyzw[swizzle].u[3];
+ break;
+
+ case TGSI_FILE_OUTPUT:
+ /* vertex/fragment output vars can be read too */
+ chan->u[0] = mach->Outputs[index->i[0]].xyzw[swizzle].u[0];
+ chan->u[1] = mach->Outputs[index->i[1]].xyzw[swizzle].u[1];
+ chan->u[2] = mach->Outputs[index->i[2]].xyzw[swizzle].u[2];
+ chan->u[3] = mach->Outputs[index->i[3]].xyzw[swizzle].u[3];
+ break;
+
+ default:
+ ASSERT( 0 );
+ }
+ break;
+
+ case TGSI_EXTSWIZZLE_ZERO:
+ *chan = mach->Temps[TEMP_0_I].xyzw[TEMP_0_C];
+ break;
+
+ case TGSI_EXTSWIZZLE_ONE:
+ *chan = mach->Temps[TEMP_1_I].xyzw[TEMP_1_C];
+ break;
+
+ default:
+ ASSERT( 0 );
+ }
+}
+
+static void
+fetch_source(
+ const struct spu_exec_machine *mach,
+ union spu_exec_channel *chan,
+ const struct tgsi_full_src_register *reg,
+ const uint chan_index )
+{
+ union spu_exec_channel index;
+ uint swizzle;
+
+ index.i[0] =
+ index.i[1] =
+ index.i[2] =
+ index.i[3] = reg->SrcRegister.Index;
+
+ if (reg->SrcRegister.Indirect) {
+ union spu_exec_channel index2;
+ union spu_exec_channel indir_index;
+
+ index2.i[0] =
+ index2.i[1] =
+ index2.i[2] =
+ index2.i[3] = reg->SrcRegisterInd.Index;
+
+ swizzle = tgsi_util_get_src_register_swizzle(&reg->SrcRegisterInd,
+ CHAN_X);
+ fetch_src_file_channel(
+ mach,
+ reg->SrcRegisterInd.File,
+ swizzle,
+ &index2,
+ &indir_index );
+
+ index.q = si_a(index.q, indir_index.q);
+ }
+
+ if( reg->SrcRegister.Dimension ) {
+ switch( reg->SrcRegister.File ) {
+ case TGSI_FILE_INPUT:
+ index.q = si_mpyi(index.q, 17);
+ break;
+ case TGSI_FILE_CONSTANT:
+ index.q = si_shli(index.q, 12);
+ break;
+ default:
+ ASSERT( 0 );
+ }
+
+ index.i[0] += reg->SrcRegisterDim.Index;
+ index.i[1] += reg->SrcRegisterDim.Index;
+ index.i[2] += reg->SrcRegisterDim.Index;
+ index.i[3] += reg->SrcRegisterDim.Index;
+
+ if (reg->SrcRegisterDim.Indirect) {
+ union spu_exec_channel index2;
+ union spu_exec_channel indir_index;
+
+ index2.i[0] =
+ index2.i[1] =
+ index2.i[2] =
+ index2.i[3] = reg->SrcRegisterDimInd.Index;
+
+ swizzle = tgsi_util_get_src_register_swizzle( &reg->SrcRegisterDimInd, CHAN_X );
+ fetch_src_file_channel(
+ mach,
+ reg->SrcRegisterDimInd.File,
+ swizzle,
+ &index2,
+ &indir_index );
+
+ index.q = si_a(index.q, indir_index.q);
+ }
+ }
+
+ swizzle = tgsi_util_get_full_src_register_extswizzle( reg, chan_index );
+ fetch_src_file_channel(
+ mach,
+ reg->SrcRegister.File,
+ swizzle,
+ &index,
+ chan );
+
+ switch (tgsi_util_get_full_src_register_sign_mode( reg, chan_index )) {
+ case TGSI_UTIL_SIGN_CLEAR:
+ chan->q = micro_abs(chan->q);
+ break;
+
+ case TGSI_UTIL_SIGN_SET:
+ chan->q = micro_set_sign(chan->q);
+ break;
+
+ case TGSI_UTIL_SIGN_TOGGLE:
+ chan->q = micro_neg(chan->q);
+ break;
+
+ case TGSI_UTIL_SIGN_KEEP:
+ break;
+ }
+
+ if (reg->SrcRegisterExtMod.Complement) {
+ chan->q = si_fs(mach->Temps[TEMP_1_I].xyzw[TEMP_1_C].q, chan->q);
+ }
+}
+
+static void
+store_dest(
+ struct spu_exec_machine *mach,
+ const union spu_exec_channel *chan,
+ const struct tgsi_full_dst_register *reg,
+ const struct tgsi_full_instruction *inst,
+ uint chan_index )
+{
+ union spu_exec_channel *dst;
+
+ switch( reg->DstRegister.File ) {
+ case TGSI_FILE_NULL:
+ return;
+
+ case TGSI_FILE_OUTPUT:
+ dst = &mach->Outputs[mach->Temps[TEMP_OUTPUT_I].xyzw[TEMP_OUTPUT_C].u[0]
+ + reg->DstRegister.Index].xyzw[chan_index];
+ break;
+
+ case TGSI_FILE_TEMPORARY:
+ dst = &mach->Temps[reg->DstRegister.Index].xyzw[chan_index];
+ break;
+
+ case TGSI_FILE_ADDRESS:
+ dst = &mach->Addrs[reg->DstRegister.Index].xyzw[chan_index];
+ break;
+
+ default:
+ ASSERT( 0 );
+ return;
+ }
+
+ switch (inst->Instruction.Saturate)
+ {
+ case TGSI_SAT_NONE:
+ if (mach->ExecMask & 0x1)
+ dst->i[0] = chan->i[0];
+ if (mach->ExecMask & 0x2)
+ dst->i[1] = chan->i[1];
+ if (mach->ExecMask & 0x4)
+ dst->i[2] = chan->i[2];
+ if (mach->ExecMask & 0x8)
+ dst->i[3] = chan->i[3];
+ break;
+
+ case TGSI_SAT_ZERO_ONE:
+ /* XXX need to obey ExecMask here */
+ dst->q = micro_max(chan->q, mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q);
+ dst->q = micro_min(dst->q, mach->Temps[TEMP_1_I].xyzw[TEMP_1_C].q);
+ break;
+
+ case TGSI_SAT_MINUS_PLUS_ONE:
+ ASSERT( 0 );
+ break;
+
+ default:
+ ASSERT( 0 );
+ }
+}
+
+#define FETCH(VAL,INDEX,CHAN)\
+ fetch_source (mach, VAL, &inst->FullSrcRegisters[INDEX], CHAN)
+
+#define STORE(VAL,INDEX,CHAN)\
+ store_dest (mach, VAL, &inst->FullDstRegisters[INDEX], inst, CHAN )
+
+
+/**
+ * Execute ARB-style KIL which is predicated by a src register.
+ * Kill fragment if any of the four values is less than zero.
+ */
+static void
+exec_kil(struct spu_exec_machine *mach,
+ const struct tgsi_full_instruction *inst)
+{
+ uint uniquemask;
+ uint chan_index;
+ uint kilmask = 0; /* bit 0 = pixel 0, bit 1 = pixel 1, etc */
+ union spu_exec_channel r[1];
+
+ /* This mask stores component bits that were already tested. Note that
+    * we test if the value is less than zero, so 1.0 and 0.0 need not be
+ * tested. */
+ uniquemask = (1 << TGSI_EXTSWIZZLE_ZERO) | (1 << TGSI_EXTSWIZZLE_ONE);
+
+ for (chan_index = 0; chan_index < 4; chan_index++)
+ {
+ uint swizzle;
+ uint i;
+
+ /* unswizzle channel */
+ swizzle = tgsi_util_get_full_src_register_extswizzle (
+ &inst->FullSrcRegisters[0],
+ chan_index);
+
+ /* check if the component has not been already tested */
+ if (uniquemask & (1 << swizzle))
+ continue;
+ uniquemask |= 1 << swizzle;
+
+ FETCH(&r[0], 0, chan_index);
+ for (i = 0; i < 4; i++)
+ if (r[0].f[i] < 0.0f)
+ kilmask |= 1 << i;
+ }
+
+ mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0] |= kilmask;
+}
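+
+/* Example: if the tested register evaluates to { -1.0, 0.5, 2.0, -0.25 }
+ * for the four pixels of the quad, pixels 0 and 3 are negative, so
+ * kilmask = 0x1 | 0x8 = 0x9 and those two fragments get discarded.
+ */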
+
+/**
+ * Execute NVIDIA-style KIL which is predicated by a condition code.
+ * Kill fragment if the condition code is TRUE.
+ */
+static void
+exec_kilp(struct spu_exec_machine *mach,
+ const struct tgsi_full_instruction *inst)
+{
+ uint kilmask = 0; /* bit 0 = pixel 0, bit 1 = pixel 1, etc */
+
+ /* TODO: build kilmask from CC mask */
+
+ mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0] |= kilmask;
+}
+
+/*
+ * Fetch a texel using STR texture coordinates.
+ */
+static void
+fetch_texel( struct spu_sampler *sampler,
+ const union spu_exec_channel *s,
+ const union spu_exec_channel *t,
+ const union spu_exec_channel *p,
+ float lodbias, /* XXX should be float[4] */
+ union spu_exec_channel *r,
+ union spu_exec_channel *g,
+ union spu_exec_channel *b,
+ union spu_exec_channel *a )
+{
+ qword rgba[4];
+ qword out[4];
+
+ sampler->get_samples(sampler, s->f, t->f, p->f, lodbias,
+ (float (*)[4]) rgba);
+
+ _transpose_matrix4x4((vec_float4 *) out, (vec_float4 *) rgba);
+ r->q = out[0];
+ g->q = out[1];
+ b->q = out[2];
+ a->q = out[3];
+}
+
+
+static void
+exec_tex(struct spu_exec_machine *mach,
+ const struct tgsi_full_instruction *inst,
+ boolean biasLod, boolean projected)
+{
+ const uint unit = inst->FullSrcRegisters[1].SrcRegister.Index;
+ union spu_exec_channel r[8];
+ uint chan_index;
+ float lodBias;
+
+ /* printf("Sampler %u unit %u\n", sampler, unit); */
+
+ switch (inst->InstructionExtTexture.Texture) {
+ case TGSI_TEXTURE_1D:
+
+ FETCH(&r[0], 0, CHAN_X);
+
+ if (projected) {
+ FETCH(&r[1], 0, CHAN_W);
+ r[0].q = micro_div(r[0].q, r[1].q);
+ }
+
+ if (biasLod) {
+ FETCH(&r[1], 0, CHAN_W);
+         lodBias = r[1].f[0];
+ }
+ else
+ lodBias = 0.0;
+
+ fetch_texel(&mach->Samplers[unit],
+ &r[0], NULL, NULL, lodBias, /* S, T, P, BIAS */
+ &r[0], &r[1], &r[2], &r[3]); /* R, G, B, A */
+ break;
+
+ case TGSI_TEXTURE_2D:
+ case TGSI_TEXTURE_RECT:
+
+ FETCH(&r[0], 0, CHAN_X);
+ FETCH(&r[1], 0, CHAN_Y);
+ FETCH(&r[2], 0, CHAN_Z);
+
+ if (projected) {
+ FETCH(&r[3], 0, CHAN_W);
+ r[0].q = micro_div(r[0].q, r[3].q);
+ r[1].q = micro_div(r[1].q, r[3].q);
+ r[2].q = micro_div(r[2].q, r[3].q);
+ }
+
+ if (biasLod) {
+ FETCH(&r[3], 0, CHAN_W);
+ lodBias = r[3].f[0];
+ }
+ else
+ lodBias = 0.0;
+
+ fetch_texel(&mach->Samplers[unit],
+ &r[0], &r[1], &r[2], lodBias, /* inputs */
+ &r[0], &r[1], &r[2], &r[3]); /* outputs */
+ break;
+
+ case TGSI_TEXTURE_3D:
+ case TGSI_TEXTURE_CUBE:
+
+ FETCH(&r[0], 0, CHAN_X);
+ FETCH(&r[1], 0, CHAN_Y);
+ FETCH(&r[2], 0, CHAN_Z);
+
+ if (projected) {
+ FETCH(&r[3], 0, CHAN_W);
+ r[0].q = micro_div(r[0].q, r[3].q);
+ r[1].q = micro_div(r[1].q, r[3].q);
+ r[2].q = micro_div(r[2].q, r[3].q);
+ }
+
+ if (biasLod) {
+ FETCH(&r[3], 0, CHAN_W);
+ lodBias = r[3].f[0];
+ }
+ else
+ lodBias = 0.0;
+
+ fetch_texel(&mach->Samplers[unit],
+ &r[0], &r[1], &r[2], lodBias,
+ &r[0], &r[1], &r[2], &r[3]);
+ break;
+
+ default:
+ ASSERT (0);
+ }
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[chan_index], 0, chan_index );
+ }
+}
+
+
+
+static void
+constant_interpolation(
+ struct spu_exec_machine *mach,
+ unsigned attrib,
+ unsigned chan )
+{
+ unsigned i;
+
+ for( i = 0; i < QUAD_SIZE; i++ ) {
+ mach->Inputs[attrib].xyzw[chan].f[i] = mach->InterpCoefs[attrib].a0[chan];
+ }
+}
+
+static void
+linear_interpolation(
+ struct spu_exec_machine *mach,
+ unsigned attrib,
+ unsigned chan )
+{
+ const float x = mach->QuadPos.xyzw[0].f[0];
+ const float y = mach->QuadPos.xyzw[1].f[0];
+ const float dadx = mach->InterpCoefs[attrib].dadx[chan];
+ const float dady = mach->InterpCoefs[attrib].dady[chan];
+ const float a0 = mach->InterpCoefs[attrib].a0[chan] + dadx * x + dady * y;
+ mach->Inputs[attrib].xyzw[chan].f[0] = a0;
+ mach->Inputs[attrib].xyzw[chan].f[1] = a0 + dadx;
+ mach->Inputs[attrib].xyzw[chan].f[2] = a0 + dady;
+ mach->Inputs[attrib].xyzw[chan].f[3] = a0 + dadx + dady;
+}
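+
+/* The four values computed above cover the 2x2 quad: with the quad's first
+ * fragment at (x, y), e.g. a0 = 1.0, dadx = 0.25, dady = 0.5 yields
+ * { 1.0, 1.25, 1.5, 1.75 } for (x,y), (x+1,y), (x,y+1) and (x+1,y+1).
+ */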
+
+static void
+perspective_interpolation(
+ struct spu_exec_machine *mach,
+ unsigned attrib,
+ unsigned chan )
+{
+ const float x = mach->QuadPos.xyzw[0].f[0];
+ const float y = mach->QuadPos.xyzw[1].f[0];
+ const float dadx = mach->InterpCoefs[attrib].dadx[chan];
+ const float dady = mach->InterpCoefs[attrib].dady[chan];
+ const float a0 = mach->InterpCoefs[attrib].a0[chan] + dadx * x + dady * y;
+ const float *w = mach->QuadPos.xyzw[3].f;
+ /* divide by W here */
+ mach->Inputs[attrib].xyzw[chan].f[0] = a0 / w[0];
+ mach->Inputs[attrib].xyzw[chan].f[1] = (a0 + dadx) / w[1];
+ mach->Inputs[attrib].xyzw[chan].f[2] = (a0 + dady) / w[2];
+ mach->Inputs[attrib].xyzw[chan].f[3] = (a0 + dadx + dady) / w[3];
+}
+
+
+typedef void (* interpolation_func)(
+ struct spu_exec_machine *mach,
+ unsigned attrib,
+ unsigned chan );
+
+static void
+exec_declaration(struct spu_exec_machine *mach,
+ const struct tgsi_full_declaration *decl)
+{
+ if( mach->Processor == TGSI_PROCESSOR_FRAGMENT ) {
+ if( decl->Declaration.File == TGSI_FILE_INPUT ) {
+ unsigned first, last, mask;
+ interpolation_func interp;
+
+ first = decl->DeclarationRange.First;
+ last = decl->DeclarationRange.Last;
+ mask = decl->Declaration.UsageMask;
+
+ switch( decl->Declaration.Interpolate ) {
+ case TGSI_INTERPOLATE_CONSTANT:
+ interp = constant_interpolation;
+ break;
+
+ case TGSI_INTERPOLATE_LINEAR:
+ interp = linear_interpolation;
+ break;
+
+ case TGSI_INTERPOLATE_PERSPECTIVE:
+ interp = perspective_interpolation;
+ break;
+
+ default:
+ ASSERT( 0 );
+ }
+
+ if( mask == TGSI_WRITEMASK_XYZW ) {
+ unsigned i, j;
+
+ for( i = first; i <= last; i++ ) {
+ for( j = 0; j < NUM_CHANNELS; j++ ) {
+ interp( mach, i, j );
+ }
+ }
+ }
+ else {
+ unsigned i, j;
+
+ for( j = 0; j < NUM_CHANNELS; j++ ) {
+ if( mask & (1 << j) ) {
+ for( i = first; i <= last; i++ ) {
+ interp( mach, i, j );
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+static void
+exec_instruction(
+ struct spu_exec_machine *mach,
+ const struct tgsi_full_instruction *inst,
+ int *pc )
+{
+ uint chan_index;
+ union spu_exec_channel r[8];
+
+ (*pc)++;
+
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_ARL:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ r[0].q = si_cflts(r[0].q, 0);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_MOV:
+ case TGSI_OPCODE_SWZ:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_LIT:
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_X )) {
+ STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_X );
+ }
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Y ) || IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
+ FETCH( &r[0], 0, CHAN_X );
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
+ r[0].q = micro_max(r[0].q, mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q);
+ STORE( &r[0], 0, CHAN_Y );
+ }
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
+ FETCH( &r[1], 0, CHAN_Y );
+ r[1].q = micro_max(r[1].q, mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q);
+
+ FETCH( &r[2], 0, CHAN_W );
+ r[2].q = micro_min(r[2].q, mach->Temps[TEMP_128_I].xyzw[TEMP_128_C].q);
+ r[2].q = micro_max(r[2].q, mach->Temps[TEMP_M128_I].xyzw[TEMP_M128_C].q);
+ r[1].q = micro_pow(r[1].q, r[2].q);
+
+ /* r0 = (r0 > 0.0) ? r1 : 0.0
+ */
+ r[0].q = si_fcgt(r[0].q, mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q);
+ r[0].q = si_selb(mach->Temps[TEMP_0_I].xyzw[TEMP_0_C].q, r[1].q,
+ r[0].q);
+ STORE( &r[0], 0, CHAN_Z );
+ }
+ }
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
+ STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
+ }
+ break;
+
+ case TGSI_OPCODE_RCP:
+ FETCH( &r[0], 0, CHAN_X );
+ r[0].q = micro_div(mach->Temps[TEMP_1_I].xyzw[TEMP_1_C].q, r[0].q);
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_RSQ:
+ FETCH( &r[0], 0, CHAN_X );
+ r[0].q = micro_sqrt(r[0].q);
+ r[0].q = micro_div(mach->Temps[TEMP_1_I].xyzw[TEMP_1_C].q, r[0].q);
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_EXP:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_LOG:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_MUL:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index )
+ {
+ FETCH(&r[0], 0, chan_index);
+ FETCH(&r[1], 1, chan_index);
+
+ r[0].q = si_fm(r[0].q, r[1].q);
+
+ STORE(&r[0], 0, chan_index);
+ }
+ break;
+
+ case TGSI_OPCODE_ADD:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ r[0].q = si_fa(r[0].q, r[1].q);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_DP3:
+ /* TGSI_OPCODE_DOT3 */
+ FETCH( &r[0], 0, CHAN_X );
+ FETCH( &r[1], 1, CHAN_X );
+ r[0].q = si_fm(r[0].q, r[1].q);
+
+ FETCH( &r[1], 0, CHAN_Y );
+ FETCH( &r[2], 1, CHAN_Y );
+ r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
+
+
+ FETCH( &r[1], 0, CHAN_Z );
+ FETCH( &r[2], 1, CHAN_Z );
+ r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_DP4:
+ /* TGSI_OPCODE_DOT4 */
+ FETCH(&r[0], 0, CHAN_X);
+ FETCH(&r[1], 1, CHAN_X);
+
+ r[0].q = si_fm(r[0].q, r[1].q);
+
+ FETCH(&r[1], 0, CHAN_Y);
+ FETCH(&r[2], 1, CHAN_Y);
+
+ r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
+
+ FETCH(&r[1], 0, CHAN_Z);
+ FETCH(&r[2], 1, CHAN_Z);
+
+ r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
+
+ FETCH(&r[1], 0, CHAN_W);
+ FETCH(&r[2], 1, CHAN_W);
+
+ r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_DST:
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_X )) {
+ STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_X );
+ }
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
+ FETCH( &r[0], 0, CHAN_Y );
+ FETCH( &r[1], 1, CHAN_Y);
+ r[0].q = si_fm(r[0].q, r[1].q);
+ STORE( &r[0], 0, CHAN_Y );
+ }
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
+ FETCH( &r[0], 0, CHAN_Z );
+ STORE( &r[0], 0, CHAN_Z );
+ }
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
+ FETCH( &r[0], 1, CHAN_W );
+ STORE( &r[0], 0, CHAN_W );
+ }
+ break;
+
+ case TGSI_OPCODE_MIN:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH(&r[0], 0, chan_index);
+ FETCH(&r[1], 1, chan_index);
+
+ r[0].q = micro_min(r[0].q, r[1].q);
+
+ STORE(&r[0], 0, chan_index);
+ }
+ break;
+
+ case TGSI_OPCODE_MAX:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH(&r[0], 0, chan_index);
+ FETCH(&r[1], 1, chan_index);
+
+ r[0].q = micro_max(r[0].q, r[1].q);
+
+ STORE(&r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SLT:
+ /* TGSI_OPCODE_SETLT */
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+
+ r[0].q = micro_ge(r[0].q, r[1].q);
+ r[0].q = si_xori(r[0].q, 0xff);
+
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SGE:
+ /* TGSI_OPCODE_SETGE */
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ r[0].q = micro_ge(r[0].q, r[1].q);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_MAD:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ FETCH( &r[2], 2, chan_index );
+ r[0].q = si_fma(r[0].q, r[1].q, r[2].q);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SUB:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH(&r[0], 0, chan_index);
+ FETCH(&r[1], 1, chan_index);
+
+ r[0].q = si_fs(r[0].q, r[1].q);
+
+ STORE(&r[0], 0, chan_index);
+ }
+ break;
+
+ case TGSI_OPCODE_LRP:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH(&r[0], 0, chan_index);
+ FETCH(&r[1], 1, chan_index);
+ FETCH(&r[2], 2, chan_index);
+
+ r[1].q = si_fs(r[1].q, r[2].q);
+ r[0].q = si_fma(r[0].q, r[1].q, r[2].q);
+
+ STORE(&r[0], 0, chan_index);
+ }
+ break;
+
+ case TGSI_OPCODE_CND:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_DP2A:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_FRC:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ r[0].q = micro_frc(r[0].q);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_CLAMP:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_FLR:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ r[0].q = micro_flr(r[0].q);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_ROUND:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ r[0].q = micro_rnd(r[0].q);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_EX2:
+ FETCH(&r[0], 0, CHAN_X);
+
+ r[0].q = micro_pow(mach->Temps[TEMP_2_I].xyzw[TEMP_2_C].q, r[0].q);
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_LG2:
+ FETCH( &r[0], 0, CHAN_X );
+ r[0].q = micro_lg2(r[0].q);
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_POW:
+ FETCH(&r[0], 0, CHAN_X);
+ FETCH(&r[1], 1, CHAN_X);
+
+ r[0].q = micro_pow(r[0].q, r[1].q);
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_XPD:
+ /* TGSI_OPCODE_XPD */
+ FETCH(&r[0], 0, CHAN_Y);
+ FETCH(&r[1], 1, CHAN_Z);
+ FETCH(&r[3], 0, CHAN_Z);
+ FETCH(&r[4], 1, CHAN_Y);
+
+      /* r2 = (r0 * r1) - (r3 * r4)
+       */
+      r[2].q = si_fm(r[3].q, r[4].q);
+ r[2].q = si_fms(r[0].q, r[1].q, r[2].q);
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_X )) {
+ STORE( &r[2], 0, CHAN_X );
+ }
+
+ FETCH(&r[2], 1, CHAN_X);
+ FETCH(&r[5], 0, CHAN_X);
+
+ /* r3 = (r3 * r2) - (r1 * r5)
+ */
+ r[1].q = si_fm(r[1].q, r[5].q);
+ r[3].q = si_fms(r[3].q, r[2].q, r[1].q);
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Y )) {
+ STORE( &r[3], 0, CHAN_Y );
+ }
+
+ /* r5 = (r5 * r4) - (r0 * r2)
+ */
+ r[0].q = si_fm(r[0].q, r[2].q);
+ r[5].q = si_fms(r[5].q, r[4].q, r[0].q);
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_Z )) {
+ STORE( &r[5], 0, CHAN_Z );
+ }
+
+ if (IS_CHANNEL_ENABLED( *inst, CHAN_W )) {
+ STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
+ }
+ break;
+
+ case TGSI_OPCODE_ABS:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH(&r[0], 0, chan_index);
+
+ r[0].q = micro_abs(r[0].q);
+
+ STORE(&r[0], 0, chan_index);
+ }
+ break;
+
+ case TGSI_OPCODE_RCC:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_DPH:
+ FETCH(&r[0], 0, CHAN_X);
+ FETCH(&r[1], 1, CHAN_X);
+
+ r[0].q = si_fm(r[0].q, r[1].q);
+
+ FETCH(&r[1], 0, CHAN_Y);
+ FETCH(&r[2], 1, CHAN_Y);
+
+ r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
+
+ FETCH(&r[1], 0, CHAN_Z);
+ FETCH(&r[2], 1, CHAN_Z);
+
+ r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
+
+ FETCH(&r[1], 1, CHAN_W);
+
+ r[0].q = si_fa(r[0].q, r[1].q);
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_COS:
+ FETCH(&r[0], 0, CHAN_X);
+
+ r[0].q = micro_cos(r[0].q);
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_DDX:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ r[0].q = micro_ddx(r[0].q);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_DDY:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ r[0].q = micro_ddy(r[0].q);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_KILP:
+ exec_kilp (mach, inst);
+ break;
+
+ case TGSI_OPCODE_KIL:
+ exec_kil (mach, inst);
+ break;
+
+ case TGSI_OPCODE_PK2H:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_PK2US:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_PK4B:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_PK4UB:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_RFL:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_SEQ:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+
+ r[0].q = si_fceq(r[0].q, r[1].q);
+
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SFL:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_SGT:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ r[0].q = si_fcgt(r[0].q, r[1].q);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SIN:
+ FETCH( &r[0], 0, CHAN_X );
+ r[0].q = micro_sin(r[0].q);
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SLE:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+
+ r[0].q = si_fcgt(r[0].q, r[1].q);
+ r[0].q = si_xori(r[0].q, 0xff);
+
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SNE:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+
+ r[0].q = si_fceq(r[0].q, r[1].q);
+ r[0].q = si_xori(r[0].q, 0xff);
+
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_STR:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_TEX:
+ /* simple texture lookup */
+ /* src[0] = texcoord */
+ /* src[1] = sampler unit */
+ exec_tex(mach, inst, FALSE, FALSE);
+ break;
+
+ case TGSI_OPCODE_TXB:
+ /* Texture lookup with lod bias */
+      /* src[0] = texcoord (src[0].w = lod bias) */
+ /* src[1] = sampler unit */
+ exec_tex(mach, inst, TRUE, FALSE);
+ break;
+
+ case TGSI_OPCODE_TXD:
+      /* Texture lookup with explicit partial derivatives */
+ /* src[0] = texcoord */
+ /* src[1] = d[strq]/dx */
+ /* src[2] = d[strq]/dy */
+ /* src[3] = sampler unit */
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_TXL:
+      /* Texture lookup with explicit LOD */
+      /* src[0] = texcoord (src[0].w = lod bias) */
+ /* src[1] = sampler unit */
+ exec_tex(mach, inst, TRUE, FALSE);
+ break;
+
+ case TGSI_OPCODE_TXP:
+ /* Texture lookup with projection */
+ /* src[0] = texcoord (src[0].w = projection) */
+ /* src[1] = sampler unit */
+ exec_tex(mach, inst, TRUE, TRUE);
+ break;
+
+ case TGSI_OPCODE_UP2H:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_UP2US:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_UP4B:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_UP4UB:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_X2D:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_ARA:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_ARR:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_BRA:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_CAL:
+ /* skip the call if no execution channels are enabled */
+ if (mach->ExecMask) {
+ /* do the call */
+
+ /* push the Cond, Loop, Cont stacks */
+ ASSERT(mach->CondStackTop < TGSI_EXEC_MAX_COND_NESTING);
+ mach->CondStack[mach->CondStackTop++] = mach->CondMask;
+ ASSERT(mach->LoopStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
+ mach->LoopStack[mach->LoopStackTop++] = mach->LoopMask;
+ ASSERT(mach->ContStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
+ mach->ContStack[mach->ContStackTop++] = mach->ContMask;
+
+ ASSERT(mach->FuncStackTop < TGSI_EXEC_MAX_CALL_NESTING);
+ mach->FuncStack[mach->FuncStackTop++] = mach->FuncMask;
+
+ /* note that PC was already incremented above */
+ mach->CallStack[mach->CallStackTop++] = *pc;
+ *pc = inst->InstructionExtLabel.Label;
+ }
+ break;
+
+ case TGSI_OPCODE_RET:
+ mach->FuncMask &= ~mach->ExecMask;
+ UPDATE_EXEC_MASK(mach);
+
+ if (mach->ExecMask == 0x0) {
+         /* really return now (otherwise, keep executing) */
+
+ if (mach->CallStackTop == 0) {
+ /* returning from main() */
+ *pc = -1;
+ return;
+ }
+ *pc = mach->CallStack[--mach->CallStackTop];
+
+ /* pop the Cond, Loop, Cont stacks */
+ ASSERT(mach->CondStackTop > 0);
+ mach->CondMask = mach->CondStack[--mach->CondStackTop];
+ ASSERT(mach->LoopStackTop > 0);
+ mach->LoopMask = mach->LoopStack[--mach->LoopStackTop];
+ ASSERT(mach->ContStackTop > 0);
+ mach->ContMask = mach->ContStack[--mach->ContStackTop];
+ ASSERT(mach->FuncStackTop > 0);
+ mach->FuncMask = mach->FuncStack[--mach->FuncStackTop];
+
+ UPDATE_EXEC_MASK(mach);
+ }
+ break;
+
+ case TGSI_OPCODE_SSG:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_CMP:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH(&r[0], 0, chan_index);
+ FETCH(&r[1], 1, chan_index);
+ FETCH(&r[2], 2, chan_index);
+
+ /* r0 = (r0 < 0.0) ? r1 : r2
+ */
+ r[3].q = si_xor(r[3].q, r[3].q);
+ r[0].q = micro_lt(r[0].q, r[3].q);
+ r[0].q = si_selb(r[1].q, r[2].q, r[0].q);
+
+ STORE(&r[0], 0, chan_index);
+ }
+ break;
+
+ case TGSI_OPCODE_SCS:
+ if( IS_CHANNEL_ENABLED( *inst, CHAN_X ) || IS_CHANNEL_ENABLED( *inst, CHAN_Y ) ) {
+ FETCH( &r[0], 0, CHAN_X );
+ }
+ if( IS_CHANNEL_ENABLED( *inst, CHAN_X ) ) {
+ r[1].q = micro_cos(r[0].q);
+ STORE( &r[1], 0, CHAN_X );
+ }
+ if( IS_CHANNEL_ENABLED( *inst, CHAN_Y ) ) {
+ r[1].q = micro_sin(r[0].q);
+ STORE( &r[1], 0, CHAN_Y );
+ }
+ if( IS_CHANNEL_ENABLED( *inst, CHAN_Z ) ) {
+ STORE( &mach->Temps[TEMP_0_I].xyzw[TEMP_0_C], 0, CHAN_Z );
+ }
+ if( IS_CHANNEL_ENABLED( *inst, CHAN_W ) ) {
+ STORE( &mach->Temps[TEMP_1_I].xyzw[TEMP_1_C], 0, CHAN_W );
+ }
+ break;
+
+ case TGSI_OPCODE_NRM:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_DIV:
+ ASSERT( 0 );
+ break;
+
+ case TGSI_OPCODE_DP2:
+ FETCH( &r[0], 0, CHAN_X );
+ FETCH( &r[1], 1, CHAN_X );
+ r[0].q = si_fm(r[0].q, r[1].q);
+
+ FETCH( &r[1], 0, CHAN_Y );
+ FETCH( &r[2], 1, CHAN_Y );
+ r[0].q = si_fma(r[1].q, r[2].q, r[0].q);
+
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_IF:
+ /* push CondMask */
+ ASSERT(mach->CondStackTop < TGSI_EXEC_MAX_COND_NESTING);
+ mach->CondStack[mach->CondStackTop++] = mach->CondMask;
+ FETCH( &r[0], 0, CHAN_X );
+ /* update CondMask */
+ if( ! r[0].u[0] ) {
+ mach->CondMask &= ~0x1;
+ }
+ if( ! r[0].u[1] ) {
+ mach->CondMask &= ~0x2;
+ }
+ if( ! r[0].u[2] ) {
+ mach->CondMask &= ~0x4;
+ }
+ if( ! r[0].u[3] ) {
+ mach->CondMask &= ~0x8;
+ }
+ UPDATE_EXEC_MASK(mach);
+ /* Todo: If CondMask==0, jump to ELSE */
+ break;
+
+ case TGSI_OPCODE_ELSE:
+ /* invert CondMask wrt previous mask */
+ {
+ uint prevMask;
+ ASSERT(mach->CondStackTop > 0);
+ prevMask = mach->CondStack[mach->CondStackTop - 1];
+ mach->CondMask = ~mach->CondMask & prevMask;
+ UPDATE_EXEC_MASK(mach);
+ /* Todo: If CondMask==0, jump to ENDIF */
+ }
+ break;
+
+ case TGSI_OPCODE_ENDIF:
+ /* pop CondMask */
+ ASSERT(mach->CondStackTop > 0);
+ mach->CondMask = mach->CondStack[--mach->CondStackTop];
+ UPDATE_EXEC_MASK(mach);
+ break;
+
+ case TGSI_OPCODE_END:
+ /* halt execution */
+ *pc = -1;
+ break;
+
+ case TGSI_OPCODE_REP:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_ENDREP:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_PUSHA:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_POPA:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_CEIL:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ r[0].q = micro_ceil(r[0].q);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_I2F:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ r[0].q = si_csflt(r[0].q, 0);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_NOT:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ r[0].q = si_xorbi(r[0].q, 0xff);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_TRUNC:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ r[0].q = micro_trunc(r[0].q);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SHL:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+
+ r[0].q = si_shl(r[0].q, r[1].q);
+
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SHR:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ r[0].q = micro_ishr(r[0].q, r[1].q);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_AND:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ r[0].q = si_and(r[0].q, r[1].q);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_OR:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ r[0].q = si_or(r[0].q, r[1].q);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_MOD:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_XOR:
+ FOR_EACH_ENABLED_CHANNEL( *inst, chan_index ) {
+ FETCH( &r[0], 0, chan_index );
+ FETCH( &r[1], 1, chan_index );
+ r[0].q = si_xor(r[0].q, r[1].q);
+ STORE( &r[0], 0, chan_index );
+ }
+ break;
+
+ case TGSI_OPCODE_SAD:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_TXF:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_TXQ:
+ ASSERT (0);
+ break;
+
+ case TGSI_OPCODE_EMIT:
+ mach->Temps[TEMP_OUTPUT_I].xyzw[TEMP_OUTPUT_C].u[0] += 16;
+ mach->Primitives[mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0]]++;
+ break;
+
+ case TGSI_OPCODE_ENDPRIM:
+ mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0]++;
+ mach->Primitives[mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0]] = 0;
+ break;
+
+ case TGSI_OPCODE_BGNFOR:
+ /* fall-through (for now) */
+ case TGSI_OPCODE_BGNLOOP:
+ /* push LoopMask and ContMasks */
+ ASSERT(mach->LoopStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
+ mach->LoopStack[mach->LoopStackTop++] = mach->LoopMask;
+ ASSERT(mach->ContStackTop < TGSI_EXEC_MAX_LOOP_NESTING);
+ mach->ContStack[mach->ContStackTop++] = mach->ContMask;
+ break;
+
+ case TGSI_OPCODE_ENDFOR:
+ /* fall-through (for now at least) */
+ case TGSI_OPCODE_ENDLOOP:
+ /* Restore ContMask, but don't pop */
+ ASSERT(mach->ContStackTop > 0);
+ mach->ContMask = mach->ContStack[mach->ContStackTop - 1];
+ if (mach->LoopMask) {
+ /* repeat loop: jump to instruction just past BGNLOOP */
+ *pc = inst->InstructionExtLabel.Label + 1;
+ }
+ else {
+ /* exit loop: pop LoopMask */
+ ASSERT(mach->LoopStackTop > 0);
+ mach->LoopMask = mach->LoopStack[--mach->LoopStackTop];
+ /* pop ContMask */
+ ASSERT(mach->ContStackTop > 0);
+ mach->ContMask = mach->ContStack[--mach->ContStackTop];
+ }
+ UPDATE_EXEC_MASK(mach);
+ break;
+
+ case TGSI_OPCODE_BRK:
+ /* turn off loop channels for each enabled exec channel */
+ mach->LoopMask &= ~mach->ExecMask;
+ /* Todo: if mach->LoopMask == 0, jump to end of loop */
+ UPDATE_EXEC_MASK(mach);
+ break;
+
+ case TGSI_OPCODE_CONT:
+ /* turn off cont channels for each enabled exec channel */
+ mach->ContMask &= ~mach->ExecMask;
+ /* Todo: if mach->LoopMask == 0, jump to end of loop */
+ UPDATE_EXEC_MASK(mach);
+ break;
+
+ case TGSI_OPCODE_BGNSUB:
+ /* no-op */
+ break;
+
+ case TGSI_OPCODE_ENDSUB:
+ /* no-op */
+ break;
+
+ case TGSI_OPCODE_NOISE1:
+ ASSERT( 0 );
+ break;
+
+ case TGSI_OPCODE_NOISE2:
+ ASSERT( 0 );
+ break;
+
+ case TGSI_OPCODE_NOISE3:
+ ASSERT( 0 );
+ break;
+
+ case TGSI_OPCODE_NOISE4:
+ ASSERT( 0 );
+ break;
+
+ case TGSI_OPCODE_NOP:
+ break;
+
+ default:
+ ASSERT( 0 );
+ }
+}
+
+
+/**
+ * Run TGSI interpreter.
+ * \return bitmask of "alive" quad components
+ */
+uint
+spu_exec_machine_run( struct spu_exec_machine *mach )
+{
+ uint i;
+ int pc = 0;
+
+ mach->CondMask = 0xf;
+ mach->LoopMask = 0xf;
+ mach->ContMask = 0xf;
+ mach->FuncMask = 0xf;
+ mach->ExecMask = 0xf;
+
+ mach->CondStackTop = 0; /* temporarily subvert this ASSERTion */
+ ASSERT(mach->CondStackTop == 0);
+ ASSERT(mach->LoopStackTop == 0);
+ ASSERT(mach->ContStackTop == 0);
+ ASSERT(mach->CallStackTop == 0);
+
+ mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0] = 0;
+ mach->Temps[TEMP_OUTPUT_I].xyzw[TEMP_OUTPUT_C].u[0] = 0;
+
+ if( mach->Processor == TGSI_PROCESSOR_GEOMETRY ) {
+ mach->Temps[TEMP_PRIMITIVE_I].xyzw[TEMP_PRIMITIVE_C].u[0] = 0;
+ mach->Primitives[0] = 0;
+ }
+
+
+ /* execute declarations (interpolants) */
+ if( mach->Processor == TGSI_PROCESSOR_FRAGMENT ) {
+ for (i = 0; i < mach->NumDeclarations; i++) {
+ union {
+ struct tgsi_full_declaration decl;
+ qword buffer[ROUNDUP16(sizeof(struct tgsi_full_declaration)) / 16];
+ } d ALIGN16_ATTRIB;
+ unsigned ea = (unsigned) (mach->Declarations + pc);
+
+ spu_dcache_fetch_unaligned(d.buffer, ea, sizeof(d.decl));
+
+ exec_declaration( mach, &d.decl );
+ }
+ }
+
+ /* execute instructions, until pc is set to -1 */
+ while (pc != -1) {
+ union {
+ struct tgsi_full_instruction inst;
+ qword buffer[ROUNDUP16(sizeof(struct tgsi_full_instruction)) / 16];
+ } i ALIGN16_ATTRIB;
+ unsigned ea = (unsigned) (mach->Instructions + pc);
+
+ spu_dcache_fetch_unaligned(i.buffer, ea, sizeof(i.inst));
+ exec_instruction( mach, & i.inst, &pc );
+ }
+
+#if 0
+ /* we scale from floats in [0,1] to Zbuffer ints in sp_quad_depth_test.c */
+ if (mach->Processor == TGSI_PROCESSOR_FRAGMENT) {
+ /*
+ * Scale back depth component.
+ */
+ for (i = 0; i < 4; i++)
+ mach->Outputs[0].xyzw[2].f[i] *= ctx->DrawBuffer->_DepthMaxF;
+ }
+#endif
+
+ return ~mach->Temps[TEMP_KILMASK_I].xyzw[TEMP_KILMASK_C].u[0];
+}
+
+
diff --git a/src/gallium/drivers/cell/spu/spu_exec.h b/src/gallium/drivers/cell/spu/spu_exec.h
new file mode 100644
index 0000000000..8605679940
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_exec.h
@@ -0,0 +1,172 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#if !defined SPU_EXEC_H
+#define SPU_EXEC_H
+
+#include "pipe/p_compiler.h"
+#include "tgsi/tgsi_exec.h"
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Registers may be treated as float, signed int or unsigned int.
+ */
+union spu_exec_channel
+{
+ float f[QUAD_SIZE];
+ int i[QUAD_SIZE];
+ unsigned u[QUAD_SIZE];
+ qword q;
+};
+
+/**
+ * A vector[RGBA] of channels[4 pixels]
+ */
+struct spu_exec_vector
+{
+ union spu_exec_channel xyzw[NUM_CHANNELS];
+};
+
+/**
+ * For fragment programs, information for computing fragment input
+ * values from plane equation of the triangle/line.
+ */
+struct spu_interp_coef
+{
+ float a0[NUM_CHANNELS]; /* in an xyzw layout */
+ float dadx[NUM_CHANNELS];
+ float dady[NUM_CHANNELS];
+};
+
+
+struct softpipe_tile_cache; /**< Opaque to TGSI */
+
+/**
+ * Information for sampling textures, which must be implemented
+ * by code outside the TGSI executor.
+ */
+struct spu_sampler
+{
+ const struct pipe_sampler_state *state;
+ struct pipe_texture *texture;
+ /** Get samples for four fragments in a quad */
+ void (*get_samples)(struct spu_sampler *sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE]);
+ void *pipe; /*XXX temporary*/
+ struct softpipe_tile_cache *cache;
+};
+
+
+/**
+ * Run-time virtual machine state for executing TGSI shader.
+ */
+struct spu_exec_machine
+{
+ /*
+ * 32 program temporaries
+ * 4 internal temporaries
+ * 1 address
+ */
+ struct spu_exec_vector Temps[TGSI_EXEC_NUM_TEMPS
+ + TGSI_EXEC_NUM_TEMP_EXTRAS + 1]
+ ALIGN16_ATTRIB;
+
+ struct spu_exec_vector *Addrs;
+
+ struct spu_sampler *Samplers;
+
+ float Imms[TGSI_EXEC_NUM_IMMEDIATES][4];
+ unsigned ImmLimit;
+ float (*Consts)[4];
+ struct spu_exec_vector *Inputs;
+ struct spu_exec_vector *Outputs;
+ unsigned Processor;
+
+ /* GEOMETRY processor only. */
+ unsigned *Primitives;
+
+ /* FRAGMENT processor only. */
+ const struct spu_interp_coef *InterpCoefs;
+ struct spu_exec_vector QuadPos;
+
+ /* Conditional execution masks */
+ uint CondMask; /**< For IF/ELSE/ENDIF */
+ uint LoopMask; /**< For BGNLOOP/ENDLOOP */
+ uint ContMask; /**< For loop CONT statements */
+ uint FuncMask; /**< For function calls */
+   uint ExecMask;  /**< = CondMask & LoopMask & ContMask & FuncMask */
+
+ /** Condition mask stack (for nested conditionals) */
+ uint CondStack[TGSI_EXEC_MAX_COND_NESTING];
+ int CondStackTop;
+
+ /** Loop mask stack (for nested loops) */
+ uint LoopStack[TGSI_EXEC_MAX_LOOP_NESTING];
+ int LoopStackTop;
+
+ /** Loop continue mask stack (see comments in tgsi_exec.c) */
+ uint ContStack[TGSI_EXEC_MAX_LOOP_NESTING];
+ int ContStackTop;
+
+ /** Function execution mask stack (for executing subroutine code) */
+ uint FuncStack[TGSI_EXEC_MAX_CALL_NESTING];
+ int FuncStackTop;
+
+ /** Function call stack for saving/restoring the program counter */
+ uint CallStack[TGSI_EXEC_MAX_CALL_NESTING];
+ int CallStackTop;
+
+ struct tgsi_full_instruction *Instructions;
+ uint NumInstructions;
+
+ struct tgsi_full_declaration *Declarations;
+ uint NumDeclarations;
+};
+
+
+extern void
+spu_exec_machine_init(struct spu_exec_machine *mach,
+ uint numSamplers,
+ struct spu_sampler *samplers,
+ unsigned processor);
+
+extern uint
+spu_exec_machine_run( struct spu_exec_machine *mach );
+
+
+#if defined __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* SPU_EXEC_H */
diff --git a/src/gallium/drivers/cell/spu/spu_funcs.c b/src/gallium/drivers/cell/spu/spu_funcs.c
new file mode 100644
index 0000000000..ff3d609d25
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_funcs.c
@@ -0,0 +1,173 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * SPU functions accessed by shaders.
+ *
+ * Authors: Brian Paul
+ */
+
+
+#include <string.h>
+#include <libmisc.h>
+#include <math.h>
+#include <cos14_v.h>
+#include <sin14_v.h>
+#include <simdmath/exp2f4.h>
+#include <simdmath/log2f4.h>
+#include <simdmath/powf4.h>
+
+#include "cell/common.h"
+#include "spu_main.h"
+#include "spu_funcs.h"
+#include "spu_texture.h"
+
+
+/** For "return"-ing four vectors */
+struct vec_4x4
+{
+ vector float v[4];
+};
+
+
+static vector float
+spu_cos(vector float x)
+{
+ return _cos14_v(x);
+}
+
+static vector float
+spu_sin(vector float x)
+{
+ return _sin14_v(x);
+}
+
+static vector float
+spu_pow(vector float x, vector float y)
+{
+ return _powf4(x, y);
+}
+
+static vector float
+spu_exp2(vector float x)
+{
+ return _exp2f4(x);
+}
+
+static vector float
+spu_log2(vector float x)
+{
+ return _log2f4(x);
+}
+
+
+static struct vec_4x4
+spu_tex_2d(vector float s, vector float t, vector float r, vector float q,
+ unsigned unit)
+{
+ struct vec_4x4 colors;
+ (void) r;
+ (void) q;
+ spu.sample_texture_2d[unit](s, t, unit, 0, 0, colors.v);
+ return colors;
+}
+
+static struct vec_4x4
+spu_tex_3d(vector float s, vector float t, vector float r, vector float q,
+ unsigned unit)
+{
+   struct vec_4x4 colors;
+   /* XXX 3D texture sampling is not implemented yet; fall back to 2D
+    * sampling and ignore the r coordinate.
+    */
+   (void) r;
+   (void) q;
+   spu.sample_texture_2d[unit](s, t, unit, 0, 0, colors.v);
+ return colors;
+}
+
+static struct vec_4x4
+spu_tex_cube(vector float s, vector float t, vector float r, vector float q,
+ unsigned unit)
+{
+ struct vec_4x4 colors;
+ (void) q;
+ sample_texture_cube(s, t, r, unit, colors.v);
+ return colors;
+}
+
+
+/**
+ * Add named function to list of "exported" functions that will be
+ * made available to the PPU-hosted code generator.
+ */
+static void
+export_func(struct cell_spu_function_info *spu_functions,
+ const char *name, void *addr)
+{
+ uint n = spu_functions->num;
+ ASSERT(strlen(name) < 16);
+ strcpy(spu_functions->names[n], name);
+ spu_functions->addrs[n] = (uint) addr;
+ spu_functions->num++;
+ ASSERT(spu_functions->num <= 16);
+}
+
+
+/**
+ * Return info about the SPU's function to the PPU / main memory.
+ * The PPU needs to know the address of some SPU-side functions so
+ * that we can generate shader code with function calls.
+ */
+void
+return_function_info(void)
+{
+ struct cell_spu_function_info funcs ALIGN16_ATTRIB;
+ int tag = TAG_MISC;
+
+   ASSERT(sizeof(funcs) == 256);  /* DMA size must be a multiple of 16 bytes */
+
+ funcs.num = 0;
+ export_func(&funcs, "spu_cos", &spu_cos);
+ export_func(&funcs, "spu_sin", &spu_sin);
+ export_func(&funcs, "spu_pow", &spu_pow);
+ export_func(&funcs, "spu_exp2", &spu_exp2);
+ export_func(&funcs, "spu_log2", &spu_log2);
+ export_func(&funcs, "spu_tex_2d", &spu_tex_2d);
+ export_func(&funcs, "spu_tex_3d", &spu_tex_3d);
+ export_func(&funcs, "spu_tex_cube", &spu_tex_cube);
+
+ /* Send the function info back to the PPU / main memory */
+ mfc_put((void *) &funcs, /* src in local store */
+ (unsigned int) spu.init.spu_functions, /* dst in main memory */
+ sizeof(funcs), /* bytes */
+ tag,
+ 0, /* tid */
+ 0 /* rid */);
+ wait_on_mask(1 << tag);
+}
+
+
+
diff --git a/src/gallium/drivers/cell/spu/spu_funcs.h b/src/gallium/drivers/cell/spu/spu_funcs.h
new file mode 100644
index 0000000000..3adb6ae99f
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_funcs.h
@@ -0,0 +1,35 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef SPU_FUNCS_H
+#define SPU_FUNCS_H
+
+extern void
+return_function_info(void);
+
+#endif
+
diff --git a/src/gallium/drivers/cell/spu/spu_main.c b/src/gallium/drivers/cell/spu/spu_main.c
new file mode 100644
index 0000000000..97c86d194d
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_main.c
@@ -0,0 +1,117 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/* main() for Cell SPU code */
+
+
+#include <stdio.h>
+#include <string.h>
+#include <libmisc.h>
+
+#include "pipe/p_defines.h"
+
+#include "spu_funcs.h"
+#include "spu_command.h"
+#include "spu_main.h"
+#include "spu_per_fragment_op.h"
+#include "spu_texture.h"
+//#include "spu_test.h"
+#include "cell/common.h"
+
+
+/*
+helpful headers:
+/usr/lib/gcc/spu/4.1.1/include/spu_mfcio.h
+/opt/cell/sdk/usr/include/libmisc.h
+*/
+
+struct spu_global spu;
+
+
+static void
+one_time_init(void)
+{
+ memset(spu.ctile_status, TILE_STATUS_DEFINED, sizeof(spu.ctile_status));
+ memset(spu.ztile_status, TILE_STATUS_DEFINED, sizeof(spu.ztile_status));
+ invalidate_tex_cache();
+}
+
+/* In some versions of the SDK the SPE main takes 'unsigned long' as a
+ * parameter. In others it takes 'unsigned long long'. Use a define to
+ * select between the two.
+ */
+#ifdef SPU_MAIN_PARAM_LONG_LONG
+typedef unsigned long long main_param_t;
+#else
+typedef unsigned long main_param_t;
+#endif
+
+/**
+ * SPE entrypoint.
+ */
+int
+main(main_param_t speid, main_param_t argp)
+{
+ int tag = 0;
+
+ (void) speid;
+
+ ASSERT(sizeof(tile_t) == TILE_SIZE * TILE_SIZE * 4);
+ ASSERT(sizeof(struct cell_command_render) % 8 == 0);
+ ASSERT(sizeof(struct cell_command_fragment_ops) % 8 == 0);
+ ASSERT(((unsigned long) &spu.fragment_program_code) % 8 == 0);
+
+ one_time_init();
+ spu_command_init();
+
+ D_PRINTF(CELL_DEBUG_CMD, "main() speid=%lu\n", (unsigned long) speid);
+ D_PRINTF(CELL_DEBUG_FRAGMENT_OP_FALLBACK, "using fragment op fallback\n");
+
+ /* get initialization data */
+ mfc_get(&spu.init, /* dest */
+ (unsigned int) argp, /* src */
+ sizeof(struct cell_init_info), /* bytes */
+ tag,
+ 0, /* tid */
+ 0 /* rid */);
+ wait_on_mask( 1 << tag );
+
+ if (spu.init.id == 0) {
+ return_function_info();
+ }
+
+#if 0
+ if (spu.init.id==0)
+ spu_test_misc(spu.init.id);
+#endif
+
+ command_loop();
+
+ spu_command_close();
+
+ return 0;
+}
diff --git a/src/gallium/drivers/cell/spu/spu_main.h b/src/gallium/drivers/cell/spu/spu_main.h
new file mode 100644
index 0000000000..33767e7c51
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_main.h
@@ -0,0 +1,254 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef SPU_MAIN_H
+#define SPU_MAIN_H
+
+
+#include <spu_mfcio.h>
+
+#include "cell/common.h"
+#include "draw/draw_vertex.h"
+#include "pipe/p_state.h"
+
+
+#if DEBUG
+/* These debug macros use the unusual construction ", ##__VA_ARGS__"
+ * which expands to the expected comma + args if variadic arguments
+ * are supplied, but swallows the comma if there are no variadic
+ * arguments (which avoids syntax errors that would otherwise occur).
+ */
+#define D_PRINTF(flag, format,...) \
+ if (spu.init.debug_flags & (flag)) \
+ printf("SPU %u: " format, spu.init.id, ##__VA_ARGS__)
+#else
+#define D_PRINTF(...)
+#endif
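+
+/* Example usage (the messages shown are illustrative):
+ *    D_PRINTF(CELL_DEBUG_CMD, "rendered %u tiles\n", num_tiles);
+ *    D_PRINTF(CELL_DEBUG_CMD, "done\n");
+ * In the second call there are no variadic arguments, so ##__VA_ARGS__
+ * swallows the trailing comma.
+ */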
+
+
+/**
+ * A tile is basically a TILE_SIZE x TILE_SIZE block of 4-byte pixels.
+ * The data may be addressed through several different types.
+ */
+typedef union {
+ ushort us[TILE_SIZE][TILE_SIZE];
+ uint ui[TILE_SIZE][TILE_SIZE];
+ vector unsigned short us8[TILE_SIZE/2][TILE_SIZE/4];
+ vector unsigned int ui4[TILE_SIZE/2][TILE_SIZE/2];
+} tile_t;
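+
+/* For example, a single 32-bit pixel is addressed as tile->ui[y][x], while
+ * spu_per_fragment_op.c fetches a 2x2 quad of Z/stencil values at once as
+ * tile->ui4[y/2][x/2].
+ */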
+
+
+#define TILE_STATUS_CLEAR 1
+#define TILE_STATUS_DEFINED 2 /**< defined in FB, but not in local store */
+#define TILE_STATUS_CLEAN 3 /**< in local store, but not changed */
+#define TILE_STATUS_DIRTY 4 /**< modified locally, but not put back yet */
+#define TILE_STATUS_GETTING 5 /**< mfc_get() called but not yet arrived */
+
+
+/** Function for sampling textures */
+typedef void (*spu_sample_texture_2d_func)(vector float s,
+ vector float t,
+ uint unit, uint level, uint face,
+ vector float colors[4]);
+
+
+/** Function for performing per-fragment ops */
+typedef void (*spu_fragment_ops_func)(uint x, uint y,
+ tile_t *colorTile,
+ tile_t *depthStencilTile,
+ vector float fragZ,
+ vector float fragRed,
+ vector float fragGreen,
+ vector float fragBlue,
+ vector float fragAlpha,
+ vector unsigned int mask);
+
+/** Function for running fragment program */
+typedef vector unsigned int (*spu_fragment_program_func)(vector float *inputs,
+ vector float *outputs,
+ vector float *constants);
+
+
+struct spu_framebuffer
+{
+ void *color_start; /**< addr of color surface in main memory */
+ void *depth_start; /**< addr of depth surface in main memory */
+ enum pipe_format color_format;
+ enum pipe_format depth_format;
+ uint width, height; /**< size in pixels */
+ uint width_tiles, height_tiles; /**< width and height in tiles */
+
+ uint color_clear_value;
+ uint depth_clear_value;
+
+ uint zsize; /**< 0, 2 or 4 bytes per Z */
+ float zscale; /**< 65535.0, 2^24-1 or 2^32-1 */
+} ALIGN16_ATTRIB;
+
+
+/** per-texture level info */
+struct spu_texture_level
+{
+ void *start;
+ ushort width, height, depth;
+ ushort tiles_per_row;
+ uint bytes_per_image;
+ /** texcoord scale factors */
+ vector float scale_s, scale_t, scale_r;
+ /** texcoord masks (if REPEAT then size-1, else ~0) */
+ vector signed int mask_s, mask_t, mask_r;
+ /** texcoord clamp limits */
+ vector signed int max_s, max_t, max_r;
+} ALIGN16_ATTRIB;
+
+
+struct spu_texture
+{
+ struct spu_texture_level level[CELL_MAX_TEXTURE_LEVELS];
+ uint max_level;
+ uint target; /**< PIPE_TEXTURE_x */
+} ALIGN16_ATTRIB;
+
+
+/**
+ * All SPU global/context state will be in a singleton object of this type:
+ */
+struct spu_global
+{
+ /** One-time init/constant info */
+ struct cell_init_info init;
+
+ /*
+ * Current state
+ */
+ struct spu_framebuffer fb;
+ struct pipe_depth_stencil_alpha_state depth_stencil_alpha;
+ struct pipe_blend_state blend;
+ struct pipe_blend_color blend_color;
+ struct pipe_sampler_state sampler[PIPE_MAX_SAMPLERS];
+ struct pipe_rasterizer_state rasterizer;
+ struct spu_texture texture[PIPE_MAX_SAMPLERS];
+ struct vertex_info vertex_info;
+
+ /** Current color and Z tiles */
+ tile_t ctile ALIGN16_ATTRIB;
+ tile_t ztile ALIGN16_ATTRIB;
+
+ /** Read depth/stencil tiles? */
+ boolean read_depth_stencil;
+
+ /** Current tiles' status */
+ ubyte cur_ctile_status, cur_ztile_status;
+
+ /** Status of all tiles in framebuffer */
+ ubyte ctile_status[CELL_MAX_HEIGHT/TILE_SIZE][CELL_MAX_WIDTH/TILE_SIZE] ALIGN16_ATTRIB;
+ ubyte ztile_status[CELL_MAX_HEIGHT/TILE_SIZE][CELL_MAX_WIDTH/TILE_SIZE] ALIGN16_ATTRIB;
+
+ /** Current fragment ops machine code, at 8-byte boundary */
+ uint *fragment_ops_code;
+ uint fragment_ops_code_size;
+ /** Current fragment ops functions, 0 = frontfacing, 1 = backfacing */
+ spu_fragment_ops_func fragment_ops[2];
+
+ /** Current fragment program machine code, at 8-byte boundary */
+ uint fragment_program_code[SPU_MAX_FRAGMENT_PROGRAM_INSTS] ALIGN8_ATTRIB;
+   /** Current fragment program function */
+ spu_fragment_program_func fragment_program;
+
+ /** Current texture sampler function */
+ spu_sample_texture_2d_func sample_texture_2d[CELL_MAX_SAMPLERS];
+ spu_sample_texture_2d_func min_sample_texture_2d[CELL_MAX_SAMPLERS];
+ spu_sample_texture_2d_func mag_sample_texture_2d[CELL_MAX_SAMPLERS];
+
+ /** Fragment program constants */
+ vector float constants[4 * CELL_MAX_CONSTANTS];
+
+} ALIGN16_ATTRIB;
+
+
+extern struct spu_global spu;
+
+
+
+/* DMA TAGS */
+
+#define TAG_SURFACE_CLEAR 10
+#define TAG_VERTEX_BUFFER 11
+#define TAG_READ_TILE_COLOR 12
+#define TAG_READ_TILE_Z 13
+#define TAG_WRITE_TILE_COLOR 14
+#define TAG_WRITE_TILE_Z 15
+#define TAG_INDEX_BUFFER 16
+#define TAG_BATCH_BUFFER 17
+#define TAG_MISC 18
+#define TAG_DCACHE0 20
+#define TAG_DCACHE1 21
+#define TAG_DCACHE2 22
+#define TAG_DCACHE3 23
+#define TAG_FENCE 24
+
+
+static INLINE void
+wait_on_mask(unsigned tagMask)
+{
+ mfc_write_tag_mask( tagMask );
+ /* wait for completion of _any_ DMAs specified by tagMask */
+ mfc_read_tag_status_any();
+}
+
+
+static INLINE void
+wait_on_mask_all(unsigned tagMask)
+{
+ mfc_write_tag_mask( tagMask );
+   /* wait for completion of _all_ DMAs specified by tagMask */
+ mfc_read_tag_status_all();
+}
+
+
+
+
+
+static INLINE void
+memset16(ushort *d, ushort value, uint count)
+{
+ uint i;
+ for (i = 0; i < count; i++)
+ d[i] = value;
+}
+
+
+static INLINE void
+memset32(uint *d, uint value, uint count)
+{
+ uint i;
+ for (i = 0; i < count; i++)
+ d[i] = value;
+}
+
+
+#endif /* SPU_MAIN_H */
diff --git a/src/gallium/drivers/cell/spu/spu_per_fragment_op.c b/src/gallium/drivers/cell/spu/spu_per_fragment_op.c
new file mode 100644
index 0000000000..eba9f95cf1
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_per_fragment_op.c
@@ -0,0 +1,631 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \author Brian Paul
+ */
+
+
+#include <stdio.h>
+#include <transpose_matrix4x4.h>
+#include "pipe/p_format.h"
+#include "spu_main.h"
+#include "spu_colorpack.h"
+#include "spu_per_fragment_op.h"
+
+
+#define LINEAR_QUAD_LAYOUT 1
+
+
+static INLINE vector float
+spu_min(vector float a, vector float b)
+{
+ vector unsigned int m;
+ m = spu_cmpgt(a, b); /* m = a > b ? ~0 : 0 */
+ return spu_sel(a, b, m);
+}
+
+
+static INLINE vector float
+spu_max(vector float a, vector float b)
+{
+ vector unsigned int m;
+ m = spu_cmpgt(a, b); /* m = a > b ? ~0 : 0 */
+ return spu_sel(b, a, m);
+}
+
+
+/**
+ * Called by rasterizer for each quad after the shader has run. Do
+ * all the per-fragment operations including alpha test, z test,
+ * stencil test, blend, colormask and logicops. This is a
+ * fallback/debug function. In reality we'll use a generated function
+ * produced by the PPU. But this function is useful for
+ * debug/validation.
+ */
+void
+spu_fallback_fragment_ops(uint x, uint y,
+ tile_t *colorTile,
+ tile_t *depthStencilTile,
+ vector float fragZ,
+ vector float fragR,
+ vector float fragG,
+ vector float fragB,
+ vector float fragA,
+ vector unsigned int mask)
+{
+ vector float frag_aos[4];
+   unsigned int fbc0, fbc1, fbc2, fbc3;  /* framebuffer/tile colors */
+ unsigned int fragc0, fragc1, fragc2, fragc3; /* fragment colors */
+
+ /*
+ * Do alpha test
+ */
+ if (spu.depth_stencil_alpha.alpha.enabled) {
+ vector float ref = spu_splats(spu.depth_stencil_alpha.alpha.ref_value);
+ vector unsigned int amask;
+
+ switch (spu.depth_stencil_alpha.alpha.func) {
+ case PIPE_FUNC_LESS:
+ amask = spu_cmpgt(ref, fragA); /* mask = (fragA < ref) */
+ break;
+ case PIPE_FUNC_GREATER:
+ amask = spu_cmpgt(fragA, ref); /* mask = (fragA > ref) */
+ break;
+ case PIPE_FUNC_GEQUAL:
+ amask = spu_cmpgt(ref, fragA);
+ amask = spu_nor(amask, amask);
+ break;
+ case PIPE_FUNC_LEQUAL:
+ amask = spu_cmpgt(fragA, ref);
+ amask = spu_nor(amask, amask);
+ break;
+ case PIPE_FUNC_EQUAL:
+ amask = spu_cmpeq(ref, fragA);
+ break;
+ case PIPE_FUNC_NOTEQUAL:
+ amask = spu_cmpeq(ref, fragA);
+ amask = spu_nor(amask, amask);
+ break;
+ case PIPE_FUNC_ALWAYS:
+ amask = spu_splats(0xffffffffU);
+ break;
+ case PIPE_FUNC_NEVER:
+ amask = spu_splats( 0x0U);
+ break;
+      default:
+         ASSERT(0);
+         amask = spu_splats(0xffffffffU);
+ }
+
+ mask = spu_and(mask, amask);
+ }
+
+
+ /*
+ * Z and/or stencil testing...
+ */
+ if (spu.depth_stencil_alpha.depth.enabled ||
+ spu.depth_stencil_alpha.stencil[0].enabled) {
+
+ /* get four Z/Stencil values from tile */
+ vector unsigned int mask24 = spu_splats((unsigned int)0x00ffffffU);
+ vector unsigned int ifbZS = depthStencilTile->ui4[y/2][x/2];
+ vector unsigned int ifbZ = spu_and(ifbZS, mask24);
+ vector unsigned int ifbS = spu_andc(ifbZS, mask24);
+
+      if (spu.depth_stencil_alpha.stencil[0].enabled) {
+         /* do stencil test */
+         ASSERT(spu.fb.depth_format == PIPE_FORMAT_S8Z24_UNORM);
+         /* XXX stencil testing is not implemented yet */
+      }
+ else if (spu.depth_stencil_alpha.depth.enabled) {
+ /* do depth test */
+
+ ASSERT(spu.fb.depth_format == PIPE_FORMAT_S8Z24_UNORM ||
+ spu.fb.depth_format == PIPE_FORMAT_X8Z24_UNORM);
+
+ vector unsigned int ifragZ;
+ vector unsigned int zmask;
+
+ /* convert four fragZ from float to uint */
+ fragZ = spu_mul(fragZ, spu_splats((float) 0xffffff));
+ ifragZ = spu_convtu(fragZ, 0);
+
+ /* do depth comparison, setting zmask with results */
+ switch (spu.depth_stencil_alpha.depth.func) {
+ case PIPE_FUNC_LESS:
+ zmask = spu_cmpgt(ifbZ, ifragZ); /* mask = (ifragZ < ifbZ) */
+ break;
+ case PIPE_FUNC_GREATER:
+         zmask = spu_cmpgt(ifragZ, ifbZ);  /* mask = (ifragZ > ifbZ) */
+ break;
+ case PIPE_FUNC_GEQUAL:
+ zmask = spu_cmpgt(ifbZ, ifragZ);
+ zmask = spu_nor(zmask, zmask);
+ break;
+ case PIPE_FUNC_LEQUAL:
+ zmask = spu_cmpgt(ifragZ, ifbZ);
+ zmask = spu_nor(zmask, zmask);
+ break;
+ case PIPE_FUNC_EQUAL:
+ zmask = spu_cmpeq(ifbZ, ifragZ);
+ break;
+ case PIPE_FUNC_NOTEQUAL:
+ zmask = spu_cmpeq(ifbZ, ifragZ);
+ zmask = spu_nor(zmask, zmask);
+ break;
+ case PIPE_FUNC_ALWAYS:
+ zmask = spu_splats(0xffffffffU);
+ break;
+ case PIPE_FUNC_NEVER:
+ zmask = spu_splats( 0x0U);
+ break;
+         default:
+            ASSERT(0);
+            zmask = spu_splats(0xffffffffU);
+ }
+
+ mask = spu_and(mask, zmask);
+
+ /* merge framebuffer Z and fragment Z according to the mask */
+ ifbZ = spu_or(spu_and(ifragZ, mask),
+ spu_andc(ifbZ, mask));
+ }
+
+ if (spu_extract(spu_orx(mask), 0)) {
+ /* put new fragment Z/Stencil values back into Z/Stencil tile */
+ depthStencilTile->ui4[y/2][x/2] = spu_or(ifbZ, ifbS);
+
+ spu.cur_ztile_status = TILE_STATUS_DIRTY;
+ }
+ }
+
+
+ /*
+ * If we'll need the current framebuffer/tile colors for blending
+ * or logicop or colormask, fetch them now.
+ */
+ if (spu.blend.blend_enable ||
+ spu.blend.logicop_enable ||
+ spu.blend.colormask != 0xf) {
+
+#if LINEAR_QUAD_LAYOUT /* See comments/diagram below */
+ fbc0 = colorTile->ui[y][x*2+0];
+ fbc1 = colorTile->ui[y][x*2+1];
+ fbc2 = colorTile->ui[y][x*2+2];
+ fbc3 = colorTile->ui[y][x*2+3];
+#else
+ fbc0 = colorTile->ui[y+0][x+0];
+ fbc1 = colorTile->ui[y+0][x+1];
+ fbc2 = colorTile->ui[y+1][x+0];
+ fbc3 = colorTile->ui[y+1][x+1];
+#endif
+ }
+
+
+ /*
+ * Do blending
+ */
+ if (spu.blend.blend_enable) {
+ /* blending terms, misc regs */
+ vector float term1r, term1g, term1b, term1a;
+ vector float term2r, term2g, term2b, term2a;
+ vector float one, tmp;
+
+ vector float fbRGBA[4]; /* current framebuffer colors */
+
+ /* convert framebuffer colors from packed int to vector float */
+ {
+ vector float temp[4]; /* float colors in AOS form */
+ switch (spu.fb.color_format) {
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ temp[0] = spu_unpack_B8G8R8A8(fbc0);
+ temp[1] = spu_unpack_B8G8R8A8(fbc1);
+ temp[2] = spu_unpack_B8G8R8A8(fbc2);
+ temp[3] = spu_unpack_B8G8R8A8(fbc3);
+ break;
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ temp[0] = spu_unpack_A8R8G8B8(fbc0);
+ temp[1] = spu_unpack_A8R8G8B8(fbc1);
+ temp[2] = spu_unpack_A8R8G8B8(fbc2);
+ temp[3] = spu_unpack_A8R8G8B8(fbc3);
+ break;
+ default:
+ ASSERT(0);
+ }
+ _transpose_matrix4x4(fbRGBA, temp); /* fbRGBA = transpose(temp) */
+ }
+
+ /*
+ * Compute Src RGB terms (fragment color * factor)
+ */
+ switch (spu.blend.rgb_src_factor) {
+ case PIPE_BLENDFACTOR_ONE:
+ term1r = fragR;
+ term1g = fragG;
+ term1b = fragB;
+ break;
+ case PIPE_BLENDFACTOR_ZERO:
+ term1r =
+ term1g =
+ term1b = spu_splats(0.0f);
+ break;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ term1r = spu_mul(fragR, fragR);
+ term1g = spu_mul(fragG, fragG);
+ term1b = spu_mul(fragB, fragB);
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ term1r = spu_mul(fragR, fragA);
+ term1g = spu_mul(fragG, fragA);
+ term1b = spu_mul(fragB, fragA);
+ break;
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ term1r = spu_mul(fragR, fbRGBA[0]);
+ term1g = spu_mul(fragG, fbRGBA[1]);
+      term1b = spu_mul(fragB, fbRGBA[2]);
+ break;
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ term1r = spu_mul(fragR, fbRGBA[3]);
+ term1g = spu_mul(fragG, fbRGBA[3]);
+ term1b = spu_mul(fragB, fbRGBA[3]);
+ break;
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ term1r = spu_mul(fragR, spu_splats(spu.blend_color.color[0]));
+ term1g = spu_mul(fragG, spu_splats(spu.blend_color.color[1]));
+ term1b = spu_mul(fragB, spu_splats(spu.blend_color.color[2]));
+ break;
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ term1r = spu_mul(fragR, spu_splats(spu.blend_color.color[3]));
+ term1g = spu_mul(fragG, spu_splats(spu.blend_color.color[3]));
+ term1b = spu_mul(fragB, spu_splats(spu.blend_color.color[3]));
+ break;
+ /* XXX more cases */
+ default:
+ ASSERT(0);
+ }
+
+ /*
+ * Compute Src Alpha term (fragment alpha * factor)
+ */
+ switch (spu.blend.alpha_src_factor) {
+ case PIPE_BLENDFACTOR_ONE:
+ term1a = fragA;
+ break;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ term1a = spu_splats(0.0f);
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ term1a = spu_mul(fragA, fragA);
+ break;
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ /* fall-through */
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ term1a = spu_mul(fragA, fbRGBA[3]);
+ break;
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ /* fall-through */
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+      term1a = spu_mul(fragA, spu_splats(spu.blend_color.color[3]));
+ break;
+ /* XXX more cases */
+ default:
+ ASSERT(0);
+ }
+
+ /*
+ * Compute Dest RGB terms (framebuffer color * factor)
+ */
+ switch (spu.blend.rgb_dst_factor) {
+ case PIPE_BLENDFACTOR_ONE:
+ term2r = fbRGBA[0];
+ term2g = fbRGBA[1];
+ term2b = fbRGBA[2];
+ break;
+ case PIPE_BLENDFACTOR_ZERO:
+ term2r =
+ term2g =
+ term2b = spu_splats(0.0f);
+ break;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ term2r = spu_mul(fbRGBA[0], fragR);
+ term2g = spu_mul(fbRGBA[1], fragG);
+ term2b = spu_mul(fbRGBA[2], fragB);
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ term2r = spu_mul(fbRGBA[0], fragA);
+ term2g = spu_mul(fbRGBA[1], fragA);
+ term2b = spu_mul(fbRGBA[2], fragA);
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ one = spu_splats(1.0f);
+ tmp = spu_sub(one, fragA);
+ term2r = spu_mul(fbRGBA[0], tmp);
+ term2g = spu_mul(fbRGBA[1], tmp);
+ term2b = spu_mul(fbRGBA[2], tmp);
+ break;
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ term2r = spu_mul(fbRGBA[0], fbRGBA[0]);
+ term2g = spu_mul(fbRGBA[1], fbRGBA[1]);
+ term2b = spu_mul(fbRGBA[2], fbRGBA[2]);
+ break;
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ term2r = spu_mul(fbRGBA[0], fbRGBA[3]);
+ term2g = spu_mul(fbRGBA[1], fbRGBA[3]);
+ term2b = spu_mul(fbRGBA[2], fbRGBA[3]);
+ break;
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ term2r = spu_mul(fbRGBA[0], spu_splats(spu.blend_color.color[0]));
+ term2g = spu_mul(fbRGBA[1], spu_splats(spu.blend_color.color[1]));
+ term2b = spu_mul(fbRGBA[2], spu_splats(spu.blend_color.color[2]));
+ break;
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ term2r = spu_mul(fbRGBA[0], spu_splats(spu.blend_color.color[3]));
+ term2g = spu_mul(fbRGBA[1], spu_splats(spu.blend_color.color[3]));
+ term2b = spu_mul(fbRGBA[2], spu_splats(spu.blend_color.color[3]));
+ break;
+ /* XXX more cases */
+ default:
+ ASSERT(0);
+ }
+
+ /*
+ * Compute Dest Alpha term (framebuffer alpha * factor)
+ */
+ switch (spu.blend.alpha_dst_factor) {
+ case PIPE_BLENDFACTOR_ONE:
+ term2a = fbRGBA[3];
+ break;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ term2a = spu_splats(0.0f);
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ term2a = spu_mul(fbRGBA[3], fragA);
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ one = spu_splats(1.0f);
+ tmp = spu_sub(one, fragA);
+ term2a = spu_mul(fbRGBA[3], tmp);
+ break;
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ /* fall-through */
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ term2a = spu_mul(fbRGBA[3], fbRGBA[3]);
+ break;
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ /* fall-through */
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ term2a = spu_mul(fbRGBA[3], spu_splats(spu.blend_color.color[3]));
+ break;
+ /* XXX more cases */
+ default:
+ ASSERT(0);
+ }
+
+ /*
+ * Combine Src/Dest RGB terms
+ */
+ switch (spu.blend.rgb_func) {
+ case PIPE_BLEND_ADD:
+ fragR = spu_add(term1r, term2r);
+ fragG = spu_add(term1g, term2g);
+ fragB = spu_add(term1b, term2b);
+ break;
+ case PIPE_BLEND_SUBTRACT:
+ fragR = spu_sub(term1r, term2r);
+ fragG = spu_sub(term1g, term2g);
+ fragB = spu_sub(term1b, term2b);
+ break;
+ case PIPE_BLEND_REVERSE_SUBTRACT:
+ fragR = spu_sub(term2r, term1r);
+ fragG = spu_sub(term2g, term1g);
+ fragB = spu_sub(term2b, term1b);
+ break;
+ case PIPE_BLEND_MIN:
+ fragR = spu_min(term1r, term2r);
+ fragG = spu_min(term1g, term2g);
+ fragB = spu_min(term1b, term2b);
+ break;
+ case PIPE_BLEND_MAX:
+ fragR = spu_max(term1r, term2r);
+ fragG = spu_max(term1g, term2g);
+ fragB = spu_max(term1b, term2b);
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ /*
+ * Combine Src/Dest A term
+ */
+ switch (spu.blend.alpha_func) {
+ case PIPE_BLEND_ADD:
+ fragA = spu_add(term1a, term2a);
+ break;
+ case PIPE_BLEND_SUBTRACT:
+ fragA = spu_sub(term1a, term2a);
+ break;
+ case PIPE_BLEND_REVERSE_SUBTRACT:
+ fragA = spu_sub(term2a, term1a);
+ break;
+ case PIPE_BLEND_MIN:
+ fragA = spu_min(term1a, term2a);
+ break;
+ case PIPE_BLEND_MAX:
+ fragA = spu_max(term1a, term2a);
+ break;
+ default:
+ ASSERT(0);
+ }
+ }
+
+
+ /*
+ * Convert RRRR,GGGG,BBBB,AAAA to RGBA,RGBA,RGBA,RGBA.
+ */
+#if 0
+ /* original code */
+ {
+ vector float frag_soa[4];
+ frag_soa[0] = fragR;
+ frag_soa[1] = fragG;
+ frag_soa[2] = fragB;
+ frag_soa[3] = fragA;
+ _transpose_matrix4x4(frag_aos, frag_soa);
+ }
+#else
+ /* short-cut relying on function parameter layout: */
+ _transpose_matrix4x4(frag_aos, &fragR);
+ (void) fragG;
+ (void) fragB;
+#endif
+
+ /*
+ * Pack fragment float colors into 32-bit RGBA words.
+ */
+ switch (spu.fb.color_format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ fragc0 = spu_pack_A8R8G8B8(frag_aos[0]);
+ fragc1 = spu_pack_A8R8G8B8(frag_aos[1]);
+ fragc2 = spu_pack_A8R8G8B8(frag_aos[2]);
+ fragc3 = spu_pack_A8R8G8B8(frag_aos[3]);
+ break;
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ fragc0 = spu_pack_B8G8R8A8(frag_aos[0]);
+ fragc1 = spu_pack_B8G8R8A8(frag_aos[1]);
+ fragc2 = spu_pack_B8G8R8A8(frag_aos[2]);
+ fragc3 = spu_pack_B8G8R8A8(frag_aos[3]);
+ break;
+ default:
+ fprintf(stderr, "SPU: Bad pixel format in spu_default_fragment_ops\n");
+ ASSERT(0);
+ }
+
+
+ /*
+ * Do color masking
+ */
+ if (spu.blend.colormask != 0xf) {
+ uint cmask = 0x0; /* each byte corresponds to a color channel */
+
+ /* Form bitmask depending on color buffer format and colormask bits */
+ switch (spu.fb.color_format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ if (spu.blend.colormask & PIPE_MASK_R)
+ cmask |= 0x00ff0000; /* red */
+ if (spu.blend.colormask & PIPE_MASK_G)
+ cmask |= 0x0000ff00; /* green */
+ if (spu.blend.colormask & PIPE_MASK_B)
+ cmask |= 0x000000ff; /* blue */
+ if (spu.blend.colormask & PIPE_MASK_A)
+ cmask |= 0xff000000; /* alpha */
+ break;
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ if (spu.blend.colormask & PIPE_MASK_R)
+ cmask |= 0x0000ff00; /* red */
+ if (spu.blend.colormask & PIPE_MASK_G)
+ cmask |= 0x00ff0000; /* green */
+ if (spu.blend.colormask & PIPE_MASK_B)
+ cmask |= 0xff000000; /* blue */
+ if (spu.blend.colormask & PIPE_MASK_A)
+ cmask |= 0x000000ff; /* alpha */
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ /*
+ * Apply color mask to the 32-bit packed colors.
+ * if (cmask[i])
+ * frag color[i] = frag color[i];
+ * else
+ * frag color[i] = framebuffer color[i];
+ */
+ fragc0 = (fragc0 & cmask) | (fbc0 & ~cmask);
+ fragc1 = (fragc1 & cmask) | (fbc1 & ~cmask);
+ fragc2 = (fragc2 & cmask) | (fbc2 & ~cmask);
+ fragc3 = (fragc3 & cmask) | (fbc3 & ~cmask);
+ }
+
+
+ /*
+ * Do logic ops
+ */
+ if (spu.blend.logicop_enable) {
+ /* XXX to do */
+ /* apply logicop to 32-bit packed colors (fragcx and fbcx) */
+ }
+
+
+ /*
+ * If mask is non-zero, mark tile as dirty.
+ */
+ if (spu_extract(spu_orx(mask), 0)) {
+ spu.cur_ctile_status = TILE_STATUS_DIRTY;
+ }
+ else {
+ /* write no fragments */
+ return;
+ }
+
+
+ /*
+ * Write new fragment/quad colors to the framebuffer/tile.
+ * Only write pixels where the corresponding mask word is set.
+ */
+#if LINEAR_QUAD_LAYOUT
+ /*
+ * Quad layout:
+ * +--+--+--+--+
+ * |p0|p1|p2|p3|...
+ * +--+--+--+--+
+ */
+ if (spu_extract(mask, 0))
+ colorTile->ui[y][x*2] = fragc0;
+ if (spu_extract(mask, 1))
+ colorTile->ui[y][x*2+1] = fragc1;
+ if (spu_extract(mask, 2))
+ colorTile->ui[y][x*2+2] = fragc2;
+ if (spu_extract(mask, 3))
+ colorTile->ui[y][x*2+3] = fragc3;
+#else
+ /*
+ * Quad layout:
+ * +--+--+
+ * |p0|p1|...
+ * +--+--+
+ * |p2|p3|...
+ * +--+--+
+ */
+ if (spu_extract(mask, 0))
+ colorTile->ui[y+0][x+0] = fragc0;
+ if (spu_extract(mask, 1))
+ colorTile->ui[y+0][x+1] = fragc1;
+ if (spu_extract(mask, 2))
+ colorTile->ui[y+1][x+0] = fragc2;
+ if (spu_extract(mask, 3))
+ colorTile->ui[y+1][x+1] = fragc3;
+#endif
+}
diff --git a/src/gallium/drivers/cell/spu/spu_per_fragment_op.h b/src/gallium/drivers/cell/spu/spu_per_fragment_op.h
new file mode 100644
index 0000000000..f817abf046
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_per_fragment_op.h
@@ -0,0 +1,44 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef SPU_PER_FRAGMENT_OP
+#define SPU_PER_FRAGMENT_OP
+
+
+extern void
+spu_fallback_fragment_ops(uint x, uint y,
+ tile_t *colorTile,
+ tile_t *depthStencilTile,
+ vector float fragZ,
+ vector float fragRed,
+ vector float fragGreen,
+ vector float fragBlue,
+ vector float fragAlpha,
+ vector unsigned int mask);
+
+
+#endif /* SPU_PER_FRAGMENT_OP */
diff --git a/src/gallium/drivers/cell/spu/spu_render.c b/src/gallium/drivers/cell/spu/spu_render.c
new file mode 100644
index 0000000000..5ffb7073ab
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_render.c
@@ -0,0 +1,356 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include <stdio.h>
+#include <libmisc.h>
+#include <spu_mfcio.h>
+
+#include "spu_main.h"
+#include "spu_render.h"
+#include "spu_shuffle.h"
+#include "spu_tri.h"
+#include "spu_tile.h"
+#include "cell/common.h"
+#include "util/u_memory.h"
+
+
+/**
+ * Given a rendering command's bounding box (in pixels) compute the
+ * location of the corresponding screen tile bounding box.
+ */
+static INLINE void
+tile_bounding_box(const struct cell_command_render *render,
+ uint *txmin, uint *tymin,
+ uint *box_num_tiles, uint *box_width_tiles)
+{
+#if 0
+ /* Debug: full-window bounding box */
+ uint txmax = spu.fb.width_tiles - 1;
+ uint tymax = spu.fb.height_tiles - 1;
+ *txmin = 0;
+ *tymin = 0;
+ *box_num_tiles = spu.fb.width_tiles * spu.fb.height_tiles;
+ *box_width_tiles = spu.fb.width_tiles;
+ (void) render;
+ (void) txmax;
+ (void) tymax;
+#else
+ uint txmax, tymax, box_height_tiles;
+
+ *txmin = (uint) render->xmin / TILE_SIZE;
+ *tymin = (uint) render->ymin / TILE_SIZE;
+ txmax = (uint) render->xmax / TILE_SIZE;
+ tymax = (uint) render->ymax / TILE_SIZE;
+ if (txmax >= spu.fb.width_tiles)
+ txmax = spu.fb.width_tiles-1;
+ if (tymax >= spu.fb.height_tiles)
+ tymax = spu.fb.height_tiles-1;
+ *box_width_tiles = txmax - *txmin + 1;
+ box_height_tiles = tymax - *tymin + 1;
+ *box_num_tiles = *box_width_tiles * box_height_tiles;
+#endif
+#if 0
+ printf("SPU %u: bounds: %g, %g ... %g, %g\n", spu.init.id,
+ render->xmin, render->ymin, render->xmax, render->ymax);
+ printf("SPU %u: tiles: %u, %u .. %u, %u\n",
+ spu.init.id, *txmin, *tymin, txmax, tymax);
+ ASSERT(render->xmin <= render->xmax);
+ ASSERT(render->ymin <= render->ymax);
+#endif
+}
+
+
+/** Check if the tile at (tx,ty) belongs to this SPU */
+static INLINE boolean
+my_tile(uint tx, uint ty)
+{
+ return (spu.fb.width_tiles * ty + tx) % spu.init.num_spus == spu.init.id;
+}
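+
+/* Tiles are interleaved across SPUs in row-major order; for example
+ * (illustrative numbers), with num_spus == 4 the SPU with id 1 handles
+ * tiles 1, 5, 9, ... of the framebuffer.
+ */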
+
+
+/**
+ * Start fetching non-clear color/Z tiles from main memory
+ */
+static INLINE void
+get_cz_tiles(uint tx, uint ty)
+{
+ if (spu.read_depth_stencil) {
+ if (spu.cur_ztile_status != TILE_STATUS_CLEAR) {
+ //printf("SPU %u: getting Z tile %u, %u\n", spu.init.id, tx, ty);
+ get_tile(tx, ty, &spu.ztile, TAG_READ_TILE_Z, 1);
+ spu.cur_ztile_status = TILE_STATUS_GETTING;
+ }
+ }
+
+ if (spu.cur_ctile_status != TILE_STATUS_CLEAR) {
+ //printf("SPU %u: getting C tile %u, %u\n", spu.init.id, tx, ty);
+ get_tile(tx, ty, &spu.ctile, TAG_READ_TILE_COLOR, 0);
+ spu.cur_ctile_status = TILE_STATUS_GETTING;
+ }
+}
+
+
+/**
+ * Start putting dirty color/Z tiles back to main memory
+ */
+static INLINE void
+put_cz_tiles(uint tx, uint ty)
+{
+ if (spu.cur_ztile_status == TILE_STATUS_DIRTY) {
+ /* tile was modified and needs to be written back */
+ //printf("SPU %u: put dirty Z tile %u, %u\n", spu.init.id, tx, ty);
+ put_tile(tx, ty, &spu.ztile, TAG_WRITE_TILE_Z, 1);
+ spu.cur_ztile_status = TILE_STATUS_DEFINED;
+ }
+ else if (spu.cur_ztile_status == TILE_STATUS_GETTING) {
+ /* tile was never used */
+ spu.cur_ztile_status = TILE_STATUS_DEFINED;
+ //printf("SPU %u: put getting Z tile %u, %u\n", spu.init.id, tx, ty);
+ }
+
+ if (spu.cur_ctile_status == TILE_STATUS_DIRTY) {
+ /* tile was modified and needs to be written back */
+ //printf("SPU %u: put dirty C tile %u, %u\n", spu.init.id, tx, ty);
+ put_tile(tx, ty, &spu.ctile, TAG_WRITE_TILE_COLOR, 0);
+ spu.cur_ctile_status = TILE_STATUS_DEFINED;
+ }
+ else if (spu.cur_ctile_status == TILE_STATUS_GETTING) {
+ /* tile was never used */
+ spu.cur_ctile_status = TILE_STATUS_DEFINED;
+ //printf("SPU %u: put getting C tile %u, %u\n", spu.init.id, tx, ty);
+ }
+}
+
+
+/**
+ * Wait for 'put' of color/z tiles to complete.
+ */
+static INLINE void
+wait_put_cz_tiles(void)
+{
+ wait_on_mask(1 << TAG_WRITE_TILE_COLOR);
+ if (spu.read_depth_stencil) {
+ wait_on_mask(1 << TAG_WRITE_TILE_Z);
+ }
+}
+
+
+/**
+ * Render primitives
+ * \param pos_incr  returns value indicating how many words to skip after
+ * this command in the batch buffer
+ */
+void
+cmd_render(const struct cell_command_render *render, uint *pos_incr)
+{
+ /* we'll DMA into these buffers */
+ ubyte vertex_data[CELL_BUFFER_SIZE] ALIGN16_ATTRIB;
+ const uint vertex_size = render->vertex_size; /* in bytes */
+ /*const*/ uint total_vertex_bytes = render->num_verts * vertex_size;
+ uint index_bytes;
+ const ubyte *vertices;
+ const ushort *indexes;
+ uint i, j;
+ uint num_tiles;
+
+ D_PRINTF(CELL_DEBUG_CMD,
+ "RENDER prim=%u num_vert=%u num_ind=%u inline_vert=%u\n",
+ render->prim_type,
+ render->num_verts,
+ render->num_indexes,
+ render->inline_verts);
+
+ ASSERT(sizeof(*render) % 4 == 0);
+ ASSERT(total_vertex_bytes % 16 == 0);
+ ASSERT(render->prim_type == PIPE_PRIM_TRIANGLES);
+ ASSERT(render->num_indexes % 3 == 0);
+
+
+ /* indexes are right after the render command in the batch buffer */
+ indexes = (const ushort *) (render + 1);
+ index_bytes = ROUNDUP8(render->num_indexes * 2);
+ *pos_incr = index_bytes / 8 + sizeof(*render) / 8;
+
+
+ if (render->inline_verts) {
+ /* Vertices are after indexes in batch buffer at next 16-byte addr */
+ vertices = (const ubyte *) render + (*pos_incr * 8);
+ vertices = (const ubyte *) align_pointer((void *) vertices, 16);
+ ASSERT_ALIGN16(vertices);
+ *pos_incr = ((vertices + total_vertex_bytes) - (ubyte *) render) / 8;
+ }
+ else {
+ /* Begin DMA fetch of vertex buffer */
+ ubyte *src = spu.init.buffers[render->vertex_buf];
+ ubyte *dest = vertex_data;
+
+ /* skip vertex data we won't use */
+#if 01
+ src += render->min_index * vertex_size;
+ dest += render->min_index * vertex_size;
+ total_vertex_bytes -= render->min_index * vertex_size;
+#endif
+ ASSERT(total_vertex_bytes % 16 == 0);
+ ASSERT_ALIGN16(dest);
+ ASSERT_ALIGN16(src);
+
+ mfc_get(dest, /* in vertex_data[] array */
+ (unsigned int) src, /* src in main memory */
+ total_vertex_bytes, /* size */
+ TAG_VERTEX_BUFFER,
+ 0, /* tid */
+ 0 /* rid */);
+
+ vertices = vertex_data;
+
+ wait_on_mask(1 << TAG_VERTEX_BUFFER);
+ }
+
+
+ /**
+ ** find tiles which intersect the prim bounding box
+ **/
+ uint txmin, tymin, box_width_tiles, box_num_tiles;
+ tile_bounding_box(render, &txmin, &tymin,
+ &box_num_tiles, &box_width_tiles);
+
+
+ /* make sure any pending clears have completed */
+ wait_on_mask(1 << TAG_SURFACE_CLEAR); /* XXX temporary */
+
+
+ num_tiles = 0;
+
+ /**
+ ** loop over tiles, rendering tris
+ **/
+ for (i = 0; i < box_num_tiles; i++) {
+ const uint tx = txmin + i % box_width_tiles;
+ const uint ty = tymin + i / box_width_tiles;
+
+ ASSERT(tx < spu.fb.width_tiles);
+ ASSERT(ty < spu.fb.height_tiles);
+
+ if (!my_tile(tx, ty))
+ continue;
+
+ num_tiles++;
+
+ spu.cur_ctile_status = spu.ctile_status[ty][tx];
+ spu.cur_ztile_status = spu.ztile_status[ty][tx];
+
+ get_cz_tiles(tx, ty);
+
+ uint drawn = 0;
+
+ const qword vertex_sizes = (qword)spu_splats(vertex_size);
+ const qword verticess = (qword)spu_splats((uint)vertices);
+
+ ASSERT_ALIGN16(&indexes[0]);
+
+ const uint num_indexes = render->num_indexes;
+
+ /* loop over tris
+       * &indexes[0] will be 16-byte aligned.  This loop is heavily unrolled
+       * to avoid variable rotates when extracting vertex indices.
+ */
+ for (j = 0; j < num_indexes; j += 24) {
+ /* Load three vectors, containing 24 ushort indices */
+ const qword* lower_qword = (qword*)&indexes[j];
+ const qword indices0 = lower_qword[0];
+ const qword indices1 = lower_qword[1];
+ const qword indices2 = lower_qword[2];
+
+ /* stores three indices for each tri n in slots 0, 1 and 2 of vsn */
+ /* Straightforward rotates for these */
+ qword vs0 = indices0;
+ qword vs1 = si_shlqbyi(indices0, 6);
+ qword vs3 = si_shlqbyi(indices1, 2);
+ qword vs4 = si_shlqbyi(indices1, 8);
+ qword vs6 = si_shlqbyi(indices2, 4);
+ qword vs7 = si_shlqbyi(indices2, 10);
+
+ /* For tri 2 and 5, the three indices are split across two machine
+ * words - rotate and combine */
+ const qword tmp2a = si_shlqbyi(indices0, 12);
+ const qword tmp2b = si_rotqmbyi(indices1, 12|16);
+ qword vs2 = si_selb(tmp2a, tmp2b, si_fsmh(si_from_uint(0x20)));
+
+ const qword tmp5a = si_shlqbyi(indices1, 14);
+ const qword tmp5b = si_rotqmbyi(indices2, 14|16);
+ qword vs5 = si_selb(tmp5a, tmp5b, si_fsmh(si_from_uint(0x60)));
+
+ /* unpack indices from halfword slots to word slots */
+ vs0 = si_shufb(vs0, vs0, SHUFB8(0,A,0,B,0,C,0,0));
+ vs1 = si_shufb(vs1, vs1, SHUFB8(0,A,0,B,0,C,0,0));
+ vs2 = si_shufb(vs2, vs2, SHUFB8(0,A,0,B,0,C,0,0));
+ vs3 = si_shufb(vs3, vs3, SHUFB8(0,A,0,B,0,C,0,0));
+ vs4 = si_shufb(vs4, vs4, SHUFB8(0,A,0,B,0,C,0,0));
+ vs5 = si_shufb(vs5, vs5, SHUFB8(0,A,0,B,0,C,0,0));
+ vs6 = si_shufb(vs6, vs6, SHUFB8(0,A,0,B,0,C,0,0));
+ vs7 = si_shufb(vs7, vs7, SHUFB8(0,A,0,B,0,C,0,0));
+
+ /* Calculate address of vertex in vertices[] */
+ vs0 = si_mpya(vs0, vertex_sizes, verticess);
+ vs1 = si_mpya(vs1, vertex_sizes, verticess);
+ vs2 = si_mpya(vs2, vertex_sizes, verticess);
+ vs3 = si_mpya(vs3, vertex_sizes, verticess);
+ vs4 = si_mpya(vs4, vertex_sizes, verticess);
+ vs5 = si_mpya(vs5, vertex_sizes, verticess);
+ vs6 = si_mpya(vs6, vertex_sizes, verticess);
+ vs7 = si_mpya(vs7, vertex_sizes, verticess);
+
+ /* Select the appropriate call based on the number of vertices
+ * remaining */
+ switch(num_indexes - j) {
+ default: drawn += tri_draw(vs7, tx, ty);
+ case 21: drawn += tri_draw(vs6, tx, ty);
+ case 18: drawn += tri_draw(vs5, tx, ty);
+ case 15: drawn += tri_draw(vs4, tx, ty);
+ case 12: drawn += tri_draw(vs3, tx, ty);
+ case 9: drawn += tri_draw(vs2, tx, ty);
+ case 6: drawn += tri_draw(vs1, tx, ty);
+ case 3: drawn += tri_draw(vs0, tx, ty);
+ }
+ }
+
+ //printf("SPU %u: drew %u of %u\n", spu.init.id, drawn, render->num_indexes/3);
+
+ /* write color/z tiles back to main framebuffer, if dirtied */
+ put_cz_tiles(tx, ty);
+
+ wait_put_cz_tiles(); /* XXX seems unnecessary... */
+
+ spu.ctile_status[ty][tx] = spu.cur_ctile_status;
+ spu.ztile_status[ty][tx] = spu.cur_ztile_status;
+ }
+
+ D_PRINTF(CELL_DEBUG_CMD,
+ "RENDER done (%u tiles hit)\n",
+ num_tiles);
+}
diff --git a/src/gallium/drivers/cell/spu/spu_render.h b/src/gallium/drivers/cell/spu/spu_render.h
new file mode 100644
index 0000000000..493434f087
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_render.h
@@ -0,0 +1,38 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef SPU_RENDER_H
+#define SPU_RENDER_H
+
+#include "cell/common.h"
+
+extern void
+cmd_render(const struct cell_command_render *render, uint *pos_incr);
+
+#endif /* SPU_RENDER_H */
+
diff --git a/src/gallium/drivers/cell/spu/spu_shuffle.h b/src/gallium/drivers/cell/spu/spu_shuffle.h
new file mode 100644
index 0000000000..74f2a0b6d2
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_shuffle.h
@@ -0,0 +1,186 @@
+#ifndef SPU_SHUFFLE_H
+#define SPU_SHUFFLE_H
+
+/*
+ * Generate shuffle patterns with minimal fuss.
+ *
+ * Based on ideas from
+ * http://www.insomniacgames.com/tech/articles/0408/files/shuffles.pdf
+ *
+ * A-P indicates 0-15th position in first vector
+ * a-p indicates 0-15th position in second vector
+ *
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * |00|01|02|03|04|05|06|07|08|09|0a|0b|0c|0d|0e|0f|
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * | A| B| C| D|
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | A| B| C| D| E| F| G| H|
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * | A| B| C| D| E| F| G| H| I| J| K| L| M| N| O| P|
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ *
+ * x or X indicates 0xff
+ * 8 indicates 0x80
+ * 0 indicates 0x00
+ *
+ * The macros SHUFFLE4() SHUFFLE8() and SHUFFLE16() provide a const vector
+ * unsigned char literal suitable for use with spu_shuffle().
+ *
+ * The macros SHUFB4() SHUFB8() and SHUFB16() provide a const qword vector
+ * literal suitable for use with si_shufb().
+ *
+ *
+ * For example :
+ * SHUFB4(A,A,A,A)
+ * expands to :
+ * ((const qword){0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3})
+ *
+ * SHUFFLE8(A,B,a,b,C,c,8,8)
+ * expands to :
+ * ((const vector unsigned char){0x00,0x01,0x02,0x03,0x10,0x11,0x12,0x13,
+ * 0x04,0x05,0x14,0x15,0xe0,0xe0,0xe0,0xe0})
+ *
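+ * SHUFB8(0,A,0,B,0,C,0,0)
+ * (the pattern used in spu_render.c to widen three halfword indices into
+ * word slots) expands to :
+ * ((const qword){0x80,0x80,0x00,0x01,0x80,0x80,0x02,0x03,
+ *                0x80,0x80,0x04,0x05,0x80,0x80,0x80,0x80})
+ *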
+ */
+
+#include <spu_intrinsics.h>
+
+#define SHUFFLE_PATTERN_4_A__ 0x00, 0x01, 0x02, 0x03
+#define SHUFFLE_PATTERN_4_B__ 0x04, 0x05, 0x06, 0x07
+#define SHUFFLE_PATTERN_4_C__ 0x08, 0x09, 0x0a, 0x0b
+#define SHUFFLE_PATTERN_4_D__ 0x0c, 0x0d, 0x0e, 0x0f
+#define SHUFFLE_PATTERN_4_a__ 0x10, 0x11, 0x12, 0x13
+#define SHUFFLE_PATTERN_4_b__ 0x14, 0x15, 0x16, 0x17
+#define SHUFFLE_PATTERN_4_c__ 0x18, 0x19, 0x1a, 0x1b
+#define SHUFFLE_PATTERN_4_d__ 0x1c, 0x1d, 0x1e, 0x1f
+#define SHUFFLE_PATTERN_4_X__ 0xc0, 0xc0, 0xc0, 0xc0
+#define SHUFFLE_PATTERN_4_x__ 0xc0, 0xc0, 0xc0, 0xc0
+#define SHUFFLE_PATTERN_4_0__ 0x80, 0x80, 0x80, 0x80
+#define SHUFFLE_PATTERN_4_8__ 0xe0, 0xe0, 0xe0, 0xe0
+
+#define SHUFFLE_VECTOR_4__(A, B, C, D) \
+ SHUFFLE_PATTERN_4_##A##__, \
+ SHUFFLE_PATTERN_4_##B##__, \
+ SHUFFLE_PATTERN_4_##C##__, \
+ SHUFFLE_PATTERN_4_##D##__
+
+#define SHUFFLE4(A, B, C, D) \
+ ((const vector unsigned char){ \
+ SHUFFLE_VECTOR_4__(A, B, C, D) \
+ })
+
+#define SHUFB4(A, B, C, D) \
+ ((const qword){ \
+ SHUFFLE_VECTOR_4__(A, B, C, D) \
+ })
+
+
+#define SHUFFLE_PATTERN_8_A__ 0x00, 0x01
+#define SHUFFLE_PATTERN_8_B__ 0x02, 0x03
+#define SHUFFLE_PATTERN_8_C__ 0x04, 0x05
+#define SHUFFLE_PATTERN_8_D__ 0x06, 0x07
+#define SHUFFLE_PATTERN_8_E__ 0x08, 0x09
+#define SHUFFLE_PATTERN_8_F__ 0x0a, 0x0b
+#define SHUFFLE_PATTERN_8_G__ 0x0c, 0x0d
+#define SHUFFLE_PATTERN_8_H__ 0x0e, 0x0f
+#define SHUFFLE_PATTERN_8_a__ 0x10, 0x11
+#define SHUFFLE_PATTERN_8_b__ 0x12, 0x13
+#define SHUFFLE_PATTERN_8_c__ 0x14, 0x15
+#define SHUFFLE_PATTERN_8_d__ 0x16, 0x17
+#define SHUFFLE_PATTERN_8_e__ 0x18, 0x19
+#define SHUFFLE_PATTERN_8_f__ 0x1a, 0x1b
+#define SHUFFLE_PATTERN_8_g__ 0x1c, 0x1d
+#define SHUFFLE_PATTERN_8_h__ 0x1e, 0x1f
+#define SHUFFLE_PATTERN_8_X__ 0xc0, 0xc0
+#define SHUFFLE_PATTERN_8_x__ 0xc0, 0xc0
+#define SHUFFLE_PATTERN_8_0__ 0x80, 0x80
+#define SHUFFLE_PATTERN_8_8__ 0xe0, 0xe0
+
+
+#define SHUFFLE_VECTOR_8__(A, B, C, D, E, F, G, H) \
+ SHUFFLE_PATTERN_8_##A##__, \
+ SHUFFLE_PATTERN_8_##B##__, \
+ SHUFFLE_PATTERN_8_##C##__, \
+ SHUFFLE_PATTERN_8_##D##__, \
+ SHUFFLE_PATTERN_8_##E##__, \
+ SHUFFLE_PATTERN_8_##F##__, \
+ SHUFFLE_PATTERN_8_##G##__, \
+ SHUFFLE_PATTERN_8_##H##__
+
+#define SHUFFLE8(A, B, C, D, E, F, G, H) \
+ ((const vector unsigned char){ \
+ SHUFFLE_VECTOR_8__(A, B, C, D, E, F, G, H) \
+ })
+
+#define SHUFB8(A, B, C, D, E, F, G, H) \
+ ((const qword){ \
+ SHUFFLE_VECTOR_8__(A, B, C, D, E, F, G, H) \
+ })
+
+
+#define SHUFFLE_PATTERN_16_A__ 0x00
+#define SHUFFLE_PATTERN_16_B__ 0x01
+#define SHUFFLE_PATTERN_16_C__ 0x02
+#define SHUFFLE_PATTERN_16_D__ 0x03
+#define SHUFFLE_PATTERN_16_E__ 0x04
+#define SHUFFLE_PATTERN_16_F__ 0x05
+#define SHUFFLE_PATTERN_16_G__ 0x06
+#define SHUFFLE_PATTERN_16_H__ 0x07
+#define SHUFFLE_PATTERN_16_I__ 0x08
+#define SHUFFLE_PATTERN_16_J__ 0x09
+#define SHUFFLE_PATTERN_16_K__ 0x0a
+#define SHUFFLE_PATTERN_16_L__ 0x0b
+#define SHUFFLE_PATTERN_16_M__ 0x0c
+#define SHUFFLE_PATTERN_16_N__ 0x0d
+#define SHUFFLE_PATTERN_16_O__ 0x0e
+#define SHUFFLE_PATTERN_16_P__ 0x0f
+#define SHUFFLE_PATTERN_16_a__ 0x10
+#define SHUFFLE_PATTERN_16_b__ 0x11
+#define SHUFFLE_PATTERN_16_c__ 0x12
+#define SHUFFLE_PATTERN_16_d__ 0x13
+#define SHUFFLE_PATTERN_16_e__ 0x14
+#define SHUFFLE_PATTERN_16_f__ 0x15
+#define SHUFFLE_PATTERN_16_g__ 0x16
+#define SHUFFLE_PATTERN_16_h__ 0x17
+#define SHUFFLE_PATTERN_16_i__ 0x18
+#define SHUFFLE_PATTERN_16_j__ 0x19
+#define SHUFFLE_PATTERN_16_k__ 0x1a
+#define SHUFFLE_PATTERN_16_l__ 0x1b
+#define SHUFFLE_PATTERN_16_m__ 0x1c
+#define SHUFFLE_PATTERN_16_n__ 0x1d
+#define SHUFFLE_PATTERN_16_o__ 0x1e
+#define SHUFFLE_PATTERN_16_p__ 0x1f
+#define SHUFFLE_PATTERN_16_X__ 0xc0
+#define SHUFFLE_PATTERN_16_x__ 0xc0
+#define SHUFFLE_PATTERN_16_0__ 0x80
+#define SHUFFLE_PATTERN_16_8__ 0xe0
+
+#define SHUFFLE_VECTOR_16__(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P) \
+ SHUFFLE_PATTERN_16_##A##__, \
+ SHUFFLE_PATTERN_16_##B##__, \
+ SHUFFLE_PATTERN_16_##C##__, \
+ SHUFFLE_PATTERN_16_##D##__, \
+ SHUFFLE_PATTERN_16_##E##__, \
+ SHUFFLE_PATTERN_16_##F##__, \
+ SHUFFLE_PATTERN_16_##G##__, \
+ SHUFFLE_PATTERN_16_##H##__, \
+ SHUFFLE_PATTERN_16_##I##__, \
+ SHUFFLE_PATTERN_16_##J##__, \
+ SHUFFLE_PATTERN_16_##K##__, \
+ SHUFFLE_PATTERN_16_##L##__, \
+ SHUFFLE_PATTERN_16_##M##__, \
+ SHUFFLE_PATTERN_16_##N##__, \
+ SHUFFLE_PATTERN_16_##O##__, \
+ SHUFFLE_PATTERN_16_##P##__
+
+#define SHUFFLE16(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P) \
+ ((const vector unsigned char){ \
+ SHUFFLE_VECTOR_16__(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P) \
+ })
+
+#define SHUFB16(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P) \
+ ((const qword){ \
+ SHUFFLE_VECTOR_16__(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P) \
+ })
+
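+/*
+ * Illustrative usage note (not part of the original header): the letter
+ * arguments name elements of the two shuffle operands.  Upper-case letters
+ * select from the first vector, lower-case letters from the second, and
+ * '0' inserts zero bytes via the shufb special selector codes.  For example,
+ *
+ *    vec_uint4 v = spu_shuffle(lo, hi, SHUFFLE4(A,B,a,b));
+ *
+ * builds {lo[0], lo[1], hi[0], hi[1]} from two word vectors 'lo' and 'hi'.
+ */
+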
+#endif
diff --git a/src/gallium/drivers/cell/spu/spu_texture.c b/src/gallium/drivers/cell/spu/spu_texture.c
new file mode 100644
index 0000000000..69784c8978
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_texture.c
@@ -0,0 +1,641 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include <math.h>
+
+#include "pipe/p_compiler.h"
+#include "spu_main.h"
+#include "spu_texture.h"
+#include "spu_tile.h"
+#include "spu_colorpack.h"
+#include "spu_dcache.h"
+
+
+/**
+ * Mark all tex cache entries as invalid.
+ */
+void
+invalidate_tex_cache(void)
+{
+ uint lvl;
+ for (lvl = 0; lvl < CELL_MAX_TEXTURE_LEVELS; lvl++) {
+ uint unit = 0;
+ uint bytes = 4 * spu.texture[unit].level[lvl].width
+ * spu.texture[unit].level[lvl].height;
+
+ if (spu.texture[unit].target == PIPE_TEXTURE_CUBE)
+ bytes *= 6;
+ else if (spu.texture[unit].target == PIPE_TEXTURE_3D)
+ bytes *= spu.texture[unit].level[lvl].depth;
+
+ spu_dcache_mark_dirty((unsigned) spu.texture[unit].level[lvl].start, bytes);
+ }
+}
+
+
+/**
+ * Get four texels from locations (x[0], y[0]), (x[1], y[1]) ...
+ *
+ * NOTE: in the typical case of bilinear filtering, the four texels
+ * are in a 2x2 group so we could get by with just two dcache fetches
+ * (two side-by-side texels per fetch). But when bilinear filtering
+ * wraps around a texture edge, we'll probably need code like we have
+ * now.
+ * FURTHERMORE: since we're rasterizing a quad of 2x2 pixels at a time,
+ * it's quite likely that the four pixels in a quad will need some of the
+ * same texels. So look into doing texture fetches for four pixels at
+ * a time.
+ */
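+/*
+ * Scalar sketch of the address computation done (vectorized) below, added
+ * here for clarity:
+ *
+ *    tile   = (y / 32) * tiles_per_row + (x / 32)
+ *    offset = tile * sizeof(tile_t) + ((y & 31) * 32 + (x & 31)) * 4
+ *    ea     = tlevel->start + face * tlevel->bytes_per_image + offset
+ */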
+static void
+get_four_texels(const struct spu_texture_level *tlevel, uint face,
+ vec_int4 x, vec_int4 y,
+ vec_uint4 *texels)
+{
+ unsigned texture_ea = (uintptr_t) tlevel->start;
+ const vec_int4 tile_x = spu_rlmask(x, -5); /* tile_x = x / 32 */
+ const vec_int4 tile_y = spu_rlmask(y, -5); /* tile_y = y / 32 */
+ const qword offset_x = si_andi((qword) x, 0x1f); /* offset_x = x & 0x1f */
+ const qword offset_y = si_andi((qword) y, 0x1f); /* offset_y = y & 0x1f */
+
+ const qword tiles_per_row = (qword) spu_splats(tlevel->tiles_per_row);
+ const qword tile_size = (qword) spu_splats((unsigned) sizeof(tile_t));
+
+ qword tile_offset = si_mpya((qword) tile_y, tiles_per_row, (qword) tile_x);
+ tile_offset = si_mpy((qword) tile_offset, tile_size);
+
+ qword texel_offset = si_a(si_mpyui(offset_y, 32), offset_x);
+ texel_offset = si_mpyui(texel_offset, 4);
+
+ vec_uint4 offset = (vec_uint4) si_a(tile_offset, texel_offset);
+
+ texture_ea = texture_ea + face * tlevel->bytes_per_image;
+
+ spu_dcache_fetch_unaligned((qword *) & texels[0],
+ texture_ea + spu_extract(offset, 0), 4);
+ spu_dcache_fetch_unaligned((qword *) & texels[1],
+ texture_ea + spu_extract(offset, 1), 4);
+ spu_dcache_fetch_unaligned((qword *) & texels[2],
+ texture_ea + spu_extract(offset, 2), 4);
+ spu_dcache_fetch_unaligned((qword *) & texels[3],
+ texture_ea + spu_extract(offset, 3), 4);
+}
+
+
+/** clamp vec to [0, max] */
+static INLINE vector signed int
+spu_clamp(vector signed int vec, vector signed int max)
+{
+ static const vector signed int zero = {0,0,0,0};
+ vector unsigned int c;
+ c = spu_cmpgt(vec, zero); /* c = vec > zero ? ~0 : 0 */
+ vec = spu_sel(zero, vec, c);
+ c = spu_cmpgt(vec, max); /* c = vec > max ? ~0 : 0 */
+ vec = spu_sel(vec, max, c);
+ return vec;
+}
+
+
+
+/**
+ * Do nearest texture sampling for four pixels.
+ * \param colors returned colors in SOA format (rrrr, gggg, bbbb, aaaa).
+ */
+void
+sample_texture_2d_nearest(vector float s, vector float t,
+ uint unit, uint level, uint face,
+ vector float colors[4])
+{
+ const struct spu_texture_level *tlevel = &spu.texture[unit].level[level];
+ vector float ss = spu_mul(s, tlevel->scale_s);
+ vector float tt = spu_mul(t, tlevel->scale_t);
+ vector signed int is = spu_convts(ss, 0);
+ vector signed int it = spu_convts(tt, 0);
+ vec_uint4 texels[4];
+
+ /* PIPE_TEX_WRAP_REPEAT */
+ is = spu_and(is, tlevel->mask_s);
+ it = spu_and(it, tlevel->mask_t);
+
+ /* PIPE_TEX_WRAP_CLAMP */
+ is = spu_clamp(is, tlevel->max_s);
+ it = spu_clamp(it, tlevel->max_t);
+
+ get_four_texels(tlevel, face, is, it, texels);
+
+ /* convert four packed A8R8G8B8 pixels to float RRRR,GGGG,BBBB,AAAA */
+ spu_unpack_A8R8G8B8_transpose4(texels, colors);
+}
+
+
+/**
+ * Do bilinear texture sampling for four pixels.
+ * \param colors returned colors in SOA format (rrrr, gggg, bbbb, aaaa).
+ */
+void
+sample_texture_2d_bilinear(vector float s, vector float t,
+ uint unit, uint level, uint face,
+ vector float colors[4])
+{
+ const struct spu_texture_level *tlevel = &spu.texture[unit].level[level];
+ static const vector float half = {-0.5f, -0.5f, -0.5f, -0.5f};
+
+ vector float ss = spu_madd(s, tlevel->scale_s, half);
+ vector float tt = spu_madd(t, tlevel->scale_t, half);
+
+ vector signed int is0 = spu_convts(ss, 0);
+ vector signed int it0 = spu_convts(tt, 0);
+
+ /* is + 1, it + 1 */
+ vector signed int is1 = spu_add(is0, 1);
+ vector signed int it1 = spu_add(it0, 1);
+
+ /* PIPE_TEX_WRAP_REPEAT */
+ is0 = spu_and(is0, tlevel->mask_s);
+ it0 = spu_and(it0, tlevel->mask_t);
+ is1 = spu_and(is1, tlevel->mask_s);
+ it1 = spu_and(it1, tlevel->mask_t);
+
+ /* PIPE_TEX_WRAP_CLAMP */
+ is0 = spu_clamp(is0, tlevel->max_s);
+ it0 = spu_clamp(it0, tlevel->max_t);
+ is1 = spu_clamp(is1, tlevel->max_s);
+ it1 = spu_clamp(it1, tlevel->max_t);
+
+ /* get packed int texels */
+ vector unsigned int texels[16];
+ get_four_texels(tlevel, face, is0, it0, texels + 0); /* upper-left */
+ get_four_texels(tlevel, face, is1, it0, texels + 4); /* upper-right */
+ get_four_texels(tlevel, face, is0, it1, texels + 8); /* lower-left */
+ get_four_texels(tlevel, face, is1, it1, texels + 12); /* lower-right */
+
+ /* convert packed int texels to float colors */
+ vector float ftexels[16];
+ spu_unpack_A8R8G8B8_transpose4(texels + 0, ftexels + 0);
+ spu_unpack_A8R8G8B8_transpose4(texels + 4, ftexels + 4);
+ spu_unpack_A8R8G8B8_transpose4(texels + 8, ftexels + 8);
+ spu_unpack_A8R8G8B8_transpose4(texels + 12, ftexels + 12);
+
+ /* Compute weighting factors in [0,1]
+ * Multiply texcoord by 1024, AND with 1023, convert back to float.
+ */
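+ /* Worked example (illustrative): ss = 3.25 gives 3.25 * 1024 = 3328,
+ * 3328 & 1023 = 256, and 256 / 1024 = 0.25, i.e. the fractional part
+ * of the scaled texcoord.
+ */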
+ vector float ss1024 = spu_mul(ss, spu_splats(1024.0f));
+ vector signed int iss1024 = spu_convts(ss1024, 0);
+ iss1024 = spu_and(iss1024, 1023);
+ vector float sWeights0 = spu_convtf(iss1024, 10);
+
+ vector float tt1024 = spu_mul(tt, spu_splats(1024.0f));
+ vector signed int itt1024 = spu_convts(tt1024, 0);
+ itt1024 = spu_and(itt1024, 1023);
+ vector float tWeights0 = spu_convtf(itt1024, 10);
+
+ /* 1 - sWeight and 1 - tWeight */
+ vector float sWeights1 = spu_sub(spu_splats(1.0f), sWeights0);
+ vector float tWeights1 = spu_sub(spu_splats(1.0f), tWeights0);
+
+ /* reds, for four pixels */
+ ftexels[ 0] = spu_mul(ftexels[ 0], spu_mul(sWeights1, tWeights1)); /*ul*/
+ ftexels[ 4] = spu_mul(ftexels[ 4], spu_mul(sWeights0, tWeights1)); /*ur*/
+ ftexels[ 8] = spu_mul(ftexels[ 8], spu_mul(sWeights1, tWeights0)); /*ll*/
+ ftexels[12] = spu_mul(ftexels[12], spu_mul(sWeights0, tWeights0)); /*lr*/
+ colors[0] = spu_add(spu_add(ftexels[0], ftexels[4]),
+ spu_add(ftexels[8], ftexels[12]));
+
+ /* greens, for four pixels */
+ ftexels[ 1] = spu_mul(ftexels[ 1], spu_mul(sWeights1, tWeights1)); /*ul*/
+ ftexels[ 5] = spu_mul(ftexels[ 5], spu_mul(sWeights0, tWeights1)); /*ur*/
+ ftexels[ 9] = spu_mul(ftexels[ 9], spu_mul(sWeights1, tWeights0)); /*ll*/
+ ftexels[13] = spu_mul(ftexels[13], spu_mul(sWeights0, tWeights0)); /*lr*/
+ colors[1] = spu_add(spu_add(ftexels[1], ftexels[5]),
+ spu_add(ftexels[9], ftexels[13]));
+
+ /* blues, for four pixels */
+ ftexels[ 2] = spu_mul(ftexels[ 2], spu_mul(sWeights1, tWeights1)); /*ul*/
+ ftexels[ 6] = spu_mul(ftexels[ 6], spu_mul(sWeights0, tWeights1)); /*ur*/
+ ftexels[10] = spu_mul(ftexels[10], spu_mul(sWeights1, tWeights0)); /*ll*/
+ ftexels[14] = spu_mul(ftexels[14], spu_mul(sWeights0, tWeights0)); /*lr*/
+ colors[2] = spu_add(spu_add(ftexels[2], ftexels[6]),
+ spu_add(ftexels[10], ftexels[14]));
+
+ /* alphas, for four pixels */
+ ftexels[ 3] = spu_mul(ftexels[ 3], spu_mul(sWeights1, tWeights1)); /*ul*/
+ ftexels[ 7] = spu_mul(ftexels[ 7], spu_mul(sWeights0, tWeights1)); /*ur*/
+ ftexels[11] = spu_mul(ftexels[11], spu_mul(sWeights1, tWeights0)); /*ll*/
+ ftexels[15] = spu_mul(ftexels[15], spu_mul(sWeights0, tWeights0)); /*lr*/
+ colors[3] = spu_add(spu_add(ftexels[3], ftexels[7]),
+ spu_add(ftexels[11], ftexels[15]));
+}
+
+
+
+/**
+ * Adapted from /opt/cell/sdk/usr/spu/include/transpose_matrix4x4.h
+ */
+static INLINE void
+transpose(vector unsigned int *mOut0,
+ vector unsigned int *mOut1,
+ vector unsigned int *mOut2,
+ vector unsigned int *mOut3,
+ vector unsigned int *mIn)
+{
+ vector unsigned int abcd, efgh, ijkl, mnop; /* input vectors */
+ vector unsigned int aeim, bfjn, cgko, dhlp; /* output vectors */
+ vector unsigned int aibj, ckdl, emfn, gohp; /* intermediate vectors */
+
+ vector unsigned char shufflehi = ((vector unsigned char) {
+ 0x00, 0x01, 0x02, 0x03,
+ 0x10, 0x11, 0x12, 0x13,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x14, 0x15, 0x16, 0x17});
+ vector unsigned char shufflelo = ((vector unsigned char) {
+ 0x08, 0x09, 0x0A, 0x0B,
+ 0x18, 0x19, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F});
+ abcd = *(mIn+0);
+ efgh = *(mIn+1);
+ ijkl = *(mIn+2);
+ mnop = *(mIn+3);
+
+ aibj = spu_shuffle(abcd, ijkl, shufflehi);
+ ckdl = spu_shuffle(abcd, ijkl, shufflelo);
+ emfn = spu_shuffle(efgh, mnop, shufflehi);
+ gohp = spu_shuffle(efgh, mnop, shufflelo);
+
+ aeim = spu_shuffle(aibj, emfn, shufflehi);
+ bfjn = spu_shuffle(aibj, emfn, shufflelo);
+ cgko = spu_shuffle(ckdl, gohp, shufflehi);
+ dhlp = spu_shuffle(ckdl, gohp, shufflelo);
+
+ *mOut0 = aeim;
+ *mOut1 = bfjn;
+ *mOut2 = cgko;
+ *mOut3 = dhlp;
+}
+
+
+/**
+ * Bilinear filtering, using int instead of float arithmetic for computing
+ * sample weights.
+ */
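+/*
+ * Note on the fixed-point scale (explanatory, not in the original source):
+ * the weights below are 7-bit values in [0,127] and each color channel is
+ * 8 bits, so every weighted term fits in about 22 bits
+ * (127 * 127 * 255 < 2^22).  spu_convtf(cSum, 22) at the end scales the
+ * accumulated sums back to approximately [0, 1].
+ */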
+void
+sample_texture_2d_bilinear_int(vector float s, vector float t,
+ uint unit, uint level, uint face,
+ vector float colors[4])
+{
+ const struct spu_texture_level *tlevel = &spu.texture[unit].level[level];
+ static const vector float half = {-0.5f, -0.5f, -0.5f, -0.5f};
+
+ /* Scale texcoords by size of texture, and add half pixel bias */
+ vector float ss = spu_madd(s, tlevel->scale_s, half);
+ vector float tt = spu_madd(t, tlevel->scale_t, half);
+
+ /* convert float coords to fixed-pt coords with 7 fraction bits */
+ vector signed int is = spu_convts(ss, 7); /* XXX really need floor() here */
+ vector signed int it = spu_convts(tt, 7); /* XXX really need floor() here */
+
+ /* compute integer texel weights in [0, 127] */
+ vector signed int sWeights0 = spu_and(is, 127);
+ vector signed int tWeights0 = spu_and(it, 127);
+ vector signed int sWeights1 = spu_sub(127, sWeights0);
+ vector signed int tWeights1 = spu_sub(127, tWeights0);
+
+ /* texel coords: is0 = is / 128, it0 = it / 128 */
+ vector signed int is0 = spu_rlmask(is, -7);
+ vector signed int it0 = spu_rlmask(it, -7);
+
+ /* texel coords: is1 = is0 + 1, it1 = it0 + 1 */
+ vector signed int is1 = spu_add(is0, 1);
+ vector signed int it1 = spu_add(it0, 1);
+
+ /* PIPE_TEX_WRAP_REPEAT */
+ is0 = spu_and(is0, tlevel->mask_s);
+ it0 = spu_and(it0, tlevel->mask_t);
+ is1 = spu_and(is1, tlevel->mask_s);
+ it1 = spu_and(it1, tlevel->mask_t);
+
+ /* PIPE_TEX_WRAP_CLAMP */
+ is0 = spu_clamp(is0, tlevel->max_s);
+ it0 = spu_clamp(it0, tlevel->max_t);
+ is1 = spu_clamp(is1, tlevel->max_s);
+ it1 = spu_clamp(it1, tlevel->max_t);
+
+ /* get packed int texels */
+ vector unsigned int texels[16];
+ get_four_texels(tlevel, face, is0, it0, texels + 0); /* upper-left */
+ get_four_texels(tlevel, face, is1, it0, texels + 4); /* upper-right */
+ get_four_texels(tlevel, face, is0, it1, texels + 8); /* lower-left */
+ get_four_texels(tlevel, face, is1, it1, texels + 12); /* lower-right */
+
+ /* twiddle each packed 32-bit A8R8G8B8 pixel into R,G,B,A as four unsigned ints */
+ {
+ static const unsigned char ZERO = 0x80;
+ int i;
+ for (i = 0; i < 16; i++) {
+ texels[i] = spu_shuffle(texels[i], texels[i],
+ ((vector unsigned char) {
+ ZERO, ZERO, ZERO, 1,
+ ZERO, ZERO, ZERO, 2,
+ ZERO, ZERO, ZERO, 3,
+ ZERO, ZERO, ZERO, 0}));
+ }
+ }
+
+ /* convert RGBA,RGBA,RGBA,RGBA to RRRR,GGGG,BBBB,AAAA */
+ vector unsigned int texel0, texel1, texel2, texel3, texel4, texel5, texel6, texel7,
+ texel8, texel9, texel10, texel11, texel12, texel13, texel14, texel15;
+ transpose(&texel0, &texel1, &texel2, &texel3, texels + 0);
+ transpose(&texel4, &texel5, &texel6, &texel7, texels + 4);
+ transpose(&texel8, &texel9, &texel10, &texel11, texels + 8);
+ transpose(&texel12, &texel13, &texel14, &texel15, texels + 12);
+
+ /* compute weighted colors */
+ vector unsigned int c0, c1, c2, c3, cSum;
+
+ /* red */
+ c0 = (vector unsigned int) si_mpy((qword) texel0, si_mpy((qword) sWeights1, (qword) tWeights1)); /*ul*/
+ c1 = (vector unsigned int) si_mpy((qword) texel4, si_mpy((qword) sWeights0, (qword) tWeights1)); /*ur*/
+ c2 = (vector unsigned int) si_mpy((qword) texel8, si_mpy((qword) sWeights1, (qword) tWeights0)); /*ll*/
+ c3 = (vector unsigned int) si_mpy((qword) texel12, si_mpy((qword) sWeights0, (qword) tWeights0)); /*lr*/
+ cSum = spu_add(spu_add(c0, c1), spu_add(c2, c3));
+ colors[0] = spu_convtf(cSum, 22);
+
+ /* green */
+ c0 = (vector unsigned int) si_mpy((qword) texel1, si_mpy((qword) sWeights1, (qword) tWeights1)); /*ul*/
+ c1 = (vector unsigned int) si_mpy((qword) texel5, si_mpy((qword) sWeights0, (qword) tWeights1)); /*ur*/
+ c2 = (vector unsigned int) si_mpy((qword) texel9, si_mpy((qword) sWeights1, (qword) tWeights0)); /*ll*/
+ c3 = (vector unsigned int) si_mpy((qword) texel13, si_mpy((qword) sWeights0, (qword) tWeights0)); /*lr*/
+ cSum = spu_add(spu_add(c0, c1), spu_add(c2, c3));
+ colors[1] = spu_convtf(cSum, 22);
+
+ /* blue */
+ c0 = (vector unsigned int) si_mpy((qword) texel2, si_mpy((qword) sWeights1, (qword) tWeights1)); /*ul*/
+ c1 = (vector unsigned int) si_mpy((qword) texel6, si_mpy((qword) sWeights0, (qword) tWeights1)); /*ur*/
+ c2 = (vector unsigned int) si_mpy((qword) texel10, si_mpy((qword) sWeights1, (qword) tWeights0)); /*ll*/
+ c3 = (vector unsigned int) si_mpy((qword) texel14, si_mpy((qword) sWeights0, (qword) tWeights0)); /*lr*/
+ cSum = spu_add(spu_add(c0, c1), spu_add(c2, c3));
+ colors[2] = spu_convtf(cSum, 22);
+
+ /* alpha */
+ c0 = (vector unsigned int) si_mpy((qword) texel3, si_mpy((qword) sWeights1, (qword) tWeights1)); /*ul*/
+ c1 = (vector unsigned int) si_mpy((qword) texel7, si_mpy((qword) sWeights0, (qword) tWeights1)); /*ur*/
+ c2 = (vector unsigned int) si_mpy((qword) texel11, si_mpy((qword) sWeights1, (qword) tWeights0)); /*ll*/
+ c3 = (vector unsigned int) si_mpy((qword) texel15, si_mpy((qword) sWeights0, (qword) tWeights0)); /*lr*/
+ cSum = spu_add(spu_add(c0, c1), spu_add(c2, c3));
+ colors[3] = spu_convtf(cSum, 22);
+}
+
+
+
+/**
+ * Compute level of detail factor from texcoords.
+ */
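+/*
+ * In scalar terms (explanatory note): rho approximates the larger of the
+ * quad's texel-space footprints along x and y, and lambda = log2(rho)
+ * then selects the mipmap level.
+ */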
+static INLINE float
+compute_lambda_2d(uint unit, vector float s, vector float t)
+{
+ uint baseLevel = 0;
+ float width = spu.texture[unit].level[baseLevel].width;
+ float height = spu.texture[unit].level[baseLevel].height;
+ float dsdx = width * (spu_extract(s, 1) - spu_extract(s, 0));
+ float dsdy = width * (spu_extract(s, 2) - spu_extract(s, 0));
+ float dtdx = height * (spu_extract(t, 1) - spu_extract(t, 0));
+ float dtdy = height * (spu_extract(t, 2) - spu_extract(t, 0));
+#if 0
+ /* ideal value */
+ float x = dsdx * dsdx + dtdx * dtdx;
+ float y = dsdy * dsdy + dtdy * dtdy;
+ float rho = x > y ? x : y;
+ rho = sqrtf(rho);
+#else
+ /* approximation */
+ dsdx = fabsf(dsdx);
+ dsdy = fabsf(dsdy);
+ dtdx = fabsf(dtdx);
+ dtdy = fabsf(dtdy);
+ float rho = (dsdx + dsdy + dtdx + dtdy) * 0.5;
+#endif
+ float lambda = logf(rho) * 1.442695f; /* compute logbase2(rho) */
+ return lambda;
+}
+
+
+/**
+ * Blend two sets of colors according to weight.
+ */
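+/* (i.e. c0' = c0 + (c1 - c0) * weight, a standard linear interpolation) */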
+static void
+blend_colors(vector float c0[4], const vector float c1[4], float weight)
+{
+ vector float t = spu_splats(weight);
+ vector float dc0 = spu_sub(c1[0], c0[0]);
+ vector float dc1 = spu_sub(c1[1], c0[1]);
+ vector float dc2 = spu_sub(c1[2], c0[2]);
+ vector float dc3 = spu_sub(c1[3], c0[3]);
+ c0[0] = spu_madd(dc0, t, c0[0]);
+ c0[1] = spu_madd(dc1, t, c0[1]);
+ c0[2] = spu_madd(dc2, t, c0[2]);
+ c0[3] = spu_madd(dc3, t, c0[3]);
+}
+
+
+/**
+ * Texture sampling with level of detail selection and possibly mipmap
+ * interpolation.
+ */
+void
+sample_texture_2d_lod(vector float s, vector float t,
+ uint unit, uint level_ignored, uint face,
+ vector float colors[4])
+{
+ /*
+ * Note that we're computing a lambda/lod here that's used for all
+ * four pixels in the quad.
+ */
+ float lambda = compute_lambda_2d(unit, s, t);
+
+ (void) face;
+ (void) level_ignored;
+
+ /* apply lod bias */
+ lambda += spu.sampler[unit].lod_bias;
+
+ /* clamp */
+ if (lambda < spu.sampler[unit].min_lod)
+ lambda = spu.sampler[unit].min_lod;
+ else if (lambda > spu.sampler[unit].max_lod)
+ lambda = spu.sampler[unit].max_lod;
+
+ if (lambda <= 0.0f) {
+ /* magnify */
+ spu.mag_sample_texture_2d[unit](s, t, unit, 0, face, colors);
+ }
+ else {
+ /* minify */
+ if (spu.sampler[unit].min_img_filter == PIPE_TEX_FILTER_LINEAR) {
+ /* sample two mipmap levels and interpolate */
+ int level = (int) lambda;
+ if (level > (int) spu.texture[unit].max_level)
+ level = spu.texture[unit].max_level;
+ spu.min_sample_texture_2d[unit](s, t, unit, level, face, colors);
+ if (spu.sampler[unit].min_img_filter == PIPE_TEX_FILTER_LINEAR) {
+ /* sample second mipmap level */
+ float weight = lambda - (float) level;
+ level++;
+ if (level <= (int) spu.texture[unit].max_level) {
+ vector float colors2[4];
+ spu.min_sample_texture_2d[unit](s, t, unit, level, face, colors2);
+ blend_colors(colors, colors2, weight);
+ }
+ }
+ }
+ else {
+ /* sample one mipmap level */
+ int level = (int) (lambda + 0.5f);
+ if (level > (int) spu.texture[unit].max_level)
+ level = spu.texture[unit].max_level;
+ spu.min_sample_texture_2d[unit](s, t, unit, level, face, colors);
+ }
+ }
+}
+
+
+/** XXX need a SIMD version of this */
+static unsigned
+choose_cube_face(float rx, float ry, float rz, float *newS, float *newT)
+{
+ /*
+ major axis
+ direction target sc tc ma
+ ---------- ------------------------------- --- --- ---
+ +rx TEXTURE_CUBE_MAP_POSITIVE_X_EXT -rz -ry rx
+ -rx TEXTURE_CUBE_MAP_NEGATIVE_X_EXT +rz -ry rx
+ +ry TEXTURE_CUBE_MAP_POSITIVE_Y_EXT +rx +rz ry
+ -ry TEXTURE_CUBE_MAP_NEGATIVE_Y_EXT +rx -rz ry
+ +rz TEXTURE_CUBE_MAP_POSITIVE_Z_EXT +rx -ry rz
+ -rz TEXTURE_CUBE_MAP_NEGATIVE_Z_EXT -rx -ry rz
+ */
+ const float arx = fabsf(rx);
+ const float ary = fabsf(ry);
+ const float arz = fabsf(rz);
+ unsigned face;
+ float sc, tc, ma;
+
+ if (arx > ary && arx > arz) {
+ if (rx >= 0.0F) {
+ face = PIPE_TEX_FACE_POS_X;
+ sc = -rz;
+ tc = -ry;
+ ma = arx;
+ }
+ else {
+ face = PIPE_TEX_FACE_NEG_X;
+ sc = rz;
+ tc = -ry;
+ ma = arx;
+ }
+ }
+ else if (ary > arx && ary > arz) {
+ if (ry >= 0.0F) {
+ face = PIPE_TEX_FACE_POS_Y;
+ sc = rx;
+ tc = rz;
+ ma = ary;
+ }
+ else {
+ face = PIPE_TEX_FACE_NEG_Y;
+ sc = rx;
+ tc = -rz;
+ ma = ary;
+ }
+ }
+ else {
+ if (rz > 0.0F) {
+ face = PIPE_TEX_FACE_POS_Z;
+ sc = rx;
+ tc = -ry;
+ ma = arz;
+ }
+ else {
+ face = PIPE_TEX_FACE_NEG_Z;
+ sc = -rx;
+ tc = -ry;
+ ma = arz;
+ }
+ }
+
+ *newS = (sc / ma + 1.0F) * 0.5F;
+ *newT = (tc / ma + 1.0F) * 0.5F;
+
+ return face;
+}
+
+
+
+void
+sample_texture_cube(vector float s, vector float t, vector float r,
+ uint unit, vector float colors[4])
+{
+ uint p, faces[4], level = 0;
+ float newS[4], newT[4];
+
+ /* Compute cube faces referenced by the four sets of texcoords.
+ * XXX we should SIMD-ize this.
+ */
+ for (p = 0; p < 4; p++) {
+ float rx = spu_extract(s, p);
+ float ry = spu_extract(t, p);
+ float rz = spu_extract(r, p);
+ faces[p] = choose_cube_face(rx, ry, rz, &newS[p], &newT[p]);
+ }
+
+ if (faces[0] == faces[1] &&
+ faces[0] == faces[2] &&
+ faces[0] == faces[3]) {
+ /* GOOD! All four texcoords refer to the same cube face */
+ s = (vector float) {newS[0], newS[1], newS[2], newS[3]};
+ t = (vector float) {newT[0], newT[1], newT[2], newT[3]};
+ spu.sample_texture_2d[unit](s, t, unit, level, faces[0], colors);
+ }
+ else {
+ /* BAD! The four texcoords refer to different faces */
+ for (p = 0; p < 4; p++) {
+ vector float c[4];
+
+ spu.sample_texture_2d[unit](spu_splats(newS[p]), spu_splats(newT[p]),
+ unit, level, faces[p], c);
+
+ float red = spu_extract(c[0], p);
+ float green = spu_extract(c[1], p);
+ float blue = spu_extract(c[2], p);
+ float alpha = spu_extract(c[3], p);
+
+ colors[0] = spu_insert(red, colors[0], p);
+ colors[1] = spu_insert(green, colors[1], p);
+ colors[2] = spu_insert(blue, colors[2], p);
+ colors[3] = spu_insert(alpha, colors[3], p);
+ }
+ }
+}
diff --git a/src/gallium/drivers/cell/spu/spu_texture.h b/src/gallium/drivers/cell/spu/spu_texture.h
new file mode 100644
index 0000000000..7b75b007b5
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_texture.h
@@ -0,0 +1,67 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef SPU_TEXTURE_H
+#define SPU_TEXTURE_H
+
+
+#include "pipe/p_compiler.h"
+
+
+extern void
+invalidate_tex_cache(void);
+
+
+extern void
+sample_texture_2d_nearest(vector float s, vector float t,
+ uint unit, uint level, uint face,
+ vector float colors[4]);
+
+
+extern void
+sample_texture_2d_bilinear(vector float s, vector float t,
+ uint unit, uint level, uint face,
+ vector float colors[4]);
+
+extern void
+sample_texture_2d_bilinear_int(vector float s, vector float t,
+ uint unit, uint level, uint face,
+ vector float colors[4]);
+
+
+extern void
+sample_texture_2d_lod(vector float s, vector float t,
+ uint unit, uint level, uint face,
+ vector float colors[4]);
+
+
+extern void
+sample_texture_cube(vector float s, vector float t, vector float r,
+ uint unit, vector float colors[4]);
+
+
+#endif /* SPU_TEXTURE_H */
diff --git a/src/gallium/drivers/cell/spu/spu_tile.c b/src/gallium/drivers/cell/spu/spu_tile.c
new file mode 100644
index 0000000000..6905015a48
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_tile.c
@@ -0,0 +1,126 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+
+#include "spu_tile.h"
+#include "spu_main.h"
+
+
+/**
+ * Get tile of color or Z values from main memory, put into SPU memory.
+ */
+void
+get_tile(uint tx, uint ty, tile_t *tile, int tag, int zBuf)
+{
+ const uint offset = ty * spu.fb.width_tiles + tx;
+ const uint bytesPerTile = TILE_SIZE * TILE_SIZE * (zBuf ? spu.fb.zsize : 4);
+ const ubyte *src = zBuf ? spu.fb.depth_start : spu.fb.color_start;
+
+ src += offset * bytesPerTile;
+
+ ASSERT(tx < spu.fb.width_tiles);
+ ASSERT(ty < spu.fb.height_tiles);
+ ASSERT_ALIGN16(tile);
+ /*
+ printf("get_tile: dest: %p src: 0x%x size: %d\n",
+ tile, (unsigned int) src, bytesPerTile);
+ */
+ mfc_get(tile->ui, /* dest in local memory */
+ (unsigned int) src, /* src in main memory */
+ bytesPerTile,
+ tag,
+ 0, /* tid */
+ 0 /* rid */);
+}
+
+
+/**
+ * Move tile of color or Z values from SPU memory to main memory.
+ */
+void
+put_tile(uint tx, uint ty, const tile_t *tile, int tag, int zBuf)
+{
+ const uint offset = ty * spu.fb.width_tiles + tx;
+ const uint bytesPerTile = TILE_SIZE * TILE_SIZE * (zBuf ? spu.fb.zsize : 4);
+ ubyte *dst = zBuf ? spu.fb.depth_start : spu.fb.color_start;
+
+ dst += offset * bytesPerTile;
+
+ ASSERT(tx < spu.fb.width_tiles);
+ ASSERT(ty < spu.fb.height_tiles);
+ ASSERT_ALIGN16(tile);
+ /*
+ printf("SPU %u: put_tile: src: %p dst: 0x%x size: %d\n",
+ spu.init.id,
+ tile, (unsigned int) dst, bytesPerTile);
+ */
+ mfc_put((void *) tile->ui, /* src in local memory */
+ (unsigned int) dst, /* dst in main memory */
+ bytesPerTile,
+ tag,
+ 0, /* tid */
+ 0 /* rid */);
+}
+
+
+/**
+ * For tiles whose status is TILE_STATUS_CLEAR, write solid-filled
+ * tiles back to the main framebuffer.
+ */
+void
+really_clear_tiles(uint surfaceIndex)
+{
+ const uint num_tiles = spu.fb.width_tiles * spu.fb.height_tiles;
+ uint i;
+
+ if (surfaceIndex == 0) {
+ clear_c_tile(&spu.ctile);
+
+ for (i = spu.init.id; i < num_tiles; i += spu.init.num_spus) {
+ uint tx = i % spu.fb.width_tiles;
+ uint ty = i / spu.fb.width_tiles;
+ if (spu.ctile_status[ty][tx] == TILE_STATUS_CLEAR) {
+ put_tile(tx, ty, &spu.ctile, TAG_SURFACE_CLEAR, 0);
+ }
+ }
+ }
+ else {
+ clear_z_tile(&spu.ztile);
+
+ for (i = spu.init.id; i < num_tiles; i += spu.init.num_spus) {
+ uint tx = i % spu.fb.width_tiles;
+ uint ty = i / spu.fb.width_tiles;
+ if (spu.ztile_status[ty][tx] == TILE_STATUS_CLEAR)
+ put_tile(tx, ty, &spu.ztile, TAG_SURFACE_CLEAR, 1);
+ }
+ }
+
+#if 0
+ wait_on_mask(1 << TAG_SURFACE_CLEAR);
+#endif
+}
diff --git a/src/gallium/drivers/cell/spu/spu_tile.h b/src/gallium/drivers/cell/spu/spu_tile.h
new file mode 100644
index 0000000000..7bfb52be8f
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_tile.h
@@ -0,0 +1,75 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef SPU_TILE_H
+#define SPU_TILE_H
+
+
+#include <libmisc.h>
+#include <spu_mfcio.h>
+#include "spu_main.h"
+#include "cell/common.h"
+
+
+
+extern void
+get_tile(uint tx, uint ty, tile_t *tile, int tag, int zBuf);
+
+extern void
+put_tile(uint tx, uint ty, const tile_t *tile, int tag, int zBuf);
+
+extern void
+really_clear_tiles(uint surfaceIndex);
+
+
+static INLINE void
+clear_c_tile(tile_t *ctile)
+{
+ memset32((uint*) ctile->ui,
+ spu.fb.color_clear_value,
+ TILE_SIZE * TILE_SIZE);
+}
+
+
+static INLINE void
+clear_z_tile(tile_t *ztile)
+{
+ if (spu.fb.zsize == 2) {
+ memset16((ushort*) ztile->us,
+ spu.fb.depth_clear_value,
+ TILE_SIZE * TILE_SIZE);
+ }
+ else {
+ ASSERT(spu.fb.zsize != 0);
+ memset32((uint*) ztile->ui,
+ spu.fb.depth_clear_value,
+ TILE_SIZE * TILE_SIZE);
+ }
+}
+
+
+#endif /* SPU_TILE_H */
diff --git a/src/gallium/drivers/cell/spu/spu_tri.c b/src/gallium/drivers/cell/spu/spu_tri.c
new file mode 100644
index 0000000000..58be001be4
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_tri.c
@@ -0,0 +1,843 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Triangle rendering within a tile.
+ */
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_format.h"
+#include "util/u_math.h"
+#include "spu_colorpack.h"
+#include "spu_main.h"
+#include "spu_shuffle.h"
+#include "spu_texture.h"
+#include "spu_tile.h"
+#include "spu_tri.h"
+
+
+/** Masks are uint[4] vectors with each element being 0 or 0xffffffff */
+typedef vector unsigned int mask_t;
+
+
+
+/**
+ * Simplified types taken from other parts of Gallium
+ */
+struct vertex_header {
+ vector float data[1];
+};
+
+
+
+/* XXX fix this */
+#undef CEILF
+#define CEILF(X) ((float) (int) ((X) + 0.99999f))
+
+
+#define QUAD_TOP_LEFT 0
+#define QUAD_TOP_RIGHT 1
+#define QUAD_BOTTOM_LEFT 2
+#define QUAD_BOTTOM_RIGHT 3
+#define MASK_TOP_LEFT (1 << QUAD_TOP_LEFT)
+#define MASK_TOP_RIGHT (1 << QUAD_TOP_RIGHT)
+#define MASK_BOTTOM_LEFT (1 << QUAD_BOTTOM_LEFT)
+#define MASK_BOTTOM_RIGHT (1 << QUAD_BOTTOM_RIGHT)
+#define MASK_ALL 0xf
+
+
+#define CHAN0 0
+#define CHAN1 1
+#define CHAN2 2
+#define CHAN3 3
+
+
+#define DEBUG_VERTS 0
+
+/**
+ * Triangle edge info
+ */
+struct edge {
+ union {
+ struct {
+ float dx; /**< X(v1) - X(v0), used only during setup */
+ float dy; /**< Y(v1) - Y(v0), used only during setup */
+ };
+ vec_float4 ds; /**< vector accessor for dx and dy */
+ };
+ float dxdy; /**< dx/dy */
+ float sx, sy; /**< first sample point coord */
+ int lines; /**< number of lines on this edge */
+};
+
+
+struct interp_coef
+{
+ vector float a0;
+ vector float dadx;
+ vector float dady;
+};
+
+
+/**
+ * Triangle setup info (derived from draw_stage).
+ * Also used for line drawing (taking some liberties).
+ */
+struct setup_stage {
+
+ /* Vertices are just an array of floats making up each attribute in
+ * turn. Currently fixed at 4 floats, but should change in time.
+ * Codegen will help cope with this.
+ */
+ union {
+ struct {
+ const struct vertex_header *vmin;
+ const struct vertex_header *vmid;
+ const struct vertex_header *vmax;
+ const struct vertex_header *vprovoke;
+ };
+ qword vertex_headers;
+ };
+
+ struct edge ebot;
+ struct edge etop;
+ struct edge emaj;
+
+ float oneOverArea; /* XXX maybe make into vector? */
+
+ uint facing;
+
+ uint tx, ty; /**< position of current tile (x, y) */
+
+ union {
+ struct {
+ int cliprect_minx;
+ int cliprect_miny;
+ int cliprect_maxx;
+ int cliprect_maxy;
+ };
+ qword cliprect;
+ };
+
+ struct interp_coef coef[PIPE_MAX_SHADER_INPUTS];
+
+ struct {
+ vec_int4 quad; /**< [0] = row0, [1] = row1; {left[0],left[1],right[0],right[1]} */
+ int y;
+ unsigned y_flags;
+ unsigned mask; /**< mask of MASK_BOTTOM/TOP_LEFT/RIGHT bits */
+ } span;
+};
+
+
+static struct setup_stage setup;
+
+
+static INLINE vector float
+splatx(vector float v)
+{
+ return spu_splats(spu_extract(v, CHAN0));
+}
+
+static INLINE vector float
+splaty(vector float v)
+{
+ return spu_splats(spu_extract(v, CHAN1));
+}
+
+static INLINE vector float
+splatz(vector float v)
+{
+ return spu_splats(spu_extract(v, CHAN2));
+}
+
+static INLINE vector float
+splatw(vector float v)
+{
+ return spu_splats(spu_extract(v, CHAN3));
+}
+
+
+/**
+ * Setup fragment shader inputs by evaluating triangle's vertex
+ * attribute coefficient info.
+ * \param x quad x pos
+ * \param y quad y pos
+ * \param fragZ returns quad Z values
+ * \param fragInputs returns fragment program inputs
+ * Note: this code could be incorporated into the fragment program
+ * itself to avoid the loop and switch.
+ */
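+/*
+ * (Explanatory note: each attribute is evaluated with its plane equation
+ *    attr(x, y) = a0 + x * dadx + y * dady
+ * at the quad's four pixel positions (x,y), (x+1,y), (x,y+1), (x+1,y+1);
+ * for INTERP_PERSPECTIVE the result is additionally multiplied by 1/w.)
+ */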
+static void
+eval_inputs(float x, float y, vector float *fragZ, vector float fragInputs[])
+{
+ static const vector float deltaX = (const vector float) {0, 1, 0, 1};
+ static const vector float deltaY = (const vector float) {0, 0, 1, 1};
+
+ const uint posSlot = 0;
+ const vector float pos = setup.coef[posSlot].a0;
+ const vector float dposdx = setup.coef[posSlot].dadx;
+ const vector float dposdy = setup.coef[posSlot].dady;
+ const vector float fragX = spu_splats(x) + deltaX;
+ const vector float fragY = spu_splats(y) + deltaY;
+ vector float fragW, wInv;
+ uint i;
+
+ *fragZ = splatz(pos) + fragX * splatz(dposdx) + fragY * splatz(dposdy);
+ fragW = splatw(pos) + fragX * splatw(dposdx) + fragY * splatw(dposdy);
+ wInv = spu_re(fragW); /* 1 / w */
+
+ /* loop over fragment program inputs */
+ for (i = 0; i < spu.vertex_info.num_attribs; i++) {
+ uint attr = i + 1;
+ enum interp_mode interp = spu.vertex_info.attrib[attr].interp_mode;
+
+ /* constant term */
+ vector float a0 = setup.coef[attr].a0;
+ vector float r0 = splatx(a0);
+ vector float r1 = splaty(a0);
+ vector float r2 = splatz(a0);
+ vector float r3 = splatw(a0);
+
+ if (interp == INTERP_LINEAR || interp == INTERP_PERSPECTIVE) {
+ /* linear term */
+ vector float dadx = setup.coef[attr].dadx;
+ vector float dady = setup.coef[attr].dady;
+ /* Use SPU intrinsics here to get slightly better code.
+ * originally: r0 += fragX * splatx(dadx) + fragY * splatx(dady);
+ */
+ r0 = spu_madd(fragX, splatx(dadx), spu_madd(fragY, splatx(dady), r0));
+ r1 = spu_madd(fragX, splaty(dadx), spu_madd(fragY, splaty(dady), r1));
+ r2 = spu_madd(fragX, splatz(dadx), spu_madd(fragY, splatz(dady), r2));
+ r3 = spu_madd(fragX, splatw(dadx), spu_madd(fragY, splatw(dady), r3));
+ if (interp == INTERP_PERSPECTIVE) {
+ /* perspective term */
+ r0 *= wInv;
+ r1 *= wInv;
+ r2 *= wInv;
+ r3 *= wInv;
+ }
+ }
+ fragInputs[CHAN0] = r0;
+ fragInputs[CHAN1] = r1;
+ fragInputs[CHAN2] = r2;
+ fragInputs[CHAN3] = r3;
+ fragInputs += 4;
+ }
+}
+
+
+/**
+ * Emit a quad (pass to next stage). No clipping is done.
+ * Note: about 1/5 to 1/7 of the time, mask is zero and this function
+ * should be skipped. But adding the test for that slows things down
+ * overall.
+ */
+static INLINE void
+emit_quad( int x, int y, mask_t mask)
+{
+ /* If any bits in mask are set... */
+ if (spu_extract(spu_orx(mask), 0)) {
+ const int ix = x - setup.cliprect_minx;
+ const int iy = y - setup.cliprect_miny;
+
+ spu.cur_ctile_status = TILE_STATUS_DIRTY;
+ spu.cur_ztile_status = TILE_STATUS_DIRTY;
+
+ {
+ /*
+ * Run fragment shader, execute per-fragment ops, update fb/tile.
+ */
+ vector float inputs[4*4], outputs[2*4];
+ vector unsigned int kill_mask;
+ vector float fragZ;
+
+ eval_inputs((float) x, (float) y, &fragZ, inputs);
+
+ ASSERT(spu.fragment_program);
+ ASSERT(spu.fragment_ops);
+
+ /* Execute the current fragment program */
+ kill_mask = spu.fragment_program(inputs, outputs, spu.constants);
+
+ mask = spu_andc(mask, kill_mask);
+
+ /* Execute per-fragment/quad operations, including:
+ * alpha test, z test, stencil test, blend and framebuffer writing.
+ * Note that there are two different fragment operations functions
+ * that can be called, one for front-facing fragments, and one
+ * for back-facing fragments. (Often the two are the same;
+ * but in some cases, like two-sided stenciling, they can be
+ * very different.) So choose the correct function depending
+ * on the calculated facing.
+ */
+ spu.fragment_ops[setup.facing](ix, iy, &spu.ctile, &spu.ztile,
+ fragZ,
+ outputs[0*4+0],
+ outputs[0*4+1],
+ outputs[0*4+2],
+ outputs[0*4+3],
+ mask);
+ }
+ }
+}
+
+
+/**
+ * Given an X or Y coordinate, return the block/quad coordinate that it
+ * belongs to.
+ */
+static INLINE int
+block(int x)
+{
+ return x & ~1;
+}
+
+
+/**
+ * Render a horizontal span of quads
+ */
+static void
+flush_spans(void)
+{
+ int minleft, maxright;
+
+ const int l0 = spu_extract(setup.span.quad, 0);
+ const int l1 = spu_extract(setup.span.quad, 1);
+ const int r0 = spu_extract(setup.span.quad, 2);
+ const int r1 = spu_extract(setup.span.quad, 3);
+
+ switch (setup.span.y_flags) {
+ case 0x3:
+ /* both odd and even lines written (both quad rows) */
+ minleft = MIN2(l0, l1);
+ maxright = MAX2(r0, r1);
+ break;
+
+ case 0x1:
+ /* only even line written (quad top row) */
+ minleft = l0;
+ maxright = r0;
+ break;
+
+ case 0x2:
+ /* only odd line written (quad bottom row) */
+ minleft = l1;
+ maxright = r1;
+ break;
+
+ default:
+ return;
+ }
+
+ /* OK, we're very likely to need the tile data now.
+ * clear or finish waiting if needed.
+ */
+ if (spu.cur_ctile_status == TILE_STATUS_GETTING) {
+ /* wait for mfc_get() to complete */
+ //printf("SPU: %u: waiting for ctile\n", spu.init.id);
+ wait_on_mask(1 << TAG_READ_TILE_COLOR);
+ spu.cur_ctile_status = TILE_STATUS_CLEAN;
+ }
+ else if (spu.cur_ctile_status == TILE_STATUS_CLEAR) {
+ //printf("SPU %u: clearing C tile %u, %u\n", spu.init.id, setup.tx, setup.ty);
+ clear_c_tile(&spu.ctile);
+ spu.cur_ctile_status = TILE_STATUS_DIRTY;
+ }
+ ASSERT(spu.cur_ctile_status != TILE_STATUS_DEFINED);
+
+ if (spu.read_depth_stencil) {
+ if (spu.cur_ztile_status == TILE_STATUS_GETTING) {
+ /* wait for mfc_get() to complete */
+ //printf("SPU: %u: waiting for ztile\n", spu.init.id);
+ wait_on_mask(1 << TAG_READ_TILE_Z);
+ spu.cur_ztile_status = TILE_STATUS_CLEAN;
+ }
+ else if (spu.cur_ztile_status == TILE_STATUS_CLEAR) {
+ //printf("SPU %u: clearing Z tile %u, %u\n", spu.init.id, setup.tx, setup.ty);
+ clear_z_tile(&spu.ztile);
+ spu.cur_ztile_status = TILE_STATUS_DIRTY;
+ }
+ ASSERT(spu.cur_ztile_status != TILE_STATUS_DEFINED);
+ }
+
+ /* XXX this loop could be moved into the above switch cases... */
+
+ /* Setup for mask calculation */
+ const vec_int4 quad_LlRr = setup.span.quad;
+ const vec_int4 quad_RrLl = spu_rlqwbyte(quad_LlRr, 8);
+ const vec_int4 quad_LLll = spu_shuffle(quad_LlRr, quad_LlRr, SHUFFLE4(A,A,B,B));
+ const vec_int4 quad_RRrr = spu_shuffle(quad_RrLl, quad_RrLl, SHUFFLE4(A,A,B,B));
+
+ const vec_int4 twos = spu_splats(2);
+
+ const int x = block(minleft);
+ vec_int4 xs = {x, x+1, x, x+1};
+
+ for (; spu_extract(xs, 0) <= block(maxright); xs += twos) {
+ /**
+ * Computes mask to indicate which pixels in the 2x2 quad are actually
+ * inside the triangle's bounds.
+ */
+
+ /* Calculate ({x,x+1,x,x+1} >= {l[0],l[0],l[1],l[1]}) */
+ const mask_t gt_LLll_xs = spu_cmpgt(quad_LLll, xs);
+ const mask_t gte_xs_LLll = spu_nand(gt_LLll_xs, gt_LLll_xs);
+
+ /* Calculate ({r[0],r[0],r[1],r[1]} > {x,x+1,x,x+1}) */
+ const mask_t gt_RRrr_xs = spu_cmpgt(quad_RRrr, xs);
+
+ /* Combine results to create mask */
+ const mask_t mask = spu_and(gte_xs_LLll, gt_RRrr_xs);
+
+ emit_quad(spu_extract(xs, 0), setup.span.y, mask);
+ }
+
+ setup.span.y = 0;
+ setup.span.y_flags = 0;
+ /* Zero right elements */
+ setup.span.quad = spu_shuffle(setup.span.quad, setup.span.quad, SHUFFLE4(A,B,0,0));
+}
+
+
+#if DEBUG_VERTS
+static void
+print_vertex(const struct vertex_header *v)
+{
+ uint i;
+ fprintf(stderr, " Vertex: (%p)\n", v);
+ for (i = 0; i < spu.vertex_info.num_attribs; i++) {
+ fprintf(stderr, " %d: %f %f %f %f\n", i,
+ spu_extract(v->data[i], 0),
+ spu_extract(v->data[i], 1),
+ spu_extract(v->data[i], 2),
+ spu_extract(v->data[i], 3));
+ }
+}
+#endif
+
+/* Returns the minimum of each slot of two vec_float4s as qwords.
+ * i.e. return[n] = min(q0[n],q1[n]);
+ */
+static qword
+minfq(qword q0, qword q1)
+{
+ const qword q0q1m = si_fcgt(q0, q1);
+ return si_selb(q0, q1, q0q1m);
+}
+
+/* Returns the minimum of each slot of three vec_float4s as qwords.
+ * i.e. return[n] = min(q0[n],q1[n],q2[n]);
+ */
+static qword
+min3fq(qword q0, qword q1, qword q2)
+{
+ return minfq(minfq(q0, q1), q2);
+}
+
+/* Returns the maximum of each slot of two vec_float4s as qwords.
+ * i.e. return[n] = max(q0[n],q1[n]);
+ */
+static qword
+maxfq(qword q0, qword q1) {
+ const qword q0q1m = si_fcgt(q0, q1);
+ return si_selb(q1, q0, q0q1m);
+}
+
+/* Returns the maximum of each slot of three vec_float4s as qwords.
+ * i.e. return[n] = max(q0[n],q1[n],q2[n]);
+ */
+static qword
+max3fq(qword q0, qword q1, qword q2) {
+ return maxfq(maxfq(q0, q1), q2);
+}
+
+/**
+ * Sort vertices from top to bottom.
+ * Compute area and determine front vs. back facing.
+ * Do coarse clip test against tile bounds
+ * \return FALSE if tri is totally outside tile, TRUE otherwise
+ */
+static boolean
+setup_sort_vertices(const qword vs)
+{
+ float area, sign;
+
+#if DEBUG_VERTS
+ if (spu.init.id==0) {
+ const struct vertex_header *v0 = (const struct vertex_header *) si_to_ptr(vs);
+ const struct vertex_header *v1 = (const struct vertex_header *) si_to_ptr(si_rotqbyi(vs, 4));
+ const struct vertex_header *v2 = (const struct vertex_header *) si_to_ptr(si_rotqbyi(vs, 8));
+ fprintf(stderr, "SPU %u: Triangle:\n", spu.init.id);
+ print_vertex(v0);
+ print_vertex(v1);
+ print_vertex(v2);
+ }
+#endif
+
+ {
+ /* Load the float values for various processing... */
+ const qword f0 = (qword)(((const struct vertex_header*)si_to_ptr(vs))->data[0]);
+ const qword f1 = (qword)(((const struct vertex_header*)si_to_ptr(si_rotqbyi(vs, 4)))->data[0]);
+ const qword f2 = (qword)(((const struct vertex_header*)si_to_ptr(si_rotqbyi(vs, 8)))->data[0]);
+
+ /* Check if the triangle is completely outside the tile bounds.
+ * Find the min and max x and y positions of the three points. */
+ const qword minf = min3fq(f0, f1, f2);
+ const qword maxf = max3fq(f0, f1, f2);
+
+ /* Compare min and max against cliprect vals */
+ const qword maxsmins = si_shufb(maxf, minf, SHUFB4(A,B,a,b));
+ const qword outside = si_fcgt(maxsmins, si_csflt(setup.cliprect, 0));
+
+ /* Use a little magic to work out if the tri is visible or not */
+ if(si_to_uint(si_xori(si_gb(outside), 0xc))) return FALSE;
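+ /* (Explanatory note: 'outside' holds {max_x > clip_minx, max_y > clip_miny,
+ * min_x > clip_maxx, min_y > clip_maxy}; a bounding box that touches the
+ * tile gathers to exactly 0xc, so the XOR above is zero iff the triangle's
+ * bounds overlap the tile.) */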
+
+ /* determine bottom to top order of vertices */
+ /* A table of shuffle patterns for putting vertex_header pointers into
+ correct order. Quite magical. */
+ const qword sort_order_patterns[] = {
+ SHUFB4(A,B,C,C),
+ SHUFB4(C,A,B,C),
+ SHUFB4(A,C,B,C),
+ SHUFB4(B,C,A,C),
+ SHUFB4(B,A,C,C),
+ SHUFB4(C,B,A,C) };
+
+ /* Collate y values into two vectors for comparison.
+ Using only one shuffle constant! ;) */
+ const qword y_02_ = si_shufb(f0, f2, SHUFB4(0,B,b,C));
+ const qword y_10_ = si_shufb(f1, f0, SHUFB4(0,B,b,C));
+ const qword y_012 = si_shufb(y_02_, f1, SHUFB4(0,B,b,C));
+ const qword y_120 = si_shufb(y_10_, f2, SHUFB4(0,B,b,C));
+
+ /* Perform comparison: {y0,y1,y2} > {y1,y2,y0} */
+ const qword compare = si_fcgt(y_012, y_120);
+ /* Compress the result of the comparison into 4 bits */
+ const qword gather = si_gb(compare);
+ /* Subtract one to attain the index into the LUT. Magical. */
+ const unsigned int index = si_to_uint(gather) - 1;
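+ /* Worked example (illustrative): for y1 < y0 < y2 the comparison yields
+ * {0, y0>y1, y1>y2, y2>y0} = {0,1,0,1}, so gather = 0b0101 = 5 and
+ * index = 4, selecting SHUFB4(B,A,C,C): vmin = v1, vmid = v0, vmax = v2. */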
+
+ /* Load the appropriate pattern and construct the desired vector. */
+ setup.vertex_headers = si_shufb(vs, vs, sort_order_patterns[index]);
+
+ /* Using the result of the comparison, set sign.
+ Very magical. */
+ sign = ((si_to_uint(si_cntb(gather)) == 2) ? 1.0f : -1.0f);
+ }
+
+ setup.ebot.ds = spu_sub(setup.vmid->data[0], setup.vmin->data[0]);
+ setup.emaj.ds = spu_sub(setup.vmax->data[0], setup.vmin->data[0]);
+ setup.etop.ds = spu_sub(setup.vmax->data[0], setup.vmid->data[0]);
+
+ /*
+ * Compute triangle's area. Use 1/area to compute partial
+ * derivatives of attributes later.
+ */
+ area = setup.emaj.dx * setup.ebot.dy - setup.ebot.dx * setup.emaj.dy;
+
+ setup.oneOverArea = 1.0f / area;
+
+ /* The product of area * sign indicates front/back orientation (0/1).
+ * Just in case someone gets the bright idea of switching the front
+ * and back constants without noticing that we're assuming their
+ * values in this operation, also assert that the values are
+ * what we think they are.
+ */
+ ASSERT(CELL_FACING_FRONT == 0);
+ ASSERT(CELL_FACING_BACK == 1);
+ setup.facing = (area * sign > 0.0f)
+ ^ (spu.rasterizer.front_winding == PIPE_WINDING_CW);
+
+ return TRUE;
+}
+
+
+/**
+ * Compute a0 for a constant-valued coefficient (GL_FLAT shading).
+ * The value comes from the provoking vertex's data[slot].
+ * The result will be put into setup.coef[slot].a0.
+ * \param slot which attribute slot
+ */
+static INLINE void
+const_coeff4(uint slot)
+{
+ setup.coef[slot].dadx = (vector float) {0.0, 0.0, 0.0, 0.0};
+ setup.coef[slot].dady = (vector float) {0.0, 0.0, 0.0, 0.0};
+ setup.coef[slot].a0 = setup.vprovoke->data[slot];
+}
+
+
+/**
+ * As above, but set up linear interpolation for all four vector components.
+ */
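+/*
+ * (Explanatory note: this is the usual plane-equation setup.  With
+ * da_bot = a(vmid) - a(vmin) and da_maj = a(vmax) - a(vmin):
+ *    dadx = (ebot.dy * da_maj - da_bot * emaj.dy) / area
+ *    dady = (emaj.dx * da_bot - da_maj * ebot.dx) / area
+ *    a0   = a(vmin) - dadx * x(vmin) - dady * y(vmin)
+ * evaluated for all four attribute components at once, with the half-pixel
+ * center adjustment applied to x(vmin) and y(vmin).)
+ */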
+static INLINE void
+tri_linear_coeff4(uint slot)
+{
+ const vector float vmin_d = setup.vmin->data[slot];
+ const vector float vmid_d = setup.vmid->data[slot];
+ const vector float vmax_d = setup.vmax->data[slot];
+ const vector float xxxx = spu_splats(spu_extract(setup.vmin->data[0], 0) - 0.5f);
+ const vector float yyyy = spu_splats(spu_extract(setup.vmin->data[0], 1) - 0.5f);
+
+ vector float botda = vmid_d - vmin_d;
+ vector float majda = vmax_d - vmin_d;
+
+ vector float a = spu_sub(spu_mul(spu_splats(setup.ebot.dy), majda),
+ spu_mul(botda, spu_splats(setup.emaj.dy)));
+ vector float b = spu_sub(spu_mul(spu_splats(setup.emaj.dx), botda),
+ spu_mul(majda, spu_splats(setup.ebot.dx)));
+
+ setup.coef[slot].dadx = spu_mul(a, spu_splats(setup.oneOverArea));
+ setup.coef[slot].dady = spu_mul(b, spu_splats(setup.oneOverArea));
+
+ vector float tempx = spu_mul(setup.coef[slot].dadx, xxxx);
+ vector float tempy = spu_mul(setup.coef[slot].dady, yyyy);
+
+ setup.coef[slot].a0 = spu_sub(vmin_d, spu_add(tempx, tempy));
+}
+
+
+/**
+ * Compute a0, dadx and dady for a perspective-corrected interpolant,
+ * for a triangle.
+ * We basically multiply the vertex value by 1/w before computing
+ * the plane coefficients (a0, dadx, dady).
+ * Later, when we compute the value at a particular fragment position we'll
+ * divide the interpolated value by the interpolated W at that fragment.
+ */
+static void
+tri_persp_coeff4(uint slot)
+{
+ const vector float xxxx = spu_splats(spu_extract(setup.vmin->data[0], 0) - 0.5f);
+ const vector float yyyy = spu_splats(spu_extract(setup.vmin->data[0], 1) - 0.5f);
+
+ const vector float vmin_w = spu_splats(spu_extract(setup.vmin->data[0], 3));
+ const vector float vmid_w = spu_splats(spu_extract(setup.vmid->data[0], 3));
+ const vector float vmax_w = spu_splats(spu_extract(setup.vmax->data[0], 3));
+
+ vector float vmin_d = setup.vmin->data[slot];
+ vector float vmid_d = setup.vmid->data[slot];
+ vector float vmax_d = setup.vmax->data[slot];
+
+ vmin_d = spu_mul(vmin_d, vmin_w);
+ vmid_d = spu_mul(vmid_d, vmid_w);
+ vmax_d = spu_mul(vmax_d, vmax_w);
+
+ vector float botda = vmid_d - vmin_d;
+ vector float majda = vmax_d - vmin_d;
+
+ vector float a = spu_sub(spu_mul(spu_splats(setup.ebot.dy), majda),
+ spu_mul(botda, spu_splats(setup.emaj.dy)));
+ vector float b = spu_sub(spu_mul(spu_splats(setup.emaj.dx), botda),
+ spu_mul(majda, spu_splats(setup.ebot.dx)));
+
+ setup.coef[slot].dadx = spu_mul(a, spu_splats(setup.oneOverArea));
+ setup.coef[slot].dady = spu_mul(b, spu_splats(setup.oneOverArea));
+
+ vector float tempx = spu_mul(setup.coef[slot].dadx, xxxx);
+ vector float tempy = spu_mul(setup.coef[slot].dady, yyyy);
+
+ setup.coef[slot].a0 = spu_sub(vmin_d, spu_add(tempx, tempy));
+}
+
+
+
+/**
+ * Compute the setup.coef[] array dadx, dady, a0 values.
+ * Must be called after setup.vmin,vmid,vmax,vprovoke are initialized.
+ */
+static void
+setup_tri_coefficients(void)
+{
+ uint i;
+
+ for (i = 0; i < spu.vertex_info.num_attribs; i++) {
+ switch (spu.vertex_info.attrib[i].interp_mode) {
+ case INTERP_NONE:
+ break;
+ case INTERP_CONSTANT:
+ const_coeff4(i);
+ break;
+ case INTERP_POS:
+ /* fall-through */
+ case INTERP_LINEAR:
+ tri_linear_coeff4(i);
+ break;
+ case INTERP_PERSPECTIVE:
+ tri_persp_coeff4(i);
+ break;
+ default:
+ ASSERT(0);
+ }
+ }
+}
+
+
+static void
+setup_tri_edges(void)
+{
+ float vmin_x = spu_extract(setup.vmin->data[0], 0) + 0.5f;
+ float vmid_x = spu_extract(setup.vmid->data[0], 0) + 0.5f;
+
+ float vmin_y = spu_extract(setup.vmin->data[0], 1) - 0.5f;
+ float vmid_y = spu_extract(setup.vmid->data[0], 1) - 0.5f;
+ float vmax_y = spu_extract(setup.vmax->data[0], 1) - 0.5f;
+
+ setup.emaj.sy = CEILF(vmin_y);
+ setup.emaj.lines = (int) CEILF(vmax_y - setup.emaj.sy);
+ setup.emaj.dxdy = setup.emaj.dx / setup.emaj.dy;
+ setup.emaj.sx = vmin_x + (setup.emaj.sy - vmin_y) * setup.emaj.dxdy;
+
+ setup.etop.sy = CEILF(vmid_y);
+ setup.etop.lines = (int) CEILF(vmax_y - setup.etop.sy);
+ setup.etop.dxdy = setup.etop.dx / setup.etop.dy;
+ setup.etop.sx = vmid_x + (setup.etop.sy - vmid_y) * setup.etop.dxdy;
+
+ setup.ebot.sy = CEILF(vmin_y);
+ setup.ebot.lines = (int) CEILF(vmid_y - setup.ebot.sy);
+ setup.ebot.dxdy = setup.ebot.dx / setup.ebot.dy;
+ setup.ebot.sx = vmin_x + (setup.ebot.sy - vmin_y) * setup.ebot.dxdy;
+}
+
+
+/**
+ * Render the upper or lower half of a triangle.
+ * Scissoring/cliprect is applied here too.
+ */
+static void
+subtriangle(struct edge *eleft, struct edge *eright, unsigned lines)
+{
+ const int minx = setup.cliprect_minx;
+ const int maxx = setup.cliprect_maxx;
+ const int miny = setup.cliprect_miny;
+ const int maxy = setup.cliprect_maxy;
+ int y, start_y, finish_y;
+ int sy = (int)eleft->sy;
+
+ ASSERT((int)eleft->sy == (int) eright->sy);
+
+ /* clip top/bottom */
+ start_y = sy;
+ finish_y = sy + lines;
+
+ if (start_y < miny)
+ start_y = miny;
+
+ if (finish_y > maxy)
+ finish_y = maxy;
+
+ start_y -= sy;
+ finish_y -= sy;
+
+ /*
+ _mesa_printf("%s %d %d\n", __FUNCTION__, start_y, finish_y);
+ */
+
+ for (y = start_y; y < finish_y; y++) {
+
+ /* avoid accumulating adds as floats don't have the precision to
+ * accurately iterate large triangle edges that way. luckily we
+ * can just multiply these days.
+ *
+ * this is all drowned out by the attribute interpolation anyway.
+ */
+ int left = (int)(eleft->sx + y * eleft->dxdy);
+ int right = (int)(eright->sx + y * eright->dxdy);
+
+ /* clip left/right */
+ if (left < minx)
+ left = minx;
+ if (right > maxx)
+ right = maxx;
+
+ if (left < right) {
+ int _y = sy + y;
+ if (block(_y) != setup.span.y) {
+ flush_spans();
+ setup.span.y = block(_y);
+ }
+
+ int offset = _y&1;
+ vec_int4 quad_LlRr = {left, left, right, right};
+ /* Store left and right in 0 or 1 row of quad based on offset */
+ setup.span.quad = spu_sel(quad_LlRr, setup.span.quad, spu_maskw(5<<offset));
+ setup.span.y_flags |= 1<<offset;
+ }
+ }
+
+
+ /* save the values so that emaj can be restarted:
+ */
+ eleft->sx += lines * eleft->dxdy;
+ eright->sx += lines * eright->dxdy;
+ eleft->sy += lines;
+ eright->sy += lines;
+}
+
+
+/**
+ * Draw triangle into tile at (tx, ty) (tile coords)
+ * The tile data should have already been fetched.
+ */
+boolean
+tri_draw(const qword vs,
+ uint tx, uint ty)
+{
+ setup.tx = tx;
+ setup.ty = ty;
+
+ /* set clipping bounds to tile bounds */
+ const qword clipbase = (qword)((vec_uint4){tx, ty});
+ const qword clipmin = si_mpyui(clipbase, TILE_SIZE);
+ const qword clipmax = si_ai(clipmin, TILE_SIZE);
+ setup.cliprect = si_shufb(clipmin, clipmax, SHUFB4(A,B,a,b));
+
+ if(!setup_sort_vertices(vs)) {
+ return FALSE; /* totally clipped */
+ }
+
+ setup_tri_coefficients();
+ setup_tri_edges();
+
+ setup.span.y = 0;
+ setup.span.y_flags = 0;
+ /* Zero right elements */
+ setup.span.quad = spu_shuffle(setup.span.quad, setup.span.quad, SHUFFLE4(A,B,0,0));
+
+ if (setup.oneOverArea < 0.0) {
+ /* emaj on left */
+ subtriangle( &setup.emaj, &setup.ebot, setup.ebot.lines );
+ subtriangle( &setup.emaj, &setup.etop, setup.etop.lines );
+ }
+ else {
+ /* emaj on right */
+ subtriangle( &setup.ebot, &setup.emaj, setup.ebot.lines );
+ subtriangle( &setup.etop, &setup.emaj, setup.etop.lines );
+ }
+
+ flush_spans();
+
+ return TRUE;
+}
diff --git a/src/gallium/drivers/cell/spu/spu_tri.h b/src/gallium/drivers/cell/spu/spu_tri.h
new file mode 100644
index 0000000000..82e3b19ad7
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_tri.h
@@ -0,0 +1,37 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef SPU_TRI_H
+#define SPU_TRI_H
+
+
+extern boolean
+tri_draw(const qword vs, uint tx, uint ty);
+
+
+#endif /* SPU_TRI_H */
diff --git a/src/gallium/drivers/cell/spu/spu_util.c b/src/gallium/drivers/cell/spu/spu_util.c
new file mode 100644
index 0000000000..af25dd3718
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_util.c
@@ -0,0 +1,167 @@
+
+#include "cell/common.h"
+#include "pipe/p_shader_tokens.h"
+#include "util/u_debug.h"
+#include "tgsi/tgsi_parse.h"
+//#include "tgsi_build.h"
+#include "tgsi/tgsi_util.h"
+
+unsigned
+tgsi_util_get_src_register_swizzle(
+ const struct tgsi_src_register *reg,
+ unsigned component )
+{
+ switch( component ) {
+ case 0:
+ return reg->SwizzleX;
+ case 1:
+ return reg->SwizzleY;
+ case 2:
+ return reg->SwizzleZ;
+ case 3:
+ return reg->SwizzleW;
+ default:
+ ASSERT( 0 );
+ }
+ return 0;
+}
+
+unsigned
+tgsi_util_get_src_register_extswizzle(
+ const struct tgsi_src_register_ext_swz *reg,
+ unsigned component )
+{
+ switch( component ) {
+ case 0:
+ return reg->ExtSwizzleX;
+ case 1:
+ return reg->ExtSwizzleY;
+ case 2:
+ return reg->ExtSwizzleZ;
+ case 3:
+ return reg->ExtSwizzleW;
+ default:
+ ASSERT( 0 );
+ }
+ return 0;
+}
+
+unsigned
+tgsi_util_get_full_src_register_extswizzle(
+ const struct tgsi_full_src_register *reg,
+ unsigned component )
+{
+ unsigned swizzle;
+
+ /*
+ * First, calculate the extended swizzle for a given channel. This will give
+ * us either a channel index into the simple swizzle or a constant 1 or 0.
+ */
+ swizzle = tgsi_util_get_src_register_extswizzle(
+ &reg->SrcRegisterExtSwz,
+ component );
+
+ ASSERT (TGSI_SWIZZLE_X == TGSI_EXTSWIZZLE_X);
+ ASSERT (TGSI_SWIZZLE_Y == TGSI_EXTSWIZZLE_Y);
+ ASSERT (TGSI_SWIZZLE_Z == TGSI_EXTSWIZZLE_Z);
+ ASSERT (TGSI_SWIZZLE_W == TGSI_EXTSWIZZLE_W);
+ ASSERT (TGSI_EXTSWIZZLE_ZERO > TGSI_SWIZZLE_W);
+ ASSERT (TGSI_EXTSWIZZLE_ONE > TGSI_SWIZZLE_W);
+
+ /*
+ * Second, calculate the simple swizzle for the unswizzled channel index.
+ * Leave the constants intact; they are not affected by the simple swizzle.
+ */
+ if( swizzle <= TGSI_SWIZZLE_W ) {
+ swizzle = tgsi_util_get_src_register_swizzle(
+ &reg->SrcRegister,
+ component );
+ }
+
+ return swizzle;
+}
+
+unsigned
+tgsi_util_get_src_register_extnegate(
+ const struct tgsi_src_register_ext_swz *reg,
+ unsigned component )
+{
+ switch( component ) {
+ case 0:
+ return reg->NegateX;
+ case 1:
+ return reg->NegateY;
+ case 2:
+ return reg->NegateZ;
+ case 3:
+ return reg->NegateW;
+ default:
+ ASSERT( 0 );
+ }
+ return 0;
+}
+
+void
+tgsi_util_set_src_register_extnegate(
+ struct tgsi_src_register_ext_swz *reg,
+ unsigned negate,
+ unsigned component )
+{
+ switch( component ) {
+ case 0:
+ reg->NegateX = negate;
+ break;
+ case 1:
+ reg->NegateY = negate;
+ break;
+ case 2:
+ reg->NegateZ = negate;
+ break;
+ case 3:
+ reg->NegateW = negate;
+ break;
+ default:
+ ASSERT( 0 );
+ }
+}
+
+unsigned
+tgsi_util_get_full_src_register_sign_mode(
+ const struct tgsi_full_src_register *reg,
+ unsigned component )
+{
+ unsigned sign_mode;
+
+ if( reg->SrcRegisterExtMod.Absolute ) {
+ /* Consider only the post-abs negation. */
+
+ if( reg->SrcRegisterExtMod.Negate ) {
+ sign_mode = TGSI_UTIL_SIGN_SET;
+ }
+ else {
+ sign_mode = TGSI_UTIL_SIGN_CLEAR;
+ }
+ }
+ else {
+ /* Accumulate the three negations. */
+
+ unsigned negate;
+
+ negate = reg->SrcRegister.Negate;
+ if( tgsi_util_get_src_register_extnegate( &reg->SrcRegisterExtSwz, component ) ) {
+ negate = !negate;
+ }
+ if( reg->SrcRegisterExtMod.Negate ) {
+ negate = !negate;
+ }
+
+ if( negate ) {
+ sign_mode = TGSI_UTIL_SIGN_TOGGLE;
+ }
+ else {
+ sign_mode = TGSI_UTIL_SIGN_KEEP;
+ }
+ }
+
+ return sign_mode;
+}
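
The sign mode computed above is meant to be applied to a fetched source channel before the instruction uses it. A sketch of that application, assuming the TGSI_UTIL_SIGN_* values from tgsi/tgsi_util.h and <math.h> (apply_sign() is illustrative, not a function in this file):

#include <math.h>

/* Illustrative only: apply a TGSI_UTIL_SIGN_* mode to one channel. */
static float
apply_sign(float value, unsigned sign_mode)
{
   switch (sign_mode) {
   case TGSI_UTIL_SIGN_CLEAR:   return fabsf(value);    /* force positive */
   case TGSI_UTIL_SIGN_SET:     return -fabsf(value);   /* force negative */
   case TGSI_UTIL_SIGN_TOGGLE:  return -value;          /* plain negate */
   case TGSI_UTIL_SIGN_KEEP:
   default:                     return value;           /* unchanged */
   }
}
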
diff --git a/src/gallium/drivers/cell/spu/spu_vertex_fetch.c b/src/gallium/drivers/cell/spu/spu_vertex_fetch.c
new file mode 100644
index 0000000000..03375d84a5
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_vertex_fetch.c
@@ -0,0 +1,145 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * (C) Copyright IBM Corporation 2008
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Ian Romanick <idr@us.ibm.com>
+ */
+
+#include "pipe/p_state.h"
+#include "pipe/p_shader_tokens.h"
+#include "spu_exec.h"
+#include "spu_vertex_shader.h"
+#include "spu_main.h"
+#include "spu_dcache.h"
+
+typedef void (*spu_fetch_func)(qword *out, const qword *in,
+ const qword *shuffle_data);
+
+
+static const qword fetch_shuffle_data[5] ALIGN16_ATTRIB = {
+ /* Shuffle used by CVT_64_FLOAT
+ */
+ {
+ 0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ },
+
+ /* Shuffle used by CVT_8_USCALED and CVT_8_SSCALED
+ */
+ {
+ 0x00, 0x80, 0x80, 0x80, 0x01, 0x80, 0x80, 0x80,
+ 0x02, 0x80, 0x80, 0x80, 0x03, 0x80, 0x80, 0x80,
+ },
+
+ /* Shuffle used by CVT_16_USCALED and CVT_16_SSCALED
+ */
+ {
+ 0x00, 0x01, 0x80, 0x80, 0x02, 0x03, 0x80, 0x80,
+ 0x04, 0x05, 0x80, 0x80, 0x06, 0x07, 0x80, 0x80,
+ },
+
+ /* High value shuffle used by trans4x4.
+ */
+ {
+ 0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17
+ },
+
+ /* Low value shuffle used by trans4x4.
+ */
+ {
+ 0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F
+ }
+};
+
+
+/**
+ * Fetch vertex attributes for 'count' vertices.
+ */
+static void generic_vertex_fetch(struct spu_vs_context *draw,
+ struct spu_exec_machine *machine,
+ const unsigned *elts,
+ unsigned count)
+{
+ unsigned nr_attrs = draw->vertex_fetch.nr_attrs;
+ unsigned attr;
+
+ ASSERT(count <= 4);
+
+#if DRAW_DBG
+ printf("SPU: %s count = %u, nr_attrs = %u\n",
+ __FUNCTION__, count, nr_attrs);
+#endif
+
+ /* loop over vertex attributes (vertex shader inputs)
+ */
+ for (attr = 0; attr < nr_attrs; attr++) {
+ const unsigned pitch = draw->vertex_fetch.pitch[attr];
+ const uint64_t src = draw->vertex_fetch.src_ptr[attr];
+ const spu_fetch_func fetch = (spu_fetch_func)
+ (draw->vertex_fetch.code + draw->vertex_fetch.code_offset[attr]);
+ unsigned i;
+ unsigned idx;
+ const unsigned bytes_per_entry = draw->vertex_fetch.size[attr];
+ const unsigned quads_per_entry = (bytes_per_entry + 15) / 16;
+ qword in[2 * 4] ALIGN16_ATTRIB;
+
+
+ /* Fetch this attribute for each of the (up to four) vertices.
+ */
+ idx = 0;
+ for (i = 0; i < count; i++) {
+ const uint64_t addr = src + (elts[i] * pitch);
+
+#if DRAW_DBG
+ printf("SPU: fetching = 0x%llx\n", addr);
+#endif
+
+ spu_dcache_fetch_unaligned(& in[idx], addr, bytes_per_entry);
+ idx += quads_per_entry;
+ }
+
+ /* Be nice and zero out any missing vertices.
+ */
+ (void) memset(& in[idx], 0, (8 - idx) * sizeof(qword));
+
+
+ /* Convert this attribute for all four vertices to vectors of float.
+ */
+ (*fetch)(&machine->Inputs[attr].xyzw[0].q, in, fetch_shuffle_data);
+ }
+}
+
+
+void spu_update_vertex_fetch( struct spu_vs_context *draw )
+{
+ draw->vertex_fetch.fetch_func = generic_vertex_fetch;
+}
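
In the shuffle table above, selector bytes 0x00-0x1f pick bytes from the source quadwords while 0x80 yields a zero byte, so the 16-bit patterns widen each big-endian 16-bit component into the upper half of a 32-bit lane. A scalar sketch of that effect (illustrative only; the real conversion runs in the generated SPU fetch code):

/* What the CVT_16_* shuffle pattern achieves, expressed in scalar C:
 * each big-endian 16-bit component lands in the upper half of a
 * 32-bit lane, with zeros below it.
 */
static void
widen_u16_sketch(unsigned out[4], const unsigned short in[4])
{
   int i;
   for (i = 0; i < 4; i++)
      out[i] = (unsigned) in[i] << 16;
}
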
diff --git a/src/gallium/drivers/cell/spu/spu_vertex_shader.c b/src/gallium/drivers/cell/spu/spu_vertex_shader.c
new file mode 100644
index 0000000000..fbe5b34d39
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_vertex_shader.c
@@ -0,0 +1,244 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Brian Paul
+ * Ian Romanick <idr@us.ibm.com>
+ */
+
+#include <spu_mfcio.h>
+
+#include "pipe/p_state.h"
+#include "pipe/p_shader_tokens.h"
+#include "util/u_math.h"
+#include "draw/draw_private.h"
+#include "draw/draw_context.h"
+#include "cell/common.h"
+#include "spu_vertex_shader.h"
+#include "spu_exec.h"
+#include "spu_main.h"
+
+
+#define MAX_VERTEX_SIZE ((2 + PIPE_MAX_SHADER_OUTPUTS) * 4 * sizeof(float))
+
+
+#define CLIP_RIGHT_BIT 0x01
+#define CLIP_LEFT_BIT 0x02
+#define CLIP_TOP_BIT 0x04
+#define CLIP_BOTTOM_BIT 0x08
+#define CLIP_FAR_BIT 0x10
+#define CLIP_NEAR_BIT 0x20
+
+
+static INLINE float
+dot4(const float *a, const float *b)
+{
+ return (a[0]*b[0] +
+ a[1]*b[1] +
+ a[2]*b[2] +
+ a[3]*b[3]);
+}
+
+static INLINE unsigned
+compute_clipmask(const float *clip, /*const*/ float plane[][4], unsigned nr)
+{
+ unsigned mask = 0;
+ unsigned i;
+
+ /* Do the hardwired planes first:
+ */
+ if (-clip[0] + clip[3] < 0) mask |= CLIP_RIGHT_BIT;
+ if ( clip[0] + clip[3] < 0) mask |= CLIP_LEFT_BIT;
+ if (-clip[1] + clip[3] < 0) mask |= CLIP_TOP_BIT;
+ if ( clip[1] + clip[3] < 0) mask |= CLIP_BOTTOM_BIT;
+ if (-clip[2] + clip[3] < 0) mask |= CLIP_FAR_BIT;
+ if ( clip[2] + clip[3] < 0) mask |= CLIP_NEAR_BIT;
+
+ /* Followed by any remaining ones:
+ */
+ for (i = 6; i < nr; i++) {
+ if (dot4(clip, plane[i]) < 0)
+ mask |= (1<<i);
+ }
+
+ return mask;
+}
+
+
+/**
+ * Transform vertices with the current vertex program/shader
+ * Up to four vertices can be shaded at a time.
+ * \param vbuffer the input vertex data
+ * \param elts indexes of four input vertices
+ * \param count number of vertices to shade [1..4]
+ * \param vOut array of pointers to four output vertices
+ */
+static void
+run_vertex_program(struct spu_vs_context *draw,
+ unsigned elts[4], unsigned count,
+ const uint64_t *vOut)
+{
+ struct spu_exec_machine *machine = &draw->machine;
+ unsigned int j;
+
+ ALIGN16_DECL(struct spu_exec_vector, inputs, PIPE_MAX_ATTRIBS);
+ ALIGN16_DECL(struct spu_exec_vector, outputs, PIPE_MAX_ATTRIBS);
+ const float *scale = draw->viewport.scale;
+ const float *trans = draw->viewport.translate;
+
+ ASSERT(count <= 4);
+
+ machine->Processor = TGSI_PROCESSOR_VERTEX;
+
+ ASSERT_ALIGN16(draw->constants);
+ machine->Consts = (float (*)[4]) draw->constants;
+
+ machine->Inputs = ALIGN16_ASSIGN(inputs);
+ machine->Outputs = ALIGN16_ASSIGN(outputs);
+
+ spu_vertex_fetch( draw, machine, elts, count );
+
+ /* run shader */
+ spu_exec_machine_run( machine );
+
+
+ /* store machine results */
+ for (j = 0; j < count; j++) {
+ unsigned slot;
+ float x, y, z, w;
+ unsigned char buffer[sizeof(struct vertex_header)
+ + MAX_VERTEX_SIZE] ALIGN16_ATTRIB;
+ struct vertex_header *const tmpOut =
+ (struct vertex_header *) buffer;
+ const unsigned vert_size = ROUNDUP16(sizeof(struct vertex_header)
+ + (sizeof(float) * 4
+ * draw->num_vs_outputs));
+
+ mfc_get(tmpOut, vOut[j], vert_size, TAG_VERTEX_BUFFER, 0, 0);
+ wait_on_mask(1 << TAG_VERTEX_BUFFER);
+
+
+ /* Handle attr[0] (position) specially:
+ *
+ * XXX: Computing the clipmask should be done in the vertex
+ * program as a set of DP4 instructions appended to the
+ * user-provided code.
+ */
+ x = tmpOut->clip[0] = machine->Outputs[0].xyzw[0].f[j];
+ y = tmpOut->clip[1] = machine->Outputs[0].xyzw[1].f[j];
+ z = tmpOut->clip[2] = machine->Outputs[0].xyzw[2].f[j];
+ w = tmpOut->clip[3] = machine->Outputs[0].xyzw[3].f[j];
+
+ tmpOut->clipmask = compute_clipmask(tmpOut->clip, draw->plane,
+ draw->nr_planes);
+ tmpOut->edgeflag = 1;
+
+ /* divide by w */
+ w = 1.0f / w;
+ x *= w;
+ y *= w;
+ z *= w;
+
+ /* Viewport mapping */
+ tmpOut->data[0][0] = x * scale[0] + trans[0];
+ tmpOut->data[0][1] = y * scale[1] + trans[1];
+ tmpOut->data[0][2] = z * scale[2] + trans[2];
+ tmpOut->data[0][3] = w;
+
+ /* Remaining attributes are packed into sequential post-transform
+ * vertex attrib slots.
+ */
+ for (slot = 1; slot < draw->num_vs_outputs; slot++) {
+ tmpOut->data[slot][0] = machine->Outputs[slot].xyzw[0].f[j];
+ tmpOut->data[slot][1] = machine->Outputs[slot].xyzw[1].f[j];
+ tmpOut->data[slot][2] = machine->Outputs[slot].xyzw[2].f[j];
+ tmpOut->data[slot][3] = machine->Outputs[slot].xyzw[3].f[j];
+ }
+
+ mfc_put(tmpOut, vOut[j], vert_size, TAG_VERTEX_BUFFER, 0, 0);
+ } /* loop over vertices */
+}
+
+
+unsigned char immediates[(sizeof(float) * 4 * TGSI_EXEC_NUM_IMMEDIATES) + 32]
+ ALIGN16_ATTRIB;
+
+
+void
+spu_bind_vertex_shader(struct spu_vs_context *draw,
+ struct cell_shader_info *vs)
+{
+ const unsigned immediate_addr = vs->immediates;
+ const unsigned immediate_size =
+ ROUNDUP16((sizeof(float) * 4 * vs->num_immediates)
+ + (immediate_addr & 0x0f));
+
+
+ mfc_get(immediates, immediate_addr & ~0x0f, immediate_size,
+ TAG_VERTEX_BUFFER, 0, 0);
+
+ draw->machine.Instructions = (struct tgsi_full_instruction *)
+ vs->instructions;
+ draw->machine.NumInstructions = vs->num_instructions;
+
+ draw->machine.Declarations = (struct tgsi_full_declaration *)
+ vs->declarations;
+ draw->machine.NumDeclarations = vs->num_declarations;
+
+ draw->num_vs_outputs = vs->num_outputs;
+
+ /* specify the shader to interpret/execute */
+ spu_exec_machine_init(&draw->machine,
+ PIPE_MAX_SAMPLERS,
+ NULL /*samplers*/,
+ PIPE_SHADER_VERTEX);
+
+ wait_on_mask(1 << TAG_VERTEX_BUFFER);
+
+ (void) memcpy(& draw->machine.Imms, &immediates[immediate_addr & 0x0f],
+ sizeof(float) * 4 * vs->num_immediates);
+}
+
+
+void
+spu_execute_vertex_shader(struct spu_vs_context *draw,
+ const struct cell_command_vs *vs)
+{
+ unsigned i;
+
+ (void) memcpy(draw->plane, vs->plane, sizeof(float) * 4 * vs->nr_planes);
+ draw->nr_planes = vs->nr_planes;
+ draw->vertex_fetch.nr_attrs = vs->nr_attrs;
+
+ for (i = 0; i < vs->num_elts; i += 4) {
+ const unsigned batch_size = MIN2(vs->num_elts - i, 4);
+
+ run_vertex_program(draw, & vs->elts[i], batch_size, &vs->vOut[i]);
+ }
+}
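
The per-vertex finishing in run_vertex_program() boils down to a clip-space to window-space transform. A scalar sketch of that step (a hypothetical helper mirroring the code above, not part of this file):

/* Sketch: clip-space position -> NDC (divide by w) -> window space via
 * the viewport scale/translate, keeping 1/w in the fourth component.
 */
static void
finish_position_sketch(float win[4], const float clip[4],
                       const float scale[4], const float trans[4])
{
   const float oow = 1.0f / clip[3];
   win[0] = clip[0] * oow * scale[0] + trans[0];
   win[1] = clip[1] * oow * scale[1] + trans[1];
   win[2] = clip[2] * oow * scale[2] + trans[2];
   win[3] = oow;
}
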
diff --git a/src/gallium/drivers/cell/spu/spu_vertex_shader.h b/src/gallium/drivers/cell/spu/spu_vertex_shader.h
new file mode 100644
index 0000000000..4c74f5e74d
--- /dev/null
+++ b/src/gallium/drivers/cell/spu/spu_vertex_shader.h
@@ -0,0 +1,66 @@
+#ifndef SPU_VERTEX_SHADER_H
+#define SPU_VERTEX_SHADER_H
+
+#include "cell/common.h"
+#include "pipe/p_format.h"
+#include "spu_exec.h"
+
+struct spu_vs_context;
+
+typedef void (*spu_full_fetch_func)( struct spu_vs_context *draw,
+ struct spu_exec_machine *machine,
+ const unsigned *elts,
+ unsigned count );
+
+struct spu_vs_context {
+ struct pipe_viewport_state viewport;
+
+ struct {
+ uint64_t src_ptr[PIPE_MAX_ATTRIBS];
+ unsigned pitch[PIPE_MAX_ATTRIBS];
+ unsigned size[PIPE_MAX_ATTRIBS];
+ unsigned code_offset[PIPE_MAX_ATTRIBS];
+ unsigned nr_attrs;
+ boolean dirty;
+
+ spu_full_fetch_func fetch_func;
+ void *code;
+ } vertex_fetch;
+
+ /* Clip derived state:
+ */
+ float plane[12][4];
+ unsigned nr_planes;
+
+ struct spu_exec_machine machine;
+ const float (*constants)[4];
+
+ unsigned num_vs_outputs;
+};
+
+extern void spu_update_vertex_fetch(struct spu_vs_context *draw);
+
+static INLINE void spu_vertex_fetch(struct spu_vs_context *draw,
+ struct spu_exec_machine *machine,
+ const unsigned *elts,
+ unsigned count)
+{
+ if (draw->vertex_fetch.dirty) {
+ spu_update_vertex_fetch(draw);
+ draw->vertex_fetch.dirty = 0;
+ }
+
+ (*draw->vertex_fetch.fetch_func)(draw, machine, elts, count);
+}
+
+struct cell_command_vs;
+
+extern void
+spu_bind_vertex_shader(struct spu_vs_context *draw,
+ struct cell_shader_info *vs);
+
+extern void
+spu_execute_vertex_shader(struct spu_vs_context *draw,
+ const struct cell_command_vs *vs);
+
+#endif /* SPU_VERTEX_SHADER_H */
diff --git a/src/gallium/drivers/failover/Makefile b/src/gallium/drivers/failover/Makefile
new file mode 100644
index 0000000000..dfb7f5dcf6
--- /dev/null
+++ b/src/gallium/drivers/failover/Makefile
@@ -0,0 +1,11 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = failover
+
+C_SOURCES = \
+ fo_state.c \
+ fo_state_emit.c \
+ fo_context.c
+
+include ../../Makefile.template
diff --git a/src/gallium/drivers/failover/SConscript b/src/gallium/drivers/failover/SConscript
new file mode 100644
index 0000000000..f8e9b1b491
--- /dev/null
+++ b/src/gallium/drivers/failover/SConscript
@@ -0,0 +1,13 @@
+Import('*')
+
+env = env.Clone()
+
+failover = env.ConvenienceLibrary(
+ target = 'failover',
+ source = [
+ 'fo_state.c',
+ 'fo_state_emit.c',
+ 'fo_context.c',
+ ])
+
+Export('failover')
diff --git a/src/gallium/drivers/failover/fo_context.c b/src/gallium/drivers/failover/fo_context.c
new file mode 100644
index 0000000000..37184eac7b
--- /dev/null
+++ b/src/gallium/drivers/failover/fo_context.c
@@ -0,0 +1,182 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "pipe/p_defines.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "util/u_memory.h"
+#include "pipe/p_context.h"
+
+#include "fo_context.h"
+#include "fo_winsys.h"
+
+
+
+static void failover_destroy( struct pipe_context *pipe )
+{
+ struct failover_context *failover = failover_context( pipe );
+
+ free( failover );
+}
+
+
+
+static boolean failover_draw_elements( struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned prim, unsigned start, unsigned count)
+{
+ struct failover_context *failover = failover_context( pipe );
+
+ /* If there has been any state change since last time, try hardware
+ * rendering again:
+ */
+ if (failover->dirty) {
+ failover->mode = FO_HW;
+ }
+
+ /* Try hardware:
+ */
+ if (failover->mode == FO_HW) {
+ if (!failover->hw->draw_elements( failover->hw,
+ indexBuffer,
+ indexSize,
+ prim,
+ start,
+ count )) {
+
+ failover->hw->flush( failover->hw, ~0, NULL );
+ failover->mode = FO_SW;
+ }
+ }
+
+ /* Possibly try software:
+ */
+ if (failover->mode == FO_SW) {
+
+ if (failover->dirty)
+ failover_state_emit( failover );
+
+ failover->sw->draw_elements( failover->sw,
+ indexBuffer,
+ indexSize,
+ prim,
+ start,
+ count );
+
+ /* Be ready to switch back to hardware rendering without an
+ * intervening flush. This is unlikely to have much performance
+ * impact:
+ */
+ failover->sw->flush( failover->sw, ~0, NULL );
+ }
+
+ return TRUE;
+}
+
+
+static boolean failover_draw_arrays( struct pipe_context *pipe,
+ unsigned prim, unsigned start, unsigned count)
+{
+ return failover_draw_elements(pipe, NULL, 0, prim, start, count);
+}
+
+static unsigned int
+failover_is_texture_referenced( struct pipe_context *_pipe,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level)
+{
+ struct failover_context *failover = failover_context( _pipe );
+ struct pipe_context *pipe = (failover->mode == FO_HW) ?
+ failover->hw : failover->sw;
+
+ return pipe->is_texture_referenced(pipe, texture, face, level);
+}
+
+static unsigned int
+failover_is_buffer_referenced( struct pipe_context *_pipe,
+ struct pipe_buffer *buf)
+{
+ struct failover_context *failover = failover_context( _pipe );
+ struct pipe_context *pipe = (failover->mode == FO_HW) ?
+ failover->hw : failover->sw;
+
+ return pipe->is_buffer_referenced(pipe, buf);
+}
+
+struct pipe_context *failover_create( struct pipe_context *hw,
+ struct pipe_context *sw )
+{
+ struct failover_context *failover = CALLOC_STRUCT(failover_context);
+ if (failover == NULL)
+ return NULL;
+
+ failover->hw = hw;
+ failover->sw = sw;
+ failover->pipe.winsys = hw->winsys;
+ failover->pipe.screen = hw->screen;
+ failover->pipe.destroy = failover_destroy;
+#if 0
+ failover->pipe.is_format_supported = hw->is_format_supported;
+ failover->pipe.get_name = hw->get_name;
+ failover->pipe.get_vendor = hw->get_vendor;
+ failover->pipe.get_param = hw->get_param;
+ failover->pipe.get_paramf = hw->get_paramf;
+#endif
+
+ failover->pipe.draw_arrays = failover_draw_arrays;
+ failover->pipe.draw_elements = failover_draw_elements;
+ failover->pipe.clear = hw->clear;
+
+ /* No software occlusion fallback (or other optional functionality)
+ * at this point - if the hardware doesn't support it, don't
+ * advertise it to the application.
+ */
+ failover->pipe.begin_query = hw->begin_query;
+ failover->pipe.end_query = hw->end_query;
+
+ failover_init_state_functions( failover );
+
+ failover->pipe.surface_copy = hw->surface_copy;
+ failover->pipe.surface_fill = hw->surface_fill;
+
+#if 0
+ failover->pipe.texture_create = hw->texture_create;
+ failover->pipe.texture_destroy = hw->texture_destroy;
+ failover->pipe.get_tex_surface = hw->get_tex_surface;
+ failover->pipe.texture_update = hw->texture_update;
+#endif
+
+ failover->pipe.flush = hw->flush;
+ failover->pipe.is_texture_referenced = failover_is_texture_referenced;
+ failover->pipe.is_buffer_referenced = failover_is_buffer_referenced;
+
+ failover->dirty = 0;
+
+ return &failover->pipe;
+}
+
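A hedged usage sketch of how a winsys might wire this up (wire_up_failover_sketch() is illustrative, not part of this tree):

/* Illustrative only: wrap a hardware pipe and a software fallback and
 * hand the combined context to the state tracker.
 */
static struct pipe_context *
wire_up_failover_sketch(struct pipe_context *hw, struct pipe_context *sw)
{
   struct pipe_context *pipe = failover_create(hw, sw);
   /* From here on the state tracker talks only to 'pipe'; draws that
    * the hardware path rejects are replayed on 'sw' after the dirty
    * state is re-emitted.
    */
   return pipe;
}
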
diff --git a/src/gallium/drivers/failover/fo_context.h b/src/gallium/drivers/failover/fo_context.h
new file mode 100644
index 0000000000..9ba86ba866
--- /dev/null
+++ b/src/gallium/drivers/failover/fo_context.h
@@ -0,0 +1,125 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef FO_CONTEXT_H
+#define FO_CONTEXT_H
+
+#include "pipe/p_state.h"
+#include "pipe/p_context.h"
+
+
+
+#define FO_NEW_VIEWPORT 0x1
+#define FO_NEW_RASTERIZER 0x2
+#define FO_NEW_FRAGMENT_SHADER 0x4
+#define FO_NEW_BLEND 0x8
+#define FO_NEW_CLIP 0x10
+#define FO_NEW_SCISSOR 0x20
+#define FO_NEW_STIPPLE 0x40
+#define FO_NEW_FRAMEBUFFER 0x80
+#define FO_NEW_ALPHA_TEST 0x100
+#define FO_NEW_DEPTH_STENCIL 0x200
+#define FO_NEW_SAMPLER 0x400
+#define FO_NEW_TEXTURE 0x800
+#define FO_NEW_VERTEX 0x2000
+#define FO_NEW_VERTEX_SHADER 0x4000
+#define FO_NEW_BLEND_COLOR 0x8000
+#define FO_NEW_CLEAR_COLOR 0x10000
+#define FO_NEW_VERTEX_BUFFER 0x20000
+#define FO_NEW_VERTEX_ELEMENT 0x40000
+
+
+
+#define FO_HW 0
+#define FO_SW 1
+
+struct fo_state {
+ void *sw_state;
+ void *hw_state;
+};
+struct failover_context {
+ struct pipe_context pipe; /**< base class */
+
+
+ /* The most recent drawing state as set by the driver:
+ */
+ const struct fo_state *blend;
+ const struct fo_state *sampler[PIPE_MAX_SAMPLERS];
+ const struct fo_state *depth_stencil;
+ const struct fo_state *rasterizer;
+ const struct fo_state *fragment_shader;
+ const struct fo_state *vertex_shader;
+
+ struct pipe_blend_color blend_color;
+ struct pipe_clip_state clip;
+ struct pipe_framebuffer_state framebuffer;
+ struct pipe_poly_stipple poly_stipple;
+ struct pipe_scissor_state scissor;
+ struct pipe_texture *texture[PIPE_MAX_SAMPLERS];
+ struct pipe_viewport_state viewport;
+ struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
+ struct pipe_vertex_element vertex_elements[PIPE_MAX_ATTRIBS];
+
+ uint num_vertex_buffers;
+ uint num_vertex_elements;
+
+ void *sw_sampler_state[PIPE_MAX_SAMPLERS];
+ void *hw_sampler_state[PIPE_MAX_SAMPLERS];
+
+ unsigned dirty;
+
+ unsigned num_samplers;
+ unsigned num_textures;
+
+ unsigned mode;
+ struct pipe_context *hw;
+ struct pipe_context *sw;
+};
+
+
+
+void failover_init_state_functions( struct failover_context *failover );
+void failover_state_emit( struct failover_context *failover );
+
+static INLINE struct failover_context *
+failover_context( struct pipe_context *pipe )
+{
+ return (struct failover_context *)pipe;
+}
+
+/* Internal functions
+ */
+void
+failover_set_constant_buffer(struct pipe_context *pipe,
+ uint shader, uint index,
+ const struct pipe_constant_buffer *buf);
+
+
+#endif /* FO_CONTEXT_H */
diff --git a/src/gallium/drivers/failover/fo_state.c b/src/gallium/drivers/failover/fo_state.c
new file mode 100644
index 0000000000..c8eb926299
--- /dev/null
+++ b/src/gallium/drivers/failover/fo_state.c
@@ -0,0 +1,481 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "fo_context.h"
+
+
+/* This looks like a lot of work at the moment - we're keeping a
+ * duplicate copy of the state up-to-date.
+ *
+ * This can change in two ways:
+ * - With constant state objects we would only need to save a pointer,
+ * not the whole object.
+ * - By adding a callback in the state tracker to re-emit state. The
+ * state tracker knows the current state already and can re-emit it
+ * without additional complexity.
+ *
+ * This works as a proof-of-concept, but a final version will have
+ * lower overheads.
+ */
+
+
+
+static void *
+failover_create_blend_state( struct pipe_context *pipe,
+ const struct pipe_blend_state *blend )
+{
+ struct fo_state *state = malloc(sizeof(struct fo_state));
+ struct failover_context *failover = failover_context(pipe);
+
+ state->sw_state = failover->sw->create_blend_state(failover->sw, blend);
+ state->hw_state = failover->hw->create_blend_state(failover->hw, blend);
+
+ return state;
+}
+
+static void
+failover_bind_blend_state( struct pipe_context *pipe,
+ void *blend )
+{
+ struct failover_context *failover = failover_context(pipe);
+ struct fo_state *state = (struct fo_state *)blend;
+ failover->blend = state;
+ failover->dirty |= FO_NEW_BLEND;
+ failover->sw->bind_blend_state( failover->sw, state->sw_state );
+ failover->hw->bind_blend_state( failover->hw, state->hw_state );
+}
+
+static void
+failover_delete_blend_state( struct pipe_context *pipe,
+ void *blend )
+{
+ struct fo_state *state = (struct fo_state*)blend;
+ struct failover_context *failover = failover_context(pipe);
+
+ failover->sw->delete_blend_state(failover->sw, state->sw_state);
+ failover->hw->delete_blend_state(failover->hw, state->hw_state);
+ state->sw_state = 0;
+ state->hw_state = 0;
+ free(state);
+}
+
+static void
+failover_set_blend_color( struct pipe_context *pipe,
+ const struct pipe_blend_color *blend_color )
+{
+ struct failover_context *failover = failover_context(pipe);
+
+ failover->blend_color = *blend_color;
+ failover->dirty |= FO_NEW_BLEND_COLOR;
+ failover->sw->set_blend_color( failover->sw, blend_color );
+ failover->hw->set_blend_color( failover->hw, blend_color );
+}
+
+static void
+failover_set_clip_state( struct pipe_context *pipe,
+ const struct pipe_clip_state *clip )
+{
+ struct failover_context *failover = failover_context(pipe);
+
+ failover->clip = *clip;
+ failover->dirty |= FO_NEW_CLIP;
+ failover->sw->set_clip_state( failover->sw, clip );
+ failover->hw->set_clip_state( failover->hw, clip );
+}
+
+
+static void *
+failover_create_depth_stencil_state(struct pipe_context *pipe,
+ const struct pipe_depth_stencil_alpha_state *templ)
+{
+ struct fo_state *state = malloc(sizeof(struct fo_state));
+ struct failover_context *failover = failover_context(pipe);
+
+ state->sw_state = failover->sw->create_depth_stencil_alpha_state(failover->sw, templ);
+ state->hw_state = failover->hw->create_depth_stencil_alpha_state(failover->hw, templ);
+
+ return state;
+}
+
+static void
+failover_bind_depth_stencil_state(struct pipe_context *pipe,
+ void *depth_stencil)
+{
+ struct failover_context *failover = failover_context(pipe);
+ struct fo_state *state = (struct fo_state *)depth_stencil;
+ failover->depth_stencil = state;
+ failover->dirty |= FO_NEW_DEPTH_STENCIL;
+ failover->sw->bind_depth_stencil_alpha_state(failover->sw, state->sw_state);
+ failover->hw->bind_depth_stencil_alpha_state(failover->hw, state->hw_state);
+}
+
+static void
+failover_delete_depth_stencil_state(struct pipe_context *pipe,
+ void *ds)
+{
+ struct fo_state *state = (struct fo_state*)ds;
+ struct failover_context *failover = failover_context(pipe);
+
+ failover->sw->delete_depth_stencil_alpha_state(failover->sw, state->sw_state);
+ failover->hw->delete_depth_stencil_alpha_state(failover->hw, state->hw_state);
+ state->sw_state = 0;
+ state->hw_state = 0;
+ free(state);
+}
+
+static void
+failover_set_framebuffer_state(struct pipe_context *pipe,
+ const struct pipe_framebuffer_state *framebuffer)
+{
+ struct failover_context *failover = failover_context(pipe);
+
+ failover->framebuffer = *framebuffer;
+ failover->dirty |= FO_NEW_FRAMEBUFFER;
+ failover->sw->set_framebuffer_state( failover->sw, framebuffer );
+ failover->hw->set_framebuffer_state( failover->hw, framebuffer );
+}
+
+
+static void *
+failover_create_fs_state(struct pipe_context *pipe,
+ const struct pipe_shader_state *templ)
+{
+ struct fo_state *state = malloc(sizeof(struct fo_state));
+ struct failover_context *failover = failover_context(pipe);
+
+ state->sw_state = failover->sw->create_fs_state(failover->sw, templ);
+ state->hw_state = failover->hw->create_fs_state(failover->hw, templ);
+
+ return state;
+}
+
+static void
+failover_bind_fs_state(struct pipe_context *pipe, void *fs)
+{
+ struct failover_context *failover = failover_context(pipe);
+ struct fo_state *state = (struct fo_state*)fs;
+ failover->fragment_shader = state;
+ failover->dirty |= FO_NEW_FRAGMENT_SHADER;
+ failover->sw->bind_fs_state(failover->sw, state->sw_state);
+ failover->hw->bind_fs_state(failover->hw, state->hw_state);
+}
+
+static void
+failover_delete_fs_state(struct pipe_context *pipe,
+ void *fs)
+{
+ struct fo_state *state = (struct fo_state*)fs;
+ struct failover_context *failover = failover_context(pipe);
+
+ failover->sw->delete_fs_state(failover->sw, state->sw_state);
+ failover->hw->delete_fs_state(failover->hw, state->hw_state);
+ state->sw_state = 0;
+ state->hw_state = 0;
+ free(state);
+}
+
+static void *
+failover_create_vs_state(struct pipe_context *pipe,
+ const struct pipe_shader_state *templ)
+{
+ struct fo_state *state = malloc(sizeof(struct fo_state));
+ struct failover_context *failover = failover_context(pipe);
+
+ state->sw_state = failover->sw->create_vs_state(failover->sw, templ);
+ state->hw_state = failover->hw->create_vs_state(failover->hw, templ);
+
+ return state;
+}
+
+static void
+failover_bind_vs_state(struct pipe_context *pipe,
+ void *vs)
+{
+ struct failover_context *failover = failover_context(pipe);
+
+ struct fo_state *state = (struct fo_state*)vs;
+ failover->vertex_shader = state;
+ failover->dirty |= FO_NEW_VERTEX_SHADER;
+ failover->sw->bind_vs_state(failover->sw, state->sw_state);
+ failover->hw->bind_vs_state(failover->hw, state->hw_state);
+}
+
+static void
+failover_delete_vs_state(struct pipe_context *pipe,
+ void *vs)
+{
+ struct fo_state *state = (struct fo_state*)vs;
+ struct failover_context *failover = failover_context(pipe);
+
+ failover->sw->delete_vs_state(failover->sw, state->sw_state);
+ failover->hw->delete_vs_state(failover->hw, state->hw_state);
+ state->sw_state = 0;
+ state->hw_state = 0;
+ free(state);
+}
+
+static void
+failover_set_polygon_stipple( struct pipe_context *pipe,
+ const struct pipe_poly_stipple *stipple )
+{
+ struct failover_context *failover = failover_context(pipe);
+
+ failover->poly_stipple = *stipple;
+ failover->dirty |= FO_NEW_STIPPLE;
+ failover->sw->set_polygon_stipple( failover->sw, stipple );
+ failover->hw->set_polygon_stipple( failover->hw, stipple );
+}
+
+
+static void *
+failover_create_rasterizer_state(struct pipe_context *pipe,
+ const struct pipe_rasterizer_state *templ)
+{
+ struct fo_state *state = malloc(sizeof(struct fo_state));
+ struct failover_context *failover = failover_context(pipe);
+
+ state->sw_state = failover->sw->create_rasterizer_state(failover->sw, templ);
+ state->hw_state = failover->hw->create_rasterizer_state(failover->hw, templ);
+
+ return state;
+}
+
+static void
+failover_bind_rasterizer_state(struct pipe_context *pipe,
+ void *raster)
+{
+ struct failover_context *failover = failover_context(pipe);
+
+ struct fo_state *state = (struct fo_state*)raster;
+ failover->rasterizer = state;
+ failover->dirty |= FO_NEW_RASTERIZER;
+ failover->sw->bind_rasterizer_state(failover->sw, state->sw_state);
+ failover->hw->bind_rasterizer_state(failover->hw, state->hw_state);
+}
+
+static void
+failover_delete_rasterizer_state(struct pipe_context *pipe,
+ void *raster)
+{
+ struct fo_state *state = (struct fo_state*)raster;
+ struct failover_context *failover = failover_context(pipe);
+
+ failover->sw->delete_rasterizer_state(failover->sw, state->sw_state);
+ failover->hw->delete_rasterizer_state(failover->hw, state->hw_state);
+ state->sw_state = 0;
+ state->hw_state = 0;
+ free(state);
+}
+
+
+static void
+failover_set_scissor_state( struct pipe_context *pipe,
+ const struct pipe_scissor_state *scissor )
+{
+ struct failover_context *failover = failover_context(pipe);
+
+ failover->scissor = *scissor;
+ failover->dirty |= FO_NEW_SCISSOR;
+ failover->sw->set_scissor_state( failover->sw, scissor );
+ failover->hw->set_scissor_state( failover->hw, scissor );
+}
+
+
+static void *
+failover_create_sampler_state(struct pipe_context *pipe,
+ const struct pipe_sampler_state *templ)
+{
+ struct fo_state *state = malloc(sizeof(struct fo_state));
+ struct failover_context *failover = failover_context(pipe);
+
+ state->sw_state = failover->sw->create_sampler_state(failover->sw, templ);
+ state->hw_state = failover->hw->create_sampler_state(failover->hw, templ);
+
+ return state;
+}
+
+static void
+failover_bind_sampler_states(struct pipe_context *pipe,
+ unsigned num, void **sampler)
+{
+ struct failover_context *failover = failover_context(pipe);
+ struct fo_state *state;
+ uint i;
+ assert(num <= PIPE_MAX_SAMPLERS);
+ /* Check for no-op */
+ if (num == failover->num_samplers &&
+ !memcmp(failover->sampler, sampler, num * sizeof(void *)))
+ return;
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
+ /* each element of 'sampler' is a struct fo_state pointer */
+ state = (i < num) ? (struct fo_state *) sampler[i] : NULL;
+ failover->sw_sampler_state[i] = state ? state->sw_state : NULL;
+ failover->hw_sampler_state[i] = state ? state->hw_state : NULL;
+ }
+ failover->dirty |= FO_NEW_SAMPLER;
+ failover->num_samplers = num;
+ failover->sw->bind_sampler_states(failover->sw, num,
+ failover->sw_sampler_state);
+ failover->hw->bind_sampler_states(failover->hw, num,
+ failover->hw_sampler_state);
+}
+
+static void
+failover_delete_sampler_state(struct pipe_context *pipe, void *sampler)
+{
+ struct fo_state *state = (struct fo_state*)sampler;
+ struct failover_context *failover = failover_context(pipe);
+
+ failover->sw->delete_sampler_state(failover->sw, state->sw_state);
+ failover->hw->delete_sampler_state(failover->hw, state->hw_state);
+ state->sw_state = 0;
+ state->hw_state = 0;
+ free(state);
+}
+
+
+static void
+failover_set_sampler_textures(struct pipe_context *pipe,
+ unsigned num,
+ struct pipe_texture **texture)
+{
+ struct failover_context *failover = failover_context(pipe);
+ uint i;
+
+ assert(num <= PIPE_MAX_SAMPLERS);
+
+ /* Check for no-op */
+ if (num == failover->num_textures &&
+ !memcmp(failover->texture, texture, num * sizeof(struct pipe_texture *)))
+ return;
+ for (i = 0; i < num; i++)
+ pipe_texture_reference((struct pipe_texture **) &failover->texture[i],
+ texture[i]);
+ for (i = num; i < failover->num_textures; i++)
+ pipe_texture_reference((struct pipe_texture **) &failover->texture[i],
+ NULL);
+ failover->dirty |= FO_NEW_TEXTURE;
+ failover->num_textures = num;
+ failover->sw->set_sampler_textures( failover->sw, num, texture );
+ failover->hw->set_sampler_textures( failover->hw, num, texture );
+}
+
+
+static void
+failover_set_viewport_state( struct pipe_context *pipe,
+ const struct pipe_viewport_state *viewport )
+{
+ struct failover_context *failover = failover_context(pipe);
+
+ failover->viewport = *viewport;
+ failover->dirty |= FO_NEW_VIEWPORT;
+ failover->sw->set_viewport_state( failover->sw, viewport );
+ failover->hw->set_viewport_state( failover->hw, viewport );
+}
+
+
+static void
+failover_set_vertex_buffers(struct pipe_context *pipe,
+ unsigned count,
+ const struct pipe_vertex_buffer *vertex_buffers)
+{
+ struct failover_context *failover = failover_context(pipe);
+
+ memcpy(failover->vertex_buffers, vertex_buffers,
+ count * sizeof(vertex_buffers[0]));
+ failover->dirty |= FO_NEW_VERTEX_BUFFER;
+ failover->num_vertex_buffers = count;
+ failover->sw->set_vertex_buffers( failover->sw, count, vertex_buffers );
+ failover->hw->set_vertex_buffers( failover->hw, count, vertex_buffers );
+}
+
+
+static void
+failover_set_vertex_elements(struct pipe_context *pipe,
+ unsigned count,
+ const struct pipe_vertex_element *vertex_elements)
+{
+ struct failover_context *failover = failover_context(pipe);
+
+ memcpy(failover->vertex_elements, vertex_elements,
+ count * sizeof(vertex_elements[0]));
+
+ failover->dirty |= FO_NEW_VERTEX_ELEMENT;
+ failover->num_vertex_elements = count;
+ failover->sw->set_vertex_elements( failover->sw, count, vertex_elements );
+ failover->hw->set_vertex_elements( failover->hw, count, vertex_elements );
+}
+
+void
+failover_set_constant_buffer(struct pipe_context *pipe,
+ uint shader, uint index,
+ const struct pipe_constant_buffer *buf)
+{
+ struct failover_context *failover = failover_context(pipe);
+
+ assert(shader < PIPE_SHADER_TYPES);
+ assert(index == 0);
+
+ failover->sw->set_constant_buffer(failover->sw, shader, index, buf);
+ failover->hw->set_constant_buffer(failover->hw, shader, index, buf);
+}
+
+
+void
+failover_init_state_functions( struct failover_context *failover )
+{
+ failover->pipe.create_blend_state = failover_create_blend_state;
+ failover->pipe.bind_blend_state = failover_bind_blend_state;
+ failover->pipe.delete_blend_state = failover_delete_blend_state;
+ failover->pipe.create_sampler_state = failover_create_sampler_state;
+ failover->pipe.bind_sampler_states = failover_bind_sampler_states;
+ failover->pipe.delete_sampler_state = failover_delete_sampler_state;
+ failover->pipe.create_depth_stencil_alpha_state = failover_create_depth_stencil_state;
+ failover->pipe.bind_depth_stencil_alpha_state = failover_bind_depth_stencil_state;
+ failover->pipe.delete_depth_stencil_alpha_state = failover_delete_depth_stencil_state;
+ failover->pipe.create_rasterizer_state = failover_create_rasterizer_state;
+ failover->pipe.bind_rasterizer_state = failover_bind_rasterizer_state;
+ failover->pipe.delete_rasterizer_state = failover_delete_rasterizer_state;
+ failover->pipe.create_fs_state = failover_create_fs_state;
+ failover->pipe.bind_fs_state = failover_bind_fs_state;
+ failover->pipe.delete_fs_state = failover_delete_fs_state;
+ failover->pipe.create_vs_state = failover_create_vs_state;
+ failover->pipe.bind_vs_state = failover_bind_vs_state;
+ failover->pipe.delete_vs_state = failover_delete_vs_state;
+
+ failover->pipe.set_blend_color = failover_set_blend_color;
+ failover->pipe.set_clip_state = failover_set_clip_state;
+ failover->pipe.set_framebuffer_state = failover_set_framebuffer_state;
+ failover->pipe.set_polygon_stipple = failover_set_polygon_stipple;
+ failover->pipe.set_scissor_state = failover_set_scissor_state;
+ failover->pipe.set_sampler_textures = failover_set_sampler_textures;
+ failover->pipe.set_viewport_state = failover_set_viewport_state;
+ failover->pipe.set_vertex_buffers = failover_set_vertex_buffers;
+ failover->pipe.set_vertex_elements = failover_set_vertex_elements;
+ failover->pipe.set_constant_buffer = failover_set_constant_buffer;
+}
diff --git a/src/gallium/drivers/failover/fo_state_emit.c b/src/gallium/drivers/failover/fo_state_emit.c
new file mode 100644
index 0000000000..bd4fce9d20
--- /dev/null
+++ b/src/gallium/drivers/failover/fo_state_emit.c
@@ -0,0 +1,117 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "fo_context.h"
+
+/* This looks like a lot of work at the moment - we're keeping a
+ * duplicate copy of the state up-to-date.
+ *
+ * This can change in two ways:
+ * - With constant state objects we would only need to save a pointer,
+ * not the whole object.
+ * - By adding a callback in the state tracker to re-emit state. The
+ * state tracker knows the current state already and can re-emit it
+ * without additional complexity.
+ *
+ * This works as a proof-of-concept, but a final version will have
+ * lower overheads.
+ */
+
+
+ /* Bring the software pipe up to date with the current state.
+ *
+ * With constant state objects we would probably just send all state
+ * to both rasterizers all the time?
+ */
+void
+failover_state_emit( struct failover_context *failover )
+{
+ if (failover->dirty & FO_NEW_BLEND)
+ failover->sw->bind_blend_state( failover->sw,
+ failover->blend->sw_state );
+
+ if (failover->dirty & FO_NEW_BLEND_COLOR)
+ failover->sw->set_blend_color( failover->sw, &failover->blend_color );
+
+ if (failover->dirty & FO_NEW_CLIP)
+ failover->sw->set_clip_state( failover->sw, &failover->clip );
+
+ if (failover->dirty & FO_NEW_DEPTH_STENCIL)
+ failover->sw->bind_depth_stencil_alpha_state( failover->sw,
+ failover->depth_stencil->sw_state );
+
+ if (failover->dirty & FO_NEW_FRAMEBUFFER)
+ failover->sw->set_framebuffer_state( failover->sw, &failover->framebuffer );
+
+ if (failover->dirty & FO_NEW_FRAGMENT_SHADER)
+ failover->sw->bind_fs_state( failover->sw,
+ failover->fragment_shader->sw_state );
+
+ if (failover->dirty & FO_NEW_VERTEX_SHADER)
+ failover->sw->bind_vs_state( failover->sw,
+ failover->vertex_shader->sw_state );
+
+ if (failover->dirty & FO_NEW_STIPPLE)
+ failover->sw->set_polygon_stipple( failover->sw, &failover->poly_stipple );
+
+ if (failover->dirty & FO_NEW_RASTERIZER)
+ failover->sw->bind_rasterizer_state( failover->sw,
+ failover->rasterizer->sw_state );
+
+ if (failover->dirty & FO_NEW_SCISSOR)
+ failover->sw->set_scissor_state( failover->sw, &failover->scissor );
+
+ if (failover->dirty & FO_NEW_VIEWPORT)
+ failover->sw->set_viewport_state( failover->sw, &failover->viewport );
+
+ if (failover->dirty & FO_NEW_SAMPLER) {
+ failover->sw->bind_sampler_states( failover->sw, failover->num_samplers,
+ failover->sw_sampler_state );
+ }
+
+ if (failover->dirty & FO_NEW_TEXTURE) {
+ failover->sw->set_sampler_textures( failover->sw, failover->num_textures,
+ failover->texture );
+ }
+
+ if (failover->dirty & FO_NEW_VERTEX_BUFFER) {
+ failover->sw->set_vertex_buffers( failover->sw,
+ failover->num_vertex_buffers,
+ failover->vertex_buffers );
+ }
+
+ if (failover->dirty & FO_NEW_VERTEX_ELEMENT) {
+ failover->sw->set_vertex_elements( failover->sw,
+ failover->num_vertex_elements,
+ failover->vertex_elements );
+ }
+
+ failover->dirty = 0;
+}
diff --git a/src/gallium/drivers/failover/fo_winsys.h b/src/gallium/drivers/failover/fo_winsys.h
new file mode 100644
index 0000000000..a8ce997a1f
--- /dev/null
+++ b/src/gallium/drivers/failover/fo_winsys.h
@@ -0,0 +1,45 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef FO_WINSYS_H
+#define FO_WINSYS_H
+
+
+/* This is the interface that failover requires any window system
+ * hosting it to implement. This is the only include file in failover
+ * which is public.
+ */
+
+
+struct pipe_context;
+
+
+struct pipe_context *failover_create( struct pipe_context *hw,
+ struct pipe_context *sw );
+
+
+#endif /* FO_WINSYS_H */
diff --git a/src/gallium/drivers/i915simple/Makefile b/src/gallium/drivers/i915simple/Makefile
new file mode 100644
index 0000000000..fb533c1796
--- /dev/null
+++ b/src/gallium/drivers/i915simple/Makefile
@@ -0,0 +1,28 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = i915simple
+
+C_SOURCES = \
+ i915_blit.c \
+ i915_buffer.c \
+ i915_clear.c \
+ i915_flush.c \
+ i915_context.c \
+ i915_debug.c \
+ i915_debug_fp.c \
+ i915_state.c \
+ i915_state_immediate.c \
+ i915_state_dynamic.c \
+ i915_state_derived.c \
+ i915_state_emit.c \
+ i915_state_sampler.c \
+ i915_screen.c \
+ i915_prim_emit.c \
+ i915_prim_vbuf.c \
+ i915_texture.c \
+ i915_fpc_emit.c \
+ i915_fpc_translate.c \
+ i915_surface.c
+
+include ../../Makefile.template
diff --git a/src/gallium/drivers/i915simple/SConscript b/src/gallium/drivers/i915simple/SConscript
new file mode 100644
index 0000000000..778c4ed0fd
--- /dev/null
+++ b/src/gallium/drivers/i915simple/SConscript
@@ -0,0 +1,30 @@
+Import('*')
+
+env = env.Clone()
+
+i915simple = env.ConvenienceLibrary(
+ target = 'i915simple',
+ source = [
+ 'i915_blit.c',
+ 'i915_buffer.c',
+ 'i915_clear.c',
+ 'i915_context.c',
+ 'i915_debug.c',
+ 'i915_debug_fp.c',
+ 'i915_flush.c',
+ 'i915_fpc_emit.c',
+ 'i915_fpc_translate.c',
+ 'i915_prim_emit.c',
+ 'i915_prim_vbuf.c',
+ 'i915_screen.c',
+ 'i915_state.c',
+ 'i915_state_derived.c',
+ 'i915_state_dynamic.c',
+ 'i915_state_emit.c',
+ 'i915_state_immediate.c',
+ 'i915_state_sampler.c',
+ 'i915_surface.c',
+ 'i915_texture.c',
+ ])
+
+Export('i915simple')
diff --git a/src/gallium/drivers/i915simple/i915_batch.h b/src/gallium/drivers/i915simple/i915_batch.h
new file mode 100644
index 0000000000..b813784723
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_batch.h
@@ -0,0 +1,47 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef I915_BATCH_H
+#define I915_BATCH_H
+
+#include "intel_batchbuffer.h"
+
+#define BEGIN_BATCH(dwords, relocs) \
+ (intel_batchbuffer_check(i915->batch, dwords, relocs))
+
+#define OUT_BATCH(dword) \
+ intel_batchbuffer_dword(i915->batch, dword)
+
+#define OUT_RELOC(buf, usage, offset) \
+ intel_batchbuffer_reloc(i915->batch, buf, usage, offset)
+
+#define FLUSH_BATCH(fence) do { \
+ intel_batchbuffer_flush(i915->batch, fence); \
+ i915->hardware_dirty = ~0; \
+} while (0)
+
+#endif
diff --git a/src/gallium/drivers/i915simple/i915_blit.c b/src/gallium/drivers/i915simple/i915_blit.c
new file mode 100644
index 0000000000..83dfc33528
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_blit.c
@@ -0,0 +1,151 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "i915_blit.h"
+#include "i915_reg.h"
+#include "i915_batch.h"
+#include "i915_debug.h"
+
+#define FILE_DEBUG_FLAG DEBUG_BLIT
+
+void
+i915_fill_blit(struct i915_context *i915,
+ unsigned cpp,
+ unsigned short dst_pitch,
+ struct intel_buffer *dst_buffer,
+ unsigned dst_offset,
+ short x, short y,
+ short w, short h,
+ unsigned color)
+{
+ unsigned BR13, CMD;
+
+
+ I915_DBG(i915,
+ "%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
+ __FUNCTION__,
+ dst_buffer, dst_pitch, dst_offset, x, y, w, h);
+
+ switch (cpp) {
+ case 1:
+ case 2:
+ case 3:
+ BR13 = (((int) dst_pitch) & 0xffff) |
+ (0xF0 << 16) | (1 << 24);
+ CMD = XY_COLOR_BLT_CMD;
+ break;
+ case 4:
+ BR13 = (((int) dst_pitch) & 0xffff) |
+ (0xF0 << 16) | (1 << 24) | (1 << 25);
+ CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
+ XY_COLOR_BLT_WRITE_RGB);
+ break;
+ default:
+ return;
+ }
+
+ if (!BEGIN_BATCH(6, 1)) {
+ FLUSH_BATCH(NULL);
+ assert(BEGIN_BATCH(6, 1));
+ }
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13);
+ OUT_BATCH((y << 16) | x);
+ OUT_BATCH(((y + h) << 16) | (x + w));
+ OUT_RELOC(dst_buffer, INTEL_USAGE_2D_TARGET, dst_offset);
+ OUT_BATCH(color);
+ FLUSH_BATCH(NULL);
+}
+
+void
+i915_copy_blit(struct i915_context *i915,
+ unsigned do_flip,
+ unsigned cpp,
+ unsigned short src_pitch,
+ struct intel_buffer *src_buffer,
+ unsigned src_offset,
+ unsigned short dst_pitch,
+ struct intel_buffer *dst_buffer,
+ unsigned dst_offset,
+ short src_x, short src_y,
+ short dst_x, short dst_y,
+ short w, short h)
+{
+ unsigned CMD, BR13;
+ int dst_y2 = dst_y + h;
+ int dst_x2 = dst_x + w;
+
+
+ I915_DBG(i915,
+ "%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
+ __FUNCTION__,
+ src_buffer, src_pitch, src_offset, src_x, src_y,
+ dst_buffer, dst_pitch, dst_offset, dst_x, dst_y, w, h);
+
+ switch (cpp) {
+ case 1:
+ case 2:
+ case 3:
+ BR13 = (((int) dst_pitch) & 0xffff) |
+ (0xCC << 16) | (1 << 24);
+ CMD = XY_SRC_COPY_BLT_CMD;
+ break;
+ case 4:
+ BR13 = (((int) dst_pitch) & 0xffff) |
+ (0xCC << 16) | (1 << 24) | (1 << 25);
+ CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
+ XY_SRC_COPY_BLT_WRITE_RGB);
+ break;
+ default:
+ return;
+ }
+
+ if (dst_y2 < dst_y || dst_x2 < dst_x) {
+ return;
+ }
+
+ /* Hardware can handle negative pitches but loses the ability to do
+ * proper overlapping blits in that case. We don't really have a
+ * need for either at this stage.
+ */
+ assert (dst_pitch > 0 && src_pitch > 0);
+
+ if (!BEGIN_BATCH(8, 2)) {
+ FLUSH_BATCH(NULL);
+ assert(BEGIN_BATCH(8, 2));
+ }
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13);
+ OUT_BATCH((dst_y << 16) | dst_x);
+ OUT_BATCH((dst_y2 << 16) | dst_x2);
+ OUT_RELOC(dst_buffer, INTEL_USAGE_2D_TARGET, dst_offset);
+ OUT_BATCH((src_y << 16) | src_x);
+ OUT_BATCH(((int) src_pitch & 0xffff));
+ OUT_RELOC(src_buffer, INTEL_USAGE_2D_SOURCE, src_offset);
+ FLUSH_BATCH(NULL);
+}
diff --git a/src/gallium/drivers/i915simple/i915_blit.h b/src/gallium/drivers/i915simple/i915_blit.h
new file mode 100644
index 0000000000..8ce3220cfd
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_blit.h
@@ -0,0 +1,55 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef I915_BLIT_H
+#define I915_BLIT_H
+
+#include "i915_context.h"
+
+extern void i915_copy_blit(struct i915_context *i915,
+ unsigned do_flip,
+ unsigned cpp,
+ unsigned short src_pitch,
+ struct intel_buffer *src_buffer,
+ unsigned src_offset,
+ unsigned short dst_pitch,
+ struct intel_buffer *dst_buffer,
+ unsigned dst_offset,
+ short srcx, short srcy,
+ short dstx, short dsty,
+ short w, short h);
+
+extern void i915_fill_blit(struct i915_context *i915,
+ unsigned cpp,
+ unsigned short dst_pitch,
+ struct intel_buffer *dst_buffer,
+ unsigned dst_offset,
+ short x, short y,
+ short w, short h, unsigned color);
+
+
+#endif
diff --git a/src/gallium/drivers/i915simple/i915_buffer.c b/src/gallium/drivers/i915simple/i915_buffer.c
new file mode 100644
index 0000000000..effeba1297
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_buffer.c
@@ -0,0 +1,136 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 Jakob Bornecrantz
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_memory.h"
+#include "i915_screen.h"
+#include "i915_buffer.h"
+
+struct intel_buffer;
+
+struct i915_buffer
+{
+ struct pipe_buffer base;
+
+ struct intel_buffer *ibuf; /** hw buffer */
+
+ void *data; /**< user and malloc data */
+ boolean own; /**< we own the data in case of malloc */
+};
+
+static INLINE struct i915_buffer *
+i915_buffer(struct pipe_buffer *buffer)
+{
+ return (struct i915_buffer *)buffer;
+}
+
+static struct pipe_buffer *
+i915_buffer_create(struct pipe_screen *screen,
+ unsigned alignment,
+ unsigned usage,
+ unsigned size)
+{
+ struct i915_buffer *buf = CALLOC_STRUCT(i915_buffer);
+
+ if (!buf)
+ return NULL;
+
+ pipe_reference_init(&buf->base.reference, 1);
+ buf->base.alignment = alignment;
+ buf->base.screen = screen;
+ buf->base.usage = usage;
+ buf->base.size = size;
+ buf->data = MALLOC(size);
+ buf->own = TRUE;
+
+ if (!buf->data)
+ goto err;
+
+ return &buf->base;
+
+err:
+ FREE(buf);
+ return NULL;
+}
+
+static struct pipe_buffer *
+i915_user_buffer_create(struct pipe_screen *screen,
+ void *ptr,
+ unsigned bytes)
+{
+ struct i915_buffer *buf = CALLOC_STRUCT(i915_buffer);
+
+ if (!buf)
+ return NULL;
+
+ pipe_reference_init(&buf->base.reference, 1);
+ buf->base.alignment = 0;
+ buf->base.screen = screen;
+ buf->base.usage = 0;
+ buf->base.size = bytes;
+ buf->data = ptr;
+ buf->own = FALSE;
+
+ return &buf->base;
+}
+
+static void *
+i915_buffer_map(struct pipe_screen *screen,
+ struct pipe_buffer *buffer,
+ unsigned usage)
+{
+ struct i915_buffer *buf = i915_buffer(buffer);
+ assert(!buf->ibuf);
+ return buf->data;
+}
+
+static void
+i915_buffer_unmap(struct pipe_screen *screen,
+ struct pipe_buffer *buffer)
+{
+ struct i915_buffer *buf = i915_buffer(buffer);
+ assert(!buf->ibuf);
+}
+
+static void
+i915_buffer_destroy(struct pipe_buffer *buffer)
+{
+ struct i915_buffer *buf = i915_buffer(buffer);
+ assert(!buf->ibuf);
+
+ if (buf->own)
+ FREE(buf->data);
+ FREE(buf);
+}
+
+void i915_init_screen_buffer_functions(struct i915_screen *screen)
+{
+ screen->base.buffer_create = i915_buffer_create;
+ screen->base.user_buffer_create = i915_user_buffer_create;
+ screen->base.buffer_map = i915_buffer_map;
+ screen->base.buffer_map_range = NULL;
+ screen->base.buffer_flush_mapped_range = NULL;
+ screen->base.buffer_unmap = i915_buffer_unmap;
+ screen->base.buffer_destroy = i915_buffer_destroy;
+}
diff --git a/src/gallium/drivers/i915simple/i915_buffer.h b/src/gallium/drivers/i915simple/i915_buffer.h
new file mode 100644
index 0000000000..80fda7c62f
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_buffer.h
@@ -0,0 +1,31 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 Jakob Bornecrantz
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef I915_BUFFER_H
+#define I915_BUFFER_H
+
+void i915_init_screen_buffer_functions(struct i915_screen *screen);
+
+#endif
diff --git a/src/gallium/drivers/i915simple/i915_clear.c b/src/gallium/drivers/i915simple/i915_clear.c
new file mode 100644
index 0000000000..90530f2826
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_clear.c
@@ -0,0 +1,48 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors:
+ * Brian Paul
+ */
+
+
+#include "util/u_clear.h"
+#include "i915_context.h"
+#include "i915_state.h"
+
+
+/**
+ * Clear the given buffers to the specified values.
+ * No masking, no scissor (clear entire buffer).
+ */
+void
+i915_clear(struct pipe_context *pipe, unsigned buffers, const float *rgba,
+ double depth, unsigned stencil)
+{
+ util_clear(pipe, &i915_context(pipe)->framebuffer, buffers, rgba, depth,
+ stencil);
+}
diff --git a/src/gallium/drivers/i915simple/i915_context.c b/src/gallium/drivers/i915simple/i915_context.c
new file mode 100644
index 0000000000..b43f735245
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_context.c
@@ -0,0 +1,237 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "i915_context.h"
+#include "i915_state.h"
+#include "i915_screen.h"
+#include "i915_batch.h"
+#include "i915_texture.h"
+#include "i915_reg.h"
+
+#include "draw/draw_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_inlines.h"
+#include "util/u_memory.h"
+#include "pipe/p_screen.h"
+
+
+/*
+ * Draw functions
+ */
+
+
+static boolean
+i915_draw_range_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned min_index,
+ unsigned max_index,
+ unsigned prim, unsigned start, unsigned count)
+{
+ struct i915_context *i915 = i915_context(pipe);
+ struct draw_context *draw = i915->draw;
+ unsigned i;
+
+ if (i915->dirty)
+ i915_update_derived(i915);
+
+ /*
+ * Map vertex buffers
+ */
+ for (i = 0; i < i915->num_vertex_buffers; i++) {
+ void *buf = pipe_buffer_map(pipe->screen, i915->vertex_buffer[i].buffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ draw_set_mapped_vertex_buffer(draw, i, buf);
+ }
+
+ /*
+ * Map index buffer, if present
+ */
+ if (indexBuffer) {
+ void *mapped_indexes = pipe_buffer_map(pipe->screen, indexBuffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ draw_set_mapped_element_buffer_range(draw, indexSize,
+ min_index,
+ max_index,
+ mapped_indexes);
+ } else {
+ draw_set_mapped_element_buffer(draw, 0, NULL);
+ }
+
+
+ draw_set_mapped_constant_buffer(draw,
+ i915->current.constants[PIPE_SHADER_VERTEX],
+ (i915->current.num_user_constants[PIPE_SHADER_VERTEX] *
+ 4 * sizeof(float)));
+
+ /*
+ * Do the drawing
+ */
+ draw_arrays(i915->draw, prim, start, count);
+
+ /*
+ * unmap vertex/index buffers
+ */
+ for (i = 0; i < i915->num_vertex_buffers; i++) {
+ pipe_buffer_unmap(pipe->screen, i915->vertex_buffer[i].buffer);
+ draw_set_mapped_vertex_buffer(draw, i, NULL);
+ }
+
+ if (indexBuffer) {
+ pipe_buffer_unmap(pipe->screen, indexBuffer);
+ draw_set_mapped_element_buffer_range(draw, 0, start, start + count - 1, NULL);
+ }
+
+ return TRUE;
+}
+
+static boolean
+i915_draw_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned prim, unsigned start, unsigned count)
+{
+ return i915_draw_range_elements(pipe, indexBuffer,
+ indexSize,
+ 0, 0xffffffff,
+ prim, start, count);
+}
+
+static boolean
+i915_draw_arrays(struct pipe_context *pipe,
+ unsigned prim, unsigned start, unsigned count)
+{
+ return i915_draw_elements(pipe, NULL, 0, prim, start, count);
+}
+
+
+/*
+ * Is referenced functions
+ */
+
+
+static unsigned int
+i915_is_texture_referenced(struct pipe_context *pipe,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level)
+{
+ /**
+ * FIXME: Return the correct result. We can't always return referenced
+ * since it causes a double flush within the vbo module.
+ */
+#if 0
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+#else
+ return 0;
+#endif
+}
+
+static unsigned int
+i915_is_buffer_referenced(struct pipe_context *pipe,
+ struct pipe_buffer *buf)
+{
+ /**
+ * FIXME: Return the correct result. We can't always return referenced
+ * since it causes a double flush within the vbo module.
+ */
+#if 0
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+#else
+ return 0;
+#endif
+}
+
+
+/*
+ * Generic context functions
+ */
+
+
+static void i915_destroy(struct pipe_context *pipe)
+{
+ struct i915_context *i915 = i915_context(pipe);
+
+ draw_destroy(i915->draw);
+
+ if(i915->batch)
+ i915->iws->batchbuffer_destroy(i915->batch);
+
+ FREE(i915);
+}
+
+struct pipe_context *
+i915_create_context(struct pipe_screen *screen)
+{
+ struct i915_context *i915;
+
+ i915 = CALLOC_STRUCT(i915_context);
+ if (i915 == NULL)
+ return NULL;
+
+ i915->iws = i915_screen(screen)->iws;
+ i915->base.winsys = NULL;
+ i915->base.screen = screen;
+
+ i915->base.destroy = i915_destroy;
+
+ i915->base.clear = i915_clear;
+
+ i915->base.draw_arrays = i915_draw_arrays;
+ i915->base.draw_elements = i915_draw_elements;
+ i915->base.draw_range_elements = i915_draw_range_elements;
+
+ i915->base.is_texture_referenced = i915_is_texture_referenced;
+ i915->base.is_buffer_referenced = i915_is_buffer_referenced;
+
+ /*
+ * Create drawing context and plug our rendering stage into it.
+ */
+ i915->draw = draw_create();
+ assert(i915->draw);
+ if (!debug_get_bool_option("I915_NO_VBUF", FALSE)) {
+ draw_set_rasterize_stage(i915->draw, i915_draw_vbuf_stage(i915));
+ } else {
+ draw_set_rasterize_stage(i915->draw, i915_draw_render_stage(i915));
+ }
+
+ i915_init_surface_functions(i915);
+ i915_init_state_functions(i915);
+ i915_init_flush_functions(i915);
+
+ draw_install_aaline_stage(i915->draw, &i915->base);
+ draw_install_aapoint_stage(i915->draw, &i915->base);
+
+ i915->dirty = ~0;
+ i915->hardware_dirty = ~0;
+
+ /* Batch stream debugging is a bit hacked up at the moment:
+ */
+ i915->batch = i915->iws->batchbuffer_create(i915->iws);
+
+ return &i915->base;
+}
diff --git a/src/gallium/drivers/i915simple/i915_context.h b/src/gallium/drivers/i915simple/i915_context.h
new file mode 100644
index 0000000000..234b441ce6
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_context.h
@@ -0,0 +1,350 @@
+ /**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef I915_CONTEXT_H
+#define I915_CONTEXT_H
+
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+
+#include "draw/draw_vertex.h"
+
+#include "tgsi/tgsi_scan.h"
+
+
+struct intel_winsys;
+struct intel_buffer;
+struct intel_batchbuffer;
+
+
+#define I915_TEX_UNITS 8
+
+#define I915_DYNAMIC_MODES4 0
+#define I915_DYNAMIC_DEPTHSCALE_0 1 /* just the header */
+#define I915_DYNAMIC_DEPTHSCALE_1 2
+#define I915_DYNAMIC_IAB 3
+#define I915_DYNAMIC_BC_0 4 /* just the header */
+#define I915_DYNAMIC_BC_1 5
+#define I915_DYNAMIC_BFO_0 6
+#define I915_DYNAMIC_BFO_1 7
+#define I915_DYNAMIC_STP_0 8
+#define I915_DYNAMIC_STP_1 9
+#define I915_DYNAMIC_SC_ENA_0 10
+#define I915_DYNAMIC_SC_RECT_0 11
+#define I915_DYNAMIC_SC_RECT_1 12
+#define I915_DYNAMIC_SC_RECT_2 13
+#define I915_MAX_DYNAMIC 14
+
+
+#define I915_IMMEDIATE_S0 0
+#define I915_IMMEDIATE_S1 1
+#define I915_IMMEDIATE_S2 2
+#define I915_IMMEDIATE_S3 3
+#define I915_IMMEDIATE_S4 4
+#define I915_IMMEDIATE_S5 5
+#define I915_IMMEDIATE_S6 6
+#define I915_IMMEDIATE_S7 7
+#define I915_MAX_IMMEDIATE 8
+
+/* These must match the order of LI0_STATE_* bits, as they will be used
+ * to generate hardware packets:
+ */
+#define I915_CACHE_STATIC 0
+#define I915_CACHE_DYNAMIC 1 /* handled specially */
+#define I915_CACHE_SAMPLER 2
+#define I915_CACHE_MAP 3
+#define I915_CACHE_PROGRAM 4
+#define I915_CACHE_CONSTANTS 5
+#define I915_MAX_CACHE 6
+
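+/*
+ * For example, given the ordering note above, the bit that selects a state
+ * block in a LOAD_INDIRECT header can be derived from the cache index,
+ * along the lines of (1 << (8 + I915_CACHE_PROGRAM)) for the program block;
+ * the actual LI0_STATE_* values live in i915_reg.h.
+ */
+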
+#define I915_MAX_CONSTANT 32
+
+
+/** See constant_flags[] below */
+#define I915_CONSTFLAG_USER 0x1f
+
+
+/**
+ * Subclass of pipe_shader_state
+ */
+struct i915_fragment_shader
+{
+ struct pipe_shader_state state;
+
+ struct tgsi_shader_info info;
+
+ uint *program;
+ uint program_len;
+
+ /**
+ * constants introduced during translation.
+ * These are placed at the end of the constant buffer and grow toward
+ * the beginning (e.g. slot 31, 30, 29, ...).
+ * User-provided constants start at 0.
+ * This allows both types of constants to co-exist (until there are too many)
+ * and doesn't require regenerating/changing the fragment program to
+ * shuffle constants around.
+ */
+ uint num_constants;
+ float constants[I915_MAX_CONSTANT][4];
+
+ /**
+ * Status of each constant
+ * if I915_CONSTFLAG_USER, the value must be taken from the corresponding
+ * slot of the user's constant buffer. (set by pipe->set_constant_buffer())
+ * Else, the bitmask indicates which components are occupied by immediates.
+ */
+ ubyte constant_flags[I915_MAX_CONSTANT];
+};
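+
+/*
+ * Minimal sketch of how a translator could use the layout described above.
+ * The helper name and policy here are hypothetical, not the driver's actual
+ * allocation code: it takes the highest slot whose constant_flags entry is
+ * still 0 (no components occupied, not a user constant) and marks all four
+ * components as immediates.
+ */
+static INLINE int
+i915_fs_alloc_immediate_example(struct i915_fragment_shader *fs,
+ const float value[4])
+{
+ int slot;
+
+ /* Walk downward from the top of the constant file. */
+ for (slot = I915_MAX_CONSTANT - 1; slot >= 0; slot--) {
+ if (fs->constant_flags[slot] == 0) {
+ fs->constants[slot][0] = value[0];
+ fs->constants[slot][1] = value[1];
+ fs->constants[slot][2] = value[2];
+ fs->constants[slot][3] = value[3];
+ fs->constant_flags[slot] = 0xf; /* all four components are immediates */
+ return slot;
+ }
+ }
+
+ return -1; /* constant file exhausted */
+}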
+
+
+struct i915_cache_context;
+
+/* Used to calculate differences between state emitted to hardware and
+ * current driver-calculated state.
+ */
+struct i915_state
+{
+ unsigned immediate[I915_MAX_IMMEDIATE];
+ unsigned dynamic[I915_MAX_DYNAMIC];
+
+ float constants[PIPE_SHADER_TYPES][I915_MAX_CONSTANT][4];
+ /** number of constants passed in through a constant buffer */
+ uint num_user_constants[PIPE_SHADER_TYPES];
+
+ /* texture sampler state */
+ unsigned sampler[I915_TEX_UNITS][3];
+ unsigned sampler_enable_flags;
+ unsigned sampler_enable_nr;
+
+ /* texture image buffers */
+ unsigned texbuffer[I915_TEX_UNITS][2];
+
+ /** Describes the current hardware vertex layout */
+ struct vertex_info vertex_info;
+
+ unsigned id; /* track lost context events */
+};
+
+struct i915_blend_state {
+ unsigned iab;
+ unsigned modes4;
+ unsigned LIS5;
+ unsigned LIS6;
+};
+
+struct i915_depth_stencil_state {
+ unsigned stencil_modes4;
+ unsigned bfo[2];
+ unsigned stencil_LIS5;
+ unsigned depth_LIS6;
+};
+
+struct i915_rasterizer_state {
+ int light_twoside : 1;
+ unsigned st;
+ enum interp_mode color_interp;
+
+ unsigned LIS4;
+ unsigned LIS7;
+ unsigned sc[1];
+
+ const struct pipe_rasterizer_state *templ;
+
+ union { float f; unsigned u; } ds[2];
+};
+
+struct i915_sampler_state {
+ unsigned state[3];
+ const struct pipe_sampler_state *templ;
+ unsigned minlod;
+ unsigned maxlod;
+};
+
+struct i915_texture {
+ struct pipe_texture base;
+
+ /* Derived from the above:
+ */
+ unsigned stride;
+ unsigned depth_stride; /* per-image on i945? */
+ unsigned total_nblocksy;
+
+ unsigned sw_tiled; /**< tiled with software flags */
+ unsigned hw_tiled; /**< tiled with hardware fences */
+
+ unsigned nr_images[PIPE_MAX_TEXTURE_LEVELS];
+
+ /* Explicitly store the offset of each image for each cube face or
+ * depth value. Pretty much have to accept that hardware formats
+ * are going to be so diverse that there is no unified way to
+ * compute the offsets of depth/cube images within a mipmap level,
+ * so have to store them as a lookup table:
+ */
+ unsigned *image_offset[PIPE_MAX_TEXTURE_LEVELS]; /**< array [depth] of offsets */
+
+ /* The data is held here:
+ */
+ struct intel_buffer *buffer;
+};
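+
+/*
+ * Sketch of the lookup described above (helper name is hypothetical, not
+ * part of the driver): the start of a given cube face or depth slice within
+ * a mipmap level is a plain table lookup rather than a computation.
+ */
+static INLINE unsigned
+i915_image_offset_example(const struct i915_texture *tex,
+ unsigned level, unsigned face_or_depth)
+{
+ /* Caller guarantees level < PIPE_MAX_TEXTURE_LEVELS and
+ * face_or_depth < tex->nr_images[level].
+ */
+ return tex->image_offset[level][face_or_depth];
+}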
+
+struct i915_context
+{
+ struct pipe_context base;
+
+ struct intel_winsys *iws;
+
+ struct draw_context *draw;
+
+ /* The most recent drawing state as set by the driver:
+ */
+ const struct i915_blend_state *blend;
+ const struct i915_sampler_state *sampler[PIPE_MAX_SAMPLERS];
+ const struct i915_depth_stencil_state *depth_stencil;
+ const struct i915_rasterizer_state *rasterizer;
+
+ struct i915_fragment_shader *fs;
+
+ struct pipe_blend_color blend_color;
+ struct pipe_clip_state clip;
+ struct pipe_constant_buffer constants[PIPE_SHADER_TYPES];
+ struct pipe_framebuffer_state framebuffer;
+ struct pipe_poly_stipple poly_stipple;
+ struct pipe_scissor_state scissor;
+ struct i915_texture *texture[PIPE_MAX_SAMPLERS];
+ struct pipe_viewport_state viewport;
+ struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
+
+ unsigned dirty;
+
+ unsigned num_samplers;
+ unsigned num_textures;
+ unsigned num_vertex_elements;
+ unsigned num_vertex_buffers;
+
+ struct intel_batchbuffer *batch;
+
+ /** Vertex buffer */
+ struct intel_buffer *vbo;
+ size_t vbo_offset;
+ unsigned vbo_flushed;
+
+ struct i915_state current;
+ unsigned hardware_dirty;
+
+ unsigned debug;
+};
+
+/* A flag for each state_tracker state object:
+ */
+#define I915_NEW_VIEWPORT 0x1
+#define I915_NEW_RASTERIZER 0x2
+#define I915_NEW_FS 0x4
+#define I915_NEW_BLEND 0x8
+#define I915_NEW_CLIP 0x10
+#define I915_NEW_SCISSOR 0x20
+#define I915_NEW_STIPPLE 0x40
+#define I915_NEW_FRAMEBUFFER 0x80
+#define I915_NEW_ALPHA_TEST 0x100
+#define I915_NEW_DEPTH_STENCIL 0x200
+#define I915_NEW_SAMPLER 0x400
+#define I915_NEW_TEXTURE 0x800
+#define I915_NEW_CONSTANTS 0x1000
+#define I915_NEW_VBO 0x2000
+#define I915_NEW_VS 0x4000
+
+
+/* Driver's internally generated state flags:
+ */
+#define I915_NEW_VERTEX_FORMAT 0x10000
+
+
+/* Dirty flags for hardware emit
+ */
+#define I915_HW_STATIC (1<<I915_CACHE_STATIC)
+#define I915_HW_DYNAMIC (1<<I915_CACHE_DYNAMIC)
+#define I915_HW_SAMPLER (1<<I915_CACHE_SAMPLER)
+#define I915_HW_MAP (1<<I915_CACHE_MAP)
+#define I915_HW_PROGRAM (1<<I915_CACHE_PROGRAM)
+#define I915_HW_CONSTANTS (1<<I915_CACHE_CONSTANTS)
+#define I915_HW_IMMEDIATE (1<<(I915_MAX_CACHE+0))
+#define I915_HW_INVARIENT (1<<(I915_MAX_CACHE+1))
+
+
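+/*
+ * Sketch of how the "current vs. emitted" split in struct i915_state is
+ * meant to be used (hypothetical helper, not the driver's emit path):
+ * freshly computed words are compared against the copy kept in
+ * i915->current, and only an actual difference raises the corresponding
+ * hardware-dirty bit.
+ */
+static INLINE void
+i915_update_dynamic_example(struct i915_context *i915,
+ unsigned slot, unsigned dword)
+{
+ /* slot is one of the I915_DYNAMIC_* indices above */
+ if (i915->current.dynamic[slot] != dword) {
+ i915->current.dynamic[slot] = dword;
+ i915->hardware_dirty |= I915_HW_DYNAMIC;
+ }
+}
+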
+/***********************************************************************
+ * i915_prim_emit.c:
+ */
+struct draw_stage *i915_draw_render_stage( struct i915_context *i915 );
+
+
+/***********************************************************************
+ * i915_prim_vbuf.c:
+ */
+struct draw_stage *i915_draw_vbuf_stage( struct i915_context *i915 );
+
+
+/***********************************************************************
+ * i915_state_emit.c:
+ */
+void i915_emit_hardware_state(struct i915_context *i915 );
+
+
+
+/***********************************************************************
+ * i915_clear.c:
+ */
+void i915_clear( struct pipe_context *pipe, unsigned buffers, const float *rgba,
+ double depth, unsigned stencil);
+
+
+/***********************************************************************
+ * i915_surface.c:
+ */
+void i915_init_surface_functions( struct i915_context *i915 );
+
+void i915_init_state_functions( struct i915_context *i915 );
+void i915_init_flush_functions( struct i915_context *i915 );
+void i915_init_string_functions( struct i915_context *i915 );
+
+
+
+
+/***********************************************************************
+ * Inline conversion functions. These are better-typed than the
+ * macros used previously:
+ */
+static INLINE struct i915_context *
+i915_context( struct pipe_context *pipe )
+{
+ return (struct i915_context *)pipe;
+}
+
+
+
+#endif
diff --git a/src/gallium/drivers/i915simple/i915_debug.c b/src/gallium/drivers/i915simple/i915_debug.c
new file mode 100644
index 0000000000..ce92d1af9a
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_debug.c
@@ -0,0 +1,898 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "i915_reg.h"
+#include "i915_context.h"
+#include "i915_debug.h"
+#include "i915_batch.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "util/u_debug.h"
+
+
+static void
+PRINTF(
+ struct debug_stream *stream,
+ const char *fmt,
+ ... )
+{
+ va_list args;
+
+ va_start( args, fmt );
+ debug_vprintf( fmt, args );
+ va_end( args );
+}
+
+
+static boolean debug( struct debug_stream *stream, const char *name, unsigned len )
+{
+ unsigned i;
+ unsigned *ptr = (unsigned *)(stream->ptr + stream->offset);
+
+ if (len == 0) {
+ PRINTF(stream, "Error - zero length packet (0x%08x)\n", stream->ptr[0]);
+ assert(0);
+ return FALSE;
+ }
+
+ if (stream->print_addresses)
+ PRINTF(stream, "%08x: ", stream->offset);
+
+
+ PRINTF(stream, "%s (%d dwords):\n", name, len);
+ for (i = 0; i < len; i++)
+ PRINTF(stream, "\t0x%08x\n", ptr[i]);
+ PRINTF(stream, "\n");
+
+ stream->offset += len * sizeof(unsigned);
+
+ return TRUE;
+}
+
+
+static const char *get_prim_name( unsigned val )
+{
+ switch (val & PRIM3D_MASK) {
+ case PRIM3D_TRILIST: return "TRILIST"; break;
+ case PRIM3D_TRISTRIP: return "TRISTRIP"; break;
+ case PRIM3D_TRISTRIP_RVRSE: return "TRISTRIP_RVRSE"; break;
+ case PRIM3D_TRIFAN: return "TRIFAN"; break;
+ case PRIM3D_POLY: return "POLY"; break;
+ case PRIM3D_LINELIST: return "LINELIST"; break;
+ case PRIM3D_LINESTRIP: return "LINESTRIP"; break;
+ case PRIM3D_RECTLIST: return "RECTLIST"; break;
+ case PRIM3D_POINTLIST: return "POINTLIST"; break;
+ case PRIM3D_DIB: return "DIB"; break;
+ case PRIM3D_CLEAR_RECT: return "CLEAR_RECT"; break;
+ case PRIM3D_ZONE_INIT: return "ZONE_INIT"; break;
+ default: return "????"; break;
+ }
+}
+
+static boolean debug_prim( struct debug_stream *stream, const char *name,
+ boolean dump_floats,
+ unsigned len )
+{
+ unsigned *ptr = (unsigned *)(stream->ptr + stream->offset);
+ const char *prim = get_prim_name( ptr[0] );
+ unsigned i;
+
+
+
+ PRINTF(stream, "%s %s (%d dwords):\n", name, prim, len);
+ PRINTF(stream, "\t0x%08x\n", ptr[0]);
+ for (i = 1; i < len; i++) {
+ if (dump_floats)
+ PRINTF(stream, "\t0x%08x // %f\n", ptr[i], *(float *)&ptr[i]);
+ else
+ PRINTF(stream, "\t0x%08x\n", ptr[i]);
+ }
+
+
+ PRINTF(stream, "\n");
+
+ stream->offset += len * sizeof(unsigned);
+
+ return TRUE;
+}
+
+
+
+
+static boolean debug_program( struct debug_stream *stream, const char *name, unsigned len )
+{
+ unsigned *ptr = (unsigned *)(stream->ptr + stream->offset);
+
+ if (len == 0) {
+ PRINTF(stream, "Error - zero length packet (0x%08x)\n", stream->ptr[0]);
+ assert(0);
+ return FALSE;
+ }
+
+ if (stream->print_addresses)
+ PRINTF(stream, "%08x: ", stream->offset);
+
+ PRINTF(stream, "%s (%d dwords):\n", name, len);
+ i915_disassemble_program( stream, ptr, len );
+
+ stream->offset += len * sizeof(unsigned);
+ return TRUE;
+}
+
+
+static boolean debug_chain( struct debug_stream *stream, const char *name, unsigned len )
+{
+ unsigned *ptr = (unsigned *)(stream->ptr + stream->offset);
+ unsigned old_offset = stream->offset + len * sizeof(unsigned);
+ unsigned i;
+
+ PRINTF(stream, "%s (%d dwords):\n", name, len);
+ for (i = 0; i < len; i++)
+ PRINTF(stream, "\t0x%08x\n", ptr[i]);
+
+ stream->offset = ptr[1] & ~0x3;
+
+ if (stream->offset < old_offset)
+ PRINTF(stream, "\n... skipping backwards from 0x%x --> 0x%x ...\n\n",
+ old_offset, stream->offset );
+ else
+ PRINTF(stream, "\n... skipping from 0x%x --> 0x%x ...\n\n",
+ old_offset, stream->offset );
+
+
+ return TRUE;
+}
+
+
+static boolean debug_variable_length_prim( struct debug_stream *stream )
+{
+ unsigned *ptr = (unsigned *)(stream->ptr + stream->offset);
+ const char *prim = get_prim_name( ptr[0] );
+ unsigned i, len;
+
+ ushort *idx = (ushort *)(ptr+1);
+ for (i = 0; idx[i] != 0xffff; i++)
+ ;
+
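+ /* One header dword, then i indices plus the 0xffff terminator packed
+ * two shorts per dword.
+ */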
+ len = 1+(i+2)/2;
+
+ PRINTF(stream, "3DPRIM, %s variable length %d indices (%d dwords):\n", prim, i, len);
+ for (i = 0; i < len; i++)
+ PRINTF(stream, "\t0x%08x\n", ptr[i]);
+ PRINTF(stream, "\n");
+
+ stream->offset += len * sizeof(unsigned);
+ return TRUE;
+}
+
+
+static void
+BITS(
+ struct debug_stream *stream,
+ unsigned dw,
+ unsigned hi,
+ unsigned lo,
+ const char *fmt,
+ ... )
+{
+ va_list args;
+ unsigned himask = ~0u >> (31 - (hi));
+
+ PRINTF(stream, "\t\t ");
+
+ va_start( args, fmt );
+ debug_vprintf( fmt, args );
+ va_end( args );
+
+ PRINTF(stream, ": 0x%x\n", ((dw) & himask) >> (lo));
+}
+
+#ifdef DEBUG
+#define MBZ( dw, hi, lo) do { \
+ unsigned x = (dw) >> (lo); \
+ unsigned lomask = (1 << (lo)) - 1; \
+ unsigned himask; \
+ himask = (1UL << (hi)) - 1; \
+ assert ((x & himask & ~lomask) == 0); \
+} while (0)
+#else
+#define MBZ( dw, hi, lo) do { \
+} while (0)
+#endif
+
+static void
+FLAG(
+ struct debug_stream *stream,
+ unsigned dw,
+ unsigned bit,
+ const char *fmt,
+ ... )
+{
+ if (((dw) >> (bit)) & 1) {
+ va_list args;
+
+ PRINTF(stream, "\t\t ");
+
+ va_start( args, fmt );
+ debug_vprintf( fmt, args );
+ va_end( args );
+
+ PRINTF(stream, "\n");
+ }
+}
+
+static boolean debug_load_immediate( struct debug_stream *stream,
+ const char *name,
+ unsigned len )
+{
+ unsigned *ptr = (unsigned *)(stream->ptr + stream->offset);
+ unsigned bits = (ptr[0] >> 4) & 0xff;
+ unsigned j = 0;
+
+ PRINTF(stream, "%s (%d dwords, flags: %x):\n", name, len, bits);
+ PRINTF(stream, "\t0x%08x\n", ptr[j++]);
+
+ if (bits & (1<<0)) {
+ PRINTF(stream, "\t LIS0: 0x%08x\n", ptr[j]);
+ PRINTF(stream, "\t vb address: 0x%08x\n", (ptr[j] & ~0x3));
+ BITS(stream, ptr[j], 0, 0, "vb invalidate disable");
+ j++;
+ }
+ if (bits & (1<<1)) {
+ PRINTF(stream, "\t LIS1: 0x%08x\n", ptr[j]);
+ BITS(stream, ptr[j], 29, 24, "vb dword width");
+ BITS(stream, ptr[j], 21, 16, "vb dword pitch");
+ BITS(stream, ptr[j], 15, 0, "vb max index");
+ j++;
+ }
+ if (bits & (1<<2)) {
+ int i;
+ PRINTF(stream, "\t LIS2: 0x%08x\n", ptr[j]);
+ for (i = 0; i < 8; i++) {
+ unsigned tc = (ptr[j] >> (i * 4)) & 0xf;
+ if (tc != 0xf)
+ BITS(stream, tc, 3, 0, "tex coord %d", i);
+ }
+ j++;
+ }
+ if (bits & (1<<3)) {
+ PRINTF(stream, "\t LIS3: 0x%08x\n", ptr[j]);
+ j++;
+ }
+ if (bits & (1<<4)) {
+ PRINTF(stream, "\t LIS4: 0x%08x\n", ptr[j]);
+ BITS(stream, ptr[j], 31, 23, "point width");
+ BITS(stream, ptr[j], 22, 19, "line width");
+ FLAG(stream, ptr[j], 18, "alpha flatshade");
+ FLAG(stream, ptr[j], 17, "fog flatshade");
+ FLAG(stream, ptr[j], 16, "spec flatshade");
+ FLAG(stream, ptr[j], 15, "rgb flatshade");
+ BITS(stream, ptr[j], 14, 13, "cull mode");
+ FLAG(stream, ptr[j], 12, "vfmt: point width");
+ FLAG(stream, ptr[j], 11, "vfmt: specular/fog");
+ FLAG(stream, ptr[j], 10, "vfmt: rgba");
+ FLAG(stream, ptr[j], 9, "vfmt: depth offset");
+ BITS(stream, ptr[j], 8, 6, "vfmt: position (2==xyzw)");
+ FLAG(stream, ptr[j], 5, "force dflt diffuse");
+ FLAG(stream, ptr[j], 4, "force dflt specular");
+ FLAG(stream, ptr[j], 3, "local depth offset enable");
+ FLAG(stream, ptr[j], 2, "vfmt: fp32 fog coord");
+ FLAG(stream, ptr[j], 1, "sprite point");
+ FLAG(stream, ptr[j], 0, "antialiasing");
+ j++;
+ }
+ if (bits & (1<<5)) {
+ PRINTF(stream, "\t LIS5: 0x%08x\n", ptr[j]);
+ BITS(stream, ptr[j], 31, 28, "rgba write disables");
+ FLAG(stream, ptr[j], 27, "force dflt point width");
+ FLAG(stream, ptr[j], 26, "last pixel enable");
+ FLAG(stream, ptr[j], 25, "global z offset enable");
+ FLAG(stream, ptr[j], 24, "fog enable");
+ BITS(stream, ptr[j], 23, 16, "stencil ref");
+ BITS(stream, ptr[j], 15, 13, "stencil test");
+ BITS(stream, ptr[j], 12, 10, "stencil fail op");
+ BITS(stream, ptr[j], 9, 7, "stencil pass z fail op");
+ BITS(stream, ptr[j], 6, 4, "stencil pass z pass op");
+ FLAG(stream, ptr[j], 3, "stencil write enable");
+ FLAG(stream, ptr[j], 2, "stencil test enable");
+ FLAG(stream, ptr[j], 1, "color dither enable");
+ FLAG(stream, ptr[j], 0, "logiop enable");
+ j++;
+ }
+ if (bits & (1<<6)) {
+ PRINTF(stream, "\t LIS6: 0x%08x\n", ptr[j]);
+ FLAG(stream, ptr[j], 31, "alpha test enable");
+ BITS(stream, ptr[j], 30, 28, "alpha func");
+ BITS(stream, ptr[j], 27, 20, "alpha ref");
+ FLAG(stream, ptr[j], 19, "depth test enable");
+ BITS(stream, ptr[j], 18, 16, "depth func");
+ FLAG(stream, ptr[j], 15, "blend enable");
+ BITS(stream, ptr[j], 14, 12, "blend func");
+ BITS(stream, ptr[j], 11, 8, "blend src factor");
+ BITS(stream, ptr[j], 7, 4, "blend dst factor");
+ FLAG(stream, ptr[j], 3, "depth write enable");
+ FLAG(stream, ptr[j], 2, "color write enable");
+ BITS(stream, ptr[j], 1, 0, "provoking vertex");
+ j++;
+ }
+
+
+ PRINTF(stream, "\n");
+
+ assert(j == len);
+
+ stream->offset += len * sizeof(unsigned);
+
+ return TRUE;
+}
+
+
+
+static boolean debug_load_indirect( struct debug_stream *stream,
+ const char *name,
+ unsigned len )
+{
+ unsigned *ptr = (unsigned *)(stream->ptr + stream->offset);
+ unsigned bits = (ptr[0] >> 8) & 0x3f;
+ unsigned i, j = 0;
+
+ PRINTF(stream, "%s (%d dwords):\n", name, len);
+ PRINTF(stream, "\t0x%08x\n", ptr[j++]);
+
+ for (i = 0; i < 6; i++) {
+ if (bits & (1<<i)) {
+ switch (1<<(8+i)) {
+ case LI0_STATE_STATIC_INDIRECT:
+ PRINTF(stream, " STATIC: 0x%08x | %x\n", ptr[j]&~3, ptr[j]&3); j++;
+ PRINTF(stream, " 0x%08x\n", ptr[j++]);
+ break;
+ case LI0_STATE_DYNAMIC_INDIRECT:
+ PRINTF(stream, " DYNAMIC: 0x%08x | %x\n", ptr[j]&~3, ptr[j]&3); j++;
+ break;
+ case LI0_STATE_SAMPLER:
+ PRINTF(stream, " SAMPLER: 0x%08x | %x\n", ptr[j]&~3, ptr[j]&3); j++;
+ PRINTF(stream, " 0x%08x\n", ptr[j++]);
+ break;
+ case LI0_STATE_MAP:
+ PRINTF(stream, " MAP: 0x%08x | %x\n", ptr[j]&~3, ptr[j]&3); j++;
+ PRINTF(stream, " 0x%08x\n", ptr[j++]);
+ break;
+ case LI0_STATE_PROGRAM:
+ PRINTF(stream, " PROGRAM: 0x%08x | %x\n", ptr[j]&~3, ptr[j]&3); j++;
+ PRINTF(stream, " 0x%08x\n", ptr[j++]);
+ break;
+ case LI0_STATE_CONSTANTS:
+ PRINTF(stream, " CONSTANTS: 0x%08x | %x\n", ptr[j]&~3, ptr[j]&3); j++;
+ PRINTF(stream, " 0x%08x\n", ptr[j++]);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ }
+ }
+
+ if (bits == 0) {
+ PRINTF(stream, "\t DUMMY: 0x%08x\n", ptr[j++]);
+ }
+
+ PRINTF(stream, "\n");
+
+
+ assert(j == len);
+
+ stream->offset += len * sizeof(unsigned);
+
+ return TRUE;
+}
+
+static void BR13( struct debug_stream *stream,
+ unsigned val )
+{
+ PRINTF(stream, "\t0x%08x\n", val);
+ FLAG(stream, val, 30, "clipping enable");
+ BITS(stream, val, 25, 24, "color depth (3==32bpp)");
+ BITS(stream, val, 23, 16, "raster op");
+ BITS(stream, val, 15, 0, "dest pitch");
+}
+
+
+static void BR22( struct debug_stream *stream,
+ unsigned val )
+{
+ PRINTF(stream, "\t0x%08x\n", val);
+ BITS(stream, val, 31, 16, "dest y1");
+ BITS(stream, val, 15, 0, "dest x1");
+}
+
+static void BR23( struct debug_stream *stream,
+ unsigned val )
+{
+ PRINTF(stream, "\t0x%08x\n", val);
+ BITS(stream, val, 31, 16, "dest y2");
+ BITS(stream, val, 15, 0, "dest x2");
+}
+
+static void BR09( struct debug_stream *stream,
+ unsigned val )
+{
+ PRINTF(stream, "\t0x%08x -- dest address\n", val);
+}
+
+static void BR26( struct debug_stream *stream,
+ unsigned val )
+{
+ PRINTF(stream, "\t0x%08x\n", val);
+ BITS(stream, val, 31, 16, "src y1");
+ BITS(stream, val, 15, 0, "src x1");
+}
+
+static void BR11( struct debug_stream *stream,
+ unsigned val )
+{
+ PRINTF(stream, "\t0x%08x\n", val);
+ BITS(stream, val, 15, 0, "src pitch");
+}
+
+static void BR12( struct debug_stream *stream,
+ unsigned val )
+{
+ PRINTF(stream, "\t0x%08x -- src address\n", val);
+}
+
+static void BR16( struct debug_stream *stream,
+ unsigned val )
+{
+ PRINTF(stream, "\t0x%08x -- color\n", val);
+}
+
+static boolean debug_copy_blit( struct debug_stream *stream,
+ const char *name,
+ unsigned len )
+{
+ unsigned *ptr = (unsigned *)(stream->ptr + stream->offset);
+ int j = 0;
+
+ PRINTF(stream, "%s (%d dwords):\n", name, len);
+ PRINTF(stream, "\t0x%08x\n", ptr[j++]);
+
+ BR13(stream, ptr[j++]);
+ BR22(stream, ptr[j++]);
+ BR23(stream, ptr[j++]);
+ BR09(stream, ptr[j++]);
+ BR26(stream, ptr[j++]);
+ BR11(stream, ptr[j++]);
+ BR12(stream, ptr[j++]);
+
+ stream->offset += len * sizeof(unsigned);
+ assert(j == len);
+ return TRUE;
+}
+
+static boolean debug_color_blit( struct debug_stream *stream,
+ const char *name,
+ unsigned len )
+{
+ unsigned *ptr = (unsigned *)(stream->ptr + stream->offset);
+ int j = 0;
+
+ PRINTF(stream, "%s (%d dwords):\n", name, len);
+ PRINTF(stream, "\t0x%08x\n", ptr[j++]);
+
+ BR13(stream, ptr[j++]);
+ BR22(stream, ptr[j++]);
+ BR23(stream, ptr[j++]);
+ BR09(stream, ptr[j++]);
+ BR16(stream, ptr[j++]);
+
+ stream->offset += len * sizeof(unsigned);
+ assert(j == len);
+ return TRUE;
+}
+
+static boolean debug_modes4( struct debug_stream *stream,
+ const char *name,
+ unsigned len )
+{
+ unsigned *ptr = (unsigned *)(stream->ptr + stream->offset);
+ int j = 0;
+
+ PRINTF(stream, "%s (%d dwords):\n", name, len);
+ PRINTF(stream, "\t0x%08x\n", ptr[j]);
+ BITS(stream, ptr[j], 21, 18, "logicop func");
+ FLAG(stream, ptr[j], 17, "stencil test mask modify-enable");
+ FLAG(stream, ptr[j], 16, "stencil write mask modify-enable");
+ BITS(stream, ptr[j], 15, 8, "stencil test mask");
+ BITS(stream, ptr[j], 7, 0, "stencil write mask");
+ j++;
+
+ stream->offset += len * sizeof(unsigned);
+ assert(j == len);
+ return TRUE;
+}
+
+static boolean debug_map_state( struct debug_stream *stream,
+ const char *name,
+ unsigned len )
+{
+ unsigned *ptr = (unsigned *)(stream->ptr + stream->offset);
+ unsigned j = 0;
+
+ PRINTF(stream, "%s (%d dwords):\n", name, len);
+ PRINTF(stream, "\t0x%08x\n", ptr[j++]);
+
+ {
+ PRINTF(stream, "\t0x%08x\n", ptr[j]);
+ BITS(stream, ptr[j], 15, 0, "map mask");
+ j++;
+ }
+
+ while (j < len) {
+ {
+ PRINTF(stream, "\t TMn.0: 0x%08x\n", ptr[j]);
+ PRINTF(stream, "\t map address: 0x%08x\n", (ptr[j] & ~0x3));
+ FLAG(stream, ptr[j], 1, "vertical line stride");
+ FLAG(stream, ptr[j], 0, "vertical line stride offset");
+ j++;
+ }
+
+ {
+ PRINTF(stream, "\t TMn.1: 0x%08x\n", ptr[j]);
+ BITS(stream, ptr[j], 31, 21, "height");
+ BITS(stream, ptr[j], 20, 10, "width");
+ BITS(stream, ptr[j], 9, 7, "surface format");
+ BITS(stream, ptr[j], 6, 3, "texel format");
+ FLAG(stream, ptr[j], 2, "use fence regs");
+ FLAG(stream, ptr[j], 1, "tiled surface");
+ FLAG(stream, ptr[j], 0, "tile walk ymajor");
+ j++;
+ }
+ {
+ PRINTF(stream, "\t TMn.2: 0x%08x\n", ptr[j]);
+ BITS(stream, ptr[j], 31, 21, "dword pitch");
+ BITS(stream, ptr[j], 20, 15, "cube face enables");
+ BITS(stream, ptr[j], 14, 9, "max lod");
+ FLAG(stream, ptr[j], 8, "mip layout right");
+ BITS(stream, ptr[j], 7, 0, "depth");
+ j++;
+ }
+ }
+
+ stream->offset += len * sizeof(unsigned);
+ assert(j == len);
+ return TRUE;
+}
+
+static boolean debug_sampler_state( struct debug_stream *stream,
+ const char *name,
+ unsigned len )
+{
+ unsigned *ptr = (unsigned *)(stream->ptr + stream->offset);
+ unsigned j = 0;
+
+ PRINTF(stream, "%s (%d dwords):\n", name, len);
+ PRINTF(stream, "\t0x%08x\n", ptr[j++]);
+
+ {
+ PRINTF(stream, "\t0x%08x\n", ptr[j]);
+ BITS(stream, ptr[j], 15, 0, "sampler mask");
+ j++;
+ }
+
+ while (j < len) {
+ {
+ PRINTF(stream, "\t TSn.0: 0x%08x\n", ptr[j]);
+ FLAG(stream, ptr[j], 31, "reverse gamma");
+ FLAG(stream, ptr[j], 30, "planar to packed");
+ FLAG(stream, ptr[j], 29, "yuv->rgb");
+ BITS(stream, ptr[j], 28, 27, "chromakey index");
+ BITS(stream, ptr[j], 26, 22, "base mip level");
+ BITS(stream, ptr[j], 21, 20, "mip mode filter");
+ BITS(stream, ptr[j], 19, 17, "mag mode filter");
+ BITS(stream, ptr[j], 16, 14, "min mode filter");
+ BITS(stream, ptr[j], 13, 5, "lod bias (s4.4)");
+ FLAG(stream, ptr[j], 4, "shadow enable");
+ FLAG(stream, ptr[j], 3, "max-aniso-4");
+ BITS(stream, ptr[j], 2, 0, "shadow func");
+ j++;
+ }
+
+ {
+ PRINTF(stream, "\t TSn.1: 0x%08x\n", ptr[j]);
+ BITS(stream, ptr[j], 31, 24, "min lod");
+ MBZ( ptr[j], 23, 18 );
+ FLAG(stream, ptr[j], 17, "kill pixel enable");
+ FLAG(stream, ptr[j], 16, "keyed tex filter mode");
+ FLAG(stream, ptr[j], 15, "chromakey enable");
+ BITS(stream, ptr[j], 14, 12, "tcx wrap mode");
+ BITS(stream, ptr[j], 11, 9, "tcy wrap mode");
+ BITS(stream, ptr[j], 8, 6, "tcz wrap mode");
+ FLAG(stream, ptr[j], 5, "normalized coords");
+ BITS(stream, ptr[j], 4, 1, "map (surface) index");
+ FLAG(stream, ptr[j], 0, "EAST deinterlacer enable");
+ j++;
+ }
+ {
+ PRINTF(stream, "\t TSn.2: 0x%08x (default color)\n", ptr[j]);
+ j++;
+ }
+ }
+
+ stream->offset += len * sizeof(unsigned);
+ assert(j == len);
+ return TRUE;
+}
+
+static boolean debug_dest_vars( struct debug_stream *stream,
+ const char *name,
+ unsigned len )
+{
+ unsigned *ptr = (unsigned *)(stream->ptr + stream->offset);
+ int j = 0;
+
+ PRINTF(stream, "%s (%d dwords):\n", name, len);
+ PRINTF(stream, "\t0x%08x\n", ptr[j++]);
+
+ {
+ PRINTF(stream, "\t0x%08x\n", ptr[j]);
+ FLAG(stream, ptr[j], 31, "early classic ztest");
+ FLAG(stream, ptr[j], 30, "opengl tex default color");
+ FLAG(stream, ptr[j], 29, "bypass iz");
+ FLAG(stream, ptr[j], 28, "lod preclamp");
+ BITS(stream, ptr[j], 27, 26, "dither pattern");
+ FLAG(stream, ptr[j], 25, "linear gamma blend");
+ FLAG(stream, ptr[j], 24, "debug dither");
+ BITS(stream, ptr[j], 23, 20, "dstorg x");
+ BITS(stream, ptr[j], 19, 16, "dstorg y");
+ MBZ (ptr[j], 15, 15 );
+ BITS(stream, ptr[j], 14, 12, "422 write select");
+ BITS(stream, ptr[j], 11, 8, "cbuf format");
+ BITS(stream, ptr[j], 3, 2, "zbuf format");
+ FLAG(stream, ptr[j], 1, "vert line stride");
+ FLAG(stream, ptr[j], 0, "vert line stride offset");
+ j++;
+ }
+
+ stream->offset += len * sizeof(unsigned);
+ assert(j == len);
+ return TRUE;
+}
+
+static boolean debug_buf_info( struct debug_stream *stream,
+ const char *name,
+ unsigned len )
+{
+ unsigned *ptr = (unsigned *)(stream->ptr + stream->offset);
+ int j = 0;
+
+ PRINTF(stream, "%s (%d dwords):\n", name, len);
+ PRINTF(stream, "\t0x%08x\n", ptr[j++]);
+
+ {
+ PRINTF(stream, "\t0x%08x\n", ptr[j]);
+ BITS(stream, ptr[j], 28, 28, "aux buffer id");
+ BITS(stream, ptr[j], 27, 24, "buffer id (7=depth, 3=back)");
+ FLAG(stream, ptr[j], 23, "use fence regs");
+ FLAG(stream, ptr[j], 22, "tiled surface");
+ FLAG(stream, ptr[j], 21, "tile walk ymajor");
+ MBZ (ptr[j], 20, 14);
+ BITS(stream, ptr[j], 13, 2, "dword pitch");
+ MBZ (ptr[j], 2, 0);
+ j++;
+ }
+
+ PRINTF(stream, "\t0x%08x -- buffer base address\n", ptr[j++]);
+
+ stream->offset += len * sizeof(unsigned);
+ assert(j == len);
+ return TRUE;
+}
+
+static boolean i915_debug_packet( struct debug_stream *stream )
+{
+ unsigned *ptr = (unsigned *)(stream->ptr + stream->offset);
+ unsigned cmd = *ptr;
+
+ switch (((cmd >> 29) & 0x7)) {
+ case 0x0:
+ switch ((cmd >> 23) & 0x3f) {
+ case 0x0:
+ return debug(stream, "MI_NOOP", 1);
+ case 0x3:
+ return debug(stream, "MI_WAIT_FOR_EVENT", 1);
+ case 0x4:
+ return debug(stream, "MI_FLUSH", 1);
+ case 0xA:
+ debug(stream, "MI_BATCH_BUFFER_END", 1);
+ return FALSE;
+ case 0x22:
+ return debug(stream, "MI_LOAD_REGISTER_IMM", 3);
+ case 0x31:
+ return debug_chain(stream, "MI_BATCH_BUFFER_START", 2);
+ default:
+ (void)debug(stream, "UNKNOWN 0x0 case!", 1);
+ assert(0);
+ break;
+ }
+ break;
+ case 0x1:
+ (void) debug(stream, "UNKNOWN 0x1 case!", 1);
+ assert(0);
+ break;
+ case 0x2:
+ switch ((cmd >> 22) & 0xff) {
+ case 0x50:
+ return debug_color_blit(stream, "XY_COLOR_BLT", (cmd & 0xff) + 2);
+ case 0x53:
+ return debug_copy_blit(stream, "XY_SRC_COPY_BLT", (cmd & 0xff) + 2);
+ default:
+ return debug(stream, "blit command", (cmd & 0xff) + 2);
+ }
+ break;
+ case 0x3:
+ switch ((cmd >> 24) & 0x1f) {
+ case 0x6:
+ return debug(stream, "3DSTATE_ANTI_ALIASING", 1);
+ case 0x7:
+ return debug(stream, "3DSTATE_RASTERIZATION_RULES", 1);
+ case 0x8:
+ return debug(stream, "3DSTATE_BACKFACE_STENCIL_OPS", 2);
+ case 0x9:
+ return debug(stream, "3DSTATE_BACKFACE_STENCIL_MASKS", 1);
+ case 0xb:
+ return debug(stream, "3DSTATE_INDEPENDENT_ALPHA_BLEND", 1);
+ case 0xc:
+ return debug(stream, "3DSTATE_MODES5", 1);
+ case 0xd:
+ return debug_modes4(stream, "3DSTATE_MODES4", 1);
+ case 0x15:
+ return debug(stream, "3DSTATE_FOG_COLOR", 1);
+ case 0x16:
+ return debug(stream, "3DSTATE_COORD_SET_BINDINGS", 1);
+ case 0x1c:
+ /* 3DState16NP */
+ switch((cmd >> 19) & 0x1f) {
+ case 0x10:
+ return debug(stream, "3DSTATE_SCISSOR_ENABLE", 1);
+ case 0x11:
+ return debug(stream, "3DSTATE_DEPTH_SUBRECTANGLE_DISABLE", 1);
+ default:
+ (void) debug(stream, "UNKNOWN 0x1c case!", 1);
+ assert(0);
+ break;
+ }
+ break;
+ case 0x1d:
+ /* 3DStateMW */
+ switch ((cmd >> 16) & 0xff) {
+ case 0x0:
+ return debug_map_state(stream, "3DSTATE_MAP_STATE", (cmd & 0x1f) + 2);
+ case 0x1:
+ return debug_sampler_state(stream, "3DSTATE_SAMPLER_STATE", (cmd & 0x1f) + 2);
+ case 0x4:
+ return debug_load_immediate(stream, "3DSTATE_LOAD_STATE_IMMEDIATE", (cmd & 0xf) + 2);
+ case 0x5:
+ return debug_program(stream, "3DSTATE_PIXEL_SHADER_PROGRAM", (cmd & 0x1ff) + 2);
+ case 0x6:
+ return debug(stream, "3DSTATE_PIXEL_SHADER_CONSTANTS", (cmd & 0xff) + 2);
+ case 0x7:
+ return debug_load_indirect(stream, "3DSTATE_LOAD_INDIRECT", (cmd & 0xff) + 2);
+ case 0x80:
+ return debug(stream, "3DSTATE_DRAWING_RECTANGLE", (cmd & 0xffff) + 2);
+ case 0x81:
+ return debug(stream, "3DSTATE_SCISSOR_RECTANGLE", (cmd & 0xffff) + 2);
+ case 0x83:
+ return debug(stream, "3DSTATE_SPAN_STIPPLE", (cmd & 0xffff) + 2);
+ case 0x85:
+ return debug_dest_vars(stream, "3DSTATE_DEST_BUFFER_VARS", (cmd & 0xffff) + 2);
+ case 0x88:
+ return debug(stream, "3DSTATE_CONSTANT_BLEND_COLOR", (cmd & 0xffff) + 2);
+ case 0x89:
+ return debug(stream, "3DSTATE_FOG_MODE", (cmd & 0xffff) + 2);
+ case 0x8e:
+ return debug_buf_info(stream, "3DSTATE_BUFFER_INFO", (cmd & 0xffff) + 2);
+ case 0x97:
+ return debug(stream, "3DSTATE_DEPTH_OFFSET_SCALE", (cmd & 0xffff) + 2);
+ case 0x98:
+ return debug(stream, "3DSTATE_DEFAULT_Z", (cmd & 0xffff) + 2);
+ case 0x99:
+ return debug(stream, "3DSTATE_DEFAULT_DIFFUSE", (cmd & 0xffff) + 2);
+ case 0x9a:
+ return debug(stream, "3DSTATE_DEFAULT_SPECULAR", (cmd & 0xffff) + 2);
+ case 0x9c:
+ return debug(stream, "3DSTATE_CLEAR_PARAMETERS", (cmd & 0xffff) + 2);
+ default:
+ assert(0);
+ return 0;
+ }
+ break;
+ case 0x1e:
+ if (cmd & (1 << 23))
+ return debug(stream, "???", (cmd & 0xffff) + 1);
+ else
+ return debug(stream, "", 1);
+ break;
+ case 0x1f:
+ if ((cmd & (1 << 23)) == 0)
+ return debug_prim(stream, "3DPRIM (inline)", 1, (cmd & 0x1ffff) + 2);
+ else if (cmd & (1 << 17))
+ {
+ if ((cmd & 0xffff) == 0)
+ return debug_variable_length_prim(stream);
+ else
+ return debug_prim(stream, "3DPRIM (indexed)", 0, (((cmd & 0xffff) + 1) / 2) + 1);
+ }
+ else
+ return debug_prim(stream, "3DPRIM (indirect sequential)", 0, 2);
+ break;
+ default:
+ return debug(stream, "", 0);
+ }
+ default:
+ assert(0);
+ return 0;
+ }
+
+ assert(0);
+ return 0;
+}
+
+
+
+void
+i915_dump_batchbuffer( struct intel_batchbuffer *batch )
+{
+ struct debug_stream stream;
+ unsigned *start = (unsigned*)batch->map;
+ unsigned *end = (unsigned*)batch->ptr;
+ unsigned long bytes = (unsigned long) (end - start) * 4;
+ boolean done = FALSE;
+
+ stream.offset = 0;
+ stream.ptr = (char *)start;
+ stream.print_addresses = 0;
+
+ if (!start || !end) {
+ debug_printf( "\n\nBATCH: ???\n");
+ return;
+ }
+
+ debug_printf( "\n\nBATCH: (%lu)\n", bytes / 4);
+
+ while (!done &&
+ stream.offset < bytes)
+ {
+ if (!i915_debug_packet( &stream ))
+ break;
+
+ assert(stream.offset <= bytes &&
+ stream.offset >= 0);
+ }
+
+ debug_printf( "END-BATCH\n\n\n");
+}
+
+
diff --git a/src/gallium/drivers/i915simple/i915_debug.h b/src/gallium/drivers/i915simple/i915_debug.h
new file mode 100644
index 0000000000..dd9b86e17b
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_debug.h
@@ -0,0 +1,114 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef I915_DEBUG_H
+#define I915_DEBUG_H
+
+#include <stdarg.h>
+
+struct i915_context;
+
+struct debug_stream
+{
+ unsigned offset; /* current gtt offset */
+ char *ptr; /* pointer to gtt offset zero */
+ char *end; /* pointer to the end of the stream */
+ unsigned print_addresses;
+};
+
+
+/* Internal functions
+ */
+void i915_disassemble_program(struct debug_stream *stream,
+ const unsigned *program, unsigned sz);
+
+void i915_print_ureg(const char *msg, unsigned ureg);
+
+
+#define DEBUG_BATCH 0x1
+#define DEBUG_BLIT 0x2
+#define DEBUG_BUFFER 0x4
+#define DEBUG_CONSTANTS 0x8
+#define DEBUG_CONTEXT 0x10
+#define DEBUG_DRAW 0x20
+#define DEBUG_DYNAMIC 0x40
+#define DEBUG_FLUSH 0x80
+#define DEBUG_MAP 0x100
+#define DEBUG_PROGRAM 0x200
+#define DEBUG_REGIONS 0x400
+#define DEBUG_SAMPLER 0x800
+#define DEBUG_STATIC 0x1000
+#define DEBUG_SURFACE 0x2000
+#define DEBUG_WINSYS 0x4000
+
+#include "pipe/p_compiler.h"
+
+#if defined(DEBUG) && defined(FILE_DEBUG_FLAG)
+
+#include "pipe/internal/p_winsys_screen.h"
+
+static INLINE void
+I915_DBG(
+ struct i915_context *i915,
+ const char *fmt,
+ ... )
+{
+ if ((i915)->debug & FILE_DEBUG_FLAG) {
+ va_list args;
+
+ va_start( args, fmt );
+ debug_vprintf( fmt, args );
+ va_end( args );
+ }
+}
+
+#else
+
+static INLINE void
+I915_DBG(
+ struct i915_context *i915,
+ const char *fmt,
+ ... )
+{
+ (void) i915;
+ (void) fmt;
+}
+
+#endif
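+
+/* Typical usage (an assumed example, not part of this header): a driver
+ * source file picks the debug class its messages belong to by defining
+ * FILE_DEBUG_FLAG before including this header, e.g.
+ *
+ *    #define FILE_DEBUG_FLAG DEBUG_BLIT
+ *    #include "i915_debug.h"
+ *    ...
+ *    I915_DBG(i915, "blit %d x %d\n", width, height);
+ *
+ * The message is only emitted when the driver is built with DEBUG defined
+ * and the DEBUG_BLIT bit is set in i915->debug.
+ */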
+
+
+struct intel_batchbuffer;
+
+void i915_dump_batchbuffer( struct intel_batchbuffer *i915 );
+
+void i915_debug_init( struct i915_context *i915 );
+
+
+#endif
diff --git a/src/gallium/drivers/i915simple/i915_debug_fp.c b/src/gallium/drivers/i915simple/i915_debug_fp.c
new file mode 100644
index 0000000000..9c5b117b6d
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_debug_fp.c
@@ -0,0 +1,363 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "i915_reg.h"
+#include "i915_debug.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "util/u_memory.h"
+
+
+static void
+PRINTF(
+ struct debug_stream *stream,
+ const char *fmt,
+ ... )
+{
+ va_list args;
+
+ va_start( args, fmt );
+ debug_vprintf( fmt, args );
+ va_end( args );
+}
+
+
+static const char *opcodes[0x20] = {
+ "NOP",
+ "ADD",
+ "MOV",
+ "MUL",
+ "MAD",
+ "DP2ADD",
+ "DP3",
+ "DP4",
+ "FRC",
+ "RCP",
+ "RSQ",
+ "EXP",
+ "LOG",
+ "CMP",
+ "MIN",
+ "MAX",
+ "FLR",
+ "MOD",
+ "TRC",
+ "SGE",
+ "SLT",
+ "TEXLD",
+ "TEXLDP",
+ "TEXLDB",
+ "TEXKILL",
+ "DCL",
+ "0x1a",
+ "0x1b",
+ "0x1c",
+ "0x1d",
+ "0x1e",
+ "0x1f",
+};
+
+
+static const int args[0x20] = {
+ 0, /* 0 nop */
+ 2, /* 1 add */
+ 1, /* 2 mov */
+   2,                           /* 3 mul */
+ 3, /* 4 mad */
+ 3, /* 5 dp2add */
+ 2, /* 6 dp3 */
+ 2, /* 7 dp4 */
+ 1, /* 8 frc */
+ 1, /* 9 rcp */
+ 1, /* a rsq */
+ 1, /* b exp */
+ 1, /* c log */
+ 3, /* d cmp */
+ 2, /* e min */
+ 2, /* f max */
+ 1, /* 10 flr */
+ 1, /* 11 mod */
+ 1, /* 12 trc */
+ 2, /* 13 sge */
+ 2, /* 14 slt */
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+};
+
+
+static const char *regname[0x8] = {
+ "R",
+ "T",
+ "CONST",
+ "S",
+ "OC",
+ "OD",
+ "U",
+ "UNKNOWN",
+};
+
+static void
+print_reg_type_nr(struct debug_stream *stream, unsigned type, unsigned nr)
+{
+ switch (type) {
+ case REG_TYPE_T:
+ switch (nr) {
+ case T_DIFFUSE:
+ PRINTF(stream, "T_DIFFUSE");
+ return;
+ case T_SPECULAR:
+ PRINTF(stream, "T_SPECULAR");
+ return;
+ case T_FOG_W:
+ PRINTF(stream, "T_FOG_W");
+ return;
+ default:
+ PRINTF(stream, "T_TEX%d", nr);
+ return;
+ }
+ case REG_TYPE_OC:
+ if (nr == 0) {
+ PRINTF(stream, "oC");
+ return;
+ }
+ break;
+ case REG_TYPE_OD:
+ if (nr == 0) {
+ PRINTF(stream, "oD");
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+
+ PRINTF(stream, "%s[%d]", regname[type], nr);
+}
+
+#define REG_SWIZZLE_MASK 0x7777
+#define REG_NEGATE_MASK 0x8888
+
+#define REG_SWIZZLE_XYZW ((SRC_X << A2_SRC2_CHANNEL_X_SHIFT) | \
+ (SRC_Y << A2_SRC2_CHANNEL_Y_SHIFT) | \
+ (SRC_Z << A2_SRC2_CHANNEL_Z_SHIFT) | \
+ (SRC_W << A2_SRC2_CHANNEL_W_SHIFT))
+
+
+static void
+print_reg_neg_swizzle(struct debug_stream *stream, unsigned reg)
+{
+ int i;
+
+ if ((reg & REG_SWIZZLE_MASK) == REG_SWIZZLE_XYZW &&
+ (reg & REG_NEGATE_MASK) == 0)
+ return;
+
+ PRINTF(stream, ".");
+
+ for (i = 3; i >= 0; i--) {
+ if (reg & (1 << ((i * 4) + 3)))
+ PRINTF(stream, "-");
+
+ switch ((reg >> (i * 4)) & 0x7) {
+ case 0:
+ PRINTF(stream, "x");
+ break;
+ case 1:
+ PRINTF(stream, "y");
+ break;
+ case 2:
+ PRINTF(stream, "z");
+ break;
+ case 3:
+ PRINTF(stream, "w");
+ break;
+ case 4:
+ PRINTF(stream, "0");
+ break;
+ case 5:
+ PRINTF(stream, "1");
+ break;
+ default:
+ PRINTF(stream, "?");
+ break;
+ }
+ }
+}
+
+
+static void
+print_src_reg(struct debug_stream *stream, unsigned dword)
+{
+ unsigned nr = (dword >> A2_SRC2_NR_SHIFT) & REG_NR_MASK;
+ unsigned type = (dword >> A2_SRC2_TYPE_SHIFT) & REG_TYPE_MASK;
+ print_reg_type_nr(stream, type, nr);
+ print_reg_neg_swizzle(stream, dword);
+}
+
+
+static void
+print_dest_reg(struct debug_stream *stream, unsigned dword)
+{
+ unsigned nr = (dword >> A0_DEST_NR_SHIFT) & REG_NR_MASK;
+ unsigned type = (dword >> A0_DEST_TYPE_SHIFT) & REG_TYPE_MASK;
+ print_reg_type_nr(stream, type, nr);
+ if ((dword & A0_DEST_CHANNEL_ALL) == A0_DEST_CHANNEL_ALL)
+ return;
+ PRINTF(stream, ".");
+ if (dword & A0_DEST_CHANNEL_X)
+ PRINTF(stream, "x");
+ if (dword & A0_DEST_CHANNEL_Y)
+ PRINTF(stream, "y");
+ if (dword & A0_DEST_CHANNEL_Z)
+ PRINTF(stream, "z");
+ if (dword & A0_DEST_CHANNEL_W)
+ PRINTF(stream, "w");
+}
+
+
+#define GET_SRC0_REG(r0, r1) ((r0<<14)|(r1>>A1_SRC0_CHANNEL_W_SHIFT))
+#define GET_SRC1_REG(r0, r1) ((r0<<8)|(r1>>A2_SRC1_CHANNEL_W_SHIFT))
+#define GET_SRC2_REG(r) (r)
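+
+/* The SRC0 and SRC1 operands are split across two instruction dwords; these
+ * macros reassemble them into what appears to be the same bit layout SRC2
+ * uses in its single dword, which is why print_src_reg() can decode all
+ * three sources with just the A2_SRC2_* shifts.
+ */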
+
+
+static void
+print_arith_op(struct debug_stream *stream,
+ unsigned opcode, const unsigned * program)
+{
+ if (opcode != A0_NOP) {
+ print_dest_reg(stream, program[0]);
+ if (program[0] & A0_DEST_SATURATE)
+ PRINTF(stream, " = SATURATE ");
+ else
+ PRINTF(stream, " = ");
+ }
+
+ PRINTF(stream, "%s ", opcodes[opcode]);
+
+ print_src_reg(stream, GET_SRC0_REG(program[0], program[1]));
+ if (args[opcode] == 1) {
+ PRINTF(stream, "\n");
+ return;
+ }
+
+ PRINTF(stream, ", ");
+ print_src_reg(stream, GET_SRC1_REG(program[1], program[2]));
+ if (args[opcode] == 2) {
+ PRINTF(stream, "\n");
+ return;
+ }
+
+ PRINTF(stream, ", ");
+ print_src_reg(stream, GET_SRC2_REG(program[2]));
+ PRINTF(stream, "\n");
+ return;
+}
+
+
+static void
+print_tex_op(struct debug_stream *stream,
+ unsigned opcode, const unsigned * program)
+{
+ print_dest_reg(stream, program[0] | A0_DEST_CHANNEL_ALL);
+ PRINTF(stream, " = ");
+
+ PRINTF(stream, "%s ", opcodes[opcode]);
+
+ PRINTF(stream, "S[%d],", program[0] & T0_SAMPLER_NR_MASK);
+
+ print_reg_type_nr(stream,
+ (program[1] >> T1_ADDRESS_REG_TYPE_SHIFT) &
+ REG_TYPE_MASK,
+ (program[1] >> T1_ADDRESS_REG_NR_SHIFT) & REG_NR_MASK);
+ PRINTF(stream, "\n");
+}
+
+static void
+print_texkil_op(struct debug_stream *stream,
+ unsigned opcode, const unsigned * program)
+{
+   PRINTF(stream, "TEXKILL ");
+
+ print_reg_type_nr(stream,
+ (program[1] >> T1_ADDRESS_REG_TYPE_SHIFT) &
+ REG_TYPE_MASK,
+ (program[1] >> T1_ADDRESS_REG_NR_SHIFT) & REG_NR_MASK);
+ PRINTF(stream, "\n");
+}
+
+static void
+print_dcl_op(struct debug_stream *stream,
+ unsigned opcode, const unsigned * program)
+{
+ PRINTF(stream, "%s ", opcodes[opcode]);
+ print_dest_reg(stream,
+ program[0] | A0_DEST_CHANNEL_ALL);
+ PRINTF(stream, "\n");
+}
+
+
+void
+i915_disassemble_program(struct debug_stream *stream,
+ const unsigned * program, unsigned sz)
+{
+ unsigned i;
+
+ PRINTF(stream, "\t\tBEGIN\n");
+
+ assert((program[0] & 0x1ff) + 2 == sz);
+
+ program++;
+ for (i = 1; i < sz; i += 3, program += 3) {
+ unsigned opcode = program[0] & (0x1f << 24);
+
+ PRINTF(stream, "\t\t");
+
+ if ((int) opcode >= A0_NOP && opcode <= A0_SLT)
+ print_arith_op(stream, opcode >> 24, program);
+ else if (opcode >= T0_TEXLD && opcode < T0_TEXKILL)
+ print_tex_op(stream, opcode >> 24, program);
+ else if (opcode == T0_TEXKILL)
+ print_texkil_op(stream, opcode >> 24, program);
+ else if (opcode == D0_DCL)
+ print_dcl_op(stream, opcode >> 24, program);
+ else
+ PRINTF(stream, "Unknown opcode 0x%x\n", opcode);
+ }
+
+ PRINTF(stream, "\t\tEND\n\n");
+}
+
+
diff --git a/src/gallium/drivers/i915simple/i915_flush.c b/src/gallium/drivers/i915simple/i915_flush.c
new file mode 100644
index 0000000000..1582168eba
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_flush.c
@@ -0,0 +1,86 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Author:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_defines.h"
+#include "draw/draw_context.h"
+#include "i915_context.h"
+#include "i915_reg.h"
+#include "i915_batch.h"
+
+
+static void i915_flush( struct pipe_context *pipe,
+ unsigned flags,
+ struct pipe_fence_handle **fence )
+{
+ struct i915_context *i915 = i915_context(pipe);
+
+ draw_flush(i915->draw);
+
+#if 0
+ /* Do we need to emit an MI_FLUSH command to flush the hardware
+ * caches?
+ */
+ if (flags & (PIPE_FLUSH_RENDER_CACHE | PIPE_FLUSH_TEXTURE_CACHE)) {
+ unsigned flush = MI_FLUSH;
+
+ if (!(flags & PIPE_FLUSH_RENDER_CACHE))
+ flush |= INHIBIT_FLUSH_RENDER_CACHE;
+
+ if (flags & PIPE_FLUSH_TEXTURE_CACHE)
+ flush |= FLUSH_MAP_CACHE;
+
+ if (!BEGIN_BATCH(1, 0)) {
+ FLUSH_BATCH(NULL);
+ assert(BEGIN_BATCH(1, 0));
+ }
+ OUT_BATCH( flush );
+ }
+#endif
+
+#if 0
+ if (i915->batch->map == i915->batch->ptr) {
+ return;
+ }
+#endif
+
+ /* If there are no flags, just flush pending commands to hardware:
+ */
+ FLUSH_BATCH(fence);
+ i915->vbo_flushed = 1;
+}
+
+
+
+void i915_init_flush_functions( struct i915_context *i915 )
+{
+ i915->base.flush = i915_flush;
+}
diff --git a/src/gallium/drivers/i915simple/i915_fpc.h b/src/gallium/drivers/i915simple/i915_fpc.h
new file mode 100644
index 0000000000..2f0f99d046
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_fpc.h
@@ -0,0 +1,207 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef I915_FPC_H
+#define I915_FPC_H
+
+
+#include "i915_context.h"
+#include "i915_reg.h"
+
+
+
+#define I915_PROGRAM_SIZE 192
+
+
+
+/**
+ * Program translation state
+ */
+struct i915_fp_compile {
+ struct i915_fragment_shader *shader; /* the shader we're compiling */
+
+ boolean used_constants[I915_MAX_CONSTANT];
+
+ /** maps TGSI immediate index to constant slot */
+ uint num_immediates;
+ uint immediates_map[I915_MAX_CONSTANT];
+ float immediates[I915_MAX_CONSTANT][4];
+
+ boolean first_instruction;
+
+ uint declarations[I915_PROGRAM_SIZE];
+ uint program[I915_PROGRAM_SIZE];
+
+ uint *csr; /**< Cursor, points into program. */
+
+ uint *decl; /**< Cursor, points into declarations. */
+
+ uint decl_s; /**< flags for which s regs need to be decl'd */
+ uint decl_t; /**< flags for which t regs need to be decl'd */
+
+ uint temp_flag; /**< Tracks temporary regs which are in use */
+ uint utemp_flag; /**< Tracks TYPE_U temporary regs which are in use */
+
+ uint nr_tex_indirect;
+ uint nr_tex_insn;
+ uint nr_alu_insn;
+ uint nr_decl_insn;
+
+ boolean error; /**< Set if i915_program_error() is called */
+ uint wpos_tex;
+ uint NumNativeInstructions;
+ uint NumNativeAluInstructions;
+ uint NumNativeTexInstructions;
+ uint NumNativeTexIndirections;
+};
+
+
+/* Having zero and one in here makes the definition of swizzle a lot
+ * easier.
+ */
+#define UREG_TYPE_SHIFT 29
+#define UREG_NR_SHIFT 24
+#define UREG_CHANNEL_X_NEGATE_SHIFT 23
+#define UREG_CHANNEL_X_SHIFT 20
+#define UREG_CHANNEL_Y_NEGATE_SHIFT 19
+#define UREG_CHANNEL_Y_SHIFT 16
+#define UREG_CHANNEL_Z_NEGATE_SHIFT 15
+#define UREG_CHANNEL_Z_SHIFT 12
+#define UREG_CHANNEL_W_NEGATE_SHIFT 11
+#define UREG_CHANNEL_W_SHIFT 8
+#define UREG_CHANNEL_ZERO_NEGATE_MBZ 5
+#define UREG_CHANNEL_ZERO_SHIFT 4
+#define UREG_CHANNEL_ONE_NEGATE_MBZ 1
+#define UREG_CHANNEL_ONE_SHIFT 0
+
+#define UREG_BAD 0xffffffff /* not a valid ureg */
+
+#define X SRC_X
+#define Y SRC_Y
+#define Z SRC_Z
+#define W SRC_W
+#define ZERO SRC_ZERO
+#define ONE SRC_ONE
+
+/* Construct a ureg:
+ */
+#define UREG( type, nr ) (((type)<< UREG_TYPE_SHIFT) | \
+ ((nr) << UREG_NR_SHIFT) | \
+ (X << UREG_CHANNEL_X_SHIFT) | \
+ (Y << UREG_CHANNEL_Y_SHIFT) | \
+ (Z << UREG_CHANNEL_Z_SHIFT) | \
+ (W << UREG_CHANNEL_W_SHIFT) | \
+ (ZERO << UREG_CHANNEL_ZERO_SHIFT) | \
+ (ONE << UREG_CHANNEL_ONE_SHIFT))
+
+#define GET_CHANNEL_SRC( reg, channel ) ((reg<<(channel*4)) & (0xf<<20))
+#define CHANNEL_SRC( src, channel ) (src>>(channel*4))
+
+#define GET_UREG_TYPE(reg) (((reg)>>UREG_TYPE_SHIFT)&REG_TYPE_MASK)
+#define GET_UREG_NR(reg) (((reg)>>UREG_NR_SHIFT)&REG_NR_MASK)
+
+
+
+#define UREG_XYZW_CHANNEL_MASK 0x00ffff00
+
+/* One neat thing about the UREG representation:
+ */
+static INLINE int
+swizzle(int reg, uint x, uint y, uint z, uint w)
+{
+ assert(x <= SRC_ONE);
+ assert(y <= SRC_ONE);
+ assert(z <= SRC_ONE);
+ assert(w <= SRC_ONE);
+ return ((reg & ~UREG_XYZW_CHANNEL_MASK) |
+ CHANNEL_SRC(GET_CHANNEL_SRC(reg, x), 0) |
+ CHANNEL_SRC(GET_CHANNEL_SRC(reg, y), 1) |
+ CHANNEL_SRC(GET_CHANNEL_SRC(reg, z), 2) |
+ CHANNEL_SRC(GET_CHANNEL_SRC(reg, w), 3));
+}
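+
+/* A worked example (illustrative only): a freshly constructed register such
+ * as UREG(REG_TYPE_R, 1) carries the identity .xyzw swizzle, so
+ *
+ *    swizzle(UREG(REG_TYPE_R, 1), X, X, X, X)
+ *
+ * is the same register with a .xxxx swizzle, while
+ *
+ *    swizzle(UREG(REG_TYPE_R, 1), ZERO, ZERO, ZERO, ONE)
+ *
+ * selects the constants (0, 0, 0, 1) no matter what R1 holds.  That is the
+ * trick i915_emit_const1f() relies on when it returns "constant" zeros and
+ * ones without using up a real constant slot.
+ */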
+
+
+
+/***********************************************************************
+ * Public interface for the compiler
+ */
+extern void
+i915_translate_fragment_program( struct i915_context *i915,
+ struct i915_fragment_shader *fs);
+
+
+
+extern uint i915_get_temp(struct i915_fp_compile *p);
+extern uint i915_get_utemp(struct i915_fp_compile *p);
+extern void i915_release_utemps(struct i915_fp_compile *p);
+
+
+extern uint i915_emit_texld(struct i915_fp_compile *p,
+ uint dest,
+ uint destmask,
+ uint sampler, uint coord, uint op);
+
+extern uint i915_emit_arith(struct i915_fp_compile *p,
+ uint op,
+ uint dest,
+ uint mask,
+ uint saturate,
+ uint src0, uint src1, uint src2);
+
+extern uint i915_emit_decl(struct i915_fp_compile *p,
+ uint type, uint nr, uint d0_flags);
+
+
+extern uint i915_emit_const1f(struct i915_fp_compile *p, float c0);
+
+extern uint i915_emit_const2f(struct i915_fp_compile *p,
+ float c0, float c1);
+
+extern uint i915_emit_const4fv(struct i915_fp_compile *p,
+ const float * c);
+
+extern uint i915_emit_const4f(struct i915_fp_compile *p,
+ float c0, float c1,
+ float c2, float c3);
+
+
+/*======================================================================
+ * i915_fpc_debug.c
+ */
+extern void i915_disassemble_program(const uint * program, uint sz);
+
+
+/*======================================================================
+ * i915_fpc_translate.c
+ */
+
+extern void
+i915_program_error(struct i915_fp_compile *p, const char *msg, ...);
+
+
+#endif
diff --git a/src/gallium/drivers/i915simple/i915_fpc_emit.c b/src/gallium/drivers/i915simple/i915_fpc_emit.c
new file mode 100644
index 0000000000..b054ce41d3
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_fpc_emit.c
@@ -0,0 +1,375 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "i915_reg.h"
+#include "i915_context.h"
+#include "i915_fpc.h"
+#include "util/u_math.h"
+
+
+#define A0_DEST( reg ) (((reg)&UREG_TYPE_NR_MASK)>>UREG_A0_DEST_SHIFT_LEFT)
+#define D0_DEST( reg ) (((reg)&UREG_TYPE_NR_MASK)>>UREG_A0_DEST_SHIFT_LEFT)
+#define T0_DEST( reg ) (((reg)&UREG_TYPE_NR_MASK)>>UREG_A0_DEST_SHIFT_LEFT)
+#define A0_SRC0( reg ) (((reg)&UREG_MASK)>>UREG_A0_SRC0_SHIFT_LEFT)
+#define A1_SRC0( reg ) (((reg)&UREG_MASK)<<UREG_A1_SRC0_SHIFT_RIGHT)
+#define A1_SRC1( reg ) (((reg)&UREG_MASK)>>UREG_A1_SRC1_SHIFT_LEFT)
+#define A2_SRC1( reg ) (((reg)&UREG_MASK)<<UREG_A2_SRC1_SHIFT_RIGHT)
+#define A2_SRC2( reg ) (((reg)&UREG_MASK)>>UREG_A2_SRC2_SHIFT_LEFT)
+
+/* These are special, and don't have swizzle/negate bits.
+ */
+#define T0_SAMPLER( reg ) (GET_UREG_NR(reg)<<T0_SAMPLER_NR_SHIFT)
+#define T1_ADDRESS_REG( reg ) ((GET_UREG_NR(reg)<<T1_ADDRESS_REG_NR_SHIFT) | \
+ (GET_UREG_TYPE(reg)<<T1_ADDRESS_REG_TYPE_SHIFT))
+
+
+/* Macros for translating UREG's into the various register fields used
+ * by the I915 programmable unit.
+ */
+#define UREG_A0_DEST_SHIFT_LEFT (UREG_TYPE_SHIFT - A0_DEST_TYPE_SHIFT)
+#define UREG_A0_SRC0_SHIFT_LEFT (UREG_TYPE_SHIFT - A0_SRC0_TYPE_SHIFT)
+#define UREG_A1_SRC0_SHIFT_RIGHT (A1_SRC0_CHANNEL_W_SHIFT - UREG_CHANNEL_W_SHIFT)
+#define UREG_A1_SRC1_SHIFT_LEFT (UREG_TYPE_SHIFT - A1_SRC1_TYPE_SHIFT)
+#define UREG_A2_SRC1_SHIFT_RIGHT (A2_SRC1_CHANNEL_W_SHIFT - UREG_CHANNEL_W_SHIFT)
+#define UREG_A2_SRC2_SHIFT_LEFT (UREG_TYPE_SHIFT - A2_SRC2_TYPE_SHIFT)
+
+#define UREG_MASK 0xffffff00
+#define UREG_TYPE_NR_MASK ((REG_TYPE_MASK << UREG_TYPE_SHIFT) | \
+ (REG_NR_MASK << UREG_NR_SHIFT))
+
+
+uint
+i915_get_temp(struct i915_fp_compile *p)
+{
+ int bit = ffs(~p->temp_flag);
+ if (!bit) {
+ i915_program_error(p, "i915_get_temp: out of temporaries\n");
+ return 0;
+ }
+
+ p->temp_flag |= 1 << (bit - 1);
+ return bit - 1;
+}
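+
+/* Example (illustrative values): with temp_flag == 0x7, i.e. R0..R2 already
+ * in use, ffs(~0x7) returns 4, so bit 3 gets set and R3 is handed out.  If
+ * every bit is set, ffs(~temp_flag) returns 0 and the out-of-temporaries
+ * error path above is taken.
+ */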
+
+
+static void
+i915_release_temp(struct i915_fp_compile *p, int reg)
+{
+ p->temp_flag &= ~(1 << reg);
+}
+
+
+/**
+ * Get unpreserved temporary, a temp whose value is not preserved between
+ * PS program phases.
+ */
+uint
+i915_get_utemp(struct i915_fp_compile * p)
+{
+ int bit = ffs(~p->utemp_flag);
+ if (!bit) {
+ i915_program_error(p, "i915_get_utemp: out of temporaries\n");
+ return 0;
+ }
+
+ p->utemp_flag |= 1 << (bit - 1);
+ return UREG(REG_TYPE_U, (bit - 1));
+}
+
+void
+i915_release_utemps(struct i915_fp_compile *p)
+{
+   p->utemp_flag = ~0x7;        /* U0..U2 become available again */
+}
+
+
+uint
+i915_emit_decl(struct i915_fp_compile *p,
+ uint type, uint nr, uint d0_flags)
+{
+ uint reg = UREG(type, nr);
+
+ if (type == REG_TYPE_T) {
+ if (p->decl_t & (1 << nr))
+ return reg;
+
+ p->decl_t |= (1 << nr);
+ }
+ else if (type == REG_TYPE_S) {
+ if (p->decl_s & (1 << nr))
+ return reg;
+
+ p->decl_s |= (1 << nr);
+ }
+ else
+ return reg;
+
+ *(p->decl++) = (D0_DCL | D0_DEST(reg) | d0_flags);
+ *(p->decl++) = D1_MBZ;
+ *(p->decl++) = D2_MBZ;
+
+ p->nr_decl_insn++;
+ return reg;
+}
+
+uint
+i915_emit_arith(struct i915_fp_compile * p,
+ uint op,
+ uint dest,
+ uint mask,
+ uint saturate, uint src0, uint src1, uint src2)
+{
+ uint c[3];
+ uint nr_const = 0;
+
+ assert(GET_UREG_TYPE(dest) != REG_TYPE_CONST);
+ dest = UREG(GET_UREG_TYPE(dest), GET_UREG_NR(dest));
+ assert(dest);
+
+ if (GET_UREG_TYPE(src0) == REG_TYPE_CONST)
+ c[nr_const++] = 0;
+ if (GET_UREG_TYPE(src1) == REG_TYPE_CONST)
+ c[nr_const++] = 1;
+ if (GET_UREG_TYPE(src2) == REG_TYPE_CONST)
+ c[nr_const++] = 2;
+
+ /* Recursively call this function to MOV additional const values
+ * into temporary registers. Use utemp registers for this -
+ * currently shouldn't be possible to run out, but keep an eye on
+ * this.
+ */
+ if (nr_const > 1) {
+ uint s[3], first, i, old_utemp_flag;
+
+ s[0] = src0;
+ s[1] = src1;
+ s[2] = src2;
+ old_utemp_flag = p->utemp_flag;
+
+ first = GET_UREG_NR(s[c[0]]);
+ for (i = 1; i < nr_const; i++) {
+ if (GET_UREG_NR(s[c[i]]) != first) {
+ uint tmp = i915_get_utemp(p);
+
+ i915_emit_arith(p, A0_MOV, tmp, A0_DEST_CHANNEL_ALL, 0,
+ s[c[i]], 0, 0);
+ s[c[i]] = tmp;
+ }
+ }
+
+ src0 = s[0];
+ src1 = s[1];
+ src2 = s[2];
+ p->utemp_flag = old_utemp_flag; /* restore */
+ }
+
+ *(p->csr++) = (op | A0_DEST(dest) | mask | saturate | A0_SRC0(src0));
+ *(p->csr++) = (A1_SRC0(src0) | A1_SRC1(src1));
+ *(p->csr++) = (A2_SRC1(src1) | A2_SRC2(src2));
+
+ p->nr_alu_insn++;
+ return dest;
+}
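+
+/* Note on i915_emit_arith(): judging by the same-register check on constant
+ * sources above, an arithmetic instruction may reference at most one
+ * distinct constant register, so when the sources name two or three
+ * different constants the extras are first MOVed into unpreserved
+ * temporaries.
+ */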
+
+
+/**
+ * Emit a texture load or texkill instruction.
+ * \param dest the dest i915 register
+ * \param destmask the dest register writemask
+ * \param sampler the i915 sampler register
+ * \param coord the i915 source texcoord operand
+ * \param opcode the instruction opcode
+ */
+uint i915_emit_texld( struct i915_fp_compile *p,
+ uint dest,
+ uint destmask,
+ uint sampler,
+ uint coord,
+ uint opcode )
+{
+ const uint k = UREG(GET_UREG_TYPE(coord), GET_UREG_NR(coord));
+ int temp = -1;
+
+ if (coord != k) {
+      /* texcoord is swizzled or negated.  Need to allocate a new temporary
+       * register; a utemp (unpreserved temp) won't do here.
+       */
+ uint tempReg;
+
+ temp = i915_get_temp(p); /* get temp reg index */
+ tempReg = UREG(REG_TYPE_R, temp); /* make i915 register */
+
+ i915_emit_arith( p, A0_MOV,
+ tempReg, A0_DEST_CHANNEL_ALL, /* dest reg, writemask */
+ 0, /* saturate */
+ coord, 0, 0 ); /* src0, src1, src2 */
+
+ /* new src texcoord is tempReg */
+ coord = tempReg;
+ }
+
+ /* Don't worry about saturate as we only support
+ */
+ if (destmask != A0_DEST_CHANNEL_ALL) {
+ /* if not writing to XYZW... */
+ uint tmp = i915_get_utemp(p);
+ i915_emit_texld( p, tmp, A0_DEST_CHANNEL_ALL, sampler, coord, opcode );
+ i915_emit_arith( p, A0_MOV, dest, destmask, 0, tmp, 0, 0 );
+ /* XXX release utemp here? */
+ }
+ else {
+ assert(GET_UREG_TYPE(dest) != REG_TYPE_CONST);
+      assert(dest == UREG(GET_UREG_TYPE(dest), GET_UREG_NR(dest)));
+
+ /* is the sampler coord a texcoord input reg? */
+ if (GET_UREG_TYPE(coord) != REG_TYPE_T) {
+ p->nr_tex_indirect++;
+ }
+
+ *(p->csr++) = (opcode |
+ T0_DEST( dest ) |
+ T0_SAMPLER( sampler ));
+
+ *(p->csr++) = T1_ADDRESS_REG( coord );
+ *(p->csr++) = T2_MBZ;
+
+ p->nr_tex_insn++;
+ }
+
+ if (temp >= 0)
+ i915_release_temp(p, temp);
+
+ return dest;
+}
+
+
+uint
+i915_emit_const1f(struct i915_fp_compile * p, float c0)
+{
+ struct i915_fragment_shader *ifs = p->shader;
+ unsigned reg, idx;
+
+ if (c0 == 0.0)
+ return swizzle(UREG(REG_TYPE_R, 0), ZERO, ZERO, ZERO, ZERO);
+ if (c0 == 1.0)
+ return swizzle(UREG(REG_TYPE_R, 0), ONE, ONE, ONE, ONE);
+
+ for (reg = 0; reg < I915_MAX_CONSTANT; reg++) {
+ if (ifs->constant_flags[reg] == I915_CONSTFLAG_USER)
+ continue;
+ for (idx = 0; idx < 4; idx++) {
+ if (!(ifs->constant_flags[reg] & (1 << idx)) ||
+ ifs->constants[reg][idx] == c0) {
+ ifs->constants[reg][idx] = c0;
+ ifs->constant_flags[reg] |= 1 << idx;
+ if (reg + 1 > ifs->num_constants)
+ ifs->num_constants = reg + 1;
+ return swizzle(UREG(REG_TYPE_CONST, reg), idx, ZERO, ZERO, ONE);
+ }
+ }
+ }
+
+ i915_program_error(p, "i915_emit_const1f: out of constants\n");
+ return 0;
+}
+
+uint
+i915_emit_const2f(struct i915_fp_compile * p, float c0, float c1)
+{
+ struct i915_fragment_shader *ifs = p->shader;
+ unsigned reg, idx;
+
+ if (c0 == 0.0)
+ return swizzle(i915_emit_const1f(p, c1), ZERO, X, Z, W);
+ if (c0 == 1.0)
+ return swizzle(i915_emit_const1f(p, c1), ONE, X, Z, W);
+
+ if (c1 == 0.0)
+ return swizzle(i915_emit_const1f(p, c0), X, ZERO, Z, W);
+ if (c1 == 1.0)
+ return swizzle(i915_emit_const1f(p, c0), X, ONE, Z, W);
+
+ for (reg = 0; reg < I915_MAX_CONSTANT; reg++) {
+ if (ifs->constant_flags[reg] == 0xf ||
+ ifs->constant_flags[reg] == I915_CONSTFLAG_USER)
+ continue;
+ for (idx = 0; idx < 3; idx++) {
+ if (!(ifs->constant_flags[reg] & (3 << idx))) {
+ ifs->constants[reg][idx + 0] = c0;
+ ifs->constants[reg][idx + 1] = c1;
+ ifs->constant_flags[reg] |= 3 << idx;
+ if (reg + 1 > ifs->num_constants)
+ ifs->num_constants = reg + 1;
+ return swizzle(UREG(REG_TYPE_CONST, reg), idx, idx + 1, ZERO, ONE);
+ }
+ }
+ }
+
+ i915_program_error(p, "i915_emit_const2f: out of constants\n");
+ return 0;
+}
+
+
+
+uint
+i915_emit_const4f(struct i915_fp_compile * p,
+ float c0, float c1, float c2, float c3)
+{
+ struct i915_fragment_shader *ifs = p->shader;
+ unsigned reg;
+
+ for (reg = 0; reg < I915_MAX_CONSTANT; reg++) {
+ if (ifs->constant_flags[reg] == 0xf &&
+ ifs->constants[reg][0] == c0 &&
+ ifs->constants[reg][1] == c1 &&
+ ifs->constants[reg][2] == c2 &&
+ ifs->constants[reg][3] == c3) {
+ return UREG(REG_TYPE_CONST, reg);
+ }
+ else if (ifs->constant_flags[reg] == 0) {
+
+ ifs->constants[reg][0] = c0;
+ ifs->constants[reg][1] = c1;
+ ifs->constants[reg][2] = c2;
+ ifs->constants[reg][3] = c3;
+ ifs->constant_flags[reg] = 0xf;
+ if (reg + 1 > ifs->num_constants)
+ ifs->num_constants = reg + 1;
+ return UREG(REG_TYPE_CONST, reg);
+ }
+ }
+
+ i915_program_error(p, "i915_emit_const4f: out of constants\n");
+ return 0;
+}
+
+
+uint
+i915_emit_const4fv(struct i915_fp_compile * p, const float * c)
+{
+ return i915_emit_const4f(p, c[0], c[1], c[2], c[3]);
+}
diff --git a/src/gallium/drivers/i915simple/i915_fpc_translate.c b/src/gallium/drivers/i915simple/i915_fpc_translate.c
new file mode 100644
index 0000000000..89504ced27
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_fpc_translate.c
@@ -0,0 +1,1202 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include <stdarg.h>
+
+#include "i915_reg.h"
+#include "i915_context.h"
+#include "i915_fpc.h"
+
+#include "pipe/p_shader_tokens.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "util/u_string.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_dump.h"
+
+#include "draw/draw_vertex.h"
+
+
+/**
+ * Simple pass-through fragment shader to use when we don't have
+ * a real shader (or it fails to compile for some reason).
+ */
+static unsigned passthrough[] =
+{
+ _3DSTATE_PIXEL_SHADER_PROGRAM | ((2*3)-1),
+
+ /* declare input color:
+ */
+ (D0_DCL |
+ (REG_TYPE_T << D0_TYPE_SHIFT) |
+ (T_DIFFUSE << D0_NR_SHIFT) |
+ D0_CHANNEL_ALL),
+ 0,
+ 0,
+
+ /* move to output color:
+ */
+ (A0_MOV |
+ (REG_TYPE_OC << A0_DEST_TYPE_SHIFT) |
+ A0_DEST_CHANNEL_ALL |
+ (REG_TYPE_T << A0_SRC0_TYPE_SHIFT) |
+ (T_DIFFUSE << A0_SRC0_NR_SHIFT)),
+ 0x01230000, /* .xyzw */
+ 0
+};
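+
+/* A note on the header dword above: the passthrough program is two
+ * three-dword instructions (one DCL, one MOV), and the length field OR'ed
+ * into _3DSTATE_PIXEL_SHADER_PROGRAM is (2*3)-1, which reads as "number of
+ * dwords that follow, minus one".  i915_fini_compile() further down uses
+ * the same convention when it patches program_size + decl_size - 2 into
+ * declarations[0] (total dwords including the header, minus two).
+ */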
+
+
+/* 1, -1/3!, 1/5!, -1/7! */
+static const float sin_constants[4] = { 1.0,
+ -1.0f / (3 * 2 * 1),
+ 1.0f / (5 * 4 * 3 * 2 * 1),
+ -1.0f / (7 * 6 * 5 * 4 * 3 * 2 * 1)
+};
+
+/* 1, -1/2!, 1/4!, -1/6! */
+static const float cos_constants[4] = { 1.0,
+ -1.0f / (2 * 1),
+ 1.0f / (4 * 3 * 2 * 1),
+ -1.0f / (6 * 5 * 4 * 3 * 2 * 1)
+};
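+
+/* For reference, the series these coefficients evaluate (standard Taylor
+ * expansions around zero):
+ *
+ *    sin(x) ~= x - x^3/3! + x^5/5! - x^7/7!
+ *    cos(x) ~= 1 - x^2/2! + x^4/4! - x^6/6!
+ *
+ * The SIN/COS/SCS cases in i915_translate_instruction() build a vector of
+ * powers of x with a few MULs and then evaluate the whole polynomial with a
+ * single DP4 against one of these constant vectors.
+ */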
+
+
+
+/**
+ * component-wise negation of ureg
+ */
+static INLINE int
+negate(int reg, int x, int y, int z, int w)
+{
+ /* Another neat thing about the UREG representation */
+ return reg ^ (((x & 1) << UREG_CHANNEL_X_NEGATE_SHIFT) |
+ ((y & 1) << UREG_CHANNEL_Y_NEGATE_SHIFT) |
+ ((z & 1) << UREG_CHANNEL_Z_NEGATE_SHIFT) |
+ ((w & 1) << UREG_CHANNEL_W_NEGATE_SHIFT));
+}
+
+
+/**
+ * In the event of a translation failure, we'll generate a simple color
+ * pass-through program.
+ */
+static void
+i915_use_passthrough_shader(struct i915_fragment_shader *fs)
+{
+ fs->program = (uint *) MALLOC(sizeof(passthrough));
+ if (fs->program) {
+ memcpy(fs->program, passthrough, sizeof(passthrough));
+ fs->program_len = Elements(passthrough);
+ }
+ fs->num_constants = 0;
+}
+
+
+void
+i915_program_error(struct i915_fp_compile *p, const char *msg, ...)
+{
+ va_list args;
+ char buffer[1024];
+
+ debug_printf("i915_program_error: ");
+ va_start( args, msg );
+ util_vsnprintf( buffer, sizeof(buffer), msg, args );
+ va_end( args );
+   debug_printf("%s", buffer);
+ debug_printf("\n");
+
+ p->error = 1;
+}
+
+
+
+/**
+ * Construct a ureg for the given source register. Will emit
+ * constants, apply swizzling and negation as needed.
+ */
+static uint
+src_vector(struct i915_fp_compile *p,
+ const struct tgsi_full_src_register *source)
+{
+ uint index = source->SrcRegister.Index;
+ uint src = 0, sem_name, sem_ind;
+
+ switch (source->SrcRegister.File) {
+ case TGSI_FILE_TEMPORARY:
+ if (source->SrcRegister.Index >= I915_MAX_TEMPORARY) {
+ i915_program_error(p, "Exceeded max temporary reg");
+ return 0;
+ }
+ src = UREG(REG_TYPE_R, index);
+ break;
+ case TGSI_FILE_INPUT:
+ /* XXX: Packing COL1, FOGC into a single attribute works for
+ * texenv programs, but will fail for real fragment programs
+ * that use these attributes and expect them to be a full 4
+ * components wide. Could use a texcoord to pass these
+ * attributes if necessary, but that won't work in the general
+ * case.
+ *
+ * We also use a texture coordinate to pass wpos when possible.
+ */
+
+ sem_name = p->shader->info.input_semantic_name[index];
+ sem_ind = p->shader->info.input_semantic_index[index];
+
+ switch (sem_name) {
+ case TGSI_SEMANTIC_POSITION:
+ debug_printf("SKIP SEM POS\n");
+ /*
+ assert(p->wpos_tex != -1);
+ src = i915_emit_decl(p, REG_TYPE_T, p->wpos_tex, D0_CHANNEL_ALL);
+ */
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ if (sem_ind == 0) {
+ src = i915_emit_decl(p, REG_TYPE_T, T_DIFFUSE, D0_CHANNEL_ALL);
+ }
+ else {
+ /* secondary color */
+ assert(sem_ind == 1);
+ src = i915_emit_decl(p, REG_TYPE_T, T_SPECULAR, D0_CHANNEL_XYZ);
+ src = swizzle(src, X, Y, Z, ONE);
+ }
+ break;
+ case TGSI_SEMANTIC_FOG:
+ src = i915_emit_decl(p, REG_TYPE_T, T_FOG_W, D0_CHANNEL_W);
+ src = swizzle(src, W, W, W, W);
+ break;
+ case TGSI_SEMANTIC_GENERIC:
+ /* usually a texcoord */
+ src = i915_emit_decl(p, REG_TYPE_T, T_TEX0 + sem_ind, D0_CHANNEL_ALL);
+ break;
+ default:
+ i915_program_error(p, "Bad source->Index");
+ return 0;
+ }
+ break;
+
+ case TGSI_FILE_IMMEDIATE:
+ assert(index < p->num_immediates);
+ index = p->immediates_map[index];
+ /* fall-through */
+ case TGSI_FILE_CONSTANT:
+ src = UREG(REG_TYPE_CONST, index);
+ break;
+
+ default:
+ i915_program_error(p, "Bad source->File");
+ return 0;
+ }
+
+ if (source->SrcRegister.Extended) {
+ src = swizzle(src,
+ source->SrcRegisterExtSwz.ExtSwizzleX,
+ source->SrcRegisterExtSwz.ExtSwizzleY,
+ source->SrcRegisterExtSwz.ExtSwizzleZ,
+ source->SrcRegisterExtSwz.ExtSwizzleW);
+ }
+ else {
+ src = swizzle(src,
+ source->SrcRegister.SwizzleX,
+ source->SrcRegister.SwizzleY,
+ source->SrcRegister.SwizzleZ,
+ source->SrcRegister.SwizzleW);
+ }
+
+
+ /* There's both negate-all-components and per-component negation.
+ * Try to handle both here.
+ */
+ {
+ int nx = source->SrcRegisterExtSwz.NegateX;
+ int ny = source->SrcRegisterExtSwz.NegateY;
+ int nz = source->SrcRegisterExtSwz.NegateZ;
+ int nw = source->SrcRegisterExtSwz.NegateW;
+ if (source->SrcRegister.Negate) {
+ nx = !nx;
+ ny = !ny;
+ nz = !nz;
+ nw = !nw;
+ }
+ src = negate(src, nx, ny, nz, nw);
+ }
+
+ /* no abs() or post-abs negation */
+#if 0
+ /* XXX assertions disabled to allow arbfplight.c to run */
+ /* XXX enable these assertions, or fix things */
+ assert(!source->SrcRegisterExtMod.Absolute);
+ assert(!source->SrcRegisterExtMod.Negate);
+#endif
+ return src;
+}
+
+
+/**
+ * Construct a ureg for a destination register.
+ */
+static uint
+get_result_vector(struct i915_fp_compile *p,
+ const struct tgsi_full_dst_register *dest)
+{
+ switch (dest->DstRegister.File) {
+ case TGSI_FILE_OUTPUT:
+ {
+ uint sem_name = p->shader->info.output_semantic_name[dest->DstRegister.Index];
+ switch (sem_name) {
+ case TGSI_SEMANTIC_POSITION:
+ return UREG(REG_TYPE_OD, 0);
+ case TGSI_SEMANTIC_COLOR:
+ return UREG(REG_TYPE_OC, 0);
+ default:
+ i915_program_error(p, "Bad inst->DstReg.Index/semantics");
+ return 0;
+ }
+ }
+ case TGSI_FILE_TEMPORARY:
+ return UREG(REG_TYPE_R, dest->DstRegister.Index);
+ default:
+ i915_program_error(p, "Bad inst->DstReg.File");
+ return 0;
+ }
+}
+
+
+/**
+ * Compute flags for saturation and writemask.
+ */
+static uint
+get_result_flags(const struct tgsi_full_instruction *inst)
+{
+ const uint writeMask
+ = inst->FullDstRegisters[0].DstRegister.WriteMask;
+ uint flags = 0x0;
+
+ if (inst->Instruction.Saturate == TGSI_SAT_ZERO_ONE)
+ flags |= A0_DEST_SATURATE;
+
+ if (writeMask & TGSI_WRITEMASK_X)
+ flags |= A0_DEST_CHANNEL_X;
+ if (writeMask & TGSI_WRITEMASK_Y)
+ flags |= A0_DEST_CHANNEL_Y;
+ if (writeMask & TGSI_WRITEMASK_Z)
+ flags |= A0_DEST_CHANNEL_Z;
+ if (writeMask & TGSI_WRITEMASK_W)
+ flags |= A0_DEST_CHANNEL_W;
+
+ return flags;
+}
+
+
+/**
+ * Convert TGSI_TEXTURE_x token to DO_SAMPLE_TYPE_x token
+ */
+static uint
+translate_tex_src_target(struct i915_fp_compile *p, uint tex)
+{
+ switch (tex) {
+ case TGSI_TEXTURE_SHADOW1D:
+ /* fall-through */
+ case TGSI_TEXTURE_1D:
+ return D0_SAMPLE_TYPE_2D;
+
+ case TGSI_TEXTURE_SHADOW2D:
+ /* fall-through */
+ case TGSI_TEXTURE_2D:
+ return D0_SAMPLE_TYPE_2D;
+
+ case TGSI_TEXTURE_SHADOWRECT:
+ /* fall-through */
+ case TGSI_TEXTURE_RECT:
+ return D0_SAMPLE_TYPE_2D;
+
+ case TGSI_TEXTURE_3D:
+ return D0_SAMPLE_TYPE_VOLUME;
+
+ case TGSI_TEXTURE_CUBE:
+ return D0_SAMPLE_TYPE_CUBE;
+
+ default:
+ i915_program_error(p, "TexSrc type");
+ return 0;
+ }
+}
+
+
+/**
+ * Generate texel lookup instruction.
+ */
+static void
+emit_tex(struct i915_fp_compile *p,
+ const struct tgsi_full_instruction *inst,
+ uint opcode)
+{
+ uint texture = inst->InstructionExtTexture.Texture;
+ uint unit = inst->FullSrcRegisters[1].SrcRegister.Index;
+ uint tex = translate_tex_src_target( p, texture );
+ uint sampler = i915_emit_decl(p, REG_TYPE_S, unit, tex);
+ uint coord = src_vector( p, &inst->FullSrcRegisters[0]);
+
+ i915_emit_texld( p,
+ get_result_vector( p, &inst->FullDstRegisters[0] ),
+ get_result_flags( inst ),
+ sampler,
+ coord,
+ opcode);
+}
+
+
+/**
+ * Generate a simple arithmetic instruction
+ * \param opcode the i915 opcode
+ * \param numArgs the number of input/src arguments
+ */
+static void
+emit_simple_arith(struct i915_fp_compile *p,
+ const struct tgsi_full_instruction *inst,
+ uint opcode, uint numArgs)
+{
+ uint arg1, arg2, arg3;
+
+ assert(numArgs <= 3);
+
+ arg1 = (numArgs < 1) ? 0 : src_vector( p, &inst->FullSrcRegisters[0] );
+ arg2 = (numArgs < 2) ? 0 : src_vector( p, &inst->FullSrcRegisters[1] );
+ arg3 = (numArgs < 3) ? 0 : src_vector( p, &inst->FullSrcRegisters[2] );
+
+ i915_emit_arith( p,
+ opcode,
+ get_result_vector( p, &inst->FullDstRegisters[0]),
+ get_result_flags( inst ), 0,
+ arg1,
+ arg2,
+ arg3 );
+}
+
+
+/** As above, but swap the first two src regs */
+static void
+emit_simple_arith_swap2(struct i915_fp_compile *p,
+ const struct tgsi_full_instruction *inst,
+ uint opcode, uint numArgs)
+{
+ struct tgsi_full_instruction inst2;
+
+ assert(numArgs == 2);
+
+ /* transpose first two registers */
+ inst2 = *inst;
+ inst2.FullSrcRegisters[0] = inst->FullSrcRegisters[1];
+ inst2.FullSrcRegisters[1] = inst->FullSrcRegisters[0];
+
+ emit_simple_arith(p, &inst2, opcode, numArgs);
+}
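+
+/* Rationale: the opcode set used here has SGE and SLT but no SLE or SGT, and
+ *
+ *    a <= b  is the same as  b >= a,      a > b  is the same as  b < a,
+ *
+ * so SLE and SGT are emitted below as SGE/SLT with the operands swapped.
+ */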
+
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846
+#endif
+
+/*
+ * Translate TGSI instruction to i915 instruction.
+ *
+ * Possible concerns:
+ *
+ * SIN, COS -- could use another Taylor step?
+ * LIT -- results seem a little different to sw mesa
+ * LOG -- different to mesa on negative numbers, but this is conformant.
+ */
+static void
+i915_translate_instruction(struct i915_fp_compile *p,
+ const struct tgsi_full_instruction *inst)
+{
+ uint writemask;
+ uint src0, src1, src2, flags;
+ uint tmp = 0;
+
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_ABS:
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+ i915_emit_arith(p,
+ A0_MAX,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ get_result_flags(inst), 0,
+ src0, negate(src0, 1, 1, 1, 1), 0);
+ break;
+
+ case TGSI_OPCODE_ADD:
+ emit_simple_arith(p, inst, A0_ADD, 2);
+ break;
+
+ case TGSI_OPCODE_CMP:
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+ src1 = src_vector(p, &inst->FullSrcRegisters[1]);
+ src2 = src_vector(p, &inst->FullSrcRegisters[2]);
+ i915_emit_arith(p, A0_CMP,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ get_result_flags(inst),
+ 0, src0, src2, src1); /* NOTE: order of src2, src1 */
+ break;
+
+ case TGSI_OPCODE_COS:
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+ tmp = i915_get_utemp(p);
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_X, 0,
+ src0, i915_emit_const1f(p, 1.0f / (float) (M_PI * 2.0)), 0);
+
+ i915_emit_arith(p, A0_MOD, tmp, A0_DEST_CHANNEL_X, 0, tmp, 0, 0);
+
+      /* By choosing different Taylor constants, could get rid of this mul:
+ */
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_X, 0,
+ tmp, i915_emit_const1f(p, (float) (M_PI * 2.0)), 0);
+
+ /*
+       * t0.xy = MUL x.xx11, x.x111  ; x^2, x, 1, 1
+ * t0 = MUL t0.xyxy t0.xx11 ; x^4, x^3, x^2, 1
+ * t0 = MUL t0.xxz1 t0.z111 ; x^6 x^4 x^2 1
+ * result = DP4 t0, cos_constants
+ */
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_XY, 0,
+ swizzle(tmp, X, X, ONE, ONE),
+ swizzle(tmp, X, ONE, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_XYZ, 0,
+ swizzle(tmp, X, Y, X, ONE),
+ swizzle(tmp, X, X, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_XYZ, 0,
+ swizzle(tmp, X, X, Z, ONE),
+ swizzle(tmp, Z, ONE, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_DP4,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ get_result_flags(inst), 0,
+ swizzle(tmp, ONE, Z, Y, X),
+ i915_emit_const4fv(p, cos_constants), 0);
+ break;
+
+ case TGSI_OPCODE_DP3:
+ emit_simple_arith(p, inst, A0_DP3, 2);
+ break;
+
+ case TGSI_OPCODE_DP4:
+ emit_simple_arith(p, inst, A0_DP4, 2);
+ break;
+
+ case TGSI_OPCODE_DPH:
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+ src1 = src_vector(p, &inst->FullSrcRegisters[1]);
+
+ i915_emit_arith(p,
+ A0_DP4,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ get_result_flags(inst), 0,
+ swizzle(src0, X, Y, Z, ONE), src1, 0);
+ break;
+
+ case TGSI_OPCODE_DST:
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+ src1 = src_vector(p, &inst->FullSrcRegisters[1]);
+
+ /* result[0] = 1 * 1;
+ * result[1] = a[1] * b[1];
+ * result[2] = a[2] * 1;
+ * result[3] = 1 * b[3];
+ */
+ i915_emit_arith(p,
+ A0_MUL,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ get_result_flags(inst), 0,
+ swizzle(src0, ONE, Y, Z, ONE),
+ swizzle(src1, ONE, Y, ONE, W), 0);
+ break;
+
+ case TGSI_OPCODE_END:
+ /* no-op */
+ break;
+
+ case TGSI_OPCODE_EX2:
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+
+ i915_emit_arith(p,
+ A0_EXP,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ get_result_flags(inst), 0,
+ swizzle(src0, X, X, X, X), 0, 0);
+ break;
+
+ case TGSI_OPCODE_FLR:
+ emit_simple_arith(p, inst, A0_FLR, 1);
+ break;
+
+ case TGSI_OPCODE_FRC:
+ emit_simple_arith(p, inst, A0_FRC, 1);
+ break;
+
+ case TGSI_OPCODE_KIL:
+ /* kill if src[0].x < 0 || src[0].y < 0 ... */
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+ tmp = i915_get_utemp(p);
+
+ i915_emit_texld(p,
+ tmp, /* dest reg: a dummy reg */
+ A0_DEST_CHANNEL_ALL, /* dest writemask */
+ 0, /* sampler */
+ src0, /* coord*/
+ T0_TEXKILL); /* opcode */
+ break;
+
+ case TGSI_OPCODE_KILP:
+ assert(0); /* not tested yet */
+ break;
+
+ case TGSI_OPCODE_LG2:
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+
+ i915_emit_arith(p,
+ A0_LOG,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ get_result_flags(inst), 0,
+ swizzle(src0, X, X, X, X), 0, 0);
+ break;
+
+ case TGSI_OPCODE_LIT:
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+ tmp = i915_get_utemp(p);
+
+ /* tmp = max( a.xyzw, a.00zw )
+ * XXX: Clamp tmp.w to -128..128
+ * tmp.y = log(tmp.y)
+ * tmp.y = tmp.w * tmp.y
+ * tmp.y = exp(tmp.y)
+ * result = cmp (a.11-x1, a.1x01, a.1xy1 )
+ */
+ i915_emit_arith(p, A0_MAX, tmp, A0_DEST_CHANNEL_ALL, 0,
+ src0, swizzle(src0, ZERO, ZERO, Z, W), 0);
+
+ i915_emit_arith(p, A0_LOG, tmp, A0_DEST_CHANNEL_Y, 0,
+ swizzle(tmp, Y, Y, Y, Y), 0, 0);
+
+ i915_emit_arith(p, A0_MUL, tmp, A0_DEST_CHANNEL_Y, 0,
+ swizzle(tmp, ZERO, Y, ZERO, ZERO),
+ swizzle(tmp, ZERO, W, ZERO, ZERO), 0);
+
+ i915_emit_arith(p, A0_EXP, tmp, A0_DEST_CHANNEL_Y, 0,
+ swizzle(tmp, Y, Y, Y, Y), 0, 0);
+
+ i915_emit_arith(p, A0_CMP,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ get_result_flags(inst), 0,
+ negate(swizzle(tmp, ONE, ONE, X, ONE), 0, 0, 1, 0),
+ swizzle(tmp, ONE, X, ZERO, ONE),
+ swizzle(tmp, ONE, X, Y, ONE));
+
+ break;
+
+ case TGSI_OPCODE_LRP:
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+ src1 = src_vector(p, &inst->FullSrcRegisters[1]);
+ src2 = src_vector(p, &inst->FullSrcRegisters[2]);
+ flags = get_result_flags(inst);
+ tmp = i915_get_utemp(p);
+
+ /* b*a + c*(1-a)
+ *
+ * b*a + c - ca
+ *
+ * tmp = b*a + c,
+ * result = (-c)*a + tmp
+ */
+ i915_emit_arith(p, A0_MAD, tmp,
+ flags & A0_DEST_CHANNEL_ALL, 0, src1, src0, src2);
+
+ i915_emit_arith(p, A0_MAD,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ flags, 0, negate(src2, 1, 1, 1, 1), src0, tmp);
+ break;
+
+ case TGSI_OPCODE_MAD:
+ emit_simple_arith(p, inst, A0_MAD, 3);
+ break;
+
+ case TGSI_OPCODE_MAX:
+ emit_simple_arith(p, inst, A0_MAX, 2);
+ break;
+
+ case TGSI_OPCODE_MIN:
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+ src1 = src_vector(p, &inst->FullSrcRegisters[1]);
+ tmp = i915_get_utemp(p);
+ flags = get_result_flags(inst);
+
+ i915_emit_arith(p,
+ A0_MAX,
+ tmp, flags & A0_DEST_CHANNEL_ALL, 0,
+ negate(src0, 1, 1, 1, 1),
+ negate(src1, 1, 1, 1, 1), 0);
+
+ i915_emit_arith(p,
+ A0_MOV,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ flags, 0, negate(tmp, 1, 1, 1, 1), 0, 0);
+ break;
+
+ case TGSI_OPCODE_MOV:
+ case TGSI_OPCODE_SWZ:
+ emit_simple_arith(p, inst, A0_MOV, 1);
+ break;
+
+ case TGSI_OPCODE_MUL:
+ emit_simple_arith(p, inst, A0_MUL, 2);
+ break;
+
+ case TGSI_OPCODE_POW:
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+ src1 = src_vector(p, &inst->FullSrcRegisters[1]);
+ tmp = i915_get_utemp(p);
+ flags = get_result_flags(inst);
+
+ /* XXX: masking on intermediate values, here and elsewhere.
+ */
+ i915_emit_arith(p,
+ A0_LOG,
+ tmp, A0_DEST_CHANNEL_X, 0,
+ swizzle(src0, X, X, X, X), 0, 0);
+
+ i915_emit_arith(p, A0_MUL, tmp, A0_DEST_CHANNEL_X, 0, tmp, src1, 0);
+
+ i915_emit_arith(p,
+ A0_EXP,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ flags, 0, swizzle(tmp, X, X, X, X), 0, 0);
+ break;
+
+ case TGSI_OPCODE_RET:
+ /* XXX: no-op? */
+ break;
+
+ case TGSI_OPCODE_RCP:
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+
+ i915_emit_arith(p,
+ A0_RCP,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ get_result_flags(inst), 0,
+ swizzle(src0, X, X, X, X), 0, 0);
+ break;
+
+ case TGSI_OPCODE_RSQ:
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+
+ i915_emit_arith(p,
+ A0_RSQ,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ get_result_flags(inst), 0,
+ swizzle(src0, X, X, X, X), 0, 0);
+ break;
+
+ case TGSI_OPCODE_SCS:
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+ tmp = i915_get_utemp(p);
+
+ /*
+       * t0.xy = MUL x.xx11, x.x111  ; x^2, x, 1, 1
+ * t0 = MUL t0.xyxy t0.xx11 ; x^4, x^3, x^2, x
+ * t1 = MUL t0.xyyw t0.yz11 ; x^7 x^5 x^3 x
+ * scs.x = DP4 t1, sin_constants
+ * t1 = MUL t0.xxz1 t0.z111 ; x^6 x^4 x^2 1
+ * scs.y = DP4 t1, cos_constants
+ */
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_XY, 0,
+ swizzle(src0, X, X, ONE, ONE),
+ swizzle(src0, X, ONE, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_ALL, 0,
+ swizzle(tmp, X, Y, X, Y),
+ swizzle(tmp, X, X, ONE, ONE), 0);
+
+ writemask = inst->FullDstRegisters[0].DstRegister.WriteMask;
+
+ if (writemask & TGSI_WRITEMASK_Y) {
+ uint tmp1;
+
+ if (writemask & TGSI_WRITEMASK_X)
+ tmp1 = i915_get_utemp(p);
+ else
+ tmp1 = tmp;
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp1, A0_DEST_CHANNEL_ALL, 0,
+ swizzle(tmp, X, Y, Y, W),
+                         swizzle(tmp, Y, Z, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_DP4,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ A0_DEST_CHANNEL_Y, 0,
+ swizzle(tmp1, W, Z, Y, X),
+ i915_emit_const4fv(p, sin_constants), 0);
+ }
+
+ if (writemask & TGSI_WRITEMASK_X) {
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_XYZ, 0,
+ swizzle(tmp, X, X, Z, ONE),
+ swizzle(tmp, Z, ONE, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_DP4,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ A0_DEST_CHANNEL_X, 0,
+ swizzle(tmp, ONE, Z, Y, X),
+ i915_emit_const4fv(p, cos_constants), 0);
+ }
+ break;
+
+ case TGSI_OPCODE_SGE:
+ emit_simple_arith(p, inst, A0_SGE, 2);
+ break;
+
+ case TGSI_OPCODE_SLE:
+ /* like SGE, but swap reg0, reg1 */
+ emit_simple_arith_swap2(p, inst, A0_SGE, 2);
+ break;
+
+ case TGSI_OPCODE_SIN:
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+ tmp = i915_get_utemp(p);
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_X, 0,
+ src0, i915_emit_const1f(p, 1.0f / (float) (M_PI * 2.0)), 0);
+
+ i915_emit_arith(p, A0_MOD, tmp, A0_DEST_CHANNEL_X, 0, tmp, 0, 0);
+
+      /* By choosing different Taylor constants, could get rid of this mul:
+ */
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_X, 0,
+ tmp, i915_emit_const1f(p, (float) (M_PI * 2.0)), 0);
+
+ /*
+       * t0.xy = MUL x.xx11, x.x111  ; x^2, x, 1, 1
+ * t0 = MUL t0.xyxy t0.xx11 ; x^4, x^3, x^2, x
+ * t1 = MUL t0.xyyw t0.yz11 ; x^7 x^5 x^3 x
+ * result = DP4 t1.wzyx, sin_constants
+ */
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_XY, 0,
+ swizzle(tmp, X, X, ONE, ONE),
+ swizzle(tmp, X, ONE, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_ALL, 0,
+ swizzle(tmp, X, Y, X, Y),
+ swizzle(tmp, X, X, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_ALL, 0,
+ swizzle(tmp, X, Y, Y, W),
+                      swizzle(tmp, Y, Z, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_DP4,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ get_result_flags(inst), 0,
+ swizzle(tmp, W, Z, Y, X),
+ i915_emit_const4fv(p, sin_constants), 0);
+ break;
+
+ case TGSI_OPCODE_SLT:
+ emit_simple_arith(p, inst, A0_SLT, 2);
+ break;
+
+ case TGSI_OPCODE_SGT:
+ /* like SLT, but swap reg0, reg1 */
+ emit_simple_arith_swap2(p, inst, A0_SLT, 2);
+ break;
+
+ case TGSI_OPCODE_SUB:
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+ src1 = src_vector(p, &inst->FullSrcRegisters[1]);
+
+ i915_emit_arith(p,
+ A0_ADD,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ get_result_flags(inst), 0,
+ src0, negate(src1, 1, 1, 1, 1), 0);
+ break;
+
+ case TGSI_OPCODE_TEX:
+ emit_tex(p, inst, T0_TEXLD);
+ break;
+
+ case TGSI_OPCODE_TXB:
+ emit_tex(p, inst, T0_TEXLDB);
+ break;
+
+ case TGSI_OPCODE_TXP:
+ emit_tex(p, inst, T0_TEXLDP);
+ break;
+
+ case TGSI_OPCODE_XPD:
+ /* Cross product:
+ * result.x = src0.y * src1.z - src0.z * src1.y;
+ * result.y = src0.z * src1.x - src0.x * src1.z;
+ * result.z = src0.x * src1.y - src0.y * src1.x;
+ * result.w = undef;
+ */
+ src0 = src_vector(p, &inst->FullSrcRegisters[0]);
+ src1 = src_vector(p, &inst->FullSrcRegisters[1]);
+ tmp = i915_get_utemp(p);
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_ALL, 0,
+ swizzle(src0, Z, X, Y, ONE),
+ swizzle(src1, Y, Z, X, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_MAD,
+ get_result_vector(p, &inst->FullDstRegisters[0]),
+ get_result_flags(inst), 0,
+ swizzle(src0, Y, Z, X, ONE),
+ swizzle(src1, Z, X, Y, ONE),
+ negate(tmp, 1, 1, 1, 0));
+ break;
+
+ default:
+ i915_program_error(p, "bad opcode %d", inst->Instruction.Opcode);
+ p->error = 1;
+ return;
+ }
+
+ i915_release_utemps(p);
+}
+
+
+/**
+ * Translate TGSI fragment shader into i915 hardware instructions.
+ * \param p the translation state
+ * \param tokens the TGSI token array
+ */
+static void
+i915_translate_instructions(struct i915_fp_compile *p,
+ const struct tgsi_token *tokens)
+{
+ struct i915_fragment_shader *ifs = p->shader;
+ struct tgsi_parse_context parse;
+
+ tgsi_parse_init( &parse, tokens );
+
+ while( !tgsi_parse_end_of_tokens( &parse ) ) {
+
+ tgsi_parse_token( &parse );
+
+ switch( parse.FullToken.Token.Type ) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ if (parse.FullToken.FullDeclaration.Declaration.File
+ == TGSI_FILE_CONSTANT) {
+ uint i;
+ for (i = parse.FullToken.FullDeclaration.DeclarationRange.First;
+ i <= parse.FullToken.FullDeclaration.DeclarationRange.Last;
+ i++) {
+ assert(ifs->constant_flags[i] == 0x0);
+ ifs->constant_flags[i] = I915_CONSTFLAG_USER;
+ ifs->num_constants = MAX2(ifs->num_constants, i + 1);
+ }
+ }
+ else if (parse.FullToken.FullDeclaration.Declaration.File
+ == TGSI_FILE_TEMPORARY) {
+ uint i;
+ for (i = parse.FullToken.FullDeclaration.DeclarationRange.First;
+ i <= parse.FullToken.FullDeclaration.DeclarationRange.Last;
+ i++) {
+ assert(i < I915_MAX_TEMPORARY);
+ /* XXX just use shader->info->file_mask[TGSI_FILE_TEMPORARY] */
+ p->temp_flag |= (1 << i); /* mark temp as used */
+ }
+ }
+ break;
+
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ {
+ const struct tgsi_full_immediate *imm
+ = &parse.FullToken.FullImmediate;
+ const uint pos = p->num_immediates++;
+ uint j;
+ assert( imm->Immediate.NrTokens <= 4 + 1 );
+ for (j = 0; j < imm->Immediate.NrTokens - 1; j++) {
+ p->immediates[pos][j] = imm->u[j].Float;
+ }
+ }
+ break;
+
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ if (p->first_instruction) {
+ /* resolve location of immediates */
+ uint i, j;
+ for (i = 0; i < p->num_immediates; i++) {
+ /* find constant slot for this immediate */
+ for (j = 0; j < I915_MAX_CONSTANT; j++) {
+ if (ifs->constant_flags[j] == 0x0) {
+ memcpy(ifs->constants[j],
+ p->immediates[i],
+ 4 * sizeof(float));
+ /*printf("immediate %d maps to const %d\n", i, j);*/
+ ifs->constant_flags[j] = 0xf; /* all four comps used */
+ p->immediates_map[i] = j;
+ ifs->num_constants = MAX2(ifs->num_constants, j + 1);
+ break;
+ }
+ }
+ }
+
+ p->first_instruction = FALSE;
+ }
+
+ i915_translate_instruction(p, &parse.FullToken.FullInstruction);
+ break;
+
+ default:
+ assert( 0 );
+ }
+
+ } /* while */
+
+ tgsi_parse_free (&parse);
+}
+
+
+static struct i915_fp_compile *
+i915_init_compile(struct i915_context *i915,
+ struct i915_fragment_shader *ifs)
+{
+ struct i915_fp_compile *p = CALLOC_STRUCT(i915_fp_compile);
+
+ p->shader = ifs;
+
+ /* Put new constants at end of const buffer, growing downward.
+ * The problem is we don't know how many user-defined constants might
+ * be specified with pipe->set_constant_buffer().
+ * Should pre-scan the user's program to determine the highest-numbered
+ * constant referenced.
+ */
+ ifs->num_constants = 0;
+ memset(ifs->constant_flags, 0, sizeof(ifs->constant_flags));
+
+ p->first_instruction = TRUE;
+
+ p->nr_tex_indirect = 1; /* correct? */
+ p->nr_tex_insn = 0;
+ p->nr_alu_insn = 0;
+ p->nr_decl_insn = 0;
+
+ p->csr = p->program;
+ p->decl = p->declarations;
+ p->decl_s = 0;
+ p->decl_t = 0;
+   p->temp_flag = ~0x0 << I915_MAX_TEMPORARY;   /* temps beyond the hw limit are marked busy */
+ p->utemp_flag = ~0x7;
+
+ p->wpos_tex = -1;
+
+ /* initialize the first program word */
+ *(p->decl++) = _3DSTATE_PIXEL_SHADER_PROGRAM;
+
+ return p;
+}
+
+
+/* Copy compile results to the fragment program struct and destroy the
+ * compilation context.
+ */
+static void
+i915_fini_compile(struct i915_context *i915, struct i915_fp_compile *p)
+{
+ struct i915_fragment_shader *ifs = p->shader;
+ unsigned long program_size = (unsigned long) (p->csr - p->program);
+ unsigned long decl_size = (unsigned long) (p->decl - p->declarations);
+
+ if (p->nr_tex_indirect > I915_MAX_TEX_INDIRECT)
+ i915_program_error(p, "Exceeded max nr indirect texture lookups");
+
+ if (p->nr_tex_insn > I915_MAX_TEX_INSN)
+ i915_program_error(p, "Exceeded max TEX instructions");
+
+ if (p->nr_alu_insn > I915_MAX_ALU_INSN)
+ i915_program_error(p, "Exceeded max ALU instructions");
+
+ if (p->nr_decl_insn > I915_MAX_DECL_INSN)
+ i915_program_error(p, "Exceeded max DECL instructions");
+
+ if (p->error) {
+ p->NumNativeInstructions = 0;
+ p->NumNativeAluInstructions = 0;
+ p->NumNativeTexInstructions = 0;
+ p->NumNativeTexIndirections = 0;
+
+ i915_use_passthrough_shader(ifs);
+ }
+ else {
+ p->NumNativeInstructions
+ = p->nr_alu_insn + p->nr_tex_insn + p->nr_decl_insn;
+ p->NumNativeAluInstructions = p->nr_alu_insn;
+ p->NumNativeTexInstructions = p->nr_tex_insn;
+ p->NumNativeTexIndirections = p->nr_tex_indirect;
+
+ /* patch in the program length */
+ p->declarations[0] |= program_size + decl_size - 2;
+
+ /* Copy compilation results to fragment program struct:
+ */
+ assert(!ifs->program);
+ ifs->program
+ = (uint *) MALLOC((program_size + decl_size) * sizeof(uint));
+ if (ifs->program) {
+ ifs->program_len = program_size + decl_size;
+
+ memcpy(ifs->program,
+ p->declarations,
+ decl_size * sizeof(uint));
+
+ memcpy(ifs->program + decl_size,
+ p->program,
+ program_size * sizeof(uint));
+ }
+ }
+
+ /* Release the compilation struct:
+ */
+ FREE(p);
+}
+
+
+/**
+ * Find an unused texture coordinate slot to use for fragment WPOS.
+ * Update p->wpos_tex with the result (-1 if no unused texcoord slot is found).
+ */
+static void
+i915_find_wpos_space(struct i915_fp_compile *p)
+{
+#if 0
+ const uint inputs
+ = p->shader->inputs_read | (1 << TGSI_ATTRIB_POS); /*XXX hack*/
+ uint i;
+
+ p->wpos_tex = -1;
+
+ if (inputs & (1 << TGSI_ATTRIB_POS)) {
+ for (i = 0; i < I915_TEX_UNITS; i++) {
+ if ((inputs & (1 << (TGSI_ATTRIB_TEX0 + i))) == 0) {
+ p->wpos_tex = i;
+ return;
+ }
+ }
+
+ i915_program_error(p, "No free texcoord for wpos value");
+ }
+#else
+ if (p->shader->info.input_semantic_name[0] == TGSI_SEMANTIC_POSITION) {
+ /* frag shader using the fragment position input */
+#if 0
+ assert(0);
+#endif
+ }
+#endif
+}
+
+
+
+
+/**
+ * Rather than trying to intercept and jiggle depth writes during
+ * emit, just move the value into its correct position at the end of
+ * the program:
+ */
+static void
+i915_fixup_depth_write(struct i915_fp_compile *p)
+{
+ /* XXX assuming pos/depth is always in output[0] */
+ if (p->shader->info.output_semantic_name[0] == TGSI_SEMANTIC_POSITION) {
+ const uint depth = UREG(REG_TYPE_OD, 0);
+
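+      /* The translated program writes the TGSI position output (depth in
+       * .z) straight into OD, but the hardware reads output depth from
+       * OD.w (see REG_TYPE_OD in i915_reg.h), so copy .z into .w here.
+       */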
+ i915_emit_arith(p,
+ A0_MOV, /* opcode */
+ depth, /* dest reg */
+ A0_DEST_CHANNEL_W, /* write mask */
+ 0, /* saturate? */
+ swizzle(depth, X, Y, Z, Z), /* src0 */
+ 0, 0 /* src1, src2 */);
+ }
+}
+
+
+void
+i915_translate_fragment_program( struct i915_context *i915,
+ struct i915_fragment_shader *fs)
+{
+ struct i915_fp_compile *p = i915_init_compile(i915, fs);
+ const struct tgsi_token *tokens = fs->state.tokens;
+
+ i915_find_wpos_space(p);
+
+#if 0
+ tgsi_dump(tokens, 0);
+#endif
+
+ i915_translate_instructions(p, tokens);
+ i915_fixup_depth_write(p);
+
+ i915_fini_compile(i915, p);
+}
diff --git a/src/gallium/drivers/i915simple/i915_prim_emit.c b/src/gallium/drivers/i915simple/i915_prim_emit.c
new file mode 100644
index 0000000000..d9a5c40ab9
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_prim_emit.c
@@ -0,0 +1,219 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "draw/draw_pipe.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "util/u_pack_color.h"
+
+#include "i915_context.h"
+#include "i915_reg.h"
+#include "i915_state.h"
+#include "i915_batch.h"
+
+
+
+/**
+ * Primitive emit to hardware. No support for vertex buffers or any
+ * nice fast paths.
+ */
+struct setup_stage {
+ struct draw_stage stage; /**< This must be first (base class) */
+
+ struct i915_context *i915;
+};
+
+
+
+/**
+ * Basically a cast wrapper.
+ */
+static INLINE struct setup_stage *setup_stage( struct draw_stage *stage )
+{
+ return (struct setup_stage *)stage;
+}
+
+
+/**
+ * Extract the needed fields from vertex_header and emit i915 dwords.
+ * Recall that the vertices are constructed by the 'draw' module and
+ * have a couple of slots at the beginning (1-dword header, 4-dword
+ * clip pos) that we ignore here.
+ */
+static INLINE void
+emit_hw_vertex( struct i915_context *i915,
+ const struct vertex_header *vertex)
+{
+ const struct vertex_info *vinfo = &i915->current.vertex_info;
+ uint i;
+ uint count = 0; /* for debug/sanity */
+
+ assert(!i915->dirty);
+
+ for (i = 0; i < vinfo->num_attribs; i++) {
+ const uint j = vinfo->attrib[i].src_index;
+ const float *attrib = vertex->data[j];
+ switch (vinfo->attrib[i].emit) {
+ case EMIT_1F:
+ OUT_BATCH( fui(attrib[0]) );
+ count++;
+ break;
+ case EMIT_2F:
+ OUT_BATCH( fui(attrib[0]) );
+ OUT_BATCH( fui(attrib[1]) );
+ count += 2;
+ break;
+ case EMIT_3F:
+ OUT_BATCH( fui(attrib[0]) );
+ OUT_BATCH( fui(attrib[1]) );
+ OUT_BATCH( fui(attrib[2]) );
+ count += 3;
+ break;
+ case EMIT_4F:
+ OUT_BATCH( fui(attrib[0]) );
+ OUT_BATCH( fui(attrib[1]) );
+ OUT_BATCH( fui(attrib[2]) );
+ OUT_BATCH( fui(attrib[3]) );
+ count += 4;
+ break;
+ case EMIT_4UB:
+ OUT_BATCH( pack_ub4(float_to_ubyte( attrib[2] ),
+ float_to_ubyte( attrib[1] ),
+ float_to_ubyte( attrib[0] ),
+ float_to_ubyte( attrib[3] )) );
+ count += 1;
+ break;
+ default:
+ assert(0);
+ }
+ }
+ assert(count == vinfo->size);
+}
+
+
+
+static INLINE void
+emit_prim( struct draw_stage *stage,
+ struct prim_header *prim,
+ unsigned hwprim,
+ unsigned nr )
+{
+ struct i915_context *i915 = setup_stage(stage)->i915;
+ unsigned vertex_size;
+ unsigned i;
+
+ if (i915->dirty)
+ i915_update_derived( i915 );
+
+ if (i915->hardware_dirty)
+ i915_emit_hardware_state( i915 );
+
+ /* need to do this after validation! */
+ vertex_size = i915->current.vertex_info.size * 4; /* in bytes */
+ assert(vertex_size >= 12); /* never smaller than 12 bytes */
+
+ if (!BEGIN_BATCH( 1 + nr * vertex_size / 4, 0 )) {
+ FLUSH_BATCH(NULL);
+
+ /* Make sure state is re-emitted after a flush:
+ */
+ i915_update_derived( i915 );
+ i915_emit_hardware_state( i915 );
+
+ if (!BEGIN_BATCH( 1 + nr * vertex_size / 4, 0 )) {
+ assert(0);
+ return;
+ }
+ }
+
+   /* Emit each point, line or triangle as a single inline primitive
+    * packet.  I told you this was simple.
+    */
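+   /* The length field of the inline packet is the total packet size in
+    * dwords (4-byte header plus nr * vertex_size bytes of vertex data)
+    * minus the customary bias of 2.
+    */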
+ OUT_BATCH(_3DPRIMITIVE |
+ hwprim |
+ ((4 + vertex_size * nr)/4 - 2));
+
+ for (i = 0; i < nr; i++)
+ emit_hw_vertex(i915, prim->v[i]);
+}
+
+
+static void
+setup_tri( struct draw_stage *stage, struct prim_header *prim )
+{
+ emit_prim( stage, prim, PRIM3D_TRILIST, 3 );
+}
+
+
+static void
+setup_line(struct draw_stage *stage, struct prim_header *prim)
+{
+ emit_prim( stage, prim, PRIM3D_LINELIST, 2 );
+}
+
+
+static void
+setup_point(struct draw_stage *stage, struct prim_header *prim)
+{
+ emit_prim( stage, prim, PRIM3D_POINTLIST, 1 );
+}
+
+
+static void setup_flush( struct draw_stage *stage, unsigned flags )
+{
+}
+
+static void reset_stipple_counter( struct draw_stage *stage )
+{
+}
+
+static void render_destroy( struct draw_stage *stage )
+{
+ FREE( stage );
+}
+
+
+/**
+ * Create a new primitive setup/render stage. This gets plugged into
+ * the 'draw' module's pipeline.
+ */
+struct draw_stage *i915_draw_render_stage( struct i915_context *i915 )
+{
+ struct setup_stage *setup = CALLOC_STRUCT(setup_stage);
+
+ setup->i915 = i915;
+ setup->stage.draw = i915->draw;
+ setup->stage.point = setup_point;
+ setup->stage.line = setup_line;
+ setup->stage.tri = setup_tri;
+ setup->stage.flush = setup_flush;
+ setup->stage.reset_stipple_counter = reset_stipple_counter;
+ setup->stage.destroy = render_destroy;
+
+ return &setup->stage;
+}
diff --git a/src/gallium/drivers/i915simple/i915_prim_vbuf.c b/src/gallium/drivers/i915simple/i915_prim_vbuf.c
new file mode 100644
index 0000000000..b3a7774fd6
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_prim_vbuf.c
@@ -0,0 +1,616 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Build post-transformation, post-clipping vertex buffers and element
+ * lists by hooking into the end of the primitive pipeline and
+ * manipulating the vertex_id field in the vertex headers.
+ *
+ * XXX: work in progress
+ *
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
+ * \author Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "draw/draw_context.h"
+#include "draw/draw_vbuf.h"
+#include "util/u_debug.h"
+#include "pipe/p_inlines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "util/u_fifo.h"
+
+#include "i915_context.h"
+#include "i915_reg.h"
+#include "i915_batch.h"
+#include "i915_state.h"
+
+
+/**
+ * Primitive renderer for i915.
+ */
+struct i915_vbuf_render {
+ struct vbuf_render base;
+
+ struct i915_context *i915;
+
+ /** Vertex size in bytes */
+ size_t vertex_size;
+
+ /** Software primitive */
+ unsigned prim;
+
+ /** Hardware primitive */
+ unsigned hwprim;
+
+   /** If non-zero, the primitive type for which indices must be generated in software */
+ unsigned fallback;
+
+ /* Stuff for the vbo */
+ struct intel_buffer *vbo;
+ size_t vbo_size;
+ size_t vbo_offset;
+ void *vbo_ptr;
+ size_t vbo_max_used;
+
+ /* stuff for the pool */
+ struct util_fifo *pool_fifo;
+ unsigned pool_used;
+ unsigned pool_buffer_size;
+ boolean pool_not_used;
+};
+
+
+/**
+ * Basically a cast wrapper.
+ */
+static INLINE struct i915_vbuf_render *
+i915_vbuf_render(struct vbuf_render *render)
+{
+ assert(render);
+ return (struct i915_vbuf_render *)render;
+}
+
+static const struct vertex_info *
+i915_vbuf_render_get_vertex_info(struct vbuf_render *render)
+{
+ struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
+ struct i915_context *i915 = i915_render->i915;
+
+ if (i915->dirty) {
+ /* make sure we have up to date vertex layout */
+ i915_update_derived(i915);
+ }
+
+ return &i915->current.vertex_info;
+}
+
+static boolean
+i915_vbuf_render_reserve(struct i915_vbuf_render *i915_render, size_t size)
+{
+ struct i915_context *i915 = i915_render->i915;
+
+ if (i915_render->vbo_size < size + i915_render->vbo_offset)
+ return FALSE;
+
+ if (i915->vbo_flushed)
+ return FALSE;
+
+ return TRUE;
+}
+
+static void
+i915_vbuf_render_new_buf(struct i915_vbuf_render *i915_render, size_t size)
+{
+ struct i915_context *i915 = i915_render->i915;
+ struct intel_winsys *iws = i915->iws;
+
+ if (i915_render->vbo) {
+ if (i915_render->pool_not_used)
+ iws->buffer_destroy(iws, i915_render->vbo);
+ else
+ u_fifo_add(i915_render->pool_fifo, i915_render->vbo);
+ i915_render->vbo = NULL;
+ }
+
+ i915->vbo_flushed = 0;
+
+ i915_render->vbo_size = MAX2(size, i915_render->pool_buffer_size);
+ i915_render->vbo_offset = 0;
+
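+   /* Requests that don't match the pool buffer size get a dedicated,
+    * throw-away buffer; otherwise recycle a buffer from the FIFO pool,
+    * flushing the batch first if several pool buffers have already been
+    * handed out since the last flush.
+    */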
+ if (i915_render->vbo_size != i915_render->pool_buffer_size) {
+ i915_render->pool_not_used = TRUE;
+ i915_render->vbo = iws->buffer_create(iws, i915_render->vbo_size, 64,
+ INTEL_NEW_VERTEX);
+ } else {
+ i915_render->pool_not_used = FALSE;
+
+ if (i915_render->pool_used >= 2) {
+ FLUSH_BATCH(NULL);
+ i915->vbo_flushed = 0;
+ i915_render->pool_used = 0;
+ }
+ u_fifo_pop(i915_render->pool_fifo, (void**)&i915_render->vbo);
+ }
+}
+
+static boolean
+i915_vbuf_render_allocate_vertices(struct vbuf_render *render,
+ ushort vertex_size,
+ ushort nr_vertices)
+{
+ struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
+ struct i915_context *i915 = i915_render->i915;
+ size_t size = (size_t)vertex_size * (size_t)nr_vertices;
+
+ /* FIXME: handle failure */
+ assert(!i915->vbo);
+
+ if (!i915_vbuf_render_reserve(i915_render, size)) {
+
+ if (i915->vbo_flushed)
+ i915_render->pool_used = 0;
+
+ i915_vbuf_render_new_buf(i915_render, size);
+ }
+
+ i915_render->vertex_size = vertex_size;
+ i915->vbo = i915_render->vbo;
+ i915->vbo_offset = i915_render->vbo_offset;
+ i915->dirty |= I915_NEW_VBO;
+
+ if (!i915_render->vbo)
+ return FALSE;
+ return TRUE;
+}
+
+static void *
+i915_vbuf_render_map_vertices(struct vbuf_render *render)
+{
+ struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
+ struct i915_context *i915 = i915_render->i915;
+ struct intel_winsys *iws = i915->iws;
+
+ if (i915->vbo_flushed)
+      debug_printf("%s: bad vbo flush occurred, stalling on hw\n", __FUNCTION__);
+
+ i915_render->vbo_ptr = iws->buffer_map(iws, i915_render->vbo, TRUE);
+
+ return (unsigned char *)i915_render->vbo_ptr + i915->vbo_offset;
+}
+
+static void
+i915_vbuf_render_unmap_vertices(struct vbuf_render *render,
+ ushort min_index,
+ ushort max_index)
+{
+ struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
+ struct i915_context *i915 = i915_render->i915;
+ struct intel_winsys *iws = i915->iws;
+
+   i915_render->vbo_max_used = MAX2(i915_render->vbo_max_used,
+                                    i915_render->vertex_size * (max_index + 1));
+ iws->buffer_unmap(iws, i915_render->vbo);
+}
+
+static boolean
+i915_vbuf_render_set_primitive(struct vbuf_render *render,
+ unsigned prim)
+{
+ struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
+ i915_render->prim = prim;
+
+ switch(prim) {
+ case PIPE_PRIM_POINTS:
+ i915_render->hwprim = PRIM3D_POINTLIST;
+ i915_render->fallback = 0;
+ return TRUE;
+ case PIPE_PRIM_LINES:
+ i915_render->hwprim = PRIM3D_LINELIST;
+ i915_render->fallback = 0;
+ return TRUE;
+ case PIPE_PRIM_LINE_LOOP:
+ i915_render->hwprim = PRIM3D_LINELIST;
+ i915_render->fallback = PIPE_PRIM_LINE_LOOP;
+ return TRUE;
+ case PIPE_PRIM_LINE_STRIP:
+ i915_render->hwprim = PRIM3D_LINESTRIP;
+ i915_render->fallback = 0;
+ return TRUE;
+ case PIPE_PRIM_TRIANGLES:
+ i915_render->hwprim = PRIM3D_TRILIST;
+ i915_render->fallback = 0;
+ return TRUE;
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ i915_render->hwprim = PRIM3D_TRISTRIP;
+ i915_render->fallback = 0;
+ return TRUE;
+ case PIPE_PRIM_TRIANGLE_FAN:
+ i915_render->hwprim = PRIM3D_TRIFAN;
+ i915_render->fallback = 0;
+ return TRUE;
+ case PIPE_PRIM_QUADS:
+ i915_render->hwprim = PRIM3D_TRILIST;
+ i915_render->fallback = PIPE_PRIM_QUADS;
+ return TRUE;
+ case PIPE_PRIM_QUAD_STRIP:
+ i915_render->hwprim = PRIM3D_TRILIST;
+ i915_render->fallback = PIPE_PRIM_QUAD_STRIP;
+ return TRUE;
+ case PIPE_PRIM_POLYGON:
+ i915_render->hwprim = PRIM3D_POLY;
+ i915_render->fallback = 0;
+ return TRUE;
+ default:
+ /* FIXME: Actually, can handle a lot more just fine... */
+ return FALSE;
+ }
+}
+
+/**
+ * Used for fallbacks in draw_arrays
+ */
+static void
+draw_arrays_generate_indices(struct vbuf_render *render,
+ unsigned start, uint nr,
+ unsigned type)
+{
+ struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
+ struct i915_context *i915 = i915_render->i915;
+ unsigned i;
+ unsigned end = start + nr;
+ switch(type) {
+ case 0:
+ for (i = start; i+1 < end; i += 2)
+ OUT_BATCH((i+0) | (i+1) << 16);
+ if (i < end)
+ OUT_BATCH(i);
+ break;
+ case PIPE_PRIM_LINE_LOOP:
+ if (nr >= 2) {
+ for (i = start + 1; i < end; i++)
+         OUT_BATCH((i-1) | (i+0) << 16);
+      OUT_BATCH((i-1) | (start) << 16);
+ }
+ break;
+ case PIPE_PRIM_QUADS:
+ for (i = start; i + 3 < end; i += 4) {
+ OUT_BATCH((i+0) | (i+1) << 16);
+ OUT_BATCH((i+3) | (i+1) << 16);
+ OUT_BATCH((i+2) | (i+3) << 16);
+ }
+ break;
+ case PIPE_PRIM_QUAD_STRIP:
+ for (i = start; i + 3 < end; i += 2) {
+ OUT_BATCH((i+0) | (i+1) << 16);
+ OUT_BATCH((i+3) | (i+2) << 16);
+ OUT_BATCH((i+0) | (i+3) << 16);
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
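+/**
+ * Compute how many indices draw_arrays_generate_indices() will emit for
+ * 'nr' vertices of primitive 'type' (type 0 means no fallback), e.g. each
+ * quad (4 vertices) becomes two triangles (6 indices).
+ */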
+static unsigned
+draw_arrays_calc_nr_indices(uint nr, unsigned type)
+{
+ switch (type) {
+ case 0:
+ return nr;
+ case PIPE_PRIM_LINE_LOOP:
+ if (nr >= 2)
+ return nr * 2;
+ else
+ return 0;
+ case PIPE_PRIM_QUADS:
+ return (nr / 4) * 6;
+ case PIPE_PRIM_QUAD_STRIP:
+ return ((nr - 2) / 2) * 6;
+ default:
+ assert(0);
+ return 0;
+ }
+}
+
+static void
+draw_arrays_fallback(struct vbuf_render *render,
+ unsigned start,
+ uint nr)
+{
+ struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
+ struct i915_context *i915 = i915_render->i915;
+ unsigned nr_indices;
+
+ if (i915->dirty)
+ i915_update_derived(i915);
+
+ if (i915->hardware_dirty)
+ i915_emit_hardware_state(i915);
+
+ nr_indices = draw_arrays_calc_nr_indices(nr, i915_render->fallback);
+ if (!nr_indices)
+ return;
+
+ if (!BEGIN_BATCH(1 + (nr_indices + 1)/2, 1)) {
+ FLUSH_BATCH(NULL);
+
+ /* Make sure state is re-emitted after a flush:
+ */
+ i915_update_derived(i915);
+ i915_emit_hardware_state(i915);
+ i915->vbo_flushed = 1;
+
+ if (!BEGIN_BATCH(1 + (nr_indices + 1)/2, 1)) {
+ assert(0);
+ goto out;
+ }
+ }
+ OUT_BATCH(_3DPRIMITIVE |
+ PRIM_INDIRECT |
+ i915_render->hwprim |
+ PRIM_INDIRECT_ELTS |
+ nr_indices);
+
+ draw_arrays_generate_indices(render, start, nr, i915_render->fallback);
+
+out:
+ return;
+}
+
+static void
+i915_vbuf_render_draw_arrays(struct vbuf_render *render,
+ unsigned start,
+ uint nr)
+{
+ struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
+
+ if (i915_render->fallback) {
+ draw_arrays_fallback(render, start, nr);
+ return;
+ }
+
+ /* JB: TODO submit direct cmds */
+ draw_arrays_fallback(render, start, nr);
+}
+
+/**
+ * Used for both normal and fallback emitting of indices.
+ * If type is zero, normal (non-fallback) operation is assumed.
+ */
+static void
+draw_generate_indices(struct vbuf_render *render,
+ const ushort *indices,
+ uint nr_indices,
+ unsigned type)
+{
+ struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
+ struct i915_context *i915 = i915_render->i915;
+ unsigned i;
+
+ switch(type) {
+ case 0:
+ for (i = 0; i + 1 < nr_indices; i += 2) {
+ OUT_BATCH(indices[i] | indices[i+1] << 16);
+ }
+ if (i < nr_indices) {
+ OUT_BATCH(indices[i]);
+ }
+ break;
+ case PIPE_PRIM_LINE_LOOP:
+ if (nr_indices >= 2) {
+ for (i = 1; i < nr_indices; i++)
+ OUT_BATCH(indices[i-1] | indices[i] << 16);
+ OUT_BATCH(indices[i-1] | indices[0] << 16);
+ }
+ break;
+ case PIPE_PRIM_QUADS:
+ for (i = 0; i + 3 < nr_indices; i += 4) {
+ OUT_BATCH(indices[i+0] | indices[i+1] << 16);
+ OUT_BATCH(indices[i+3] | indices[i+1] << 16);
+ OUT_BATCH(indices[i+2] | indices[i+3] << 16);
+ }
+ break;
+ case PIPE_PRIM_QUAD_STRIP:
+ for (i = 0; i + 3 < nr_indices; i += 2) {
+ OUT_BATCH(indices[i+0] | indices[i+1] << 16);
+ OUT_BATCH(indices[i+3] | indices[i+2] << 16);
+ OUT_BATCH(indices[i+0] | indices[i+3] << 16);
+ }
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
+
+static unsigned
+draw_calc_nr_indices(uint nr_indices, unsigned type)
+{
+ switch (type) {
+ case 0:
+ return nr_indices;
+ case PIPE_PRIM_LINE_LOOP:
+ if (nr_indices >= 2)
+ return nr_indices * 2;
+ else
+ return 0;
+ case PIPE_PRIM_QUADS:
+ return (nr_indices / 4) * 6;
+ case PIPE_PRIM_QUAD_STRIP:
+ return ((nr_indices - 2) / 2) * 6;
+ default:
+ assert(0);
+ return 0;
+ }
+}
+
+static void
+i915_vbuf_render_draw(struct vbuf_render *render,
+ const ushort *indices,
+ uint nr_indices)
+{
+ struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
+ struct i915_context *i915 = i915_render->i915;
+ unsigned save_nr_indices;
+
+ save_nr_indices = nr_indices;
+
+ nr_indices = draw_calc_nr_indices(nr_indices, i915_render->fallback);
+ if (!nr_indices)
+ return;
+
+ if (i915->dirty)
+ i915_update_derived(i915);
+
+ if (i915->hardware_dirty)
+ i915_emit_hardware_state(i915);
+
+ if (!BEGIN_BATCH(1 + (nr_indices + 1)/2, 1)) {
+ FLUSH_BATCH(NULL);
+
+ /* Make sure state is re-emitted after a flush:
+ */
+ i915_update_derived(i915);
+ i915_emit_hardware_state(i915);
+ i915->vbo_flushed = 1;
+
+ if (!BEGIN_BATCH(1 + (nr_indices + 1)/2, 1)) {
+ assert(0);
+ goto out;
+ }
+ }
+
+ OUT_BATCH(_3DPRIMITIVE |
+ PRIM_INDIRECT |
+ i915_render->hwprim |
+ PRIM_INDIRECT_ELTS |
+ nr_indices);
+ draw_generate_indices(render,
+ indices,
+ save_nr_indices,
+ i915_render->fallback);
+
+out:
+ return;
+}
+
+static void
+i915_vbuf_render_release_vertices(struct vbuf_render *render)
+{
+ struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
+ struct i915_context *i915 = i915_render->i915;
+
+ assert(i915->vbo);
+
+ i915_render->vbo_offset += i915_render->vbo_max_used;
+ i915_render->vbo_max_used = 0;
+ i915->vbo = NULL;
+ i915->dirty |= I915_NEW_VBO;
+}
+
+static void
+i915_vbuf_render_destroy(struct vbuf_render *render)
+{
+ struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
+ FREE(i915_render);
+}
+
+/**
+ * Create a new primitive render.
+ */
+static struct vbuf_render *
+i915_vbuf_render_create(struct i915_context *i915)
+{
+ struct i915_vbuf_render *i915_render = CALLOC_STRUCT(i915_vbuf_render);
+ struct intel_winsys *iws = i915->iws;
+ int i;
+
+ i915_render->i915 = i915;
+
+ i915_render->base.max_vertex_buffer_bytes = 128*1024;
+
+   /* NOTE: must be small enough that the state and the vertex indices
+    * still fit in a single batch buffer.
+    */
+ i915_render->base.max_indices = 16*1024;
+
+ i915_render->base.get_vertex_info = i915_vbuf_render_get_vertex_info;
+ i915_render->base.allocate_vertices = i915_vbuf_render_allocate_vertices;
+ i915_render->base.map_vertices = i915_vbuf_render_map_vertices;
+ i915_render->base.unmap_vertices = i915_vbuf_render_unmap_vertices;
+ i915_render->base.set_primitive = i915_vbuf_render_set_primitive;
+ i915_render->base.draw = i915_vbuf_render_draw;
+ i915_render->base.draw_arrays = i915_vbuf_render_draw_arrays;
+ i915_render->base.release_vertices = i915_vbuf_render_release_vertices;
+ i915_render->base.destroy = i915_vbuf_render_destroy;
+
+
+ i915_render->vbo = NULL;
+ i915_render->vbo_size = 0;
+ i915_render->vbo_offset = 0;
+
+   i915_render->pool_used = 0;
+ i915_render->pool_buffer_size = 128 * 4096;
+ i915_render->pool_fifo = u_fifo_create(6);
+ for (i = 0; i < 6; i++)
+ u_fifo_add(i915_render->pool_fifo,
+ iws->buffer_create(iws, i915_render->pool_buffer_size, 64,
+ INTEL_NEW_VERTEX));
+
+#if 0
+   /* TODO JB: is this really needed? */
+ i915_render->vbo_ptr = iws->buffer_map(iws, i915_render->vbo, TRUE);
+ iws->buffer_unmap(iws, i915_render->vbo);
+#endif
+
+ return &i915_render->base;
+}
+
+/**
+ * Create a new primitive vbuf/render stage.
+ */
+struct draw_stage *i915_draw_vbuf_stage(struct i915_context *i915)
+{
+ struct vbuf_render *render;
+ struct draw_stage *stage;
+
+ render = i915_vbuf_render_create(i915);
+ if(!render)
+ return NULL;
+
+ stage = draw_vbuf_stage(i915->draw, render);
+ if(!stage) {
+ render->destroy(render);
+ return NULL;
+ }
+ /** TODO JB: this shouldn't be here */
+ draw_set_render(i915->draw, render);
+
+ return stage;
+}
diff --git a/src/gallium/drivers/i915simple/i915_reg.h b/src/gallium/drivers/i915simple/i915_reg.h
new file mode 100644
index 0000000000..04620fec68
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_reg.h
@@ -0,0 +1,978 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef I915_REG_H
+#define I915_REG_H
+
+
+#define I915_SET_FIELD( var, mask, value ) (var &= ~(mask), var |= value)
+
+#define CMD_3D (0x3<<29)
+
+#define PRIM3D_INLINE (CMD_3D | (0x1f<<24))
+#define PRIM3D_TRILIST (0x0<<18)
+#define PRIM3D_TRISTRIP (0x1<<18)
+#define PRIM3D_TRISTRIP_RVRSE (0x2<<18)
+#define PRIM3D_TRIFAN (0x3<<18)
+#define PRIM3D_POLY (0x4<<18)
+#define PRIM3D_LINELIST (0x5<<18)
+#define PRIM3D_LINESTRIP (0x6<<18)
+#define PRIM3D_RECTLIST (0x7<<18)
+#define PRIM3D_POINTLIST (0x8<<18)
+#define PRIM3D_DIB (0x9<<18)
+#define PRIM3D_CLEAR_RECT (0xa<<18)
+#define PRIM3D_ZONE_INIT (0xd<<18)
+#define PRIM3D_MASK (0x1f<<18)
+
+/* p137 */
+#define _3DSTATE_AA_CMD (CMD_3D | (0x06<<24))
+#define AA_LINE_ECAAR_WIDTH_ENABLE (1<<16)
+#define AA_LINE_ECAAR_WIDTH_0_5 0
+#define AA_LINE_ECAAR_WIDTH_1_0 (1<<14)
+#define AA_LINE_ECAAR_WIDTH_2_0 (2<<14)
+#define AA_LINE_ECAAR_WIDTH_4_0 (3<<14)
+#define AA_LINE_REGION_WIDTH_ENABLE (1<<8)
+#define AA_LINE_REGION_WIDTH_0_5 0
+#define AA_LINE_REGION_WIDTH_1_0 (1<<6)
+#define AA_LINE_REGION_WIDTH_2_0 (2<<6)
+#define AA_LINE_REGION_WIDTH_4_0 (3<<6)
+
+/* 3DSTATE_BACKFACE_STENCIL_OPS, p138*/
+#define _3DSTATE_BACKFACE_STENCIL_OPS (CMD_3D | (0x8<<24))
+#define BFO_ENABLE_STENCIL_REF (1<<23)
+#define BFO_STENCIL_REF_SHIFT 15
+#define BFO_STENCIL_REF_MASK (0xff<<15)
+#define BFO_ENABLE_STENCIL_FUNCS (1<<14)
+#define BFO_STENCIL_TEST_SHIFT 11
+#define BFO_STENCIL_TEST_MASK (0x7<<11)
+#define BFO_STENCIL_FAIL_SHIFT 8
+#define BFO_STENCIL_FAIL_MASK (0x7<<8)
+#define BFO_STENCIL_PASS_Z_FAIL_SHIFT 5
+#define BFO_STENCIL_PASS_Z_FAIL_MASK (0x7<<5)
+#define BFO_STENCIL_PASS_Z_PASS_SHIFT 2
+#define BFO_STENCIL_PASS_Z_PASS_MASK (0x7<<2)
+#define BFO_ENABLE_STENCIL_TWO_SIDE (1<<1)
+#define BFO_STENCIL_TWO_SIDE (1<<0)
+
+
+/* 3DSTATE_BACKFACE_STENCIL_MASKS, p140 */
+#define _3DSTATE_BACKFACE_STENCIL_MASKS (CMD_3D | (0x9<<24))
+#define BFM_ENABLE_STENCIL_TEST_MASK (1<<17)
+#define BFM_ENABLE_STENCIL_WRITE_MASK (1<<16)
+#define BFM_STENCIL_TEST_MASK_SHIFT 8
+#define BFM_STENCIL_TEST_MASK_MASK (0xff<<8)
+#define BFM_STENCIL_WRITE_MASK_SHIFT 0
+#define BFM_STENCIL_WRITE_MASK_MASK (0xff<<0)
+
+
+
+/* 3DSTATE_BIN_CONTROL p141 */
+
+/* p143 */
+#define _3DSTATE_BUF_INFO_CMD (CMD_3D | (0x1d<<24) | (0x8e<<16) | 1)
+/* Dword 1 */
+#define BUF_3D_ID_COLOR_BACK (0x3<<24)
+#define BUF_3D_ID_DEPTH (0x7<<24)
+#define BUF_3D_USE_FENCE (1<<23)
+#define BUF_3D_TILED_SURFACE (1<<22)
+#define BUF_3D_TILE_WALK_X 0
+#define BUF_3D_TILE_WALK_Y (1<<21)
+#define BUF_3D_PITCH(x) (((x)/4)<<2)
+/* Dword 2 */
+#define BUF_3D_ADDR(x) ((x) & ~0x3)
+
+
+/* 3DSTATE_CHROMA_KEY */
+
+/* 3DSTATE_CLEAR_PARAMETERS, p150 */
+#define _3DSTATE_CLEAR_PARAMETERS (CMD_3D | (0x1d<<24) | (0x9c<<16) | 5)
+/* Dword 1 */
+#define CLEARPARAM_CLEAR_RECT (1 << 16)
+#define CLEARPARAM_ZONE_INIT (0 << 16)
+#define CLEARPARAM_WRITE_COLOR (1 << 2)
+#define CLEARPARAM_WRITE_DEPTH (1 << 1)
+#define CLEARPARAM_WRITE_STENCIL (1 << 0)
+
+/* 3DSTATE_CONSTANT_BLEND_COLOR, p153 */
+#define _3DSTATE_CONST_BLEND_COLOR_CMD (CMD_3D | (0x1d<<24) | (0x88<<16))
+
+
+
+/* 3DSTATE_COORD_SET_BINDINGS, p154 */
+#define _3DSTATE_COORD_SET_BINDINGS (CMD_3D | (0x16<<24))
+#define CSB_TCB(iunit, eunit) ((eunit)<<(iunit*3))
+
+/* p156 */
+#define _3DSTATE_DFLT_DIFFUSE_CMD (CMD_3D | (0x1d<<24) | (0x99<<16))
+
+/* p157 */
+#define _3DSTATE_DFLT_SPEC_CMD (CMD_3D | (0x1d<<24) | (0x9a<<16))
+
+/* p158 */
+#define _3DSTATE_DFLT_Z_CMD (CMD_3D | (0x1d<<24) | (0x98<<16))
+
+
+/* 3DSTATE_DEPTH_OFFSET_SCALE, p159 */
+#define _3DSTATE_DEPTH_OFFSET_SCALE (CMD_3D | (0x1d<<24) | (0x97<<16))
+/* scale in dword 1 */
+
+
+/* 3DSTATE_DEPTH_SUBRECT_DISABLE, p160 */
+#define _3DSTATE_DEPTH_SUBRECT_DISABLE (CMD_3D | (0x1c<<24) | (0x11<<19) | 0x2)
+
+/* p161 */
+#define _3DSTATE_DST_BUF_VARS_CMD (CMD_3D | (0x1d<<24) | (0x85<<16))
+/* Dword 1 */
+#define TEX_DEFAULT_COLOR_OGL (0<<30)
+#define TEX_DEFAULT_COLOR_D3D (1<<30)
+#define ZR_EARLY_DEPTH (1<<29)
+#define LOD_PRECLAMP_OGL (1<<28)
+#define LOD_PRECLAMP_D3D (0<<28)
+#define DITHER_FULL_ALWAYS (0<<26)
+#define DITHER_FULL_ON_FB_BLEND (1<<26)
+#define DITHER_CLAMPED_ALWAYS (2<<26)
+#define LINEAR_GAMMA_BLEND_32BPP (1<<25)
+#define DEBUG_DISABLE_ENH_DITHER (1<<24)
+#define DSTORG_HORT_BIAS(x) ((x)<<20)
+#define DSTORG_VERT_BIAS(x) ((x)<<16)
+#define COLOR_4_2_2_CHNL_WRT_ALL 0
+#define COLOR_4_2_2_CHNL_WRT_Y (1<<12)
+#define COLOR_4_2_2_CHNL_WRT_CR (2<<12)
+#define COLOR_4_2_2_CHNL_WRT_CB (3<<12)
+#define COLOR_4_2_2_CHNL_WRT_CRCB (4<<12)
+#define COLOR_BUF_8BIT 0
+#define COLOR_BUF_RGB555 (1<<8)
+#define COLOR_BUF_RGB565 (2<<8)
+#define COLOR_BUF_ARGB8888 (3<<8)
+#define DEPTH_FRMT_16_FIXED 0
+#define DEPTH_FRMT_16_FLOAT (1<<2)
+#define DEPTH_FRMT_24_FIXED_8_OTHER (2<<2)
+#define VERT_LINE_STRIDE_1 (1<<1)
+#define VERT_LINE_STRIDE_0 (0<<1)
+#define VERT_LINE_STRIDE_OFS_1 1
+#define VERT_LINE_STRIDE_OFS_0 0
+
+/* p166 */
+#define _3DSTATE_DRAW_RECT_CMD (CMD_3D|(0x1d<<24)|(0x80<<16)|3)
+/* Dword 1 */
+#define DRAW_RECT_DIS_DEPTH_OFS (1<<30)
+#define DRAW_DITHER_OFS_X(x) ((x)<<26)
+#define DRAW_DITHER_OFS_Y(x) ((x)<<24)
+/* Dword 2 */
+#define DRAW_YMIN(x) ((x)<<16)
+#define DRAW_XMIN(x) (x)
+/* Dword 3 */
+#define DRAW_YMAX(x) ((x)<<16)
+#define DRAW_XMAX(x) (x)
+/* Dword 4 */
+#define DRAW_YORG(x) ((x)<<16)
+#define DRAW_XORG(x) (x)
+
+
+/* 3DSTATE_FILTER_COEFFICIENTS_4X4, p170 */
+
+/* 3DSTATE_FILTER_COEFFICIENTS_6X5, p172 */
+
+
+/* _3DSTATE_FOG_COLOR, p173 */
+#define _3DSTATE_FOG_COLOR_CMD (CMD_3D|(0x15<<24))
+#define FOG_COLOR_RED(x) ((x)<<16)
+#define FOG_COLOR_GREEN(x) ((x)<<8)
+#define FOG_COLOR_BLUE(x) (x)
+
+/* _3DSTATE_FOG_MODE, p174 */
+#define _3DSTATE_FOG_MODE_CMD (CMD_3D|(0x1d<<24)|(0x89<<16)|2)
+/* Dword 1 */
+#define FMC1_FOGFUNC_MODIFY_ENABLE (1<<31)
+#define FMC1_FOGFUNC_VERTEX (0<<28)
+#define FMC1_FOGFUNC_PIXEL_EXP (1<<28)
+#define FMC1_FOGFUNC_PIXEL_EXP2 (2<<28)
+#define FMC1_FOGFUNC_PIXEL_LINEAR (3<<28)
+#define FMC1_FOGFUNC_MASK (3<<28)
+#define FMC1_FOGINDEX_MODIFY_ENABLE (1<<27)
+#define FMC1_FOGINDEX_Z (0<<25)
+#define FMC1_FOGINDEX_W (1<<25)
+#define FMC1_C1_C2_MODIFY_ENABLE (1<<24)
+#define FMC1_DENSITY_MODIFY_ENABLE (1<<23)
+#define FMC1_C1_ONE (1<<13)
+#define FMC1_C1_MASK (0xffff<<4)
+/* Dword 2 */
+#define FMC2_C2_ONE (1<<16)
+/* Dword 3 */
+#define FMC3_D_ONE (1<<16)
+
+
+
+/* _3DSTATE_INDEPENDENT_ALPHA_BLEND, p177 */
+#define _3DSTATE_INDEPENDENT_ALPHA_BLEND_CMD (CMD_3D|(0x0b<<24))
+#define IAB_MODIFY_ENABLE (1<<23)
+#define IAB_ENABLE (1<<22)
+#define IAB_MODIFY_FUNC (1<<21)
+#define IAB_FUNC_SHIFT 16
+#define IAB_MODIFY_SRC_FACTOR (1<<11)
+#define IAB_SRC_FACTOR_SHIFT 6
+#define IAB_SRC_FACTOR_MASK (BLENDFACT_MASK<<6)
+#define IAB_MODIFY_DST_FACTOR (1<<5)
+#define IAB_DST_FACTOR_SHIFT 0
+#define IAB_DST_FACTOR_MASK (BLENDFACT_MASK<<0)
+
+
+#define BLENDFUNC_ADD 0x0
+#define BLENDFUNC_SUBTRACT 0x1
+#define BLENDFUNC_REVERSE_SUBTRACT 0x2
+#define BLENDFUNC_MIN 0x3
+#define BLENDFUNC_MAX 0x4
+#define BLENDFUNC_MASK 0x7
+
+/* 3DSTATE_LOAD_INDIRECT, p180 */
+
+#define _3DSTATE_LOAD_INDIRECT (CMD_3D|(0x1d<<24)|(0x7<<16))
+#define LI0_STATE_STATIC_INDIRECT (0x01<<8)
+#define LI0_STATE_DYNAMIC_INDIRECT (0x02<<8)
+#define LI0_STATE_SAMPLER (0x04<<8)
+#define LI0_STATE_MAP (0x08<<8)
+#define LI0_STATE_PROGRAM (0x10<<8)
+#define LI0_STATE_CONSTANTS (0x20<<8)
+
+#define SIS0_BUFFER_ADDRESS(x) ((x)&~0x3)
+#define SIS0_FORCE_LOAD (1<<1)
+#define SIS0_BUFFER_VALID (1<<0)
+#define SIS1_BUFFER_LENGTH(x) ((x)&0xff)
+
+#define DIS0_BUFFER_ADDRESS(x) ((x)&~0x3)
+#define DIS0_BUFFER_RESET (1<<1)
+#define DIS0_BUFFER_VALID (1<<0)
+
+#define SSB0_BUFFER_ADDRESS(x) ((x)&~0x3)
+#define SSB0_FORCE_LOAD (1<<1)
+#define SSB0_BUFFER_VALID (1<<0)
+#define SSB1_BUFFER_LENGTH(x) ((x)&0xff)
+
+#define MSB0_BUFFER_ADDRESS(x) ((x)&~0x3)
+#define MSB0_FORCE_LOAD (1<<1)
+#define MSB0_BUFFER_VALID (1<<0)
+#define MSB1_BUFFER_LENGTH(x) ((x)&0xff)
+
+#define PSP0_BUFFER_ADDRESS(x) ((x)&~0x3)
+#define PSP0_FORCE_LOAD (1<<1)
+#define PSP0_BUFFER_VALID (1<<0)
+#define PSP1_BUFFER_LENGTH(x) ((x)&0xff)
+
+#define PSC0_BUFFER_ADDRESS(x) ((x)&~0x3)
+#define PSC0_FORCE_LOAD (1<<1)
+#define PSC0_BUFFER_VALID (1<<0)
+#define PSC1_BUFFER_LENGTH(x) ((x)&0xff)
+
+
+
+
+
+/* _3DSTATE_RASTERIZATION_RULES */
+#define _3DSTATE_RASTER_RULES_CMD (CMD_3D|(0x07<<24))
+#define ENABLE_POINT_RASTER_RULE (1<<15)
+#define OGL_POINT_RASTER_RULE (1<<13)
+#define ENABLE_TEXKILL_3D_4D (1<<10)
+#define TEXKILL_3D (0<<9)
+#define TEXKILL_4D (1<<9)
+#define ENABLE_LINE_STRIP_PROVOKE_VRTX (1<<8)
+#define ENABLE_TRI_FAN_PROVOKE_VRTX (1<<5)
+#define LINE_STRIP_PROVOKE_VRTX(x) ((x)<<6)
+#define TRI_FAN_PROVOKE_VRTX(x) ((x)<<3)
+
+/* _3DSTATE_SCISSOR_ENABLE, p256 */
+#define _3DSTATE_SCISSOR_ENABLE_CMD (CMD_3D|(0x1c<<24)|(0x10<<19))
+#define ENABLE_SCISSOR_RECT ((1<<1) | 1)
+#define DISABLE_SCISSOR_RECT (1<<1)
+
+/* _3DSTATE_SCISSOR_RECTANGLE_0, p257 */
+#define _3DSTATE_SCISSOR_RECT_0_CMD (CMD_3D|(0x1d<<24)|(0x81<<16)|1)
+/* Dword 1 */
+#define SCISSOR_RECT_0_YMIN(x) ((x)<<16)
+#define SCISSOR_RECT_0_XMIN(x) (x)
+/* Dword 2 */
+#define SCISSOR_RECT_0_YMAX(x) ((x)<<16)
+#define SCISSOR_RECT_0_XMAX(x) (x)
+
+/* p189 */
+#define _3DSTATE_LOAD_STATE_IMMEDIATE_1 ((0x3<<29)|(0x1d<<24)|(0x04<<16))
+#define I1_LOAD_S(n) (1<<(4+n))
+
+#define S0_VB_OFFSET_MASK 0xffffffc
+#define S0_AUTO_CACHE_INV_DISABLE (1<<0)
+
+#define S1_VERTEX_WIDTH_SHIFT 24
+#define S1_VERTEX_WIDTH_MASK (0x3f<<24)
+#define S1_VERTEX_PITCH_SHIFT 16
+#define S1_VERTEX_PITCH_MASK (0x3f<<16)
+
+#define TEXCOORDFMT_2D 0x0
+#define TEXCOORDFMT_3D 0x1
+#define TEXCOORDFMT_4D 0x2
+#define TEXCOORDFMT_1D 0x3
+#define TEXCOORDFMT_2D_16 0x4
+#define TEXCOORDFMT_4D_16 0x5
+#define TEXCOORDFMT_NOT_PRESENT 0xf
+#define S2_TEXCOORD_FMT0_MASK 0xf
+#define S2_TEXCOORD_FMT1_SHIFT 4
+#define S2_TEXCOORD_FMT(unit, type) ((type)<<(unit*4))
+#define S2_TEXCOORD_NONE (~0)
+
+/* S3 not interesting */
+
+#define S4_POINT_WIDTH_SHIFT 23
+#define S4_POINT_WIDTH_MASK (0x1ff<<23)
+#define S4_LINE_WIDTH_SHIFT 19
+#define S4_LINE_WIDTH_ONE (0x2<<19)
+#define S4_LINE_WIDTH_MASK (0xf<<19)
+#define S4_FLATSHADE_ALPHA (1<<18)
+#define S4_FLATSHADE_FOG (1<<17)
+#define S4_FLATSHADE_SPECULAR (1<<16)
+#define S4_FLATSHADE_COLOR (1<<15)
+#define S4_CULLMODE_BOTH (0<<13)
+#define S4_CULLMODE_NONE (1<<13)
+#define S4_CULLMODE_CW (2<<13)
+#define S4_CULLMODE_CCW (3<<13)
+#define S4_CULLMODE_MASK (3<<13)
+#define S4_VFMT_POINT_WIDTH (1<<12)
+#define S4_VFMT_SPEC_FOG (1<<11)
+#define S4_VFMT_COLOR (1<<10)
+#define S4_VFMT_DEPTH_OFFSET (1<<9)
+#define S4_VFMT_XYZ (1<<6)
+#define S4_VFMT_XYZW (2<<6)
+#define S4_VFMT_XY (3<<6)
+#define S4_VFMT_XYW (4<<6)
+#define S4_VFMT_XYZW_MASK (7<<6)
+#define S4_FORCE_DEFAULT_DIFFUSE (1<<5)
+#define S4_FORCE_DEFAULT_SPECULAR (1<<4)
+#define S4_LOCAL_DEPTH_OFFSET_ENABLE (1<<3)
+#define S4_VFMT_FOG_PARAM (1<<2)
+#define S4_SPRITE_POINT_ENABLE (1<<1)
+#define S4_LINE_ANTIALIAS_ENABLE (1<<0)
+
+#define S4_VFMT_MASK (S4_VFMT_POINT_WIDTH | \
+ S4_VFMT_SPEC_FOG | \
+ S4_VFMT_COLOR | \
+ S4_VFMT_DEPTH_OFFSET | \
+ S4_VFMT_XYZW_MASK | \
+ S4_VFMT_FOG_PARAM)
+
+
+#define S5_WRITEDISABLE_ALPHA (1<<31)
+#define S5_WRITEDISABLE_RED (1<<30)
+#define S5_WRITEDISABLE_GREEN (1<<29)
+#define S5_WRITEDISABLE_BLUE (1<<28)
+#define S5_WRITEDISABLE_MASK (0xf<<28)
+#define S5_FORCE_DEFAULT_POINT_SIZE (1<<27)
+#define S5_LAST_PIXEL_ENABLE (1<<26)
+#define S5_GLOBAL_DEPTH_OFFSET_ENABLE (1<<25)
+#define S5_FOG_ENABLE (1<<24)
+#define S5_STENCIL_REF_SHIFT 16
+#define S5_STENCIL_REF_MASK (0xff<<16)
+#define S5_STENCIL_TEST_FUNC_SHIFT 13
+#define S5_STENCIL_TEST_FUNC_MASK (0x7<<13)
+#define S5_STENCIL_FAIL_SHIFT 10
+#define S5_STENCIL_FAIL_MASK (0x7<<10)
+#define S5_STENCIL_PASS_Z_FAIL_SHIFT 7
+#define S5_STENCIL_PASS_Z_FAIL_MASK (0x7<<7)
+#define S5_STENCIL_PASS_Z_PASS_SHIFT 4
+#define S5_STENCIL_PASS_Z_PASS_MASK (0x7<<4)
+#define S5_STENCIL_WRITE_ENABLE (1<<3)
+#define S5_STENCIL_TEST_ENABLE (1<<2)
+#define S5_COLOR_DITHER_ENABLE (1<<1)
+#define S5_LOGICOP_ENABLE (1<<0)
+
+
+#define S6_ALPHA_TEST_ENABLE (1<<31)
+#define S6_ALPHA_TEST_FUNC_SHIFT 28
+#define S6_ALPHA_TEST_FUNC_MASK (0x7<<28)
+#define S6_ALPHA_REF_SHIFT 20
+#define S6_ALPHA_REF_MASK (0xff<<20)
+#define S6_DEPTH_TEST_ENABLE (1<<19)
+#define S6_DEPTH_TEST_FUNC_SHIFT 16
+#define S6_DEPTH_TEST_FUNC_MASK (0x7<<16)
+#define S6_CBUF_BLEND_ENABLE (1<<15)
+#define S6_CBUF_BLEND_FUNC_SHIFT 12
+#define S6_CBUF_BLEND_FUNC_MASK (0x7<<12)
+#define S6_CBUF_SRC_BLEND_FACT_SHIFT 8
+#define S6_CBUF_SRC_BLEND_FACT_MASK (0xf<<8)
+#define S6_CBUF_DST_BLEND_FACT_SHIFT 4
+#define S6_CBUF_DST_BLEND_FACT_MASK (0xf<<4)
+#define S6_DEPTH_WRITE_ENABLE (1<<3)
+#define S6_COLOR_WRITE_ENABLE (1<<2)
+#define S6_TRISTRIP_PV_SHIFT 0
+#define S6_TRISTRIP_PV_MASK (0x3<<0)
+
+#define S7_DEPTH_OFFSET_CONST_MASK ~0
+
+
+
+#define DST_BLND_FACT(f) ((f)<<S6_CBUF_DST_BLEND_FACT_SHIFT)
+#define SRC_BLND_FACT(f) ((f)<<S6_CBUF_SRC_BLEND_FACT_SHIFT)
+#define DST_ABLND_FACT(f) ((f)<<IAB_DST_FACTOR_SHIFT)
+#define SRC_ABLND_FACT(f) ((f)<<IAB_SRC_FACTOR_SHIFT)
+
+
+
+
+/* 3DSTATE_MAP_DEINTERLACER_PARAMETERS */
+
+/* 3DSTATE_MAP_PALETTE_LOAD_32, p206 */
+#define _3DSTATE_MAP_PALETTE_LOAD_32 (CMD_3D|(0x1d<<24)|(0x8f<<16))
+/* subsequent dwords up to length (max 16) are ARGB8888 color values */
+
+/* _3DSTATE_MODES_4, p218 */
+#define _3DSTATE_MODES_4_CMD (CMD_3D|(0x0d<<24))
+#define ENABLE_LOGIC_OP_FUNC (1<<23)
+#define LOGIC_OP_FUNC(x) ((x)<<18)
+#define LOGICOP_MASK (0xf<<18)
+#define MODE4_ENABLE_STENCIL_TEST_MASK ((1<<17)|(0xff00))
+#define ENABLE_STENCIL_TEST_MASK (1<<17)
+#define STENCIL_TEST_MASK(x) (((x)&0xff)<<8)
+#define MODE4_ENABLE_STENCIL_WRITE_MASK ((1<<16)|(0x00ff))
+#define ENABLE_STENCIL_WRITE_MASK (1<<16)
+#define STENCIL_WRITE_MASK(x) ((x)&0xff)
+
+/* _3DSTATE_MODES_5, p220 */
+#define _3DSTATE_MODES_5_CMD (CMD_3D|(0x0c<<24))
+#define PIPELINE_FLUSH_RENDER_CACHE (1<<18)
+#define PIPELINE_FLUSH_TEXTURE_CACHE (1<<16)
+
+
+/* p221 */
+#define _3DSTATE_PIXEL_SHADER_CONSTANTS (CMD_3D|(0x1d<<24)|(0x6<<16))
+#define PS1_REG(n) (1<<(n))
+#define PS2_CONST_X(n) (n)
+#define PS3_CONST_Y(n) (n)
+#define PS4_CONST_Z(n) (n)
+#define PS5_CONST_W(n) (n)
+
+/* p222 */
+
+
+#define I915_MAX_TEX_INDIRECT 4
+#define I915_MAX_TEX_INSN 32
+#define I915_MAX_ALU_INSN 64
+#define I915_MAX_DECL_INSN 27
+#define I915_MAX_TEMPORARY 16
+
+
+/* Each instruction is 3 dwords long, though most don't require all
+ * this space. Maximum of 123 instructions. Smaller maxes per insn
+ * type.
+ */
+#define _3DSTATE_PIXEL_SHADER_PROGRAM (CMD_3D|(0x1d<<24)|(0x5<<16))
+
+#define REG_TYPE_R 0 /* temporary regs, no need to
+ * dcl, must be written before
+ * read -- Preserved between
+ * phases.
+ */
+#define REG_TYPE_T 1 /* Interpolated values, must be
+ * dcl'ed before use.
+ *
+ * 0..7: texture coord,
+ * 8: diffuse spec,
+ * 9: specular color,
+ * 10: fog parameter in w.
+ */
+#define REG_TYPE_CONST 2 /* Restriction: only one const
+ * can be referenced per
+ * instruction, though it may be
+ * selected for multiple inputs.
+ * Constants not initialized
+ * default to zero.
+ */
+#define REG_TYPE_S 3 /* sampler */
+#define REG_TYPE_OC 4 /* output color (rgba) */
+#define REG_TYPE_OD 5 /* output depth (w), xyz are
+ * temporaries. If not written,
+ * interpolated depth is used?
+ */
+#define REG_TYPE_U 6 /* unpreserved temporaries */
+#define REG_TYPE_MASK 0x7
+#define REG_NR_MASK 0xf
+
+
+/* REG_TYPE_T:
+ */
+#define T_TEX0 0
+#define T_TEX1 1
+#define T_TEX2 2
+#define T_TEX3 3
+#define T_TEX4 4
+#define T_TEX5 5
+#define T_TEX6 6
+#define T_TEX7 7
+#define T_DIFFUSE 8
+#define T_SPECULAR 9
+#define T_FOG_W 10 /* interpolated fog is in W coord */
+
+/* Arithmetic instructions */
+
+/* .replicate_swizzle == selection and replication of a particular
+ * scalar channel, ie., .xxxx, .yyyy, .zzzz or .wwww
+ */
+#define A0_NOP (0x0<<24) /* no operation */
+#define A0_ADD (0x1<<24) /* dst = src0 + src1 */
+#define A0_MOV (0x2<<24) /* dst = src0 */
+#define A0_MUL (0x3<<24) /* dst = src0 * src1 */
+#define A0_MAD (0x4<<24) /* dst = src0 * src1 + src2 */
+#define A0_DP2ADD (0x5<<24) /* dst.xyzw = src0.xy dot src1.xy + src2.replicate_swizzle */
+#define A0_DP3 (0x6<<24) /* dst.xyzw = src0.xyz dot src1.xyz */
+#define A0_DP4 (0x7<<24) /* dst.xyzw = src0.xyzw dot src1.xyzw */
+#define A0_FRC (0x8<<24) /* dst = src0 - floor(src0) */
+#define A0_RCP (0x9<<24) /* dst.xyzw = 1/(src0.replicate_swizzle) */
+#define A0_RSQ (0xa<<24) /* dst.xyzw = 1/(sqrt(abs(src0.replicate_swizzle))) */
+#define A0_EXP (0xb<<24) /* dst.xyzw = exp2(src0.replicate_swizzle) */
+#define A0_LOG (0xc<<24) /* dst.xyzw = log2(abs(src0.replicate_swizzle)) */
+#define A0_CMP (0xd<<24) /* dst = (src0 >= 0.0) ? src1 : src2 */
+#define A0_MIN (0xe<<24) /* dst = (src0 < src1) ? src0 : src1 */
+#define A0_MAX (0xf<<24) /* dst = (src0 >= src1) ? src0 : src1 */
+#define A0_FLR (0x10<<24) /* dst = floor(src0) */
+#define A0_MOD (0x11<<24) /* dst = src0 fmod 1.0 */
+#define A0_TRC (0x12<<24) /* dst = int(src0) */
+#define A0_SGE (0x13<<24) /* dst = src0 >= src1 ? 1.0 : 0.0 */
+#define A0_SLT (0x14<<24) /* dst = src0 < src1 ? 1.0 : 0.0 */
+#define A0_DEST_SATURATE (1<<22)
+#define A0_DEST_TYPE_SHIFT 19
+/* Allow: R, OC, OD, U */
+#define A0_DEST_NR_SHIFT 14
+/* Allow R: 0..15, OC,OD: 0..0, U: 0..2 */
+#define A0_DEST_CHANNEL_X (1<<10)
+#define A0_DEST_CHANNEL_Y (2<<10)
+#define A0_DEST_CHANNEL_Z (4<<10)
+#define A0_DEST_CHANNEL_W (8<<10)
+#define A0_DEST_CHANNEL_ALL (0xf<<10)
+#define A0_DEST_CHANNEL_SHIFT 10
+#define A0_SRC0_TYPE_SHIFT 7
+#define A0_SRC0_NR_SHIFT 2
+
+#define A0_DEST_CHANNEL_XY (A0_DEST_CHANNEL_X|A0_DEST_CHANNEL_Y)
+#define A0_DEST_CHANNEL_XYZ (A0_DEST_CHANNEL_XY|A0_DEST_CHANNEL_Z)
+
+
+#define SRC_X 0
+#define SRC_Y 1
+#define SRC_Z 2
+#define SRC_W 3
+#define SRC_ZERO 4
+#define SRC_ONE 5
+
+#define A1_SRC0_CHANNEL_X_NEGATE (1<<31)
+#define A1_SRC0_CHANNEL_X_SHIFT 28
+#define A1_SRC0_CHANNEL_Y_NEGATE (1<<27)
+#define A1_SRC0_CHANNEL_Y_SHIFT 24
+#define A1_SRC0_CHANNEL_Z_NEGATE (1<<23)
+#define A1_SRC0_CHANNEL_Z_SHIFT 20
+#define A1_SRC0_CHANNEL_W_NEGATE (1<<19)
+#define A1_SRC0_CHANNEL_W_SHIFT 16
+#define A1_SRC1_TYPE_SHIFT 13
+#define A1_SRC1_NR_SHIFT 8
+#define A1_SRC1_CHANNEL_X_NEGATE (1<<7)
+#define A1_SRC1_CHANNEL_X_SHIFT 4
+#define A1_SRC1_CHANNEL_Y_NEGATE (1<<3)
+#define A1_SRC1_CHANNEL_Y_SHIFT 0
+
+#define A2_SRC1_CHANNEL_Z_NEGATE (1<<31)
+#define A2_SRC1_CHANNEL_Z_SHIFT 28
+#define A2_SRC1_CHANNEL_W_NEGATE (1<<27)
+#define A2_SRC1_CHANNEL_W_SHIFT 24
+#define A2_SRC2_TYPE_SHIFT 21
+#define A2_SRC2_NR_SHIFT 16
+#define A2_SRC2_CHANNEL_X_NEGATE (1<<15)
+#define A2_SRC2_CHANNEL_X_SHIFT 12
+#define A2_SRC2_CHANNEL_Y_NEGATE (1<<11)
+#define A2_SRC2_CHANNEL_Y_SHIFT 8
+#define A2_SRC2_CHANNEL_Z_NEGATE (1<<7)
+#define A2_SRC2_CHANNEL_Z_SHIFT 4
+#define A2_SRC2_CHANNEL_W_NEGATE (1<<3)
+#define A2_SRC2_CHANNEL_W_SHIFT 0
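+
+/* Illustrative example (not taken from the driver): a saturating
+ * "MOV oC0.xyzw, -R1" would be encoded as three dwords built from the
+ * fields above, roughly:
+ *
+ *   dw0 = A0_MOV | A0_DEST_SATURATE
+ *       | (REG_TYPE_OC << A0_DEST_TYPE_SHIFT) | (0 << A0_DEST_NR_SHIFT)
+ *       | A0_DEST_CHANNEL_ALL
+ *       | (REG_TYPE_R << A0_SRC0_TYPE_SHIFT) | (1 << A0_SRC0_NR_SHIFT);
+ *   dw1 = A1_SRC0_CHANNEL_X_NEGATE | (SRC_X << A1_SRC0_CHANNEL_X_SHIFT)
+ *       | A1_SRC0_CHANNEL_Y_NEGATE | (SRC_Y << A1_SRC0_CHANNEL_Y_SHIFT)
+ *       | A1_SRC0_CHANNEL_Z_NEGATE | (SRC_Z << A1_SRC0_CHANNEL_Z_SHIFT)
+ *       | A1_SRC0_CHANNEL_W_NEGATE | (SRC_W << A1_SRC0_CHANNEL_W_SHIFT);
+ *   dw2 = 0;
+ */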
+
+
+
+/* Texture instructions */
+#define T0_TEXLD (0x15<<24) /* Sample texture using predeclared
+ * sampler and address, and output
+ * filtered texel data to destination
+ * register */
+#define T0_TEXLDP (0x16<<24) /* Same as texld but performs a
+ * perspective divide of the texture
+ * coordinate .xyz values by .w before
+ * sampling. */
+#define T0_TEXLDB (0x17<<24) /* Same as texld but biases the
+ * computed LOD by w. Only S4.6 two's
+ * comp is used. This implies that a
+ * float to fixed conversion is
+ * done. */
+#define T0_TEXKILL (0x18<<24) /* Does not perform a sampling
+ * operation. Simply kills the pixel
+ * if any channel of the address
+ * register is < 0.0. */
+#define T0_DEST_TYPE_SHIFT 19
+/* Allow: R, OC, OD, U */
+/* Note: U (unpreserved) regs do not retain their values between
+ * phases (cannot be used for feedback)
+ *
+ * Note: oC and OD registers can only be used as the destination of a
+ * texture instruction once per phase (this is an implementation
+ * restriction).
+ */
+#define T0_DEST_NR_SHIFT 14
+/* Allow R: 0..15, OC,OD: 0..0, U: 0..2 */
+#define T0_SAMPLER_NR_SHIFT 0 /* This field ignored for TEXKILL */
+#define T0_SAMPLER_NR_MASK (0xf<<0)
+
+#define T1_ADDRESS_REG_TYPE_SHIFT 24 /* Reg to use as texture coord */
+/* Allow R, T, OC, OD -- R, OC, OD are 'dependent' reads, new program phase */
+#define T1_ADDRESS_REG_NR_SHIFT 17
+#define T2_MBZ 0
+
+/* Declaration instructions */
+#define D0_DCL (0x19<<24) /* Declare a t (interpolated attrib)
+ * register or an s (sampler)
+ * register. */
+#define D0_SAMPLE_TYPE_SHIFT 22
+#define D0_SAMPLE_TYPE_2D (0x0<<22)
+#define D0_SAMPLE_TYPE_CUBE (0x1<<22)
+#define D0_SAMPLE_TYPE_VOLUME (0x2<<22)
+#define D0_SAMPLE_TYPE_MASK (0x3<<22)
+
+#define D0_TYPE_SHIFT 19
+/* Allow: T, S */
+#define D0_NR_SHIFT 14
+/* Allow T: 0..10, S: 0..15 */
+#define D0_CHANNEL_X (1<<10)
+#define D0_CHANNEL_Y (2<<10)
+#define D0_CHANNEL_Z (4<<10)
+#define D0_CHANNEL_W (8<<10)
+#define D0_CHANNEL_ALL (0xf<<10)
+#define D0_CHANNEL_NONE (0<<10)
+
+#define D0_CHANNEL_XY (D0_CHANNEL_X|D0_CHANNEL_Y)
+#define D0_CHANNEL_XYZ (D0_CHANNEL_XY|D0_CHANNEL_Z)
+
+/* I915 Errata: Do not allow (xz), (xw), (xzw) combinations for diffuse
+ * or specular declarations.
+ *
+ * For T dcls, only allow: (x), (xy), (xyz), (w), (xyzw)
+ *
+ * Must be zero for S (sampler) dcls
+ */
+#define D1_MBZ 0
+#define D2_MBZ 0
+
+
+
+/* p207 */
+#define _3DSTATE_MAP_STATE (CMD_3D|(0x1d<<24)|(0x0<<16))
+
+#define MS1_MAPMASK_SHIFT 0
+#define MS1_MAPMASK_MASK (0x8fff<<0)
+
+#define MS2_UNTRUSTED_SURFACE (1<<31)
+#define MS2_ADDRESS_MASK 0xfffffffc
+#define MS2_VERTICAL_LINE_STRIDE (1<<1)
+#define MS2_VERTICAL_OFFSET (1<<1)
+
+#define MS3_HEIGHT_SHIFT 21
+#define MS3_WIDTH_SHIFT 10
+#define MS3_PALETTE_SELECT (1<<9)
+#define MS3_MAPSURF_FORMAT_SHIFT 7
+#define MS3_MAPSURF_FORMAT_MASK (0x7<<7)
+#define MAPSURF_8BIT (1<<7)
+#define MAPSURF_16BIT (2<<7)
+#define MAPSURF_32BIT (3<<7)
+#define MAPSURF_422 (5<<7)
+#define MAPSURF_COMPRESSED (6<<7)
+#define MAPSURF_4BIT_INDEXED (7<<7)
+#define MS3_MT_FORMAT_MASK (0x7 << 3)
+#define MS3_MT_FORMAT_SHIFT 3
+#define MT_4BIT_IDX_ARGB8888 (7<<3) /* SURFACE_4BIT_INDEXED */
+#define MT_8BIT_I8 (0<<3) /* SURFACE_8BIT */
+#define MT_8BIT_L8 (1<<3)
+#define MT_8BIT_A8 (4<<3)
+#define MT_8BIT_MONO8 (5<<3)
+#define MT_16BIT_RGB565 (0<<3) /* SURFACE_16BIT */
+#define MT_16BIT_ARGB1555 (1<<3)
+#define MT_16BIT_ARGB4444 (2<<3)
+#define MT_16BIT_AY88 (3<<3)
+#define MT_16BIT_88DVDU (5<<3)
+#define MT_16BIT_BUMP_655LDVDU (6<<3)
+#define MT_16BIT_I16 (7<<3)
+#define MT_16BIT_L16 (8<<3)
+#define MT_16BIT_A16 (9<<3)
+#define MT_32BIT_ARGB8888 (0<<3) /* SURFACE_32BIT */
+#define MT_32BIT_ABGR8888 (1<<3)
+#define MT_32BIT_XRGB8888 (2<<3)
+#define MT_32BIT_XBGR8888 (3<<3)
+#define MT_32BIT_QWVU8888 (4<<3)
+#define MT_32BIT_AXVU8888 (5<<3)
+#define MT_32BIT_LXVU8888 (6<<3)
+#define MT_32BIT_XLVU8888 (7<<3)
+#define MT_32BIT_ARGB2101010 (8<<3)
+#define MT_32BIT_ABGR2101010 (9<<3)
+#define MT_32BIT_AWVU2101010 (0xA<<3)
+#define MT_32BIT_GR1616 (0xB<<3)
+#define MT_32BIT_VU1616 (0xC<<3)
+#define MT_32BIT_xI824 (0xD<<3)
+#define MT_32BIT_xA824 (0xE<<3)
+#define MT_32BIT_xL824 (0xF<<3)
+#define MT_422_YCRCB_SWAPY (0<<3) /* SURFACE_422 */
+#define MT_422_YCRCB_NORMAL (1<<3)
+#define MT_422_YCRCB_SWAPUV (2<<3)
+#define MT_422_YCRCB_SWAPUVY (3<<3)
+#define MT_COMPRESS_DXT1 (0<<3) /* SURFACE_COMPRESSED */
+#define MT_COMPRESS_DXT2_3 (1<<3)
+#define MT_COMPRESS_DXT4_5 (2<<3)
+#define MT_COMPRESS_FXT1 (3<<3)
+#define MT_COMPRESS_DXT1_RGB (4<<3)
+#define MS3_USE_FENCE_REGS (1<<2)
+#define MS3_TILED_SURFACE (1<<1)
+#define MS3_TILE_WALK (1<<0)
+
+#define MS4_PITCH_SHIFT 21
+#define MS4_CUBE_FACE_ENA_NEGX (1<<20)
+#define MS4_CUBE_FACE_ENA_POSX (1<<19)
+#define MS4_CUBE_FACE_ENA_NEGY (1<<18)
+#define MS4_CUBE_FACE_ENA_POSY (1<<17)
+#define MS4_CUBE_FACE_ENA_NEGZ (1<<16)
+#define MS4_CUBE_FACE_ENA_POSZ (1<<15)
+#define MS4_CUBE_FACE_ENA_MASK (0x3f<<15)
+#define MS4_MAX_LOD_SHIFT 9
+#define MS4_MAX_LOD_MASK (0x3f<<9)
+#define MS4_MIP_LAYOUT_LEGACY (0<<8)
+#define MS4_MIP_LAYOUT_BELOW_LPT (0<<8)
+#define MS4_MIP_LAYOUT_RIGHT_LPT (1<<8)
+#define MS4_VOLUME_DEPTH_SHIFT 0
+#define MS4_VOLUME_DEPTH_MASK (0xff<<0)
+
+/* p244 */
+#define _3DSTATE_SAMPLER_STATE (CMD_3D|(0x1d<<24)|(0x1<<16))
+
+#define SS1_MAPMASK_SHIFT 0
+#define SS1_MAPMASK_MASK (0x8fff<<0)
+
+#define SS2_REVERSE_GAMMA_ENABLE (1<<31)
+#define SS2_PACKED_TO_PLANAR_ENABLE (1<<30)
+#define SS2_COLORSPACE_CONVERSION (1<<29)
+#define SS2_CHROMAKEY_SHIFT 27
+#define SS2_BASE_MIP_LEVEL_SHIFT 22
+#define SS2_BASE_MIP_LEVEL_MASK (0x1f<<22)
+#define SS2_MIP_FILTER_SHIFT 20
+#define SS2_MIP_FILTER_MASK (0x3<<20)
+#define MIPFILTER_NONE 0
+#define MIPFILTER_NEAREST 1
+#define MIPFILTER_LINEAR 3
+#define SS2_MAG_FILTER_SHIFT 17
+#define SS2_MAG_FILTER_MASK (0x7<<17)
+#define FILTER_NEAREST 0
+#define FILTER_LINEAR 1
+#define FILTER_ANISOTROPIC 2
+#define FILTER_4X4_1 3
+#define FILTER_4X4_2 4
+#define FILTER_4X4_FLAT 5
+#define FILTER_6X5_MONO 6 /* XXX - check */
+#define SS2_MIN_FILTER_SHIFT 14
+#define SS2_MIN_FILTER_MASK (0x7<<14)
+#define SS2_LOD_BIAS_SHIFT 5
+#define SS2_LOD_BIAS_ONE (0x10<<5)
+#define SS2_LOD_BIAS_MASK (0x1ff<<5)
+/* Shadow requires:
+ * MT_X8{I,L,A}24 or MT_{I,L,A}16 texture format
+ * FILTER_4X4_x MIN and MAG filters
+ */
+#define SS2_SHADOW_ENABLE (1<<4)
+#define SS2_MAX_ANISO_MASK (1<<3)
+#define SS2_MAX_ANISO_2 (0<<3)
+#define SS2_MAX_ANISO_4 (1<<3)
+#define SS2_SHADOW_FUNC_SHIFT 0
+#define SS2_SHADOW_FUNC_MASK (0x7<<0)
+/* SS2_SHADOW_FUNC values: see COMPAREFUNC_* */
+
+#define SS3_MIN_LOD_SHIFT 24
+#define SS3_MIN_LOD_ONE (0x10<<24)
+#define SS3_MIN_LOD_MASK (0xff<<24)
+#define SS3_KILL_PIXEL_ENABLE (1<<17)
+#define SS3_TCX_ADDR_MODE_SHIFT 12
+#define SS3_TCX_ADDR_MODE_MASK (0x7<<12)
+#define TEXCOORDMODE_WRAP 0
+#define TEXCOORDMODE_MIRROR 1
+#define TEXCOORDMODE_CLAMP_EDGE 2
+#define TEXCOORDMODE_CUBE 3
+#define TEXCOORDMODE_CLAMP_BORDER 4
+#define TEXCOORDMODE_MIRROR_ONCE 5
+#define SS3_TCY_ADDR_MODE_SHIFT 9
+#define SS3_TCY_ADDR_MODE_MASK (0x7<<9)
+#define SS3_TCZ_ADDR_MODE_SHIFT 6
+#define SS3_TCZ_ADDR_MODE_MASK (0x7<<6)
+#define SS3_NORMALIZED_COORDS (1<<5)
+#define SS3_TEXTUREMAP_INDEX_SHIFT 1
+#define SS3_TEXTUREMAP_INDEX_MASK (0xf<<1)
+#define SS3_DEINTERLACER_ENABLE (1<<0)
+
+#define SS4_BORDER_COLOR_MASK (~0)
+
+/* 3DSTATE_SPAN_STIPPLE, p258
+ */
+#define _3DSTATE_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define ST1_ENABLE (1<<16)
+#define ST1_MASK (0xffff)
+
+#define _3DSTATE_DEFAULT_Z ((0x3<<29)|(0x1d<<24)|(0x98<<16))
+#define _3DSTATE_DEFAULT_DIFFUSE ((0x3<<29)|(0x1d<<24)|(0x99<<16))
+#define _3DSTATE_DEFAULT_SPECULAR ((0x3<<29)|(0x1d<<24)|(0x9a<<16))
+
+
+#define MI_FLUSH ((0<<29)|(4<<23))
+#define FLUSH_MAP_CACHE (1<<0)
+#define INHIBIT_FLUSH_RENDER_CACHE (1<<2)
+
+
+#define CMD_3D (0x3<<29)
+
+
+#define _3DPRIMITIVE ((0x3<<29)|(0x1f<<24))
+#define PRIM_INDIRECT (1<<23)
+#define PRIM_INLINE (0<<23)
+#define PRIM_INDIRECT_SEQUENTIAL (0<<17)
+#define PRIM_INDIRECT_ELTS (1<<17)
+
+#define PRIM3D_TRILIST (0x0<<18)
+#define PRIM3D_TRISTRIP (0x1<<18)
+#define PRIM3D_TRISTRIP_RVRSE (0x2<<18)
+#define PRIM3D_TRIFAN (0x3<<18)
+#define PRIM3D_POLY (0x4<<18)
+#define PRIM3D_LINELIST (0x5<<18)
+#define PRIM3D_LINESTRIP (0x6<<18)
+#define PRIM3D_RECTLIST (0x7<<18)
+#define PRIM3D_POINTLIST (0x8<<18)
+#define PRIM3D_DIB (0x9<<18)
+#define PRIM3D_MASK (0x1f<<18)
+
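+/* Example matching how i915_prim_vbuf.c starts an indexed draw: a
+ * triangle-list of N elements is kicked off with
+ *
+ *   OUT_BATCH(_3DPRIMITIVE | PRIM_INDIRECT | PRIM3D_TRILIST |
+ *             PRIM_INDIRECT_ELTS | N);
+ *
+ * followed by the packed ushort indices, two per dword.
+ */
+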
+#define I915PACKCOLOR4444(r,g,b,a) \
+ ((((a) & 0xf0) << 8) | (((r) & 0xf0) << 4) | ((g) & 0xf0) | ((b) >> 4))
+
+#define I915PACKCOLOR1555(r,g,b,a) \
+ ((((r) & 0xf8) << 7) | (((g) & 0xf8) << 2) | (((b) & 0xf8) >> 3) | \
+ ((a) ? 0x8000 : 0))
+
+#define I915PACKCOLOR565(r,g,b) \
+ ((((r) & 0xf8) << 8) | (((g) & 0xfc) << 3) | (((b) & 0xf8) >> 3))
+
+#define I915PACKCOLOR8888(r,g,b,a) \
+ ((a<<24) | (r<<16) | (g<<8) | b)
+
+
+
+
+#define BR00_BITBLT_CLIENT 0x40000000
+#define BR00_OP_COLOR_BLT 0x10000000
+#define BR00_OP_SRC_COPY_BLT 0x10C00000
+#define BR13_SOLID_PATTERN 0x80000000
+
+#define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|0x4)
+#define XY_COLOR_BLT_WRITE_ALPHA (1<<21)
+#define XY_COLOR_BLT_WRITE_RGB (1<<20)
+
+#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
+#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
+#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
+
+#define MI_WAIT_FOR_EVENT ((0x3<<23))
+#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
+#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
+
+#define MI_BATCH_BUFFER (0x30<<23)
+#define MI_BATCH_BUFFER_START (0x31<<23)
+#define MI_BATCH_BUFFER_END (0xa<<23)
+
+
+
+#define COMPAREFUNC_ALWAYS 0
+#define COMPAREFUNC_NEVER 0x1
+#define COMPAREFUNC_LESS 0x2
+#define COMPAREFUNC_EQUAL 0x3
+#define COMPAREFUNC_LEQUAL 0x4
+#define COMPAREFUNC_GREATER 0x5
+#define COMPAREFUNC_NOTEQUAL 0x6
+#define COMPAREFUNC_GEQUAL 0x7
+
+#define STENCILOP_KEEP 0
+#define STENCILOP_ZERO 0x1
+#define STENCILOP_REPLACE 0x2
+#define STENCILOP_INCRSAT 0x3
+#define STENCILOP_DECRSAT 0x4
+#define STENCILOP_INCR 0x5
+#define STENCILOP_DECR 0x6
+#define STENCILOP_INVERT 0x7
+
+#define LOGICOP_CLEAR 0
+#define LOGICOP_NOR 0x1
+#define LOGICOP_AND_INV 0x2
+#define LOGICOP_COPY_INV 0x3
+#define LOGICOP_AND_RVRSE 0x4
+#define LOGICOP_INV 0x5
+#define LOGICOP_XOR 0x6
+#define LOGICOP_NAND 0x7
+#define LOGICOP_AND 0x8
+#define LOGICOP_EQUIV 0x9
+#define LOGICOP_NOOP 0xa
+#define LOGICOP_OR_INV 0xb
+#define LOGICOP_COPY 0xc
+#define LOGICOP_OR_RVRSE 0xd
+#define LOGICOP_OR 0xe
+#define LOGICOP_SET 0xf
+
+#define BLENDFACT_ZERO 0x01
+#define BLENDFACT_ONE 0x02
+#define BLENDFACT_SRC_COLR 0x03
+#define BLENDFACT_INV_SRC_COLR 0x04
+#define BLENDFACT_SRC_ALPHA 0x05
+#define BLENDFACT_INV_SRC_ALPHA 0x06
+#define BLENDFACT_DST_ALPHA 0x07
+#define BLENDFACT_INV_DST_ALPHA 0x08
+#define BLENDFACT_DST_COLR 0x09
+#define BLENDFACT_INV_DST_COLR 0x0a
+#define BLENDFACT_SRC_ALPHA_SATURATE 0x0b
+#define BLENDFACT_CONST_COLOR 0x0c
+#define BLENDFACT_INV_CONST_COLOR 0x0d
+#define BLENDFACT_CONST_ALPHA 0x0e
+#define BLENDFACT_INV_CONST_ALPHA 0x0f
+#define BLENDFACT_MASK 0x0f
+
+#define PCI_CHIP_I915_G 0x2582
+#define PCI_CHIP_I915_GM 0x2592
+#define PCI_CHIP_I945_G 0x2772
+#define PCI_CHIP_I945_GM 0x27A2
+#define PCI_CHIP_I945_GME 0x27AE
+#define PCI_CHIP_G33_G 0x29C2
+#define PCI_CHIP_Q35_G 0x29B2
+#define PCI_CHIP_Q33_G 0x29D2
+
+
+#endif
diff --git a/src/gallium/drivers/i915simple/i915_screen.c b/src/gallium/drivers/i915simple/i915_screen.c
new file mode 100644
index 0000000000..9f017a14cc
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_screen.c
@@ -0,0 +1,300 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "pipe/p_inlines.h"
+#include "util/u_memory.h"
+#include "util/u_string.h"
+
+#include "i915_reg.h"
+#include "i915_context.h"
+#include "i915_screen.h"
+#include "i915_buffer.h"
+#include "i915_texture.h"
+#include "intel_winsys.h"
+
+
+/*
+ * Probe functions
+ */
+
+
+static const char *
+i915_get_vendor(struct pipe_screen *screen)
+{
+ return "Tungsten Graphics, Inc.";
+}
+
+static const char *
+i915_get_name(struct pipe_screen *screen)
+{
+ static char buffer[128];
+ const char *chipset;
+
+ switch (i915_screen(screen)->pci_id) {
+ case PCI_CHIP_I915_G:
+ chipset = "915G";
+ break;
+ case PCI_CHIP_I915_GM:
+ chipset = "915GM";
+ break;
+ case PCI_CHIP_I945_G:
+ chipset = "945G";
+ break;
+ case PCI_CHIP_I945_GM:
+ chipset = "945GM";
+ break;
+ case PCI_CHIP_I945_GME:
+ chipset = "945GME";
+ break;
+ case PCI_CHIP_G33_G:
+ chipset = "G33";
+ break;
+ case PCI_CHIP_Q35_G:
+ chipset = "Q35";
+ break;
+ case PCI_CHIP_Q33_G:
+ chipset = "Q33";
+ break;
+ default:
+ chipset = "unknown";
+ break;
+ }
+
+ util_snprintf(buffer, sizeof(buffer), "i915 (chipset: %s)", chipset);
+ return buffer;
+}
+
+static int
+i915_get_param(struct pipe_screen *screen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_TEXTURE_IMAGE_UNITS:
+ return 8;
+ case PIPE_CAP_NPOT_TEXTURES:
+ return 1;
+ case PIPE_CAP_TWO_SIDED_STENCIL:
+ return 1;
+ case PIPE_CAP_GLSL:
+ return 0;
+ case PIPE_CAP_S3TC:
+ return 0;
+ case PIPE_CAP_ANISOTROPIC_FILTER:
+ return 0;
+ case PIPE_CAP_POINT_SPRITE:
+ return 0;
+ case PIPE_CAP_MAX_RENDER_TARGETS:
+ return 1;
+ case PIPE_CAP_OCCLUSION_QUERY:
+ return 0;
+ case PIPE_CAP_TEXTURE_SHADOW_MAP:
+ return 1;
+ case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
+ return 11; /* max 1024x1024 */
+ case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
+ return 8; /* max 128x128x128 */
+ case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
+ return 11; /* max 1024x1024 */
+ default:
+ return 0;
+ }
+}
+
+static float
+i915_get_paramf(struct pipe_screen *screen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_LINE_WIDTH:
+ /* fall-through */
+ case PIPE_CAP_MAX_LINE_WIDTH_AA:
+ return 7.5;
+
+ case PIPE_CAP_MAX_POINT_WIDTH:
+ /* fall-through */
+ case PIPE_CAP_MAX_POINT_WIDTH_AA:
+ return 255.0;
+
+ case PIPE_CAP_MAX_TEXTURE_ANISOTROPY:
+ return 4.0;
+
+ case PIPE_CAP_MAX_TEXTURE_LOD_BIAS:
+ return 16.0;
+
+ default:
+ return 0;
+ }
+}
+
+static boolean
+i915_is_format_supported(struct pipe_screen *screen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned tex_usage,
+ unsigned geom_flags)
+{
+ static const enum pipe_format tex_supported[] = {
+ PIPE_FORMAT_R8G8B8A8_UNORM,
+ PIPE_FORMAT_A8R8G8B8_UNORM,
+ PIPE_FORMAT_R5G6B5_UNORM,
+ PIPE_FORMAT_L8_UNORM,
+ PIPE_FORMAT_A8_UNORM,
+ PIPE_FORMAT_I8_UNORM,
+ PIPE_FORMAT_A8L8_UNORM,
+ PIPE_FORMAT_YCBCR,
+ PIPE_FORMAT_YCBCR_REV,
+ PIPE_FORMAT_S8Z24_UNORM,
+ PIPE_FORMAT_NONE /* list terminator */
+ };
+ static const enum pipe_format surface_supported[] = {
+ PIPE_FORMAT_A8R8G8B8_UNORM,
+ PIPE_FORMAT_R5G6B5_UNORM,
+ PIPE_FORMAT_S8Z24_UNORM,
+ PIPE_FORMAT_NONE /* list terminator */
+ };
+ const enum pipe_format *list;
+ uint i;
+
+ if(tex_usage & PIPE_TEXTURE_USAGE_RENDER_TARGET)
+ list = surface_supported;
+ else
+ list = tex_supported;
+
+ for (i = 0; list[i] != PIPE_FORMAT_NONE; i++) {
+ if (list[i] == format)
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+
+/*
+ * Fence functions
+ */
+
+
+static void
+i915_fence_reference(struct pipe_screen *screen,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+ struct i915_screen *is = i915_screen(screen);
+
+ is->iws->fence_reference(is->iws, ptr, fence);
+}
+
+static int
+i915_fence_signalled(struct pipe_screen *screen,
+ struct pipe_fence_handle *fence,
+ unsigned flags)
+{
+ struct i915_screen *is = i915_screen(screen);
+
+ return is->iws->fence_signalled(is->iws, fence);
+}
+
+static int
+i915_fence_finish(struct pipe_screen *screen,
+ struct pipe_fence_handle *fence,
+ unsigned flags)
+{
+ struct i915_screen *is = i915_screen(screen);
+
+ return is->iws->fence_finish(is->iws, fence);
+}
+
+
+/*
+ * Generic functions
+ */
+
+
+static void
+i915_destroy_screen(struct pipe_screen *screen)
+{
+ struct i915_screen *is = i915_screen(screen);
+
+ if (is->iws)
+ is->iws->destroy(is->iws);
+
+ FREE(is);
+}
+
+/**
+ * Create a new i915_screen object
+ */
+struct pipe_screen *
+i915_create_screen(struct intel_winsys *iws, uint pci_id)
+{
+ struct i915_screen *is = CALLOC_STRUCT(i915_screen);
+
+ if (!is)
+ return NULL;
+
+ switch (pci_id) {
+ case PCI_CHIP_I915_G:
+ case PCI_CHIP_I915_GM:
+ is->is_i945 = FALSE;
+ break;
+
+ case PCI_CHIP_I945_G:
+ case PCI_CHIP_I945_GM:
+ case PCI_CHIP_I945_GME:
+ case PCI_CHIP_G33_G:
+ case PCI_CHIP_Q33_G:
+ case PCI_CHIP_Q35_G:
+ is->is_i945 = TRUE;
+ break;
+
+ default:
+ debug_printf("%s: unknown pci id 0x%x, cannot create screen\n",
+ __FUNCTION__, pci_id);
+ return NULL;
+ }
+
+ is->pci_id = pci_id;
+ is->iws = iws;
+
+ is->base.winsys = NULL;
+
+ is->base.destroy = i915_destroy_screen;
+
+ is->base.get_name = i915_get_name;
+ is->base.get_vendor = i915_get_vendor;
+ is->base.get_param = i915_get_param;
+ is->base.get_paramf = i915_get_paramf;
+ is->base.is_format_supported = i915_is_format_supported;
+
+ is->base.fence_reference = i915_fence_reference;
+ is->base.fence_signalled = i915_fence_signalled;
+ is->base.fence_finish = i915_fence_finish;
+
+ i915_init_screen_texture_functions(is);
+ i915_init_screen_buffer_functions(is);
+
+ return &is->base;
+}
diff --git a/src/gallium/drivers/i915simple/i915_screen.h b/src/gallium/drivers/i915simple/i915_screen.h
new file mode 100644
index 0000000000..5126485caa
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_screen.h
@@ -0,0 +1,80 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef I915_SCREEN_H
+#define I915_SCREEN_H
+
+#include "pipe/p_state.h"
+#include "pipe/p_screen.h"
+
+
+struct intel_winsys;
+
+
+/**
+ * Subclass of pipe_screen
+ */
+struct i915_screen
+{
+ struct pipe_screen base;
+
+ struct intel_winsys *iws;
+
+ boolean is_i945;
+ uint pci_id;
+};
+
+/**
+ * Subclass of pipe_transfer
+ */
+struct i915_transfer
+{
+ struct pipe_transfer base;
+
+ unsigned offset;
+};
+
+
+/*
+ * Cast wrappers
+ */
+
+
+static INLINE struct i915_screen *
+i915_screen(struct pipe_screen *pscreen)
+{
+ return (struct i915_screen *) pscreen;
+}
+
+static INLINE struct i915_transfer *
+i915_transfer(struct pipe_transfer *transfer)
+{
+ return (struct i915_transfer *)transfer;
+}
+
+
+#endif /* I915_SCREEN_H */
diff --git a/src/gallium/drivers/i915simple/i915_state.c b/src/gallium/drivers/i915simple/i915_state.c
new file mode 100644
index 0000000000..0087dfa410
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_state.c
@@ -0,0 +1,788 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "draw/draw_context.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_inlines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "tgsi/tgsi_parse.h"
+
+#include "i915_context.h"
+#include "i915_reg.h"
+#include "i915_state.h"
+#include "i915_state_inlines.h"
+#include "i915_fpc.h"
+
+/* The i915 (and related graphics cores) do not support GL_CLAMP. The
+ * Intel drivers for "other operating systems" implement GL_CLAMP as
+ * GL_CLAMP_TO_EDGE, so the same is done here.
+ */
+static unsigned
+translate_wrap_mode(unsigned wrap)
+{
+ switch (wrap) {
+ case PIPE_TEX_WRAP_REPEAT:
+ return TEXCOORDMODE_WRAP;
+ case PIPE_TEX_WRAP_CLAMP:
+ return TEXCOORDMODE_CLAMP_EDGE; /* not quite correct */
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ return TEXCOORDMODE_CLAMP_EDGE;
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ return TEXCOORDMODE_CLAMP_BORDER;
+// case PIPE_TEX_WRAP_MIRRORED_REPEAT:
+// return TEXCOORDMODE_MIRROR;
+ default:
+ return TEXCOORDMODE_WRAP;
+ }
+}
+
+static unsigned translate_img_filter( unsigned filter )
+{
+ switch (filter) {
+ case PIPE_TEX_FILTER_NEAREST:
+ return FILTER_NEAREST;
+ case PIPE_TEX_FILTER_LINEAR:
+ return FILTER_LINEAR;
+ case PIPE_TEX_FILTER_ANISO:
+ return FILTER_ANISOTROPIC;
+ default:
+ assert(0);
+ return FILTER_NEAREST;
+ }
+}
+
+static unsigned translate_mip_filter( unsigned filter )
+{
+ switch (filter) {
+ case PIPE_TEX_MIPFILTER_NONE:
+ return MIPFILTER_NONE;
+ case PIPE_TEX_MIPFILTER_NEAREST:
+ return MIPFILTER_NEAREST;
+ case PIPE_TEX_MIPFILTER_LINEAR:
+ return MIPFILTER_LINEAR;
+ default:
+ assert(0);
+ return MIPFILTER_NONE;
+ }
+}
+
+
+/* None of this state is actually used for anything yet.
+ */
+static void *
+i915_create_blend_state(struct pipe_context *pipe,
+ const struct pipe_blend_state *blend)
+{
+ struct i915_blend_state *cso_data = CALLOC_STRUCT( i915_blend_state );
+
+ {
+ unsigned eqRGB = blend->rgb_func;
+ unsigned srcRGB = blend->rgb_src_factor;
+ unsigned dstRGB = blend->rgb_dst_factor;
+
+ unsigned eqA = blend->alpha_func;
+ unsigned srcA = blend->alpha_src_factor;
+ unsigned dstA = blend->alpha_dst_factor;
+
+      /* MIN/MAX blend modes get special handling at the state_tracker
+       * level.
+       */
+
+ if (srcA != srcRGB ||
+ dstA != dstRGB ||
+ eqA != eqRGB) {
+
+ cso_data->iab = (_3DSTATE_INDEPENDENT_ALPHA_BLEND_CMD |
+ IAB_MODIFY_ENABLE |
+ IAB_ENABLE |
+ IAB_MODIFY_FUNC |
+ IAB_MODIFY_SRC_FACTOR |
+ IAB_MODIFY_DST_FACTOR |
+ SRC_ABLND_FACT(i915_translate_blend_factor(srcA)) |
+ DST_ABLND_FACT(i915_translate_blend_factor(dstA)) |
+ (i915_translate_blend_func(eqA) << IAB_FUNC_SHIFT));
+ }
+ else {
+ cso_data->iab = (_3DSTATE_INDEPENDENT_ALPHA_BLEND_CMD |
+ IAB_MODIFY_ENABLE |
+ 0);
+ }
+ }
+
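+   /* The logic op function is always loaded into MODES_4; whether it is
+    * actually applied is controlled by the S5 logic-op enable bit below.
+    */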
+ cso_data->modes4 |= (_3DSTATE_MODES_4_CMD |
+ ENABLE_LOGIC_OP_FUNC |
+ LOGIC_OP_FUNC(i915_translate_logic_op(blend->logicop_func)));
+
+ if (blend->logicop_enable)
+ cso_data->LIS5 |= S5_LOGICOP_ENABLE;
+
+ if (blend->dither)
+ cso_data->LIS5 |= S5_COLOR_DITHER_ENABLE;
+
+ if ((blend->colormask & PIPE_MASK_R) == 0)
+ cso_data->LIS5 |= S5_WRITEDISABLE_RED;
+
+ if ((blend->colormask & PIPE_MASK_G) == 0)
+ cso_data->LIS5 |= S5_WRITEDISABLE_GREEN;
+
+ if ((blend->colormask & PIPE_MASK_B) == 0)
+ cso_data->LIS5 |= S5_WRITEDISABLE_BLUE;
+
+ if ((blend->colormask & PIPE_MASK_A) == 0)
+ cso_data->LIS5 |= S5_WRITEDISABLE_ALPHA;
+
+ if (blend->blend_enable) {
+ unsigned funcRGB = blend->rgb_func;
+ unsigned srcRGB = blend->rgb_src_factor;
+ unsigned dstRGB = blend->rgb_dst_factor;
+
+ cso_data->LIS6 |= (S6_CBUF_BLEND_ENABLE |
+ SRC_BLND_FACT(i915_translate_blend_factor(srcRGB)) |
+ DST_BLND_FACT(i915_translate_blend_factor(dstRGB)) |
+ (i915_translate_blend_func(funcRGB) << S6_CBUF_BLEND_FUNC_SHIFT));
+ }
+
+ return cso_data;
+}
+
+static void i915_bind_blend_state(struct pipe_context *pipe,
+ void *blend)
+{
+ struct i915_context *i915 = i915_context(pipe);
+ draw_flush(i915->draw);
+
+ i915->blend = (struct i915_blend_state*)blend;
+
+ i915->dirty |= I915_NEW_BLEND;
+}
+
+
+static void i915_delete_blend_state(struct pipe_context *pipe, void *blend)
+{
+ FREE(blend);
+}
+
+static void i915_set_blend_color( struct pipe_context *pipe,
+ const struct pipe_blend_color *blend_color )
+{
+ struct i915_context *i915 = i915_context(pipe);
+ draw_flush(i915->draw);
+
+ i915->blend_color = *blend_color;
+
+ i915->dirty |= I915_NEW_BLEND;
+}
+
+static void *
+i915_create_sampler_state(struct pipe_context *pipe,
+ const struct pipe_sampler_state *sampler)
+{
+ struct i915_sampler_state *cso = CALLOC_STRUCT( i915_sampler_state );
+ const unsigned ws = sampler->wrap_s;
+ const unsigned wt = sampler->wrap_t;
+ const unsigned wr = sampler->wrap_r;
+ unsigned minFilt, magFilt;
+ unsigned mipFilt;
+
+ cso->templ = sampler;
+
+ mipFilt = translate_mip_filter(sampler->min_mip_filter);
+ minFilt = translate_img_filter( sampler->min_img_filter );
+ magFilt = translate_img_filter( sampler->mag_img_filter );
+
+ if (sampler->max_anisotropy > 2.0) {
+ cso->state[0] |= SS2_MAX_ANISO_4;
+ }
+
+ {
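+      /* The LOD bias is converted to fixed point (4 fractional bits) and
+       * clamped to the range the hardware field can hold.
+       */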
+ int b = (int) (sampler->lod_bias * 16.0);
+ b = CLAMP(b, -256, 255);
+ cso->state[0] |= ((b << SS2_LOD_BIAS_SHIFT) & SS2_LOD_BIAS_MASK);
+ }
+
+ /* Shadow:
+ */
+ if (sampler->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
+ {
+ cso->state[0] |= (SS2_SHADOW_ENABLE |
+ i915_translate_compare_func(sampler->compare_func));
+
+ minFilt = FILTER_4X4_FLAT;
+ magFilt = FILTER_4X4_FLAT;
+ }
+
+ cso->state[0] |= ((minFilt << SS2_MIN_FILTER_SHIFT) |
+ (mipFilt << SS2_MIP_FILTER_SHIFT) |
+ (magFilt << SS2_MAG_FILTER_SHIFT));
+
+ cso->state[1] |=
+ ((translate_wrap_mode(ws) << SS3_TCX_ADDR_MODE_SHIFT) |
+ (translate_wrap_mode(wt) << SS3_TCY_ADDR_MODE_SHIFT) |
+ (translate_wrap_mode(wr) << SS3_TCZ_ADDR_MODE_SHIFT));
+
+ if (sampler->normalized_coords)
+ cso->state[1] |= SS3_NORMALIZED_COORDS;
+
+ {
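+      /* Min/max LOD are likewise stored in fixed point (4 fractional
+       * bits), clamped to at most 11 mipmap levels.
+       */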
+ int minlod = (int) (16.0 * sampler->min_lod);
+ int maxlod = (int) (16.0 * sampler->max_lod);
+ minlod = CLAMP(minlod, 0, 16 * 11);
+ maxlod = CLAMP(maxlod, 0, 16 * 11);
+
+ if (minlod > maxlod)
+ maxlod = minlod;
+
+ cso->minlod = minlod;
+ cso->maxlod = maxlod;
+ }
+
+ {
+ ubyte r = float_to_ubyte(sampler->border_color[0]);
+ ubyte g = float_to_ubyte(sampler->border_color[1]);
+ ubyte b = float_to_ubyte(sampler->border_color[2]);
+ ubyte a = float_to_ubyte(sampler->border_color[3]);
+ cso->state[2] = I915PACKCOLOR8888(r, g, b, a);
+ }
+ return cso;
+}
+
+static void i915_bind_sampler_states(struct pipe_context *pipe,
+ unsigned num, void **sampler)
+{
+ struct i915_context *i915 = i915_context(pipe);
+ unsigned i;
+
+ assert(num <= PIPE_MAX_SAMPLERS);
+
+ /* Check for no-op */
+ if (num == i915->num_samplers &&
+ !memcmp(i915->sampler, sampler, num * sizeof(void *)))
+ return;
+
+ draw_flush(i915->draw);
+
+ for (i = 0; i < num; ++i)
+ i915->sampler[i] = sampler[i];
+ for (i = num; i < PIPE_MAX_SAMPLERS; ++i)
+ i915->sampler[i] = NULL;
+
+ i915->num_samplers = num;
+
+ i915->dirty |= I915_NEW_SAMPLER;
+}
+
+static void i915_delete_sampler_state(struct pipe_context *pipe,
+ void *sampler)
+{
+ FREE(sampler);
+}
+
+
+/** XXX move someday? Or consolidate all these simple state setters
+ * into one file.
+ */
+
+static void *
+i915_create_depth_stencil_state(struct pipe_context *pipe,
+ const struct pipe_depth_stencil_alpha_state *depth_stencil)
+{
+ struct i915_depth_stencil_state *cso = CALLOC_STRUCT( i915_depth_stencil_state );
+
+ {
+ int testmask = depth_stencil->stencil[0].valuemask & 0xff;
+ int writemask = depth_stencil->stencil[0].writemask & 0xff;
+
+ cso->stencil_modes4 |= (_3DSTATE_MODES_4_CMD |
+ ENABLE_STENCIL_TEST_MASK |
+ STENCIL_TEST_MASK(testmask) |
+ ENABLE_STENCIL_WRITE_MASK |
+ STENCIL_WRITE_MASK(writemask));
+ }
+
+ if (depth_stencil->stencil[0].enabled) {
+ int test = i915_translate_compare_func(depth_stencil->stencil[0].func);
+ int fop = i915_translate_stencil_op(depth_stencil->stencil[0].fail_op);
+ int dfop = i915_translate_stencil_op(depth_stencil->stencil[0].zfail_op);
+ int dpop = i915_translate_stencil_op(depth_stencil->stencil[0].zpass_op);
+ int ref = depth_stencil->stencil[0].ref_value & 0xff;
+
+ cso->stencil_LIS5 |= (S5_STENCIL_TEST_ENABLE |
+ S5_STENCIL_WRITE_ENABLE |
+ (ref << S5_STENCIL_REF_SHIFT) |
+ (test << S5_STENCIL_TEST_FUNC_SHIFT) |
+ (fop << S5_STENCIL_FAIL_SHIFT) |
+ (dfop << S5_STENCIL_PASS_Z_FAIL_SHIFT) |
+ (dpop << S5_STENCIL_PASS_Z_PASS_SHIFT));
+ }
+
+ if (depth_stencil->stencil[1].enabled) {
+ int test = i915_translate_compare_func(depth_stencil->stencil[1].func);
+ int fop = i915_translate_stencil_op(depth_stencil->stencil[1].fail_op);
+ int dfop = i915_translate_stencil_op(depth_stencil->stencil[1].zfail_op);
+ int dpop = i915_translate_stencil_op(depth_stencil->stencil[1].zpass_op);
+ int ref = depth_stencil->stencil[1].ref_value & 0xff;
+ int tmask = depth_stencil->stencil[1].valuemask & 0xff;
+ int wmask = depth_stencil->stencil[1].writemask & 0xff;
+
+ cso->bfo[0] = (_3DSTATE_BACKFACE_STENCIL_OPS |
+ BFO_ENABLE_STENCIL_FUNCS |
+ BFO_ENABLE_STENCIL_TWO_SIDE |
+ BFO_ENABLE_STENCIL_REF |
+ BFO_STENCIL_TWO_SIDE |
+ (ref << BFO_STENCIL_REF_SHIFT) |
+ (test << BFO_STENCIL_TEST_SHIFT) |
+ (fop << BFO_STENCIL_FAIL_SHIFT) |
+ (dfop << BFO_STENCIL_PASS_Z_FAIL_SHIFT) |
+ (dpop << BFO_STENCIL_PASS_Z_PASS_SHIFT));
+
+ cso->bfo[1] = (_3DSTATE_BACKFACE_STENCIL_MASKS |
+ BFM_ENABLE_STENCIL_TEST_MASK |
+ BFM_ENABLE_STENCIL_WRITE_MASK |
+ (tmask << BFM_STENCIL_TEST_MASK_SHIFT) |
+ (wmask << BFM_STENCIL_WRITE_MASK_SHIFT));
+ }
+ else {
+ /* This actually disables two-side stencil: The bit set is a
+ * modify-enable bit to indicate we are changing the two-side
+ * setting. Then there is a symbolic zero to show that we are
+ * setting the flag to zero/off.
+ */
+ cso->bfo[0] = (_3DSTATE_BACKFACE_STENCIL_OPS |
+ BFO_ENABLE_STENCIL_TWO_SIDE |
+ 0);
+ cso->bfo[1] = 0;
+ }
+
+ if (depth_stencil->depth.enabled) {
+ int func = i915_translate_compare_func(depth_stencil->depth.func);
+
+ cso->depth_LIS6 |= (S6_DEPTH_TEST_ENABLE |
+ (func << S6_DEPTH_TEST_FUNC_SHIFT));
+
+ if (depth_stencil->depth.writemask)
+ cso->depth_LIS6 |= S6_DEPTH_WRITE_ENABLE;
+ }
+
+ if (depth_stencil->alpha.enabled) {
+ int test = i915_translate_compare_func(depth_stencil->alpha.func);
+ ubyte refByte = float_to_ubyte(depth_stencil->alpha.ref_value);
+
+ cso->depth_LIS6 |= (S6_ALPHA_TEST_ENABLE |
+ (test << S6_ALPHA_TEST_FUNC_SHIFT) |
+ (((unsigned) refByte) << S6_ALPHA_REF_SHIFT));
+ }
+
+ return cso;
+}
+
+static void i915_bind_depth_stencil_state(struct pipe_context *pipe,
+ void *depth_stencil)
+{
+ struct i915_context *i915 = i915_context(pipe);
+ draw_flush(i915->draw);
+
+ i915->depth_stencil = (const struct i915_depth_stencil_state *)depth_stencil;
+
+ i915->dirty |= I915_NEW_DEPTH_STENCIL;
+}
+
+static void i915_delete_depth_stencil_state(struct pipe_context *pipe,
+ void *depth_stencil)
+{
+ FREE(depth_stencil);
+}
+
+
+static void i915_set_scissor_state( struct pipe_context *pipe,
+ const struct pipe_scissor_state *scissor )
+{
+ struct i915_context *i915 = i915_context(pipe);
+ draw_flush(i915->draw);
+
+ memcpy( &i915->scissor, scissor, sizeof(*scissor) );
+ i915->dirty |= I915_NEW_SCISSOR;
+}
+
+
+static void i915_set_polygon_stipple( struct pipe_context *pipe,
+ const struct pipe_poly_stipple *stipple )
+{
+}
+
+
+
+static void *
+i915_create_fs_state(struct pipe_context *pipe,
+ const struct pipe_shader_state *templ)
+{
+ struct i915_context *i915 = i915_context(pipe);
+ struct i915_fragment_shader *ifs = CALLOC_STRUCT(i915_fragment_shader);
+ if (!ifs)
+ return NULL;
+
+ ifs->state.tokens = tgsi_dup_tokens(templ->tokens);
+
+ tgsi_scan_shader(templ->tokens, &ifs->info);
+
+   /* The shader is compiled to i915 instructions here. */
+ i915_translate_fragment_program(i915, ifs);
+
+ return ifs;
+}
+
+static void
+i915_bind_fs_state(struct pipe_context *pipe, void *shader)
+{
+ struct i915_context *i915 = i915_context(pipe);
+ draw_flush(i915->draw);
+
+ i915->fs = (struct i915_fragment_shader*) shader;
+
+ i915->dirty |= I915_NEW_FS;
+}
+
+static
+void i915_delete_fs_state(struct pipe_context *pipe, void *shader)
+{
+ struct i915_fragment_shader *ifs = (struct i915_fragment_shader *) shader;
+
+ if (ifs->program)
+ FREE(ifs->program);
+ ifs->program_len = 0;
+
+ FREE((struct tgsi_token *)ifs->state.tokens);
+
+ FREE(ifs);
+}
+
+
+static void *
+i915_create_vs_state(struct pipe_context *pipe,
+ const struct pipe_shader_state *templ)
+{
+ struct i915_context *i915 = i915_context(pipe);
+
+ /* just pass-through to draw module */
+ return draw_create_vertex_shader(i915->draw, templ);
+}
+
+static void i915_bind_vs_state(struct pipe_context *pipe, void *shader)
+{
+ struct i915_context *i915 = i915_context(pipe);
+
+ /* just pass-through to draw module */
+ draw_bind_vertex_shader(i915->draw, (struct draw_vertex_shader *) shader);
+
+ i915->dirty |= I915_NEW_VS;
+}
+
+static void i915_delete_vs_state(struct pipe_context *pipe, void *shader)
+{
+ struct i915_context *i915 = i915_context(pipe);
+
+ /* just pass-through to draw module */
+ draw_delete_vertex_shader(i915->draw, (struct draw_vertex_shader *) shader);
+}
+
+static void i915_set_constant_buffer(struct pipe_context *pipe,
+ uint shader, uint index,
+ const struct pipe_constant_buffer *buf)
+{
+ struct i915_context *i915 = i915_context(pipe);
+ struct pipe_screen *screen = pipe->screen;
+ draw_flush(i915->draw);
+
+ assert(shader < PIPE_SHADER_TYPES);
+ assert(index == 0);
+
+ /* Make a copy of shader constants.
+ * During fragment program translation we may add additional
+ * constants to the array.
+ *
+ * We want to consider the situation where some user constants
+ * (ex: a material color) may change frequently but the shader program
+ * stays the same. In that case we should only be updating the first
+ * N constants, leaving any extras from shader translation alone.
+ */
+ if (buf) {
+ void *mapped;
+ if (buf->buffer && buf->buffer->size &&
+ (mapped = pipe_buffer_map(screen, buf->buffer,
+ PIPE_BUFFER_USAGE_CPU_READ))) {
+ memcpy(i915->current.constants[shader], mapped, buf->buffer->size);
+ pipe_buffer_unmap(screen, buf->buffer);
+ i915->current.num_user_constants[shader]
+ = buf->buffer->size / (4 * sizeof(float));
+ }
+ else {
+ i915->current.num_user_constants[shader] = 0;
+ }
+ }
+
+ i915->dirty |= I915_NEW_CONSTANTS;
+}
+
+
+static void i915_set_sampler_textures(struct pipe_context *pipe,
+ unsigned num,
+ struct pipe_texture **texture)
+{
+ struct i915_context *i915 = i915_context(pipe);
+ uint i;
+
+ assert(num <= PIPE_MAX_SAMPLERS);
+
+ /* Check for no-op */
+ if (num == i915->num_textures &&
+ !memcmp(i915->texture, texture, num * sizeof(struct pipe_texture *)))
+ return;
+
+ /* Fixes wrong texture in texobj with VBUF */
+ draw_flush(i915->draw);
+
+ for (i = 0; i < num; i++)
+ pipe_texture_reference((struct pipe_texture **) &i915->texture[i],
+ texture[i]);
+
+ for (i = num; i < i915->num_textures; i++)
+ pipe_texture_reference((struct pipe_texture **) &i915->texture[i],
+ NULL);
+
+ i915->num_textures = num;
+
+ i915->dirty |= I915_NEW_TEXTURE;
+}
+
+
+
+static void i915_set_framebuffer_state(struct pipe_context *pipe,
+ const struct pipe_framebuffer_state *fb)
+{
+ struct i915_context *i915 = i915_context(pipe);
+ draw_flush(i915->draw);
+
+ i915->framebuffer = *fb; /* struct copy */
+
+ i915->dirty |= I915_NEW_FRAMEBUFFER;
+}
+
+
+
+static void i915_set_clip_state( struct pipe_context *pipe,
+ const struct pipe_clip_state *clip )
+{
+ struct i915_context *i915 = i915_context(pipe);
+ draw_flush(i915->draw);
+
+ draw_set_clip_state(i915->draw, clip);
+
+ i915->dirty |= I915_NEW_CLIP;
+}
+
+
+
+/* Called when the state tracker notices changes to the viewport
+ * matrix:
+ */
+static void i915_set_viewport_state( struct pipe_context *pipe,
+ const struct pipe_viewport_state *viewport )
+{
+ struct i915_context *i915 = i915_context(pipe);
+
+ i915->viewport = *viewport; /* struct copy */
+
+ /* pass the viewport info to the draw module */
+ draw_set_viewport_state(i915->draw, &i915->viewport);
+
+ i915->dirty |= I915_NEW_VIEWPORT;
+}
+
+
+static void *
+i915_create_rasterizer_state(struct pipe_context *pipe,
+ const struct pipe_rasterizer_state *rasterizer)
+{
+ struct i915_rasterizer_state *cso = CALLOC_STRUCT( i915_rasterizer_state );
+
+ cso->templ = rasterizer;
+ cso->color_interp = rasterizer->flatshade ? INTERP_CONSTANT : INTERP_LINEAR;
+ cso->light_twoside = rasterizer->light_twoside;
+ cso->ds[0].u = _3DSTATE_DEPTH_OFFSET_SCALE;
+ cso->ds[1].f = rasterizer->offset_scale;
+ if (rasterizer->poly_stipple_enable) {
+ cso->st |= ST1_ENABLE;
+ }
+
+ if (rasterizer->scissor)
+ cso->sc[0] = _3DSTATE_SCISSOR_ENABLE_CMD | ENABLE_SCISSOR_RECT;
+ else
+ cso->sc[0] = _3DSTATE_SCISSOR_ENABLE_CMD | DISABLE_SCISSOR_RECT;
+
+ switch (rasterizer->cull_mode) {
+ case PIPE_WINDING_NONE:
+ cso->LIS4 |= S4_CULLMODE_NONE;
+ break;
+ case PIPE_WINDING_CW:
+ cso->LIS4 |= S4_CULLMODE_CW;
+ break;
+ case PIPE_WINDING_CCW:
+ cso->LIS4 |= S4_CULLMODE_CCW;
+ break;
+ case PIPE_WINDING_BOTH:
+ cso->LIS4 |= S4_CULLMODE_BOTH;
+ break;
+ }
+
+ {
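+      /* Line width is stored with one fractional bit, giving a maximum of
+       * 7.5 (matching the value reported for PIPE_CAP_MAX_LINE_WIDTH).
+       */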
+ int line_width = CLAMP((int)(rasterizer->line_width * 2), 1, 0xf);
+
+ cso->LIS4 |= line_width << S4_LINE_WIDTH_SHIFT;
+
+ if (rasterizer->line_smooth)
+ cso->LIS4 |= S4_LINE_ANTIALIAS_ENABLE;
+ }
+
+ {
+ int point_size = CLAMP((int) rasterizer->point_size, 1, 0xff);
+
+ cso->LIS4 |= point_size << S4_POINT_WIDTH_SHIFT;
+ }
+
+ if (rasterizer->flatshade) {
+ cso->LIS4 |= (S4_FLATSHADE_ALPHA |
+ S4_FLATSHADE_COLOR |
+ S4_FLATSHADE_SPECULAR);
+ }
+
+ cso->LIS7 = fui( rasterizer->offset_units );
+
+
+ return cso;
+}
+
+static void i915_bind_rasterizer_state( struct pipe_context *pipe,
+ void *raster )
+{
+ struct i915_context *i915 = i915_context(pipe);
+
+ i915->rasterizer = (struct i915_rasterizer_state *)raster;
+
+ /* pass-through to draw module */
+ draw_set_rasterizer_state(i915->draw,
+ (i915->rasterizer ? i915->rasterizer->templ : NULL));
+
+ i915->dirty |= I915_NEW_RASTERIZER;
+}
+
+static void i915_delete_rasterizer_state(struct pipe_context *pipe,
+ void *raster)
+{
+ FREE(raster);
+}
+
+static void i915_set_vertex_buffers(struct pipe_context *pipe,
+ unsigned count,
+ const struct pipe_vertex_buffer *buffers)
+{
+ struct i915_context *i915 = i915_context(pipe);
+ /* Because we change state before the draw_set_vertex_buffers call
+ * we need a flush here, just to be sure.
+ */
+ draw_flush(i915->draw);
+
+ memcpy(i915->vertex_buffer, buffers, count * sizeof(buffers[0]));
+ i915->num_vertex_buffers = count;
+
+ /* pass-through to draw module */
+ draw_set_vertex_buffers(i915->draw, count, buffers);
+}
+
+static void i915_set_vertex_elements(struct pipe_context *pipe,
+ unsigned count,
+ const struct pipe_vertex_element *elements)
+{
+ struct i915_context *i915 = i915_context(pipe);
+   /* Because we change state before the draw_set_vertex_elements call
+ * we need a flush here, just to be sure.
+ */
+ draw_flush(i915->draw);
+
+ i915->num_vertex_elements = count;
+ /* pass-through to draw module */
+ draw_set_vertex_elements(i915->draw, count, elements);
+}
+
+
+static void i915_set_edgeflags(struct pipe_context *pipe,
+ const unsigned *bitfield)
+{
+ /* TODO do something here */
+}
+
+void
+i915_init_state_functions( struct i915_context *i915 )
+{
+ i915->base.set_edgeflags = i915_set_edgeflags;
+ i915->base.create_blend_state = i915_create_blend_state;
+ i915->base.bind_blend_state = i915_bind_blend_state;
+ i915->base.delete_blend_state = i915_delete_blend_state;
+
+ i915->base.create_sampler_state = i915_create_sampler_state;
+ i915->base.bind_sampler_states = i915_bind_sampler_states;
+ i915->base.delete_sampler_state = i915_delete_sampler_state;
+
+ i915->base.create_depth_stencil_alpha_state = i915_create_depth_stencil_state;
+ i915->base.bind_depth_stencil_alpha_state = i915_bind_depth_stencil_state;
+ i915->base.delete_depth_stencil_alpha_state = i915_delete_depth_stencil_state;
+
+ i915->base.create_rasterizer_state = i915_create_rasterizer_state;
+ i915->base.bind_rasterizer_state = i915_bind_rasterizer_state;
+ i915->base.delete_rasterizer_state = i915_delete_rasterizer_state;
+ i915->base.create_fs_state = i915_create_fs_state;
+ i915->base.bind_fs_state = i915_bind_fs_state;
+ i915->base.delete_fs_state = i915_delete_fs_state;
+ i915->base.create_vs_state = i915_create_vs_state;
+ i915->base.bind_vs_state = i915_bind_vs_state;
+ i915->base.delete_vs_state = i915_delete_vs_state;
+
+ i915->base.set_blend_color = i915_set_blend_color;
+ i915->base.set_clip_state = i915_set_clip_state;
+ i915->base.set_constant_buffer = i915_set_constant_buffer;
+ i915->base.set_framebuffer_state = i915_set_framebuffer_state;
+
+ i915->base.set_polygon_stipple = i915_set_polygon_stipple;
+ i915->base.set_scissor_state = i915_set_scissor_state;
+ i915->base.set_sampler_textures = i915_set_sampler_textures;
+ i915->base.set_viewport_state = i915_set_viewport_state;
+ i915->base.set_vertex_buffers = i915_set_vertex_buffers;
+ i915->base.set_vertex_elements = i915_set_vertex_elements;
+}
diff --git a/src/gallium/drivers/i915simple/i915_state.h b/src/gallium/drivers/i915simple/i915_state.h
new file mode 100644
index 0000000000..86c6b0027d
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_state.h
@@ -0,0 +1,50 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef I915_STATE_H
+#define I915_STATE_H
+
+struct i915_context;
+
+
+struct i915_tracked_state {
+ unsigned dirty;
+ void (*update)( struct i915_context * );
+};
+
+void i915_update_immediate( struct i915_context *i915 );
+void i915_update_dynamic( struct i915_context *i915 );
+void i915_update_derived( struct i915_context *i915 );
+void i915_update_samplers( struct i915_context *i915 );
+void i915_update_textures(struct i915_context *i915);
+
+void i915_emit_hardware_state( struct i915_context *i915 );
+
+#endif
diff --git a/src/gallium/drivers/i915simple/i915_state_derived.c b/src/gallium/drivers/i915simple/i915_state_derived.c
new file mode 100644
index 0000000000..178d4e8781
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_state_derived.c
@@ -0,0 +1,183 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "util/u_memory.h"
+#include "pipe/p_shader_tokens.h"
+#include "draw/draw_context.h"
+#include "draw/draw_vertex.h"
+#include "i915_context.h"
+#include "i915_state.h"
+#include "i915_reg.h"
+#include "i915_fpc.h"
+
+
+
+/**
+ * Determine the hardware vertex layout.
+ * Depends on vertex/fragment shader state.
+ */
+static void calculate_vertex_layout( struct i915_context *i915 )
+{
+ const struct i915_fragment_shader *fs = i915->fs;
+ const enum interp_mode colorInterp = i915->rasterizer->color_interp;
+ struct vertex_info vinfo;
+ boolean texCoords[8], colors[2], fog, needW;
+ uint i;
+ int src;
+
+ memset(texCoords, 0, sizeof(texCoords));
+ colors[0] = colors[1] = fog = needW = FALSE;
+ memset(&vinfo, 0, sizeof(vinfo));
+
+   /* Determine which fragment program inputs are needed. Set up the HW vertex
+ * layout below, in the HW-specific attribute order.
+ */
+ for (i = 0; i < fs->info.num_inputs; i++) {
+ switch (fs->info.input_semantic_name[i]) {
+ case TGSI_SEMANTIC_POSITION:
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ assert(fs->info.input_semantic_index[i] < 2);
+ colors[fs->info.input_semantic_index[i]] = TRUE;
+ break;
+ case TGSI_SEMANTIC_GENERIC:
+ /* usually a texcoord */
+ {
+ const uint unit = fs->info.input_semantic_index[i];
+ assert(unit < 8);
+ texCoords[unit] = TRUE;
+ needW = TRUE;
+ }
+ break;
+ case TGSI_SEMANTIC_FOG:
+ fog = TRUE;
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+
+ /* pos */
+ src = draw_find_vs_output(i915->draw, TGSI_SEMANTIC_POSITION, 0);
+ if (needW) {
+ draw_emit_vertex_attr(&vinfo, EMIT_4F, INTERP_LINEAR, src);
+ vinfo.hwfmt[0] |= S4_VFMT_XYZW;
+ vinfo.attrib[0].emit = EMIT_4F;
+ }
+ else {
+ draw_emit_vertex_attr(&vinfo, EMIT_3F, INTERP_LINEAR, src);
+ vinfo.hwfmt[0] |= S4_VFMT_XYZ;
+ vinfo.attrib[0].emit = EMIT_3F;
+ }
+
+ /* hardware point size */
+ /* XXX todo */
+
+ /* primary color */
+ if (colors[0]) {
+ src = draw_find_vs_output(i915->draw, TGSI_SEMANTIC_COLOR, 0);
+ draw_emit_vertex_attr(&vinfo, EMIT_4UB, colorInterp, src);
+ vinfo.hwfmt[0] |= S4_VFMT_COLOR;
+ }
+
+ /* secondary color */
+ if (colors[1]) {
+ src = draw_find_vs_output(i915->draw, TGSI_SEMANTIC_COLOR, 1);
+ draw_emit_vertex_attr(&vinfo, EMIT_4UB, colorInterp, src);
+ vinfo.hwfmt[0] |= S4_VFMT_SPEC_FOG;
+ }
+
+ /* fog coord, not fog blend factor */
+ if (fog) {
+ src = draw_find_vs_output(i915->draw, TGSI_SEMANTIC_FOG, 0);
+ draw_emit_vertex_attr(&vinfo, EMIT_1F, INTERP_PERSPECTIVE, src);
+ vinfo.hwfmt[0] |= S4_VFMT_FOG_PARAM;
+ }
+
+ /* texcoords */
+ for (i = 0; i < 8; i++) {
+ uint hwtc;
+ if (texCoords[i]) {
+ hwtc = TEXCOORDFMT_4D;
+ src = draw_find_vs_output(i915->draw, TGSI_SEMANTIC_GENERIC, i);
+ draw_emit_vertex_attr(&vinfo, EMIT_4F, INTERP_PERSPECTIVE, src);
+ }
+ else {
+ hwtc = TEXCOORDFMT_NOT_PRESENT;
+ }
+ vinfo.hwfmt[1] |= hwtc << (i * 4);
+ }
+
+ draw_compute_vertex_size(&vinfo);
+
+ if (memcmp(&i915->current.vertex_info, &vinfo, sizeof(vinfo))) {
+ /* Need to set this flag so that the LIS2/4 registers get set.
+ * It also means the i915_update_immediate() function must be called
+ * after this one, in i915_update_derived().
+ */
+ i915->dirty |= I915_NEW_VERTEX_FORMAT;
+
+ memcpy(&i915->current.vertex_info, &vinfo, sizeof(vinfo));
+ }
+}
+
+
+
+
+/* Hopefully this will remain quite simple; otherwise we'll need to pull in
+ * something like the state tracker mechanism.
+ */
+void i915_update_derived( struct i915_context *i915 )
+{
+ if (i915->dirty & (I915_NEW_RASTERIZER | I915_NEW_FS | I915_NEW_VS))
+ calculate_vertex_layout( i915 );
+
+ if (i915->dirty & (I915_NEW_SAMPLER | I915_NEW_TEXTURE))
+ i915_update_samplers(i915);
+
+ if (i915->dirty & I915_NEW_TEXTURE)
+ i915_update_textures(i915);
+
+ if (i915->dirty)
+ i915_update_immediate( i915 );
+
+ if (i915->dirty)
+ i915_update_dynamic( i915 );
+
+ if (i915->dirty & I915_NEW_FS) {
+ i915->hardware_dirty |= I915_HW_PROGRAM; /* XXX right? */
+ }
+
+ /* HW emit currently references framebuffer state directly:
+ */
+ if (i915->dirty & I915_NEW_FRAMEBUFFER)
+ i915->hardware_dirty |= I915_HW_STATIC;
+
+ i915->dirty = 0;
+}
diff --git a/src/gallium/drivers/i915simple/i915_state_dynamic.c b/src/gallium/drivers/i915simple/i915_state_dynamic.c
new file mode 100644
index 0000000000..86126a5a15
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_state_dynamic.c
@@ -0,0 +1,310 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "i915_batch.h"
+#include "i915_state_inlines.h"
+#include "i915_context.h"
+#include "i915_reg.h"
+#include "i915_state.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "util/u_pack_color.h"
+
+#define FILE_DEBUG_FLAG DEBUG_STATE
+
+/* State that we have chosen to store in the DYNAMIC segment of the
+ * i915 indirect state mechanism.
+ *
+ * Can't cache these the way we do the static state, as there is no
+ * start/size in the command packet; instead there is an 'end' value
+ * that gets incremented.
+ *
+ * Additionally, there seems to be a requirement to re-issue the full
+ * (active) state every time a 4kb boundary is crossed.
+ */
+
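+/* Copy 'dwords' words from 'src' into the shadow copy of the dynamic
+ * state, starting at 'offset', and flag the dynamic state for
+ * re-emission.
+ */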
+static INLINE void set_dynamic_indirect( struct i915_context *i915,
+ unsigned offset,
+ const unsigned *src,
+ unsigned dwords )
+{
+ unsigned i;
+
+ for (i = 0; i < dwords; i++)
+ i915->current.dynamic[offset + i] = src[i];
+
+ i915->hardware_dirty |= I915_HW_DYNAMIC;
+}
+
+
+/***********************************************************************
+ * Modes4: stencil masks and logicop
+ */
+static void upload_MODES4( struct i915_context *i915 )
+{
+ unsigned modes4 = 0;
+
+ /* I915_NEW_STENCIL */
+ modes4 |= i915->depth_stencil->stencil_modes4;
+ /* I915_NEW_BLEND */
+ modes4 |= i915->blend->modes4;
+
+   /* Always, so that we know when state is inactive:
+ */
+ set_dynamic_indirect( i915,
+ I915_DYNAMIC_MODES4,
+ &modes4,
+ 1 );
+}
+
+const struct i915_tracked_state i915_upload_MODES4 = {
+ I915_NEW_BLEND | I915_NEW_DEPTH_STENCIL,
+ upload_MODES4
+};
+
+
+
+
+/***********************************************************************
+ */
+
+static void upload_BFO( struct i915_context *i915 )
+{
+ set_dynamic_indirect( i915,
+ I915_DYNAMIC_BFO_0,
+ &(i915->depth_stencil->bfo[0]),
+ 2 );
+}
+
+const struct i915_tracked_state i915_upload_BFO = {
+ I915_NEW_DEPTH_STENCIL,
+ upload_BFO
+};
+
+
+/***********************************************************************
+ */
+
+
+static void upload_BLENDCOLOR( struct i915_context *i915 )
+{
+ unsigned bc[2];
+
+ memset( bc, 0, sizeof(bc) );
+
+ /* I915_NEW_BLEND {_COLOR}
+ */
+ {
+ const float *color = i915->blend_color.color;
+
+ bc[0] = _3DSTATE_CONST_BLEND_COLOR_CMD;
+ bc[1] = pack_ui32_float4( color[0],
+ color[1],
+ color[2],
+ color[3] );
+ }
+
+ set_dynamic_indirect( i915,
+ I915_DYNAMIC_BC_0,
+ bc,
+ 2 );
+}
+
+const struct i915_tracked_state i915_upload_BLENDCOLOR = {
+ I915_NEW_BLEND,
+ upload_BLENDCOLOR
+};
+
+/***********************************************************************
+ */
+
+
+static void upload_IAB( struct i915_context *i915 )
+{
+ unsigned iab = i915->blend->iab;
+
+
+ set_dynamic_indirect( i915,
+ I915_DYNAMIC_IAB,
+ &iab,
+ 1 );
+}
+
+const struct i915_tracked_state i915_upload_IAB = {
+ I915_NEW_BLEND,
+ upload_IAB
+};
+
+
+/***********************************************************************
+ */
+
+
+
+static void upload_DEPTHSCALE( struct i915_context *i915 )
+{
+ set_dynamic_indirect( i915,
+ I915_DYNAMIC_DEPTHSCALE_0,
+ &(i915->rasterizer->ds[0].u),
+ 2 );
+}
+
+const struct i915_tracked_state i915_upload_DEPTHSCALE = {
+ I915_NEW_RASTERIZER,
+ upload_DEPTHSCALE
+};
+
+
+
+/***********************************************************************
+ * Polygon stipple
+ *
+ * The i915 supports a 4x4 stipple natively; GL wants 32x32.
+ * Fortunately the stipple is usually a repeating pattern.
+ *
+ * XXX: does stipple pattern need to be adjusted according to
+ * the window position?
+ *
+ * XXX: possibly need workaround for conform paths test.
+ */
+
+static void upload_STIPPLE( struct i915_context *i915 )
+{
+ unsigned st[2];
+
+ st[0] = _3DSTATE_STIPPLE;
+ st[1] = 0;
+
+ /* I915_NEW_RASTERIZER
+ */
+ st[1] |= i915->rasterizer->st;
+
+
+ /* I915_NEW_STIPPLE
+ */
+ {
+ const ubyte *mask = (const ubyte *)i915->poly_stipple.stipple;
+ ubyte p[4];
+
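+      /* Build the 4x4 hardware pattern from the low nibble of the first
+       * byte of each of the first four 32-bit stipple rows.
+       */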
+ p[0] = mask[12] & 0xf;
+ p[1] = mask[8] & 0xf;
+ p[2] = mask[4] & 0xf;
+ p[3] = mask[0] & 0xf;
+
+      /* Not sure what to do about fallbacks, so for now just don't:
+ */
+ st[1] |= ((p[0] << 0) |
+ (p[1] << 4) |
+ (p[2] << 8) |
+ (p[3] << 12));
+ }
+
+
+ set_dynamic_indirect( i915,
+ I915_DYNAMIC_STP_0,
+ &st[0],
+ 2 );
+}
+
+
+const struct i915_tracked_state i915_upload_STIPPLE = {
+ I915_NEW_RASTERIZER | I915_NEW_STIPPLE,
+ upload_STIPPLE
+};
+
+
+
+/***********************************************************************
+ * Scissor.
+ */
+static void upload_SCISSOR_ENABLE( struct i915_context *i915 )
+{
+ set_dynamic_indirect( i915,
+ I915_DYNAMIC_SC_ENA_0,
+ &(i915->rasterizer->sc[0]),
+ 1 );
+}
+
+const struct i915_tracked_state i915_upload_SCISSOR_ENABLE = {
+ I915_NEW_RASTERIZER,
+ upload_SCISSOR_ENABLE
+};
+
+
+
+static void upload_SCISSOR_RECT( struct i915_context *i915 )
+{
+ unsigned x1 = i915->scissor.minx;
+ unsigned y1 = i915->scissor.miny;
+ unsigned x2 = i915->scissor.maxx;
+ unsigned y2 = i915->scissor.maxy;
+ unsigned sc[3];
+
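+   /* The min and max corners are each packed as y in the high 16 bits
+    * and x in the low 16 bits.
+    */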
+ sc[0] = _3DSTATE_SCISSOR_RECT_0_CMD;
+ sc[1] = (y1 << 16) | (x1 & 0xffff);
+ sc[2] = (y2 << 16) | (x2 & 0xffff);
+
+ set_dynamic_indirect( i915,
+ I915_DYNAMIC_SC_RECT_0,
+ &sc[0],
+ 3 );
+}
+
+
+const struct i915_tracked_state i915_upload_SCISSOR_RECT = {
+ I915_NEW_SCISSOR,
+ upload_SCISSOR_RECT
+};
+
+
+
+
+
+
+static const struct i915_tracked_state *atoms[] = {
+ &i915_upload_MODES4,
+ &i915_upload_BFO,
+ &i915_upload_BLENDCOLOR,
+ &i915_upload_IAB,
+ &i915_upload_DEPTHSCALE,
+ &i915_upload_STIPPLE,
+ &i915_upload_SCISSOR_ENABLE,
+ &i915_upload_SCISSOR_RECT
+};
+
+/* These will be dynamic indirect state commands, but for now they just
+ * end up in the batch buffer with everything else.
+ */
+void i915_update_dynamic( struct i915_context *i915 )
+{
+ int i;
+
+ for (i = 0; i < Elements(atoms); i++)
+ if (i915->dirty & atoms[i]->dirty)
+ atoms[i]->update( i915 );
+}
+
diff --git a/src/gallium/drivers/i915simple/i915_state_emit.c b/src/gallium/drivers/i915simple/i915_state_emit.c
new file mode 100644
index 0000000000..a3d4e3b04e
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_state_emit.c
@@ -0,0 +1,402 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "i915_reg.h"
+#include "i915_context.h"
+#include "i915_batch.h"
+#include "i915_reg.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+
+static unsigned translate_format( enum pipe_format format )
+{
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ return COLOR_BUF_ARGB8888;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ return COLOR_BUF_RGB565;
+ default:
+ assert(0);
+ return 0;
+ }
+}
+
+static unsigned translate_depth_format( enum pipe_format zformat )
+{
+ switch (zformat) {
+ case PIPE_FORMAT_S8Z24_UNORM:
+ return DEPTH_FRMT_24_FIXED_8_OTHER;
+ case PIPE_FORMAT_Z16_UNORM:
+ return DEPTH_FRMT_16_FIXED;
+ default:
+ assert(0);
+ return 0;
+ }
+}
+
+
+/**
+ * Examine framebuffer state to determine width, height.
+ */
+static boolean
+framebuffer_size(const struct pipe_framebuffer_state *fb,
+ uint *width, uint *height)
+{
+ if (fb->cbufs[0]) {
+ *width = fb->cbufs[0]->width;
+ *height = fb->cbufs[0]->height;
+ return TRUE;
+ }
+ else if (fb->zsbuf) {
+ *width = fb->zsbuf->width;
+ *height = fb->zsbuf->height;
+ return TRUE;
+ }
+ else {
+ *width = *height = 0;
+ return FALSE;
+ }
+}
+
+
+/* Push the state into the sarea and/or texture memory.
+ */
+void
+i915_emit_hardware_state(struct i915_context *i915 )
+{
+ /* XXX: there must be an easier way */
+ const unsigned dwords = ( 14 +
+ 7 +
+ I915_MAX_DYNAMIC +
+ 8 +
+ 2 + I915_TEX_UNITS*3 +
+ 2 + I915_TEX_UNITS*3 +
+ 2 + I915_MAX_CONSTANT*4 +
+#if 0
+ i915->current.program_len +
+#else
+ i915->fs->program_len +
+#endif
+ 6
+ ) * 3/2; /* plus 50% margin */
+ const unsigned relocs = ( I915_TEX_UNITS +
+ 3
+ ) * 3/2; /* plus 50% margin */
+
+#if 0
+ debug_printf("i915_emit_hardware_state: %d dwords, %d relocs\n", dwords, relocs);
+#endif
+
+ if(!BEGIN_BATCH(dwords, relocs)) {
+ FLUSH_BATCH(NULL);
+ assert(BEGIN_BATCH(dwords, relocs));
+ }
+
+ /* 14 dwords, 0 relocs */
+ if (i915->hardware_dirty & I915_HW_INVARIENT)
+ {
+ OUT_BATCH(_3DSTATE_AA_CMD |
+ AA_LINE_ECAAR_WIDTH_ENABLE |
+ AA_LINE_ECAAR_WIDTH_1_0 |
+ AA_LINE_REGION_WIDTH_ENABLE | AA_LINE_REGION_WIDTH_1_0);
+
+ OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
+ OUT_BATCH(0);
+
+ OUT_BATCH(_3DSTATE_DFLT_SPEC_CMD);
+ OUT_BATCH(0);
+
+ OUT_BATCH(_3DSTATE_DFLT_Z_CMD);
+ OUT_BATCH(0);
+
+ OUT_BATCH(_3DSTATE_COORD_SET_BINDINGS |
+ CSB_TCB(0, 0) |
+ CSB_TCB(1, 1) |
+ CSB_TCB(2, 2) |
+ CSB_TCB(3, 3) |
+ CSB_TCB(4, 4) |
+ CSB_TCB(5, 5) |
+ CSB_TCB(6, 6) |
+ CSB_TCB(7, 7));
+
+ OUT_BATCH(_3DSTATE_RASTER_RULES_CMD |
+ ENABLE_POINT_RASTER_RULE |
+ OGL_POINT_RASTER_RULE |
+ ENABLE_LINE_STRIP_PROVOKE_VRTX |
+ ENABLE_TRI_FAN_PROVOKE_VRTX |
+ LINE_STRIP_PROVOKE_VRTX(1) |
+ TRI_FAN_PROVOKE_VRTX(2) |
+ ENABLE_TEXKILL_3D_4D |
+ TEXKILL_4D);
+
+ /* Need to initialize this to zero.
+ */
+ OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 | I1_LOAD_S(3) | (0));
+ OUT_BATCH(0);
+
+ OUT_BATCH(_3DSTATE_DEPTH_SUBRECT_DISABLE);
+
+ /* disable indirect state for now
+ */
+ OUT_BATCH(_3DSTATE_LOAD_INDIRECT | 0);
+ OUT_BATCH(0);
+ }
+
+ /* 7 dwords, 1 relocs */
+ if (i915->hardware_dirty & I915_HW_IMMEDIATE)
+ {
+ OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
+ I1_LOAD_S(0) |
+ I1_LOAD_S(1) |
+ I1_LOAD_S(2) |
+ I1_LOAD_S(4) |
+ I1_LOAD_S(5) |
+ I1_LOAD_S(6) |
+ (5));
+
+ if(i915->vbo)
+ OUT_RELOC(i915->vbo,
+ INTEL_USAGE_VERTEX,
+ i915->current.immediate[I915_IMMEDIATE_S0]);
+ else
+ /* FIXME: we should not do this */
+ OUT_BATCH(0);
+ OUT_BATCH(i915->current.immediate[I915_IMMEDIATE_S1]);
+ OUT_BATCH(i915->current.immediate[I915_IMMEDIATE_S2]);
+ OUT_BATCH(i915->current.immediate[I915_IMMEDIATE_S4]);
+ OUT_BATCH(i915->current.immediate[I915_IMMEDIATE_S5]);
+ OUT_BATCH(i915->current.immediate[I915_IMMEDIATE_S6]);
+ }
+
+ /* I915_MAX_DYNAMIC dwords, 0 relocs */
+ if (i915->hardware_dirty & I915_HW_DYNAMIC)
+ {
+ int i;
+ for (i = 0; i < I915_MAX_DYNAMIC; i++) {
+ OUT_BATCH(i915->current.dynamic[i]);
+ }
+ }
+
+ /* 8 dwords, 2 relocs */
+ if (i915->hardware_dirty & I915_HW_STATIC)
+ {
+ struct pipe_surface *cbuf_surface = i915->framebuffer.cbufs[0];
+ struct pipe_surface *depth_surface = i915->framebuffer.zsbuf;
+
+ if (cbuf_surface) {
+ unsigned ctile = BUF_3D_USE_FENCE;
+ struct i915_texture *tex = (struct i915_texture *)
+ cbuf_surface->texture;
+ assert(tex);
+
+ if (tex && tex->sw_tiled) {
+ ctile = BUF_3D_TILED_SURFACE;
+ }
+
+ OUT_BATCH(_3DSTATE_BUF_INFO_CMD);
+
+ OUT_BATCH(BUF_3D_ID_COLOR_BACK |
+ BUF_3D_PITCH(tex->stride) | /* pitch in bytes */
+ ctile);
+
+ OUT_RELOC(tex->buffer,
+ INTEL_USAGE_RENDER,
+ cbuf_surface->offset);
+ }
+
+ /* What happens if no zbuf??
+ */
+ if (depth_surface) {
+ unsigned ztile = BUF_3D_USE_FENCE;
+ struct i915_texture *tex = (struct i915_texture *)
+ depth_surface->texture;
+ assert(tex);
+
+ if (tex && tex->sw_tiled) {
+ ztile = BUF_3D_TILED_SURFACE;
+ }
+
+ OUT_BATCH(_3DSTATE_BUF_INFO_CMD);
+
+ OUT_BATCH(BUF_3D_ID_DEPTH |
+ BUF_3D_PITCH(tex->stride) | /* pitch in bytes */
+ ztile);
+
+ OUT_RELOC(tex->buffer,
+ INTEL_USAGE_RENDER,
+ depth_surface->offset);
+ }
+
+ {
+ unsigned cformat, zformat = 0;
+
+ if (cbuf_surface)
+ cformat = cbuf_surface->format;
+ else
+ cformat = PIPE_FORMAT_A8R8G8B8_UNORM; /* arbitrary */
+ cformat = translate_format(cformat);
+
+ if (depth_surface)
+ zformat = translate_depth_format( i915->framebuffer.zsbuf->format );
+
+ OUT_BATCH(_3DSTATE_DST_BUF_VARS_CMD);
+ OUT_BATCH(DSTORG_HORT_BIAS(0x8) | /* .5 */
+ DSTORG_VERT_BIAS(0x8) | /* .5 */
+ LOD_PRECLAMP_OGL |
+ TEX_DEFAULT_COLOR_OGL |
+ cformat |
+ zformat );
+ }
+ }
+
+#if 01
+ /* texture images */
+ /* 2 + I915_TEX_UNITS*3 dwords, I915_TEX_UNITS relocs */
+ if (i915->hardware_dirty & (I915_HW_MAP | I915_HW_SAMPLER))
+ {
+ const uint nr = i915->current.sampler_enable_nr;
+ if (nr) {
+ const uint enabled = i915->current.sampler_enable_flags;
+ uint unit;
+ uint count = 0;
+ OUT_BATCH(_3DSTATE_MAP_STATE | (3 * nr));
+ OUT_BATCH(enabled);
+ for (unit = 0; unit < I915_TEX_UNITS; unit++) {
+ if (enabled & (1 << unit)) {
+ struct intel_buffer *buf = i915->texture[unit]->buffer;
+ uint offset = 0;
+ assert(buf);
+
+ count++;
+
+ OUT_RELOC(buf, INTEL_USAGE_SAMPLER, offset);
+ OUT_BATCH(i915->current.texbuffer[unit][0]); /* MS3 */
+ OUT_BATCH(i915->current.texbuffer[unit][1]); /* MS4 */
+ }
+ }
+ assert(count == nr);
+ }
+ }
+#endif
+
+#if 01
+ /* samplers */
+ /* 2 + I915_TEX_UNITS*3 dwords, 0 relocs */
+ if (i915->hardware_dirty & I915_HW_SAMPLER)
+ {
+ if (i915->current.sampler_enable_nr) {
+ int i;
+
+ OUT_BATCH( _3DSTATE_SAMPLER_STATE |
+ (3 * i915->current.sampler_enable_nr) );
+
+ OUT_BATCH( i915->current.sampler_enable_flags );
+
+ for (i = 0; i < I915_TEX_UNITS; i++) {
+ if (i915->current.sampler_enable_flags & (1<<i)) {
+ OUT_BATCH( i915->current.sampler[i][0] );
+ OUT_BATCH( i915->current.sampler[i][1] );
+ OUT_BATCH( i915->current.sampler[i][2] );
+ }
+ }
+ }
+ }
+#endif
+
+ /* constants */
+ /* 2 + I915_MAX_CONSTANT*4 dwords, 0 relocs */
+ if (i915->hardware_dirty & I915_HW_PROGRAM)
+ {
+ /* Collate the user-defined constants with the fragment shader's
+ * immediates according to the constant_flags[] array.
+ */
+ const uint nr = i915->fs->num_constants;
+ if (nr) {
+ uint i;
+
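+         /* Emit the packet header, then a mask with the low 'nr' bits
+          * set, selecting which constant registers are loaded.
+          */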
+ OUT_BATCH( _3DSTATE_PIXEL_SHADER_CONSTANTS | (nr * 4) );
+ OUT_BATCH( (1 << (nr - 1)) | ((1 << (nr - 1)) - 1) );
+
+ for (i = 0; i < nr; i++) {
+ const uint *c;
+ if (i915->fs->constant_flags[i] == I915_CONSTFLAG_USER) {
+ /* grab user-defined constant */
+ c = (uint *) i915->current.constants[PIPE_SHADER_FRAGMENT][i];
+ }
+ else {
+ /* emit program constant */
+ c = (uint *) i915->fs->constants[i];
+ }
+#if 0 /* debug */
+ {
+ float *f = (float *) c;
+ printf("Const %2d: %f %f %f %f %s\n", i, f[0], f[1], f[2], f[3],
+ (i915->fs->constant_flags[i] == I915_CONSTFLAG_USER
+ ? "user" : "immediate"));
+ }
+#endif
+ OUT_BATCH(*c++);
+ OUT_BATCH(*c++);
+ OUT_BATCH(*c++);
+ OUT_BATCH(*c++);
+ }
+ }
+ }
+
+ /* Fragment program */
+ /* i915->current.program_len dwords, 0 relocs */
+ if (i915->hardware_dirty & I915_HW_PROGRAM)
+ {
+ uint i;
+ /* we should always have, at least, a pass-through program */
+ assert(i915->fs->program_len > 0);
+ for (i = 0; i < i915->fs->program_len; i++) {
+ OUT_BATCH(i915->fs->program[i]);
+ }
+ }
+
+ /* drawing surface size */
+ /* 6 dwords, 0 relocs */
+ {
+ uint w, h;
+ boolean k = framebuffer_size(&i915->framebuffer, &w, &h);
+ (void)k;
+ assert(k);
+
+ OUT_BATCH(_3DSTATE_DRAW_RECT_CMD);
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ OUT_BATCH(((w - 1) & 0xffff) | ((h - 1) << 16));
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ }
+
+
+ i915->hardware_dirty = 0;
+}
diff --git a/src/gallium/drivers/i915simple/i915_state_immediate.c b/src/gallium/drivers/i915simple/i915_state_immediate.c
new file mode 100644
index 0000000000..8c16bb4e27
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_state_immediate.c
@@ -0,0 +1,225 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "i915_state_inlines.h"
+#include "i915_context.h"
+#include "i915_state.h"
+#include "i915_reg.h"
+#include "util/u_memory.h"
+
+
+/* All state expressible with the LOAD_STATE_IMMEDIATE_1 packet.
+ * Would like to opportunistically recombine all these fragments into
+ * a single packet containing only what has changed, but for now emit
+ * as multiple packets.
+ */
+
+
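+/* Each fragment below follows the same pattern: compute the new LIS dword(s)
+ * from bound pipe state, compare against i915->current.immediate[], and set
+ * I915_HW_IMMEDIATE when something changed.  A minimal skeleton of such an
+ * atom (illustrative only; "Sn" and the dirty bit are placeholders):
+ *
+ *    static void upload_Sn(struct i915_context *i915)
+ *    {
+ *       unsigned LISn = ...;   // derived from the relevant pipe state
+ *       if (LISn != i915->current.immediate[I915_IMMEDIATE_Sn]) {
+ *          i915->current.immediate[I915_IMMEDIATE_Sn] = LISn;
+ *          i915->hardware_dirty |= I915_HW_IMMEDIATE;
+ *       }
+ *    }
+ *
+ *    const struct i915_tracked_state i915_upload_Sn = {
+ *       I915_NEW_SOMETHING,   // pipe-level dirty bits this atom depends on
+ *       upload_Sn
+ *    };
+ */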
+
+
+/***********************************************************************
+ * S0,S1: Vertex buffer state.
+ */
+static void upload_S0S1(struct i915_context *i915)
+{
+ unsigned LIS0, LIS1;
+
+ /* INTEL_NEW_VBO */
+ /* TODO: re-use vertex buffers here? */
+ LIS0 = i915->vbo_offset;
+
+ /* INTEL_NEW_VERTEX_SIZE -- do this where the vertex size is calculated!
+ */
+ {
+ unsigned vertex_size = i915->current.vertex_info.size;
+
+ LIS1 = ((vertex_size << 24) |
+ (vertex_size << 16));
+ }
+
+ /* INTEL_NEW_VBO */
+ /* TODO: use a vertex generation number to track vbo changes */
+ if (1 ||
+ i915->current.immediate[I915_IMMEDIATE_S0] != LIS0 ||
+ i915->current.immediate[I915_IMMEDIATE_S1] != LIS1)
+ {
+ i915->current.immediate[I915_IMMEDIATE_S0] = LIS0;
+ i915->current.immediate[I915_IMMEDIATE_S1] = LIS1;
+ i915->hardware_dirty |= I915_HW_IMMEDIATE;
+ }
+}
+
+const struct i915_tracked_state i915_upload_S0S1 = {
+ I915_NEW_VBO | I915_NEW_VERTEX_FORMAT,
+ upload_S0S1
+};
+
+
+
+
+/***********************************************************************
+ * S4: Vertex format, rasterization state
+ */
+static void upload_S2S4(struct i915_context *i915)
+{
+ unsigned LIS2, LIS4;
+
+ /* I915_NEW_VERTEX_FORMAT */
+ {
+ LIS2 = i915->current.vertex_info.hwfmt[1];
+ LIS4 = i915->current.vertex_info.hwfmt[0];
+ /*
+ debug_printf("LIS2: 0x%x LIS4: 0x%x\n", LIS2, LIS4);
+ */
+ assert(LIS4); /* should never be zero? */
+ }
+
+ LIS4 |= i915->rasterizer->LIS4;
+
+ if (LIS2 != i915->current.immediate[I915_IMMEDIATE_S2] ||
+ LIS4 != i915->current.immediate[I915_IMMEDIATE_S4]) {
+
+ i915->current.immediate[I915_IMMEDIATE_S2] = LIS2;
+ i915->current.immediate[I915_IMMEDIATE_S4] = LIS4;
+ i915->hardware_dirty |= I915_HW_IMMEDIATE;
+ }
+}
+
+
+const struct i915_tracked_state i915_upload_S2S4 = {
+ I915_NEW_RASTERIZER | I915_NEW_VERTEX_FORMAT,
+ upload_S2S4
+};
+
+
+
+/***********************************************************************
+ *
+ */
+static void upload_S5( struct i915_context *i915 )
+{
+ unsigned LIS5 = 0;
+
+ LIS5 |= i915->depth_stencil->stencil_LIS5;
+
+ LIS5 |= i915->blend->LIS5;
+
+#if 0
+ /* I915_NEW_RASTERIZER */
+ if (i915->state.Polygon->OffsetFill) {
+ LIS5 |= S5_GLOBAL_DEPTH_OFFSET_ENABLE;
+ }
+#endif
+
+
+ if (LIS5 != i915->current.immediate[I915_IMMEDIATE_S5]) {
+ i915->current.immediate[I915_IMMEDIATE_S5] = LIS5;
+ i915->hardware_dirty |= I915_HW_IMMEDIATE;
+ }
+}
+
+const struct i915_tracked_state i915_upload_S5 = {
+ (I915_NEW_DEPTH_STENCIL | I915_NEW_BLEND | I915_NEW_RASTERIZER),
+ upload_S5
+};
+
+
+/***********************************************************************
+ */
+static void upload_S6( struct i915_context *i915 )
+{
+ unsigned LIS6 = (2 << S6_TRISTRIP_PV_SHIFT);
+
+ /* I915_NEW_FRAMEBUFFER
+ */
+ if (i915->framebuffer.cbufs[0])
+ LIS6 |= S6_COLOR_WRITE_ENABLE;
+
+ /* I915_NEW_BLEND
+ */
+ LIS6 |= i915->blend->LIS6;
+
+ /* I915_NEW_DEPTH
+ */
+ LIS6 |= i915->depth_stencil->depth_LIS6;
+
+ if (LIS6 != i915->current.immediate[I915_IMMEDIATE_S6]) {
+ i915->current.immediate[I915_IMMEDIATE_S6] = LIS6;
+ i915->hardware_dirty |= I915_HW_IMMEDIATE;
+ }
+}
+
+const struct i915_tracked_state i915_upload_S6 = {
+ I915_NEW_BLEND | I915_NEW_DEPTH_STENCIL | I915_NEW_FRAMEBUFFER,
+ upload_S6
+};
+
+
+/***********************************************************************
+ */
+static void upload_S7( struct i915_context *i915 )
+{
+ unsigned LIS7;
+
+ /* I915_NEW_RASTERIZER
+ */
+ LIS7 = i915->rasterizer->LIS7;
+
+ if (LIS7 != i915->current.immediate[I915_IMMEDIATE_S7]) {
+ i915->current.immediate[I915_IMMEDIATE_S7] = LIS7;
+ i915->hardware_dirty |= I915_HW_IMMEDIATE;
+ }
+}
+
+const struct i915_tracked_state i915_upload_S7 = {
+ I915_NEW_RASTERIZER,
+ upload_S7
+};
+
+
+static const struct i915_tracked_state *atoms[] = {
+ &i915_upload_S0S1,
+ &i915_upload_S2S4,
+ &i915_upload_S5,
+ &i915_upload_S6,
+ &i915_upload_S7
+};
+
+/*
+ */
+void i915_update_immediate( struct i915_context *i915 )
+{
+ int i;
+
+ for (i = 0; i < Elements(atoms); i++)
+ if (i915->dirty & atoms[i]->dirty)
+ atoms[i]->update( i915 );
+}
diff --git a/src/gallium/drivers/i915simple/i915_state_inlines.h b/src/gallium/drivers/i915simple/i915_state_inlines.h
new file mode 100644
index 0000000000..378de8f9c4
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_state_inlines.h
@@ -0,0 +1,230 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef I915_STATE_INLINES_H
+#define I915_STATE_INLINES_H
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_defines.h"
+#include "i915_reg.h"
+
+
+static INLINE unsigned
+i915_translate_compare_func(unsigned func)
+{
+ switch (func) {
+ case PIPE_FUNC_NEVER:
+ return COMPAREFUNC_NEVER;
+ case PIPE_FUNC_LESS:
+ return COMPAREFUNC_LESS;
+ case PIPE_FUNC_LEQUAL:
+ return COMPAREFUNC_LEQUAL;
+ case PIPE_FUNC_GREATER:
+ return COMPAREFUNC_GREATER;
+ case PIPE_FUNC_GEQUAL:
+ return COMPAREFUNC_GEQUAL;
+ case PIPE_FUNC_NOTEQUAL:
+ return COMPAREFUNC_NOTEQUAL;
+ case PIPE_FUNC_EQUAL:
+ return COMPAREFUNC_EQUAL;
+ case PIPE_FUNC_ALWAYS:
+ return COMPAREFUNC_ALWAYS;
+ default:
+ return COMPAREFUNC_ALWAYS;
+ }
+}
+
+static INLINE unsigned
+i915_translate_stencil_op(unsigned op)
+{
+ switch (op) {
+ case PIPE_STENCIL_OP_KEEP:
+ return STENCILOP_KEEP;
+ case PIPE_STENCIL_OP_ZERO:
+ return STENCILOP_ZERO;
+ case PIPE_STENCIL_OP_REPLACE:
+ return STENCILOP_REPLACE;
+ case PIPE_STENCIL_OP_INCR:
+ return STENCILOP_INCRSAT;
+ case PIPE_STENCIL_OP_DECR:
+ return STENCILOP_DECRSAT;
+ case PIPE_STENCIL_OP_INCR_WRAP:
+ return STENCILOP_INCR;
+ case PIPE_STENCIL_OP_DECR_WRAP:
+ return STENCILOP_DECR;
+ case PIPE_STENCIL_OP_INVERT:
+ return STENCILOP_INVERT;
+ default:
+ return STENCILOP_ZERO;
+ }
+}
+
+static INLINE unsigned
+i915_translate_blend_factor(unsigned factor)
+{
+ switch (factor) {
+ case PIPE_BLENDFACTOR_ZERO:
+ return BLENDFACT_ZERO;
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ return BLENDFACT_SRC_ALPHA;
+ case PIPE_BLENDFACTOR_ONE:
+ return BLENDFACT_ONE;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ return BLENDFACT_SRC_COLR;
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ return BLENDFACT_INV_SRC_COLR;
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ return BLENDFACT_DST_COLR;
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ return BLENDFACT_INV_DST_COLR;
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ return BLENDFACT_INV_SRC_ALPHA;
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ return BLENDFACT_DST_ALPHA;
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ return BLENDFACT_INV_DST_ALPHA;
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ return BLENDFACT_SRC_ALPHA_SATURATE;
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ return BLENDFACT_CONST_COLOR;
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ return BLENDFACT_INV_CONST_COLOR;
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ return BLENDFACT_CONST_ALPHA;
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ return BLENDFACT_INV_CONST_ALPHA;
+ default:
+ return BLENDFACT_ZERO;
+ }
+}
+
+static INLINE unsigned
+i915_translate_blend_func(unsigned mode)
+{
+ switch (mode) {
+ case PIPE_BLEND_ADD:
+ return BLENDFUNC_ADD;
+ case PIPE_BLEND_MIN:
+ return BLENDFUNC_MIN;
+ case PIPE_BLEND_MAX:
+ return BLENDFUNC_MAX;
+ case PIPE_BLEND_SUBTRACT:
+ return BLENDFUNC_SUBTRACT;
+ case PIPE_BLEND_REVERSE_SUBTRACT:
+ return BLENDFUNC_REVERSE_SUBTRACT;
+ default:
+ return 0;
+ }
+}
+
+
+static INLINE unsigned
+i915_translate_logic_op(unsigned opcode)
+{
+ switch (opcode) {
+ case PIPE_LOGICOP_CLEAR:
+ return LOGICOP_CLEAR;
+ case PIPE_LOGICOP_AND:
+ return LOGICOP_AND;
+ case PIPE_LOGICOP_AND_REVERSE:
+ return LOGICOP_AND_RVRSE;
+ case PIPE_LOGICOP_COPY:
+ return LOGICOP_COPY;
+ case PIPE_LOGICOP_COPY_INVERTED:
+ return LOGICOP_COPY_INV;
+ case PIPE_LOGICOP_AND_INVERTED:
+ return LOGICOP_AND_INV;
+ case PIPE_LOGICOP_NOOP:
+ return LOGICOP_NOOP;
+ case PIPE_LOGICOP_XOR:
+ return LOGICOP_XOR;
+ case PIPE_LOGICOP_OR:
+ return LOGICOP_OR;
+ case PIPE_LOGICOP_OR_INVERTED:
+ return LOGICOP_OR_INV;
+ case PIPE_LOGICOP_NOR:
+ return LOGICOP_NOR;
+ case PIPE_LOGICOP_EQUIV:
+ return LOGICOP_EQUIV;
+ case PIPE_LOGICOP_INVERT:
+ return LOGICOP_INV;
+ case PIPE_LOGICOP_OR_REVERSE:
+ return LOGICOP_OR_RVRSE;
+ case PIPE_LOGICOP_NAND:
+ return LOGICOP_NAND;
+ case PIPE_LOGICOP_SET:
+ return LOGICOP_SET;
+ default:
+ return LOGICOP_SET;
+ }
+}
+
+
+
+static INLINE boolean i915_validate_vertices( unsigned hw_prim, unsigned nr )
+{
+ boolean ok;
+
+ switch (hw_prim) {
+ case PRIM3D_POINTLIST:
+ ok = (nr >= 1);
+ assert(ok);
+ break;
+ case PRIM3D_LINELIST:
+ ok = (nr >= 2) && (nr % 2) == 0;
+ assert(ok);
+ break;
+ case PRIM3D_LINESTRIP:
+ ok = (nr >= 2);
+ assert(ok);
+ break;
+ case PRIM3D_TRILIST:
+ ok = (nr >= 3) && (nr % 3) == 0;
+ assert(ok);
+ break;
+ case PRIM3D_TRISTRIP:
+ ok = (nr >= 3);
+ assert(ok);
+ break;
+ case PRIM3D_TRIFAN:
+ ok = (nr >= 3);
+ assert(ok);
+ break;
+ case PRIM3D_POLY:
+ ok = (nr >= 3);
+ assert(ok);
+ break;
+ default:
+ assert(0);
+ ok = 0;
+ break;
+ }
+
+ return ok;
+}
+
+#endif
diff --git a/src/gallium/drivers/i915simple/i915_state_sampler.c b/src/gallium/drivers/i915simple/i915_state_sampler.c
new file mode 100644
index 0000000000..c5e9084d12
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_state_sampler.c
@@ -0,0 +1,299 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "util/u_memory.h"
+
+#include "i915_state_inlines.h"
+#include "i915_context.h"
+#include "i915_reg.h"
+#include "i915_state.h"
+
+
+/*
+ * A note about min_lod & max_lod.
+ *
+ * There is a circular dependency between the sampler state
+ * and the map state to be submitted to hw.
+ *
+ * Two conditions must be met:
+ *    min_lod <= max_lod
+ *    max_lod <= last_level
+ *
+ * This would all be fine and dandy were it not for the fact that
+ * max_lod is set on the map state instead of the sampler state.
+ * That is, the max_lod we submit on the map is:
+ *    max_lod = MIN2(last_level, max_lod);
+ *
+ * So we need to update the map state when we change samplers, and
+ * we need to change the sampler state when the map state is changed.
+ * The first part is done by calling i915_update_texture in
+ * i915_update_samplers and the second part is done elsewhere in
+ * code tracking the state changes.
+ */
+
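+/* A compact restatement of the two clamps described above (illustrative
+ * pseudo-code; the fixed-point scaling of the LOD fields is omitted):
+ *
+ *    map:      max_lod = MIN2(last_level, sampler max_lod)   -- i915_update_texture()
+ *    sampler:  min_lod = MIN2(sampler min_lod, last_level)   -- update_sampler()
+ */
+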
+static void
+i915_update_texture(struct i915_context *i915,
+ uint unit,
+ const struct i915_texture *tex,
+ const struct i915_sampler_state *sampler,
+ uint state[6]);
+/**
+ * Compute i915 texture sampling state.
+ *
+ * Recalculate all state from scratch. Perhaps not the most
+ * efficient, but this has gotten complex enough that we need
+ * something which is understandable and reliable.
+ * \param state returns the 3 words of computed state
+ */
+static void update_sampler(struct i915_context *i915,
+ uint unit,
+ const struct i915_sampler_state *sampler,
+ const struct i915_texture *tex,
+ unsigned state[3] )
+{
+ const struct pipe_texture *pt = &tex->base;
+ unsigned minlod, lastlod;
+
+ /* Need to do this after updating the maps, which call the
+ * intel_finalize_mipmap_tree and hence can update firstLevel:
+ */
+ state[0] = sampler->state[0];
+ state[1] = sampler->state[1];
+ state[2] = sampler->state[2];
+
+ if (pt->format == PIPE_FORMAT_YCBCR ||
+ pt->format == PIPE_FORMAT_YCBCR_REV)
+ state[0] |= SS2_COLORSPACE_CONVERSION;
+
+ /* 3D textures don't seem to respect the border color.
+ * Fallback if there's ever a danger that they might refer to
+ * it.
+ *
+ * Effectively this means fallback on 3D clamp or
+ * clamp_to_border.
+ *
+ * XXX: Check if this is true on i945.
+ * XXX: Check if this bug got fixed in release silicon.
+ */
+#if 0
+ {
+ const unsigned ws = sampler->templ->wrap_s;
+ const unsigned wt = sampler->templ->wrap_t;
+ const unsigned wr = sampler->templ->wrap_r;
+ if (pt->target == PIPE_TEXTURE_3D &&
+ (sampler->templ->min_img_filter != PIPE_TEX_FILTER_NEAREST ||
+ sampler->templ->mag_img_filter != PIPE_TEX_FILTER_NEAREST) &&
+ (ws == PIPE_TEX_WRAP_CLAMP ||
+ wt == PIPE_TEX_WRAP_CLAMP ||
+ wr == PIPE_TEX_WRAP_CLAMP ||
+ ws == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
+ wt == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
+ wr == PIPE_TEX_WRAP_CLAMP_TO_BORDER)) {
+ if (i915->conformance_mode > 0) {
+ assert(0);
+ /* sampler->fallback = true; */
+ /* TODO */
+ }
+ }
+ }
+#endif
+
+ /* See note at the top of file */
+ minlod = sampler->minlod;
+ lastlod = pt->last_level << 4;
+
+ if (lastlod < minlod) {
+ minlod = lastlod;
+ }
+
+ state[1] |= (minlod << SS3_MIN_LOD_SHIFT);
+ state[1] |= (unit << SS3_TEXTUREMAP_INDEX_SHIFT);
+}
+
+
+void i915_update_samplers( struct i915_context *i915 )
+{
+ uint unit;
+
+ i915->current.sampler_enable_nr = 0;
+ i915->current.sampler_enable_flags = 0x0;
+
+ for (unit = 0; unit < i915->num_textures && unit < i915->num_samplers;
+ unit++) {
+ /* determine unit enable/disable by looking for a bound texture */
+ /* could also examine the fragment program? */
+ if (i915->texture[unit]) {
+ update_sampler( i915,
+ unit,
+ i915->sampler[unit], /* sampler state */
+ i915->texture[unit], /* texture */
+ i915->current.sampler[unit] /* the result */
+ );
+ i915_update_texture( i915,
+ unit,
+ i915->texture[unit], /* texture */
+ i915->sampler[unit], /* sampler state */
+ i915->current.texbuffer[unit] );
+
+ i915->current.sampler_enable_nr++;
+ i915->current.sampler_enable_flags |= (1 << unit);
+ }
+ }
+
+ i915->hardware_dirty |= I915_HW_SAMPLER | I915_HW_MAP;
+}
+
+
+static uint
+translate_texture_format(enum pipe_format pipeFormat)
+{
+ switch (pipeFormat) {
+ case PIPE_FORMAT_L8_UNORM:
+ return MAPSURF_8BIT | MT_8BIT_L8;
+ case PIPE_FORMAT_I8_UNORM:
+ return MAPSURF_8BIT | MT_8BIT_I8;
+ case PIPE_FORMAT_A8_UNORM:
+ return MAPSURF_8BIT | MT_8BIT_A8;
+ case PIPE_FORMAT_A8L8_UNORM:
+ return MAPSURF_16BIT | MT_16BIT_AY88;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ return MAPSURF_16BIT | MT_16BIT_RGB565;
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ return MAPSURF_16BIT | MT_16BIT_ARGB1555;
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ return MAPSURF_16BIT | MT_16BIT_ARGB4444;
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ return MAPSURF_32BIT | MT_32BIT_ARGB8888;
+ case PIPE_FORMAT_YCBCR_REV:
+ return (MAPSURF_422 | MT_422_YCRCB_NORMAL);
+ case PIPE_FORMAT_YCBCR:
+ return (MAPSURF_422 | MT_422_YCRCB_SWAPY);
+#if 0
+ case PIPE_FORMAT_RGB_FXT1:
+ case PIPE_FORMAT_RGBA_FXT1:
+ return (MAPSURF_COMPRESSED | MT_COMPRESS_FXT1);
+#endif
+ case PIPE_FORMAT_Z16_UNORM:
+ return (MAPSURF_16BIT | MT_16BIT_L16);
+#if 0
+ case PIPE_FORMAT_RGBA_DXT1:
+ case PIPE_FORMAT_RGB_DXT1:
+ return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT1);
+ case PIPE_FORMAT_RGBA_DXT3:
+ return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT2_3);
+ case PIPE_FORMAT_RGBA_DXT5:
+ return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT4_5);
+#endif
+ case PIPE_FORMAT_S8Z24_UNORM:
+ return (MAPSURF_32BIT | MT_32BIT_xI824);
+ default:
+ debug_printf("i915: translate_texture_format() bad image format %x\n",
+ pipeFormat);
+ assert(0);
+ return 0;
+ }
+}
+
+
+static void
+i915_update_texture(struct i915_context *i915,
+ uint unit,
+ const struct i915_texture *tex,
+ const struct i915_sampler_state *sampler,
+ uint state[6])
+{
+ const struct pipe_texture *pt = &tex->base;
+ uint format, pitch;
+ const uint width = pt->width[0], height = pt->height[0], depth = pt->depth[0];
+ const uint num_levels = pt->last_level;
+ unsigned max_lod = num_levels * 4;
+ unsigned tiled = MS3_USE_FENCE_REGS;
+
+ assert(tex);
+ assert(width);
+ assert(height);
+ assert(depth);
+
+ format = translate_texture_format(pt->format);
+ pitch = tex->stride;
+
+ assert(format);
+ assert(pitch);
+
+ if (tex->sw_tiled) {
+ assert(!((pitch - 1) & pitch));
+ tiled = MS3_TILED_SURFACE;
+ }
+
+ /* MS3 state */
+ state[0] =
+ (((height - 1) << MS3_HEIGHT_SHIFT)
+ | ((width - 1) << MS3_WIDTH_SHIFT)
+ | format
+ | tiled);
+
+ /*
+ * XXX When min_filter != mag_filter and there's just one mipmap level,
+ * set max_lod = 1 to make sure i915 chooses between min/mag filtering.
+ */
+
+ /* See note at the top of file */
+ if (max_lod > (sampler->maxlod >> 2))
+ max_lod = sampler->maxlod >> 2;
+
+ /* MS4 state */
+ state[1] =
+ ((((pitch / 4) - 1) << MS4_PITCH_SHIFT)
+ | MS4_CUBE_FACE_ENA_MASK
+ | ((max_lod) << MS4_MAX_LOD_SHIFT)
+ | ((depth - 1) << MS4_VOLUME_DEPTH_SHIFT));
+}
+
+
+void
+i915_update_textures(struct i915_context *i915)
+{
+ uint unit;
+
+ for (unit = 0; unit < i915->num_textures && unit < i915->num_samplers;
+ unit++) {
+ /* determine unit enable/disable by looking for a bound texture */
+ /* could also examine the fragment program? */
+ if (i915->texture[unit]) {
+ i915_update_texture( i915,
+ unit,
+ i915->texture[unit], /* texture */
+ i915->sampler[unit], /* sampler state */
+ i915->current.texbuffer[unit] );
+ }
+ }
+
+ i915->hardware_dirty |= I915_HW_MAP;
+}
diff --git a/src/gallium/drivers/i915simple/i915_surface.c b/src/gallium/drivers/i915simple/i915_surface.c
new file mode 100644
index 0000000000..ab8331f3e6
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_surface.c
@@ -0,0 +1,94 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "i915_context.h"
+#include "i915_blit.h"
+#include "i915_state.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+#include "pipe/p_inlines.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "util/u_tile.h"
+#include "util/u_rect.h"
+
+
+/* Assumes all values are within bounds -- no checking at this level -
+ * do it higher up if required.
+ */
+static void
+i915_surface_copy(struct pipe_context *pipe,
+ struct pipe_surface *dst,
+ unsigned dstx, unsigned dsty,
+ struct pipe_surface *src,
+ unsigned srcx, unsigned srcy, unsigned width, unsigned height)
+{
+ struct i915_texture *dst_tex = (struct i915_texture *)dst->texture;
+ struct i915_texture *src_tex = (struct i915_texture *)src->texture;
+
+ assert( dst != src );
+ assert( dst_tex->base.block.size == src_tex->base.block.size );
+ assert( dst_tex->base.block.width == src_tex->base.block.width );
+ assert( dst_tex->base.block.height == src_tex->base.block.height );
+ assert( dst_tex->base.block.width == 1 );
+ assert( dst_tex->base.block.height == 1 );
+
+ i915_copy_blit( i915_context(pipe),
+ FALSE,
+ dst_tex->base.block.size,
+ (unsigned short) src_tex->stride, src_tex->buffer, src->offset,
+ (unsigned short) dst_tex->stride, dst_tex->buffer, dst->offset,
+ (short) srcx, (short) srcy, (short) dstx, (short) dsty, (short) width, (short) height );
+}
+
+
+static void
+i915_surface_fill(struct pipe_context *pipe,
+ struct pipe_surface *dst,
+ unsigned dstx, unsigned dsty,
+ unsigned width, unsigned height, unsigned value)
+{
+ struct i915_texture *tex = (struct i915_texture *)dst->texture;
+
+ assert(tex->base.block.width == 1);
+ assert(tex->base.block.height == 1);
+
+ i915_fill_blit( i915_context(pipe),
+ tex->base.block.size,
+ (unsigned short) tex->stride,
+ tex->buffer, dst->offset,
+ (short) dstx, (short) dsty,
+ (short) width, (short) height,
+ value );
+}
+
+
+void
+i915_init_surface_functions(struct i915_context *i915)
+{
+ i915->base.surface_copy = i915_surface_copy;
+ i915->base.surface_fill = i915_surface_fill;
+}
diff --git a/src/gallium/drivers/i915simple/i915_texture.c b/src/gallium/drivers/i915simple/i915_texture.c
new file mode 100644
index 0000000000..6a6c654271
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_texture.c
@@ -0,0 +1,911 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Michel Dänzer <michel@tungstengraphics.com>
+ */
+
+#include "pipe/p_state.h"
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+#include "i915_context.h"
+#include "i915_texture.h"
+#include "i915_debug.h"
+#include "i915_screen.h"
+#include "intel_winsys.h"
+
+
+/*
+ * Helper function and arrays
+ */
+
+
+/**
+ * Initial offset for Cube map.
+ */
+static const int initial_offsets[6][2] = {
+ {0, 0},
+ {0, 2},
+ {1, 0},
+ {1, 2},
+ {1, 1},
+ {1, 3}
+};
+
+/**
+ * Step offsets for Cube map.
+ */
+static const int step_offsets[6][2] = {
+ {0, 2},
+ {0, 2},
+ {-1, 2},
+ {-1, 2},
+ {-1, 1},
+ {-1, 1}
+};
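+
+/* Both tables are in units of the face size (nblocks): initial_offsets places
+ * each face's level 0 within the packed cube layout, and step_offsets (scaled
+ * by the shrinking mip size) walks from one mip level to the next; see
+ * i915_miptree_layout_cube() and i945_miptree_layout_cube() below.
+ */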
+
+static unsigned
+power_of_two(unsigned x)
+{
+ unsigned value = 1;
+ while (value < x)
+ value = value << 1;
+ return value;
+}
+
+static unsigned
+round_up(unsigned n, unsigned multiple)
+{
+ return (n + multiple - 1) & ~(multiple - 1);
+}
+
+
+/*
+ * More advanced helper funcs
+ */
+
+
+static void
+i915_miptree_set_level_info(struct i915_texture *tex,
+ unsigned level,
+ unsigned nr_images,
+ unsigned w, unsigned h, unsigned d)
+{
+ struct pipe_texture *pt = &tex->base;
+
+ assert(level < PIPE_MAX_TEXTURE_LEVELS);
+
+ pt->width[level] = w;
+ pt->height[level] = h;
+ pt->depth[level] = d;
+
+ pt->nblocksx[level] = pf_get_nblocksx(&pt->block, w);
+ pt->nblocksy[level] = pf_get_nblocksy(&pt->block, h);
+
+ tex->nr_images[level] = nr_images;
+
+ /*
+ DBG("%s level %d size: %d,%d,%d offset %d,%d (0x%x)\n", __FUNCTION__,
+ level, w, h, d, x, y, tex->level_offset[level]);
+ */
+
+ /* Not sure when this would happen, but anyway:
+ */
+ if (tex->image_offset[level]) {
+ FREE(tex->image_offset[level]);
+ tex->image_offset[level] = NULL;
+ }
+
+ assert(nr_images);
+ assert(!tex->image_offset[level]);
+
+ tex->image_offset[level] = (unsigned *) MALLOC(nr_images * sizeof(unsigned));
+ tex->image_offset[level][0] = 0;
+}
+
+static void
+i915_miptree_set_image_offset(struct i915_texture *tex,
+ unsigned level, unsigned img, unsigned x, unsigned y)
+{
+ if (img == 0 && level == 0)
+ assert(x == 0 && y == 0);
+
+ assert(img < tex->nr_images[level]);
+
+ tex->image_offset[level][img] = y * tex->stride + x * tex->base.block.size;
+
+ /*
+ printf("%s level %d img %d pos %d,%d image_offset %x\n",
+ __FUNCTION__, level, img, x, y, tex->image_offset[level][img]);
+ */
+}
+
+
+/*
+ * i915 layout functions, some used by i945
+ */
+
+
+/**
+ * Special case to deal with scanout textures.
+ */
+static boolean
+i915_scanout_layout(struct i915_texture *tex)
+{
+ struct pipe_texture *pt = &tex->base;
+
+ if (pt->last_level > 0 || pt->block.size != 4)
+ return 0;
+
+ i915_miptree_set_level_info(tex, 0, 1,
+ tex->base.width[0],
+ tex->base.height[0],
+ 1);
+ i915_miptree_set_image_offset(tex, 0, 0, 0, 0);
+
+ if (tex->base.width[0] >= 240) {
+ tex->stride = power_of_two(tex->base.nblocksx[0] * pt->block.size);
+ tex->total_nblocksy = round_up(tex->base.nblocksy[0], 8);
+ tex->hw_tiled = INTEL_TILE_X;
+ } else if (tex->base.width[0] == 64 && tex->base.height[0] == 64) {
+ tex->stride = power_of_two(tex->base.nblocksx[0] * pt->block.size);
+ tex->total_nblocksy = round_up(tex->base.nblocksy[0], 8);
+ } else {
+ return FALSE;
+ }
+
+ debug_printf("%s size: %d,%d,%d offset %d,%d (0x%x)\n", __FUNCTION__,
+ tex->base.width[0], tex->base.height[0], pt->block.size,
+ tex->stride, tex->total_nblocksy, tex->stride * tex->total_nblocksy);
+
+ return TRUE;
+}
+
+static void
+i915_miptree_layout_2d(struct i915_texture *tex)
+{
+ struct pipe_texture *pt = &tex->base;
+ unsigned level;
+ unsigned width = pt->width[0];
+ unsigned height = pt->height[0];
+ unsigned nblocksx = pt->nblocksx[0];
+ unsigned nblocksy = pt->nblocksy[0];
+
+ tex->stride = round_up(pt->nblocksx[0] * pt->block.size, 4);
+ tex->total_nblocksy = 0;
+
+ for (level = 0; level <= pt->last_level; level++) {
+ i915_miptree_set_level_info(tex, level, 1, width, height, 1);
+ i915_miptree_set_image_offset(tex, level, 0, 0, tex->total_nblocksy);
+
+ nblocksy = round_up(MAX2(2, nblocksy), 2);
+
+ tex->total_nblocksy += nblocksy;
+
+ width = minify(width);
+ height = minify(height);
+ nblocksx = pf_get_nblocksx(&pt->block, width);
+ nblocksy = pf_get_nblocksy(&pt->block, height);
+ }
+}
+
+static void
+i915_miptree_layout_3d(struct i915_texture *tex)
+{
+ struct pipe_texture *pt = &tex->base;
+ unsigned level;
+
+ unsigned width = pt->width[0];
+ unsigned height = pt->height[0];
+ unsigned depth = pt->depth[0];
+ unsigned nblocksx = pt->nblocksx[0];
+ unsigned nblocksy = pt->nblocksy[0];
+ unsigned stack_nblocksy = 0;
+
+ /* Calculate the size of a single slice.
+ */
+ tex->stride = round_up(pt->nblocksx[0] * pt->block.size, 4);
+
+ /* XXX: hardware expects/requires 9 levels at minimum.
+ */
+ for (level = 0; level <= MAX2(8, pt->last_level); level++) {
+ i915_miptree_set_level_info(tex, level, depth, width, height, depth);
+
+ stack_nblocksy += MAX2(2, nblocksy);
+
+ width = minify(width);
+ height = minify(height);
+ depth = minify(depth);
+ nblocksx = pf_get_nblocksx(&pt->block, width);
+ nblocksy = pf_get_nblocksy(&pt->block, height);
+ }
+
+ /* Fixup depth image_offsets:
+ */
+ depth = pt->depth[0];
+ for (level = 0; level <= pt->last_level; level++) {
+ unsigned i;
+ for (i = 0; i < depth; i++)
+ i915_miptree_set_image_offset(tex, level, i, 0, i * stack_nblocksy);
+
+ depth = minify(depth);
+ }
+
+ /* Multiply slice size by texture depth for total size. It's
+ * remarkable how wasteful of memory the i915 texture layouts
+ * are. They are largely fixed in the i945.
+ */
+ tex->total_nblocksy = stack_nblocksy * pt->depth[0];
+}
+
+static void
+i915_miptree_layout_cube(struct i915_texture *tex)
+{
+ struct pipe_texture *pt = &tex->base;
+ unsigned width = pt->width[0], height = pt->height[0];
+ const unsigned nblocks = pt->nblocksx[0];
+ unsigned level;
+ unsigned face;
+
+ assert(width == height); /* cubemap images are square */
+
+ /* double pitch for cube layouts */
+ tex->stride = round_up(nblocks * pt->block.size * 2, 4);
+ tex->total_nblocksy = nblocks * 4;
+
+ for (level = 0; level <= pt->last_level; level++) {
+ i915_miptree_set_level_info(tex, level, 6, width, height, 1);
+ width /= 2;
+ height /= 2;
+ }
+
+ for (face = 0; face < 6; face++) {
+ unsigned x = initial_offsets[face][0] * nblocks;
+ unsigned y = initial_offsets[face][1] * nblocks;
+ unsigned d = nblocks;
+
+ for (level = 0; level <= pt->last_level; level++) {
+ i915_miptree_set_image_offset(tex, level, face, x, y);
+ d >>= 1;
+ x += step_offsets[face][0] * d;
+ y += step_offsets[face][1] * d;
+ }
+ }
+}
+
+static boolean
+i915_miptree_layout(struct i915_texture * tex)
+{
+ struct pipe_texture *pt = &tex->base;
+
+ switch (pt->target) {
+ case PIPE_TEXTURE_1D:
+ case PIPE_TEXTURE_2D:
+ i915_miptree_layout_2d(tex);
+ break;
+ case PIPE_TEXTURE_3D:
+ i915_miptree_layout_3d(tex);
+ break;
+ case PIPE_TEXTURE_CUBE:
+ i915_miptree_layout_cube(tex);
+ break;
+ default:
+ assert(0);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+
+/*
+ * i945 layout functions
+ */
+
+
+static void
+i945_miptree_layout_2d(struct i915_texture *tex)
+{
+ struct pipe_texture *pt = &tex->base;
+ const int align_x = 2, align_y = 4;
+ unsigned level;
+ unsigned x = 0;
+ unsigned y = 0;
+ unsigned width = pt->width[0];
+ unsigned height = pt->height[0];
+ unsigned nblocksx = pt->nblocksx[0];
+ unsigned nblocksy = pt->nblocksy[0];
+
+ /* used for scanouts that need special layouts */
+ if (tex->base.tex_usage & PIPE_TEXTURE_USAGE_PRIMARY)
+ if (i915_scanout_layout(tex))
+ return;
+
+ tex->stride = round_up(pt->nblocksx[0] * pt->block.size, 4);
+
+ /* May need to adjust pitch to accommodate the placement of
+ * the 2nd mipmap level. This occurs when the alignment
+ * constraints of mipmap placement push the right edge of the
+ * 2nd mipmap level out past the width of its parent.
+ */
+ if (pt->last_level > 0) {
+ unsigned mip1_nblocksx
+ = align(pf_get_nblocksx(&pt->block, minify(width)), align_x)
+ + pf_get_nblocksx(&pt->block, minify(minify(width)));
+
+ if (mip1_nblocksx > nblocksx)
+ tex->stride = mip1_nblocksx * pt->block.size;
+ }
+
+ /* Pitch must be a whole number of dwords
+ */
+ tex->stride = align(tex->stride, 64);
+ tex->total_nblocksy = 0;
+
+ for (level = 0; level <= pt->last_level; level++) {
+ i915_miptree_set_level_info(tex, level, 1, width, height, 1);
+ i915_miptree_set_image_offset(tex, level, 0, x, y);
+
+ nblocksy = align(nblocksy, align_y);
+
+ /* Because the images are packed better, the final offset
+ * might not be the maximal one:
+ */
+ tex->total_nblocksy = MAX2(tex->total_nblocksy, y + nblocksy);
+
+ /* Layout_below: step right after second mipmap level.
+ */
+ if (level == 1) {
+ x += align(nblocksx, align_x);
+ }
+ else {
+ y += nblocksy;
+ }
+
+ width = minify(width);
+ height = minify(height);
+ nblocksx = pf_get_nblocksx(&pt->block, width);
+ nblocksy = pf_get_nblocksy(&pt->block, height);
+ }
+}
+
+static void
+i945_miptree_layout_3d(struct i915_texture *tex)
+{
+ struct pipe_texture *pt = &tex->base;
+ unsigned width = pt->width[0];
+ unsigned height = pt->height[0];
+ unsigned depth = pt->depth[0];
+ unsigned nblocksx = pt->nblocksx[0];
+ unsigned nblocksy = pt->nblocksy[0];
+ unsigned pack_x_pitch, pack_x_nr;
+ unsigned pack_y_pitch;
+ unsigned level;
+
+ tex->stride = round_up(pt->nblocksx[0] * pt->block.size, 4);
+ tex->total_nblocksy = 0;
+
+ pack_y_pitch = MAX2(pt->nblocksy[0], 2);
+ pack_x_pitch = tex->stride / pt->block.size;
+ pack_x_nr = 1;
+
+ for (level = 0; level <= pt->last_level; level++) {
+ int x = 0;
+ int y = 0;
+ unsigned q, j;
+
+ i915_miptree_set_level_info(tex, level, depth, width, height, depth);
+
+ for (q = 0; q < depth;) {
+ for (j = 0; j < pack_x_nr && q < depth; j++, q++) {
+ i915_miptree_set_image_offset(tex, level, q, x, y + tex->total_nblocksy);
+ x += pack_x_pitch;
+ }
+
+ x = 0;
+ y += pack_y_pitch;
+ }
+
+ tex->total_nblocksy += y;
+
+ if (pack_x_pitch > 4) {
+ pack_x_pitch >>= 1;
+ pack_x_nr <<= 1;
+ assert(pack_x_pitch * pack_x_nr * pt->block.size <= tex->stride);
+ }
+
+ if (pack_y_pitch > 2) {
+ pack_y_pitch >>= 1;
+ }
+
+ width = minify(width);
+ height = minify(height);
+ depth = minify(depth);
+ nblocksx = pf_get_nblocksx(&pt->block, width);
+ nblocksy = pf_get_nblocksy(&pt->block, height);
+ }
+}
+
+static void
+i945_miptree_layout_cube(struct i915_texture *tex)
+{
+ struct pipe_texture *pt = &tex->base;
+ unsigned level;
+
+ const unsigned nblocks = pt->nblocksx[0];
+ unsigned face;
+ unsigned width = pt->width[0];
+ unsigned height = pt->height[0];
+
+ /*
+ printf("%s %i, %i\n", __FUNCTION__, pt->width[0], pt->height[0]);
+ */
+
+ assert(width == height); /* cubemap images are square */
+
+ /*
+ * XXX Should only be used for compressed formats. But let's
+ * keep this code active just in case.
+ *
+ * Depending on the size of the largest images, pitch can be
+ * determined either by the old-style packing of cubemap faces,
+ * or the final row of 4x4, 2x2 and 1x1 faces below this.
+ */
+ if (nblocks > 32)
+ tex->stride = round_up(nblocks * pt->block.size * 2, 4);
+ else
+ tex->stride = 14 * 8 * pt->block.size;
+
+ tex->total_nblocksy = nblocks * 4;
+
+ /* Set all the levels to effectively occupy the whole rectangular region.
+ */
+ for (level = 0; level <= pt->last_level; level++) {
+ i915_miptree_set_level_info(tex, level, 6, width, height, 1);
+ width /= 2;
+ height /= 2;
+ }
+
+ for (face = 0; face < 6; face++) {
+ unsigned x = initial_offsets[face][0] * nblocks;
+ unsigned y = initial_offsets[face][1] * nblocks;
+ unsigned d = nblocks;
+
+#if 0 /* Fix and enable this code for compressed formats */
+ if (nblocks == 4 && face >= 4) {
+ y = tex->total_height - 4;
+ x = (face - 4) * 8;
+ }
+ else if (nblocks < 4 && (face > 0)) {
+ y = tex->total_height - 4;
+ x = face * 8;
+ }
+#endif
+
+ for (level = 0; level <= pt->last_level; level++) {
+ i915_miptree_set_image_offset(tex, level, face, x, y);
+
+ d >>= 1;
+
+#if 0 /* Fix and enable this code for compressed formats */
+ switch (d) {
+ case 4:
+ switch (face) {
+ case PIPE_TEX_FACE_POS_X:
+ case PIPE_TEX_FACE_NEG_X:
+ x += step_offsets[face][0] * d;
+ y += step_offsets[face][1] * d;
+ break;
+ case PIPE_TEX_FACE_POS_Y:
+ case PIPE_TEX_FACE_NEG_Y:
+ y += 12;
+ x -= 8;
+ break;
+ case PIPE_TEX_FACE_POS_Z:
+ case PIPE_TEX_FACE_NEG_Z:
+ y = tex->total_height - 4;
+ x = (face - 4) * 8;
+ break;
+ }
+ case 2:
+ y = tex->total_height - 4;
+ x = 16 + face * 8;
+ break;
+
+ case 1:
+ x += 48;
+ break;
+ default:
+#endif
+ x += step_offsets[face][0] * d;
+ y += step_offsets[face][1] * d;
+#if 0
+ break;
+ }
+#endif
+ }
+ }
+}
+
+static boolean
+i945_miptree_layout(struct i915_texture * tex)
+{
+ struct pipe_texture *pt = &tex->base;
+
+ switch (pt->target) {
+ case PIPE_TEXTURE_1D:
+ case PIPE_TEXTURE_2D:
+ i945_miptree_layout_2d(tex);
+ break;
+ case PIPE_TEXTURE_3D:
+ i945_miptree_layout_3d(tex);
+ break;
+ case PIPE_TEXTURE_CUBE:
+ i945_miptree_layout_cube(tex);
+ break;
+ default:
+ assert(0);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+
+/*
+ * Screen texture functions
+ */
+
+
+static struct pipe_texture *
+i915_texture_create(struct pipe_screen *screen,
+ const struct pipe_texture *templat)
+{
+ struct i915_screen *is = i915_screen(screen);
+ struct intel_winsys *iws = is->iws;
+ struct i915_texture *tex = CALLOC_STRUCT(i915_texture);
+ size_t tex_size;
+ unsigned buf_usage = 0;
+
+ if (!tex)
+ return NULL;
+
+ tex->base = *templat;
+ pipe_reference_init(&tex->base.reference, 1);
+ tex->base.screen = screen;
+
+ tex->base.nblocksx[0] = pf_get_nblocksx(&tex->base.block, tex->base.width[0]);
+ tex->base.nblocksy[0] = pf_get_nblocksy(&tex->base.block, tex->base.height[0]);
+
+ if (is->is_i945) {
+ if (!i945_miptree_layout(tex))
+ goto fail;
+ } else {
+ if (!i915_miptree_layout(tex))
+ goto fail;
+ }
+
+ tex_size = tex->stride * tex->total_nblocksy;
+
+
+
+ /* for scanouts and cursors; cursors aren't scanouts */
+ if (templat->tex_usage & PIPE_TEXTURE_USAGE_PRIMARY && templat->width[0] != 64)
+ buf_usage = INTEL_NEW_SCANOUT;
+ else
+ buf_usage = INTEL_NEW_TEXTURE;
+
+ tex->buffer = iws->buffer_create(iws, tex_size, 64, buf_usage);
+ if (!tex->buffer)
+ goto fail;
+
+ /* setup any hw fences */
+ if (tex->hw_tiled) {
+ assert(tex->sw_tiled == INTEL_TILE_NONE);
+ iws->buffer_set_fence_reg(iws, tex->buffer, tex->stride, tex->hw_tiled);
+ }
+
+
+#if 0
+ void *ptr = ws->buffer_map(ws, tex->buffer,
+ PIPE_BUFFER_USAGE_CPU_WRITE);
+ memset(ptr, 0x80, tex_size);
+ ws->buffer_unmap(ws, tex->buffer);
+#endif
+
+ return &tex->base;
+
+fail:
+ FREE(tex);
+ return NULL;
+}
+
+static struct pipe_texture *
+i915_texture_blanket(struct pipe_screen * screen,
+ const struct pipe_texture *base,
+ const unsigned *stride,
+ struct pipe_buffer *buffer)
+{
+#if 0
+ struct i915_texture *tex;
+ assert(screen);
+
+ /* Only supports one type */
+ if (base->target != PIPE_TEXTURE_2D ||
+ base->last_level != 0 ||
+ base->depth[0] != 1) {
+ return NULL;
+ }
+
+ tex = CALLOC_STRUCT(i915_texture);
+ if (!tex)
+ return NULL;
+
+ tex->base = *base;
+ pipe_reference_init(&tex->base.reference, 1);
+ tex->base.screen = screen;
+
+ tex->stride = stride[0];
+
+ i915_miptree_set_level_info(tex, 0, 1, base->width[0], base->height[0], 1);
+ i915_miptree_set_image_offset(tex, 0, 0, 0, 0);
+
+ pipe_buffer_reference(&tex->buffer, buffer);
+
+ return &tex->base;
+#else
+ return NULL;
+#endif
+}
+
+static void
+i915_texture_destroy(struct pipe_texture *pt)
+{
+ struct i915_texture *tex = (struct i915_texture *)pt;
+ struct intel_winsys *iws = i915_screen(pt->screen)->iws;
+ uint i;
+
+ /*
+ DBG("%s deleting %p\n", __FUNCTION__, (void *) tex);
+ */
+
+ iws->buffer_destroy(iws, tex->buffer);
+
+ for (i = 0; i < PIPE_MAX_TEXTURE_LEVELS; i++)
+ if (tex->image_offset[i])
+ FREE(tex->image_offset[i]);
+
+ FREE(tex);
+}
+
+
+/*
+ * Screen surface functions
+ */
+
+
+static struct pipe_surface *
+i915_get_tex_surface(struct pipe_screen *screen,
+ struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ unsigned flags)
+{
+ struct i915_texture *tex = (struct i915_texture *)pt;
+ struct pipe_surface *ps;
+ unsigned offset; /* in bytes */
+
+ if (pt->target == PIPE_TEXTURE_CUBE) {
+ offset = tex->image_offset[level][face];
+ }
+ else if (pt->target == PIPE_TEXTURE_3D) {
+ offset = tex->image_offset[level][zslice];
+ }
+ else {
+ offset = tex->image_offset[level][0];
+ assert(face == 0);
+ assert(zslice == 0);
+ }
+
+ ps = CALLOC_STRUCT(pipe_surface);
+ if (ps) {
+ pipe_reference_init(&ps->reference, 1);
+ pipe_texture_reference(&ps->texture, pt);
+ ps->format = pt->format;
+ ps->width = pt->width[level];
+ ps->height = pt->height[level];
+ ps->offset = offset;
+ ps->usage = flags;
+ }
+ return ps;
+}
+
+static void
+i915_tex_surface_destroy(struct pipe_surface *surf)
+{
+ pipe_texture_reference(&surf->texture, NULL);
+ FREE(surf);
+}
+
+
+/*
+ * Screen transfer functions
+ */
+
+
+static struct pipe_transfer*
+i915_get_tex_transfer(struct pipe_screen *screen,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level, unsigned zslice,
+ enum pipe_transfer_usage usage, unsigned x, unsigned y,
+ unsigned w, unsigned h)
+{
+ struct i915_texture *tex = (struct i915_texture *)texture;
+ struct i915_transfer *trans;
+ unsigned offset; /* in bytes */
+
+ if (texture->target == PIPE_TEXTURE_CUBE) {
+ offset = tex->image_offset[level][face];
+ }
+ else if (texture->target == PIPE_TEXTURE_3D) {
+ offset = tex->image_offset[level][zslice];
+ }
+ else {
+ offset = tex->image_offset[level][0];
+ assert(face == 0);
+ assert(zslice == 0);
+ }
+
+ trans = CALLOC_STRUCT(i915_transfer);
+ if (trans) {
+ pipe_texture_reference(&trans->base.texture, texture);
+ trans->base.format = texture->format;
+ trans->base.x = x;
+ trans->base.y = y;
+ trans->base.width = w;
+ trans->base.height = h;
+ trans->base.block = texture->block;
+ trans->base.nblocksx = texture->nblocksx[level];
+ trans->base.nblocksy = texture->nblocksy[level];
+ trans->base.stride = tex->stride;
+ trans->offset = offset;
+ trans->base.usage = usage;
+ }
+ return &trans->base;
+}
+
+static void *
+i915_transfer_map(struct pipe_screen *screen,
+ struct pipe_transfer *transfer)
+{
+ struct i915_texture *tex = (struct i915_texture *)transfer->texture;
+ struct intel_winsys *iws = i915_screen(tex->base.screen)->iws;
+ char *map;
+ boolean write = FALSE;
+
+ if (transfer->usage != PIPE_TRANSFER_READ)
+ write = TRUE;
+
+ map = iws->buffer_map(iws, tex->buffer, write);
+ if (map == NULL)
+ return NULL;
+
+ return map + i915_transfer(transfer)->offset +
+ transfer->y / transfer->block.height * transfer->stride +
+ transfer->x / transfer->block.width * transfer->block.size;
+}
+
+static void
+i915_transfer_unmap(struct pipe_screen *screen,
+ struct pipe_transfer *transfer)
+{
+ struct i915_texture *tex = (struct i915_texture *)transfer->texture;
+ struct intel_winsys *iws = i915_screen(tex->base.screen)->iws;
+ iws->buffer_unmap(iws, tex->buffer);
+}
+
+static void
+i915_tex_transfer_destroy(struct pipe_transfer *trans)
+{
+ pipe_texture_reference(&trans->texture, NULL);
+ FREE(trans);
+}
+
+
+/*
+ * Other texture functions
+ */
+
+
+void
+i915_init_screen_texture_functions(struct i915_screen *is)
+{
+ is->base.texture_create = i915_texture_create;
+ is->base.texture_blanket = i915_texture_blanket;
+ is->base.texture_destroy = i915_texture_destroy;
+ is->base.get_tex_surface = i915_get_tex_surface;
+ is->base.tex_surface_destroy = i915_tex_surface_destroy;
+ is->base.get_tex_transfer = i915_get_tex_transfer;
+ is->base.transfer_map = i915_transfer_map;
+ is->base.transfer_unmap = i915_transfer_unmap;
+ is->base.tex_transfer_destroy = i915_tex_transfer_destroy;
+}
+
+struct pipe_texture *
+i915_texture_blanket_intel(struct pipe_screen *screen,
+ struct pipe_texture *base,
+ unsigned stride,
+ struct intel_buffer *buffer)
+{
+ struct i915_texture *tex;
+ assert(screen);
+
+ /* Only supports one type */
+ if (base->target != PIPE_TEXTURE_2D ||
+ base->last_level != 0 ||
+ base->depth[0] != 1) {
+ return NULL;
+ }
+
+ tex = CALLOC_STRUCT(i915_texture);
+ if (!tex)
+ return NULL;
+
+ tex->base = *base;
+ pipe_reference_init(&tex->base.reference, 1);
+ tex->base.screen = screen;
+
+ tex->stride = stride;
+
+ i915_miptree_set_level_info(tex, 0, 1, base->width[0], base->height[0], 1);
+ i915_miptree_set_image_offset(tex, 0, 0, 0, 0);
+
+ tex->buffer = buffer;
+
+ return &tex->base;
+}
+
+boolean
+i915_get_texture_buffer_intel(struct pipe_texture *texture,
+ struct intel_buffer **buffer,
+ unsigned *stride)
+{
+ struct i915_texture *tex = (struct i915_texture *)texture;
+
+ if (!texture)
+ return FALSE;
+
+ *stride = tex->stride;
+ *buffer = tex->buffer;
+
+ return TRUE;
+}
diff --git a/src/gallium/drivers/i915simple/i915_texture.h b/src/gallium/drivers/i915simple/i915_texture.h
new file mode 100644
index 0000000000..51a1dd984c
--- /dev/null
+++ b/src/gallium/drivers/i915simple/i915_texture.h
@@ -0,0 +1,36 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef I915_TEXTURE_H
+#define I915_TEXTURE_H
+
+struct i915_screen;
+
+extern void
+i915_init_screen_texture_functions(struct i915_screen *is);
+
+#endif /* I915_TEXTURE_H */
diff --git a/src/gallium/drivers/i915simple/intel_batchbuffer.h b/src/gallium/drivers/i915simple/intel_batchbuffer.h
new file mode 100644
index 0000000000..db12dfd2ac
--- /dev/null
+++ b/src/gallium/drivers/i915simple/intel_batchbuffer.h
@@ -0,0 +1,87 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_BATCH_H
+#define INTEL_BATCH_H
+
+#include "intel_winsys.h"
+
+static INLINE boolean
+intel_batchbuffer_check(struct intel_batchbuffer *batch,
+ size_t dwords,
+ size_t relocs)
+{
+ return dwords * 4 <= batch->size - (batch->ptr - batch->map) &&
+ relocs <= (batch->max_relocs - batch->relocs);
+}
+
+static INLINE size_t
+intel_batchbuffer_space(struct intel_batchbuffer *batch)
+{
+ return batch->size - (batch->ptr - batch->map);
+}
+
+static INLINE void
+intel_batchbuffer_dword(struct intel_batchbuffer *batch,
+ unsigned dword)
+{
+ if (intel_batchbuffer_space(batch) < 4)
+ return;
+
+ *(unsigned *)batch->ptr = dword;
+ batch->ptr += 4;
+}
+
+static INLINE void
+intel_batchbuffer_write(struct intel_batchbuffer *batch,
+ void *data,
+ size_t size)
+{
+ if (intel_batchbuffer_space(batch) < size)
+ return;
+
+ memcpy(batch->ptr, data, size);
+ batch->ptr += size;
+}
+
+static INLINE int
+intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
+ struct intel_buffer *buffer,
+ enum intel_buffer_usage usage,
+ size_t offset)
+{
+ return batch->iws->batchbuffer_reloc(batch, buffer, usage, offset);
+}
+
+static INLINE void
+intel_batchbuffer_flush(struct intel_batchbuffer *batch,
+ struct pipe_fence_handle **fence)
+{
+ batch->iws->batchbuffer_flush(batch, fence);
+}
+
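+/*
+ * A minimal usage sketch of the helpers above (illustrative only, kept
+ * compiled out): emit one texture unit's three map dwords (address
+ * relocation, MS3, MS4), assuming the relocation occupies a single dword
+ * at the current ptr, as in the driver's OUT_RELOC usage.
+ */
+#if 0
+static void
+example_emit_map(struct intel_batchbuffer *batch,
+                 struct intel_buffer *buf,
+                 unsigned ms3, unsigned ms4)
+{
+   if (!intel_batchbuffer_check(batch, 3, 1))
+      return;   /* a real caller would flush and retry */
+
+   intel_batchbuffer_reloc(batch, buf, INTEL_USAGE_SAMPLER, 0);
+   intel_batchbuffer_dword(batch, ms3);
+   intel_batchbuffer_dword(batch, ms4);
+}
+#endif
+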
+#endif
diff --git a/src/gallium/drivers/i915simple/intel_winsys.h b/src/gallium/drivers/i915simple/intel_winsys.h
new file mode 100644
index 0000000000..42c5e7470e
--- /dev/null
+++ b/src/gallium/drivers/i915simple/intel_winsys.h
@@ -0,0 +1,230 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 Jakob Bornecrantz
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_WINSYS_H
+#define INTEL_WINSYS_H
+
+#include "pipe/p_compiler.h"
+
+struct intel_winsys;
+struct intel_buffer;
+struct intel_batchbuffer;
+struct pipe_texture;
+struct pipe_fence_handle;
+
+enum intel_buffer_usage
+{
+ /* use on textures */
+ INTEL_USAGE_RENDER = 0x01,
+ INTEL_USAGE_SAMPLER = 0x02,
+ INTEL_USAGE_2D_TARGET = 0x04,
+ INTEL_USAGE_2D_SOURCE = 0x08,
+ /* use on vertex */
+ INTEL_USAGE_VERTEX = 0x10,
+};
+
+enum intel_buffer_type
+{
+ INTEL_NEW_TEXTURE,
+ INTEL_NEW_SCANOUT, /**< a texture used for scanout */
+ INTEL_NEW_VERTEX,
+};
+
+enum intel_buffer_tile
+{
+ INTEL_TILE_NONE,
+ INTEL_TILE_X,
+ INTEL_TILE_Y,
+};
+
+struct intel_batchbuffer {
+
+ struct intel_winsys *iws;
+
+ /**
+ * Values exported to speed up writing the batchbuffer,
+ * instead of having to go through an accessor function for
+ * each dword written.
+ */
+ /*@{*/
+ uint8_t *map;
+ uint8_t *ptr;
+ size_t size;
+
+ size_t relocs;
+ size_t max_relocs;
+ /*@}*/
+};
+
+struct intel_winsys {
+
+ /**
+ * Batchbuffer functions.
+ */
+ /*@{*/
+ /**
+ * Create a new batchbuffer.
+ */
+ struct intel_batchbuffer *(*batchbuffer_create)(struct intel_winsys *iws);
+
+ /**
+ * Emit a relocation to a buffer.
+ * The target position in the batchbuffer is the current ptr.
+ *
+ * @batch
+ * @reloc buffer whose address is to be inserted into the target.
+ * @usage how the hardware is going to use the buffer.
+ * @offset add this to the reloc buffer's address.
+ * @target buffer where to write the address, null for batchbuffer.
+ */
+ int (*batchbuffer_reloc)(struct intel_batchbuffer *batch,
+ struct intel_buffer *reloc,
+ enum intel_buffer_usage usage,
+ unsigned offset);
+
+ /**
+ * Flush a batchbuffer.
+ */
+ void (*batchbuffer_flush)(struct intel_batchbuffer *batch,
+ struct pipe_fence_handle **fence);
+
+ /**
+ * Destroy a batchbuffer.
+ */
+ void (*batchbuffer_destroy)(struct intel_batchbuffer *batch);
+ /*@}*/
+
+
+ /**
+ * Buffer functions.
+ */
+ /*@{*/
+ /**
+ * Create a buffer.
+ */
+ struct intel_buffer *(*buffer_create)(struct intel_winsys *iws,
+ unsigned size, unsigned alignment,
+ enum intel_buffer_type type);
+
+ /**
+ * Fence a buffer with a fence reg.
+ * Not to be confused with pipe_fence_handle.
+ */
+ int (*buffer_set_fence_reg)(struct intel_winsys *iws,
+ struct intel_buffer *buffer,
+ unsigned stride,
+ enum intel_buffer_tile tile);
+
+ /**
+ * Map a buffer.
+ */
+ void *(*buffer_map)(struct intel_winsys *iws,
+ struct intel_buffer *buffer,
+ boolean write);
+
+ /**
+ * Unmap a buffer.
+ */
+ void (*buffer_unmap)(struct intel_winsys *iws,
+ struct intel_buffer *buffer);
+
+ /**
+ * Write to a buffer.
+ *
+ * Arguments follow pwrite(2).
+ */
+ int (*buffer_write)(struct intel_winsys *iws,
+ struct intel_buffer *dst,
+ const void *src,
+ size_t size,
+ size_t offset);
+
+ void (*buffer_destroy)(struct intel_winsys *iws,
+ struct intel_buffer *buffer);
+ /*@}*/
+
+
+ /**
+ * Fence functions.
+ */
+ /*@{*/
+ /**
+ * Reference fence and set ptr to fence.
+ */
+ void (*fence_reference)(struct intel_winsys *iws,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence);
+
+ /**
+ * Check if a fence has finished.
+ */
+ int (*fence_signalled)(struct intel_winsys *iws,
+ struct pipe_fence_handle *fence);
+
+ /**
+ * Wait on a fence to finish.
+ */
+ int (*fence_finish)(struct intel_winsys *iws,
+ struct pipe_fence_handle *fence);
+ /*@}*/
+
+
+ /**
+ * Destroy the winsys.
+ */
+ void (*destroy)(struct intel_winsys *iws);
+};
+
+
+/**
+ * Create an i915 pipe_screen.
+ */
+struct pipe_screen *i915_create_screen(struct intel_winsys *iws, unsigned pci_id);
+
+/**
+ * Create an i915 pipe_context.
+ */
+struct pipe_context *i915_create_context(struct pipe_screen *screen);
+
+/**
+ * Get the intel_winsys buffer backing the texture.
+ *
+ * TODO UGLY
+ */
+boolean i915_get_texture_buffer_intel(struct pipe_texture *texture,
+ struct intel_buffer **buffer,
+ unsigned *stride);
+
+/**
+ * Wrap an intel_winsys buffer with a texture blanket.
+ *
+ * TODO UGLY
+ */
+struct pipe_texture * i915_texture_blanket_intel(struct pipe_screen *screen,
+ struct pipe_texture *tmplt,
+ unsigned pitch,
+ struct intel_buffer *buffer);
+
+#endif
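
The table above is the whole contract between the i915 driver and its window
system.  A minimal usage sketch, assuming an already-initialized
implementation `iws`; the function name and vertex data are illustrative:

    /* Sketch only: exercises the function table declared above. */
    static void
    example_submit(struct intel_winsys *iws)
    {
       static const float verts[12];
       struct pipe_fence_handle *fence = NULL;
       struct intel_buffer *vbo;
       struct intel_batchbuffer *batch;

       vbo = iws->buffer_create(iws, sizeof(verts), 64, INTEL_NEW_VERTEX);
       iws->buffer_write(iws, vbo, verts, sizeof(verts), 0);

       batch = iws->batchbuffer_create(iws);
       /* ... commands are written through batch->ptr here; one of them is
        * then pointed at the vertex buffer: */
       iws->batchbuffer_reloc(batch, vbo, INTEL_USAGE_VERTEX, 0);
       iws->batchbuffer_flush(batch, &fence);

       iws->fence_finish(iws, fence);
       iws->fence_reference(iws, &fence, NULL);

       iws->batchbuffer_destroy(batch);
       iws->buffer_destroy(iws, vbo);
    }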
diff --git a/src/gallium/drivers/i965simple/Makefile b/src/gallium/drivers/i965simple/Makefile
new file mode 100644
index 0000000000..19182afa75
--- /dev/null
+++ b/src/gallium/drivers/i965simple/Makefile
@@ -0,0 +1,52 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = i965simple
+
+C_SOURCES = \
+ brw_blit.c \
+ brw_flush.c \
+ brw_screen.c \
+ brw_surface.c \
+ brw_cc.c \
+ brw_clip.c \
+ brw_clip_line.c \
+ brw_clip_point.c \
+ brw_clip_state.c \
+ brw_clip_tri.c \
+ brw_clip_util.c \
+ brw_context.c \
+ brw_curbe.c \
+ brw_draw.c \
+ brw_draw_upload.c \
+ brw_eu.c \
+ brw_eu_debug.c \
+ brw_eu_emit.c \
+ brw_eu_util.c \
+ brw_gs.c \
+ brw_gs_emit.c \
+ brw_gs_state.c \
+ brw_misc_state.c \
+ brw_sf.c \
+ brw_sf_emit.c \
+ brw_sf_state.c \
+ brw_state.c \
+ brw_state_batch.c \
+ brw_state_cache.c \
+ brw_state_pool.c \
+ brw_state_upload.c \
+ brw_tex_layout.c \
+ brw_urb.c \
+ brw_util.c \
+ brw_vs.c \
+ brw_vs_emit.c \
+ brw_vs_state.c \
+ brw_wm.c \
+ brw_wm_iz.c \
+ brw_wm_decl.c \
+ brw_wm_glsl.c \
+ brw_wm_sampler_state.c \
+ brw_wm_state.c \
+ brw_wm_surface_state.c
+
+include ../../Makefile.template
diff --git a/src/gallium/drivers/i965simple/SConscript b/src/gallium/drivers/i965simple/SConscript
new file mode 100644
index 0000000000..43fc2a4005
--- /dev/null
+++ b/src/gallium/drivers/i965simple/SConscript
@@ -0,0 +1,54 @@
+Import('*')
+
+env = env.Clone()
+
+i965simple = env.ConvenienceLibrary(
+ target = 'i965simple',
+ source = [
+ 'brw_blit.c',
+ 'brw_cc.c',
+ 'brw_clip.c',
+ 'brw_clip_line.c',
+ 'brw_clip_point.c',
+ 'brw_clip_state.c',
+ 'brw_clip_tri.c',
+ 'brw_clip_util.c',
+ 'brw_context.c',
+ 'brw_curbe.c',
+ 'brw_draw.c',
+ 'brw_draw_upload.c',
+ 'brw_eu.c',
+ 'brw_eu_debug.c',
+ 'brw_eu_emit.c',
+ 'brw_eu_util.c',
+ 'brw_flush.c',
+ 'brw_gs.c',
+ 'brw_gs_emit.c',
+ 'brw_gs_state.c',
+ 'brw_misc_state.c',
+ 'brw_screen.c',
+ 'brw_sf.c',
+ 'brw_sf_emit.c',
+ 'brw_sf_state.c',
+ 'brw_state.c',
+ 'brw_state_batch.c',
+ 'brw_state_cache.c',
+ 'brw_state_pool.c',
+ 'brw_state_upload.c',
+ 'brw_surface.c',
+ 'brw_tex_layout.c',
+ 'brw_urb.c',
+ 'brw_util.c',
+ 'brw_vs.c',
+ 'brw_vs_emit.c',
+ 'brw_vs_state.c',
+ 'brw_wm.c',
+ 'brw_wm_decl.c',
+ 'brw_wm_glsl.c',
+ 'brw_wm_iz.c',
+ 'brw_wm_sampler_state.c',
+ 'brw_wm_state.c',
+ 'brw_wm_surface_state.c',
+ ])
+
+Export('i965simple')
diff --git a/src/gallium/drivers/i965simple/brw_batch.h b/src/gallium/drivers/i965simple/brw_batch.h
new file mode 100644
index 0000000000..5f5932a488
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_batch.h
@@ -0,0 +1,59 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef BRW_BATCH_H
+#define BRW_BATCH_H
+
+#include "brw_winsys.h"
+
+#define BATCH_LOCALS
+
+#define INTEL_BATCH_NO_CLIPRECTS 0x1
+#define INTEL_BATCH_CLIPRECTS 0x2
+
+#define BEGIN_BATCH( dwords, relocs ) \
+ brw->winsys->batch_start(brw->winsys, dwords, relocs)
+
+#define OUT_BATCH( dword ) \
+ brw->winsys->batch_dword(brw->winsys, dword)
+
+#define OUT_RELOC( buf, flags, delta ) \
+ brw->winsys->batch_reloc(brw->winsys, buf, flags, delta)
+
+#define ADVANCE_BATCH() \
+ brw->winsys->batch_end( brw->winsys )
+
+/* XXX: this is bogus - need proper handling for out-of-memory in batchbuffer.
+ */
+#define FLUSH_BATCH(fence) do { \
+ brw->winsys->batch_flush(brw->winsys, fence); \
+ brw->hardware_dirty = ~0; \
+} while (0)
+
+#define BRW_BATCH_STRUCT(brw, s) brw_batchbuffer_data( brw->winsys, (s), sizeof(*(s)))
+
+#endif
diff --git a/src/gallium/drivers/i965simple/brw_blit.c b/src/gallium/drivers/i965simple/brw_blit.c
new file mode 100644
index 0000000000..4d11f8d2ab
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_blit.c
@@ -0,0 +1,218 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include <stdio.h>
+#include <errno.h>
+
+#include "brw_batch.h"
+#include "brw_blit.h"
+#include "brw_context.h"
+#include "brw_reg.h"
+
+#include "pipe/p_context.h"
+#include "pipe/internal/p_winsys_screen.h"
+
+#define FILE_DEBUG_FLAG DEBUG_BLIT
+
+void brw_fill_blit(struct brw_context *brw,
+ unsigned cpp,
+ short dst_pitch,
+ struct pipe_buffer *dst_buffer,
+ unsigned dst_offset,
+ boolean dst_tiled,
+ short x, short y,
+ short w, short h,
+ unsigned color)
+{
+ unsigned BR13, CMD;
+ BATCH_LOCALS;
+
+ dst_pitch *= cpp;
+
+ switch(cpp) {
+ case 1:
+ case 2:
+ case 3:
+ BR13 = (0xF0 << 16) | (1<<24);
+ CMD = XY_COLOR_BLT_CMD;
+ break;
+ case 4:
+ BR13 = (0xF0 << 16) | (1<<24) | (1<<25);
+ CMD = XY_COLOR_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
+ break;
+ default:
+ return;
+ }
+
+ if (dst_tiled) {
+ CMD |= XY_DST_TILED;
+ dst_pitch /= 4;
+ }
+
+ BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
+ OUT_BATCH( CMD );
+ OUT_BATCH( dst_pitch | BR13 );
+ OUT_BATCH( (y << 16) | x );
+ OUT_BATCH( ((y+h) << 16) | (x+w) );
+ OUT_RELOC( dst_buffer, BRW_BUFFER_ACCESS_WRITE, dst_offset );
+ OUT_BATCH( color );
+ ADVANCE_BATCH();
+}
+
+static unsigned translate_raster_op(unsigned logicop)
+{
+ switch(logicop) {
+ case PIPE_LOGICOP_CLEAR: return 0x00;
+ case PIPE_LOGICOP_AND: return 0x88;
+ case PIPE_LOGICOP_AND_REVERSE: return 0x44;
+ case PIPE_LOGICOP_COPY: return 0xCC;
+ case PIPE_LOGICOP_AND_INVERTED: return 0x22;
+ case PIPE_LOGICOP_NOOP: return 0xAA;
+ case PIPE_LOGICOP_XOR: return 0x66;
+ case PIPE_LOGICOP_OR: return 0xEE;
+ case PIPE_LOGICOP_NOR: return 0x11;
+ case PIPE_LOGICOP_EQUIV: return 0x99;
+ case PIPE_LOGICOP_INVERT: return 0x55;
+ case PIPE_LOGICOP_OR_REVERSE: return 0xDD;
+ case PIPE_LOGICOP_COPY_INVERTED: return 0x33;
+ case PIPE_LOGICOP_OR_INVERTED: return 0xBB;
+ case PIPE_LOGICOP_NAND: return 0x77;
+ case PIPE_LOGICOP_SET: return 0xFF;
+ default: return 0;
+ }
+}
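
The values returned above are the standard 8-bit ROP3-style codes: with the
canonical source pattern S = 0xCC and destination pattern D = 0xAA, each
entry is the named boolean function applied bitwise.  A quick self-check
sketch (it would have to live in this file, since translate_raster_op() is
static, and it assumes assert() is available):

    /* Illustrative self-check of the ROP table; not driver code. */
    static void
    check_raster_ops(void)
    {
       const unsigned S = 0xCC, D = 0xAA;

       assert(translate_raster_op(PIPE_LOGICOP_COPY) == S);
       assert(translate_raster_op(PIPE_LOGICOP_NOOP) == D);
       assert(translate_raster_op(PIPE_LOGICOP_XOR)  == (S ^ D));
       assert(translate_raster_op(PIPE_LOGICOP_AND)  == (S & D));
       assert(translate_raster_op(PIPE_LOGICOP_NOR)  == (~(S | D) & 0xFF));
    }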
+
+
+/* Copy BitBlt
+ */
+void brw_copy_blit(struct brw_context *brw,
+ unsigned do_flip,
+ unsigned cpp,
+ short src_pitch,
+ struct pipe_buffer *src_buffer,
+ unsigned src_offset,
+ boolean src_tiled,
+ short dst_pitch,
+ struct pipe_buffer *dst_buffer,
+ unsigned dst_offset,
+ boolean dst_tiled,
+ short src_x, short src_y,
+ short dst_x, short dst_y,
+ short w, short h,
+ unsigned logic_op)
+{
+ unsigned CMD, BR13;
+ int dst_y2 = dst_y + h;
+ int dst_x2 = dst_x + w;
+ BATCH_LOCALS;
+
+
+   DBG("%s src:buf(%p)/%d %d,%d dst:buf(%p)/%d %d,%d sz:%dx%d op:%d\n",
+       __FUNCTION__,
+       (void *) src_buffer, src_pitch, src_x, src_y,
+       (void *) dst_buffer, dst_pitch, dst_x, dst_y,
+       w, h, logic_op);
+
+ assert( logic_op - PIPE_LOGICOP_CLEAR >= 0 );
+ assert( logic_op - PIPE_LOGICOP_CLEAR < 0x10 );
+
+ src_pitch *= cpp;
+ dst_pitch *= cpp;
+
+ switch(cpp) {
+ case 1:
+ case 2:
+ case 3:
+ BR13 = (translate_raster_op(logic_op) << 16) | (1<<24);
+ CMD = XY_SRC_COPY_BLT_CMD;
+ break;
+ case 4:
+ BR13 = (translate_raster_op(logic_op) << 16) | (1<<24) |
+ (1<<25);
+ CMD = XY_SRC_COPY_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
+ break;
+ default:
+ return;
+ }
+
+ if (src_tiled) {
+ CMD |= XY_SRC_TILED;
+ src_pitch /= 4;
+ }
+
+ if (dst_tiled) {
+ CMD |= XY_DST_TILED;
+ dst_pitch /= 4;
+ }
+
+ if (dst_y2 < dst_y ||
+ dst_x2 < dst_x) {
+ return;
+ }
+
+ dst_pitch &= 0xffff;
+ src_pitch &= 0xffff;
+
+ /* Initial y values don't seem to work with negative pitches. If
+ * we adjust the offsets manually (below), it seems to work fine.
+ *
+ * On the other hand, if we always adjust, the hardware doesn't
+ * know which blit directions to use, so overlapping copypixels get
+ * the wrong result.
+ */
+ if (dst_pitch > 0 && src_pitch > 0) {
+ BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
+ OUT_BATCH( CMD );
+ OUT_BATCH( dst_pitch | BR13 );
+ OUT_BATCH( (dst_y << 16) | dst_x );
+ OUT_BATCH( (dst_y2 << 16) | dst_x2 );
+ OUT_RELOC( dst_buffer, BRW_BUFFER_ACCESS_WRITE,
+ dst_offset );
+ OUT_BATCH( (src_y << 16) | src_x );
+ OUT_BATCH( src_pitch );
+ OUT_RELOC( src_buffer, BRW_BUFFER_ACCESS_READ,
+ src_offset );
+ ADVANCE_BATCH();
+ }
+ else {
+ BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
+ OUT_BATCH( CMD );
+ OUT_BATCH( (dst_pitch & 0xffff) | BR13 );
+ OUT_BATCH( (0 << 16) | dst_x );
+ OUT_BATCH( (h << 16) | dst_x2 );
+ OUT_RELOC( dst_buffer, BRW_BUFFER_ACCESS_WRITE,
+ dst_offset + dst_y * dst_pitch );
+ OUT_BATCH( (src_pitch & 0xffff) );
+ OUT_RELOC( src_buffer, BRW_BUFFER_ACCESS_READ,
+ src_offset + src_y * src_pitch );
+ ADVANCE_BATCH();
+ }
+}
+
+
+
diff --git a/src/gallium/drivers/i965simple/brw_blit.h b/src/gallium/drivers/i965simple/brw_blit.h
new file mode 100644
index 0000000000..111c5d91d3
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_blit.h
@@ -0,0 +1,33 @@
+#ifndef BRW_BLIT_H
+#define BRW_BLIT_H
+
+#include "pipe/p_compiler.h"
+
+struct pipe_buffer;
+struct brw_context;
+
+void brw_fill_blit(struct brw_context *intel,
+ unsigned cpp,
+ short dst_pitch,
+ struct pipe_buffer *dst_buffer,
+ unsigned dst_offset,
+ boolean dst_tiled,
+ short x, short y,
+ short w, short h,
+ unsigned color);
+void brw_copy_blit(struct brw_context *intel,
+ unsigned do_flip,
+ unsigned cpp,
+ short src_pitch,
+ struct pipe_buffer *src_buffer,
+ unsigned src_offset,
+ boolean src_tiled,
+ short dst_pitch,
+ struct pipe_buffer *dst_buffer,
+ unsigned dst_offset,
+ boolean dst_tiled,
+ short src_x, short src_y,
+ short dst_x, short dst_y,
+ short w, short h,
+ unsigned logic_op);
+#endif
diff --git a/src/gallium/drivers/i965simple/brw_cc.c b/src/gallium/drivers/i965simple/brw_cc.c
new file mode 100644
index 0000000000..3668123e2e
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_cc.c
@@ -0,0 +1,269 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+#include "brw_context.h"
+#include "brw_state.h"
+#include "brw_defines.h"
+#include "brw_util.h"
+
+
+static int brw_translate_compare_func(int func)
+{
+ switch(func) {
+ case PIPE_FUNC_NEVER:
+ return BRW_COMPAREFUNCTION_NEVER;
+ case PIPE_FUNC_LESS:
+ return BRW_COMPAREFUNCTION_LESS;
+ case PIPE_FUNC_LEQUAL:
+ return BRW_COMPAREFUNCTION_LEQUAL;
+ case PIPE_FUNC_GREATER:
+ return BRW_COMPAREFUNCTION_GREATER;
+ case PIPE_FUNC_GEQUAL:
+ return BRW_COMPAREFUNCTION_GEQUAL;
+ case PIPE_FUNC_NOTEQUAL:
+ return BRW_COMPAREFUNCTION_NOTEQUAL;
+ case PIPE_FUNC_EQUAL:
+ return BRW_COMPAREFUNCTION_EQUAL;
+ case PIPE_FUNC_ALWAYS:
+ return BRW_COMPAREFUNCTION_ALWAYS;
+ }
+
+ debug_printf("Unknown value in %s: %x\n", __FUNCTION__, func);
+ return BRW_COMPAREFUNCTION_ALWAYS;
+}
+
+static int brw_translate_stencil_op(int op)
+{
+ switch(op) {
+ case PIPE_STENCIL_OP_KEEP:
+ return BRW_STENCILOP_KEEP;
+ case PIPE_STENCIL_OP_ZERO:
+ return BRW_STENCILOP_ZERO;
+ case PIPE_STENCIL_OP_REPLACE:
+ return BRW_STENCILOP_REPLACE;
+ case PIPE_STENCIL_OP_INCR:
+ return BRW_STENCILOP_INCRSAT;
+ case PIPE_STENCIL_OP_DECR:
+ return BRW_STENCILOP_DECRSAT;
+ case PIPE_STENCIL_OP_INCR_WRAP:
+ return BRW_STENCILOP_INCR;
+ case PIPE_STENCIL_OP_DECR_WRAP:
+ return BRW_STENCILOP_DECR;
+ case PIPE_STENCIL_OP_INVERT:
+ return BRW_STENCILOP_INVERT;
+ default:
+ return BRW_STENCILOP_ZERO;
+ }
+}
+
+
+static int brw_translate_logic_op(int opcode)
+{
+ switch(opcode) {
+ case PIPE_LOGICOP_CLEAR:
+ return BRW_LOGICOPFUNCTION_CLEAR;
+ case PIPE_LOGICOP_AND:
+ return BRW_LOGICOPFUNCTION_AND;
+ case PIPE_LOGICOP_AND_REVERSE:
+ return BRW_LOGICOPFUNCTION_AND_REVERSE;
+ case PIPE_LOGICOP_COPY:
+ return BRW_LOGICOPFUNCTION_COPY;
+ case PIPE_LOGICOP_COPY_INVERTED:
+ return BRW_LOGICOPFUNCTION_COPY_INVERTED;
+ case PIPE_LOGICOP_AND_INVERTED:
+ return BRW_LOGICOPFUNCTION_AND_INVERTED;
+ case PIPE_LOGICOP_NOOP:
+ return BRW_LOGICOPFUNCTION_NOOP;
+ case PIPE_LOGICOP_XOR:
+ return BRW_LOGICOPFUNCTION_XOR;
+ case PIPE_LOGICOP_OR:
+ return BRW_LOGICOPFUNCTION_OR;
+ case PIPE_LOGICOP_OR_INVERTED:
+ return BRW_LOGICOPFUNCTION_OR_INVERTED;
+ case PIPE_LOGICOP_NOR:
+ return BRW_LOGICOPFUNCTION_NOR;
+ case PIPE_LOGICOP_EQUIV:
+ return BRW_LOGICOPFUNCTION_EQUIV;
+ case PIPE_LOGICOP_INVERT:
+ return BRW_LOGICOPFUNCTION_INVERT;
+ case PIPE_LOGICOP_OR_REVERSE:
+ return BRW_LOGICOPFUNCTION_OR_REVERSE;
+ case PIPE_LOGICOP_NAND:
+ return BRW_LOGICOPFUNCTION_NAND;
+ case PIPE_LOGICOP_SET:
+ return BRW_LOGICOPFUNCTION_SET;
+ default:
+ return BRW_LOGICOPFUNCTION_SET;
+ }
+}
+
+
+static void upload_cc_vp( struct brw_context *brw )
+{
+ struct brw_cc_viewport ccv;
+
+ memset(&ccv, 0, sizeof(ccv));
+
+ ccv.min_depth = 0.0;
+ ccv.max_depth = 1.0;
+
+ brw->cc.vp_gs_offset = brw_cache_data( &brw->cache[BRW_CC_VP], &ccv );
+}
+
+const struct brw_tracked_state brw_cc_vp = {
+ .dirty = {
+ .brw = BRW_NEW_SCENE,
+ .cache = 0
+ },
+ .update = upload_cc_vp
+};
+
+
+static void upload_cc_unit( struct brw_context *brw )
+{
+ struct brw_cc_unit_state cc;
+
+ memset(&cc, 0, sizeof(cc));
+
+ /* BRW_NEW_DEPTH_STENCIL */
+ if (brw->attribs.DepthStencil->stencil[0].enabled) {
+ cc.cc0.stencil_enable = brw->attribs.DepthStencil->stencil[0].enabled;
+ cc.cc0.stencil_func = brw_translate_compare_func(brw->attribs.DepthStencil->stencil[0].func);
+ cc.cc0.stencil_fail_op = brw_translate_stencil_op(brw->attribs.DepthStencil->stencil[0].fail_op);
+ cc.cc0.stencil_pass_depth_fail_op = brw_translate_stencil_op(
+ brw->attribs.DepthStencil->stencil[0].zfail_op);
+ cc.cc0.stencil_pass_depth_pass_op = brw_translate_stencil_op(
+ brw->attribs.DepthStencil->stencil[0].zpass_op);
+ cc.cc1.stencil_ref = brw->attribs.DepthStencil->stencil[0].ref_value;
+ cc.cc1.stencil_write_mask = brw->attribs.DepthStencil->stencil[0].writemask;
+ cc.cc1.stencil_test_mask = brw->attribs.DepthStencil->stencil[0].valuemask;
+
+ if (brw->attribs.DepthStencil->stencil[1].enabled) {
+ cc.cc0.bf_stencil_enable = brw->attribs.DepthStencil->stencil[1].enabled;
+ cc.cc0.bf_stencil_func = brw_translate_compare_func(
+ brw->attribs.DepthStencil->stencil[1].func);
+ cc.cc0.bf_stencil_fail_op = brw_translate_stencil_op(
+ brw->attribs.DepthStencil->stencil[1].fail_op);
+ cc.cc0.bf_stencil_pass_depth_fail_op = brw_translate_stencil_op(
+ brw->attribs.DepthStencil->stencil[1].zfail_op);
+ cc.cc0.bf_stencil_pass_depth_pass_op = brw_translate_stencil_op(
+ brw->attribs.DepthStencil->stencil[1].zpass_op);
+ cc.cc1.bf_stencil_ref = brw->attribs.DepthStencil->stencil[1].ref_value;
+ cc.cc2.bf_stencil_write_mask = brw->attribs.DepthStencil->stencil[1].writemask;
+ cc.cc2.bf_stencil_test_mask = brw->attribs.DepthStencil->stencil[1].valuemask;
+ }
+
+ /* Not really sure about this:
+ */
+ if (brw->attribs.DepthStencil->stencil[0].writemask ||
+ brw->attribs.DepthStencil->stencil[1].writemask)
+ cc.cc0.stencil_write_enable = 1;
+ }
+
+ /* BRW_NEW_BLEND */
+ if (brw->attribs.Blend->logicop_enable) {
+ cc.cc2.logicop_enable = 1;
+ cc.cc5.logicop_func = brw_translate_logic_op( brw->attribs.Blend->logicop_func );
+ }
+ else if (brw->attribs.Blend->blend_enable) {
+ int eqRGB = brw->attribs.Blend->rgb_func;
+ int eqA = brw->attribs.Blend->alpha_func;
+ int srcRGB = brw->attribs.Blend->rgb_src_factor;
+ int dstRGB = brw->attribs.Blend->rgb_dst_factor;
+ int srcA = brw->attribs.Blend->alpha_src_factor;
+ int dstA = brw->attribs.Blend->alpha_dst_factor;
+
+ if (eqRGB == PIPE_BLEND_MIN || eqRGB == PIPE_BLEND_MAX) {
+ srcRGB = dstRGB = PIPE_BLENDFACTOR_ONE;
+ }
+
+ if (eqA == PIPE_BLEND_MIN || eqA == PIPE_BLEND_MAX) {
+ srcA = dstA = PIPE_BLENDFACTOR_ONE;
+ }
+
+ cc.cc6.dest_blend_factor = brw_translate_blend_factor(dstRGB);
+ cc.cc6.src_blend_factor = brw_translate_blend_factor(srcRGB);
+ cc.cc6.blend_function = brw_translate_blend_equation( eqRGB );
+
+ cc.cc5.ia_dest_blend_factor = brw_translate_blend_factor(dstA);
+ cc.cc5.ia_src_blend_factor = brw_translate_blend_factor(srcA);
+ cc.cc5.ia_blend_function = brw_translate_blend_equation( eqA );
+
+ cc.cc3.blend_enable = 1;
+ cc.cc3.ia_blend_enable = (srcA != srcRGB ||
+ dstA != dstRGB ||
+ eqA != eqRGB);
+ }
+
+ /* BRW_NEW_ALPHATEST
+ */
+ if (brw->attribs.DepthStencil->alpha.enabled) {
+ cc.cc3.alpha_test = 1;
+ cc.cc3.alpha_test_func =
+ brw_translate_compare_func(brw->attribs.DepthStencil->alpha.func);
+
+ cc.cc7.alpha_ref.ub[0] = float_to_ubyte(brw->attribs.DepthStencil->alpha.ref_value);
+
+ cc.cc3.alpha_test_format = BRW_ALPHATEST_FORMAT_UNORM8;
+ }
+
+ if (brw->attribs.Blend->dither) {
+ cc.cc5.dither_enable = 1;
+ cc.cc6.y_dither_offset = 0;
+ cc.cc6.x_dither_offset = 0;
+ }
+
+ if (brw->attribs.DepthStencil->depth.enabled) {
+ cc.cc2.depth_test = brw->attribs.DepthStencil->depth.enabled;
+ cc.cc2.depth_test_function = brw_translate_compare_func(brw->attribs.DepthStencil->depth.func);
+ cc.cc2.depth_write_enable = brw->attribs.DepthStencil->depth.writemask;
+ }
+
+ /* CACHE_NEW_CC_VP */
+ cc.cc4.cc_viewport_state_offset = brw->cc.vp_gs_offset >> 5;
+
+ if (BRW_DEBUG & DEBUG_STATS)
+ cc.cc5.statistics_enable = 1;
+
+ brw->cc.state_gs_offset = brw_cache_data( &brw->cache[BRW_CC_UNIT], &cc );
+}
+
+const struct brw_tracked_state brw_cc_unit = {
+ .dirty = {
+ .brw = BRW_NEW_DEPTH_STENCIL | BRW_NEW_BLEND | BRW_NEW_ALPHA_TEST,
+ .cache = CACHE_NEW_CC_VP
+ },
+ .update = upload_cc_unit
+};
+
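
brw_cc_vp and brw_cc_unit are typical state atoms: a set of dirty flags plus
an update callback.  A sketch of how such atoms are generally driven; the
atoms[] list and the dirty-flag arguments are assumptions for illustration,
not the driver's actual brw_state_upload.c logic:

    /* Illustrative only: walk a list of tracked-state atoms and re-upload
     * the ones whose dependencies changed. */
    static void
    example_upload_state(struct brw_context *brw,
                         const struct brw_tracked_state **atoms,
                         unsigned nr_atoms,
                         unsigned brw_dirty,
                         unsigned cache_dirty)
    {
       unsigned i;

       for (i = 0; i < nr_atoms; i++) {
          const struct brw_tracked_state *atom = atoms[i];

          if ((atom->dirty.brw & brw_dirty) ||
              (atom->dirty.cache & cache_dirty))
             atom->update(brw);
       }
    }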
diff --git a/src/gallium/drivers/i965simple/brw_clip.c b/src/gallium/drivers/i965simple/brw_clip.c
new file mode 100644
index 0000000000..268124cc53
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_clip.c
@@ -0,0 +1,206 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "brw_defines.h"
+#include "brw_context.h"
+#include "brw_eu.h"
+#include "brw_util.h"
+#include "brw_state.h"
+#include "brw_clip.h"
+
+#define FRONT_UNFILLED_BIT 0x1
+#define BACK_UNFILLED_BIT 0x2
+
+
+static void compile_clip_prog( struct brw_context *brw,
+ struct brw_clip_prog_key *key )
+{
+ struct brw_clip_compile c;
+ const unsigned *program;
+ unsigned program_size;
+ unsigned delta;
+ unsigned i;
+
+ memset(&c, 0, sizeof(c));
+
+ /* Begin the compilation:
+ */
+ brw_init_compile(&c.func);
+
+ c.func.single_program_flow = 1;
+
+ c.key = *key;
+
+
+ /* Need to locate the two positions present in vertex + header.
+ * These are currently hardcoded:
+ */
+ c.header_position_offset = ATTR_SIZE;
+
+ for (i = 0, delta = REG_SIZE; i < PIPE_MAX_SHADER_OUTPUTS; i++)
+ if (c.key.attrs & (1<<i)) {
+ c.offset[i] = delta;
+ delta += ATTR_SIZE;
+ }
+
+ c.nr_attrs = brw_count_bits(c.key.attrs);
+ c.nr_regs = (c.nr_attrs + 1) / 2 + 1; /* are vertices packed, or reg-aligned? */
+ c.nr_bytes = c.nr_regs * REG_SIZE;
+
+ c.prog_data.clip_mode = c.key.clip_mode; /* XXX */
+
+ /* For some reason the thread is spawned with only 4 channels
+ * unmasked.
+ */
+ brw_set_mask_control(&c.func, BRW_MASK_DISABLE);
+
+
+ /* Would ideally have the option of producing a program which could
+ * do all three:
+ */
+ switch (key->primitive) {
+ case PIPE_PRIM_TRIANGLES:
+#if 0
+ if (key->do_unfilled)
+ brw_emit_unfilled_clip( &c );
+ else
+#endif
+ brw_emit_tri_clip( &c );
+ break;
+ case PIPE_PRIM_LINES:
+ brw_emit_line_clip( &c );
+ break;
+ case PIPE_PRIM_POINTS:
+ brw_emit_point_clip( &c );
+ break;
+ default:
+ assert(0);
+ return;
+ }
+
+
+
+ /* get the program
+ */
+ program = brw_get_program(&c.func, &program_size);
+
+ /* Upload
+ */
+ brw->clip.prog_gs_offset = brw_upload_cache( &brw->cache[BRW_CLIP_PROG],
+ &c.key,
+ sizeof(c.key),
+ program,
+ program_size,
+ &c.prog_data,
+ &brw->clip.prog_data );
+}
+
+
+static boolean search_cache( struct brw_context *brw,
+ struct brw_clip_prog_key *key )
+{
+ return brw_search_cache(&brw->cache[BRW_CLIP_PROG],
+ key, sizeof(*key),
+ &brw->clip.prog_data,
+ &brw->clip.prog_gs_offset);
+}
+
+
+
+
+/* Calculate interpolants for triangle and line rasterization.
+ */
+static void upload_clip_prog(struct brw_context *brw)
+{
+ struct brw_clip_prog_key key;
+
+ memset(&key, 0, sizeof(key));
+
+ /* Populate the key:
+ */
+ /* BRW_NEW_REDUCED_PRIMITIVE */
+ key.primitive = brw->reduced_primitive;
+ /* CACHE_NEW_VS_PROG */
+ key.attrs = brw->vs.prog_data->outputs_written;
+ /* BRW_NEW_RASTER */
+ key.do_flat_shading = (brw->attribs.Raster->flatshade);
+ /* BRW_NEW_CLIP */
+ key.nr_userclip = brw->attribs.Clip.nr; /* XXX */
+
+#if 0
+ key.clip_mode = BRW_CLIPMODE_NORMAL;
+
+ if (key.primitive == PIPE_PRIM_TRIANGLES) {
+ if (brw->attribs.Raster->cull_mode == PIPE_WINDING_BOTH)
+ key.clip_mode = BRW_CLIPMODE_REJECT_ALL;
+ else {
+ if (brw->attribs.Raster->fill_cw != PIPE_POLYGON_MODE_FILL ||
+ brw->attribs.Raster->fill_ccw != PIPE_POLYGON_MODE_FILL)
+ key.do_unfilled = 1;
+
+ /* Most cases the fixed function units will handle. Cases where
+ * one or more polygon faces are unfilled will require help:
+ */
+ if (key.do_unfilled) {
+ key.clip_mode = BRW_CLIPMODE_CLIP_NON_REJECTED;
+
+ if (brw->attribs.Raster->offset_cw ||
+ brw->attribs.Raster->offset_ccw) {
+ key.offset_units = brw->attribs.Raster->offset_units;
+ key.offset_factor = brw->attribs.Raster->offset_scale;
+ }
+ key.fill_ccw = brw->attribs.Raster->fill_ccw;
+ key.fill_cw = brw->attribs.Raster->fill_cw;
+ key.offset_ccw = brw->attribs.Raster->offset_ccw;
+ key.offset_cw = brw->attribs.Raster->offset_cw;
+ if (brw->attribs.Raster->light_twoside &&
+ key.fill_cw != CLIP_CULL)
+ key.copy_bfc_cw = 1;
+ }
+ }
+ }
+#else
+ key.clip_mode = BRW_CLIPMODE_ACCEPT_ALL;
+#endif
+
+ if (!search_cache(brw, &key))
+ compile_clip_prog( brw, &key );
+}
+
+const struct brw_tracked_state brw_clip_prog = {
+ .dirty = {
+ .brw = (BRW_NEW_RASTERIZER |
+ BRW_NEW_CLIP |
+ BRW_NEW_REDUCED_PRIMITIVE),
+ .cache = CACHE_NEW_VS_PROG
+ },
+ .update = upload_clip_prog
+};
diff --git a/src/gallium/drivers/i965simple/brw_clip.h b/src/gallium/drivers/i965simple/brw_clip.h
new file mode 100644
index 0000000000..d70fc094ff
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_clip.h
@@ -0,0 +1,170 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef BRW_CLIP_H
+#define BRW_CLIP_H
+
+
+#include "brw_context.h"
+#include "brw_eu.h"
+
+#define MAX_VERTS (3+6+6)
+
+/* Note that if unfilled primitives are being emitted, we have to fix
+ * up polygon offset and flatshading at this point:
+ */
+struct brw_clip_prog_key {
+ unsigned attrs:32;
+ unsigned primitive:4;
+ unsigned nr_userclip:3;
+ unsigned do_flat_shading:1;
+ unsigned do_unfilled:1;
+ unsigned fill_cw:2; /* includes cull information */
+ unsigned fill_ccw:2; /* includes cull information */
+ unsigned offset_cw:1;
+ unsigned offset_ccw:1;
+ unsigned pad0:17;
+
+ unsigned copy_bfc_cw:1;
+ unsigned copy_bfc_ccw:1;
+ unsigned clip_mode:3;
+ unsigned pad1:27;
+
+ float offset_factor;
+ float offset_units;
+};
+
+
+#define CLIP_LINE 0
+#define CLIP_POINT 1
+#define CLIP_FILL 2
+#define CLIP_CULL 3
+
+
+#define PRIM_MASK (0x1f)
+
+struct brw_clip_compile {
+ struct brw_compile func;
+ struct brw_clip_prog_key key;
+ struct brw_clip_prog_data prog_data;
+
+ struct {
+ struct brw_reg R0;
+ struct brw_reg vertex[MAX_VERTS];
+
+ struct brw_reg t;
+ struct brw_reg t0, t1;
+ struct brw_reg dp0, dp1;
+
+ struct brw_reg dpPrev;
+ struct brw_reg dp;
+ struct brw_reg loopcount;
+ struct brw_reg nr_verts;
+ struct brw_reg planemask;
+
+ struct brw_reg inlist;
+ struct brw_reg outlist;
+ struct brw_reg freelist;
+
+ struct brw_reg dir;
+ struct brw_reg tmp0, tmp1;
+ struct brw_reg offset;
+
+ struct brw_reg fixed_planes;
+ struct brw_reg plane_equation;
+ } reg;
+
+ /* 3 different ways of expressing vertex size:
+ */
+ unsigned nr_attrs;
+ unsigned nr_regs;
+ unsigned nr_bytes;
+
+ unsigned first_tmp;
+ unsigned last_tmp;
+
+ boolean need_direction;
+
+ unsigned last_mrf;
+
+ unsigned header_position_offset;
+ unsigned offset[PIPE_MAX_ATTRIBS];
+};
+
+#define ATTR_SIZE (4*4)
+
+/* Points are only culled, so there is no need for a clip routine;
+ * however, it works out easier to have a dummy one.
+ */
+void brw_emit_unfilled_clip( struct brw_clip_compile *c );
+void brw_emit_tri_clip( struct brw_clip_compile *c );
+void brw_emit_line_clip( struct brw_clip_compile *c );
+void brw_emit_point_clip( struct brw_clip_compile *c );
+
+/* brw_clip_tri.c, for use by the unfilled clip routine:
+ */
+void brw_clip_tri_init_vertices( struct brw_clip_compile *c );
+void brw_clip_tri_flat_shade( struct brw_clip_compile *c );
+void brw_clip_tri( struct brw_clip_compile *c );
+void brw_clip_tri_emit_polygon( struct brw_clip_compile *c );
+void brw_clip_tri_alloc_regs( struct brw_clip_compile *c,
+ unsigned nr_verts );
+
+
+/* Utils:
+ */
+
+void brw_clip_interp_vertex( struct brw_clip_compile *c,
+ struct brw_indirect dest_ptr,
+ struct brw_indirect v0_ptr, /* from */
+ struct brw_indirect v1_ptr, /* to */
+ struct brw_reg t0,
+ boolean force_edgeflag );
+
+void brw_clip_init_planes( struct brw_clip_compile *c );
+
+void brw_clip_emit_vue(struct brw_clip_compile *c,
+ struct brw_indirect vert,
+ boolean allocate,
+ boolean eot,
+ unsigned header);
+
+void brw_clip_kill_thread(struct brw_clip_compile *c);
+
+struct brw_reg brw_clip_plane_stride( struct brw_clip_compile *c );
+struct brw_reg brw_clip_plane0_address( struct brw_clip_compile *c );
+
+void brw_clip_copy_colors( struct brw_clip_compile *c,
+ unsigned to, unsigned from );
+
+void brw_clip_init_clipmask( struct brw_clip_compile *c );
+
+#endif
diff --git a/src/gallium/drivers/i965simple/brw_clip_line.c b/src/gallium/drivers/i965simple/brw_clip_line.c
new file mode 100644
index 0000000000..75d9e5fcda
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_clip_line.c
@@ -0,0 +1,245 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "brw_defines.h"
+#include "brw_context.h"
+#include "brw_eu.h"
+#include "brw_util.h"
+#include "brw_clip.h"
+
+
+
+static void brw_clip_line_alloc_regs( struct brw_clip_compile *c )
+{
+ unsigned i = 0,j;
+
+ /* Register usage is static, precompute here:
+ */
+ c->reg.R0 = retype(brw_vec8_grf(i, 0), BRW_REGISTER_TYPE_UD); i++;
+
+ if (c->key.nr_userclip) {
+ c->reg.fixed_planes = brw_vec4_grf(i, 0);
+ i += (6 + c->key.nr_userclip + 1) / 2;
+
+ c->prog_data.curb_read_length = (6 + c->key.nr_userclip + 1) / 2;
+ }
+ else
+ c->prog_data.curb_read_length = 0;
+
+
+ /* Payload vertices plus space for more generated vertices:
+ */
+ for (j = 0; j < 4; j++) {
+ c->reg.vertex[j] = brw_vec4_grf(i, 0);
+ i += c->nr_regs;
+ }
+
+ c->reg.t = brw_vec1_grf(i, 0);
+ c->reg.t0 = brw_vec1_grf(i, 1);
+ c->reg.t1 = brw_vec1_grf(i, 2);
+ c->reg.planemask = retype(brw_vec1_grf(i, 3), BRW_REGISTER_TYPE_UD);
+ c->reg.plane_equation = brw_vec4_grf(i, 4);
+ i++;
+
+ c->reg.dp0 = brw_vec1_grf(i, 0); /* fixme - dp4 will clobber r.1,2,3 */
+ c->reg.dp1 = brw_vec1_grf(i, 4);
+ i++;
+
+ if (!c->key.nr_userclip) {
+ c->reg.fixed_planes = brw_vec8_grf(i, 0);
+ i++;
+ }
+
+
+ c->first_tmp = i;
+ c->last_tmp = i;
+
+ c->prog_data.urb_read_length = c->nr_regs; /* ? */
+ c->prog_data.total_grf = i;
+}
+
+
+
+/* Line clipping, more or less following this algorithm:
+ *
+ * for (p=0;p<MAX_PLANES;p++) {
+ * if (clipmask & (1 << p)) {
+ * float dp0 = DOTPROD( vtx0, plane[p] );
+ * float dp1 = DOTPROD( vtx1, plane[p] );
+ *
+ * if (IS_NEGATIVE(dp1)) {
+ * float t = dp1 / (dp1 - dp0);
+ * if (t > t1) t1 = t;
+ * } else {
+ * float t = dp0 / (dp0 - dp1);
+ * if (t > t0) t0 = t;
+ * }
+ *
+ * if (t0 + t1 >= 1.0)
+ * return;
+ * }
+ * }
+ *
+ * interp( ctx, newvtx0, vtx0, vtx1, t0 );
+ * interp( ctx, newvtx1, vtx1, vtx0, t1 );
+ *
+ */
+static void clip_and_emit_line( struct brw_clip_compile *c )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_indirect vtx0 = brw_indirect(0, 0);
+ struct brw_indirect vtx1 = brw_indirect(1, 0);
+ struct brw_indirect newvtx0 = brw_indirect(2, 0);
+ struct brw_indirect newvtx1 = brw_indirect(3, 0);
+ struct brw_indirect plane_ptr = brw_indirect(4, 0);
+ struct brw_instruction *plane_loop;
+ struct brw_instruction *plane_active;
+ struct brw_instruction *is_negative;
+ struct brw_instruction *is_neg2;
+ struct brw_instruction *not_culled;
+ struct brw_reg v1_null_ud = retype(vec1(brw_null_reg()), BRW_REGISTER_TYPE_UD);
+
+ brw_MOV(p, get_addr_reg(vtx0), brw_address(c->reg.vertex[0]));
+ brw_MOV(p, get_addr_reg(vtx1), brw_address(c->reg.vertex[1]));
+ brw_MOV(p, get_addr_reg(newvtx0), brw_address(c->reg.vertex[2]));
+ brw_MOV(p, get_addr_reg(newvtx1), brw_address(c->reg.vertex[3]));
+ brw_MOV(p, get_addr_reg(plane_ptr), brw_clip_plane0_address(c));
+
+ /* Note: init t0, t1 together:
+ */
+ brw_MOV(p, vec2(c->reg.t0), brw_imm_f(0));
+
+ brw_clip_init_planes(c);
+ brw_clip_init_clipmask(c);
+
+ /* -ve rhw workaround */
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
+ brw_AND(p, brw_null_reg(), get_element_ud(c->reg.R0, 2),
+ brw_imm_ud(1<<20));
+ brw_OR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud(0x3f));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+
+ plane_loop = brw_DO(p, BRW_EXECUTE_1);
+ {
+ /* if (planemask & 1)
+ */
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
+ brw_AND(p, v1_null_ud, c->reg.planemask, brw_imm_ud(1));
+
+ plane_active = brw_IF(p, BRW_EXECUTE_1);
+ {
+ if (c->key.nr_userclip)
+ brw_MOV(p, c->reg.plane_equation, deref_4f(plane_ptr, 0));
+ else
+ brw_MOV(p, c->reg.plane_equation, deref_4b(plane_ptr, 0));
+
+#if 0
+ /* dp = DP4(vtx->position, plane)
+ */
+ brw_DP4(p, vec4(c->reg.dp0), deref_4f(vtx0, c->offset[VERT_RESULT_HPOS]), c->reg.plane_equation);
+
+ /* if (IS_NEGATIVE(dp1))
+ */
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
+ brw_DP4(p, vec4(c->reg.dp1), deref_4f(vtx1, c->offset[VERT_RESULT_HPOS]), c->reg.plane_equation);
+#else
+ #warning "disabled"
+#endif
+ is_negative = brw_IF(p, BRW_EXECUTE_1);
+ {
+ brw_ADD(p, c->reg.t, c->reg.dp1, negate(c->reg.dp0));
+ brw_math_invert(p, c->reg.t, c->reg.t);
+ brw_MUL(p, c->reg.t, c->reg.t, c->reg.dp1);
+
+ brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_G, c->reg.t, c->reg.t1 );
+ brw_MOV(p, c->reg.t1, c->reg.t);
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ }
+ is_negative = brw_ELSE(p, is_negative);
+ {
+ /* Coming back in. We know that both cannot be negative
+ * because the line would have been culled in that case.
+ */
+
+ /* If both are positive, do nothing */
+ brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_L, c->reg.dp0, brw_imm_f(0.0));
+ is_neg2 = brw_IF(p, BRW_EXECUTE_1);
+ {
+ brw_ADD(p, c->reg.t, c->reg.dp0, negate(c->reg.dp1));
+ brw_math_invert(p, c->reg.t, c->reg.t);
+ brw_MUL(p, c->reg.t, c->reg.t, c->reg.dp0);
+
+ brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_G, c->reg.t, c->reg.t0 );
+ brw_MOV(p, c->reg.t0, c->reg.t);
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ }
+ brw_ENDIF(p, is_neg2);
+ }
+ brw_ENDIF(p, is_negative);
+ }
+ brw_ENDIF(p, plane_active);
+
+ /* plane_ptr++;
+ */
+ brw_ADD(p, get_addr_reg(plane_ptr), get_addr_reg(plane_ptr), brw_clip_plane_stride(c));
+
+ /* while (planemask>>=1) != 0
+ */
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
+ brw_SHR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud(1));
+ }
+ brw_WHILE(p, plane_loop);
+
+ brw_ADD(p, c->reg.t, c->reg.t0, c->reg.t1);
+ brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_L, c->reg.t, brw_imm_f(1.0));
+ not_culled = brw_IF(p, BRW_EXECUTE_1);
+ {
+ brw_clip_interp_vertex(c, newvtx0, vtx0, vtx1, c->reg.t0, FALSE);
+ brw_clip_interp_vertex(c, newvtx1, vtx1, vtx0, c->reg.t1, FALSE);
+
+ brw_clip_emit_vue(c, newvtx0, 1, 0, (_3DPRIM_LINESTRIP << 2) | R02_PRIM_START);
+ brw_clip_emit_vue(c, newvtx1, 0, 1, (_3DPRIM_LINESTRIP << 2) | R02_PRIM_END);
+ }
+ brw_ENDIF(p, not_culled);
+ brw_clip_kill_thread(c);
+}
+
+
+
+void brw_emit_line_clip( struct brw_clip_compile *c )
+{
+ brw_clip_line_alloc_regs(c);
+
+ if (c->key.do_flat_shading)
+ brw_clip_copy_colors(c, 0, 1);
+
+ clip_and_emit_line(c);
+}
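
For reference, the parametric interval clip that clip_and_emit_line() encodes
in EU assembly looks like this in plain C.  This is a host-side illustration
of the algorithm in the comment above, not driver code; nr_planes and the
plane[] layout are assumptions:

    static int
    clip_line_params(const float v0[4], const float v1[4],
                     const float plane[][4], unsigned nr_planes,
                     unsigned clipmask, float *t0_out, float *t1_out)
    {
       float t0 = 0.0f, t1 = 0.0f;
       unsigned p;

       for (p = 0; p < nr_planes; p++) {
          if (clipmask & (1u << p)) {
             float dp0 = v0[0]*plane[p][0] + v0[1]*plane[p][1] +
                         v0[2]*plane[p][2] + v0[3]*plane[p][3];
             float dp1 = v1[0]*plane[p][0] + v1[1]*plane[p][1] +
                         v1[2]*plane[p][2] + v1[3]*plane[p][3];

             if (dp1 < 0.0f) {
                float t = dp1 / (dp1 - dp0);
                if (t > t1) t1 = t;
             }
             else if (dp0 < 0.0f) {   /* both negative was culled earlier */
                float t = dp0 / (dp0 - dp1);
                if (t > t0) t0 = t;
             }

             if (t0 + t1 >= 1.0f)
                return 0;             /* line completely clipped away */
          }
       }

       *t0_out = t0;   /* move v0 towards v1 by t0 */
       *t1_out = t1;   /* move v1 towards v0 by t1 */
       return 1;
    }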
diff --git a/src/gallium/drivers/i965simple/brw_clip_point.c b/src/gallium/drivers/i965simple/brw_clip_point.c
new file mode 100644
index 0000000000..6fce7210d1
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_clip_point.c
@@ -0,0 +1,47 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "brw_defines.h"
+#include "brw_context.h"
+#include "brw_eu.h"
+#include "brw_util.h"
+#include "brw_clip.h"
+
+
+/* Point clipping, nothing to do?
+ */
+void brw_emit_point_clip( struct brw_clip_compile *c )
+{
+ /* Send an empty message to kill the thread:
+ */
+ brw_clip_tri_alloc_regs(c, 0);
+ brw_clip_kill_thread(c);
+}
diff --git a/src/gallium/drivers/i965simple/brw_clip_state.c b/src/gallium/drivers/i965simple/brw_clip_state.c
new file mode 100644
index 0000000000..8e78dd51be
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_clip_state.c
@@ -0,0 +1,93 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "brw_context.h"
+#include "brw_state.h"
+#include "brw_defines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+
+static void upload_clip_unit( struct brw_context *brw )
+{
+ struct brw_clip_unit_state clip;
+
+ memset(&clip, 0, sizeof(clip));
+
+ /* CACHE_NEW_CLIP_PROG */
+ clip.thread0.grf_reg_count =
+ align(brw->clip.prog_data->total_grf, 16) / 16 - 1;
+ clip.thread0.kernel_start_pointer = brw->clip.prog_gs_offset >> 6;
+ clip.thread3.urb_entry_read_length = brw->clip.prog_data->urb_read_length;
+ clip.thread3.const_urb_entry_read_length = brw->clip.prog_data->curb_read_length;
+ clip.clip5.clip_mode = brw->clip.prog_data->clip_mode;
+
+ /* BRW_NEW_CURBE_OFFSETS */
+ clip.thread3.const_urb_entry_read_offset = brw->curbe.clip_start * 2;
+
+ /* BRW_NEW_URB_FENCE */
+ clip.thread4.nr_urb_entries = brw->urb.nr_clip_entries;
+ clip.thread4.urb_entry_allocation_size = brw->urb.vsize - 1;
+ clip.thread4.max_threads = 1; /* 2 threads */
+
+ if (BRW_DEBUG & DEBUG_STATS)
+ clip.thread4.stats_enable = 1;
+
+ /* CONSTANT */
+ clip.thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;
+ clip.thread1.single_program_flow = 1;
+ clip.thread3.dispatch_grf_start_reg = 1;
+ clip.thread3.urb_entry_read_offset = 0;
+ clip.clip5.userclip_enable_flags = 0x7f;
+ clip.clip5.userclip_must_clip = 1;
+ clip.clip5.guard_band_enable = 0;
+ clip.clip5.viewport_z_clip_enable = 1;
+ clip.clip5.viewport_xy_clip_enable = 1;
+ clip.clip5.vertex_position_space = BRW_CLIP_NDCSPACE;
+ clip.clip5.api_mode = BRW_CLIP_API_OGL;
+ clip.clip6.clipper_viewport_state_ptr = 0;
+ clip.viewport_xmin = -1;
+ clip.viewport_xmax = 1;
+ clip.viewport_ymin = -1;
+ clip.viewport_ymax = 1;
+
+ brw->clip.state_gs_offset = brw_cache_data( &brw->cache[BRW_CLIP_UNIT], &clip );
+}
+
+
+const struct brw_tracked_state brw_clip_unit = {
+ .dirty = {
+ .brw = (BRW_NEW_CURBE_OFFSETS |
+ BRW_NEW_URB_FENCE),
+ .cache = CACHE_NEW_CLIP_PROG
+ },
+ .update = upload_clip_unit
+};
diff --git a/src/gallium/drivers/i965simple/brw_clip_tri.c b/src/gallium/drivers/i965simple/brw_clip_tri.c
new file mode 100644
index 0000000000..c5da7b825e
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_clip_tri.c
@@ -0,0 +1,566 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "brw_defines.h"
+#include "brw_context.h"
+#include "brw_eu.h"
+#include "brw_util.h"
+#include "brw_clip.h"
+
+static struct brw_reg get_tmp( struct brw_clip_compile *c )
+{
+ struct brw_reg tmp = brw_vec4_grf(c->last_tmp, 0);
+
+ if (++c->last_tmp > c->prog_data.total_grf)
+ c->prog_data.total_grf = c->last_tmp;
+
+ return tmp;
+}
+
+static void release_tmps( struct brw_clip_compile *c )
+{
+ c->last_tmp = c->first_tmp;
+}
+
+
+void brw_clip_tri_alloc_regs( struct brw_clip_compile *c,
+ unsigned nr_verts )
+{
+ unsigned i = 0,j;
+
+ /* Register usage is static, precompute here:
+ */
+ c->reg.R0 = retype(brw_vec8_grf(i, 0), BRW_REGISTER_TYPE_UD); i++;
+
+ if (c->key.nr_userclip) {
+ c->reg.fixed_planes = brw_vec4_grf(i, 0);
+ i += (6 + c->key.nr_userclip + 1) / 2;
+
+ c->prog_data.curb_read_length = (6 + c->key.nr_userclip + 1) / 2;
+ }
+ else
+ c->prog_data.curb_read_length = 0;
+
+
+ /* Payload vertices plus space for more generated vertices:
+ */
+ for (j = 0; j < nr_verts; j++) {
+ c->reg.vertex[j] = brw_vec4_grf(i, 0);
+ i += c->nr_regs;
+ }
+
+ if (c->nr_attrs & 1) {
+ for (j = 0; j < 3; j++) {
+ unsigned delta = c->nr_attrs*16 + 32;
+ brw_MOV(&c->func, byte_offset(c->reg.vertex[j], delta), brw_imm_f(0));
+ }
+ }
+
+ c->reg.t = brw_vec1_grf(i, 0);
+ c->reg.loopcount = retype(brw_vec1_grf(i, 1), BRW_REGISTER_TYPE_UD);
+ c->reg.nr_verts = retype(brw_vec1_grf(i, 2), BRW_REGISTER_TYPE_UD);
+ c->reg.planemask = retype(brw_vec1_grf(i, 3), BRW_REGISTER_TYPE_UD);
+ c->reg.plane_equation = brw_vec4_grf(i, 4);
+ i++;
+
+ c->reg.dpPrev = brw_vec1_grf(i, 0); /* fixme - dp4 will clobber r.1,2,3 */
+ c->reg.dp = brw_vec1_grf(i, 4);
+ i++;
+
+ c->reg.inlist = brw_uw16_reg(BRW_GENERAL_REGISTER_FILE, i, 0);
+ i++;
+
+ c->reg.outlist = brw_uw16_reg(BRW_GENERAL_REGISTER_FILE, i, 0);
+ i++;
+
+ c->reg.freelist = brw_uw16_reg(BRW_GENERAL_REGISTER_FILE, i, 0);
+ i++;
+
+ if (!c->key.nr_userclip) {
+ c->reg.fixed_planes = brw_vec8_grf(i, 0);
+ i++;
+ }
+
+ if (c->key.do_unfilled) {
+ c->reg.dir = brw_vec4_grf(i, 0);
+ c->reg.offset = brw_vec4_grf(i, 4);
+ i++;
+ c->reg.tmp0 = brw_vec4_grf(i, 0);
+ c->reg.tmp1 = brw_vec4_grf(i, 4);
+ i++;
+ }
+
+ c->first_tmp = i;
+ c->last_tmp = i;
+
+ c->prog_data.urb_read_length = c->nr_regs; /* ? */
+ c->prog_data.total_grf = i;
+}
+
+
+
+void brw_clip_tri_init_vertices( struct brw_clip_compile *c )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg tmp0 = c->reg.loopcount; /* handy temporary */
+ struct brw_instruction *is_rev;
+
+ /* Initial list of indices for incoming vertexes:
+ */
+ brw_AND(p, tmp0, get_element_ud(c->reg.R0, 2), brw_imm_ud(PRIM_MASK));
+ brw_CMP(p,
+ vec1(brw_null_reg()),
+ BRW_CONDITIONAL_EQ,
+ tmp0,
+ brw_imm_ud(_3DPRIM_TRISTRIP_REVERSE));
+
+ /* XXX: Is there an easier way to do this? Need to reverse every
+ * second tristrip element: Can ignore sometimes?
+ */
+ is_rev = brw_IF(p, BRW_EXECUTE_1);
+ {
+ brw_MOV(p, get_element(c->reg.inlist, 0), brw_address(c->reg.vertex[1]) );
+ brw_MOV(p, get_element(c->reg.inlist, 1), brw_address(c->reg.vertex[0]) );
+ if (c->need_direction)
+ brw_MOV(p, c->reg.dir, brw_imm_f(-1));
+ }
+ is_rev = brw_ELSE(p, is_rev);
+ {
+ brw_MOV(p, get_element(c->reg.inlist, 0), brw_address(c->reg.vertex[0]) );
+ brw_MOV(p, get_element(c->reg.inlist, 1), brw_address(c->reg.vertex[1]) );
+ if (c->need_direction)
+ brw_MOV(p, c->reg.dir, brw_imm_f(1));
+ }
+ brw_ENDIF(p, is_rev);
+
+ brw_MOV(p, get_element(c->reg.inlist, 2), brw_address(c->reg.vertex[2]) );
+ brw_MOV(p, brw_vec8_grf(c->reg.outlist.nr, 0), brw_imm_f(0));
+ brw_MOV(p, c->reg.nr_verts, brw_imm_ud(3));
+}
+
+
+
+void brw_clip_tri_flat_shade( struct brw_clip_compile *c )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_instruction *is_poly;
+ struct brw_reg tmp0 = c->reg.loopcount; /* handy temporary */
+
+ brw_AND(p, tmp0, get_element_ud(c->reg.R0, 2), brw_imm_ud(PRIM_MASK));
+ brw_CMP(p,
+ vec1(brw_null_reg()),
+ BRW_CONDITIONAL_EQ,
+ tmp0,
+ brw_imm_ud(_3DPRIM_POLYGON));
+
+ is_poly = brw_IF(p, BRW_EXECUTE_1);
+ {
+ brw_clip_copy_colors(c, 1, 0);
+ brw_clip_copy_colors(c, 2, 0);
+ }
+ is_poly = brw_ELSE(p, is_poly);
+ {
+ brw_clip_copy_colors(c, 0, 2);
+ brw_clip_copy_colors(c, 1, 2);
+ }
+ brw_ENDIF(p, is_poly);
+}
+
+
+
+/* Use Mesa's clipping algorithms, translated to GEN4 assembly.
+ */
+void brw_clip_tri( struct brw_clip_compile *c )
+{
+#if 0
+ struct brw_compile *p = &c->func;
+ struct brw_indirect vtx = brw_indirect(0, 0);
+ struct brw_indirect vtxPrev = brw_indirect(1, 0);
+ struct brw_indirect vtxOut = brw_indirect(2, 0);
+ struct brw_indirect plane_ptr = brw_indirect(3, 0);
+ struct brw_indirect inlist_ptr = brw_indirect(4, 0);
+ struct brw_indirect outlist_ptr = brw_indirect(5, 0);
+ struct brw_indirect freelist_ptr = brw_indirect(6, 0);
+ struct brw_instruction *plane_loop;
+ struct brw_instruction *plane_active;
+ struct brw_instruction *vertex_loop;
+ struct brw_instruction *next_test;
+ struct brw_instruction *prev_test;
+
+ brw_MOV(p, get_addr_reg(vtxPrev), brw_address(c->reg.vertex[2]) );
+ brw_MOV(p, get_addr_reg(plane_ptr), brw_clip_plane0_address(c));
+ brw_MOV(p, get_addr_reg(inlist_ptr), brw_address(c->reg.inlist));
+ brw_MOV(p, get_addr_reg(outlist_ptr), brw_address(c->reg.outlist));
+
+ brw_MOV(p, get_addr_reg(freelist_ptr), brw_address(c->reg.vertex[3]) );
+
+ plane_loop = brw_DO(p, BRW_EXECUTE_1);
+ {
+ /* if (planemask & 1)
+ */
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
+ brw_AND(p, vec1(brw_null_reg()), c->reg.planemask, brw_imm_ud(1));
+
+ plane_active = brw_IF(p, BRW_EXECUTE_1);
+ {
+ /* vtxOut = freelist_ptr++
+ */
+ brw_MOV(p, get_addr_reg(vtxOut), get_addr_reg(freelist_ptr) );
+ brw_ADD(p, get_addr_reg(freelist_ptr), get_addr_reg(freelist_ptr), brw_imm_uw(c->nr_regs * REG_SIZE));
+
+ if (c->key.nr_userclip)
+ brw_MOV(p, c->reg.plane_equation, deref_4f(plane_ptr, 0));
+ else
+ brw_MOV(p, c->reg.plane_equation, deref_4b(plane_ptr, 0));
+
+ brw_MOV(p, c->reg.loopcount, c->reg.nr_verts);
+ brw_MOV(p, c->reg.nr_verts, brw_imm_ud(0));
+
+ vertex_loop = brw_DO(p, BRW_EXECUTE_1);
+ {
+ /* vtx = *input_ptr;
+ */
+ brw_MOV(p, get_addr_reg(vtx), deref_1uw(inlist_ptr, 0));
+
+ /* IS_NEGATIVE(prev) */
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
+ brw_DP4(p, vec4(c->reg.dpPrev), deref_4f(vtxPrev, c->offset[VERT_RESULT_HPOS]), c->reg.plane_equation);
+ prev_test = brw_IF(p, BRW_EXECUTE_1);
+ {
+ /* IS_POSITIVE(next)
+ */
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_GE);
+ brw_DP4(p, vec4(c->reg.dp), deref_4f(vtx, c->offset[VERT_RESULT_HPOS]), c->reg.plane_equation);
+ next_test = brw_IF(p, BRW_EXECUTE_1);
+ {
+
+ /* Coming back in.
+ */
+ brw_ADD(p, c->reg.t, c->reg.dpPrev, negate(c->reg.dp));
+ brw_math_invert(p, c->reg.t, c->reg.t);
+ brw_MUL(p, c->reg.t, c->reg.t, c->reg.dpPrev);
+
+ /* If (vtxOut == 0) vtxOut = vtxPrev
+ */
+ brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_EQ, get_addr_reg(vtxOut), brw_imm_uw(0) );
+ brw_MOV(p, get_addr_reg(vtxOut), get_addr_reg(vtxPrev) );
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+
+ brw_clip_interp_vertex(c, vtxOut, vtxPrev, vtx, c->reg.t, FALSE);
+
+ /* *outlist_ptr++ = vtxOut;
+ * nr_verts++;
+ * vtxOut = 0;
+ */
+ brw_MOV(p, deref_1uw(outlist_ptr, 0), get_addr_reg(vtxOut));
+ brw_ADD(p, get_addr_reg(outlist_ptr), get_addr_reg(outlist_ptr), brw_imm_uw(sizeof(short)));
+ brw_ADD(p, c->reg.nr_verts, c->reg.nr_verts, brw_imm_ud(1));
+ brw_MOV(p, get_addr_reg(vtxOut), brw_imm_uw(0) );
+ }
+ brw_ENDIF(p, next_test);
+
+ }
+ prev_test = brw_ELSE(p, prev_test);
+ {
+ /* *outlist_ptr++ = vtxPrev;
+ * nr_verts++;
+ */
+ brw_MOV(p, deref_1uw(outlist_ptr, 0), get_addr_reg(vtxPrev));
+ brw_ADD(p, get_addr_reg(outlist_ptr), get_addr_reg(outlist_ptr), brw_imm_uw(sizeof(short)));
+ brw_ADD(p, c->reg.nr_verts, c->reg.nr_verts, brw_imm_ud(1));
+
+ /* IS_NEGATIVE(next)
+ */
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
+ brw_DP4(p, vec4(c->reg.dp), deref_4f(vtx, c->offset[VERT_RESULT_HPOS]), c->reg.plane_equation);
+ next_test = brw_IF(p, BRW_EXECUTE_1);
+ {
+ /* Going out of bounds. Avoid division by zero as we
+ * know dp != dpPrev from DIFFERENT_SIGNS, above.
+ */
+ brw_ADD(p, c->reg.t, c->reg.dp, negate(c->reg.dpPrev));
+ brw_math_invert(p, c->reg.t, c->reg.t);
+ brw_MUL(p, c->reg.t, c->reg.t, c->reg.dp);
+
+ /* If (vtxOut == 0) vtxOut = vtx
+ */
+ brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_EQ, get_addr_reg(vtxOut), brw_imm_uw(0) );
+ brw_MOV(p, get_addr_reg(vtxOut), get_addr_reg(vtx) );
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+
+ brw_clip_interp_vertex(c, vtxOut, vtx, vtxPrev, c->reg.t, TRUE);
+
+ /* *outlist_ptr++ = vtxOut;
+ * nr_verts++;
+ * vtxOut = 0;
+ */
+ brw_MOV(p, deref_1uw(outlist_ptr, 0), get_addr_reg(vtxOut));
+ brw_ADD(p, get_addr_reg(outlist_ptr), get_addr_reg(outlist_ptr), brw_imm_uw(sizeof(short)));
+ brw_ADD(p, c->reg.nr_verts, c->reg.nr_verts, brw_imm_ud(1));
+ brw_MOV(p, get_addr_reg(vtxOut), brw_imm_uw(0) );
+ }
+ brw_ENDIF(p, next_test);
+ }
+ brw_ENDIF(p, prev_test);
+
+ /* vtxPrev = vtx;
+ * inlist_ptr++;
+ */
+ brw_MOV(p, get_addr_reg(vtxPrev), get_addr_reg(vtx));
+ brw_ADD(p, get_addr_reg(inlist_ptr), get_addr_reg(inlist_ptr), brw_imm_uw(sizeof(short)));
+
+ /* while (--loopcount != 0)
+ */
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
+ brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
+ }
+ brw_WHILE(p, vertex_loop);
+
+ /* vtxPrev = *(outlist_ptr-1) OR: outlist[nr_verts-1]
+ * inlist = outlist
+ * inlist_ptr = &inlist[0]
+ * outlist_ptr = &outlist[0]
+ */
+ brw_ADD(p, get_addr_reg(outlist_ptr), get_addr_reg(outlist_ptr), brw_imm_w(-2));
+ brw_MOV(p, get_addr_reg(vtxPrev), deref_1uw(outlist_ptr, 0));
+ brw_MOV(p, brw_vec8_grf(c->reg.inlist.nr, 0), brw_vec8_grf(c->reg.outlist.nr, 0));
+ brw_MOV(p, get_addr_reg(inlist_ptr), brw_address(c->reg.inlist));
+ brw_MOV(p, get_addr_reg(outlist_ptr), brw_address(c->reg.outlist));
+ }
+ brw_ENDIF(p, plane_active);
+
+ /* plane_ptr++;
+ */
+ brw_ADD(p, get_addr_reg(plane_ptr), get_addr_reg(plane_ptr), brw_clip_plane_stride(c));
+
+ /* nr_verts >= 3
+ */
+ brw_CMP(p,
+ vec1(brw_null_reg()),
+ BRW_CONDITIONAL_GE,
+ c->reg.nr_verts,
+ brw_imm_ud(3));
+
+ /* && (planemask>>=1) != 0
+ */
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
+ brw_SHR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud(1));
+ }
+ brw_WHILE(p, plane_loop);
+#else
+ #warning "disabled"
+#endif
+}
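+
+/* A rough C sketch of the Sutherland-Hodgman loop emitted above,
+ * assembled from the inline comments (illustrative only -- the vtxOut
+ * scratch-slot bookkeeping and predication are omitted, and this is
+ * never compiled):
+ *
+ *    do {                                          // per active plane
+ *       do {                                       // per input vertex
+ *          vtx    = *inlist_ptr;
+ *          dpPrev = DP4(vtxPrev->hpos, plane);
+ *          dp     = DP4(vtx->hpos, plane);
+ *
+ *          if (dpPrev < 0) {                       // prev is outside
+ *             if (dp >= 0) {                       // coming back in
+ *                t = dpPrev / (dpPrev - dp);
+ *                *outlist_ptr++ = interp(vtxPrev, vtx, t);  nr_verts++;
+ *             }
+ *          } else {
+ *             *outlist_ptr++ = vtxPrev;  nr_verts++;
+ *             if (dp < 0) {                        // going out
+ *                t = dp / (dp - dpPrev);
+ *                *outlist_ptr++ = interp(vtx, vtxPrev, t);  nr_verts++;
+ *             }
+ *          }
+ *
+ *          vtxPrev = vtx;  inlist_ptr++;
+ *       } while (--loopcount != 0);
+ *
+ *       vtxPrev = outlist[nr_verts - 1];
+ *       inlist  = outlist;                         // swap lists
+ *       plane_ptr += plane_stride;
+ *    } while (nr_verts >= 3 && (planemask >>= 1) != 0);
+ */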
+
+
+
+void brw_clip_tri_emit_polygon(struct brw_clip_compile *c)
+{
+ struct brw_compile *p = &c->func;
+ struct brw_instruction *loop, *if_insn;
+
+ /* for (loopcount = nr_verts-2; loopcount > 0; loopcount--)
+ */
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_G);
+ brw_ADD(p,
+ c->reg.loopcount,
+ c->reg.nr_verts,
+ brw_imm_d(-2));
+
+ if_insn = brw_IF(p, BRW_EXECUTE_1);
+ {
+ struct brw_indirect v0 = brw_indirect(0, 0);
+ struct brw_indirect vptr = brw_indirect(1, 0);
+
+ brw_MOV(p, get_addr_reg(vptr), brw_address(c->reg.inlist));
+ brw_MOV(p, get_addr_reg(v0), deref_1uw(vptr, 0));
+
+ brw_clip_emit_vue(c, v0, 1, 0, ((_3DPRIM_TRIFAN << 2) | R02_PRIM_START));
+
+ brw_ADD(p, get_addr_reg(vptr), get_addr_reg(vptr), brw_imm_uw(2));
+ brw_MOV(p, get_addr_reg(v0), deref_1uw(vptr, 0));
+
+ loop = brw_DO(p, BRW_EXECUTE_1);
+ {
+ brw_clip_emit_vue(c, v0, 1, 0, (_3DPRIM_TRIFAN << 2));
+
+ brw_ADD(p, get_addr_reg(vptr), get_addr_reg(vptr), brw_imm_uw(2));
+ brw_MOV(p, get_addr_reg(v0), deref_1uw(vptr, 0));
+
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
+ brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
+ }
+ brw_WHILE(p, loop);
+
+ brw_clip_emit_vue(c, v0, 0, 1, ((_3DPRIM_TRIFAN << 2) | R02_PRIM_END));
+ }
+ brw_ENDIF(p, if_insn);
+}
+
+static void do_clip_tri( struct brw_clip_compile *c )
+{
+ brw_clip_init_planes(c);
+
+ brw_clip_tri(c);
+}
+
+
+static void maybe_do_clip_tri( struct brw_clip_compile *c )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_instruction *do_clip;
+
+ brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_NZ, c->reg.planemask, brw_imm_ud(0));
+ do_clip = brw_IF(p, BRW_EXECUTE_1);
+ {
+ do_clip_tri(c);
+ }
+ brw_ENDIF(p, do_clip);
+}
+
+static void brw_clip_test( struct brw_clip_compile *c )
+{
+#if 0
+ struct brw_reg t = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);
+ struct brw_reg t1 = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);
+ struct brw_reg t2 = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);
+ struct brw_reg t3 = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);
+
+ struct brw_reg v0 = get_tmp(c);
+ struct brw_reg v1 = get_tmp(c);
+ struct brw_reg v2 = get_tmp(c);
+
+ struct brw_indirect vt0 = brw_indirect(0, 0);
+ struct brw_indirect vt1 = brw_indirect(1, 0);
+ struct brw_indirect vt2 = brw_indirect(2, 0);
+
+ struct brw_compile *p = &c->func;
+
+ brw_MOV(p, get_addr_reg(vt0), brw_address(c->reg.vertex[0]));
+ brw_MOV(p, get_addr_reg(vt1), brw_address(c->reg.vertex[1]));
+ brw_MOV(p, get_addr_reg(vt2), brw_address(c->reg.vertex[2]));
+ brw_MOV(p, v0, deref_4f(vt0, c->offset[VERT_RESULT_HPOS]));
+ brw_MOV(p, v1, deref_4f(vt1, c->offset[VERT_RESULT_HPOS]));
+ brw_MOV(p, v2, deref_4f(vt2, c->offset[VERT_RESULT_HPOS]));
+
+ /* test nearz, xmin, ymin plane */
+ brw_CMP(p, t1, BRW_CONDITIONAL_LE, negate(v0), get_element(v0, 3));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ brw_CMP(p, t2, BRW_CONDITIONAL_LE, negate(v1), get_element(v1, 3));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ brw_CMP(p, t3, BRW_CONDITIONAL_LE, negate(v2), get_element(v2, 3));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ brw_XOR(p, t, t1, t2);
+ brw_XOR(p, t1, t2, t3);
+ brw_OR(p, t, t, t1);
+
+ brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_NZ,
+ get_element(t, 0), brw_imm_ud(0));
+ brw_OR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud((1<<5)));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_NZ,
+ get_element(t, 1), brw_imm_ud(0));
+ brw_OR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud((1<<3)));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_NZ,
+ get_element(t, 2), brw_imm_ud(0));
+ brw_OR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud((1<<1)));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+
+ /* test farz, xmax, ymax plane */
+ brw_CMP(p, t1, BRW_CONDITIONAL_L, v0, get_element(v0, 3));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ brw_CMP(p, t2, BRW_CONDITIONAL_L, v1, get_element(v1, 3));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ brw_CMP(p, t3, BRW_CONDITIONAL_L, v2, get_element(v2, 3));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+
+ brw_XOR(p, t, t1, t2);
+ brw_XOR(p, t1, t2, t3);
+ brw_OR(p, t, t, t1);
+
+ brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_NZ,
+ get_element(t, 0), brw_imm_ud(0));
+ brw_OR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud((1<<4)));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_NZ,
+ get_element(t, 1), brw_imm_ud(0));
+ brw_OR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud((1<<2)));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_NZ,
+ get_element(t, 2), brw_imm_ud(0));
+ brw_OR(p, c->reg.planemask, c->reg.planemask, brw_imm_ud((1<<0)));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+
+ release_tmps(c);
+#else
+ #warning "disabled"
+#endif
+}
+
+
+void brw_emit_tri_clip( struct brw_clip_compile *c )
+{
+ struct brw_instruction *neg_rhw;
+ struct brw_compile *p = &c->func;
+ brw_clip_tri_alloc_regs(c, 3 + c->key.nr_userclip + 6);
+ brw_clip_tri_init_vertices(c);
+ brw_clip_init_clipmask(c);
+
+   /* If the -ve rhw workaround bit is set, do the cliptest:
+    */
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
+ brw_AND(p, brw_null_reg(), get_element_ud(c->reg.R0, 2),
+ brw_imm_ud(1<<20));
+ neg_rhw = brw_IF(p, BRW_EXECUTE_1);
+ {
+ brw_clip_test(c);
+ }
+ brw_ENDIF(p, neg_rhw);
+
+ /* Can't push into do_clip_tri because with polygon (or quad)
+ * flatshading, need to apply the flatshade here because we don't
+ * respect the PV when converting to trifan for emit:
+ */
+ if (c->key.do_flat_shading)
+ brw_clip_tri_flat_shade(c);
+
+ if (c->key.clip_mode == BRW_CLIPMODE_NORMAL)
+ do_clip_tri(c);
+ else
+ maybe_do_clip_tri(c);
+
+ brw_clip_tri_emit_polygon(c);
+
+ /* Send an empty message to kill the thread:
+ */
+ brw_clip_kill_thread(c);
+}
diff --git a/src/gallium/drivers/i965simple/brw_clip_unfilled.c b/src/gallium/drivers/i965simple/brw_clip_unfilled.c
new file mode 100644
index 0000000000..b774a76dd6
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_clip_unfilled.c
@@ -0,0 +1,477 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "brw_defines.h"
+#include "brw_context.h"
+#include "brw_eu.h"
+#include "brw_util.h"
+#include "brw_clip.h"
+
+
+
+/* This is performed against the original triangles, so no indirection
+ * required:
+ * BZZZT!
+ */
+static void compute_tri_direction( struct brw_clip_compile *c )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg e = c->reg.tmp0;
+ struct brw_reg f = c->reg.tmp1;
+ struct brw_reg v0 = byte_offset(c->reg.vertex[0], c->offset[VERT_RESULT_HPOS]);
+ struct brw_reg v1 = byte_offset(c->reg.vertex[1], c->offset[VERT_RESULT_HPOS]);
+ struct brw_reg v2 = byte_offset(c->reg.vertex[2], c->offset[VERT_RESULT_HPOS]);
+
+
+ /* Calculate the vectors of two edges of the triangle:
+ */
+ brw_ADD(p, e, v0, negate(v2));
+ brw_ADD(p, f, v1, negate(v2));
+
+ /* Take their crossproduct:
+ */
+ brw_set_access_mode(p, BRW_ALIGN_16);
+ brw_MUL(p, vec4(brw_null_reg()), brw_swizzle(e, 1,2,0,3), brw_swizzle(f,2,0,1,3));
+ brw_MAC(p, vec4(e), negate(brw_swizzle(e, 2,0,1,3)), brw_swizzle(f,1,2,0,3));
+ brw_set_access_mode(p, BRW_ALIGN_1);
+
+ brw_MUL(p, c->reg.dir, c->reg.dir, vec4(e));
+}
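+
+/* In scalar terms the above computes roughly (an illustrative sketch,
+ * not the emitted code):
+ *
+ *    e   = v0 - v2;
+ *    f   = v1 - v2;
+ *    dir = dir * cross(e, f);      // only the sign of dir.z is used below
+ */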
+
+
+static void cull_direction( struct brw_clip_compile *c )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_instruction *ccw;
+ unsigned conditional;
+
+ assert (!(c->key.fill_ccw == CLIP_CULL &&
+ c->key.fill_cw == CLIP_CULL));
+
+ if (c->key.fill_ccw == CLIP_CULL)
+ conditional = BRW_CONDITIONAL_GE;
+ else
+ conditional = BRW_CONDITIONAL_L;
+
+ brw_CMP(p,
+ vec1(brw_null_reg()),
+ conditional,
+ get_element(c->reg.dir, 2),
+ brw_imm_f(0));
+
+ ccw = brw_IF(p, BRW_EXECUTE_1);
+ {
+ brw_clip_kill_thread(c);
+ }
+ brw_ENDIF(p, ccw);
+}
+
+
+
+static void copy_bfc( struct brw_clip_compile *c )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_instruction *ccw;
+ unsigned conditional;
+
+ /* Do we have any colors to copy?
+ */
+ if (!(c->offset[VERT_RESULT_COL0] && c->offset[VERT_RESULT_BFC0]) &&
+ !(c->offset[VERT_RESULT_COL1] && c->offset[VERT_RESULT_BFC1]))
+ return;
+
+   /* In some weird degenerate cases we can end up testing the
+    * direction twice, once for culling and once for bfc copying.  Oh
+    * well, that's what you get for setting weird GL state.
+ */
+ if (c->key.copy_bfc_ccw)
+ conditional = BRW_CONDITIONAL_GE;
+ else
+ conditional = BRW_CONDITIONAL_L;
+
+ brw_CMP(p,
+ vec1(brw_null_reg()),
+ conditional,
+ get_element(c->reg.dir, 2),
+ brw_imm_f(0));
+
+ ccw = brw_IF(p, BRW_EXECUTE_1);
+ {
+ unsigned i;
+
+ for (i = 0; i < 3; i++) {
+ if (c->offset[VERT_RESULT_COL0] && c->offset[VERT_RESULT_BFC0])
+ brw_MOV(p,
+ byte_offset(c->reg.vertex[i], c->offset[VERT_RESULT_COL0]),
+ byte_offset(c->reg.vertex[i], c->offset[VERT_RESULT_BFC0]));
+
+ if (c->offset[VERT_RESULT_COL1] && c->offset[VERT_RESULT_BFC1])
+ brw_MOV(p,
+ byte_offset(c->reg.vertex[i], c->offset[VERT_RESULT_COL1]),
+ byte_offset(c->reg.vertex[i], c->offset[VERT_RESULT_BFC1]));
+ }
+ }
+ brw_ENDIF(p, ccw);
+}
+
+
+
+
+/*
+ float iz = 1.0 / dir.z;
+ float ac = dir.x * iz;
+ float bc = dir.y * iz;
+ offset = ctx->Polygon.OffsetUnits * DEPTH_SCALE;
+ offset += MAX2( abs(ac), abs(bc) ) * ctx->Polygon.OffsetFactor;
+ offset *= MRD;
+*/
+static void compute_offset( struct brw_clip_compile *c )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg off = c->reg.offset;
+ struct brw_reg dir = c->reg.dir;
+
+ brw_math_invert(p, get_element(off, 2), get_element(dir, 2));
+ brw_MUL(p, vec2(off), dir, get_element(off, 2));
+
+ brw_CMP(p,
+ vec1(brw_null_reg()),
+ BRW_CONDITIONAL_GE,
+ brw_abs(get_element(off, 0)),
+ brw_abs(get_element(off, 1)));
+
+ brw_SEL(p, vec1(off), brw_abs(get_element(off, 0)), brw_abs(get_element(off, 1)));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+
+ brw_MUL(p, vec1(off), off, brw_imm_f(c->key.offset_factor));
+ brw_ADD(p, vec1(off), off, brw_imm_f(c->key.offset_units));
+}
+
+
+static void merge_edgeflags( struct brw_clip_compile *c )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_instruction *is_poly;
+ struct brw_reg tmp0 = get_element_ud(c->reg.tmp0, 0);
+
+ brw_AND(p, tmp0, get_element_ud(c->reg.R0, 2), brw_imm_ud(PRIM_MASK));
+ brw_CMP(p,
+ vec1(brw_null_reg()),
+ BRW_CONDITIONAL_EQ,
+ tmp0,
+ brw_imm_ud(_3DPRIM_POLYGON));
+
+ /* Get away with using reg.vertex because we know that this is not
+ * a _3DPRIM_TRISTRIP_REVERSE:
+ */
+ is_poly = brw_IF(p, BRW_EXECUTE_1);
+ {
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_EQ);
+ brw_AND(p, vec1(brw_null_reg()), get_element_ud(c->reg.R0, 2), brw_imm_ud(1<<8));
+ brw_MOV(p, byte_offset(c->reg.vertex[0], c->offset[VERT_RESULT_EDGE]), brw_imm_f(0));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_EQ);
+ brw_AND(p, vec1(brw_null_reg()), get_element_ud(c->reg.R0, 2), brw_imm_ud(1<<9));
+ brw_MOV(p, byte_offset(c->reg.vertex[2], c->offset[VERT_RESULT_EDGE]), brw_imm_f(0));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ }
+ brw_ENDIF(p, is_poly);
+}
+
+
+
+static void apply_one_offset( struct brw_clip_compile *c,
+ struct brw_indirect vert )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg pos = deref_4f(vert, c->offset[VERT_RESULT_HPOS]);
+ struct brw_reg z = get_element(pos, 2);
+
+ brw_ADD(p, z, z, vec1(c->reg.offset));
+}
+
+
+
+/***********************************************************************
+ * Output clipped polygon as an unfilled primitive:
+ */
+static void emit_lines(struct brw_clip_compile *c,
+ boolean do_offset)
+{
+ struct brw_compile *p = &c->func;
+ struct brw_instruction *loop;
+ struct brw_instruction *draw_edge;
+ struct brw_indirect v0 = brw_indirect(0, 0);
+ struct brw_indirect v1 = brw_indirect(1, 0);
+ struct brw_indirect v0ptr = brw_indirect(2, 0);
+ struct brw_indirect v1ptr = brw_indirect(3, 0);
+
+   /* Need a separate loop for offset:
+ */
+ if (do_offset) {
+ brw_MOV(p, c->reg.loopcount, c->reg.nr_verts);
+ brw_MOV(p, get_addr_reg(v0ptr), brw_address(c->reg.inlist));
+
+ loop = brw_DO(p, BRW_EXECUTE_1);
+ {
+ brw_MOV(p, get_addr_reg(v0), deref_1uw(v0ptr, 0));
+ brw_ADD(p, get_addr_reg(v0ptr), get_addr_reg(v0ptr), brw_imm_uw(2));
+
+ apply_one_offset(c, v0);
+
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_G);
+ brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
+ }
+ brw_WHILE(p, loop);
+ }
+
+ /* v1ptr = &inlist[nr_verts]
+ * *v1ptr = v0
+ */
+ brw_MOV(p, c->reg.loopcount, c->reg.nr_verts);
+ brw_MOV(p, get_addr_reg(v0ptr), brw_address(c->reg.inlist));
+ brw_ADD(p, get_addr_reg(v1ptr), get_addr_reg(v0ptr), retype(c->reg.nr_verts, BRW_REGISTER_TYPE_UW));
+ brw_ADD(p, get_addr_reg(v1ptr), get_addr_reg(v1ptr), retype(c->reg.nr_verts, BRW_REGISTER_TYPE_UW));
+ brw_MOV(p, deref_1uw(v1ptr, 0), deref_1uw(v0ptr, 0));
+
+ loop = brw_DO(p, BRW_EXECUTE_1);
+ {
+ brw_MOV(p, get_addr_reg(v0), deref_1uw(v0ptr, 0));
+ brw_MOV(p, get_addr_reg(v1), deref_1uw(v0ptr, 2));
+ brw_ADD(p, get_addr_reg(v0ptr), get_addr_reg(v0ptr), brw_imm_uw(2));
+
+ /* draw edge if edgeflag != 0 */
+ brw_CMP(p,
+ vec1(brw_null_reg()), BRW_CONDITIONAL_NZ,
+ deref_1f(v0, c->offset[VERT_RESULT_EDGE]),
+ brw_imm_f(0));
+ draw_edge = brw_IF(p, BRW_EXECUTE_1);
+ {
+ brw_clip_emit_vue(c, v0, 1, 0, (_3DPRIM_LINESTRIP << 2) | R02_PRIM_START);
+ brw_clip_emit_vue(c, v1, 1, 0, (_3DPRIM_LINESTRIP << 2) | R02_PRIM_END);
+ }
+ brw_ENDIF(p, draw_edge);
+
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
+ brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
+ }
+ brw_WHILE(p, loop);
+}
+
+
+
+static void emit_points(struct brw_clip_compile *c,
+ boolean do_offset )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_instruction *loop;
+ struct brw_instruction *draw_point;
+
+ struct brw_indirect v0 = brw_indirect(0, 0);
+ struct brw_indirect v0ptr = brw_indirect(2, 0);
+
+ brw_MOV(p, c->reg.loopcount, c->reg.nr_verts);
+ brw_MOV(p, get_addr_reg(v0ptr), brw_address(c->reg.inlist));
+
+ loop = brw_DO(p, BRW_EXECUTE_1);
+ {
+ brw_MOV(p, get_addr_reg(v0), deref_1uw(v0ptr, 0));
+ brw_ADD(p, get_addr_reg(v0ptr), get_addr_reg(v0ptr), brw_imm_uw(2));
+
+ /* draw if edgeflag != 0
+ */
+ brw_CMP(p,
+ vec1(brw_null_reg()), BRW_CONDITIONAL_NZ,
+ deref_1f(v0, c->offset[VERT_RESULT_EDGE]),
+ brw_imm_f(0));
+ draw_point = brw_IF(p, BRW_EXECUTE_1);
+ {
+ if (do_offset)
+ apply_one_offset(c, v0);
+
+ brw_clip_emit_vue(c, v0, 1, 0, (_3DPRIM_POINTLIST << 2) | R02_PRIM_START | R02_PRIM_END);
+ }
+ brw_ENDIF(p, draw_point);
+
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
+ brw_ADD(p, c->reg.loopcount, c->reg.loopcount, brw_imm_d(-1));
+ }
+ brw_WHILE(p, loop);
+}
+
+
+
+
+
+
+
+static void emit_primitives( struct brw_clip_compile *c,
+ unsigned mode,
+ boolean do_offset )
+{
+ switch (mode) {
+ case CLIP_FILL:
+ brw_clip_tri_emit_polygon(c);
+ break;
+
+ case CLIP_LINE:
+ emit_lines(c, do_offset);
+ break;
+
+ case CLIP_POINT:
+ emit_points(c, do_offset);
+ break;
+
+ case CLIP_CULL:
+ assert(0);
+ break;
+ }
+}
+
+
+
+static void emit_unfilled_primitives( struct brw_clip_compile *c )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_instruction *ccw;
+
+ /* Direction culling has already been done.
+ */
+ if (c->key.fill_ccw != c->key.fill_cw &&
+ c->key.fill_ccw != CLIP_CULL &&
+ c->key.fill_cw != CLIP_CULL)
+ {
+ brw_CMP(p,
+ vec1(brw_null_reg()),
+ BRW_CONDITIONAL_GE,
+ get_element(c->reg.dir, 2),
+ brw_imm_f(0));
+
+ ccw = brw_IF(p, BRW_EXECUTE_1);
+ {
+ emit_primitives(c, c->key.fill_ccw, c->key.offset_ccw);
+ }
+ ccw = brw_ELSE(p, ccw);
+ {
+ emit_primitives(c, c->key.fill_cw, c->key.offset_cw);
+ }
+ brw_ENDIF(p, ccw);
+ }
+ else if (c->key.fill_cw != CLIP_CULL) {
+ emit_primitives(c, c->key.fill_cw, c->key.offset_cw);
+ }
+ else if (c->key.fill_ccw != CLIP_CULL) {
+ emit_primitives(c, c->key.fill_ccw, c->key.offset_ccw);
+ }
+}
+
+
+
+
+static void check_nr_verts( struct brw_clip_compile *c )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_instruction *if_insn;
+
+ brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_L, c->reg.nr_verts, brw_imm_d(3));
+ if_insn = brw_IF(p, BRW_EXECUTE_1);
+ {
+ brw_clip_kill_thread(c);
+ }
+ brw_ENDIF(p, if_insn);
+}
+
+
+void brw_emit_unfilled_clip( struct brw_clip_compile *c )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_instruction *do_clip;
+
+
+ c->need_direction = ((c->key.offset_ccw || c->key.offset_cw) ||
+ (c->key.fill_ccw != c->key.fill_cw) ||
+ c->key.fill_ccw == CLIP_CULL ||
+ c->key.fill_cw == CLIP_CULL ||
+ c->key.copy_bfc_cw ||
+ c->key.copy_bfc_ccw);
+
+ brw_clip_tri_alloc_regs(c, 3 + c->key.nr_userclip + 6);
+ brw_clip_tri_init_vertices(c);
+
+ assert(c->offset[VERT_RESULT_EDGE]);
+
+ if (c->key.fill_ccw == CLIP_CULL &&
+ c->key.fill_cw == CLIP_CULL) {
+ brw_clip_kill_thread(c);
+ return;
+ }
+
+ merge_edgeflags(c);
+
+ /* Need to use the inlist indirection here:
+ */
+ if (c->need_direction)
+ compute_tri_direction(c);
+
+ if (c->key.fill_ccw == CLIP_CULL ||
+ c->key.fill_cw == CLIP_CULL)
+ cull_direction(c);
+
+ if (c->key.offset_ccw ||
+ c->key.offset_cw)
+ compute_offset(c);
+
+ if (c->key.copy_bfc_ccw ||
+ c->key.copy_bfc_cw)
+ copy_bfc(c);
+
+ /* Need to do this whether we clip or not:
+ */
+ if (c->key.do_flat_shading)
+ brw_clip_tri_flat_shade(c);
+
+ brw_clip_init_clipmask(c);
+ brw_CMP(p, vec1(brw_null_reg()), BRW_CONDITIONAL_NZ, c->reg.planemask, brw_imm_ud(0));
+ do_clip = brw_IF(p, BRW_EXECUTE_1);
+ {
+ brw_clip_init_planes(c);
+ brw_clip_tri(c);
+ check_nr_verts(c);
+ }
+ brw_ENDIF(p, do_clip);
+
+ emit_unfilled_primitives(c);
+ brw_clip_kill_thread(c);
+}
+
+
+
diff --git a/src/gallium/drivers/i965simple/brw_clip_util.c b/src/gallium/drivers/i965simple/brw_clip_util.c
new file mode 100644
index 0000000000..6d58ceafff
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_clip_util.c
@@ -0,0 +1,351 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "brw_defines.h"
+#include "brw_context.h"
+#include "brw_eu.h"
+#include "brw_util.h"
+#include "brw_clip.h"
+
+
+
+
+
+static struct brw_reg get_tmp( struct brw_clip_compile *c )
+{
+ struct brw_reg tmp = brw_vec4_grf(c->last_tmp, 0);
+
+ if (++c->last_tmp > c->prog_data.total_grf)
+ c->prog_data.total_grf = c->last_tmp;
+
+ return tmp;
+}
+
+static void release_tmp( struct brw_clip_compile *c, struct brw_reg tmp )
+{
+ if (tmp.nr == c->last_tmp-1)
+ c->last_tmp--;
+}
+
+
+static struct brw_reg make_plane_ud(unsigned x, unsigned y, unsigned z, unsigned w)
+{
+ return brw_imm_ud((w<<24) | (z<<16) | (y<<8) | x);
+}
+
+
+void brw_clip_init_planes( struct brw_clip_compile *c )
+{
+ struct brw_compile *p = &c->func;
+
+ if (!c->key.nr_userclip) {
+ brw_MOV(p, get_element_ud(c->reg.fixed_planes, 0), make_plane_ud( 0, 0, 0xff, 1));
+ brw_MOV(p, get_element_ud(c->reg.fixed_planes, 1), make_plane_ud( 0, 0, 1, 1));
+ brw_MOV(p, get_element_ud(c->reg.fixed_planes, 2), make_plane_ud( 0, 0xff, 0, 1));
+ brw_MOV(p, get_element_ud(c->reg.fixed_planes, 3), make_plane_ud( 0, 1, 0, 1));
+ brw_MOV(p, get_element_ud(c->reg.fixed_planes, 4), make_plane_ud(0xff, 0, 0, 1));
+ brw_MOV(p, get_element_ud(c->reg.fixed_planes, 5), make_plane_ud( 1, 0, 0, 1));
+ }
+}
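+
+/* Presumably the packed bytes above are read back as signed values, so
+ * e.g. make_plane_ud(0, 0, 0xff, 1) encodes the plane (0, 0, -1, 1),
+ * matching the fixed_plane[] float table used for the CURBE path in
+ * brw_curbe.c.  (An assumption noted for readability, not verified
+ * against hardware documentation.)
+ */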
+
+
+
+#define W 3
+
+/* Project 'pos' to screen space (or back again), overwrite with results:
+ */
+static void brw_clip_project_position(struct brw_clip_compile *c, struct brw_reg pos )
+{
+ struct brw_compile *p = &c->func;
+
+ /* calc rhw
+ */
+ brw_math_invert(p, get_element(pos, W), get_element(pos, W));
+
+ /* value.xyz *= value.rhw
+ */
+ brw_set_access_mode(p, BRW_ALIGN_16);
+ brw_MUL(p, brw_writemask(pos, TGSI_WRITEMASK_XYZ), pos, brw_swizzle1(pos, W));
+ brw_set_access_mode(p, BRW_ALIGN_1);
+}
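+
+/* Scalar equivalent of the projection above (illustrative only):
+ *
+ *    pos.w  = 1.0f / pos.w;                // rhw
+ *    pos.x *= pos.w;
+ *    pos.y *= pos.w;
+ *    pos.z *= pos.w;
+ */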
+
+
+static void brw_clip_project_vertex( struct brw_clip_compile *c,
+ struct brw_indirect vert_addr )
+{
+#if 0
+ struct brw_compile *p = &c->func;
+ struct brw_reg tmp = get_tmp(c);
+
+ /* Fixup position. Extract from the original vertex and re-project
+ * to screen space:
+ */
+ brw_MOV(p, tmp, deref_4f(vert_addr, c->offset[VERT_RESULT_HPOS]));
+ brw_clip_project_position(c, tmp);
+ brw_MOV(p, deref_4f(vert_addr, c->header_position_offset), tmp);
+
+ release_tmp(c, tmp);
+#else
+ #warning "disabled"
+#endif
+}
+
+
+
+
+/* Interpolate between two vertices and put the result into a0.0.
+ * Increment a0.0 accordingly.
+ */
+void brw_clip_interp_vertex( struct brw_clip_compile *c,
+ struct brw_indirect dest_ptr,
+ struct brw_indirect v0_ptr, /* from */
+ struct brw_indirect v1_ptr, /* to */
+ struct brw_reg t0,
+ boolean force_edgeflag)
+{
+#if 0
+ struct brw_compile *p = &c->func;
+ struct brw_reg tmp = get_tmp(c);
+ unsigned i;
+
+ /* Just copy the vertex header:
+ */
+ brw_copy_indirect_to_indirect(p, dest_ptr, v0_ptr, 1);
+
+ /* Iterate over each attribute (could be done in pairs?)
+ */
+ for (i = 0; i < c->nr_attrs; i++) {
+ unsigned delta = i*16 + 32;
+
+ if (delta == c->offset[VERT_RESULT_EDGE]) {
+ if (force_edgeflag)
+ brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(1));
+ else
+ brw_MOV(p, deref_4f(dest_ptr, delta), deref_4f(v0_ptr, delta));
+ }
+ else {
+ /* Interpolate:
+ *
+ * New = attr0 + t*attr1 - t*attr0
+ */
+ brw_MUL(p,
+ vec4(brw_null_reg()),
+ deref_4f(v1_ptr, delta),
+ t0);
+
+ brw_MAC(p,
+ tmp,
+ negate(deref_4f(v0_ptr, delta)),
+ t0);
+
+ brw_ADD(p,
+ deref_4f(dest_ptr, delta),
+ deref_4f(v0_ptr, delta),
+ tmp);
+ }
+ }
+
+ if (i & 1) {
+ unsigned delta = i*16 + 32;
+ brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(0));
+ }
+
+ release_tmp(c, tmp);
+
+ /* Recreate the projected (NDC) coordinate in the new vertex
+ * header:
+ */
+ brw_clip_project_vertex(c, dest_ptr );
+#else
+ #warning "disabled"
+#endif
+}
+
+
+
+
+#define MAX_MRF 16
+
+void brw_clip_emit_vue(struct brw_clip_compile *c,
+ struct brw_indirect vert,
+ boolean allocate,
+ boolean eot,
+ unsigned header)
+{
+ struct brw_compile *p = &c->func;
+ unsigned start = c->last_mrf;
+
+ assert(!(allocate && eot));
+
+ /* Cycle through mrf regs - probably futile as we have to wait for
+    * the allocation response anyway.  Also, the order in which this
+    * function is invoked doesn't correspond to the order in which the
+    * instructions will be executed, so it won't have any effect in
+    * many cases.
+ */
+#if 0
+ if (start + c->nr_regs + 1 >= MAX_MRF)
+ start = 0;
+
+ c->last_mrf = start + c->nr_regs + 1;
+#endif
+
+ /* Copy the vertex from vertn into m1..mN+1:
+ */
+ brw_copy_from_indirect(p, brw_message_reg(start+1), vert, c->nr_regs);
+
+ /* Overwrite PrimType and PrimStart in the message header, for
+ * each vertex in turn:
+ */
+ brw_MOV(p, get_element_ud(c->reg.R0, 2), brw_imm_ud(header));
+
+
+   /* Send each vertex as a separate write to the urb.  This
+    * is different to the concept in brw_sf_emit.c, where
+    * subsequent writes are used to build up a single urb
+    * entry.  Each of these writes instantiates a separate
+    * urb entry - (I think... what about 'allocate'?)
+ */
+ brw_urb_WRITE(p,
+ allocate ? c->reg.R0 : retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
+ start,
+ c->reg.R0,
+ allocate,
+ 1, /* used */
+ c->nr_regs + 1, /* msg length */
+ allocate ? 1 : 0, /* response_length */
+ eot, /* eot */
+ 1, /* writes_complete */
+ 0, /* urb offset */
+ BRW_URB_SWIZZLE_NONE);
+}
+
+
+
+void brw_clip_kill_thread(struct brw_clip_compile *c)
+{
+ struct brw_compile *p = &c->func;
+
+ /* Send an empty message to kill the thread and release any
+ * allocated urb entry:
+ */
+ brw_urb_WRITE(p,
+ retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
+ 0,
+ c->reg.R0,
+ 0, /* allocate */
+ 0, /* used */
+ 0, /* msg len */
+ 0, /* response len */
+ 1, /* eot */
+ 1, /* writes complete */
+ 0,
+ BRW_URB_SWIZZLE_NONE);
+}
+
+
+
+
+struct brw_reg brw_clip_plane0_address( struct brw_clip_compile *c )
+{
+ return brw_address(c->reg.fixed_planes);
+}
+
+
+struct brw_reg brw_clip_plane_stride( struct brw_clip_compile *c )
+{
+ if (c->key.nr_userclip) {
+ return brw_imm_uw(16);
+ }
+ else {
+ return brw_imm_uw(4);
+ }
+}
+
+
+/* If flatshading, distribute color from provoking vertex prior to
+ * clipping.
+ */
+void brw_clip_copy_colors( struct brw_clip_compile *c,
+ unsigned to, unsigned from )
+{
+#if 0
+ struct brw_compile *p = &c->func;
+
+ if (c->offset[VERT_RESULT_COL0])
+ brw_MOV(p,
+ byte_offset(c->reg.vertex[to], c->offset[VERT_RESULT_COL0]),
+ byte_offset(c->reg.vertex[from], c->offset[VERT_RESULT_COL0]));
+
+ if (c->offset[VERT_RESULT_COL1])
+ brw_MOV(p,
+ byte_offset(c->reg.vertex[to], c->offset[VERT_RESULT_COL1]),
+ byte_offset(c->reg.vertex[from], c->offset[VERT_RESULT_COL1]));
+
+ if (c->offset[VERT_RESULT_BFC0])
+ brw_MOV(p,
+ byte_offset(c->reg.vertex[to], c->offset[VERT_RESULT_BFC0]),
+ byte_offset(c->reg.vertex[from], c->offset[VERT_RESULT_BFC0]));
+
+ if (c->offset[VERT_RESULT_BFC1])
+ brw_MOV(p,
+ byte_offset(c->reg.vertex[to], c->offset[VERT_RESULT_BFC1]),
+ byte_offset(c->reg.vertex[from], c->offset[VERT_RESULT_BFC1]));
+#else
+ #warning "disabled"
+#endif
+}
+
+
+
+void brw_clip_init_clipmask( struct brw_clip_compile *c )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg incoming = get_element_ud(c->reg.R0, 2);
+
+ /* Shift so that lowest outcode bit is rightmost:
+ */
+ brw_SHR(p, c->reg.planemask, incoming, brw_imm_ud(26));
+
+ if (c->key.nr_userclip) {
+ struct brw_reg tmp = retype(vec1(get_tmp(c)), BRW_REGISTER_TYPE_UD);
+
+ /* Rearrange userclip outcodes so that they come directly after
+ * the fixed plane bits.
+ */
+ brw_AND(p, tmp, incoming, brw_imm_ud(0x3f<<14));
+ brw_SHR(p, tmp, tmp, brw_imm_ud(8));
+ brw_OR(p, c->reg.planemask, c->reg.planemask, tmp);
+
+ release_tmp(c, tmp);
+ }
+}
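+
+/* Resulting planemask layout implied by the shifts above (a sketch, not
+ * taken from hardware documentation):
+ *
+ *    bits 0..5    fixed-plane outcodes   (R0.2 bits 26..31, >> 26)
+ *    bits 6..11   userclip outcodes      (R0.2 bits 14..19, >> 8)
+ */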
+
diff --git a/src/gallium/drivers/i965simple/brw_context.c b/src/gallium/drivers/i965simple/brw_context.c
new file mode 100644
index 0000000000..9b33285bc7
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_context.c
@@ -0,0 +1,139 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "brw_context.h"
+#include "brw_defines.h"
+#include "brw_draw.h"
+#include "brw_vs.h"
+#include "brw_tex_layout.h"
+#include "brw_winsys.h"
+
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_context.h"
+#include "util/u_memory.h"
+#include "pipe/p_screen.h"
+
+
+#ifndef BRW_DEBUG
+int BRW_DEBUG = (0);
+#endif
+
+
+static void brw_destroy(struct pipe_context *pipe)
+{
+ struct brw_context *brw = brw_context(pipe);
+
+ if(brw->winsys->destroy)
+ brw->winsys->destroy(brw->winsys);
+
+ FREE(brw);
+}
+
+
+static void brw_clear(struct pipe_context *pipe, struct pipe_surface *ps,
+ unsigned clearValue)
+{
+ int x, y, w, h;
+ /* FIXME: corny... */
+
+ x = 0;
+ y = 0;
+ w = ps->width;
+ h = ps->height;
+
+ pipe->surface_fill(pipe, ps, x, y, w, h, clearValue);
+}
+
+static unsigned int
+brw_is_texture_referenced( struct pipe_context *pipe,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+static unsigned int
+brw_is_buffer_referenced( struct pipe_context *pipe,
+ struct pipe_buffer *buf)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+struct pipe_context *brw_create(struct pipe_screen *screen,
+ struct brw_winsys *brw_winsys,
+ unsigned pci_id)
+{
+ struct brw_context *brw;
+
+ debug_printf("%s: creating brw_context with pci id 0x%x\n",
+ __FUNCTION__, pci_id);
+
+ brw = CALLOC_STRUCT(brw_context);
+ if (brw == NULL)
+ return NULL;
+
+ brw->winsys = brw_winsys;
+ brw->pipe.winsys = screen->winsys;
+ brw->pipe.screen = screen;
+
+ brw->pipe.destroy = brw_destroy;
+ brw->pipe.clear = brw_clear;
+
+ brw->pipe.is_texture_referenced = brw_is_texture_referenced;
+ brw->pipe.is_buffer_referenced = brw_is_buffer_referenced;
+
+ brw_init_surface_functions(brw);
+ brw_init_texture_functions(brw);
+ brw_init_state_functions(brw);
+ brw_init_flush_functions(brw);
+ brw_init_draw_functions( brw );
+
+
+ brw_init_state( brw );
+
+ brw->pci_id = pci_id;
+ brw->dirty = ~0;
+ brw->hardware_dirty = ~0;
+
+ memset(&brw->wm.bind, ~0, sizeof(brw->wm.bind));
+
+ return &brw->pipe;
+}
+
diff --git a/src/gallium/drivers/i965simple/brw_context.h b/src/gallium/drivers/i965simple/brw_context.h
new file mode 100644
index 0000000000..3079485180
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_context.h
@@ -0,0 +1,684 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#ifndef BRWCONTEXT_INC
+#define BRWCONTEXT_INC
+
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+
+#include "tgsi/tgsi_scan.h"
+
+#include "brw_structs.h"
+#include "brw_winsys.h"
+
+
+/* Glossary:
+ *
+ * URB - uniform resource buffer. A mid-sized buffer which is
+ * partitioned between the fixed function units and used for passing
+ * values (vertices, primitives, constants) between them.
+ *
+ * CURBE - constant URB entry. An urb region (entry) used to hold
+ * constant values which the fixed function units can be instructed to
+ * preload into the GRF when spawning a thread.
+ *
+ * VUE - vertex URB entry. An urb entry holding a vertex and usually
+ * a vertex header. The header contains control information and
+ * things like primitive type, Begin/end flags and clip codes.
+ *
+ * PUE - primitive URB entry. An urb entry produced by the setup (SF)
+ * unit holding rasterization and interpolation parameters.
+ *
+ * GRF - general register file. One of several register files
+ * addressable by programmed threads. The inputs (r0, payload, curbe,
+ * urb) of the thread are preloaded to this area before the thread is
+ * spawned. The registers are individually 8 dwords wide and suitable
+ * for general usage. Registers holding thread input values are not
+ * special and may be overwritten.
+ *
+ * MRF - message register file. Threads communicate (and terminate)
+ * by sending messages.  Message parameters are placed in contiguous
+ * MRF registers. All program output is via these messages. URB
+ * entries are populated by sending a message to the shared URB
+ * function containing the new data, together with a control word,
+ * often an unmodified copy of R0.
+ *
+ * R0 - GRF register 0. Typically holds control information used when
+ * sending messages to other threads.
+ *
+ * EU or GEN4 EU: The name of the programmable subsystem of the
+ * i965 hardware. Threads are executed by the EU, the registers
+ * described above are part of the EU architecture.
+ *
+ * Fixed function units:
+ *
+ * CS - Command streamer. Notional first unit, little software
+ * interaction.  Holds the URB entries used for constant data, i.e. the
+ * CURBEs.
+ *
+ * VF/VS - Vertex Fetch / Vertex Shader. The fixed function part of
+ * this unit is responsible for pulling vertices out of vertex buffers
+ * in vram and injecting them into the processing pipe as VUEs. If
+ * enabled, it first passes them to a VS thread which is a good place
+ * for the driver to implement any active vertex shader.
+ *
+ * GS - Geometry Shader. This corresponds to a new DX10 concept. If
+ * enabled, incoming strips etc are passed to GS threads in individual
+ * line/triangle/point units.  The GS thread may perform arbitrary
+ * computation and emit whatever primitives, with whatever vertices, it
+ * chooses.  This makes GS an excellent place to implement GL's
+ * unfilled polygon modes, though of course it is capable of much
+ * more.  Additionally, GS is used to translate away primitives not
+ * handled by later units, including Quads and Lineloops.
+ *
+ * CLIP - Clipper.  Mesa's clipping algorithms are imported to run on
+ * this unit.  The fixed function part performs cliptesting against
+ * the 6 fixed clipplanes and makes decisions on whether or not the
+ * incoming primitive needs to be passed to a thread for clipping.
+ * User clip planes are handled via cooperation with the VS thread.
+ *
+ * SF - Strips/Fans or Setup: Triangles are prepared for
+ * rasterization.  Interpolation coefficients are calculated.
+ * Flatshading and two-sided lighting are usually performed here.
+ *
+ * WM - Windower.  Interpolation of vertex attributes is performed
+ * here, and the fragment shader is implemented here.  The SIMD aspects
+ * of the EU are taken full advantage of, as pixels are processed in
+ * blocks of 16.
+ *
+ * CC - Color Calculator. No EU threads associated with this unit.
+ * Handles blending and (presumably) depth and stencil testing.
+ */
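+
+/* A rough summary of how the units above are chained (derived from the
+ * glossary, intended only as an orientation aid):
+ *
+ *    CS (curbes) -> VF/VS -> GS -> CLIP -> SF -> WM -> CC
+ *
+ * with the URB carrying VUEs/PUEs between the stages and the CURBE
+ * feeding constants to the programmable ones.
+ */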
+
+#define BRW_MAX_CURBE (32*16)
+
+struct brw_context;
+struct brw_winsys;
+
+
+/* Raised when we receive new state across the pipe interface:
+ */
+#define BRW_NEW_VIEWPORT 0x1
+#define BRW_NEW_RASTERIZER 0x2
+#define BRW_NEW_FS 0x4
+#define BRW_NEW_BLEND 0x8
+#define BRW_NEW_CLIP 0x10
+#define BRW_NEW_SCISSOR 0x20
+#define BRW_NEW_STIPPLE 0x40
+#define BRW_NEW_FRAMEBUFFER 0x80
+#define BRW_NEW_ALPHA_TEST 0x100
+#define BRW_NEW_DEPTH_STENCIL 0x200
+#define BRW_NEW_SAMPLER 0x400
+#define BRW_NEW_TEXTURE 0x800
+#define BRW_NEW_CONSTANTS 0x1000
+#define BRW_NEW_VBO 0x2000
+#define BRW_NEW_VS 0x4000
+
+/* Raised for other internal events:
+ */
+#define BRW_NEW_URB_FENCE 0x10000
+#define BRW_NEW_PSP 0x20000
+#define BRW_NEW_CURBE_OFFSETS 0x40000
+#define BRW_NEW_REDUCED_PRIMITIVE 0x80000
+#define BRW_NEW_PRIMITIVE 0x100000
+#define BRW_NEW_SCENE 0x200000
+#define BRW_NEW_SF_LINKAGE 0x400000
+
+extern int BRW_DEBUG;
+
+#define DEBUG_TEXTURE 0x1
+#define DEBUG_STATE 0x2
+#define DEBUG_IOCTL 0x4
+#define DEBUG_PRIMS 0x8
+#define DEBUG_VERTS 0x10
+#define DEBUG_FALLBACKS 0x20
+#define DEBUG_VERBOSE 0x40
+#define DEBUG_DRI 0x80
+#define DEBUG_DMA 0x100
+#define DEBUG_SANITY 0x200
+#define DEBUG_SYNC 0x400
+#define DEBUG_SLEEP 0x800
+#define DEBUG_PIXEL 0x1000
+#define DEBUG_STATS 0x2000
+#define DEBUG_TILE 0x4000
+#define DEBUG_SINGLE_THREAD 0x8000
+#define DEBUG_WM 0x10000
+#define DEBUG_URB 0x20000
+#define DEBUG_VS 0x40000
+#define DEBUG_BATCH 0x80000
+#define DEBUG_BUFMGR 0x100000
+#define DEBUG_BLIT 0x200000
+#define DEBUG_REGION 0x400000
+#define DEBUG_MIPTREE 0x800000
+
+#define DBG(...) do { \
+ if (BRW_DEBUG & FILE_DEBUG_FLAG) \
+ debug_printf(__VA_ARGS__); \
+} while(0)
+
+#define PRINT(...) do { \
+ debug_printf(__VA_ARGS__); \
+} while(0)
+
+struct brw_state_flags {
+ unsigned cache;
+ unsigned brw;
+};
+
+
+struct brw_vertex_program {
+ struct pipe_shader_state program;
+ struct tgsi_shader_info info;
+ int id;
+};
+
+
+struct brw_fragment_program {
+ struct pipe_shader_state program;
+ struct tgsi_shader_info info;
+
+ boolean UsesDepth; /* XXX add this to tgsi_shader_info? */
+ int id;
+};
+
+
+struct pipe_setup_linkage {
+ struct {
+ unsigned vp_output:5;
+ unsigned interp_mode:4;
+ unsigned bf_vp_output:5;
+ } fp_input[PIPE_MAX_SHADER_INPUTS];
+
+ unsigned fp_input_count:5;
+ unsigned max_vp_output:5;
+};
+
+
+
+struct brw_texture {
+ struct pipe_texture base;
+
+ /* Derived from the above:
+ */
+ unsigned stride;
+ unsigned depth_pitch; /* per-image on i945? */
+ unsigned total_nblocksy;
+
+ unsigned nr_images[PIPE_MAX_TEXTURE_LEVELS];
+
+ /* Explicitly store the offset of each image for each cube face or
+ * depth value. Pretty much have to accept that hardware formats
+ * are going to be so diverse that there is no unified way to
+ * compute the offsets of depth/cube images within a mipmap level,
+ * so have to store them as a lookup table:
+ */
+ unsigned *image_offset[PIPE_MAX_TEXTURE_LEVELS]; /**< array [depth] of offsets */
+
+ /* Includes image offset tables:
+ */
+ unsigned level_offset[PIPE_MAX_TEXTURE_LEVELS];
+
+ /* The data is held here:
+ */
+ struct pipe_buffer *buffer;
+};
+
+/* Data about a particular attempt to compile a program. Note that
+ * there can be many of these, each in a different GL state
+ * corresponding to a different brw_wm_prog_key struct, with different
+ * compiled programs:
+ */
+
+struct brw_wm_prog_data {
+ unsigned curb_read_length;
+ unsigned urb_read_length;
+
+ unsigned first_curbe_grf;
+ unsigned total_grf;
+ unsigned total_scratch;
+
+ /* Internally generated constants for the CURBE. These are loaded
+ * ahead of the data from the constant buffer.
+ */
+ const float internal_const[8];
+ unsigned nr_internal_consts;
+ unsigned max_const;
+
+ boolean error;
+};
+
+struct brw_sf_prog_data {
+ unsigned urb_read_length;
+ unsigned total_grf;
+
+   /* Each vertex may have up to 12 attributes, 4 components each,
+    * except WPOS which requires only 2.  (11*4 + 2) == 46 ==> 11.5
+ * rows.
+ *
+ * Actually we use 4 for each, so call it 12 rows.
+ */
+ unsigned urb_entry_size;
+};
+
+struct brw_clip_prog_data {
+ unsigned curb_read_length; /* user planes? */
+ unsigned clip_mode;
+ unsigned urb_read_length;
+ unsigned total_grf;
+};
+
+struct brw_gs_prog_data {
+ unsigned urb_read_length;
+ unsigned total_grf;
+};
+
+struct brw_vs_prog_data {
+ unsigned curb_read_length;
+ unsigned urb_read_length;
+ unsigned total_grf;
+ unsigned outputs_written;
+
+ unsigned inputs_read;
+
+ unsigned max_const;
+
+ float imm_buf[PIPE_MAX_CONSTANT][4];
+ unsigned num_imm;
+ unsigned num_consts;
+
+ /* Used for calculating urb partitions:
+ */
+ unsigned urb_entry_size;
+};
+
+
+#define BRW_MAX_TEX_UNIT 8
+#define BRW_WM_MAX_SURF (BRW_MAX_TEX_UNIT + 1)
+
+/* Create a fixed sized struct for caching binding tables:
+ */
+struct brw_surface_binding_table {
+ unsigned surf_ss_offset[BRW_WM_MAX_SURF];
+};
+
+
+struct brw_cache;
+
+struct brw_mem_pool {
+ struct pipe_buffer *buffer;
+
+ unsigned size;
+ unsigned offset; /* offset of first free byte */
+
+ struct brw_context *brw;
+};
+
+struct brw_cache_item {
+ unsigned hash;
+ unsigned key_size; /* for variable-sized keys */
+ const void *key;
+
+ unsigned offset; /* offset within pool's buffer */
+ unsigned data_size;
+
+ struct brw_cache_item *next;
+};
+
+
+
+struct brw_cache {
+ unsigned id;
+
+ const char *name;
+
+ struct brw_context *brw;
+ struct brw_mem_pool *pool;
+
+ struct brw_cache_item **items;
+ unsigned size, n_items;
+
+ unsigned key_size; /* for fixed-size keys */
+ unsigned aux_size;
+
+ unsigned last_addr; /* offset of active item */
+};
+
+
+
+
+/* Considered adding a member to this struct to document which flags
+ * an update might raise so that ordering of the state atoms can be
+ * checked or derived at runtime. Dropped the idea in favor of having
+ * a debug mode where the state is monitored for flags which are
+ * raised that have already been tested against.
+ */
+struct brw_tracked_state {
+ struct brw_state_flags dirty;
+ void (*update)( struct brw_context *brw );
+};
+
+
+/* Flags for brw->state.cache.
+ */
+#define CACHE_NEW_CC_VP (1<<BRW_CC_VP)
+#define CACHE_NEW_CC_UNIT (1<<BRW_CC_UNIT)
+#define CACHE_NEW_WM_PROG (1<<BRW_WM_PROG)
+#define CACHE_NEW_SAMPLER_DEFAULT_COLOR (1<<BRW_SAMPLER_DEFAULT_COLOR)
+#define CACHE_NEW_SAMPLER (1<<BRW_SAMPLER)
+#define CACHE_NEW_WM_UNIT (1<<BRW_WM_UNIT)
+#define CACHE_NEW_SF_PROG (1<<BRW_SF_PROG)
+#define CACHE_NEW_SF_VP (1<<BRW_SF_VP)
+#define CACHE_NEW_SF_UNIT (1<<BRW_SF_UNIT)
+#define CACHE_NEW_VS_UNIT (1<<BRW_VS_UNIT)
+#define CACHE_NEW_VS_PROG (1<<BRW_VS_PROG)
+#define CACHE_NEW_GS_UNIT (1<<BRW_GS_UNIT)
+#define CACHE_NEW_GS_PROG (1<<BRW_GS_PROG)
+#define CACHE_NEW_CLIP_VP (1<<BRW_CLIP_VP)
+#define CACHE_NEW_CLIP_UNIT (1<<BRW_CLIP_UNIT)
+#define CACHE_NEW_CLIP_PROG (1<<BRW_CLIP_PROG)
+#define CACHE_NEW_SURFACE (1<<BRW_SS_SURFACE)
+#define CACHE_NEW_SURF_BIND (1<<BRW_SS_SURF_BIND)
+
+
+
+
+enum brw_mempool_id {
+ BRW_GS_POOL,
+ BRW_SS_POOL,
+ BRW_MAX_POOL
+};
+
+
+struct brw_cached_batch_item {
+ struct header *header;
+ unsigned sz;
+ struct brw_cached_batch_item *next;
+};
+
+
+
+/* Protect against a future where PIPE_MAX_ATTRIBS > 32. Wouldn't life
+ * be easier if C allowed arrays of packed elements?
+ */
+#define ATTRIB_BIT_DWORDS ((PIPE_MAX_ATTRIBS+31)/32)
+
+
+
+
+struct brw_vertex_info {
+ unsigned varying; /* varying:1[PIPE_MAX_ATTRIBS] */
+ unsigned sizes[ATTRIB_BIT_DWORDS * 2]; /* sizes:2[PIPE_MAX_ATTRIBS] */
+};
+
+
+
+
+
+struct brw_context
+{
+ struct pipe_context pipe;
+ struct brw_winsys *winsys;
+
+ unsigned primitive;
+ unsigned reduced_primitive;
+
+ boolean emit_state_always;
+
+ struct {
+ struct brw_state_flags dirty;
+ } state;
+
+
+ struct {
+ const struct pipe_blend_state *Blend;
+ const struct pipe_depth_stencil_alpha_state *DepthStencil;
+ const struct pipe_poly_stipple *PolygonStipple;
+ const struct pipe_rasterizer_state *Raster;
+ const struct pipe_sampler_state *Samplers[PIPE_MAX_SAMPLERS];
+ const struct brw_vertex_program *VertexProgram;
+ const struct brw_fragment_program *FragmentProgram;
+
+ struct pipe_clip_state Clip;
+ struct pipe_blend_color BlendColor;
+ struct pipe_scissor_state Scissor;
+ struct pipe_viewport_state Viewport;
+ struct pipe_framebuffer_state FrameBuffer;
+
+ const struct pipe_constant_buffer *Constants[2];
+ const struct brw_texture *Texture[PIPE_MAX_SAMPLERS];
+ } attribs;
+
+ unsigned num_samplers;
+ unsigned num_textures;
+
+ struct brw_mem_pool pool[BRW_MAX_POOL];
+ struct brw_cache cache[BRW_MAX_CACHE];
+ struct brw_cached_batch_item *cached_batch_items;
+
+ struct {
+
+ /* Arrays with buffer objects to copy non-bufferobj arrays into
+ * for upload:
+ */
+ const struct pipe_vertex_buffer *vbo_array[PIPE_MAX_ATTRIBS];
+
+ struct brw_vertex_element_state inputs[PIPE_MAX_ATTRIBS];
+
+#define BRW_NR_UPLOAD_BUFS 17
+#define BRW_UPLOAD_INIT_SIZE (128*1024)
+
+ /* Summary of size and varying of active arrays, so we can check
+ * for changes to this state:
+ */
+ struct brw_vertex_info info;
+ } vb;
+
+
+ unsigned hardware_dirty;
+ unsigned dirty;
+ unsigned pci_id;
+ /* BRW_NEW_URB_ALLOCATIONS:
+ */
+ struct {
+ unsigned vsize; /* vertex size plus header in urb registers */
+ unsigned csize; /* constant buffer size in urb registers */
+ unsigned sfsize; /* setup data size in urb registers */
+
+ boolean constrained;
+
+ unsigned nr_vs_entries;
+ unsigned nr_gs_entries;
+ unsigned nr_clip_entries;
+ unsigned nr_sf_entries;
+ unsigned nr_cs_entries;
+
+/* unsigned vs_size; */
+/* unsigned gs_size; */
+/* unsigned clip_size; */
+/* unsigned sf_size; */
+/* unsigned cs_size; */
+
+ unsigned vs_start;
+ unsigned gs_start;
+ unsigned clip_start;
+ unsigned sf_start;
+ unsigned cs_start;
+ } urb;
+
+
+ /* BRW_NEW_CURBE_OFFSETS:
+ */
+ struct {
+ unsigned wm_start;
+ unsigned wm_size;
+ unsigned clip_start;
+ unsigned clip_size;
+ unsigned vs_start;
+ unsigned vs_size;
+ unsigned total_size;
+
+ unsigned gs_offset;
+
+ float *last_buf;
+ unsigned last_bufsz;
+ } curbe;
+
+ struct {
+ struct brw_vs_prog_data *prog_data;
+
+ unsigned prog_gs_offset;
+ unsigned state_gs_offset;
+ } vs;
+
+ struct {
+ struct brw_gs_prog_data *prog_data;
+
+ boolean prog_active;
+ unsigned prog_gs_offset;
+ unsigned state_gs_offset;
+ } gs;
+
+ struct {
+ struct brw_clip_prog_data *prog_data;
+
+ unsigned prog_gs_offset;
+ unsigned vp_gs_offset;
+ unsigned state_gs_offset;
+ } clip;
+
+
+ struct {
+ struct brw_sf_prog_data *prog_data;
+
+ struct pipe_setup_linkage linkage;
+
+ unsigned prog_gs_offset;
+ unsigned vp_gs_offset;
+ unsigned state_gs_offset;
+ } sf;
+
+ struct {
+ struct brw_wm_prog_data *prog_data;
+
+// struct brw_wm_compiler *compile_data;
+
+
+ /**
+ * Array of sampler state uploaded at sampler_gs_offset of BRW_SAMPLER
+ * cache
+ */
+ struct brw_sampler_state sampler[BRW_MAX_TEX_UNIT];
+
+ unsigned render_surf;
+ unsigned nr_surfaces;
+
+ unsigned max_threads;
+ struct pipe_buffer *scratch_buffer;
+ unsigned scratch_buffer_size;
+
+ unsigned sampler_count;
+ unsigned sampler_gs_offset;
+
+ struct brw_surface_binding_table bind;
+ unsigned bind_ss_offset;
+
+ unsigned prog_gs_offset;
+ unsigned state_gs_offset;
+ } wm;
+
+
+ struct {
+ unsigned vp_gs_offset;
+ unsigned state_gs_offset;
+ } cc;
+
+
+ /* Used to give every program string a unique id
+ */
+ unsigned program_id;
+};
+
+
+#define BRW_PACKCOLOR8888(r,g,b,a) ((r<<24) | (g<<16) | (b<<8) | a)
+
+
+/*======================================================================
+ * brw_vtbl.c
+ */
+void brw_do_flush( struct brw_context *brw,
+ unsigned flags );
+
+
+/*======================================================================
+ * brw_state.c
+ */
+void brw_validate_state(struct brw_context *brw);
+void brw_init_state(struct brw_context *brw);
+void brw_destroy_state(struct brw_context *brw);
+
+
+/*======================================================================
+ * brw_tex.c
+ */
+void brwUpdateTextureState( struct brw_context *brw );
+
+
+/* brw_urb.c
+ */
+void brw_upload_urb_fence(struct brw_context *brw);
+
+void brw_upload_constant_buffer_state(struct brw_context *brw);
+
+void brw_init_surface_functions(struct brw_context *brw);
+void brw_init_state_functions(struct brw_context *brw);
+void brw_init_flush_functions(struct brw_context *brw);
+void brw_init_string_functions(struct brw_context *brw);
+
+/*======================================================================
+ * Inline conversion functions. These are better-typed than the
+ * macros used previously:
+ */
+static inline struct brw_context *
+brw_context( struct pipe_context *ctx )
+{
+ return (struct brw_context *)ctx;
+}
+
+#endif
+
diff --git a/src/gallium/drivers/i965simple/brw_curbe.c b/src/gallium/drivers/i965simple/brw_curbe.c
new file mode 100644
index 0000000000..904cde8e30
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_curbe.c
@@ -0,0 +1,369 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+
+#include "brw_context.h"
+#include "brw_defines.h"
+#include "brw_state.h"
+#include "brw_batch.h"
+#include "brw_util.h"
+#include "brw_wm.h"
+#include "pipe/p_state.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+#define FILE_DEBUG_FLAG DEBUG_FALLBACKS
+
+/* Partition the CURBE between the various users of constant values:
+ */
+static void calculate_curbe_offsets( struct brw_context *brw )
+{
+ /* CACHE_NEW_WM_PROG */
+ unsigned nr_fp_regs = align(brw->wm.prog_data->max_const, 16);
+
+ /* BRW_NEW_VERTEX_PROGRAM */
+ unsigned nr_vp_regs = align(brw->vs.prog_data->max_const, 16);
+ unsigned nr_clip_regs = 0;
+ unsigned total_regs;
+
+#if 0
+ /* BRW_NEW_CLIP ? */
+ if (brw->attribs.Transform->ClipPlanesEnabled) {
+ unsigned nr_planes = 6 + brw_count_bits(brw->attribs.Transform->ClipPlanesEnabled);
+ nr_clip_regs = align(nr_planes * 4, 16);
+ }
+#endif
+
+
+ total_regs = nr_fp_regs + nr_vp_regs + nr_clip_regs;
+
+ /* This can happen - what to do? Probably rather than falling
+ * back, the best thing to do is emit programs which code the
+ * constants as immediate values. Could do this either as a static
+ * cap on WM and VS, or adaptively.
+ *
+ * Unfortunately, this is currently dependent on the results of the
+ * program generation process (in the case of wm), so this would
+ * introduce the need to re-generate programs in the event of a
+ * curbe allocation failure.
+ */
+ /* Max size is 32 - just large enough to
+ * hold the 128 parameters allowed by
+ * the fragment and vertex program
+    * APIs.  It's not clear what happens
+ * when both VP and FP want to use 128
+ * parameters, though.
+ */
+ assert(total_regs <= 32);
+
+ /* Lazy resize:
+ */
+ if (nr_fp_regs > brw->curbe.wm_size ||
+ nr_vp_regs > brw->curbe.vs_size ||
+ nr_clip_regs != brw->curbe.clip_size ||
+ (total_regs < brw->curbe.total_size / 4 &&
+ brw->curbe.total_size > 16)) {
+
+ unsigned reg = 0;
+
+ /* Calculate a new layout:
+ */
+ reg = 0;
+ brw->curbe.wm_start = reg;
+ brw->curbe.wm_size = nr_fp_regs; reg += nr_fp_regs;
+ brw->curbe.clip_start = reg;
+ brw->curbe.clip_size = nr_clip_regs; reg += nr_clip_regs;
+ brw->curbe.vs_start = reg;
+ brw->curbe.vs_size = nr_vp_regs; reg += nr_vp_regs;
+ brw->curbe.total_size = reg;
+
+#if 0
+ if (0)
+ DBG("curbe wm %d+%d clip %d+%d vs %d+%d\n",
+ brw->curbe.wm_start,
+ brw->curbe.wm_size,
+ brw->curbe.clip_start,
+ brw->curbe.clip_size,
+ brw->curbe.vs_start,
+ brw->curbe.vs_size );
+#endif
+
+ brw->state.dirty.brw |= BRW_NEW_CURBE_OFFSETS;
+ }
+}
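+
+/* Illustrative CURBE layout produced above (register numbers are
+ * examples only):
+ *
+ *    [wm_start   .. wm_start   + wm_size)    fragment program constants
+ *    [clip_start .. clip_start + clip_size)  clip planes, if any
+ *    [vs_start   .. vs_start   + vs_size)    vertex program constants
+ *
+ * with total_size = wm_size + clip_size + vs_size.
+ */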
+
+
+const struct brw_tracked_state brw_curbe_offsets = {
+ .dirty = {
+ .brw = (BRW_NEW_CLIP |
+ BRW_NEW_VS),
+ .cache = CACHE_NEW_WM_PROG
+ },
+ .update = calculate_curbe_offsets
+};
+
+
+
+/* Define the number of curbes within CS's urb allocation. Multiple
+ * urb entries -> multiple curbes. These will be used by
+ * fixed-function hardware in a double-buffering scheme to avoid a
+ * pipeline stall each time the contents of the curbe are changed.
+ */
+void brw_upload_constant_buffer_state(struct brw_context *brw)
+{
+ struct brw_constant_buffer_state cbs;
+ memset(&cbs, 0, sizeof(cbs));
+
+   /* It appears that this is the state packet for the CS unit, i.e. the
+ * urb entries detailed here are housed in the CS range from the
+ * URB_FENCE command.
+ */
+ cbs.header.opcode = CMD_CONST_BUFFER_STATE;
+ cbs.header.length = sizeof(cbs)/4 - 2;
+
+ /* BRW_NEW_URB_FENCE */
+ cbs.bits0.nr_urb_entries = brw->urb.nr_cs_entries;
+ cbs.bits0.urb_entry_size = brw->urb.csize - 1;
+
+ assert(brw->urb.nr_cs_entries);
+ BRW_CACHED_BATCH_STRUCT(brw, &cbs);
+}
+
+
+static float fixed_plane[6][4] = {
+ { 0, 0, -1, 1 },
+ { 0, 0, 1, 1 },
+ { 0, -1, 0, 1 },
+ { 0, 1, 0, 1 },
+ {-1, 0, 0, 1 },
+ { 1, 0, 0, 1 }
+};
+
+/* Upload a new set of constants. Too much variability to go into the
+ * cache mechanism, but maybe would benefit from a comparison against
+ * the current uploaded set of constants.
+ */
+static void upload_constant_buffer(struct brw_context *brw)
+{
+ struct brw_mem_pool *pool = &brw->pool[BRW_GS_POOL];
+ unsigned sz = brw->curbe.total_size;
+ unsigned bufsz = sz * sizeof(float);
+ float *buf;
+ unsigned i;
+
+
+ if (sz == 0) {
+ struct brw_constant_buffer cb;
+ cb.header.opcode = CMD_CONST_BUFFER;
+ cb.header.length = sizeof(cb)/4 - 2;
+ cb.header.valid = 0;
+ cb.bits0.buffer_length = 0;
+ cb.bits0.buffer_address = 0;
+ BRW_BATCH_STRUCT(brw, &cb);
+
+ if (brw->curbe.last_buf) {
+ free(brw->curbe.last_buf);
+ brw->curbe.last_buf = NULL;
+ brw->curbe.last_bufsz = 0;
+ }
+
+ return;
+ }
+
+ buf = (float *)malloc(bufsz);
+
+ memset(buf, 0, bufsz);
+
+ if (brw->curbe.wm_size) {
+ unsigned offset = brw->curbe.wm_start * 16;
+
+ /* First the constant buffer constants:
+ */
+
+ /* Then any internally generated constants:
+ */
+ for (i = 0; i < brw->wm.prog_data->nr_internal_consts; i++)
+ buf[offset + i] = brw->wm.prog_data->internal_const[i];
+
+ assert(brw->wm.prog_data->max_const ==
+ brw->wm.prog_data->nr_internal_consts);
+ }
+
+
+ /* The clipplanes are actually delivered to both CLIP and VS units.
+ * VS uses them to calculate the outcode bitmasks.
+ */
+ if (brw->curbe.clip_size) {
+ unsigned offset = brw->curbe.clip_start * 16;
+ unsigned j;
+
+ /* If any planes are going this way, send them all this way:
+ */
+ for (i = 0; i < 6; i++) {
+ buf[offset + i * 4 + 0] = fixed_plane[i][0];
+ buf[offset + i * 4 + 1] = fixed_plane[i][1];
+ buf[offset + i * 4 + 2] = fixed_plane[i][2];
+ buf[offset + i * 4 + 3] = fixed_plane[i][3];
+ }
+
+ /* Clip planes: BRW_NEW_CLIP:
+ */
+ for (j = 0; j < brw->attribs.Clip.nr; j++) {
+ buf[offset + i * 4 + 0] = brw->attribs.Clip.ucp[j][0];
+ buf[offset + i * 4 + 1] = brw->attribs.Clip.ucp[j][1];
+ buf[offset + i * 4 + 2] = brw->attribs.Clip.ucp[j][2];
+ buf[offset + i * 4 + 3] = brw->attribs.Clip.ucp[j][3];
+ i++;
+ }
+ }
+
+
+ if (brw->curbe.vs_size) {
+ unsigned offset = brw->curbe.vs_start * 16;
+ /*unsigned nr = vp->max_const;*/
+ const struct pipe_constant_buffer *cbuffer = brw->attribs.Constants[0];
+ struct pipe_winsys *ws = brw->pipe.winsys;
+ /* FIXME: buffer size is num_consts + num_immediates */
+ if (brw->vs.prog_data->num_consts) {
+ /* map the vertex constant buffer and copy to curbe: */
+ void *data = ws->buffer_map(ws, cbuffer->buffer, 0);
+ /* FIXME: this is wrong. cbuffer->buffer->size currently
+ * represents the size of consts + immediates, so if we have
+ * both we'll copy past the end of the buffer with the
+ * subsequent memcpy. */
+ memcpy(&buf[offset], data, cbuffer->buffer->size);
+ ws->buffer_unmap(ws, cbuffer->buffer);
+ offset += cbuffer->buffer->size;
+ }
+ /*immediates*/
+ if (brw->vs.prog_data->num_imm) {
+ memcpy(&buf[offset], brw->vs.prog_data->imm_buf,
+ brw->vs.prog_data->num_imm * 4 * sizeof(float));
+ }
+ }
+
+ if (1) {
+ for (i = 0; i < sz; i+=4)
+ debug_printf("curbe %d.%d: %f %f %f %f\n", i/8, i&4,
+ buf[i+0], buf[i+1], buf[i+2], buf[i+3]);
+
+ debug_printf("last_buf %p buf %p sz %d/%d cmp %d\n",
+ brw->curbe.last_buf, buf,
+ bufsz, brw->curbe.last_bufsz,
+ brw->curbe.last_buf ? memcmp(buf, brw->curbe.last_buf, bufsz) : -1);
+ }
+
+ if (brw->curbe.last_buf &&
+ bufsz == brw->curbe.last_bufsz &&
+ memcmp(buf, brw->curbe.last_buf, bufsz) == 0) {
+ free(buf);
+/* return; */
+ }
+ else {
+ if (brw->curbe.last_buf)
+ free(brw->curbe.last_buf);
+ brw->curbe.last_buf = buf;
+ brw->curbe.last_bufsz = bufsz;
+
+
+ if (!brw_pool_alloc(pool,
+ bufsz,
+ 1 << 6,
+ &brw->curbe.gs_offset)) {
+ debug_printf("out of GS memory for curbe\n");
+ assert(0);
+ return;
+ }
+
+
+ /* Copy data to the buffer:
+ */
+ brw->winsys->buffer_subdata_typed(brw->winsys,
+ pool->buffer,
+ brw->curbe.gs_offset,
+ bufsz,
+ buf,
+ BRW_CONSTANT_BUFFER );
+ }
+
+ /* TODO: only emit the constant_buffer packet when necessary, ie:
+ - contents have changed
+ - offset has changed
+ - hw requirements due to other packets emitted.
+ */
+ {
+ struct brw_constant_buffer cb;
+
+ memset(&cb, 0, sizeof(cb));
+
+ cb.header.opcode = CMD_CONST_BUFFER;
+ cb.header.length = sizeof(cb)/4 - 2;
+ cb.header.valid = 1;
+ cb.bits0.buffer_length = sz - 1;
+ cb.bits0.buffer_address = brw->curbe.gs_offset >> 6;
+
+ /* Because this provokes an action (ie. copying the constants into
+ * the URB), it shouldn't be short-circuited even if identical to
+ * the previous time - eg. the URB destination may have changed,
+ * or the URB contents may differ from last time.
+ *
+ * Note that the data referred to is actually copied internally,
+ * not just used in place according to passed pointer.
+ *
+ * It appears that the CS unit takes care of using each available
+ * URB entry (Const URB Entry == CURBE) in turn, and issuing
+ * flushes as necessary when doublebuffering of CURBEs isn't
+ * possible.
+ */
+ BRW_BATCH_STRUCT(brw, &cb);
+ }
+}
+
+/* This tracked state is unique in that the state it monitors varies
+ * dynamically depending on the parameters tracked by the fragment and
+ * vertex programs. This is the template used as a starting point,
+ * each context will maintain a copy of this internally and update as
+ * required.
+ */
+const struct brw_tracked_state brw_constant_buffer = {
+ .dirty = {
+ .brw = (BRW_NEW_CLIP |
+ BRW_NEW_CONSTANTS |
+ BRW_NEW_URB_FENCE | /* Implicit - hardware requires this, not used above */
+ BRW_NEW_PSP | /* Implicit - hardware requires this, not used above */
+ BRW_NEW_CURBE_OFFSETS),
+ .cache = (CACHE_NEW_WM_PROG)
+ },
+ .update = upload_constant_buffer
+};
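For readers unfamiliar with the tracked-state mechanism, the sketch below shows
how an atom like brw_constant_buffer is typically exercised: the validator
compares the context's accumulated dirty bits against the atom's .dirty masks
and calls .update when they intersect. This is a simplified illustration, not
the driver's actual validation loop, and it assumes brw->state.dirty carries
both .brw and .cache bitmasks:

   /* Simplified sketch of driving a tracked-state atom
    * (assumes brw->state.dirty has .brw and .cache fields): */
   static void check_atom(struct brw_context *brw,
                          const struct brw_tracked_state *atom)
   {
      if ((brw->state.dirty.brw & atom->dirty.brw) ||
          (brw->state.dirty.cache & atom->dirty.cache))
         atom->update(brw);    /* e.g. upload_constant_buffer() */
   }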
+
diff --git a/src/gallium/drivers/i965simple/brw_defines.h b/src/gallium/drivers/i965simple/brw_defines.h
new file mode 100644
index 0000000000..715d2d2d01
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_defines.h
@@ -0,0 +1,870 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#ifndef BRW_DEFINES_H
+#define BRW_DEFINES_H
+
+/* MI commands:
+ */
+#define MI_NOOP 0x00
+#define MI_USER_INTERRUPT 0x02
+#define MI_WAIT_FOR_EVENT 0x03
+#define MI_FLUSH 0x04
+#define MI_REPORT_HEAD 0x07
+#define MI_ARB_ON_OFF 0x08
+#define MI_BATCH_BUFFER_END 0x0A
+#define MI_OVERLAY_FLIP 0x11
+#define MI_LOAD_SCAN_LINES_INCL 0x12
+#define MI_LOAD_SCAN_LINES_EXCL 0x13
+#define MI_DISPLAY_BUFFER_INFO 0x14
+#define MI_SET_CONTEXT 0x18
+#define MI_STORE_DATA_IMM 0x20
+#define MI_STORE_DATA_INDEX 0x21
+#define MI_LOAD_REGISTER_IMM 0x22
+#define MI_STORE_REGISTER_MEM 0x24
+#define MI_BATCH_BUFFER_START 0x31
+
+#define MI_SYNCHRONOUS_FLIP 0x0
+#define MI_ASYNCHRONOUS_FLIP 0x1
+
+#define MI_BUFFER_SECURE 0x0
+#define MI_BUFFER_NONSECURE 0x1
+
+#define MI_ARBITRATE_AT_CHAIN_POINTS 0x0
+#define MI_ARBITRATE_BETWEEN_INSTS 0x1
+#define MI_NO_ARBITRATION 0x3
+
+#define MI_CONDITION_CODE_WAIT_DISABLED 0x0
+#define MI_CONDITION_CODE_WAIT_0 0x1
+#define MI_CONDITION_CODE_WAIT_1 0x2
+#define MI_CONDITION_CODE_WAIT_2 0x3
+#define MI_CONDITION_CODE_WAIT_3 0x4
+#define MI_CONDITION_CODE_WAIT_4 0x5
+
+#define MI_DISPLAY_PIPE_A 0x0
+#define MI_DISPLAY_PIPE_B 0x1
+
+#define MI_DISPLAY_PLANE_A 0x0
+#define MI_DISPLAY_PLANE_B 0x1
+#define MI_DISPLAY_PLANE_C 0x2
+
+#define MI_STANDARD_FLIP 0x0
+#define MI_ENQUEUE_FLIP_PERFORM_BASE_FRAME_NUMBER_LOAD 0x1
+#define MI_ENQUEUE_FLIP_TARGET_FRAME_NUMBER_RELATIVE 0x2
+#define MI_ENQUEUE_FLIP_ABSOLUTE_TARGET_FRAME_NUMBER 0x3
+
+#define MI_PHYSICAL_ADDRESS 0x0
+#define MI_VIRTUAL_ADDRESS 0x1
+
+#define MI_BUFFER_MEMORY_MAIN 0x0
+#define MI_BUFFER_MEMORY_GTT 0x2
+#define MI_BUFFER_MEMORY_PER_PROCESS_GTT 0x3
+
+#define MI_FLIP_CONTINUE 0x0
+#define MI_FLIP_ON 0x1
+#define MI_FLIP_OFF 0x2
+
+#define MI_UNTRUSTED_REGISTER_SPACE 0x0
+#define MI_TRUSTED_REGISTER_SPACE 0x1
+
+/* 3D state:
+ */
+#define _3DOP_3DSTATE_PIPELINED 0x0
+#define _3DOP_3DSTATE_NONPIPELINED 0x1
+#define _3DOP_3DCONTROL 0x2
+#define _3DOP_3DPRIMITIVE 0x3
+
+#define _3DSTATE_PIPELINED_POINTERS 0x00
+#define _3DSTATE_BINDING_TABLE_POINTERS 0x01
+#define _3DSTATE_VERTEX_BUFFERS 0x08
+#define _3DSTATE_VERTEX_ELEMENTS 0x09
+#define _3DSTATE_INDEX_BUFFER 0x0A
+#define _3DSTATE_VF_STATISTICS 0x0B
+#define _3DSTATE_DRAWING_RECTANGLE 0x00
+#define _3DSTATE_CONSTANT_COLOR 0x01
+#define _3DSTATE_SAMPLER_PALETTE_LOAD 0x02
+#define _3DSTATE_CHROMA_KEY 0x04
+#define _3DSTATE_DEPTH_BUFFER 0x05
+#define _3DSTATE_POLY_STIPPLE_OFFSET 0x06
+#define _3DSTATE_POLY_STIPPLE_PATTERN 0x07
+#define _3DSTATE_LINE_STIPPLE 0x08
+#define _3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP 0x09
+#define _3DCONTROL 0x00
+#define _3DPRIMITIVE 0x00
+
+#define PIPE_CONTROL_NOWRITE 0x00
+#define PIPE_CONTROL_WRITEIMMEDIATE 0x01
+#define PIPE_CONTROL_WRITEDEPTH 0x02
+#define PIPE_CONTROL_WRITETIMESTAMP 0x03
+
+#define PIPE_CONTROL_GTTWRITE_PROCESS_LOCAL 0x00
+#define PIPE_CONTROL_GTTWRITE_GLOBAL 0x01
+
+#define _3DPRIM_POINTLIST 0x01
+#define _3DPRIM_LINELIST 0x02
+#define _3DPRIM_LINESTRIP 0x03
+#define _3DPRIM_TRILIST 0x04
+#define _3DPRIM_TRISTRIP 0x05
+#define _3DPRIM_TRIFAN 0x06
+#define _3DPRIM_QUADLIST 0x07
+#define _3DPRIM_QUADSTRIP 0x08
+#define _3DPRIM_LINELIST_ADJ 0x09
+#define _3DPRIM_LINESTRIP_ADJ 0x0A
+#define _3DPRIM_TRILIST_ADJ 0x0B
+#define _3DPRIM_TRISTRIP_ADJ 0x0C
+#define _3DPRIM_TRISTRIP_REVERSE 0x0D
+#define _3DPRIM_POLYGON 0x0E
+#define _3DPRIM_RECTLIST 0x0F
+#define _3DPRIM_LINELOOP 0x10
+#define _3DPRIM_POINTLIST_BF 0x11
+#define _3DPRIM_LINESTRIP_CONT 0x12
+#define _3DPRIM_LINESTRIP_BF 0x13
+#define _3DPRIM_LINESTRIP_CONT_BF 0x14
+#define _3DPRIM_TRIFAN_NOSTIPPLE 0x15
+
+#define _3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL 0
+#define _3DPRIM_VERTEXBUFFER_ACCESS_RANDOM 1
+
+#define BRW_ANISORATIO_2 0
+#define BRW_ANISORATIO_4 1
+#define BRW_ANISORATIO_6 2
+#define BRW_ANISORATIO_8 3
+#define BRW_ANISORATIO_10 4
+#define BRW_ANISORATIO_12 5
+#define BRW_ANISORATIO_14 6
+#define BRW_ANISORATIO_16 7
+
+#define BRW_BLENDFACTOR_ONE 0x1
+#define BRW_BLENDFACTOR_SRC_COLOR 0x2
+#define BRW_BLENDFACTOR_SRC_ALPHA 0x3
+#define BRW_BLENDFACTOR_DST_ALPHA 0x4
+#define BRW_BLENDFACTOR_DST_COLOR 0x5
+#define BRW_BLENDFACTOR_SRC_ALPHA_SATURATE 0x6
+#define BRW_BLENDFACTOR_CONST_COLOR 0x7
+#define BRW_BLENDFACTOR_CONST_ALPHA 0x8
+#define BRW_BLENDFACTOR_SRC1_COLOR 0x9
+#define BRW_BLENDFACTOR_SRC1_ALPHA 0x0A
+#define BRW_BLENDFACTOR_ZERO 0x11
+#define BRW_BLENDFACTOR_INV_SRC_COLOR 0x12
+#define BRW_BLENDFACTOR_INV_SRC_ALPHA 0x13
+#define BRW_BLENDFACTOR_INV_DST_ALPHA 0x14
+#define BRW_BLENDFACTOR_INV_DST_COLOR 0x15
+#define BRW_BLENDFACTOR_INV_CONST_COLOR 0x17
+#define BRW_BLENDFACTOR_INV_CONST_ALPHA 0x18
+#define BRW_BLENDFACTOR_INV_SRC1_COLOR 0x19
+#define BRW_BLENDFACTOR_INV_SRC1_ALPHA 0x1A
+
+#define BRW_BLENDFUNCTION_ADD 0
+#define BRW_BLENDFUNCTION_SUBTRACT 1
+#define BRW_BLENDFUNCTION_REVERSE_SUBTRACT 2
+#define BRW_BLENDFUNCTION_MIN 3
+#define BRW_BLENDFUNCTION_MAX 4
+
+#define BRW_ALPHATEST_FORMAT_UNORM8 0
+#define BRW_ALPHATEST_FORMAT_FLOAT32 1
+
+#define BRW_CHROMAKEY_KILL_ON_ANY_MATCH 0
+#define BRW_CHROMAKEY_REPLACE_BLACK 1
+
+#define BRW_CLIP_API_OGL 0
+#define BRW_CLIP_API_DX 1
+
+#define BRW_CLIPMODE_NORMAL 0
+#define BRW_CLIPMODE_CLIP_ALL 1
+#define BRW_CLIPMODE_CLIP_NON_REJECTED 2
+#define BRW_CLIPMODE_REJECT_ALL 3
+#define BRW_CLIPMODE_ACCEPT_ALL 4
+
+#define BRW_CLIP_NDCSPACE 0
+#define BRW_CLIP_SCREENSPACE 1
+
+#define BRW_COMPAREFUNCTION_ALWAYS 0
+#define BRW_COMPAREFUNCTION_NEVER 1
+#define BRW_COMPAREFUNCTION_LESS 2
+#define BRW_COMPAREFUNCTION_EQUAL 3
+#define BRW_COMPAREFUNCTION_LEQUAL 4
+#define BRW_COMPAREFUNCTION_GREATER 5
+#define BRW_COMPAREFUNCTION_NOTEQUAL 6
+#define BRW_COMPAREFUNCTION_GEQUAL 7
+
+#define BRW_COVERAGE_PIXELS_HALF 0
+#define BRW_COVERAGE_PIXELS_1 1
+#define BRW_COVERAGE_PIXELS_2 2
+#define BRW_COVERAGE_PIXELS_4 3
+
+#define BRW_CULLMODE_BOTH 0
+#define BRW_CULLMODE_NONE 1
+#define BRW_CULLMODE_FRONT 2
+#define BRW_CULLMODE_BACK 3
+
+#define BRW_DEFAULTCOLOR_R8G8B8A8_UNORM 0
+#define BRW_DEFAULTCOLOR_R32G32B32A32_FLOAT 1
+
+#define BRW_DEPTHFORMAT_D32_FLOAT_S8X24_UINT 0
+#define BRW_DEPTHFORMAT_D32_FLOAT 1
+#define BRW_DEPTHFORMAT_D24_UNORM_S8_UINT 2
+#define BRW_DEPTHFORMAT_D16_UNORM 5
+
+#define BRW_FLOATING_POINT_IEEE_754 0
+#define BRW_FLOATING_POINT_NON_IEEE_754 1
+
+#define BRW_FRONTWINDING_CW 0
+#define BRW_FRONTWINDING_CCW 1
+
+#define BRW_SPRITE_POINT_ENABLE 16
+
+#define BRW_INDEX_BYTE 0
+#define BRW_INDEX_WORD 1
+#define BRW_INDEX_DWORD 2
+
+#define BRW_LOGICOPFUNCTION_CLEAR 0
+#define BRW_LOGICOPFUNCTION_NOR 1
+#define BRW_LOGICOPFUNCTION_AND_INVERTED 2
+#define BRW_LOGICOPFUNCTION_COPY_INVERTED 3
+#define BRW_LOGICOPFUNCTION_AND_REVERSE 4
+#define BRW_LOGICOPFUNCTION_INVERT 5
+#define BRW_LOGICOPFUNCTION_XOR 6
+#define BRW_LOGICOPFUNCTION_NAND 7
+#define BRW_LOGICOPFUNCTION_AND 8
+#define BRW_LOGICOPFUNCTION_EQUIV 9
+#define BRW_LOGICOPFUNCTION_NOOP 10
+#define BRW_LOGICOPFUNCTION_OR_INVERTED 11
+#define BRW_LOGICOPFUNCTION_COPY 12
+#define BRW_LOGICOPFUNCTION_OR_REVERSE 13
+#define BRW_LOGICOPFUNCTION_OR 14
+#define BRW_LOGICOPFUNCTION_SET 15
+
+#define BRW_MAPFILTER_NEAREST 0x0
+#define BRW_MAPFILTER_LINEAR 0x1
+#define BRW_MAPFILTER_ANISOTROPIC 0x2
+
+#define BRW_MIPFILTER_NONE 0
+#define BRW_MIPFILTER_NEAREST 1
+#define BRW_MIPFILTER_LINEAR 3
+
+#define BRW_POLYGON_FRONT_FACING 0
+#define BRW_POLYGON_BACK_FACING 1
+
+#define BRW_PREFILTER_ALWAYS 0x0
+#define BRW_PREFILTER_NEVER 0x1
+#define BRW_PREFILTER_LESS 0x2
+#define BRW_PREFILTER_EQUAL 0x3
+#define BRW_PREFILTER_LEQUAL 0x4
+#define BRW_PREFILTER_GREATER 0x5
+#define BRW_PREFILTER_NOTEQUAL 0x6
+#define BRW_PREFILTER_GEQUAL 0x7
+
+#define BRW_PROVOKING_VERTEX_0 0
+#define BRW_PROVOKING_VERTEX_1 1
+#define BRW_PROVOKING_VERTEX_2 2
+
+#define BRW_RASTRULE_UPPER_LEFT 0
+#define BRW_RASTRULE_UPPER_RIGHT 1
+/* These are listed as "Reserved, but not seen as useful"
+ * in Intel documentation (page 212, "Point Rasterization Rule",
+ * section 7.4 "SF Pipeline State Summary", of document
+ * "Intel® 965 Express Chipset Family and Intel® G35 Express
+ * Chipset Graphics Controller Programmer's Reference Manual,
+ * Volume 2: 3D/Media", Revision 1.0b as of January 2008,
+ * available at
+ * http://intellinuxgraphics.org/documentation.html
+ * at the time of this writing).
+ *
+ * These appear to be supported on at least some
+ * i965-family devices, and the BRW_RASTRULE_LOWER_RIGHT
+ * is useful when using OpenGL to render to a FBO
+ * (which has the pixel coordinate Y orientation inverted
+ * with respect to the normal OpenGL pixel coordinate system).
+ */
+#define BRW_RASTRULE_LOWER_LEFT 2
+#define BRW_RASTRULE_LOWER_RIGHT 3
+
+#define BRW_RENDERTARGET_CLAMPRANGE_UNORM 0
+#define BRW_RENDERTARGET_CLAMPRANGE_SNORM 1
+#define BRW_RENDERTARGET_CLAMPRANGE_FORMAT 2
+
+#define BRW_STENCILOP_KEEP 0
+#define BRW_STENCILOP_ZERO 1
+#define BRW_STENCILOP_REPLACE 2
+#define BRW_STENCILOP_INCRSAT 3
+#define BRW_STENCILOP_DECRSAT 4
+#define BRW_STENCILOP_INCR 5
+#define BRW_STENCILOP_DECR 6
+#define BRW_STENCILOP_INVERT 7
+
+#define BRW_SURFACE_MIPMAPLAYOUT_BELOW 0
+#define BRW_SURFACE_MIPMAPLAYOUT_RIGHT 1
+
+#define BRW_SURFACEFORMAT_R32G32B32A32_FLOAT 0x000
+#define BRW_SURFACEFORMAT_R32G32B32A32_SINT 0x001
+#define BRW_SURFACEFORMAT_R32G32B32A32_UINT 0x002
+#define BRW_SURFACEFORMAT_R32G32B32A32_UNORM 0x003
+#define BRW_SURFACEFORMAT_R32G32B32A32_SNORM 0x004
+#define BRW_SURFACEFORMAT_R64G64_FLOAT 0x005
+#define BRW_SURFACEFORMAT_R32G32B32X32_FLOAT 0x006
+#define BRW_SURFACEFORMAT_R32G32B32A32_SSCALED 0x007
+#define BRW_SURFACEFORMAT_R32G32B32A32_USCALED 0x008
+#define BRW_SURFACEFORMAT_R32G32B32_FLOAT 0x040
+#define BRW_SURFACEFORMAT_R32G32B32_SINT 0x041
+#define BRW_SURFACEFORMAT_R32G32B32_UINT 0x042
+#define BRW_SURFACEFORMAT_R32G32B32_UNORM 0x043
+#define BRW_SURFACEFORMAT_R32G32B32_SNORM 0x044
+#define BRW_SURFACEFORMAT_R32G32B32_SSCALED 0x045
+#define BRW_SURFACEFORMAT_R32G32B32_USCALED 0x046
+#define BRW_SURFACEFORMAT_R16G16B16A16_UNORM 0x080
+#define BRW_SURFACEFORMAT_R16G16B16A16_SNORM 0x081
+#define BRW_SURFACEFORMAT_R16G16B16A16_SINT 0x082
+#define BRW_SURFACEFORMAT_R16G16B16A16_UINT 0x083
+#define BRW_SURFACEFORMAT_R16G16B16A16_FLOAT 0x084
+#define BRW_SURFACEFORMAT_R32G32_FLOAT 0x085
+#define BRW_SURFACEFORMAT_R32G32_SINT 0x086
+#define BRW_SURFACEFORMAT_R32G32_UINT 0x087
+#define BRW_SURFACEFORMAT_R32_FLOAT_X8X24_TYPELESS 0x088
+#define BRW_SURFACEFORMAT_X32_TYPELESS_G8X24_UINT 0x089
+#define BRW_SURFACEFORMAT_L32A32_FLOAT 0x08A
+#define BRW_SURFACEFORMAT_R32G32_UNORM 0x08B
+#define BRW_SURFACEFORMAT_R32G32_SNORM 0x08C
+#define BRW_SURFACEFORMAT_R64_FLOAT 0x08D
+#define BRW_SURFACEFORMAT_R16G16B16X16_UNORM 0x08E
+#define BRW_SURFACEFORMAT_R16G16B16X16_FLOAT 0x08F
+#define BRW_SURFACEFORMAT_A32X32_FLOAT 0x090
+#define BRW_SURFACEFORMAT_L32X32_FLOAT 0x091
+#define BRW_SURFACEFORMAT_I32X32_FLOAT 0x092
+#define BRW_SURFACEFORMAT_R16G16B16A16_SSCALED 0x093
+#define BRW_SURFACEFORMAT_R16G16B16A16_USCALED 0x094
+#define BRW_SURFACEFORMAT_R32G32_SSCALED 0x095
+#define BRW_SURFACEFORMAT_R32G32_USCALED 0x096
+#define BRW_SURFACEFORMAT_B8G8R8A8_UNORM 0x0C0
+#define BRW_SURFACEFORMAT_B8G8R8A8_UNORM_SRGB 0x0C1
+#define BRW_SURFACEFORMAT_R10G10B10A2_UNORM 0x0C2
+#define BRW_SURFACEFORMAT_R10G10B10A2_UNORM_SRGB 0x0C3
+#define BRW_SURFACEFORMAT_R10G10B10A2_UINT 0x0C4
+#define BRW_SURFACEFORMAT_R10G10B10_SNORM_A2_UNORM 0x0C5
+#define BRW_SURFACEFORMAT_R8G8B8A8_UNORM 0x0C7
+#define BRW_SURFACEFORMAT_R8G8B8A8_UNORM_SRGB 0x0C8
+#define BRW_SURFACEFORMAT_R8G8B8A8_SNORM 0x0C9
+#define BRW_SURFACEFORMAT_R8G8B8A8_SINT 0x0CA
+#define BRW_SURFACEFORMAT_R8G8B8A8_UINT 0x0CB
+#define BRW_SURFACEFORMAT_R16G16_UNORM 0x0CC
+#define BRW_SURFACEFORMAT_R16G16_SNORM 0x0CD
+#define BRW_SURFACEFORMAT_R16G16_SINT 0x0CE
+#define BRW_SURFACEFORMAT_R16G16_UINT 0x0CF
+#define BRW_SURFACEFORMAT_R16G16_FLOAT 0x0D0
+#define BRW_SURFACEFORMAT_B10G10R10A2_UNORM 0x0D1
+#define BRW_SURFACEFORMAT_B10G10R10A2_UNORM_SRGB 0x0D2
+#define BRW_SURFACEFORMAT_R11G11B10_FLOAT 0x0D3
+#define BRW_SURFACEFORMAT_R32_SINT 0x0D6
+#define BRW_SURFACEFORMAT_R32_UINT 0x0D7
+#define BRW_SURFACEFORMAT_R32_FLOAT 0x0D8
+#define BRW_SURFACEFORMAT_R24_UNORM_X8_TYPELESS 0x0D9
+#define BRW_SURFACEFORMAT_X24_TYPELESS_G8_UINT 0x0DA
+#define BRW_SURFACEFORMAT_L16A16_UNORM 0x0DF
+#define BRW_SURFACEFORMAT_I24X8_UNORM 0x0E0
+#define BRW_SURFACEFORMAT_L24X8_UNORM 0x0E1
+#define BRW_SURFACEFORMAT_A24X8_UNORM 0x0E2
+#define BRW_SURFACEFORMAT_I32_FLOAT 0x0E3
+#define BRW_SURFACEFORMAT_L32_FLOAT 0x0E4
+#define BRW_SURFACEFORMAT_A32_FLOAT 0x0E5
+#define BRW_SURFACEFORMAT_B8G8R8X8_UNORM 0x0E9
+#define BRW_SURFACEFORMAT_B8G8R8X8_UNORM_SRGB 0x0EA
+#define BRW_SURFACEFORMAT_R8G8B8X8_UNORM 0x0EB
+#define BRW_SURFACEFORMAT_R8G8B8X8_UNORM_SRGB 0x0EC
+#define BRW_SURFACEFORMAT_R9G9B9E5_SHAREDEXP 0x0ED
+#define BRW_SURFACEFORMAT_B10G10R10X2_UNORM 0x0EE
+#define BRW_SURFACEFORMAT_L16A16_FLOAT 0x0F0
+#define BRW_SURFACEFORMAT_R32_UNORM 0x0F1
+#define BRW_SURFACEFORMAT_R32_SNORM 0x0F2
+#define BRW_SURFACEFORMAT_R10G10B10X2_USCALED 0x0F3
+#define BRW_SURFACEFORMAT_R8G8B8A8_SSCALED 0x0F4
+#define BRW_SURFACEFORMAT_R8G8B8A8_USCALED 0x0F5
+#define BRW_SURFACEFORMAT_R16G16_SSCALED 0x0F6
+#define BRW_SURFACEFORMAT_R16G16_USCALED 0x0F7
+#define BRW_SURFACEFORMAT_R32_SSCALED 0x0F8
+#define BRW_SURFACEFORMAT_R32_USCALED 0x0F9
+#define BRW_SURFACEFORMAT_B5G6R5_UNORM 0x100
+#define BRW_SURFACEFORMAT_B5G6R5_UNORM_SRGB 0x101
+#define BRW_SURFACEFORMAT_B5G5R5A1_UNORM 0x102
+#define BRW_SURFACEFORMAT_B5G5R5A1_UNORM_SRGB 0x103
+#define BRW_SURFACEFORMAT_B4G4R4A4_UNORM 0x104
+#define BRW_SURFACEFORMAT_B4G4R4A4_UNORM_SRGB 0x105
+#define BRW_SURFACEFORMAT_R8G8_UNORM 0x106
+#define BRW_SURFACEFORMAT_R8G8_SNORM 0x107
+#define BRW_SURFACEFORMAT_R8G8_SINT 0x108
+#define BRW_SURFACEFORMAT_R8G8_UINT 0x109
+#define BRW_SURFACEFORMAT_R16_UNORM 0x10A
+#define BRW_SURFACEFORMAT_R16_SNORM 0x10B
+#define BRW_SURFACEFORMAT_R16_SINT 0x10C
+#define BRW_SURFACEFORMAT_R16_UINT 0x10D
+#define BRW_SURFACEFORMAT_R16_FLOAT 0x10E
+#define BRW_SURFACEFORMAT_I16_UNORM 0x111
+#define BRW_SURFACEFORMAT_L16_UNORM 0x112
+#define BRW_SURFACEFORMAT_A16_UNORM 0x113
+#define BRW_SURFACEFORMAT_L8A8_UNORM 0x114
+#define BRW_SURFACEFORMAT_I16_FLOAT 0x115
+#define BRW_SURFACEFORMAT_L16_FLOAT 0x116
+#define BRW_SURFACEFORMAT_A16_FLOAT 0x117
+#define BRW_SURFACEFORMAT_R5G5_SNORM_B6_UNORM 0x119
+#define BRW_SURFACEFORMAT_B5G5R5X1_UNORM 0x11A
+#define BRW_SURFACEFORMAT_B5G5R5X1_UNORM_SRGB 0x11B
+#define BRW_SURFACEFORMAT_R8G8_SSCALED 0x11C
+#define BRW_SURFACEFORMAT_R8G8_USCALED 0x11D
+#define BRW_SURFACEFORMAT_R16_SSCALED 0x11E
+#define BRW_SURFACEFORMAT_R16_USCALED 0x11F
+#define BRW_SURFACEFORMAT_R8_UNORM 0x140
+#define BRW_SURFACEFORMAT_R8_SNORM 0x141
+#define BRW_SURFACEFORMAT_R8_SINT 0x142
+#define BRW_SURFACEFORMAT_R8_UINT 0x143
+#define BRW_SURFACEFORMAT_A8_UNORM 0x144
+#define BRW_SURFACEFORMAT_I8_UNORM 0x145
+#define BRW_SURFACEFORMAT_L8_UNORM 0x146
+#define BRW_SURFACEFORMAT_P4A4_UNORM 0x147
+#define BRW_SURFACEFORMAT_A4P4_UNORM 0x148
+#define BRW_SURFACEFORMAT_R8_SSCALED 0x149
+#define BRW_SURFACEFORMAT_R8_USCALED 0x14A
+#define BRW_SURFACEFORMAT_R1_UINT 0x181
+#define BRW_SURFACEFORMAT_YCRCB_NORMAL 0x182
+#define BRW_SURFACEFORMAT_YCRCB_SWAPUVY 0x183
+#define BRW_SURFACEFORMAT_BC1_UNORM 0x186
+#define BRW_SURFACEFORMAT_BC2_UNORM 0x187
+#define BRW_SURFACEFORMAT_BC3_UNORM 0x188
+#define BRW_SURFACEFORMAT_BC4_UNORM 0x189
+#define BRW_SURFACEFORMAT_BC5_UNORM 0x18A
+#define BRW_SURFACEFORMAT_BC1_UNORM_SRGB 0x18B
+#define BRW_SURFACEFORMAT_BC2_UNORM_SRGB 0x18C
+#define BRW_SURFACEFORMAT_BC3_UNORM_SRGB 0x18D
+#define BRW_SURFACEFORMAT_MONO8 0x18E
+#define BRW_SURFACEFORMAT_YCRCB_SWAPUV 0x18F
+#define BRW_SURFACEFORMAT_YCRCB_SWAPY 0x190
+#define BRW_SURFACEFORMAT_DXT1_RGB 0x191
+#define BRW_SURFACEFORMAT_FXT1 0x192
+#define BRW_SURFACEFORMAT_R8G8B8_UNORM 0x193
+#define BRW_SURFACEFORMAT_R8G8B8_SNORM 0x194
+#define BRW_SURFACEFORMAT_R8G8B8_SSCALED 0x195
+#define BRW_SURFACEFORMAT_R8G8B8_USCALED 0x196
+#define BRW_SURFACEFORMAT_R64G64B64A64_FLOAT 0x197
+#define BRW_SURFACEFORMAT_R64G64B64_FLOAT 0x198
+#define BRW_SURFACEFORMAT_BC4_SNORM 0x199
+#define BRW_SURFACEFORMAT_BC5_SNORM 0x19A
+#define BRW_SURFACEFORMAT_R16G16B16_UNORM 0x19C
+#define BRW_SURFACEFORMAT_R16G16B16_SNORM 0x19D
+#define BRW_SURFACEFORMAT_R16G16B16_SSCALED 0x19E
+#define BRW_SURFACEFORMAT_R16G16B16_USCALED 0x19F
+
+#define BRW_SURFACERETURNFORMAT_FLOAT32 0
+#define BRW_SURFACERETURNFORMAT_S1 1
+
+#define BRW_SURFACE_1D 0
+#define BRW_SURFACE_2D 1
+#define BRW_SURFACE_3D 2
+#define BRW_SURFACE_CUBE 3
+#define BRW_SURFACE_BUFFER 4
+#define BRW_SURFACE_NULL 7
+
+#define BRW_TEXCOORDMODE_WRAP 0
+#define BRW_TEXCOORDMODE_MIRROR 1
+#define BRW_TEXCOORDMODE_CLAMP 2
+#define BRW_TEXCOORDMODE_CUBE 3
+#define BRW_TEXCOORDMODE_CLAMP_BORDER 4
+#define BRW_TEXCOORDMODE_MIRROR_ONCE 5
+
+#define BRW_THREAD_PRIORITY_NORMAL 0
+#define BRW_THREAD_PRIORITY_HIGH 1
+
+#define BRW_TILEWALK_XMAJOR 0
+#define BRW_TILEWALK_YMAJOR 1
+
+#define BRW_VERTEX_SUBPIXEL_PRECISION_8BITS 0
+#define BRW_VERTEX_SUBPIXEL_PRECISION_4BITS 1
+
+#define BRW_VERTEXBUFFER_ACCESS_VERTEXDATA 0
+#define BRW_VERTEXBUFFER_ACCESS_INSTANCEDATA 1
+
+#define BRW_VFCOMPONENT_NOSTORE 0
+#define BRW_VFCOMPONENT_STORE_SRC 1
+#define BRW_VFCOMPONENT_STORE_0 2
+#define BRW_VFCOMPONENT_STORE_1_FLT 3
+#define BRW_VFCOMPONENT_STORE_1_INT 4
+#define BRW_VFCOMPONENT_STORE_VID 5
+#define BRW_VFCOMPONENT_STORE_IID 6
+#define BRW_VFCOMPONENT_STORE_PID 7
+
+
+
+/* Execution Unit (EU) defines
+ */
+
+#define BRW_ALIGN_1 0
+#define BRW_ALIGN_16 1
+
+#define BRW_ADDRESS_DIRECT 0
+#define BRW_ADDRESS_REGISTER_INDIRECT_REGISTER 1
+
+#define BRW_CHANNEL_X 0
+#define BRW_CHANNEL_Y 1
+#define BRW_CHANNEL_Z 2
+#define BRW_CHANNEL_W 3
+
+#define BRW_COMPRESSION_NONE 0
+#define BRW_COMPRESSION_2NDHALF 1
+#define BRW_COMPRESSION_COMPRESSED 2
+
+#define BRW_CONDITIONAL_NONE 0
+#define BRW_CONDITIONAL_Z 1
+#define BRW_CONDITIONAL_NZ 2
+#define BRW_CONDITIONAL_EQ 1 /* Z */
+#define BRW_CONDITIONAL_NEQ 2 /* NZ */
+#define BRW_CONDITIONAL_G 3
+#define BRW_CONDITIONAL_GE 4
+#define BRW_CONDITIONAL_L 5
+#define BRW_CONDITIONAL_LE 6
+#define BRW_CONDITIONAL_C 7
+#define BRW_CONDITIONAL_O 8
+
+#define BRW_DEBUG_NONE 0
+#define BRW_DEBUG_BREAKPOINT 1
+
+#define BRW_DEPENDENCY_NORMAL 0
+#define BRW_DEPENDENCY_NOTCLEARED 1
+#define BRW_DEPENDENCY_NOTCHECKED 2
+#define BRW_DEPENDENCY_DISABLE 3
+
+#define BRW_EXECUTE_1 0
+#define BRW_EXECUTE_2 1
+#define BRW_EXECUTE_4 2
+#define BRW_EXECUTE_8 3
+#define BRW_EXECUTE_16 4
+#define BRW_EXECUTE_32 5
+
+#define BRW_HORIZONTAL_STRIDE_0 0
+#define BRW_HORIZONTAL_STRIDE_1 1
+#define BRW_HORIZONTAL_STRIDE_2 2
+#define BRW_HORIZONTAL_STRIDE_4 3
+
+#define BRW_INSTRUCTION_NORMAL 0
+#define BRW_INSTRUCTION_SATURATE 1
+
+#define BRW_MASK_ENABLE 0
+#define BRW_MASK_DISABLE 1
+
+#define BRW_OPCODE_MOV 1
+#define BRW_OPCODE_SEL 2
+#define BRW_OPCODE_NOT 4
+#define BRW_OPCODE_AND 5
+#define BRW_OPCODE_OR 6
+#define BRW_OPCODE_XOR 7
+#define BRW_OPCODE_SHR 8
+#define BRW_OPCODE_SHL 9
+#define BRW_OPCODE_RSR 10
+#define BRW_OPCODE_RSL 11
+#define BRW_OPCODE_ASR 12
+#define BRW_OPCODE_CMP 16
+#define BRW_OPCODE_JMPI 32
+#define BRW_OPCODE_IF 34
+#define BRW_OPCODE_IFF 35
+#define BRW_OPCODE_ELSE 36
+#define BRW_OPCODE_ENDIF 37
+#define BRW_OPCODE_DO 38
+#define BRW_OPCODE_WHILE 39
+#define BRW_OPCODE_BREAK 40
+#define BRW_OPCODE_CONTINUE 41
+#define BRW_OPCODE_HALT 42
+#define BRW_OPCODE_MSAVE 44
+#define BRW_OPCODE_MRESTORE 45
+#define BRW_OPCODE_PUSH 46
+#define BRW_OPCODE_POP 47
+#define BRW_OPCODE_WAIT 48
+#define BRW_OPCODE_SEND 49
+#define BRW_OPCODE_ADD 64
+#define BRW_OPCODE_MUL 65
+#define BRW_OPCODE_AVG 66
+#define BRW_OPCODE_FRC 67
+#define BRW_OPCODE_RNDU 68
+#define BRW_OPCODE_RNDD 69
+#define BRW_OPCODE_RNDE 70
+#define BRW_OPCODE_RNDZ 71
+#define BRW_OPCODE_MAC 72
+#define BRW_OPCODE_MACH 73
+#define BRW_OPCODE_LZD 74
+#define BRW_OPCODE_SAD2 80
+#define BRW_OPCODE_SADA2 81
+#define BRW_OPCODE_DP4 84
+#define BRW_OPCODE_DPH 85
+#define BRW_OPCODE_DP3 86
+#define BRW_OPCODE_DP2 87
+#define BRW_OPCODE_DPA2 88
+#define BRW_OPCODE_LINE 89
+#define BRW_OPCODE_NOP 126
+
+#define BRW_PREDICATE_NONE 0
+#define BRW_PREDICATE_NORMAL 1
+#define BRW_PREDICATE_ALIGN1_ANYV 2
+#define BRW_PREDICATE_ALIGN1_ALLV 3
+#define BRW_PREDICATE_ALIGN1_ANY2H 4
+#define BRW_PREDICATE_ALIGN1_ALL2H 5
+#define BRW_PREDICATE_ALIGN1_ANY4H 6
+#define BRW_PREDICATE_ALIGN1_ALL4H 7
+#define BRW_PREDICATE_ALIGN1_ANY8H 8
+#define BRW_PREDICATE_ALIGN1_ALL8H 9
+#define BRW_PREDICATE_ALIGN1_ANY16H 10
+#define BRW_PREDICATE_ALIGN1_ALL16H 11
+#define BRW_PREDICATE_ALIGN16_REPLICATE_X 2
+#define BRW_PREDICATE_ALIGN16_REPLICATE_Y 3
+#define BRW_PREDICATE_ALIGN16_REPLICATE_Z 4
+#define BRW_PREDICATE_ALIGN16_REPLICATE_W 5
+#define BRW_PREDICATE_ALIGN16_ANY4H 6
+#define BRW_PREDICATE_ALIGN16_ALL4H 7
+
+#define BRW_ARCHITECTURE_REGISTER_FILE 0
+#define BRW_GENERAL_REGISTER_FILE 1
+#define BRW_MESSAGE_REGISTER_FILE 2
+#define BRW_IMMEDIATE_VALUE 3
+
+#define BRW_REGISTER_TYPE_UD 0
+#define BRW_REGISTER_TYPE_D 1
+#define BRW_REGISTER_TYPE_UW 2
+#define BRW_REGISTER_TYPE_W 3
+#define BRW_REGISTER_TYPE_UB 4
+#define BRW_REGISTER_TYPE_B 5
+#define BRW_REGISTER_TYPE_VF 5 /* packed float vector, immediates only? */
+#define BRW_REGISTER_TYPE_HF 6
+#define BRW_REGISTER_TYPE_V 6 /* packed int vector, immediates only, uword dest only */
+#define BRW_REGISTER_TYPE_F 7
+
+#define BRW_ARF_NULL 0x00
+#define BRW_ARF_ADDRESS 0x10
+#define BRW_ARF_ACCUMULATOR 0x20
+#define BRW_ARF_FLAG 0x30
+#define BRW_ARF_MASK 0x40
+#define BRW_ARF_MASK_STACK 0x50
+#define BRW_ARF_MASK_STACK_DEPTH 0x60
+#define BRW_ARF_STATE 0x70
+#define BRW_ARF_CONTROL 0x80
+#define BRW_ARF_NOTIFICATION_COUNT 0x90
+#define BRW_ARF_IP 0xA0
+
+#define BRW_AMASK 0
+#define BRW_IMASK 1
+#define BRW_LMASK 2
+#define BRW_CMASK 3
+
+
+
+#define BRW_THREAD_NORMAL 0
+#define BRW_THREAD_ATOMIC 1
+#define BRW_THREAD_SWITCH 2
+
+#define BRW_VERTICAL_STRIDE_0 0
+#define BRW_VERTICAL_STRIDE_1 1
+#define BRW_VERTICAL_STRIDE_2 2
+#define BRW_VERTICAL_STRIDE_4 3
+#define BRW_VERTICAL_STRIDE_8 4
+#define BRW_VERTICAL_STRIDE_16 5
+#define BRW_VERTICAL_STRIDE_32 6
+#define BRW_VERTICAL_STRIDE_64 7
+#define BRW_VERTICAL_STRIDE_128 8
+#define BRW_VERTICAL_STRIDE_256 9
+#define BRW_VERTICAL_STRIDE_ONE_DIMENSIONAL 0xF
+
+#define BRW_WIDTH_1 0
+#define BRW_WIDTH_2 1
+#define BRW_WIDTH_4 2
+#define BRW_WIDTH_8 3
+#define BRW_WIDTH_16 4
+
+#define BRW_STATELESS_BUFFER_BOUNDARY_1K 0
+#define BRW_STATELESS_BUFFER_BOUNDARY_2K 1
+#define BRW_STATELESS_BUFFER_BOUNDARY_4K 2
+#define BRW_STATELESS_BUFFER_BOUNDARY_8K 3
+#define BRW_STATELESS_BUFFER_BOUNDARY_16K 4
+#define BRW_STATELESS_BUFFER_BOUNDARY_32K 5
+#define BRW_STATELESS_BUFFER_BOUNDARY_64K 6
+#define BRW_STATELESS_BUFFER_BOUNDARY_128K 7
+#define BRW_STATELESS_BUFFER_BOUNDARY_256K 8
+#define BRW_STATELESS_BUFFER_BOUNDARY_512K 9
+#define BRW_STATELESS_BUFFER_BOUNDARY_1M 10
+#define BRW_STATELESS_BUFFER_BOUNDARY_2M 11
+
+#define BRW_POLYGON_FACING_FRONT 0
+#define BRW_POLYGON_FACING_BACK 1
+
+#define BRW_MESSAGE_TARGET_NULL 0
+#define BRW_MESSAGE_TARGET_MATH 1
+#define BRW_MESSAGE_TARGET_SAMPLER 2
+#define BRW_MESSAGE_TARGET_GATEWAY 3
+#define BRW_MESSAGE_TARGET_DATAPORT_READ 4
+#define BRW_MESSAGE_TARGET_DATAPORT_WRITE 5
+#define BRW_MESSAGE_TARGET_URB 6
+#define BRW_MESSAGE_TARGET_THREAD_SPAWNER 7
+
+#define BRW_SAMPLER_RETURN_FORMAT_FLOAT32 0
+#define BRW_SAMPLER_RETURN_FORMAT_UINT32 2
+#define BRW_SAMPLER_RETURN_FORMAT_SINT32 3
+
+#define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE 0
+#define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE 0
+#define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS 0
+#define BRW_SAMPLER_MESSAGE_SIMD8_KILLPIX 1
+#define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD 1
+#define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD 1
+#define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS 2
+#define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_GRADIENTS 2
+#define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_COMPARE 0
+#define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE 2
+#define BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO 2
+#define BRW_SAMPLER_MESSAGE_SIMD8_RESINFO 2
+#define BRW_SAMPLER_MESSAGE_SIMD16_RESINFO 2
+#define BRW_SAMPLER_MESSAGE_SIMD4X2_LD 3
+#define BRW_SAMPLER_MESSAGE_SIMD8_LD 3
+#define BRW_SAMPLER_MESSAGE_SIMD16_LD 3
+
+#define BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW 0
+#define BRW_DATAPORT_OWORD_BLOCK_1_OWORDHIGH 1
+#define BRW_DATAPORT_OWORD_BLOCK_2_OWORDS 2
+#define BRW_DATAPORT_OWORD_BLOCK_4_OWORDS 3
+#define BRW_DATAPORT_OWORD_BLOCK_8_OWORDS 4
+
+#define BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD 0
+#define BRW_DATAPORT_OWORD_DUAL_BLOCK_4OWORDS 2
+
+#define BRW_DATAPORT_DWORD_SCATTERED_BLOCK_8DWORDS 2
+#define BRW_DATAPORT_DWORD_SCATTERED_BLOCK_16DWORDS 3
+
+#define BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ 0
+#define BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ 1
+#define BRW_DATAPORT_READ_MESSAGE_DWORD_BLOCK_READ 2
+#define BRW_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ 3
+
+#define BRW_DATAPORT_READ_TARGET_DATA_CACHE 0
+#define BRW_DATAPORT_READ_TARGET_RENDER_CACHE 1
+#define BRW_DATAPORT_READ_TARGET_SAMPLER_CACHE 2
+
+#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE 0
+#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED 1
+#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01 2
+#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23 3
+#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01 4
+
+#define BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE 0
+#define BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE 1
+#define BRW_DATAPORT_WRITE_MESSAGE_DWORD_BLOCK_WRITE 2
+#define BRW_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE 3
+#define BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE 4
+#define BRW_DATAPORT_WRITE_MESSAGE_STREAMED_VERTEX_BUFFER_WRITE 5
+#define BRW_DATAPORT_WRITE_MESSAGE_FLUSH_RENDER_CACHE 7
+
+#define BRW_MATH_FUNCTION_INV 1
+#define BRW_MATH_FUNCTION_LOG 2
+#define BRW_MATH_FUNCTION_EXP 3
+#define BRW_MATH_FUNCTION_SQRT 4
+#define BRW_MATH_FUNCTION_RSQ 5
+#define BRW_MATH_FUNCTION_SIN 6 /* was 7 */
+#define BRW_MATH_FUNCTION_COS 7 /* was 8 */
+#define BRW_MATH_FUNCTION_SINCOS 8 /* was 6 */
+#define BRW_MATH_FUNCTION_TAN 9
+#define BRW_MATH_FUNCTION_POW 10
+#define BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER 11
+#define BRW_MATH_FUNCTION_INT_DIV_QUOTIENT 12
+#define BRW_MATH_FUNCTION_INT_DIV_REMAINDER 13
+
+#define BRW_MATH_INTEGER_UNSIGNED 0
+#define BRW_MATH_INTEGER_SIGNED 1
+
+#define BRW_MATH_PRECISION_FULL 0
+#define BRW_MATH_PRECISION_PARTIAL 1
+
+#define BRW_MATH_SATURATE_NONE 0
+#define BRW_MATH_SATURATE_SATURATE 1
+
+#define BRW_MATH_DATA_VECTOR 0
+#define BRW_MATH_DATA_SCALAR 1
+
+#define BRW_URB_OPCODE_WRITE 0
+
+#define BRW_URB_SWIZZLE_NONE 0
+#define BRW_URB_SWIZZLE_INTERLEAVE 1
+#define BRW_URB_SWIZZLE_TRANSPOSE 2
+
+#define BRW_SCRATCH_SPACE_SIZE_1K 0
+#define BRW_SCRATCH_SPACE_SIZE_2K 1
+#define BRW_SCRATCH_SPACE_SIZE_4K 2
+#define BRW_SCRATCH_SPACE_SIZE_8K 3
+#define BRW_SCRATCH_SPACE_SIZE_16K 4
+#define BRW_SCRATCH_SPACE_SIZE_32K 5
+#define BRW_SCRATCH_SPACE_SIZE_64K 6
+#define BRW_SCRATCH_SPACE_SIZE_128K 7
+#define BRW_SCRATCH_SPACE_SIZE_256K 8
+#define BRW_SCRATCH_SPACE_SIZE_512K 9
+#define BRW_SCRATCH_SPACE_SIZE_1M 10
+#define BRW_SCRATCH_SPACE_SIZE_2M 11
+
+
+
+
+#define CMD_URB_FENCE 0x6000
+#define CMD_CONST_BUFFER_STATE 0x6001
+#define CMD_CONST_BUFFER 0x6002
+
+#define CMD_STATE_BASE_ADDRESS 0x6101
+#define CMD_STATE_INSN_POINTER 0x6102
+#define CMD_PIPELINE_SELECT 0x6104
+
+#define CMD_PIPELINED_STATE_POINTERS 0x7800
+#define CMD_BINDING_TABLE_PTRS 0x7801
+#define CMD_VERTEX_BUFFER 0x7808
+#define CMD_VERTEX_ELEMENT 0x7809
+#define CMD_INDEX_BUFFER 0x780a
+#define CMD_VF_STATISTICS 0x780b
+
+#define CMD_DRAW_RECT 0x7900
+#define CMD_BLEND_CONSTANT_COLOR 0x7901
+#define CMD_CHROMA_KEY 0x7904
+#define CMD_DEPTH_BUFFER 0x7905
+#define CMD_POLY_STIPPLE_OFFSET 0x7906
+#define CMD_POLY_STIPPLE_PATTERN 0x7907
+#define CMD_LINE_STIPPLE_PATTERN 0x7908
+#define CMD_GLOBAL_DEPTH_OFFSET_CLAMP 0x7909
+
+#define CMD_PIPE_CONTROL 0x7a00
+
+#define CMD_3D_PRIM 0x7b00
+
+#define CMD_MI_FLUSH 0x0200
+
+
+/* Various values from the R0 vertex header:
+ */
+#define R02_PRIM_END 0x1
+#define R02_PRIM_START 0x2
+
+
+
+#endif
diff --git a/src/gallium/drivers/i965simple/brw_draw.c b/src/gallium/drivers/i965simple/brw_draw.c
new file mode 100644
index 0000000000..49d80cb41c
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_draw.c
@@ -0,0 +1,226 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <stdlib.h>
+
+#include "brw_batch.h"
+#include "brw_draw.h"
+#include "brw_defines.h"
+#include "brw_context.h"
+#include "brw_state.h"
+
+#include "pipe/p_context.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "util/u_prim.h"
+
+static unsigned hw_prim[PIPE_PRIM_POLYGON+1] = {
+ _3DPRIM_POINTLIST,
+ _3DPRIM_LINELIST,
+ _3DPRIM_LINELOOP,
+ _3DPRIM_LINESTRIP,
+ _3DPRIM_TRILIST,
+ _3DPRIM_TRISTRIP,
+ _3DPRIM_TRIFAN,
+ _3DPRIM_QUADLIST,
+ _3DPRIM_QUADSTRIP,
+ _3DPRIM_POLYGON
+};
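The table is indexed directly by the gallium primitive enum, so it depends on
PIPE_PRIM_POINTS through PIPE_PRIM_POLYGON being numbered densely from 0 to 9
in this order; for example:

   /* Relies on the gallium enum ordering, e.g.:
    *   hw_prim[PIPE_PRIM_TRIANGLES]  == _3DPRIM_TRILIST
    *   hw_prim[PIPE_PRIM_QUAD_STRIP] == _3DPRIM_QUADSTRIP
    */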
+
+
+/* When the primitive changes, set a state bit and re-validate. Not
+ * the nicest approach; it would be better to make all the programs
+ * immune to the active primitive (ie. able to cope with all
+ * possibilities), but that may not be realistic.
+ */
+static void brw_set_prim(struct brw_context *brw, int prim)
+{
+ PRINT("PRIM: %d\n", prim);
+
+ /* Slight optimization to avoid the GS program when not needed:
+ */
+ if (prim == PIPE_PRIM_QUAD_STRIP &&
+ brw->attribs.Raster->flatshade &&
+ brw->attribs.Raster->fill_cw == PIPE_POLYGON_MODE_FILL &&
+ brw->attribs.Raster->fill_ccw == PIPE_POLYGON_MODE_FILL)
+ prim = PIPE_PRIM_TRIANGLE_STRIP;
+
+ if (prim != brw->primitive) {
+ brw->primitive = prim;
+ brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
+
+ if (u_reduced_prim(prim) != brw->reduced_primitive) {
+ brw->reduced_primitive = u_reduced_prim(prim);
+ brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
+ }
+
+ brw_validate_state(brw);
+ }
+
+}
+
+
+static unsigned trim(int prim, unsigned length)
+{
+ if (prim == PIPE_PRIM_QUAD_STRIP)
+ return length > 3 ? (length - length % 2) : 0;
+ else if (prim == PIPE_PRIM_QUADS)
+ return length - length % 4;
+ else
+ return length;
+}
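A few concrete values show what trim() does to the incomplete-primitive cases
(numbers chosen for illustration):

   /* For example:
    *   trim(PIPE_PRIM_QUAD_STRIP, 7) == 6   drop the odd trailing vertex
    *   trim(PIPE_PRIM_QUAD_STRIP, 3) == 0   too short for even one quad
    *   trim(PIPE_PRIM_QUADS, 10)     == 8   keep whole quads only
    *   trim(PIPE_PRIM_TRIANGLES, 5)  == 5   other primitives pass through
    */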
+
+
+
+static boolean brw_emit_prim( struct brw_context *brw,
+ boolean indexed,
+ unsigned start,
+ unsigned count )
+
+{
+ struct brw_3d_primitive prim_packet;
+
+ if (BRW_DEBUG & DEBUG_PRIMS)
+ PRINT("PRIM: %d %d %d\n", brw->primitive, start, count);
+
+ prim_packet.header.opcode = CMD_3D_PRIM;
+ prim_packet.header.length = sizeof(prim_packet)/4 - 2;
+ prim_packet.header.pad = 0;
+ prim_packet.header.topology = hw_prim[brw->primitive];
+ prim_packet.header.indexed = indexed;
+
+ prim_packet.verts_per_instance = trim(brw->primitive, count);
+ prim_packet.start_vert_location = start;
+ prim_packet.instance_count = 1;
+ prim_packet.start_instance_location = 0;
+ prim_packet.base_vert_location = 0;
+
+ if (prim_packet.verts_per_instance == 0)
+ return TRUE;
+
+ return brw_batchbuffer_data( brw->winsys,
+ &prim_packet,
+ sizeof(prim_packet) );
+}
+
+
+/* May fail if out of video memory for texture or vbo upload, or on
+ * fallback conditions.
+ */
+static boolean brw_try_draw_elements( struct pipe_context *pipe,
+ struct pipe_buffer *index_buffer,
+ unsigned index_size,
+ unsigned mode,
+ unsigned start,
+ unsigned count )
+{
+ struct brw_context *brw = brw_context(pipe);
+
+ /* Set the first primitive ahead of validate_state:
+ */
+ brw_set_prim(brw, mode);
+
+ /* Upload index, vertex data:
+ */
+ if (index_buffer &&
+ !brw_upload_indices( brw, index_buffer, index_size, start, count ))
+ return FALSE;
+
+ if (!brw_upload_vertex_buffers(brw))
+ return FALSE;
+
+ if (!brw_upload_vertex_elements( brw ))
+ return FALSE;
+
+ /* XXX: Need to separate validate and upload of state.
+ */
+ if (brw->state.dirty.brw)
+ brw_validate_state( brw );
+
+ if (!brw_emit_prim(brw, index_buffer != NULL,
+ start, count))
+ return FALSE;
+
+ return TRUE;
+}
+
+
+
+static boolean brw_draw_elements( struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned mode,
+ unsigned start,
+ unsigned count )
+{
+ if (!brw_try_draw_elements( pipe,
+ indexBuffer,
+ indexSize,
+ mode, start, count ))
+ {
+ /* flush ? */
+
+ if (!brw_try_draw_elements( pipe,
+ indexBuffer,
+ indexSize,
+ mode, start,
+ count )) {
+ assert(0);
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+
+
+static boolean brw_draw_arrays( struct pipe_context *pipe,
+ unsigned mode,
+ unsigned start,
+ unsigned count )
+{
+ if (!brw_try_draw_elements( pipe, NULL, 0, mode, start, count )) {
+ /* flush ? */
+
+ if (!brw_try_draw_elements( pipe, NULL, 0, mode, start, count )) {
+ assert(0);
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+
+
+void brw_init_draw_functions( struct brw_context *brw )
+{
+ brw->pipe.draw_arrays = brw_draw_arrays;
+ brw->pipe.draw_elements = brw_draw_elements;
+}
+
+
diff --git a/src/gallium/drivers/i965simple/brw_draw.h b/src/gallium/drivers/i965simple/brw_draw.h
new file mode 100644
index 0000000000..62fe0d5d0e
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_draw.h
@@ -0,0 +1,55 @@
+ /**************************************************************************
+ *
+ * Copyright 2005 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef BRW_DRAW_H
+#define BRW_DRAW_H
+
+#include "pipe/p_context.h"
+
+struct brw_context;
+
+
+
+void brw_init_draw_functions( struct brw_context *brw );
+
+
+boolean brw_upload_vertices( struct brw_context *brw,
+ unsigned min_index,
+ unsigned max_index );
+
+boolean brw_upload_indices(struct brw_context *brw,
+ const struct pipe_buffer *index_buffer,
+ int ib_size, int start, int count);
+
+boolean brw_upload_vertex_buffers( struct brw_context *brw );
+boolean brw_upload_vertex_elements( struct brw_context *brw );
+
+unsigned brw_translate_surface_format( unsigned id );
+
+
+
+#endif
diff --git a/src/gallium/drivers/i965simple/brw_draw_upload.c b/src/gallium/drivers/i965simple/brw_draw_upload.c
new file mode 100644
index 0000000000..2d9ca3f2ea
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_draw_upload.c
@@ -0,0 +1,300 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <stdlib.h>
+
+#include "brw_batch.h"
+#include "brw_draw.h"
+#include "brw_defines.h"
+#include "brw_context.h"
+#include "brw_state.h"
+
+
+struct brw_array_state {
+ union header_union header;
+
+ struct {
+ union {
+ struct {
+ unsigned pitch:11;
+ unsigned pad:15;
+ unsigned access_type:1;
+ unsigned vb_index:5;
+ } bits;
+ unsigned dword;
+ } vb0;
+
+ struct pipe_buffer *buffer;
+ unsigned offset;
+
+ unsigned max_index;
+ unsigned instance_data_step_rate;
+
+ } vb[BRW_VBP_MAX];
+};
+
+
+
+unsigned brw_translate_surface_format( unsigned id )
+{
+ switch (id) {
+ case PIPE_FORMAT_R64_FLOAT:
+ return BRW_SURFACEFORMAT_R64_FLOAT;
+ case PIPE_FORMAT_R64G64_FLOAT:
+ return BRW_SURFACEFORMAT_R64G64_FLOAT;
+ case PIPE_FORMAT_R64G64B64_FLOAT:
+ return BRW_SURFACEFORMAT_R64G64B64_FLOAT;
+ case PIPE_FORMAT_R64G64B64A64_FLOAT:
+ return BRW_SURFACEFORMAT_R64G64B64A64_FLOAT;
+
+ case PIPE_FORMAT_R32_FLOAT:
+ return BRW_SURFACEFORMAT_R32_FLOAT;
+ case PIPE_FORMAT_R32G32_FLOAT:
+ return BRW_SURFACEFORMAT_R32G32_FLOAT;
+ case PIPE_FORMAT_R32G32B32_FLOAT:
+ return BRW_SURFACEFORMAT_R32G32B32_FLOAT;
+ case PIPE_FORMAT_R32G32B32A32_FLOAT:
+ return BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
+
+ case PIPE_FORMAT_R32_UNORM:
+ return BRW_SURFACEFORMAT_R32_UNORM;
+ case PIPE_FORMAT_R32G32_UNORM:
+ return BRW_SURFACEFORMAT_R32G32_UNORM;
+ case PIPE_FORMAT_R32G32B32_UNORM:
+ return BRW_SURFACEFORMAT_R32G32B32_UNORM;
+ case PIPE_FORMAT_R32G32B32A32_UNORM:
+ return BRW_SURFACEFORMAT_R32G32B32A32_UNORM;
+
+ case PIPE_FORMAT_R32_USCALED:
+ return BRW_SURFACEFORMAT_R32_USCALED;
+ case PIPE_FORMAT_R32G32_USCALED:
+ return BRW_SURFACEFORMAT_R32G32_USCALED;
+ case PIPE_FORMAT_R32G32B32_USCALED:
+ return BRW_SURFACEFORMAT_R32G32B32_USCALED;
+ case PIPE_FORMAT_R32G32B32A32_USCALED:
+ return BRW_SURFACEFORMAT_R32G32B32A32_USCALED;
+
+ case PIPE_FORMAT_R32_SNORM:
+ return BRW_SURFACEFORMAT_R32_SNORM;
+ case PIPE_FORMAT_R32G32_SNORM:
+ return BRW_SURFACEFORMAT_R32G32_SNORM;
+ case PIPE_FORMAT_R32G32B32_SNORM:
+ return BRW_SURFACEFORMAT_R32G32B32_SNORM;
+ case PIPE_FORMAT_R32G32B32A32_SNORM:
+ return BRW_SURFACEFORMAT_R32G32B32A32_SNORM;
+
+ case PIPE_FORMAT_R32_SSCALED:
+ return BRW_SURFACEFORMAT_R32_SSCALED;
+ case PIPE_FORMAT_R32G32_SSCALED:
+ return BRW_SURFACEFORMAT_R32G32_SSCALED;
+ case PIPE_FORMAT_R32G32B32_SSCALED:
+ return BRW_SURFACEFORMAT_R32G32B32_SSCALED;
+ case PIPE_FORMAT_R32G32B32A32_SSCALED:
+ return BRW_SURFACEFORMAT_R32G32B32A32_SSCALED;
+
+ case PIPE_FORMAT_R16_UNORM:
+ return BRW_SURFACEFORMAT_R16_UNORM;
+ case PIPE_FORMAT_R16G16_UNORM:
+ return BRW_SURFACEFORMAT_R16G16_UNORM;
+ case PIPE_FORMAT_R16G16B16_UNORM:
+ return BRW_SURFACEFORMAT_R16G16B16_UNORM;
+ case PIPE_FORMAT_R16G16B16A16_UNORM:
+ return BRW_SURFACEFORMAT_R16G16B16A16_UNORM;
+
+ case PIPE_FORMAT_R16_USCALED:
+ return BRW_SURFACEFORMAT_R16_USCALED;
+ case PIPE_FORMAT_R16G16_USCALED:
+ return BRW_SURFACEFORMAT_R16G16_USCALED;
+ case PIPE_FORMAT_R16G16B16_USCALED:
+ return BRW_SURFACEFORMAT_R16G16B16_USCALED;
+ case PIPE_FORMAT_R16G16B16A16_USCALED:
+ return BRW_SURFACEFORMAT_R16G16B16A16_USCALED;
+
+ case PIPE_FORMAT_R16_SNORM:
+ return BRW_SURFACEFORMAT_R16_SNORM;
+ case PIPE_FORMAT_R16G16_SNORM:
+ return BRW_SURFACEFORMAT_R16G16_SNORM;
+ case PIPE_FORMAT_R16G16B16_SNORM:
+ return BRW_SURFACEFORMAT_R16G16B16_SNORM;
+ case PIPE_FORMAT_R16G16B16A16_SNORM:
+ return BRW_SURFACEFORMAT_R16G16B16A16_SNORM;
+
+ case PIPE_FORMAT_R16_SSCALED:
+ return BRW_SURFACEFORMAT_R16_SSCALED;
+ case PIPE_FORMAT_R16G16_SSCALED:
+ return BRW_SURFACEFORMAT_R16G16_SSCALED;
+ case PIPE_FORMAT_R16G16B16_SSCALED:
+ return BRW_SURFACEFORMAT_R16G16B16_SSCALED;
+ case PIPE_FORMAT_R16G16B16A16_SSCALED:
+ return BRW_SURFACEFORMAT_R16G16B16A16_SSCALED;
+
+ case PIPE_FORMAT_R8_UNORM:
+ return BRW_SURFACEFORMAT_R8_UNORM;
+ case PIPE_FORMAT_R8G8_UNORM:
+ return BRW_SURFACEFORMAT_R8G8_UNORM;
+ case PIPE_FORMAT_R8G8B8_UNORM:
+ return BRW_SURFACEFORMAT_R8G8B8_UNORM;
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ return BRW_SURFACEFORMAT_R8G8B8A8_UNORM;
+
+ case PIPE_FORMAT_R8_USCALED:
+ return BRW_SURFACEFORMAT_R8_USCALED;
+ case PIPE_FORMAT_R8G8_USCALED:
+ return BRW_SURFACEFORMAT_R8G8_USCALED;
+ case PIPE_FORMAT_R8G8B8_USCALED:
+ return BRW_SURFACEFORMAT_R8G8B8_USCALED;
+ case PIPE_FORMAT_R8G8B8A8_USCALED:
+ return BRW_SURFACEFORMAT_R8G8B8A8_USCALED;
+
+ case PIPE_FORMAT_R8_SNORM:
+ return BRW_SURFACEFORMAT_R8_SNORM;
+ case PIPE_FORMAT_R8G8_SNORM:
+ return BRW_SURFACEFORMAT_R8G8_SNORM;
+ case PIPE_FORMAT_R8G8B8_SNORM:
+ return BRW_SURFACEFORMAT_R8G8B8_SNORM;
+ case PIPE_FORMAT_R8G8B8A8_SNORM:
+ return BRW_SURFACEFORMAT_R8G8B8A8_SNORM;
+
+ case PIPE_FORMAT_R8_SSCALED:
+ return BRW_SURFACEFORMAT_R8_SSCALED;
+ case PIPE_FORMAT_R8G8_SSCALED:
+ return BRW_SURFACEFORMAT_R8G8_SSCALED;
+ case PIPE_FORMAT_R8G8B8_SSCALED:
+ return BRW_SURFACEFORMAT_R8G8B8_SSCALED;
+ case PIPE_FORMAT_R8G8B8A8_SSCALED:
+ return BRW_SURFACEFORMAT_R8G8B8A8_SSCALED;
+
+ default:
+ assert(0);
+ return 0;
+ }
+}
+
+static unsigned get_index_type(int type)
+{
+ switch (type) {
+ case 1: return BRW_INDEX_BYTE;
+ case 2: return BRW_INDEX_WORD;
+ case 4: return BRW_INDEX_DWORD;
+ default: assert(0); return 0;
+ }
+}
+
+
+boolean brw_upload_vertex_buffers( struct brw_context *brw )
+{
+ struct brw_array_state vbp;
+ unsigned nr_enabled = 0;
+ unsigned i;
+
+ memset(&vbp, 0, sizeof(vbp));
+
+ /* This is a hardware limit:
+ */
+
+ for (i = 0; i < BRW_VEP_MAX; i++)
+ {
+ if (brw->vb.vbo_array[i] == NULL) {
+ nr_enabled = i;
+ break;
+ }
+
+ vbp.vb[i].vb0.bits.pitch = brw->vb.vbo_array[i]->stride;
+ vbp.vb[i].vb0.bits.pad = 0;
+ vbp.vb[i].vb0.bits.access_type = BRW_VERTEXBUFFER_ACCESS_VERTEXDATA;
+ vbp.vb[i].vb0.bits.vb_index = i;
+ vbp.vb[i].offset = brw->vb.vbo_array[i]->buffer_offset;
+ vbp.vb[i].buffer = brw->vb.vbo_array[i]->buffer;
+ vbp.vb[i].max_index = brw->vb.vbo_array[i]->max_index;
+ }
+
+
+ vbp.header.bits.length = (1 + nr_enabled * 4) - 2;
+ vbp.header.bits.opcode = CMD_VERTEX_BUFFER;
+
+ BEGIN_BATCH(vbp.header.bits.length+2, 0);
+ OUT_BATCH( vbp.header.dword );
+
+ for (i = 0; i < nr_enabled; i++) {
+ OUT_BATCH( vbp.vb[i].vb0.dword );
+ OUT_RELOC( vbp.vb[i].buffer, PIPE_BUFFER_USAGE_GPU_READ,
+ vbp.vb[i].offset);
+ OUT_BATCH( vbp.vb[i].max_index );
+ OUT_BATCH( vbp.vb[i].instance_data_step_rate );
+ }
+ ADVANCE_BATCH();
+ return TRUE;
+}
+
+
+
+boolean brw_upload_vertex_elements( struct brw_context *brw )
+{
+ struct brw_vertex_element_packet vep;
+
+ unsigned i;
+ unsigned nr_enabled = brw->attribs.VertexProgram->info.num_inputs;
+
+ memset(&vep, 0, sizeof(vep));
+
+ for (i = 0; i < nr_enabled; i++)
+ vep.ve[i] = brw->vb.inputs[i];
+
+
+ vep.header.length = (1 + nr_enabled * sizeof(vep.ve[0])/4) - 2;
+ vep.header.opcode = CMD_VERTEX_ELEMENT;
+ brw_cached_batch_struct(brw, &vep, 4 + nr_enabled * sizeof(vep.ve[0]));
+
+ return TRUE;
+}
+
+boolean brw_upload_indices( struct brw_context *brw,
+ const struct pipe_buffer *index_buffer,
+ int ib_size, int start, int count)
+{
+ /* Emit the indexbuffer packet:
+ */
+ {
+ struct brw_indexbuffer ib;
+
+ memset(&ib, 0, sizeof(ib));
+
+ ib.header.bits.opcode = CMD_INDEX_BUFFER;
+ ib.header.bits.length = sizeof(ib)/4 - 2;
+ ib.header.bits.index_format = get_index_type(ib_size);
+ ib.header.bits.cut_index_enable = 0;
+
+
+ BEGIN_BATCH(4, 0);
+ OUT_BATCH( ib.header.dword );
+ OUT_RELOC( index_buffer, PIPE_BUFFER_USAGE_GPU_READ, start);
+ OUT_RELOC( index_buffer, PIPE_BUFFER_USAGE_GPU_READ, start + count);
+ OUT_BATCH( 0 );
+ ADVANCE_BATCH();
+ }
+ return TRUE;
+}
diff --git a/src/gallium/drivers/i965simple/brw_eu.c b/src/gallium/drivers/i965simple/brw_eu.c
new file mode 100644
index 0000000000..e2002d1821
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_eu.c
@@ -0,0 +1,130 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "brw_context.h"
+#include "brw_defines.h"
+#include "brw_eu.h"
+
+
+
+/* How does predicate control work when execution_size != 8? Do I
+ * need to test/set for 0xffff when execution_size is 16?
+ */
+void brw_set_predicate_control_flag_value( struct brw_compile *p, unsigned value )
+{
+ p->current->header.predicate_control = BRW_PREDICATE_NONE;
+
+ if (value != 0xff) {
+ if (value != p->flag_value) {
+ brw_push_insn_state(p);
+ brw_MOV(p, brw_flag_reg(), brw_imm_uw(value));
+ p->flag_value = value;
+ brw_pop_insn_state(p);
+ }
+
+ p->current->header.predicate_control = BRW_PREDICATE_NORMAL;
+ }
+}
+
+void brw_set_predicate_control( struct brw_compile *p, unsigned pc )
+{
+ p->current->header.predicate_control = pc;
+}
+
+void brw_set_conditionalmod( struct brw_compile *p, unsigned conditional )
+{
+ p->current->header.destreg__conditonalmod = conditional;
+}
+
+void brw_set_access_mode( struct brw_compile *p, unsigned access_mode )
+{
+ p->current->header.access_mode = access_mode;
+}
+
+void brw_set_compression_control( struct brw_compile *p, boolean compression_control )
+{
+ p->current->header.compression_control = compression_control;
+}
+
+void brw_set_mask_control( struct brw_compile *p, unsigned value )
+{
+ p->current->header.mask_control = value;
+}
+
+void brw_set_saturate( struct brw_compile *p, unsigned value )
+{
+ p->current->header.saturate = value;
+}
+
+void brw_push_insn_state( struct brw_compile *p )
+{
+ assert(p->current != &p->stack[BRW_EU_MAX_INSN_STACK-1]);
+ memcpy(p->current+1, p->current, sizeof(struct brw_instruction));
+ p->current++;
+}
+
+void brw_pop_insn_state( struct brw_compile *p )
+{
+ assert(p->current != p->stack);
+ p->current--;
+}
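The push/pop pair is how the compiler temporarily overrides instruction
defaults, as brw_set_predicate_control_flag_value() does above. The pattern in
sketch form (brw_MOV, brw_flag_reg and brw_imm_uw are the same helpers used in
that function):

   /* Sketch of the usual push/override/emit/pop pattern:
    *
    *   brw_push_insn_state(p);
    *   brw_set_mask_control(p, BRW_MASK_DISABLE);      -- temporary override
    *   brw_MOV(p, brw_flag_reg(), brw_imm_uw(0xffff));
    *   brw_pop_insn_state(p);                          -- defaults restored
    */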
+
+
+/***********************************************************************
+ */
+void brw_init_compile( struct brw_compile *p )
+{
+ p->nr_insn = 0;
+ p->current = p->stack;
+ memset(p->current, 0, sizeof(p->current[0]));
+
+ /* Some defaults?
+ */
+ brw_set_mask_control(p, BRW_MASK_ENABLE); /* what does this do? */
+ brw_set_saturate(p, 0);
+ brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+ brw_set_predicate_control_flag_value(p, 0xff);
+}
+
+
+const unsigned *brw_get_program( struct brw_compile *p,
+ unsigned *sz )
+{
+ unsigned i;
+
+ for (i = 0; i < 8; i++)
+ brw_NOP(p);
+
+ *sz = p->nr_insn * sizeof(struct brw_instruction);
+ return (const unsigned *)p->store;
+}
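+
+/* Typical lifecycle, as a sketch (illustrative only; emit_body() is a
+ * placeholder for whatever instruction emission the caller performs):
+ *
+ *    struct brw_compile c;
+ *    unsigned program_size;
+ *    const unsigned *program;
+ *
+ *    brw_init_compile(&c);
+ *    emit_body(&c);                  e.g. brw_MOV(), brw_ADD(), ...
+ *    program = brw_get_program(&c, &program_size);
+ *
+ * program_size is returned in bytes, and the NOPs appended above pad the
+ * end of the instruction stream.
+ */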
+
diff --git a/src/gallium/drivers/i965simple/brw_eu.h b/src/gallium/drivers/i965simple/brw_eu.h
new file mode 100644
index 0000000000..23151ae9ed
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_eu.h
@@ -0,0 +1,888 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#ifndef BRW_EU_H
+#define BRW_EU_H
+
+#include "brw_structs.h"
+#include "brw_defines.h"
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_shader_tokens.h"
+
+#define BRW_SWIZZLE4(a,b,c,d) (((a)<<0) | ((b)<<2) | ((c)<<4) | ((d)<<6))
+#define BRW_GET_SWZ(swz, idx) (((swz) >> ((idx)*2)) & 0x3)
+
+#define BRW_SWIZZLE_NOOP BRW_SWIZZLE4(0,1,2,3)
+#define BRW_SWIZZLE_XYZW BRW_SWIZZLE4(0,1,2,3)
+#define BRW_SWIZZLE_XXXX BRW_SWIZZLE4(0,0,0,0)
+#define BRW_SWIZZLE_XYXY BRW_SWIZZLE4(0,1,0,1)
+
+
+#define REG_SIZE (8*4)
+
+
+/* These aren't hardware structs, just something useful for us to pass around:
+ *
+ * Align1 operation has a lot of control over input ranges. Used in
+ * WM programs to implement shaders decomposed into "channel serial"
+ * or "structure of array" form:
+ */
+struct brw_reg
+{
+ unsigned type:4;
+ unsigned file:2;
+ unsigned nr:8;
+ unsigned subnr:5; /* :1 in align16 */
+ unsigned negate:1; /* source only */
+ unsigned abs:1; /* source only */
+ unsigned vstride:4; /* source only */
+ unsigned width:3; /* src only, align1 only */
+ unsigned hstride:2; /* src only, align1 only */
+ unsigned address_mode:1; /* relative addressing, hopefully! */
+ unsigned pad0:1;
+
+ union {
+ struct {
+ unsigned swizzle:8; /* src only, align16 only */
+ unsigned writemask:4; /* dest only, align16 only */
+ int indirect_offset:10; /* relative addressing offset */
+ unsigned pad1:10; /* two dwords total */
+ } bits;
+
+ float f;
+ int d;
+ unsigned ud;
+ } dw1;
+};
+
+
+struct brw_indirect {
+ unsigned addr_subnr:4;
+ int addr_offset:10;
+ unsigned pad:18;
+};
+
+
+#define BRW_EU_MAX_INSN_STACK 5
+#define BRW_EU_MAX_INSN 1200
+
+struct brw_compile {
+ struct brw_instruction store[BRW_EU_MAX_INSN];
+ unsigned nr_insn;
+
+ /* Allow clients to push/pop instruction state:
+ */
+ struct brw_instruction stack[BRW_EU_MAX_INSN_STACK];
+ struct brw_instruction *current;
+
+ unsigned flag_value;
+ boolean single_program_flow;
+};
+
+
+
+static __inline int type_sz( unsigned type )
+{
+ switch( type ) {
+ case BRW_REGISTER_TYPE_UD:
+ case BRW_REGISTER_TYPE_D:
+ case BRW_REGISTER_TYPE_F:
+ return 4;
+ case BRW_REGISTER_TYPE_HF:
+ case BRW_REGISTER_TYPE_UW:
+ case BRW_REGISTER_TYPE_W:
+ return 2;
+ case BRW_REGISTER_TYPE_UB:
+ case BRW_REGISTER_TYPE_B:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static __inline struct brw_reg brw_reg( unsigned file,
+ unsigned nr,
+ unsigned subnr,
+ unsigned type,
+ unsigned vstride,
+ unsigned width,
+ unsigned hstride,
+ unsigned swizzle,
+ unsigned writemask)
+{
+
+ struct brw_reg reg;
+ reg.type = type;
+ reg.file = file;
+ reg.nr = nr;
+ reg.subnr = subnr * type_sz(type);
+ reg.negate = 0;
+ reg.abs = 0;
+ reg.vstride = vstride;
+ reg.width = width;
+ reg.hstride = hstride;
+ reg.address_mode = BRW_ADDRESS_DIRECT;
+ reg.pad0 = 0;
+
+ /* Could do better: If the reg is r5.3<0;1,0>, we probably want to
+ * set swizzle and writemask to W, as the lower bits of subnr will
+ * be lost when converted to align16. This is probably too much to
+ * keep track of as you'd want it adjusted by suboffset(), etc.
+ * Perhaps fix up when converting to align16?
+ */
+ reg.dw1.bits.swizzle = swizzle;
+ reg.dw1.bits.writemask = writemask;
+ reg.dw1.bits.indirect_offset = 0;
+ reg.dw1.bits.pad1 = 0;
+ return reg;
+}
+
+static __inline struct brw_reg brw_vec16_reg( unsigned file,
+ unsigned nr,
+ unsigned subnr )
+{
+ return brw_reg(file,
+ nr,
+ subnr,
+ BRW_REGISTER_TYPE_F,
+ BRW_VERTICAL_STRIDE_16,
+ BRW_WIDTH_16,
+ BRW_HORIZONTAL_STRIDE_1,
+ BRW_SWIZZLE_XYZW,
+ TGSI_WRITEMASK_XYZW);
+}
+
+static __inline struct brw_reg brw_vec8_reg( unsigned file,
+ unsigned nr,
+ unsigned subnr )
+{
+ return brw_reg(file,
+ nr,
+ subnr,
+ BRW_REGISTER_TYPE_F,
+ BRW_VERTICAL_STRIDE_8,
+ BRW_WIDTH_8,
+ BRW_HORIZONTAL_STRIDE_1,
+ BRW_SWIZZLE_XYZW,
+ TGSI_WRITEMASK_XYZW);
+}
+
+
+static __inline struct brw_reg brw_vec4_reg( unsigned file,
+ unsigned nr,
+ unsigned subnr )
+{
+ return brw_reg(file,
+ nr,
+ subnr,
+ BRW_REGISTER_TYPE_F,
+ BRW_VERTICAL_STRIDE_4,
+ BRW_WIDTH_4,
+ BRW_HORIZONTAL_STRIDE_1,
+ BRW_SWIZZLE_XYZW,
+ TGSI_WRITEMASK_XYZW);
+}
+
+
+static __inline struct brw_reg brw_vec2_reg( unsigned file,
+ unsigned nr,
+ unsigned subnr )
+{
+ return brw_reg(file,
+ nr,
+ subnr,
+ BRW_REGISTER_TYPE_F,
+ BRW_VERTICAL_STRIDE_2,
+ BRW_WIDTH_2,
+ BRW_HORIZONTAL_STRIDE_1,
+ BRW_SWIZZLE_XYXY,
+ TGSI_WRITEMASK_XY);
+}
+
+static __inline struct brw_reg brw_vec1_reg( unsigned file,
+ unsigned nr,
+ unsigned subnr )
+{
+ return brw_reg(file,
+ nr,
+ subnr,
+ BRW_REGISTER_TYPE_F,
+ BRW_VERTICAL_STRIDE_0,
+ BRW_WIDTH_1,
+ BRW_HORIZONTAL_STRIDE_0,
+ BRW_SWIZZLE_XXXX,
+ TGSI_WRITEMASK_X);
+}
+
+
+static __inline struct brw_reg retype( struct brw_reg reg,
+ unsigned type )
+{
+ reg.type = type;
+ return reg;
+}
+
+static __inline struct brw_reg suboffset( struct brw_reg reg,
+ unsigned delta )
+{
+ reg.subnr += delta * type_sz(reg.type);
+ return reg;
+}
+
+
+static __inline struct brw_reg offset( struct brw_reg reg,
+ unsigned delta )
+{
+ reg.nr += delta;
+ return reg;
+}
+
+
+static __inline struct brw_reg byte_offset( struct brw_reg reg,
+ unsigned bytes )
+{
+ unsigned newoffset = reg.nr * REG_SIZE + reg.subnr + bytes;
+ reg.nr = newoffset / REG_SIZE;
+ reg.subnr = newoffset % REG_SIZE;
+ return reg;
+}
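+
+/* Example of the arithmetic above (illustrative): starting from a register
+ * at r1.0, byte_offset(reg, 40) gives newoffset = 1*32 + 0 + 40 = 72, so
+ * the result addresses r2.8 (nr = 2, subnr = 8).
+ */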
+
+
+static __inline struct brw_reg brw_uw16_reg( unsigned file,
+ unsigned nr,
+ unsigned subnr )
+{
+ return suboffset(retype(brw_vec16_reg(file, nr, 0), BRW_REGISTER_TYPE_UW), subnr);
+}
+
+static __inline struct brw_reg brw_uw8_reg( unsigned file,
+ unsigned nr,
+ unsigned subnr )
+{
+ return suboffset(retype(brw_vec8_reg(file, nr, 0), BRW_REGISTER_TYPE_UW), subnr);
+}
+
+static __inline struct brw_reg brw_uw1_reg( unsigned file,
+ unsigned nr,
+ unsigned subnr )
+{
+ return suboffset(retype(brw_vec1_reg(file, nr, 0), BRW_REGISTER_TYPE_UW), subnr);
+}
+
+static __inline struct brw_reg brw_imm_reg( unsigned type )
+{
+ return brw_reg( BRW_IMMEDIATE_VALUE,
+ 0,
+ 0,
+ type,
+ BRW_VERTICAL_STRIDE_0,
+ BRW_WIDTH_1,
+ BRW_HORIZONTAL_STRIDE_0,
+ 0,
+ 0);
+}
+
+static __inline struct brw_reg brw_imm_f( float f )
+{
+ struct brw_reg imm = brw_imm_reg(BRW_REGISTER_TYPE_F);
+ imm.dw1.f = f;
+ return imm;
+}
+
+static __inline struct brw_reg brw_imm_d( int d )
+{
+ struct brw_reg imm = brw_imm_reg(BRW_REGISTER_TYPE_D);
+ imm.dw1.d = d;
+ return imm;
+}
+
+static __inline struct brw_reg brw_imm_ud( unsigned ud )
+{
+ struct brw_reg imm = brw_imm_reg(BRW_REGISTER_TYPE_UD);
+ imm.dw1.ud = ud;
+ return imm;
+}
+
+static __inline struct brw_reg brw_imm_uw( ushort uw )
+{
+ struct brw_reg imm = brw_imm_reg(BRW_REGISTER_TYPE_UW);
+ imm.dw1.ud = uw;
+ return imm;
+}
+
+static __inline struct brw_reg brw_imm_w( short w )
+{
+ struct brw_reg imm = brw_imm_reg(BRW_REGISTER_TYPE_W);
+ imm.dw1.d = w;
+ return imm;
+}
+
+/* brw_imm_b and brw_imm_ub aren't supported by hardware - the type
+ * numbers alias with _V and _VF below:
+ */
+
+/* Vector of eight signed half-byte values:
+ */
+static __inline struct brw_reg brw_imm_v( unsigned v )
+{
+ struct brw_reg imm = brw_imm_reg(BRW_REGISTER_TYPE_V);
+ imm.vstride = BRW_VERTICAL_STRIDE_0;
+ imm.width = BRW_WIDTH_8;
+ imm.hstride = BRW_HORIZONTAL_STRIDE_1;
+ imm.dw1.ud = v;
+ return imm;
+}
+
+/* Vector of four 8-bit float values:
+ */
+static __inline struct brw_reg brw_imm_vf( unsigned v )
+{
+ struct brw_reg imm = brw_imm_reg(BRW_REGISTER_TYPE_VF);
+ imm.vstride = BRW_VERTICAL_STRIDE_0;
+ imm.width = BRW_WIDTH_4;
+ imm.hstride = BRW_HORIZONTAL_STRIDE_1;
+ imm.dw1.ud = v;
+ return imm;
+}
+
+#define VF_ZERO 0x0
+#define VF_ONE 0x30
+#define VF_NEG (1<<7)
+
+static __inline struct brw_reg brw_imm_vf4( unsigned v0,
+ unsigned v1,
+ unsigned v2,
+ unsigned v3)
+{
+ struct brw_reg imm = brw_imm_reg(BRW_REGISTER_TYPE_VF);
+ imm.vstride = BRW_VERTICAL_STRIDE_0;
+ imm.width = BRW_WIDTH_4;
+ imm.hstride = BRW_HORIZONTAL_STRIDE_1;
+ imm.dw1.ud = ((v0 << 0) |
+ (v1 << 8) |
+ (v2 << 16) |
+ (v3 << 24));
+ return imm;
+}
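+
+/* Worked example of the packing above, using the VF_* defines: a call such
+ * as brw_imm_vf4(VF_ZERO, VF_ONE, VF_ONE, VF_ONE | VF_NEG) sets dw1.ud to
+ * 0xb0303000, i.e. the restricted-float vector (0, 1, 1, -1).
+ */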
+
+
+static __inline struct brw_reg brw_address( struct brw_reg reg )
+{
+ return brw_imm_uw(reg.nr * REG_SIZE + reg.subnr);
+}
+
+
+static __inline struct brw_reg brw_vec1_grf( unsigned nr,
+ unsigned subnr )
+{
+ return brw_vec1_reg(BRW_GENERAL_REGISTER_FILE, nr, subnr);
+}
+
+static __inline struct brw_reg brw_vec8_grf( unsigned nr,
+ unsigned subnr )
+{
+ return brw_vec8_reg(BRW_GENERAL_REGISTER_FILE, nr, subnr);
+}
+
+static __inline struct brw_reg brw_vec4_grf( unsigned nr,
+ unsigned subnr )
+{
+ return brw_vec4_reg(BRW_GENERAL_REGISTER_FILE, nr, subnr);
+}
+
+
+static __inline struct brw_reg brw_vec2_grf( unsigned nr,
+ unsigned subnr )
+{
+ return brw_vec2_reg(BRW_GENERAL_REGISTER_FILE, nr, subnr);
+}
+
+static __inline struct brw_reg brw_uw8_grf( unsigned nr,
+ unsigned subnr )
+{
+ return brw_uw8_reg(BRW_GENERAL_REGISTER_FILE, nr, subnr);
+}
+
+static __inline struct brw_reg brw_null_reg( void )
+{
+ return brw_vec8_reg(BRW_ARCHITECTURE_REGISTER_FILE,
+ BRW_ARF_NULL,
+ 0);
+}
+
+static __inline struct brw_reg brw_address_reg( unsigned subnr )
+{
+ return brw_uw1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
+ BRW_ARF_ADDRESS,
+ subnr);
+}
+
+/* If/else instructions break in align16 mode if writemask & swizzle
+ * aren't xyzw. This goes against the convention for other scalar
+ * regs:
+ */
+static __inline struct brw_reg brw_ip_reg( void )
+{
+ return brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
+ BRW_ARF_IP,
+ 0,
+ BRW_REGISTER_TYPE_UD,
+ BRW_VERTICAL_STRIDE_4, /* ? */
+ BRW_WIDTH_1,
+ BRW_HORIZONTAL_STRIDE_0,
+ BRW_SWIZZLE_XYZW, /* NOTE! */
+ TGSI_WRITEMASK_XYZW); /* NOTE! */
+}
+
+static __inline struct brw_reg brw_acc_reg( void )
+{
+ return brw_vec8_reg(BRW_ARCHITECTURE_REGISTER_FILE,
+ BRW_ARF_ACCUMULATOR,
+ 0);
+}
+
+
+static __inline struct brw_reg brw_flag_reg( void )
+{
+ return brw_uw1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
+ BRW_ARF_FLAG,
+ 0);
+}
+
+
+static __inline struct brw_reg brw_mask_reg( unsigned subnr )
+{
+ return brw_uw1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
+ BRW_ARF_MASK,
+ subnr);
+}
+
+static __inline struct brw_reg brw_message_reg( unsigned nr )
+{
+ return brw_vec8_reg(BRW_MESSAGE_REGISTER_FILE,
+ nr,
+ 0);
+}
+
+
+
+
+/* This is almost always called with a numeric constant argument, so
+ * make things easy to evaluate at compile time:
+ */
+static __inline unsigned cvt( unsigned val )
+{
+ switch (val) {
+ case 0: return 0;
+ case 1: return 1;
+ case 2: return 2;
+ case 4: return 3;
+ case 8: return 4;
+ case 16: return 5;
+ case 32: return 6;
+ }
+ return 0;
+}
+
+static __inline struct brw_reg stride( struct brw_reg reg,
+ unsigned vstride,
+ unsigned width,
+ unsigned hstride )
+{
+
+ reg.vstride = cvt(vstride);
+ reg.width = cvt(width) - 1;
+ reg.hstride = cvt(hstride);
+ return reg;
+}
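+
+/* Example: stride(reg, 8, 8, 1) stores vstride = cvt(8) = 4,
+ * width = cvt(8) - 1 = 3 and hstride = cvt(1) = 1, which is the encoded
+ * form of the <8;8,1> region produced by the vec8() wrapper below.
+ */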
+
+static __inline struct brw_reg vec16( struct brw_reg reg )
+{
+ return stride(reg, 16,16,1);
+}
+
+static __inline struct brw_reg vec8( struct brw_reg reg )
+{
+ return stride(reg, 8,8,1);
+}
+
+static __inline struct brw_reg vec4( struct brw_reg reg )
+{
+ return stride(reg, 4,4,1);
+}
+
+static __inline struct brw_reg vec2( struct brw_reg reg )
+{
+ return stride(reg, 2,2,1);
+}
+
+static __inline struct brw_reg vec1( struct brw_reg reg )
+{
+ return stride(reg, 0,1,0);
+}
+
+static __inline struct brw_reg get_element( struct brw_reg reg, unsigned elt )
+{
+ return vec1(suboffset(reg, elt));
+}
+
+static __inline struct brw_reg get_element_ud( struct brw_reg reg, unsigned elt )
+{
+ return vec1(suboffset(retype(reg, BRW_REGISTER_TYPE_UD), elt));
+}
+
+
+static __inline struct brw_reg brw_swizzle( struct brw_reg reg,
+ unsigned x,
+ unsigned y,
+ unsigned z,
+ unsigned w)
+{
+ reg.dw1.bits.swizzle = BRW_SWIZZLE4(BRW_GET_SWZ(reg.dw1.bits.swizzle, x),
+ BRW_GET_SWZ(reg.dw1.bits.swizzle, y),
+ BRW_GET_SWZ(reg.dw1.bits.swizzle, z),
+ BRW_GET_SWZ(reg.dw1.bits.swizzle, w));
+ return reg;
+}
+
+
+static __inline struct brw_reg brw_swizzle1( struct brw_reg reg,
+ unsigned x )
+{
+ return brw_swizzle(reg, x, x, x, x);
+}
+
+static __inline struct brw_reg brw_writemask( struct brw_reg reg,
+ unsigned mask )
+{
+ reg.dw1.bits.writemask &= mask;
+ return reg;
+}
+
+static __inline struct brw_reg brw_set_writemask( struct brw_reg reg,
+ unsigned mask )
+{
+ reg.dw1.bits.writemask = mask;
+ return reg;
+}
+
+static __inline struct brw_reg negate( struct brw_reg reg )
+{
+ reg.negate ^= 1;
+ return reg;
+}
+
+static __inline struct brw_reg brw_abs( struct brw_reg reg )
+{
+ reg.abs = 1;
+ return reg;
+}
+
+/***********************************************************************
+ * Register-indirect addressing helpers:
+ */
+static __inline struct brw_reg brw_vec4_indirect( unsigned subnr,
+ int offset )
+{
+ struct brw_reg reg = brw_vec4_grf(0, 0);
+ reg.subnr = subnr;
+ reg.address_mode = BRW_ADDRESS_REGISTER_INDIRECT_REGISTER;
+ reg.dw1.bits.indirect_offset = offset;
+ return reg;
+}
+
+static __inline struct brw_reg brw_vec1_indirect( unsigned subnr,
+ int offset )
+{
+ struct brw_reg reg = brw_vec1_grf(0, 0);
+ reg.subnr = subnr;
+ reg.address_mode = BRW_ADDRESS_REGISTER_INDIRECT_REGISTER;
+ reg.dw1.bits.indirect_offset = offset;
+ return reg;
+}
+
+static __inline struct brw_reg deref_4f(struct brw_indirect ptr, int offset)
+{
+ return brw_vec4_indirect(ptr.addr_subnr, ptr.addr_offset + offset);
+}
+
+static __inline struct brw_reg deref_1f(struct brw_indirect ptr, int offset)
+{
+ return brw_vec1_indirect(ptr.addr_subnr, ptr.addr_offset + offset);
+}
+
+static __inline struct brw_reg deref_4b(struct brw_indirect ptr, int offset)
+{
+ return retype(deref_4f(ptr, offset), BRW_REGISTER_TYPE_B);
+}
+
+static __inline struct brw_reg deref_1uw(struct brw_indirect ptr, int offset)
+{
+ return retype(deref_1f(ptr, offset), BRW_REGISTER_TYPE_UW);
+}
+
+static __inline struct brw_reg deref_1ud(struct brw_indirect ptr, int offset)
+{
+ return retype(deref_1f(ptr, offset), BRW_REGISTER_TYPE_UD);
+}
+
+static __inline struct brw_reg get_addr_reg(struct brw_indirect ptr)
+{
+ return brw_address_reg(ptr.addr_subnr);
+}
+
+static __inline struct brw_indirect brw_indirect_offset( struct brw_indirect ptr, int offset )
+{
+ ptr.addr_offset += offset;
+ return ptr;
+}
+
+static __inline struct brw_indirect brw_indirect( unsigned addr_subnr, int offset )
+{
+ struct brw_indirect ptr;
+ ptr.addr_subnr = addr_subnr;
+ ptr.addr_offset = offset;
+ ptr.pad = 0;
+ return ptr;
+}
+
+static __inline struct brw_instruction *current_insn( struct brw_compile *p)
+{
+ return &p->store[p->nr_insn];
+}
+
+void brw_pop_insn_state( struct brw_compile *p );
+void brw_push_insn_state( struct brw_compile *p );
+void brw_set_mask_control( struct brw_compile *p, unsigned value );
+void brw_set_saturate( struct brw_compile *p, unsigned value );
+void brw_set_access_mode( struct brw_compile *p, unsigned access_mode );
+void brw_set_compression_control( struct brw_compile *p, boolean control );
+void brw_set_predicate_control_flag_value( struct brw_compile *p, unsigned value );
+void brw_set_predicate_control( struct brw_compile *p, unsigned pc );
+void brw_set_conditionalmod( struct brw_compile *p, unsigned conditional );
+
+void brw_init_compile( struct brw_compile *p );
+const unsigned *brw_get_program( struct brw_compile *p, unsigned *sz );
+
+
+struct brw_instruction *brw_alu1( struct brw_compile *p,
+ unsigned opcode,
+ struct brw_reg dest,
+ struct brw_reg src );
+
+struct brw_instruction *brw_alu2(struct brw_compile *p,
+ unsigned opcode,
+ struct brw_reg dest,
+ struct brw_reg src0,
+ struct brw_reg src1 );
+
+/* Helpers for regular instructions:
+ */
+#define ALU1(OP) \
+struct brw_instruction *brw_##OP(struct brw_compile *p, \
+ struct brw_reg dest, \
+ struct brw_reg src0);
+
+#define ALU2(OP) \
+struct brw_instruction *brw_##OP(struct brw_compile *p, \
+ struct brw_reg dest, \
+ struct brw_reg src0, \
+ struct brw_reg src1);
+
+ALU1(MOV)
+ALU2(SEL)
+ALU1(NOT)
+ALU2(AND)
+ALU2(OR)
+ALU2(XOR)
+ALU2(SHR)
+ALU2(SHL)
+ALU2(RSR)
+ALU2(RSL)
+ALU2(ASR)
+ALU2(JMPI)
+ALU2(ADD)
+ALU2(MUL)
+ALU1(FRC)
+ALU1(RNDD)
+ALU2(MAC)
+ALU2(MACH)
+ALU1(LZD)
+ALU2(DP4)
+ALU2(DPH)
+ALU2(DP3)
+ALU2(DP2)
+ALU2(LINE)
+
+#undef ALU1
+#undef ALU2
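+
+/* The macros above only emit prototypes, e.g. ALU2(ADD) declares
+ *
+ *    struct brw_instruction *brw_ADD(struct brw_compile *p,
+ *                                    struct brw_reg dest,
+ *                                    struct brw_reg src0,
+ *                                    struct brw_reg src1);
+ *
+ * so a call like brw_ADD(p, dst, a, b) emits a single instruction using the
+ * defaults currently held in p->current (sketch only; dst, a and b are
+ * placeholder registers).
+ */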
+
+
+
+/* Helpers for SEND instruction:
+ */
+void brw_urb_WRITE(struct brw_compile *p,
+ struct brw_reg dest,
+ unsigned msg_reg_nr,
+ struct brw_reg src0,
+ boolean allocate,
+ boolean used,
+ unsigned msg_length,
+ unsigned response_length,
+ boolean eot,
+ boolean writes_complete,
+ unsigned offset,
+ unsigned swizzle);
+
+void brw_fb_WRITE(struct brw_compile *p,
+ struct brw_reg dest,
+ unsigned msg_reg_nr,
+ struct brw_reg src0,
+ unsigned binding_table_index,
+ unsigned msg_length,
+ unsigned response_length,
+ boolean eot);
+
+void brw_SAMPLE(struct brw_compile *p,
+ struct brw_reg dest,
+ unsigned msg_reg_nr,
+ struct brw_reg src0,
+ unsigned binding_table_index,
+ unsigned sampler,
+ unsigned writemask,
+ unsigned msg_type,
+ unsigned response_length,
+ unsigned msg_length,
+ boolean eot);
+
+void brw_math_16( struct brw_compile *p,
+ struct brw_reg dest,
+ unsigned function,
+ unsigned saturate,
+ unsigned msg_reg_nr,
+ struct brw_reg src,
+ unsigned precision );
+
+void brw_math( struct brw_compile *p,
+ struct brw_reg dest,
+ unsigned function,
+ unsigned saturate,
+ unsigned msg_reg_nr,
+ struct brw_reg src,
+ unsigned data_type,
+ unsigned precision );
+
+void brw_dp_READ_16( struct brw_compile *p,
+ struct brw_reg dest,
+ unsigned msg_reg_nr,
+ unsigned scratch_offset );
+
+void brw_dp_WRITE_16( struct brw_compile *p,
+ struct brw_reg src,
+ unsigned msg_reg_nr,
+ unsigned scratch_offset );
+
+/* If/else/endif. Works by manipulating the execution flags on each
+ * channel.
+ */
+struct brw_instruction *brw_IF(struct brw_compile *p,
+ unsigned execute_size);
+
+struct brw_instruction *brw_ELSE(struct brw_compile *p,
+ struct brw_instruction *if_insn);
+
+void brw_ENDIF(struct brw_compile *p,
+ struct brw_instruction *if_or_else_insn);
+
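+/* Sketch of the expected calling pattern (illustrative only):
+ *
+ *    struct brw_instruction *if_insn = brw_IF(p, BRW_EXECUTE_8);
+ *       ...instructions for the taken side...
+ *    struct brw_instruction *else_insn = brw_ELSE(p, if_insn);
+ *       ...instructions for the not-taken side...
+ *    brw_ENDIF(p, else_insn);     (pass if_insn instead when there is no ELSE)
+ */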
+
+/* DO/WHILE loops:
+ */
+struct brw_instruction *brw_DO(struct brw_compile *p,
+ unsigned execute_size);
+
+struct brw_instruction *brw_WHILE(struct brw_compile *p,
+ struct brw_instruction *patch_insn);
+
+struct brw_instruction *brw_BREAK(struct brw_compile *p);
+struct brw_instruction *brw_CONT(struct brw_compile *p);
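+
+/* Loop sketch (illustrative only):
+ *
+ *    struct brw_instruction *do_insn = brw_DO(p, BRW_EXECUTE_8);
+ *       ...loop body, optionally containing brw_BREAK()/brw_CONT()...
+ *    brw_WHILE(p, do_insn);
+ */
+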
+/* Forward jumps:
+ */
+void brw_land_fwd_jump(struct brw_compile *p,
+ struct brw_instruction *jmp_insn);
+
+
+
+void brw_NOP(struct brw_compile *p);
+
+/* Special case: there is never a destination, execution size will be
+ * taken from src0:
+ */
+void brw_CMP(struct brw_compile *p,
+ struct brw_reg dest,
+ unsigned conditional,
+ struct brw_reg src0,
+ struct brw_reg src1);
+
+void brw_print_reg( struct brw_reg reg );
+
+
+/***********************************************************************
+ * brw_eu_util.c:
+ */
+
+void brw_copy_indirect_to_indirect(struct brw_compile *p,
+ struct brw_indirect dst_ptr,
+ struct brw_indirect src_ptr,
+ unsigned count);
+
+void brw_copy_from_indirect(struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_indirect ptr,
+ unsigned count);
+
+void brw_copy4(struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg src,
+ unsigned count);
+
+void brw_copy8(struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg src,
+ unsigned count);
+
+void brw_math_invert( struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg src);
+
+void brw_set_src1( struct brw_instruction *insn,
+ struct brw_reg reg );
+#endif
diff --git a/src/gallium/drivers/i965simple/brw_eu_debug.c b/src/gallium/drivers/i965simple/brw_eu_debug.c
new file mode 100644
index 0000000000..4adfb0c02f
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_eu_debug.c
@@ -0,0 +1,90 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "util/u_debug.h"
+
+#include "brw_eu.h"
+
+void brw_print_reg( struct brw_reg hwreg )
+{
+ static const char *file[] = {
+ "arf",
+ "grf",
+ "msg",
+ "imm"
+ };
+
+ static const char *type[] = {
+ "ud",
+ "d",
+ "uw",
+ "w",
+ "ub",
+ "vf",
+ "hf",
+ "f"
+ };
+
+ debug_printf("%s%s",
+ hwreg.abs ? "abs/" : "",
+ hwreg.negate ? "-" : "");
+
+ if (hwreg.file == BRW_GENERAL_REGISTER_FILE &&
+ hwreg.nr % 2 == 0 &&
+ hwreg.subnr == 0 &&
+ hwreg.vstride == BRW_VERTICAL_STRIDE_8 &&
+ hwreg.width == BRW_WIDTH_8 &&
+ hwreg.hstride == BRW_HORIZONTAL_STRIDE_1 &&
+ hwreg.type == BRW_REGISTER_TYPE_F) {
+ debug_printf("vec%d", hwreg.nr);
+ }
+ else if (hwreg.file == BRW_GENERAL_REGISTER_FILE &&
+ hwreg.vstride == BRW_VERTICAL_STRIDE_0 &&
+ hwreg.width == BRW_WIDTH_1 &&
+ hwreg.hstride == BRW_HORIZONTAL_STRIDE_0 &&
+ hwreg.type == BRW_REGISTER_TYPE_F) {
+ debug_printf("scl%d.%d", hwreg.nr, hwreg.subnr / 4);
+ }
+ else {
+ debug_printf("%s%d.%d<%d;%d,%d>:%s",
+ file[hwreg.file],
+ hwreg.nr,
+ hwreg.subnr / type_sz(hwreg.type),
+ hwreg.vstride ? (1<<(hwreg.vstride-1)) : 0,
+ 1<<hwreg.width,
+ hwreg.hstride ? (1<<(hwreg.hstride-1)) : 0,
+ type[hwreg.type]);
+ }
+}
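+
+/* Example output (following the branches above): a full SIMD8 float GRF such
+ * as brw_vec8_grf(2, 0) prints as "vec2", a scalar float such as
+ * brw_vec1_grf(3, 1) prints as "scl3.1", and anything else is printed with
+ * the generic "%s%d.%d<%d;%d,%d>:%s" pattern (file, reg, subreg, region,
+ * type).
+ */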
+
+
+
diff --git a/src/gallium/drivers/i965simple/brw_eu_emit.c b/src/gallium/drivers/i965simple/brw_eu_emit.c
new file mode 100644
index 0000000000..400a80b6fb
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_eu_emit.c
@@ -0,0 +1,1080 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "brw_context.h"
+#include "brw_defines.h"
+#include "brw_eu.h"
+
+
+
+
+/***********************************************************************
+ * Internal helper for constructing instructions
+ */
+
+static void guess_execution_size( struct brw_instruction *insn,
+ struct brw_reg reg )
+{
+ if (reg.width == BRW_WIDTH_8 &&
+ insn->header.compression_control == BRW_COMPRESSION_COMPRESSED)
+ insn->header.execution_size = BRW_EXECUTE_16;
+ else
+ insn->header.execution_size = reg.width; /* note - definitions are compatible */
+}
+
+
+static void brw_set_dest( struct brw_instruction *insn,
+ struct brw_reg dest )
+{
+ insn->bits1.da1.dest_reg_file = dest.file;
+ insn->bits1.da1.dest_reg_type = dest.type;
+ insn->bits1.da1.dest_address_mode = dest.address_mode;
+
+ if (dest.address_mode == BRW_ADDRESS_DIRECT) {
+ insn->bits1.da1.dest_reg_nr = dest.nr;
+
+ if (insn->header.access_mode == BRW_ALIGN_1) {
+ insn->bits1.da1.dest_subreg_nr = dest.subnr;
+ insn->bits1.da1.dest_horiz_stride = BRW_HORIZONTAL_STRIDE_1;
+ }
+ else {
+ insn->bits1.da16.dest_subreg_nr = dest.subnr / 16;
+ insn->bits1.da16.dest_writemask = dest.dw1.bits.writemask;
+ }
+ }
+ else {
+ insn->bits1.ia1.dest_subreg_nr = dest.subnr;
+
+ /* These are different sizes in align1 vs align16:
+ */
+ if (insn->header.access_mode == BRW_ALIGN_1) {
+ insn->bits1.ia1.dest_indirect_offset = dest.dw1.bits.indirect_offset;
+ insn->bits1.ia1.dest_horiz_stride = BRW_HORIZONTAL_STRIDE_1;
+ }
+ else {
+ insn->bits1.ia16.dest_indirect_offset = dest.dw1.bits.indirect_offset;
+ }
+ }
+
+ /* NEW: Set the execution size based on dest.width and
+ * insn->compression_control:
+ */
+ guess_execution_size(insn, dest);
+}
+
+static void brw_set_src0( struct brw_instruction *insn,
+ struct brw_reg reg )
+{
+ assert(reg.file != BRW_MESSAGE_REGISTER_FILE);
+
+ insn->bits1.da1.src0_reg_file = reg.file;
+ insn->bits1.da1.src0_reg_type = reg.type;
+ insn->bits2.da1.src0_abs = reg.abs;
+ insn->bits2.da1.src0_negate = reg.negate;
+ insn->bits2.da1.src0_address_mode = reg.address_mode;
+
+ if (reg.file == BRW_IMMEDIATE_VALUE) {
+ insn->bits3.ud = reg.dw1.ud;
+
+ /* Required to set some fields in src1 as well:
+ */
+ insn->bits1.da1.src1_reg_file = 0; /* arf */
+ insn->bits1.da1.src1_reg_type = reg.type;
+ }
+ else
+ {
+ if (reg.address_mode == BRW_ADDRESS_DIRECT) {
+ if (insn->header.access_mode == BRW_ALIGN_1) {
+ insn->bits2.da1.src0_subreg_nr = reg.subnr;
+ insn->bits2.da1.src0_reg_nr = reg.nr;
+ }
+ else {
+ insn->bits2.da16.src0_subreg_nr = reg.subnr / 16;
+ insn->bits2.da16.src0_reg_nr = reg.nr;
+ }
+ }
+ else {
+ insn->bits2.ia1.src0_subreg_nr = reg.subnr;
+
+ if (insn->header.access_mode == BRW_ALIGN_1) {
+ insn->bits2.ia1.src0_indirect_offset = reg.dw1.bits.indirect_offset;
+ }
+ else {
+ insn->bits2.ia16.src0_subreg_nr = reg.dw1.bits.indirect_offset;
+ }
+ }
+
+ if (insn->header.access_mode == BRW_ALIGN_1) {
+ if (reg.width == BRW_WIDTH_1 &&
+ insn->header.execution_size == BRW_EXECUTE_1) {
+ insn->bits2.da1.src0_horiz_stride = BRW_HORIZONTAL_STRIDE_0;
+ insn->bits2.da1.src0_width = BRW_WIDTH_1;
+ insn->bits2.da1.src0_vert_stride = BRW_VERTICAL_STRIDE_0;
+ }
+ else {
+ insn->bits2.da1.src0_horiz_stride = reg.hstride;
+ insn->bits2.da1.src0_width = reg.width;
+ insn->bits2.da1.src0_vert_stride = reg.vstride;
+ }
+ }
+ else {
+ insn->bits2.da16.src0_swz_x = BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_X);
+ insn->bits2.da16.src0_swz_y = BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_Y);
+ insn->bits2.da16.src0_swz_z = BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_Z);
+ insn->bits2.da16.src0_swz_w = BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_W);
+
+ /* This is an oddity of the fact we're using the same
+ * descriptions for registers in align_16 as align_1:
+ */
+ if (reg.vstride == BRW_VERTICAL_STRIDE_8)
+ insn->bits2.da16.src0_vert_stride = BRW_VERTICAL_STRIDE_4;
+ else
+ insn->bits2.da16.src0_vert_stride = reg.vstride;
+ }
+ }
+}
+
+
+void brw_set_src1( struct brw_instruction *insn,
+ struct brw_reg reg )
+{
+ assert(reg.file != BRW_MESSAGE_REGISTER_FILE);
+
+ insn->bits1.da1.src1_reg_file = reg.file;
+ insn->bits1.da1.src1_reg_type = reg.type;
+ insn->bits3.da1.src1_abs = reg.abs;
+ insn->bits3.da1.src1_negate = reg.negate;
+
+ /* Only src1 can be immediate in two-argument instructions.
+ */
+ assert(insn->bits1.da1.src0_reg_file != BRW_IMMEDIATE_VALUE);
+
+ if (reg.file == BRW_IMMEDIATE_VALUE) {
+ insn->bits3.ud = reg.dw1.ud;
+ }
+ else {
+ /* This is a hardware restriction, which may or may not be lifted
+ * in the future:
+ */
+ assert (reg.address_mode == BRW_ADDRESS_DIRECT);
+ //assert (reg.file == BRW_GENERAL_REGISTER_FILE);
+
+ if (insn->header.access_mode == BRW_ALIGN_1) {
+ insn->bits3.da1.src1_subreg_nr = reg.subnr;
+ insn->bits3.da1.src1_reg_nr = reg.nr;
+ }
+ else {
+ insn->bits3.da16.src1_subreg_nr = reg.subnr / 16;
+ insn->bits3.da16.src1_reg_nr = reg.nr;
+ }
+
+ if (insn->header.access_mode == BRW_ALIGN_1) {
+ if (reg.width == BRW_WIDTH_1 &&
+ insn->header.execution_size == BRW_EXECUTE_1) {
+ insn->bits3.da1.src1_horiz_stride = BRW_HORIZONTAL_STRIDE_0;
+ insn->bits3.da1.src1_width = BRW_WIDTH_1;
+ insn->bits3.da1.src1_vert_stride = BRW_VERTICAL_STRIDE_0;
+ }
+ else {
+ insn->bits3.da1.src1_horiz_stride = reg.hstride;
+ insn->bits3.da1.src1_width = reg.width;
+ insn->bits3.da1.src1_vert_stride = reg.vstride;
+ }
+ }
+ else {
+ insn->bits3.da16.src1_swz_x = BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_X);
+ insn->bits3.da16.src1_swz_y = BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_Y);
+ insn->bits3.da16.src1_swz_z = BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_Z);
+ insn->bits3.da16.src1_swz_w = BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_W);
+
+ /* This is an oddity of the fact we're using the same
+ * descriptions for registers in align_16 as align_1:
+ */
+ if (reg.vstride == BRW_VERTICAL_STRIDE_8)
+ insn->bits3.da16.src1_vert_stride = BRW_VERTICAL_STRIDE_4;
+ else
+ insn->bits3.da16.src1_vert_stride = reg.vstride;
+ }
+ }
+}
+
+
+
+static void brw_set_math_message( struct brw_instruction *insn,
+ unsigned msg_length,
+ unsigned response_length,
+ unsigned function,
+ unsigned integer_type,
+ boolean low_precision,
+ boolean saturate,
+ unsigned dataType )
+{
+ brw_set_src1(insn, brw_imm_d(0));
+
+ insn->bits3.math.function = function;
+ insn->bits3.math.int_type = integer_type;
+ insn->bits3.math.precision = low_precision;
+ insn->bits3.math.saturate = saturate;
+ insn->bits3.math.data_type = dataType;
+ insn->bits3.math.response_length = response_length;
+ insn->bits3.math.msg_length = msg_length;
+ insn->bits3.math.msg_target = BRW_MESSAGE_TARGET_MATH;
+ insn->bits3.math.end_of_thread = 0;
+}
+
+static void brw_set_urb_message( struct brw_instruction *insn,
+ boolean allocate,
+ boolean used,
+ unsigned msg_length,
+ unsigned response_length,
+ boolean end_of_thread,
+ boolean complete,
+ unsigned offset,
+ unsigned swizzle_control )
+{
+ brw_set_src1(insn, brw_imm_d(0));
+
+ insn->bits3.urb.opcode = 0; /* ? */
+ insn->bits3.urb.offset = offset;
+ insn->bits3.urb.swizzle_control = swizzle_control;
+ insn->bits3.urb.allocate = allocate;
+ insn->bits3.urb.used = used; /* ? */
+ insn->bits3.urb.complete = complete;
+ insn->bits3.urb.response_length = response_length;
+ insn->bits3.urb.msg_length = msg_length;
+ insn->bits3.urb.msg_target = BRW_MESSAGE_TARGET_URB;
+ insn->bits3.urb.end_of_thread = end_of_thread;
+}
+
+static void brw_set_dp_write_message( struct brw_instruction *insn,
+ unsigned binding_table_index,
+ unsigned msg_control,
+ unsigned msg_type,
+ unsigned msg_length,
+ unsigned pixel_scoreboard_clear,
+ unsigned response_length,
+ unsigned end_of_thread )
+{
+ brw_set_src1(insn, brw_imm_d(0));
+
+ insn->bits3.dp_write.binding_table_index = binding_table_index;
+ insn->bits3.dp_write.msg_control = msg_control;
+ insn->bits3.dp_write.pixel_scoreboard_clear = pixel_scoreboard_clear;
+ insn->bits3.dp_write.msg_type = msg_type;
+ insn->bits3.dp_write.send_commit_msg = 0;
+ insn->bits3.dp_write.response_length = response_length;
+ insn->bits3.dp_write.msg_length = msg_length;
+ insn->bits3.dp_write.msg_target = BRW_MESSAGE_TARGET_DATAPORT_WRITE;
+ insn->bits3.urb.end_of_thread = end_of_thread;
+}
+
+static void brw_set_dp_read_message( struct brw_instruction *insn,
+ unsigned binding_table_index,
+ unsigned msg_control,
+ unsigned msg_type,
+ unsigned target_cache,
+ unsigned msg_length,
+ unsigned response_length,
+ unsigned end_of_thread )
+{
+ brw_set_src1(insn, brw_imm_d(0));
+
+ insn->bits3.dp_read.binding_table_index = binding_table_index;
+ insn->bits3.dp_read.msg_control = msg_control;
+ insn->bits3.dp_read.msg_type = msg_type;
+ insn->bits3.dp_read.target_cache = target_cache;
+ insn->bits3.dp_read.response_length = response_length;
+ insn->bits3.dp_read.msg_length = msg_length;
+ insn->bits3.dp_read.msg_target = BRW_MESSAGE_TARGET_DATAPORT_READ;
+ insn->bits3.dp_read.end_of_thread = end_of_thread;
+}
+
+static void brw_set_sampler_message( struct brw_instruction *insn,
+ unsigned binding_table_index,
+ unsigned sampler,
+ unsigned msg_type,
+ unsigned response_length,
+ unsigned msg_length,
+ boolean eot)
+{
+ brw_set_src1(insn, brw_imm_d(0));
+
+ insn->bits3.sampler.binding_table_index = binding_table_index;
+ insn->bits3.sampler.sampler = sampler;
+ insn->bits3.sampler.msg_type = msg_type;
+ insn->bits3.sampler.return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
+ insn->bits3.sampler.response_length = response_length;
+ insn->bits3.sampler.msg_length = msg_length;
+ insn->bits3.sampler.end_of_thread = eot;
+ insn->bits3.sampler.msg_target = BRW_MESSAGE_TARGET_SAMPLER;
+}
+
+
+
+static struct brw_instruction *next_insn( struct brw_compile *p,
+ unsigned opcode )
+{
+ struct brw_instruction *insn;
+
+ assert(p->nr_insn + 1 < BRW_EU_MAX_INSN);
+
+ insn = &p->store[p->nr_insn++];
+ memcpy(insn, p->current, sizeof(*insn));
+
+ /* Reset this one-shot flag:
+ */
+
+ if (p->current->header.destreg__conditonalmod) {
+ p->current->header.destreg__conditonalmod = 0;
+ p->current->header.predicate_control = BRW_PREDICATE_NORMAL;
+ }
+
+ insn->header.opcode = opcode;
+ return insn;
+}
+
+
+struct brw_instruction *brw_alu1( struct brw_compile *p,
+ unsigned opcode,
+ struct brw_reg dest,
+ struct brw_reg src )
+{
+ struct brw_instruction *insn = next_insn(p, opcode);
+ brw_set_dest(insn, dest);
+ brw_set_src0(insn, src);
+ return insn;
+}
+
+struct brw_instruction *brw_alu2(struct brw_compile *p,
+ unsigned opcode,
+ struct brw_reg dest,
+ struct brw_reg src0,
+ struct brw_reg src1 )
+{
+ struct brw_instruction *insn = next_insn(p, opcode);
+ brw_set_dest(insn, dest);
+ brw_set_src0(insn, src0);
+ brw_set_src1(insn, src1);
+ return insn;
+}
+
+
+/***********************************************************************
+ * Convenience routines.
+ */
+#define ALU1(OP) \
+struct brw_instruction *brw_##OP(struct brw_compile *p, \
+ struct brw_reg dest, \
+ struct brw_reg src0) \
+{ \
+ return brw_alu1(p, BRW_OPCODE_##OP, dest, src0); \
+}
+
+#define ALU2(OP) \
+struct brw_instruction *brw_##OP(struct brw_compile *p, \
+ struct brw_reg dest, \
+ struct brw_reg src0, \
+ struct brw_reg src1) \
+{ \
+ return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1); \
+}
+
+
+ALU1(MOV)
+ALU2(SEL)
+ALU1(NOT)
+ALU2(AND)
+ALU2(OR)
+ALU2(XOR)
+ALU2(SHR)
+ALU2(SHL)
+ALU2(RSR)
+ALU2(RSL)
+ALU2(ASR)
+ALU2(ADD)
+ALU2(MUL)
+ALU1(FRC)
+ALU1(RNDD)
+ALU2(MAC)
+ALU2(MACH)
+ALU1(LZD)
+ALU2(DP4)
+ALU2(DPH)
+ALU2(DP3)
+ALU2(DP2)
+ALU2(LINE)
+
+
+
+
+void brw_NOP(struct brw_compile *p)
+{
+ struct brw_instruction *insn = next_insn(p, BRW_OPCODE_NOP);
+ brw_set_dest(insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD));
+ brw_set_src0(insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD));
+ brw_set_src1(insn, brw_imm_ud(0x0));
+}
+
+
+
+
+
+/***********************************************************************
+ * Comparisons, if/else/endif
+ */
+
+struct brw_instruction *brw_JMPI(struct brw_compile *p,
+ struct brw_reg dest,
+ struct brw_reg src0,
+ struct brw_reg src1)
+{
+ struct brw_instruction *insn = brw_alu2(p, BRW_OPCODE_JMPI, dest, src0, src1);
+
+ p->current->header.predicate_control = BRW_PREDICATE_NONE;
+
+ return insn;
+}
+
+/* EU takes the value from the flag register and pushes it onto some
+ * sort of a stack (presumably merging with any flag value already on
+ * the stack). Within an if block, the flags at the top of the stack
+ * control execution on each channel of the unit, e.g. on each of the
+ * 16 pixel values in our wm programs.
+ *
+ * When the matching 'else' instruction is reached (presumably by
+ * countdown of the instruction count patched in by our ELSE/ENDIF
+ * functions), the relevant flags are inverted.
+ *
+ * When the matching 'endif' instruction is reached, the flags are
+ * popped off. If the stack is now empty, normal execution resumes.
+ *
+ * No attempt is made to deal with stack overflow (14 elements?).
+ */
+struct brw_instruction *brw_IF(struct brw_compile *p, unsigned execute_size)
+{
+ struct brw_instruction *insn;
+
+ if (p->single_program_flow) {
+ assert(execute_size == BRW_EXECUTE_1);
+
+ insn = next_insn(p, BRW_OPCODE_ADD);
+ insn->header.predicate_inverse = 1;
+ } else {
+ insn = next_insn(p, BRW_OPCODE_IF);
+ }
+
+ /* Override the defaults for this instruction:
+ */
+ brw_set_dest(insn, brw_ip_reg());
+ brw_set_src0(insn, brw_ip_reg());
+ brw_set_src1(insn, brw_imm_d(0x0));
+
+ insn->header.execution_size = execute_size;
+ insn->header.compression_control = BRW_COMPRESSION_NONE;
+ insn->header.predicate_control = BRW_PREDICATE_NORMAL;
+ insn->header.mask_control = BRW_MASK_ENABLE;
+
+ p->current->header.predicate_control = BRW_PREDICATE_NONE;
+
+ return insn;
+}
+
+
+struct brw_instruction *brw_ELSE(struct brw_compile *p,
+ struct brw_instruction *if_insn)
+{
+ struct brw_instruction *insn;
+
+ if (p->single_program_flow) {
+ insn = next_insn(p, BRW_OPCODE_ADD);
+ } else {
+ insn = next_insn(p, BRW_OPCODE_ELSE);
+ }
+
+ brw_set_dest(insn, brw_ip_reg());
+ brw_set_src0(insn, brw_ip_reg());
+ brw_set_src1(insn, brw_imm_d(0x0));
+
+ insn->header.compression_control = BRW_COMPRESSION_NONE;
+ insn->header.execution_size = if_insn->header.execution_size;
+ insn->header.mask_control = BRW_MASK_ENABLE;
+
+ /* Patch the if instruction to point at this instruction.
+ */
+ if (p->single_program_flow) {
+ assert(if_insn->header.opcode == BRW_OPCODE_ADD);
+
+ if_insn->bits3.ud = (insn - if_insn + 1) * 16;
+ } else {
+ assert(if_insn->header.opcode == BRW_OPCODE_IF);
+
+ if_insn->bits3.if_else.jump_count = insn - if_insn;
+ if_insn->bits3.if_else.pop_count = 1;
+ if_insn->bits3.if_else.pad0 = 0;
+ }
+
+ return insn;
+}
+
+void brw_ENDIF(struct brw_compile *p,
+ struct brw_instruction *patch_insn)
+{
+ if (p->single_program_flow) {
+      /* In single program flow mode there's no need to execute an ENDIF:
+       * there are no stack operations to undo, and if we are executing at
+       * this point we simply want to keep going.
+       */
+ struct brw_instruction *next = &p->store[p->nr_insn];
+
+ assert(patch_insn->header.opcode == BRW_OPCODE_ADD);
+
+ patch_insn->bits3.ud = (next - patch_insn) * 16;
+ } else {
+ struct brw_instruction *insn = next_insn(p, BRW_OPCODE_ENDIF);
+
+ brw_set_dest(insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD));
+ brw_set_src0(insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD));
+ brw_set_src1(insn, brw_imm_d(0x0));
+
+ insn->header.compression_control = BRW_COMPRESSION_NONE;
+ insn->header.execution_size = patch_insn->header.execution_size;
+ insn->header.mask_control = BRW_MASK_ENABLE;
+
+ assert(patch_insn->bits3.if_else.jump_count == 0);
+
+ /* Patch the if or else instructions to point at this or the next
+ * instruction respectively.
+ */
+ if (patch_insn->header.opcode == BRW_OPCODE_IF) {
+ /* Automagically turn it into an IFF:
+ */
+ patch_insn->header.opcode = BRW_OPCODE_IFF;
+ patch_insn->bits3.if_else.jump_count = insn - patch_insn + 1;
+ patch_insn->bits3.if_else.pop_count = 0;
+ patch_insn->bits3.if_else.pad0 = 0;
+ } else if (patch_insn->header.opcode == BRW_OPCODE_ELSE) {
+ patch_insn->bits3.if_else.jump_count = insn - patch_insn + 1;
+ patch_insn->bits3.if_else.pop_count = 1;
+ patch_insn->bits3.if_else.pad0 = 0;
+ } else {
+ assert(0);
+ }
+
+ /* Also pop item off the stack in the endif instruction:
+ */
+ insn->bits3.if_else.jump_count = 0;
+ insn->bits3.if_else.pop_count = 1;
+ insn->bits3.if_else.pad0 = 0;
+ }
+}
+
+struct brw_instruction *brw_BREAK(struct brw_compile *p)
+{
+ struct brw_instruction *insn;
+ insn = next_insn(p, BRW_OPCODE_BREAK);
+ brw_set_dest(insn, brw_ip_reg());
+ brw_set_src0(insn, brw_ip_reg());
+ brw_set_src1(insn, brw_imm_d(0x0));
+ insn->header.compression_control = BRW_COMPRESSION_NONE;
+ insn->header.execution_size = BRW_EXECUTE_8;
+ insn->header.mask_control = BRW_MASK_DISABLE;
+ insn->bits3.if_else.pad0 = 0;
+ return insn;
+}
+
+struct brw_instruction *brw_CONT(struct brw_compile *p)
+{
+ struct brw_instruction *insn;
+ insn = next_insn(p, BRW_OPCODE_CONTINUE);
+ brw_set_dest(insn, brw_ip_reg());
+ brw_set_src0(insn, brw_ip_reg());
+ brw_set_src1(insn, brw_imm_d(0x0));
+ insn->header.compression_control = BRW_COMPRESSION_NONE;
+ insn->header.execution_size = BRW_EXECUTE_8;
+ insn->header.mask_control = BRW_MASK_DISABLE;
+ insn->bits3.if_else.pad0 = 0;
+ return insn;
+}
+
+/* DO/WHILE loop:
+ */
+struct brw_instruction *brw_DO(struct brw_compile *p, unsigned execute_size)
+{
+ if (p->single_program_flow) {
+ return &p->store[p->nr_insn];
+ } else {
+ struct brw_instruction *insn = next_insn(p, BRW_OPCODE_DO);
+
+ /* Override the defaults for this instruction:
+ */
+ brw_set_dest(insn, brw_null_reg());
+ brw_set_src0(insn, brw_null_reg());
+ brw_set_src1(insn, brw_null_reg());
+
+ insn->header.compression_control = BRW_COMPRESSION_NONE;
+ insn->header.execution_size = execute_size;
+ insn->header.predicate_control = BRW_PREDICATE_NONE;
+ /* insn->header.mask_control = BRW_MASK_ENABLE; */
+ insn->header.mask_control = BRW_MASK_DISABLE;
+
+ return insn;
+ }
+}
+
+
+
+struct brw_instruction *brw_WHILE(struct brw_compile *p,
+ struct brw_instruction *do_insn)
+{
+ struct brw_instruction *insn;
+
+ if (p->single_program_flow)
+ insn = next_insn(p, BRW_OPCODE_ADD);
+ else
+ insn = next_insn(p, BRW_OPCODE_WHILE);
+
+ brw_set_dest(insn, brw_ip_reg());
+ brw_set_src0(insn, brw_ip_reg());
+ brw_set_src1(insn, brw_imm_d(0x0));
+
+ insn->header.compression_control = BRW_COMPRESSION_NONE;
+
+ if (p->single_program_flow) {
+ insn->header.execution_size = BRW_EXECUTE_1;
+
+ insn->bits3.d = (do_insn - insn) * 16;
+ } else {
+ insn->header.execution_size = do_insn->header.execution_size;
+
+ assert(do_insn->header.opcode == BRW_OPCODE_DO);
+ insn->bits3.if_else.jump_count = do_insn - insn;
+ insn->bits3.if_else.pop_count = 0;
+ insn->bits3.if_else.pad0 = 0;
+ }
+
+/* insn->header.mask_control = BRW_MASK_ENABLE; */
+
+ insn->header.mask_control = BRW_MASK_DISABLE;
+ p->current->header.predicate_control = BRW_PREDICATE_NONE;
+ return insn;
+}
+
+
+/* FORWARD JUMPS:
+ */
+void brw_land_fwd_jump(struct brw_compile *p,
+ struct brw_instruction *jmp_insn)
+{
+ struct brw_instruction *landing = &p->store[p->nr_insn];
+
+ assert(jmp_insn->header.opcode == BRW_OPCODE_JMPI);
+   assert(jmp_insn->bits1.da1.src1_reg_file == BRW_IMMEDIATE_VALUE);
+
+ jmp_insn->bits3.ud = (landing - jmp_insn) - 1;
+}
+
+
+
+/* To integrate with the above, it makes sense that the comparison
+ * instruction should populate the flag register. It might be simpler
+ * just to use the flag reg for most WM tasks?
+ */
+void brw_CMP(struct brw_compile *p,
+ struct brw_reg dest,
+ unsigned conditional,
+ struct brw_reg src0,
+ struct brw_reg src1)
+{
+ struct brw_instruction *insn = next_insn(p, BRW_OPCODE_CMP);
+
+ insn->header.destreg__conditonalmod = conditional;
+ brw_set_dest(insn, dest);
+ brw_set_src0(insn, src0);
+ brw_set_src1(insn, src1);
+
+/* guess_execution_size(insn, src0); */
+
+
+ /* Make it so that future instructions will use the computed flag
+ * value until brw_set_predicate_control_flag_value() is called
+ * again.
+ */
+ if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
+ dest.nr == 0) {
+ p->current->header.predicate_control = BRW_PREDICATE_NORMAL;
+ p->flag_value = 0xff;
+ }
+}
+
+
+
+/***********************************************************************
+ * Helpers for the various SEND message types:
+ */
+
+/* Extended math function, SIMD8: one send instruction operating on 8 values.
+ */
+void brw_math( struct brw_compile *p,
+ struct brw_reg dest,
+ unsigned function,
+ unsigned saturate,
+ unsigned msg_reg_nr,
+ struct brw_reg src,
+ unsigned data_type,
+ unsigned precision )
+{
+ struct brw_instruction *insn = next_insn(p, BRW_OPCODE_SEND);
+ unsigned msg_length = (function == BRW_MATH_FUNCTION_POW) ? 2 : 1;
+ unsigned response_length = (function == BRW_MATH_FUNCTION_SINCOS) ? 2 : 1;
+
+ /* Example code doesn't set predicate_control for send
+ * instructions.
+ */
+ insn->header.predicate_control = 0;
+ insn->header.destreg__conditonalmod = msg_reg_nr;
+
+ brw_set_dest(insn, dest);
+ brw_set_src0(insn, src);
+ brw_set_math_message(insn,
+ msg_length, response_length,
+ function,
+ BRW_MATH_INTEGER_UNSIGNED,
+ precision,
+ saturate,
+ data_type);
+}
+
+/* Use 2 send instructions to run the math function over 16 elements.
+ */
+void brw_math_16( struct brw_compile *p,
+ struct brw_reg dest,
+ unsigned function,
+ unsigned saturate,
+ unsigned msg_reg_nr,
+ struct brw_reg src,
+ unsigned precision )
+{
+ struct brw_instruction *insn;
+ unsigned msg_length = (function == BRW_MATH_FUNCTION_POW) ? 2 : 1;
+ unsigned response_length = (function == BRW_MATH_FUNCTION_SINCOS) ? 2 : 1;
+
+ /* First instruction:
+ */
+ brw_push_insn_state(p);
+ brw_set_predicate_control_flag_value(p, 0xff);
+ brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+
+ insn = next_insn(p, BRW_OPCODE_SEND);
+ insn->header.destreg__conditonalmod = msg_reg_nr;
+
+ brw_set_dest(insn, dest);
+ brw_set_src0(insn, src);
+ brw_set_math_message(insn,
+ msg_length, response_length,
+ function,
+ BRW_MATH_INTEGER_UNSIGNED,
+ precision,
+ saturate,
+ BRW_MATH_DATA_VECTOR);
+
+ /* Second instruction:
+ */
+ insn = next_insn(p, BRW_OPCODE_SEND);
+ insn->header.compression_control = BRW_COMPRESSION_2NDHALF;
+ insn->header.destreg__conditonalmod = msg_reg_nr+1;
+
+ brw_set_dest(insn, offset(dest,1));
+ brw_set_src0(insn, src);
+ brw_set_math_message(insn,
+ msg_length, response_length,
+ function,
+ BRW_MATH_INTEGER_UNSIGNED,
+ precision,
+ saturate,
+ BRW_MATH_DATA_VECTOR);
+
+ brw_pop_insn_state(p);
+}
+
+
+
+
+void brw_dp_WRITE_16( struct brw_compile *p,
+ struct brw_reg src,
+ unsigned msg_reg_nr,
+ unsigned scratch_offset )
+{
+ {
+ brw_push_insn_state(p);
+ brw_set_mask_control(p, BRW_MASK_DISABLE);
+ brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+
+ brw_MOV(p,
+ retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_D),
+ brw_imm_d(scratch_offset));
+
+ brw_pop_insn_state(p);
+ }
+
+ {
+ unsigned msg_length = 3;
+ struct brw_reg dest = retype(brw_null_reg(), BRW_REGISTER_TYPE_UW);
+ struct brw_instruction *insn = next_insn(p, BRW_OPCODE_SEND);
+
+ insn->header.predicate_control = 0; /* XXX */
+ insn->header.compression_control = BRW_COMPRESSION_NONE;
+ insn->header.destreg__conditonalmod = msg_reg_nr;
+
+ brw_set_dest(insn, dest);
+ brw_set_src0(insn, src);
+
+ brw_set_dp_write_message(insn,
+ 255, /* bti */
+ BRW_DATAPORT_OWORD_BLOCK_4_OWORDS, /* msg_control */
+ BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE, /* msg_type */
+ msg_length,
+ 0, /* pixel scoreboard */
+ 0, /* response_length */
+ 0); /* eot */
+ }
+
+}
+
+
+void brw_dp_READ_16( struct brw_compile *p,
+ struct brw_reg dest,
+ unsigned msg_reg_nr,
+ unsigned scratch_offset )
+{
+ {
+ brw_push_insn_state(p);
+ brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+ brw_set_mask_control(p, BRW_MASK_DISABLE);
+
+ brw_MOV(p,
+ retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_D),
+ brw_imm_d(scratch_offset));
+
+ brw_pop_insn_state(p);
+ }
+
+ {
+ struct brw_instruction *insn = next_insn(p, BRW_OPCODE_SEND);
+
+ insn->header.predicate_control = 0; /* XXX */
+ insn->header.compression_control = BRW_COMPRESSION_NONE;
+ insn->header.destreg__conditonalmod = msg_reg_nr;
+
+ brw_set_dest(insn, dest); /* UW? */
+ brw_set_src0(insn, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW));
+
+ brw_set_dp_read_message(insn,
+ 255, /* bti */
+ 3, /* msg_control */
+ BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ, /* msg_type */
+ 1, /* target cache */
+ 1, /* msg_length */
+ 2, /* response_length */
+ 0); /* eot */
+ }
+}
+
+
+void brw_fb_WRITE(struct brw_compile *p,
+ struct brw_reg dest,
+ unsigned msg_reg_nr,
+ struct brw_reg src0,
+ unsigned binding_table_index,
+ unsigned msg_length,
+ unsigned response_length,
+ boolean eot)
+{
+ struct brw_instruction *insn = next_insn(p, BRW_OPCODE_SEND);
+
+ insn->header.predicate_control = 0; /* XXX */
+ insn->header.compression_control = BRW_COMPRESSION_NONE;
+ insn->header.destreg__conditonalmod = msg_reg_nr;
+
+ brw_set_dest(insn, dest);
+ brw_set_src0(insn, src0);
+ brw_set_dp_write_message(insn,
+ binding_table_index,
+ BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE, /* msg_control */
+ BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE, /* msg_type */
+ msg_length,
+ 1, /* pixel scoreboard */
+ response_length,
+ eot);
+}
+
+
+
+void brw_SAMPLE(struct brw_compile *p,
+ struct brw_reg dest,
+ unsigned msg_reg_nr,
+ struct brw_reg src0,
+ unsigned binding_table_index,
+ unsigned sampler,
+ unsigned writemask,
+ unsigned msg_type,
+ unsigned response_length,
+ unsigned msg_length,
+ boolean eot)
+{
+ boolean need_stall = 0;
+
+ if(writemask == 0) {
+/* debug_printf("%s: zero writemask??\n", __FUNCTION__); */
+ return;
+ }
+
+ /* Hardware doesn't do destination dependency checking on send
+ * instructions properly. Add a workaround which generates the
+ * dependency by other means. In practice it seems like this bug
+ * only crops up for texture samples, and only where registers are
+ * written by the send and then written again later without being
+ * read in between. Luckily for us, we already track that
+ * information and use it to modify the writemask for the
+ * instruction, so that is a guide for whether a workaround is
+ * needed.
+ */
+ if (writemask != TGSI_WRITEMASK_XYZW) {
+ unsigned dst_offset = 0;
+ unsigned i, newmask = 0, len = 0;
+
+ for (i = 0; i < 4; i++) {
+ if (writemask & (1<<i))
+ break;
+ dst_offset += 2;
+ }
+ for (; i < 4; i++) {
+ if (!(writemask & (1<<i)))
+ break;
+ newmask |= 1<<i;
+ len++;
+ }
+
+ if (newmask != writemask) {
+ need_stall = 1;
+/* debug_printf("need stall %x %x\n", newmask , writemask); */
+ }
+ else {
+ struct brw_reg m1 = brw_message_reg(msg_reg_nr);
+
+ newmask = ~newmask & TGSI_WRITEMASK_XYZW;
+
+ brw_push_insn_state(p);
+
+ brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+ brw_set_mask_control(p, BRW_MASK_DISABLE);
+
+ brw_MOV(p, m1, brw_vec8_grf(0,0));
+ brw_MOV(p, get_element_ud(m1, 2), brw_imm_ud(newmask << 12));
+
+ brw_pop_insn_state(p);
+
+ src0 = retype(brw_null_reg(), BRW_REGISTER_TYPE_UW);
+ dest = offset(dest, dst_offset);
+ response_length = len * 2;
+ }
+ }
+
+ {
+ struct brw_instruction *insn = next_insn(p, BRW_OPCODE_SEND);
+
+ insn->header.predicate_control = 0; /* XXX */
+ insn->header.compression_control = BRW_COMPRESSION_NONE;
+ insn->header.destreg__conditonalmod = msg_reg_nr;
+
+ brw_set_dest(insn, dest);
+ brw_set_src0(insn, src0);
+ brw_set_sampler_message(insn,
+ binding_table_index,
+ sampler,
+ msg_type,
+ response_length,
+ msg_length,
+ eot);
+ }
+
+ if (need_stall)
+ {
+ struct brw_reg reg = vec8(offset(dest, response_length-1));
+
+ /* mov (8) r9.0<1>:f r9.0<8;8,1>:f { Align1 }
+ */
+ brw_push_insn_state(p);
+      brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+ brw_MOV(p, reg, reg);
+ brw_pop_insn_state(p);
+ }
+
+}
+
+/* All these variables are pretty confusing - we might be better off
+ * using bitmasks and macros for this, in the old style. Or perhaps
+ * just having the caller instantiate the fields in dword3 itself.
+ */
+void brw_urb_WRITE(struct brw_compile *p,
+ struct brw_reg dest,
+ unsigned msg_reg_nr,
+ struct brw_reg src0,
+ boolean allocate,
+ boolean used,
+ unsigned msg_length,
+ unsigned response_length,
+ boolean eot,
+ boolean writes_complete,
+ unsigned offset,
+ unsigned swizzle)
+{
+ struct brw_instruction *insn = next_insn(p, BRW_OPCODE_SEND);
+
+ assert(msg_length < 16);
+
+ brw_set_dest(insn, dest);
+ brw_set_src0(insn, src0);
+ brw_set_src1(insn, brw_imm_d(0));
+
+ insn->header.destreg__conditonalmod = msg_reg_nr;
+
+ brw_set_urb_message(insn,
+ allocate,
+ used,
+ msg_length,
+ response_length,
+ eot,
+ writes_complete,
+ offset,
+ swizzle);
+}
+
diff --git a/src/gallium/drivers/i965simple/brw_eu_util.c b/src/gallium/drivers/i965simple/brw_eu_util.c
new file mode 100644
index 0000000000..3a65b141f0
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_eu_util.c
@@ -0,0 +1,126 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "brw_context.h"
+#include "brw_defines.h"
+#include "brw_eu.h"
+
+
+void brw_math_invert( struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg src)
+{
+ brw_math( p,
+ dst,
+ BRW_MATH_FUNCTION_INV,
+ BRW_MATH_SATURATE_NONE,
+ 0,
+ src,
+ BRW_MATH_PRECISION_FULL,
+ BRW_MATH_DATA_VECTOR );
+}
+
+
+
+void brw_copy4(struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg src,
+ unsigned count)
+{
+ unsigned i;
+
+ dst = vec4(dst);
+ src = vec4(src);
+
+ for (i = 0; i < count; i++)
+ {
+ unsigned delta = i*32;
+ brw_MOV(p, byte_offset(dst, delta), byte_offset(src, delta));
+ brw_MOV(p, byte_offset(dst, delta+16), byte_offset(src, delta+16));
+ }
+}
+
+
+void brw_copy8(struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg src,
+ unsigned count)
+{
+ unsigned i;
+
+ dst = vec8(dst);
+ src = vec8(src);
+
+ for (i = 0; i < count; i++)
+ {
+ unsigned delta = i*32;
+ brw_MOV(p, byte_offset(dst, delta), byte_offset(src, delta));
+ }
+}
+
+
+void brw_copy_indirect_to_indirect(struct brw_compile *p,
+ struct brw_indirect dst_ptr,
+ struct brw_indirect src_ptr,
+ unsigned count)
+{
+ unsigned i;
+
+ for (i = 0; i < count; i++)
+ {
+ unsigned delta = i*32;
+ brw_MOV(p, deref_4f(dst_ptr, delta), deref_4f(src_ptr, delta));
+ brw_MOV(p, deref_4f(dst_ptr, delta+16), deref_4f(src_ptr, delta+16));
+ }
+}
+
+
+void brw_copy_from_indirect(struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_indirect ptr,
+ unsigned count)
+{
+ unsigned i;
+
+ dst = vec4(dst);
+
+ for (i = 0; i < count; i++)
+ {
+ unsigned delta = i*32;
+ brw_MOV(p, byte_offset(dst, delta), deref_4f(ptr, delta));
+ brw_MOV(p, byte_offset(dst, delta+16), deref_4f(ptr, delta+16));
+ }
+}
+
+
+
+
diff --git a/src/gallium/drivers/i965simple/brw_flush.c b/src/gallium/drivers/i965simple/brw_flush.c
new file mode 100644
index 0000000000..e6001c30d9
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_flush.c
@@ -0,0 +1,73 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Author:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_defines.h"
+#include "brw_context.h"
+#include "brw_defines.h"
+#include "brw_batch.h"
+
+
+static void brw_flush( struct pipe_context *pipe,
+ unsigned flags,
+ struct pipe_fence_handle **fence )
+{
+ struct brw_context *brw = brw_context(pipe);
+
+ /* Do we need to emit an MI_FLUSH command to flush the hardware
+ * caches?
+ */
+ if (flags & (PIPE_FLUSH_RENDER_CACHE | PIPE_FLUSH_TEXTURE_CACHE)) {
+ struct brw_mi_flush flush;
+
+ memset(&flush, 0, sizeof(flush));
+ flush.opcode = CMD_MI_FLUSH;
+
+ if (!(flags & PIPE_FLUSH_RENDER_CACHE))
+ flush.flags |= BRW_INHIBIT_FLUSH_RENDER_CACHE;
+
+ if (flags & PIPE_FLUSH_TEXTURE_CACHE)
+ flush.flags |= BRW_FLUSH_READ_CACHE;
+
+ BRW_BATCH_STRUCT(brw, &flush);
+ }
+
+ /* If there are no flags, just flush pending commands to hardware:
+ */
+ FLUSH_BATCH( fence );
+}
+
+
+
+void brw_init_flush_functions( struct brw_context *brw )
+{
+ brw->pipe.flush = brw_flush;
+}
diff --git a/src/gallium/drivers/i965simple/brw_gs.c b/src/gallium/drivers/i965simple/brw_gs.c
new file mode 100644
index 0000000000..de60868ccc
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_gs.c
@@ -0,0 +1,196 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "brw_defines.h"
+#include "brw_context.h"
+#include "brw_eu.h"
+#include "brw_util.h"
+#include "brw_state.h"
+#include "brw_gs.h"
+
+
+
+static void compile_gs_prog( struct brw_context *brw,
+ struct brw_gs_prog_key *key )
+{
+ struct brw_gs_compile c;
+ const unsigned *program;
+ unsigned program_size;
+
+ memset(&c, 0, sizeof(c));
+
+ c.key = *key;
+
+ /* Need to locate the two positions present in vertex + header.
+ * These are currently hardcoded:
+ */
+ c.nr_attrs = brw_count_bits(c.key.attrs);
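+ /* Two vec4 attributes fit per 256-bit register; the extra register
+ * presumably accounts for the vertex header mentioned above.
+ */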
+ c.nr_regs = (c.nr_attrs + 1) / 2 + 1; /* are vertices packed, or reg-aligned? */
+ c.nr_bytes = c.nr_regs * REG_SIZE;
+
+
+ /* Begin the compilation:
+ */
+ brw_init_compile(&c.func);
+
+ c.func.single_program_flow = 1;
+
+ /* For some reason the thread is spawned with only 4 channels
+ * unmasked.
+ */
+ brw_set_mask_control(&c.func, BRW_MASK_DISABLE);
+
+
+ /* Note that primitives which don't require a GS program have
+ * already been weeded out by this stage:
+ */
+ switch (key->primitive) {
+ case PIPE_PRIM_QUADS:
+ brw_gs_quads( &c );
+ break;
+ case PIPE_PRIM_QUAD_STRIP:
+ brw_gs_quad_strip( &c );
+ break;
+ case PIPE_PRIM_LINE_LOOP:
+ brw_gs_lines( &c );
+ break;
+ case PIPE_PRIM_LINES:
+ if (key->hint_gs_always)
+ brw_gs_lines( &c );
+ else {
+ return;
+ }
+ break;
+ case PIPE_PRIM_TRIANGLES:
+ if (key->hint_gs_always)
+ brw_gs_tris( &c );
+ else {
+ return;
+ }
+ break;
+ case PIPE_PRIM_POINTS:
+ if (key->hint_gs_always)
+ brw_gs_points( &c );
+ else {
+ return;
+ }
+ break;
+ default:
+ return;
+ }
+
+ /* get the program
+ */
+ program = brw_get_program(&c.func, &program_size);
+
+ /* Upload
+ */
+ brw->gs.prog_gs_offset = brw_upload_cache( &brw->cache[BRW_GS_PROG],
+ &c.key,
+ sizeof(c.key),
+ program,
+ program_size,
+ &c.prog_data,
+ &brw->gs.prog_data );
+}
+
+
+static boolean search_cache( struct brw_context *brw,
+ struct brw_gs_prog_key *key )
+{
+ return brw_search_cache(&brw->cache[BRW_GS_PROG],
+ key, sizeof(*key),
+ &brw->gs.prog_data,
+ &brw->gs.prog_gs_offset);
+}
+
+
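+/* Map each pipe primitive onto the primitive type the GS program is
+ * compiled for; strips, fans and polygons reduce to their list
+ * equivalents.
+ */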
+static const int gs_prim[PIPE_PRIM_POLYGON+1] = {
+ PIPE_PRIM_POINTS,
+ PIPE_PRIM_LINES,
+ PIPE_PRIM_LINE_LOOP,
+ PIPE_PRIM_LINES,
+ PIPE_PRIM_TRIANGLES,
+ PIPE_PRIM_TRIANGLES,
+ PIPE_PRIM_TRIANGLES,
+ PIPE_PRIM_QUADS,
+ PIPE_PRIM_QUAD_STRIP,
+ PIPE_PRIM_TRIANGLES
+};
+
+static void populate_key( struct brw_context *brw,
+ struct brw_gs_prog_key *key )
+{
+ memset(key, 0, sizeof(*key));
+
+ /* CACHE_NEW_VS_PROG */
+ key->attrs = brw->vs.prog_data->outputs_written;
+
+ /* BRW_NEW_PRIMITIVE */
+ key->primitive = gs_prim[brw->primitive];
+
+ key->hint_gs_always = 0; /* debug code? */
+
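+ /* A GS program is only needed for the primitives that
+ * compile_gs_prog() knows how to decompose (quads, quad strips and
+ * line loops), or when hint_gs_always forces one.
+ */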
+ key->need_gs_prog = (key->hint_gs_always ||
+ brw->primitive == PIPE_PRIM_QUADS ||
+ brw->primitive == PIPE_PRIM_QUAD_STRIP ||
+ brw->primitive == PIPE_PRIM_LINE_LOOP);
+}
+
+/* Upload the GS program for the current primitive, compiling a new
+ * one if it isn't already in the cache.
+ */
+static void upload_gs_prog( struct brw_context *brw )
+{
+ struct brw_gs_prog_key key;
+
+ /* Populate the key:
+ */
+ populate_key(brw, &key);
+
+ if (brw->gs.prog_active != key.need_gs_prog) {
+ brw->state.dirty.cache |= CACHE_NEW_GS_PROG;
+ brw->gs.prog_active = key.need_gs_prog;
+ }
+
+ if (brw->gs.prog_active) {
+ if (!search_cache(brw, &key))
+ compile_gs_prog( brw, &key );
+ }
+}
+
+
+const struct brw_tracked_state brw_gs_prog = {
+ .dirty = {
+ .brw = BRW_NEW_PRIMITIVE,
+ .cache = CACHE_NEW_VS_PROG
+ },
+ .update = upload_gs_prog
+};
diff --git a/src/gallium/drivers/i965simple/brw_gs.h b/src/gallium/drivers/i965simple/brw_gs.h
new file mode 100644
index 0000000000..f09141c6aa
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_gs.h
@@ -0,0 +1,75 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#ifndef BRW_GS_H
+#define BRW_GS_H
+
+
+#include "brw_context.h"
+#include "brw_eu.h"
+
+#define MAX_GS_VERTS (4)
+
+struct brw_gs_prog_key {
+ unsigned attrs:32;
+ unsigned primitive:4;
+ unsigned hint_gs_always:1;
+ unsigned need_gs_prog:1;
+ unsigned pad:26;
+};
+
+struct brw_gs_compile {
+ struct brw_compile func;
+ struct brw_gs_prog_key key;
+ struct brw_gs_prog_data prog_data;
+
+ struct {
+ struct brw_reg R0;
+ struct brw_reg vertex[MAX_GS_VERTS];
+ } reg;
+
+ /* 3 different ways of expressing vertex size:
+ */
+ unsigned nr_attrs;
+ unsigned nr_regs;
+ unsigned nr_bytes;
+};
+
+#define ATTR_SIZE (4*4)
+
+void brw_gs_quads( struct brw_gs_compile *c );
+void brw_gs_quad_strip( struct brw_gs_compile *c );
+void brw_gs_tris( struct brw_gs_compile *c );
+void brw_gs_lines( struct brw_gs_compile *c );
+void brw_gs_points( struct brw_gs_compile *c );
+
+#endif
diff --git a/src/gallium/drivers/i965simple/brw_gs_emit.c b/src/gallium/drivers/i965simple/brw_gs_emit.c
new file mode 100644
index 0000000000..c3cc90b10f
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_gs_emit.c
@@ -0,0 +1,148 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "brw_defines.h"
+#include "brw_context.h"
+#include "brw_eu.h"
+#include "brw_util.h"
+#include "brw_gs.h"
+
+static void brw_gs_alloc_regs( struct brw_gs_compile *c,
+ unsigned nr_verts )
+{
+ unsigned i = 0,j;
+
+ /* Register usage is static, precompute here:
+ */
+ c->reg.R0 = retype(brw_vec8_grf(i, 0), BRW_REGISTER_TYPE_UD); i++;
+
+ /* Payload vertices plus space for more generated vertices:
+ */
+ for (j = 0; j < nr_verts; j++) {
+ c->reg.vertex[j] = brw_vec4_grf(i, 0);
+ i += c->nr_regs;
+ }
+
+ c->prog_data.urb_read_length = c->nr_regs;
+ c->prog_data.total_grf = i;
+}
+
+
+static void brw_gs_emit_vue(struct brw_gs_compile *c,
+ struct brw_reg vert,
+ boolean last,
+ unsigned header)
+{
+ struct brw_compile *p = &c->func;
+ boolean allocate = !last;
+
+ /* Overwrite PrimType and PrimStart in the message header, for
+ * each vertex in turn:
+ */
+ brw_MOV(p, get_element_ud(c->reg.R0, 2), brw_imm_ud(header));
+
+ /* Copy the vertex from vertn into m1..mN+1:
+ */
+ brw_copy8(p, brw_message_reg(1), vert, c->nr_regs);
+
+ /* Send each vertex as a separate write to the URB. This differs
+ * from the approach in brw_sf_emit.c, where subsequent writes are
+ * used to build up a single URB entry. Here each write instantiates
+ * a separate URB entry, and a new one must be allocated each time.
+ */
+ brw_urb_WRITE(p,
+ allocate ? c->reg.R0 : retype(brw_null_reg(), BRW_REGISTER_TYPE_UD),
+ 0,
+ c->reg.R0,
+ allocate,
+ 1, /* used */
+ c->nr_regs + 1, /* msg length */
+ allocate ? 1 : 0, /* response length */
+ allocate ? 0 : 1, /* eot */
+ 1, /* writes_complete */
+ 0, /* urb offset */
+ BRW_URB_SWIZZLE_NONE);
+}
+
+
+
+void brw_gs_quads( struct brw_gs_compile *c )
+{
+ brw_gs_alloc_regs(c, 4);
+
+ /* Use polygons for correct edgeflag behaviour. Note that vertex 3
+ * is the PV for quads, but vertex 0 for polygons:
+ */
+ brw_gs_emit_vue(c, c->reg.vertex[3], 0, ((_3DPRIM_POLYGON << 2) | R02_PRIM_START));
+ brw_gs_emit_vue(c, c->reg.vertex[0], 0, (_3DPRIM_POLYGON << 2));
+ brw_gs_emit_vue(c, c->reg.vertex[1], 0, (_3DPRIM_POLYGON << 2));
+ brw_gs_emit_vue(c, c->reg.vertex[2], 1, ((_3DPRIM_POLYGON << 2) | R02_PRIM_END));
+}
+
+void brw_gs_quad_strip( struct brw_gs_compile *c )
+{
+ brw_gs_alloc_regs(c, 4);
+
+ brw_gs_emit_vue(c, c->reg.vertex[2], 0, ((_3DPRIM_POLYGON << 2) | R02_PRIM_START));
+ brw_gs_emit_vue(c, c->reg.vertex[3], 0, (_3DPRIM_POLYGON << 2));
+ brw_gs_emit_vue(c, c->reg.vertex[0], 0, (_3DPRIM_POLYGON << 2));
+ brw_gs_emit_vue(c, c->reg.vertex[1], 1, ((_3DPRIM_POLYGON << 2) | R02_PRIM_END));
+}
+
+void brw_gs_tris( struct brw_gs_compile *c )
+{
+ brw_gs_alloc_regs(c, 3);
+ brw_gs_emit_vue(c, c->reg.vertex[0], 0, ((_3DPRIM_TRILIST << 2) | R02_PRIM_START));
+ brw_gs_emit_vue(c, c->reg.vertex[1], 0, (_3DPRIM_TRILIST << 2));
+ brw_gs_emit_vue(c, c->reg.vertex[2], 1, ((_3DPRIM_TRILIST << 2) | R02_PRIM_END));
+}
+
+void brw_gs_lines( struct brw_gs_compile *c )
+{
+ brw_gs_alloc_regs(c, 2);
+ brw_gs_emit_vue(c, c->reg.vertex[0], 0, ((_3DPRIM_LINESTRIP << 2) | R02_PRIM_START));
+ brw_gs_emit_vue(c, c->reg.vertex[1], 1, ((_3DPRIM_LINESTRIP << 2) | R02_PRIM_END));
+}
+
+void brw_gs_points( struct brw_gs_compile *c )
+{
+ brw_gs_alloc_regs(c, 1);
+ brw_gs_emit_vue(c, c->reg.vertex[0], 1, ((_3DPRIM_POINTLIST << 2) | R02_PRIM_START | R02_PRIM_END));
+}
+
+
+
+
+
+
+
+
diff --git a/src/gallium/drivers/i965simple/brw_gs_state.c b/src/gallium/drivers/i965simple/brw_gs_state.c
new file mode 100644
index 0000000000..5b8016b2e9
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_gs_state.c
@@ -0,0 +1,90 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+
+#include "brw_context.h"
+#include "brw_state.h"
+#include "brw_defines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+
+
+static void upload_gs_unit( struct brw_context *brw )
+{
+ struct brw_gs_unit_state gs;
+
+ memset(&gs, 0, sizeof(gs));
+
+ /* CACHE_NEW_GS_PROG */
+ if (brw->gs.prog_active) {
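+ /* grf_reg_count is encoded in blocks of 16 registers, minus one. */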
+ gs.thread0.grf_reg_count =
+ align(brw->gs.prog_data->total_grf, 16) / 16 - 1;
+ gs.thread0.kernel_start_pointer = brw->gs.prog_gs_offset >> 6;
+ gs.thread3.urb_entry_read_length = brw->gs.prog_data->urb_read_length;
+ }
+ else {
+ gs.thread0.grf_reg_count = 0;
+ gs.thread0.kernel_start_pointer = 0;
+ gs.thread3.urb_entry_read_length = 1;
+ }
+
+ /* BRW_NEW_URB_FENCE */
+ gs.thread4.nr_urb_entries = brw->urb.nr_gs_entries;
+ gs.thread4.urb_entry_allocation_size = brw->urb.vsize - 1;
+
+ gs.thread4.max_threads = 0; /* Hardware requirement */
+
+ if (BRW_DEBUG & DEBUG_STATS)
+ gs.thread4.stats_enable = 1;
+
+ /* CONSTANT */
+ gs.thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;
+ gs.thread1.single_program_flow = 1;
+ gs.thread3.dispatch_grf_start_reg = 1;
+ gs.thread3.const_urb_entry_read_offset = 0;
+ gs.thread3.const_urb_entry_read_length = 0;
+ gs.thread3.urb_entry_read_offset = 0;
+
+
+ brw->gs.state_gs_offset = brw_cache_data( &brw->cache[BRW_GS_UNIT], &gs );
+}
+
+
+const struct brw_tracked_state brw_gs_unit = {
+ .dirty = {
+ .brw = (BRW_NEW_CURBE_OFFSETS |
+ BRW_NEW_URB_FENCE),
+ .cache = CACHE_NEW_GS_PROG
+ },
+ .update = upload_gs_unit
+};
diff --git a/src/gallium/drivers/i965simple/brw_misc_state.c b/src/gallium/drivers/i965simple/brw_misc_state.c
new file mode 100644
index 0000000000..99ff4403a5
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_misc_state.c
@@ -0,0 +1,488 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "brw_batch.h"
+#include "brw_context.h"
+#include "brw_state.h"
+#include "brw_defines.h"
+
+
+
+
+
+/***********************************************************************
+ * Blend color
+ */
+
+static void upload_blend_constant_color(struct brw_context *brw)
+{
+ struct brw_blend_constant_color bcc;
+
+ memset(&bcc, 0, sizeof(bcc));
+ bcc.header.opcode = CMD_BLEND_CONSTANT_COLOR;
+ bcc.header.length = sizeof(bcc)/4-2;
+ bcc.blend_constant_color[0] = brw->attribs.BlendColor.color[0];
+ bcc.blend_constant_color[1] = brw->attribs.BlendColor.color[1];
+ bcc.blend_constant_color[2] = brw->attribs.BlendColor.color[2];
+ bcc.blend_constant_color[3] = brw->attribs.BlendColor.color[3];
+
+ BRW_CACHED_BATCH_STRUCT(brw, &bcc);
+}
+
+
+const struct brw_tracked_state brw_blend_constant_color = {
+ .dirty = {
+ .brw = BRW_NEW_BLEND,
+ .cache = 0
+ },
+ .update = upload_blend_constant_color
+};
+
+
+/***********************************************************************
+ * Drawing rectangle
+ */
+static void upload_drawing_rect(struct brw_context *brw)
+{
+ struct brw_drawrect bdr;
+
+ memset(&bdr, 0, sizeof(bdr));
+ bdr.header.opcode = CMD_DRAW_RECT;
+ bdr.header.length = sizeof(bdr)/4 - 2;
+ bdr.xmin = 0;
+ bdr.ymin = 0;
+ bdr.xmax = brw->attribs.FrameBuffer.cbufs[0]->width;
+ bdr.ymax = brw->attribs.FrameBuffer.cbufs[0]->height;
+ bdr.xorg = 0;
+ bdr.yorg = 0;
+
+ /* Can't use BRW_CACHED_BATCH_STRUCT because this is also emitted
+ * uncached in brw_draw.c:
+ */
+ BRW_BATCH_STRUCT(brw, &bdr);
+}
+
+const struct brw_tracked_state brw_drawing_rect = {
+ .dirty = {
+ .brw = BRW_NEW_SCENE,
+ .cache = 0
+ },
+ .update = upload_drawing_rect
+};
+
+/**
+ * Upload the binding table pointers, which point to each stage's array of
+ * surface state pointers.
+ *
+ * The binding table pointers are relative to the surface state base address,
+ * which is the BRW_SS_POOL cache buffer.
+ */
+static void upload_binding_table_pointers(struct brw_context *brw)
+{
+ struct brw_binding_table_pointers btp;
+ memset(&btp, 0, sizeof(btp));
+
+ btp.header.opcode = CMD_BINDING_TABLE_PTRS;
+ btp.header.length = sizeof(btp)/4 - 2;
+ btp.vs = 0;
+ btp.gs = 0;
+ btp.clp = 0;
+ btp.sf = 0;
+ btp.wm = brw->wm.bind_ss_offset;
+
+ BRW_CACHED_BATCH_STRUCT(brw, &btp);
+}
+
+const struct brw_tracked_state brw_binding_table_pointers = {
+ .dirty = {
+ .brw = 0,
+ .cache = CACHE_NEW_SURF_BIND
+ },
+ .update = upload_binding_table_pointers,
+};
+
+
+/**
+ * Upload pointers to the per-stage state.
+ *
+ * The state pointers in this packet are all relative to the general state
+ * base address set by CMD_STATE_BASE_ADDRESS, which is the BRW_GS_POOL buffer.
+ */
+static void upload_pipelined_state_pointers(struct brw_context *brw )
+{
+ struct brw_pipelined_state_pointers psp;
+ memset(&psp, 0, sizeof(psp));
+
+ psp.header.opcode = CMD_PIPELINED_STATE_POINTERS;
+ psp.header.length = sizeof(psp)/4 - 2;
+
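+ /* Unit state offsets are given to the hardware in 32-byte units,
+ * hence the >> 5.
+ */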
+ psp.vs.offset = brw->vs.state_gs_offset >> 5;
+ psp.sf.offset = brw->sf.state_gs_offset >> 5;
+ psp.wm.offset = brw->wm.state_gs_offset >> 5;
+ psp.cc.offset = brw->cc.state_gs_offset >> 5;
+
+ /* GS gets turned on and off regularly. Need to re-emit URB fence
+ * after this occurs.
+ */
+ if (brw->gs.prog_active) {
+ psp.gs.offset = brw->gs.state_gs_offset >> 5;
+ psp.gs.enable = 1;
+ }
+
+ if (0) {
+ psp.clp.offset = brw->clip.state_gs_offset >> 5;
+ psp.clp.enable = 1;
+ }
+
+
+ if (BRW_CACHED_BATCH_STRUCT(brw, &psp))
+ brw->state.dirty.brw |= BRW_NEW_PSP;
+}
+
+const struct brw_tracked_state brw_pipelined_state_pointers = {
+ .dirty = {
+ .brw = 0,
+ .cache = (CACHE_NEW_VS_UNIT |
+ CACHE_NEW_GS_UNIT |
+ CACHE_NEW_GS_PROG |
+ CACHE_NEW_CLIP_UNIT |
+ CACHE_NEW_SF_UNIT |
+ CACHE_NEW_WM_UNIT |
+ CACHE_NEW_CC_UNIT)
+ },
+ .update = upload_pipelined_state_pointers
+};
+
+static void upload_psp_urb_cbs(struct brw_context *brw )
+{
+ upload_pipelined_state_pointers(brw);
+ brw_upload_urb_fence(brw);
+ brw_upload_constant_buffer_state(brw);
+}
+
+
+const struct brw_tracked_state brw_psp_urb_cbs = {
+ .dirty = {
+ .brw = BRW_NEW_URB_FENCE,
+ .cache = (CACHE_NEW_VS_UNIT |
+ CACHE_NEW_GS_UNIT |
+ CACHE_NEW_GS_PROG |
+ CACHE_NEW_CLIP_UNIT |
+ CACHE_NEW_SF_UNIT |
+ CACHE_NEW_WM_UNIT |
+ CACHE_NEW_CC_UNIT)
+ },
+ .update = upload_psp_urb_cbs
+};
+
+/**
+ * Upload the depthbuffer offset and format.
+ *
+ * We have to do this per state validation as we need to emit the relocation
+ * in the batch buffer.
+ */
+static void upload_depthbuffer(struct brw_context *brw)
+{
+ struct pipe_surface *depth_surface = brw->attribs.FrameBuffer.zsbuf;
+
+ BEGIN_BATCH(5, INTEL_BATCH_NO_CLIPRECTS);
+ OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (5 - 2));
+ if (depth_surface == NULL) {
+ OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
+ (BRW_SURFACE_NULL << 29));
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+ } else {
+ unsigned int format;
+ struct brw_texture *tex = (struct brw_texture *)depth_surface->texture;
+ assert(depth_surface->block.width == 1);
+ assert(depth_surface->block.height == 1);
+ switch (depth_surface->block.size) {
+ case 2:
+ format = BRW_DEPTHFORMAT_D16_UNORM;
+ break;
+ case 4:
+ if (depth_surface->format == PIPE_FORMAT_Z32_FLOAT)
+ format = BRW_DEPTHFORMAT_D32_FLOAT;
+ else
+ format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
+ break;
+ default:
+ assert(0);
+ return;
+ }
+
+ OUT_BATCH((depth_surface->stride - 1) |
+ (format << 18) |
+ (BRW_TILEWALK_YMAJOR << 26) |
+// (depth_surface->region->tiled << 27) |
+ (BRW_SURFACE_2D << 29));
+ OUT_RELOC(tex->buffer,
+ PIPE_BUFFER_USAGE_GPU_READ | PIPE_BUFFER_USAGE_GPU_WRITE, 0);
+ OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
+ ((depth_surface->stride/depth_surface->block.size - 1) << 6) |
+ ((depth_surface->height - 1) << 19));
+ OUT_BATCH(0);
+ }
+ ADVANCE_BATCH();
+}
+
+const struct brw_tracked_state brw_depthbuffer = {
+ .dirty = {
+ .brw = BRW_NEW_SCENE,
+ .cache = 0
+ },
+ .update = upload_depthbuffer,
+};
+
+
+
+
+/***********************************************************************
+ * Polygon stipple packet
+ */
+
+static void upload_polygon_stipple(struct brw_context *brw)
+{
+ struct brw_polygon_stipple bps;
+ unsigned i;
+
+ memset(&bps, 0, sizeof(bps));
+ bps.header.opcode = CMD_POLY_STIPPLE_PATTERN;
+ bps.header.length = sizeof(bps)/4-2;
+
+ /* XXX: state tracker should send *all* state down initially!
+ */
+ if (brw->attribs.PolygonStipple)
+ for (i = 0; i < 32; i++)
+ bps.stipple[i] = brw->attribs.PolygonStipple->stipple[31 - i]; /* invert */
+
+ BRW_CACHED_BATCH_STRUCT(brw, &bps);
+}
+
+const struct brw_tracked_state brw_polygon_stipple = {
+ .dirty = {
+ .brw = BRW_NEW_STIPPLE,
+ .cache = 0
+ },
+ .update = upload_polygon_stipple
+};
+
+
+/***********************************************************************
+ * Line stipple packet
+ */
+
+static void upload_line_stipple(struct brw_context *brw)
+{
+ struct brw_line_stipple bls;
+ float tmp;
+ int tmpi;
+
+ memset(&bls, 0, sizeof(bls));
+ bls.header.opcode = CMD_LINE_STIPPLE_PATTERN;
+ bls.header.length = sizeof(bls)/4 - 2;
+
+ bls.bits0.pattern = brw->attribs.Raster->line_stipple_pattern;
+ bls.bits1.repeat_count = brw->attribs.Raster->line_stipple_factor;
+
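+ /* The inverse repeat count is converted to fixed point with a
+ * 1<<13 scale factor.
+ */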
+ tmp = 1.0 / (float) brw->attribs.Raster->line_stipple_factor;
+ tmpi = tmp * (1<<13);
+
+
+ bls.bits1.inverse_repeat_count = tmpi;
+
+ BRW_CACHED_BATCH_STRUCT(brw, &bls);
+}
+
+const struct brw_tracked_state brw_line_stipple = {
+ .dirty = {
+ .brw = BRW_NEW_STIPPLE,
+ .cache = 0
+ },
+ .update = upload_line_stipple
+};
+
+
+/***********************************************************************
+ * Misc constant state packets
+ */
+
+static void upload_pipe_control(struct brw_context *brw)
+{
+ struct brw_pipe_control pc;
+
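+ /* Early return: the PIPE_CONTROL packet is currently disabled, so
+ * the code below is never reached.
+ */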
+ return;
+
+ memset(&pc, 0, sizeof(pc));
+
+ pc.header.opcode = CMD_PIPE_CONTROL;
+ pc.header.length = sizeof(pc)/4 - 2;
+ pc.header.post_sync_operation = PIPE_CONTROL_NOWRITE;
+
+ pc.header.instruction_state_cache_flush_enable = 1;
+
+ pc.bits1.dest_addr_type = PIPE_CONTROL_GTTWRITE_GLOBAL;
+
+ BRW_BATCH_STRUCT(brw, &pc);
+}
+
+const struct brw_tracked_state brw_pipe_control = {
+ .dirty = {
+ .brw = BRW_NEW_SCENE,
+ .cache = 0
+ },
+ .update = upload_pipe_control
+};
+
+
+/***********************************************************************
+ * Misc invariant state packets
+ */
+
+static void upload_invarient_state( struct brw_context *brw )
+{
+ {
+ struct brw_mi_flush flush;
+
+ memset(&flush, 0, sizeof(flush));
+ flush.opcode = CMD_MI_FLUSH;
+ flush.flags = BRW_FLUSH_STATE_CACHE | BRW_FLUSH_READ_CACHE;
+ BRW_BATCH_STRUCT(brw, &flush);
+ }
+
+ {
+ /* 0x61040000 Pipeline Select */
+ /* PipelineSelect : 0 */
+ struct brw_pipeline_select ps;
+
+ memset(&ps, 0, sizeof(ps));
+ ps.header.opcode = CMD_PIPELINE_SELECT;
+ ps.header.pipeline_select = 0;
+ BRW_BATCH_STRUCT(brw, &ps);
+ }
+
+ {
+ struct brw_global_depth_offset_clamp gdo;
+ memset(&gdo, 0, sizeof(gdo));
+
+ /* Disable depth offset clamping.
+ */
+ gdo.header.opcode = CMD_GLOBAL_DEPTH_OFFSET_CLAMP;
+ gdo.header.length = sizeof(gdo)/4 - 2;
+ gdo.depth_offset_clamp = 0.0;
+
+ BRW_BATCH_STRUCT(brw, &gdo);
+ }
+
+
+ /* 0x61020000 State Instruction Pointer */
+ {
+ struct brw_system_instruction_pointer sip;
+ memset(&sip, 0, sizeof(sip));
+
+ sip.header.opcode = CMD_STATE_INSN_POINTER;
+ sip.header.length = 0;
+ sip.bits0.pad = 0;
+ sip.bits0.system_instruction_pointer = 0;
+ BRW_BATCH_STRUCT(brw, &sip);
+ }
+
+
+ {
+ struct brw_vf_statistics vfs;
+ memset(&vfs, 0, sizeof(vfs));
+
+ vfs.opcode = CMD_VF_STATISTICS;
+ if (BRW_DEBUG & DEBUG_STATS)
+ vfs.statistics_enable = 1;
+
+ BRW_BATCH_STRUCT(brw, &vfs);
+ }
+
+
+ {
+ struct brw_polygon_stipple_offset bpso;
+
+ memset(&bpso, 0, sizeof(bpso));
+ bpso.header.opcode = CMD_POLY_STIPPLE_OFFSET;
+ bpso.header.length = sizeof(bpso)/4-2;
+ bpso.bits0.x_offset = 0;
+ bpso.bits0.y_offset = 0;
+
+ BRW_BATCH_STRUCT(brw, &bpso);
+ }
+}
+
+const struct brw_tracked_state brw_invarient_state = {
+ .dirty = {
+ .brw = BRW_NEW_SCENE,
+ .cache = 0
+ },
+ .update = upload_invarient_state
+};
+
+/**
+ * Define the base addresses which some state is referenced from.
+ *
+ * This allows us to avoid having to emit relocations in many places for
+ * cached state, and instead emit pointers inside of large, mostly-static
+ * state pools. This comes at the expense of memory, and potentially more
+ * cache misses.
+ */
+static void upload_state_base_address( struct brw_context *brw )
+{
+ /* Output the structure (brw_state_base_address) directly to the
+ * batchbuffer, so we can emit relocations inline.
+ */
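+ /* The low bit of each base-address dword acts as a 'modify enable'
+ * flag; the relocation deltas and literal 1s below program a zero
+ * base address with modification enabled.
+ */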
+ BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
+ OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
+ OUT_RELOC(brw->pool[BRW_GS_POOL].buffer,
+ PIPE_BUFFER_USAGE_GPU_READ,
+ 1); /* General state base address */
+ OUT_RELOC(brw->pool[BRW_SS_POOL].buffer,
+ PIPE_BUFFER_USAGE_GPU_READ,
+ 1); /* Surface state base address */
+ OUT_BATCH(1); /* Indirect object base address */
+ OUT_BATCH(1); /* General state upper bound */
+ OUT_BATCH(1); /* Indirect object upper bound */
+ ADVANCE_BATCH();
+}
+
+
+const struct brw_tracked_state brw_state_base_address = {
+ .dirty = {
+ .brw = BRW_NEW_SCENE,
+ .cache = 0
+ },
+ .update = upload_state_base_address
+};
diff --git a/src/gallium/drivers/i965simple/brw_reg.h b/src/gallium/drivers/i965simple/brw_reg.h
new file mode 100644
index 0000000000..9e885c3b3b
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_reg.h
@@ -0,0 +1,76 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#define CMD_MI (0x0 << 29)
+#define CMD_2D (0x2 << 29)
+#define CMD_3D (0x3 << 29)
+
+#define MI_BATCH_BUFFER_END (CMD_MI | 0xA << 23)
+
+/* Stalls command execution waiting for the given events to have occurred. */
+#define MI_WAIT_FOR_EVENT (CMD_MI | (0x3 << 23))
+#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
+#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
+
+/* Primitive dispatch on 830-945 */
+#define _3DPRIMITIVE (CMD_3D | (0x1f << 24))
+#define PRIM_INDIRECT (1<<23)
+#define PRIM_INLINE (0<<23)
+#define PRIM_INDIRECT_SEQUENTIAL (0<<17)
+#define PRIM_INDIRECT_ELTS (1<<17)
+
+#define PRIM3D_TRILIST (0x0<<18)
+#define PRIM3D_TRISTRIP (0x1<<18)
+#define PRIM3D_TRISTRIP_RVRSE (0x2<<18)
+#define PRIM3D_TRIFAN (0x3<<18)
+#define PRIM3D_POLY (0x4<<18)
+#define PRIM3D_LINELIST (0x5<<18)
+#define PRIM3D_LINESTRIP (0x6<<18)
+#define PRIM3D_RECTLIST (0x7<<18)
+#define PRIM3D_POINTLIST (0x8<<18)
+#define PRIM3D_DIB (0x9<<18)
+#define PRIM3D_MASK (0x1f<<18)
+
+#define XY_SETUP_BLT_CMD (CMD_2D | (0x01 << 22) | 6)
+
+#define XY_COLOR_BLT_CMD (CMD_2D | (0x50 << 22) | 4)
+
+#define XY_SRC_COPY_BLT_CMD (CMD_2D | (0x53 << 22) | 6)
+
+/* BR00 */
+#define XY_BLT_WRITE_ALPHA (1 << 21)
+#define XY_BLT_WRITE_RGB (1 << 20)
+#define XY_SRC_TILED (1 << 15)
+#define XY_DST_TILED (1 << 11)
+
+/* BR13 */
+#define BR13_565 (0x1 << 24)
+#define BR13_8888 (0x3 << 24)
+
+#define FENCE_LINEAR 0
+#define FENCE_XMAJOR 1
+#define FENCE_YMAJOR 2
diff --git a/src/gallium/drivers/i965simple/brw_screen.c b/src/gallium/drivers/i965simple/brw_screen.c
new file mode 100644
index 0000000000..b22e105f10
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_screen.c
@@ -0,0 +1,246 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "util/u_memory.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "util/u_string.h"
+#include "util/u_simple_screen.h"
+
+#include "brw_context.h"
+#include "brw_screen.h"
+#include "brw_tex_layout.h"
+
+
+static const char *
+brw_get_vendor( struct pipe_screen *screen )
+{
+ return "Tungsten Graphics, Inc.";
+}
+
+
+static const char *
+brw_get_name( struct pipe_screen *screen )
+{
+ static char buffer[128];
+ const char *chipset;
+
+ switch (brw_screen(screen)->pci_id) {
+ case PCI_CHIP_I965_Q:
+ chipset = "Intel(R) 965Q";
+ break;
+ case PCI_CHIP_I965_G:
+ case PCI_CHIP_I965_G_1:
+ chipset = "Intel(R) 965G";
+ break;
+ case PCI_CHIP_I965_GM:
+ chipset = "Intel(R) 965GM";
+ break;
+ case PCI_CHIP_I965_GME:
+ chipset = "Intel(R) 965GME/GLE";
+ break;
+ default:
+ chipset = "unknown";
+ break;
+ }
+
+ util_snprintf(buffer, sizeof(buffer), "i965 (chipset: %s)", chipset);
+ return buffer;
+}
+
+
+static int
+brw_get_param(struct pipe_screen *screen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_TEXTURE_IMAGE_UNITS:
+ return 8;
+ case PIPE_CAP_NPOT_TEXTURES:
+ return 1;
+ case PIPE_CAP_TWO_SIDED_STENCIL:
+ return 1;
+ case PIPE_CAP_GLSL:
+ return 0;
+ case PIPE_CAP_S3TC:
+ return 0;
+ case PIPE_CAP_ANISOTROPIC_FILTER:
+ return 0;
+ case PIPE_CAP_POINT_SPRITE:
+ return 0;
+ case PIPE_CAP_MAX_RENDER_TARGETS:
+ return 1;
+ case PIPE_CAP_OCCLUSION_QUERY:
+ return 0;
+ case PIPE_CAP_TEXTURE_SHADOW_MAP:
+ return 1;
+ case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
+ return 11; /* max 1024x1024 */
+ case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
+ return 8; /* max 128x128x128 */
+ case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
+ return 11; /* max 1024x1024 */
+ default:
+ return 0;
+ }
+}
+
+
+static float
+brw_get_paramf(struct pipe_screen *screen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_LINE_WIDTH:
+ /* fall-through */
+ case PIPE_CAP_MAX_LINE_WIDTH_AA:
+ return 7.5;
+
+ case PIPE_CAP_MAX_POINT_WIDTH:
+ /* fall-through */
+ case PIPE_CAP_MAX_POINT_WIDTH_AA:
+ return 255.0;
+
+ case PIPE_CAP_MAX_TEXTURE_ANISOTROPY:
+ return 4.0;
+
+ case PIPE_CAP_MAX_TEXTURE_LOD_BIAS:
+ return 16.0;
+
+ default:
+ return 0;
+ }
+}
+
+
+static boolean
+brw_is_format_supported( struct pipe_screen *screen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned tex_usage,
+ unsigned geom_flags )
+{
+#if 0
+ /* XXX: This is broken -- rewrite if still needed. */
+ static const unsigned tex_supported[] = {
+ PIPE_FORMAT_R8G8B8A8_UNORM,
+ PIPE_FORMAT_A8R8G8B8_UNORM,
+ PIPE_FORMAT_R5G6B5_UNORM,
+ PIPE_FORMAT_L8_UNORM,
+ PIPE_FORMAT_A8_UNORM,
+ PIPE_FORMAT_I8_UNORM,
+ PIPE_FORMAT_L8A8_UNORM,
+ PIPE_FORMAT_YCBCR,
+ PIPE_FORMAT_YCBCR_REV,
+ PIPE_FORMAT_S8_Z24,
+ };
+
+
+ /* Actually a lot more than this - add later:
+ */
+ static const unsigned render_supported[] = {
+ PIPE_FORMAT_A8R8G8B8_UNORM,
+ PIPE_FORMAT_R5G6B5_UNORM,
+ };
+
+ /*
+ */
+ static const unsigned z_stencil_supported[] = {
+ PIPE_FORMAT_Z16_UNORM,
+ PIPE_FORMAT_Z32_UNORM,
+ PIPE_FORMAT_S8Z24_UNORM,
+ };
+
+ switch (type) {
+ case PIPE_RENDER_FORMAT:
+ *numFormats = Elements(render_supported);
+ return render_supported;
+
+ case PIPE_TEX_FORMAT:
+ *numFormats = Elements(tex_supported);
+ return render_supported;
+
+ case PIPE_Z_STENCIL_FORMAT:
+ *numFormats = Elements(render_supported);
+ return render_supported;
+
+ default:
+ *numFormats = 0;
+ return NULL;
+ }
+#else
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ case PIPE_FORMAT_S8Z24_UNORM:
+ return TRUE;
+ default:
+ return FALSE;
+ }
+ return FALSE;
+#endif
+}
+
+
+static void
+brw_destroy_screen( struct pipe_screen *screen )
+{
+ struct pipe_winsys *winsys = screen->winsys;
+
+ if(winsys->destroy)
+ winsys->destroy(winsys);
+
+ FREE(screen);
+}
+
+
+/**
+ * Create a new brw_screen object
+ */
+struct pipe_screen *
+brw_create_screen(struct pipe_winsys *winsys, uint pci_id)
+{
+ struct brw_screen *brwscreen = CALLOC_STRUCT(brw_screen);
+
+ if (!brwscreen)
+ return NULL;
+
+ brwscreen->pci_id = pci_id;
+
+ brwscreen->screen.winsys = winsys;
+
+ brwscreen->screen.destroy = brw_destroy_screen;
+
+ brwscreen->screen.get_name = brw_get_name;
+ brwscreen->screen.get_vendor = brw_get_vendor;
+ brwscreen->screen.get_param = brw_get_param;
+ brwscreen->screen.get_paramf = brw_get_paramf;
+ brwscreen->screen.is_format_supported = brw_is_format_supported;
+
+ brw_init_screen_texture_funcs(&brwscreen->screen);
+ u_simple_screen_init(&brwscreen->screen);
+
+ return &brwscreen->screen;
+}
diff --git a/src/gallium/drivers/i965simple/brw_screen.h b/src/gallium/drivers/i965simple/brw_screen.h
new file mode 100644
index 0000000000..d3c70387e6
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_screen.h
@@ -0,0 +1,68 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef BRW_SCREEN_H
+#define BRW_SCREEN_H
+
+
+#include "pipe/p_screen.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Subclass of pipe_screen
+ */
+struct brw_screen
+{
+ struct pipe_screen screen;
+
+ uint pci_id;
+};
+
+
+/** cast wrapper */
+static INLINE struct brw_screen *
+brw_screen(struct pipe_screen *pscreen)
+{
+ return (struct brw_screen *) pscreen;
+}
+
+
+extern struct pipe_screen *
+brw_create_screen(struct pipe_winsys *winsys, uint pci_id);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* BRW_SCREEN_H */
diff --git a/src/gallium/drivers/i965simple/brw_sf.c b/src/gallium/drivers/i965simple/brw_sf.c
new file mode 100644
index 0000000000..b82a2e143b
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_sf.c
@@ -0,0 +1,351 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "brw_defines.h"
+#include "brw_context.h"
+#include "brw_eu.h"
+#include "brw_util.h"
+#include "brw_sf.h"
+#include "brw_state.h"
+#include "tgsi/tgsi_parse.h"
+
+
+static void compile_sf_prog( struct brw_context *brw,
+ struct brw_sf_prog_key *key )
+{
+ struct brw_sf_compile c;
+ const unsigned *program;
+ unsigned program_size;
+
+ memset(&c, 0, sizeof(c));
+
+ /* Begin the compilation:
+ */
+ brw_init_compile(&c.func);
+
+ c.key = *key;
+
+
+ c.nr_attrs = c.key.vp_output_count;
+ c.nr_attr_regs = (c.nr_attrs+1)/2;
+
+ c.nr_setup_attrs = c.key.fp_input_count + 1; /* +1 for position */
+ c.nr_setup_regs = (c.nr_setup_attrs+1)/2;
+
+ c.prog_data.urb_read_length = c.nr_attr_regs;
+ c.prog_data.urb_entry_size = c.nr_setup_regs * 2;
+
+
+ /* Which primitive? Or all three?
+ */
+ switch (key->primitive) {
+ case SF_TRIANGLES:
+ c.nr_verts = 3;
+ brw_emit_tri_setup( &c );
+ break;
+ case SF_LINES:
+ c.nr_verts = 2;
+ brw_emit_line_setup( &c );
+ break;
+ case SF_POINTS:
+ c.nr_verts = 1;
+ brw_emit_point_setup( &c );
+ break;
+
+ case SF_UNFILLED_TRIS:
+ default:
+ assert(0);
+ return;
+ }
+
+
+
+ /* get the program
+ */
+ program = brw_get_program(&c.func, &program_size);
+
+ /* Upload
+ */
+ brw->sf.prog_gs_offset = brw_upload_cache( &brw->cache[BRW_SF_PROG],
+ &c.key,
+ sizeof(c.key),
+ program,
+ program_size,
+ &c.prog_data,
+ &brw->sf.prog_data );
+}
+
+
+static boolean search_cache( struct brw_context *brw,
+ struct brw_sf_prog_key *key )
+{
+ return brw_search_cache(&brw->cache[BRW_SF_PROG],
+ key, sizeof(*key),
+ &brw->sf.prog_data,
+ &brw->sf.prog_gs_offset);
+}
+
+
+/* Calculate interpolants for triangle and line rasterization.
+ */
+static void upload_sf_prog( struct brw_context *brw )
+{
+ const struct brw_fragment_program *fs = brw->attribs.FragmentProgram;
+ struct brw_sf_prog_key key;
+ struct tgsi_parse_context parse;
+ int i, done = 0;
+
+
+ memset(&key, 0, sizeof(key));
+
+ /* Populate the key, noting state dependencies:
+ */
+ /* CACHE_NEW_VS_PROG */
+ key.vp_output_count = brw->vs.prog_data->outputs_written;
+
+ /* BRW_NEW_FS */
+ key.fp_input_count = brw->attribs.FragmentProgram->info.file_max[TGSI_FILE_INPUT] + 1;
+
+
+ /* BRW_NEW_REDUCED_PRIMITIVE */
+ switch (brw->reduced_primitive) {
+ case PIPE_PRIM_TRIANGLES:
+// if (key.attrs & (1<<VERT_RESULT_EDGE))
+// key.primitive = SF_UNFILLED_TRIS;
+// else
+ key.primitive = SF_TRIANGLES;
+ break;
+ case PIPE_PRIM_LINES:
+ key.primitive = SF_LINES;
+ break;
+ case PIPE_PRIM_POINTS:
+ key.primitive = SF_POINTS;
+ break;
+ }
+
+
+
+ /* Scan fp inputs to figure out what interpolation modes are
+ * required for each incoming vp output. There is an assumption
+ * that the state tracker makes sure there is a 1:1 linkage between
+ * these sets of attributes (XXX: position??)
+ */
+ tgsi_parse_init( &parse, fs->program.tokens );
+ while( !done &&
+ !tgsi_parse_end_of_tokens( &parse ) )
+ {
+ tgsi_parse_token( &parse );
+
+ switch( parse.FullToken.Token.Type ) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ if (parse.FullToken.FullDeclaration.Declaration.File == TGSI_FILE_INPUT)
+ {
+ int first = parse.FullToken.FullDeclaration.DeclarationRange.First;
+ int last = parse.FullToken.FullDeclaration.DeclarationRange.Last;
+ int interp_mode = parse.FullToken.FullDeclaration.Declaration.Interpolate;
+ //int semantic = parse.FullToken.FullDeclaration.Semantic.SemanticName;
+ //int semantic_index = parse.FullToken.FullDeclaration.Semantic.SemanticIndex;
+
+ debug_printf("fs input %d..%d interp mode %d\n", first, last, interp_mode);
+
+ switch (interp_mode) {
+ case TGSI_INTERPOLATE_CONSTANT:
+ for (i = first; i <= last; i++)
+ key.const_mask |= (1 << i);
+ break;
+ case TGSI_INTERPOLATE_LINEAR:
+ for (i = first; i <= last; i++)
+ key.linear_mask |= (1 << i);
+ break;
+ case TGSI_INTERPOLATE_PERSPECTIVE:
+ for (i = first; i <= last; i++)
+ key.persp_mask |= (1 << i);
+ break;
+ default:
+ break;
+ }
+
+ /* Also need stuff for flat shading, twosided color.
+ */
+
+ }
+ break;
+ default:
+ done = 1;
+ break;
+ }
+ }
+
+ /* Hack: Adjust for position. Optimize away when not required (ie
+ * for perspective interpolation).
+ */
+ key.persp_mask <<= 1;
+ key.linear_mask <<= 1;
+ key.linear_mask |= 1;
+ key.const_mask <<= 1;
+
+ debug_printf("key.persp_mask: %x\n", key.persp_mask);
+ debug_printf("key.linear_mask: %x\n", key.linear_mask);
+ debug_printf("key.const_mask: %x\n", key.const_mask);
+
+
+// key.do_point_sprite = brw->attribs.Point->PointSprite;
+// key.SpriteOrigin = brw->attribs.Point->SpriteOrigin;
+
+// key.do_flat_shading = (brw->attribs.Raster->flatshade);
+// key.do_twoside_color = (brw->attribs.Light->Enabled && brw->attribs.Light->Model.TwoSide);
+
+// if (key.do_twoside_color)
+// key.frontface_ccw = (brw->attribs.Polygon->FrontFace == GL_CCW);
+
+
+ if (!search_cache(brw, &key))
+ compile_sf_prog( brw, &key );
+}
+
+
+const struct brw_tracked_state brw_sf_prog = {
+ .dirty = {
+ .brw = (BRW_NEW_RASTERIZER |
+ BRW_NEW_REDUCED_PRIMITIVE |
+ BRW_NEW_VS |
+ BRW_NEW_FS),
+ .cache = 0,
+ },
+ .update = upload_sf_prog
+};
+
+
+
+#if 0
+/* Build a struct like the one we'd like the state tracker to pass to
+ * us.
+ */
+static void update_sf_linkage( struct brw_context *brw )
+{
+ const struct brw_vertex_program *vs = brw->attribs.VertexProgram;
+ const struct brw_fragment_program *fs = brw->attribs.FragmentProgram;
+ struct pipe_setup_linkage state;
+ struct tgsi_parse_context parse;
+
+ int i, j;
+ int nr_vp_outputs = 0;
+ int done = 0;
+
+ struct {
+ unsigned semantic:8;
+ unsigned semantic_index:16;
+ } fp_semantic[32], vp_semantic[32];
+
+ memset(&state, 0, sizeof(state));
+
+ state.fp_input_count = 0;
+
+
+
+
+
+
+ assert(state.fp_input_count == fs->program.num_inputs);
+
+
+ /* Then scan vp outputs
+ */
+ done = 0;
+ tgsi_parse_init( &parse, vs->program.tokens );
+ while( !done &&
+ !tgsi_parse_end_of_tokens( &parse ) )
+ {
+ tgsi_parse_token( &parse );
+
+ switch( parse.FullToken.Token.Type ) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ if (parse.FullToken.FullDeclaration.Declaration.File == TGSI_FILE_INPUT)
+ {
+ int first = parse.FullToken.FullDeclaration.DeclarationRange.First;
+ int last = parse.FullToken.FullDeclaration.DeclarationRange.Last;
+
+ for (i = first; i < last; i++) {
+ vp_semantic[i].semantic =
+ parse.FullToken.FullDeclaration.Semantic.SemanticName;
+ vp_semantic[i].semantic_index =
+ parse.FullToken.FullDeclaration.Semantic.SemanticIndex;
+ }
+
+ assert(last > nr_vp_outputs);
+ nr_vp_outputs = last;
+ }
+ break;
+ default:
+ done = 1;
+ break;
+ }
+ }
+
+
+ /* Now match based on semantic information.
+ */
+ for (i = 0; i< state.fp_input_count; i++) {
+ for (j = 0; j < nr_vp_outputs; j++) {
+ if (fp_semantic[i].semantic == vp_semantic[j].semantic &&
+ fp_semantic[i].semantic_index == vp_semantic[j].semantic_index) {
+ state.fp_input[i].vp_output = j;
+ }
+ }
+ if (fp_semantic[i].semantic == TGSI_SEMANTIC_COLOR) {
+ for (j = 0; j < nr_vp_outputs; j++) {
+ if (TGSI_SEMANTIC_BCOLOR == vp_semantic[j].semantic &&
+ fp_semantic[i].semantic_index == vp_semantic[j].semantic_index) {
+ state.fp_input[i].bf_vp_output = j;
+ }
+ }
+ }
+ }
+
+ if (memcmp(&brw->sf.linkage, &state, sizeof(state)) != 0) {
+ brw->sf.linkage = state;
+ brw->state.dirty.brw |= BRW_NEW_SF_LINKAGE;
+ }
+}
+
+
+const struct brw_tracked_state brw_sf_linkage = {
+ .dirty = {
+ .brw = (BRW_NEW_VS |
+ BRW_NEW_FS),
+ .cache = 0,
+ },
+ .update = update_sf_linkage
+};
+
+
+#endif
diff --git a/src/gallium/drivers/i965simple/brw_sf.h b/src/gallium/drivers/i965simple/brw_sf.h
new file mode 100644
index 0000000000..b7ada47560
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_sf.h
@@ -0,0 +1,122 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#ifndef BRW_SF_H
+#define BRW_SF_H
+
+#include "brw_context.h"
+#include "brw_eu.h"
+
+
+#define SF_POINTS 0
+#define SF_LINES 1
+#define SF_TRIANGLES 2
+#define SF_UNFILLED_TRIS 3
+
+
+
+struct brw_sf_prog_key {
+ unsigned vp_output_count:5;
+ unsigned fp_input_count:5;
+
+ unsigned primitive:2;
+ unsigned do_twoside_color:1;
+ unsigned do_flat_shading:1;
+ unsigned frontface_ccw:1;
+ unsigned do_point_sprite:1;
+
+ /* Interpolation masks:
+ */
+ unsigned linear_mask;
+ unsigned persp_mask;
+ unsigned const_mask;
+
+
+// int SpriteOrigin;
+};
+
+struct brw_sf_point_tex {
+ boolean CoordReplace;
+};
+
+struct brw_sf_compile {
+ struct brw_compile func;
+ struct brw_sf_prog_key key;
+ struct brw_sf_prog_data prog_data;
+
+ struct brw_reg pv;
+ struct brw_reg det;
+ struct brw_reg dx0;
+ struct brw_reg dx2;
+ struct brw_reg dy0;
+ struct brw_reg dy2;
+
+ /* z and 1/w passed in separately:
+ */
+ struct brw_reg z[3];
+ struct brw_reg inv_w[3];
+
+ /* The vertices:
+ */
+ struct brw_reg vert[3];
+
+ /* Temporaries, allocated after last vertex reg.
+ */
+ struct brw_reg inv_det;
+ struct brw_reg a1_sub_a0;
+ struct brw_reg a2_sub_a0;
+ struct brw_reg tmp;
+
+ struct brw_reg m1Cx;
+ struct brw_reg m2Cy;
+ struct brw_reg m3C0;
+
+ unsigned nr_verts;
+ unsigned nr_attrs;
+ unsigned nr_attr_regs;
+ unsigned nr_setup_attrs;
+ unsigned nr_setup_regs;
+#if 0
+ ubyte attr_to_idx[VERT_RESULT_MAX];
+ ubyte idx_to_attr[VERT_RESULT_MAX];
+ struct brw_sf_point_tex point_attrs[VERT_RESULT_MAX];
+#endif
+};
+
+
+void brw_emit_tri_setup( struct brw_sf_compile *c );
+void brw_emit_line_setup( struct brw_sf_compile *c );
+void brw_emit_point_setup( struct brw_sf_compile *c );
+void brw_emit_point_sprite_setup( struct brw_sf_compile *c );
+void brw_emit_anyprim_setup( struct brw_sf_compile *c );
+
+#endif
diff --git a/src/gallium/drivers/i965simple/brw_sf_emit.c b/src/gallium/drivers/i965simple/brw_sf_emit.c
new file mode 100644
index 0000000000..78d6fa5e9e
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_sf_emit.c
@@ -0,0 +1,382 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "brw_defines.h"
+#include "brw_context.h"
+#include "brw_eu.h"
+#include "brw_util.h"
+#include "brw_sf.h"
+
+
+
+/***********************************************************************
+ * Triangle setup.
+ */
+
+
+static void alloc_regs( struct brw_sf_compile *c )
+{
+ unsigned reg, i;
+
+ /* Values computed by fixed function unit:
+ */
+ c->pv = retype(brw_vec1_grf(1, 1), BRW_REGISTER_TYPE_UD);
+ c->det = brw_vec1_grf(1, 2);
+ c->dx0 = brw_vec1_grf(1, 3);
+ c->dx2 = brw_vec1_grf(1, 4);
+ c->dy0 = brw_vec1_grf(1, 5);
+ c->dy2 = brw_vec1_grf(1, 6);
+
+ /* z and 1/w passed in separately:
+ */
+ c->z[0] = brw_vec1_grf(2, 0);
+ c->inv_w[0] = brw_vec1_grf(2, 1);
+ c->z[1] = brw_vec1_grf(2, 2);
+ c->inv_w[1] = brw_vec1_grf(2, 3);
+ c->z[2] = brw_vec1_grf(2, 4);
+ c->inv_w[2] = brw_vec1_grf(2, 5);
+
+ /* The vertices:
+ */
+ reg = 3;
+ for (i = 0; i < c->nr_verts; i++) {
+ c->vert[i] = brw_vec8_grf(reg, 0);
+ reg += c->nr_attr_regs;
+ }
+
+ /* Temporaries, allocated after last vertex reg.
+ */
+ c->inv_det = brw_vec1_grf(reg, 0); reg++;
+ c->a1_sub_a0 = brw_vec8_grf(reg, 0); reg++;
+ c->a2_sub_a0 = brw_vec8_grf(reg, 0); reg++;
+ c->tmp = brw_vec8_grf(reg, 0); reg++;
+
+ /* Note grf allocation:
+ */
+ c->prog_data.total_grf = reg;
+
+
+ /* Outputs of this program - interpolation coefficients for
+ * rasterization:
+ */
+ c->m1Cx = brw_vec8_reg(BRW_MESSAGE_REGISTER_FILE, 1, 0);
+ c->m2Cy = brw_vec8_reg(BRW_MESSAGE_REGISTER_FILE, 2, 0);
+ c->m3C0 = brw_vec8_reg(BRW_MESSAGE_REGISTER_FILE, 3, 0);
+}
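+/* A worked example of the layout above, assuming nr_verts == 3 and
+ * nr_attr_regs == 2:
+ *
+ *    g1        fixed-function data (pv, det, dx0, dx2, dy0, dy2)
+ *    g2        z[] and inv_w[] for the three vertices
+ *    g3-g4     vert[0]     g5-g6    vert[1]     g7-g8   vert[2]
+ *    g9        inv_det     g10-g12  a1_sub_a0, a2_sub_a0, tmp
+ *
+ * giving prog_data.total_grf == 13.
+ */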
+
+
+static void copy_z_inv_w( struct brw_sf_compile *c )
+{
+ struct brw_compile *p = &c->func;
+ unsigned i;
+
+ brw_push_insn_state(p);
+
+ /* Copy both scalars with a single MOV:
+ */
+ for (i = 0; i < c->nr_verts; i++)
+ brw_MOV(p, vec2(suboffset(c->vert[i], 2)), vec2(c->z[i]));
+
+ brw_pop_insn_state(p);
+}
+
+
+static void invert_det( struct brw_sf_compile *c)
+{
+ brw_math(&c->func,
+ c->inv_det,
+ BRW_MATH_FUNCTION_INV,
+ BRW_MATH_SATURATE_NONE,
+ 0,
+ c->det,
+ BRW_MATH_DATA_SCALAR,
+ BRW_MATH_PRECISION_FULL);
+
+}
+
+#define NON_PERSPECTIVE_ATTRS (FRAG_BIT_WPOS | \
+ FRAG_BIT_COL0 | \
+ FRAG_BIT_COL1)
+
+static boolean calculate_masks( struct brw_sf_compile *c,
+ unsigned reg,
+ ushort *pc,
+ ushort *pc_persp,
+ ushort *pc_linear)
+{
+ boolean is_last_attr = (reg == c->nr_setup_regs - 1);
+ unsigned persp_mask = c->key.persp_mask;
+ unsigned linear_mask = c->key.linear_mask;
+
+ debug_printf("persp_mask: %x\n", persp_mask);
+ debug_printf("linear_mask: %x\n", linear_mask);
+
+ *pc_persp = 0;
+ *pc_linear = 0;
+ *pc = 0xf;
+
+ if (persp_mask & (1 << (reg*2)))
+ *pc_persp = 0xf;
+
+ if (linear_mask & (1 << (reg*2)))
+ *pc_linear = 0xf;
+
+ /* Maybe only process one attribute on the final round:
+ */
+ if (reg*2+1 < c->nr_setup_attrs) {
+ *pc |= 0xf0;
+
+ if (persp_mask & (1 << (reg*2+1)))
+ *pc_persp |= 0xf0;
+
+ if (linear_mask & (1 << (reg*2+1)))
+ *pc_linear |= 0xf0;
+ }
+
+ debug_printf("pc: %x\n", *pc);
+ debug_printf("pc_persp: %x\n", *pc_persp);
+ debug_printf("pc_linear: %x\n", *pc_linear);
+
+
+ return is_last_attr;
+}
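+/* A small sketch of what calculate_masks() returns, assuming
+ * nr_setup_attrs == 3 and nr_setup_regs == 2, with fully perspective
+ * interpolation (persp_mask == 0x7, linear_mask == 0):
+ *
+ *    reg 0: attrs 0,1  ->  pc = 0xff, pc_persp = 0xff, last = FALSE
+ *    reg 1: attr  2    ->  pc = 0x0f, pc_persp = 0x0f, last = TRUE
+ *
+ * i.e. each setup register carries a pair of attributes and the masks
+ * select which halves of the register get written.
+ */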
+
+
+
+void brw_emit_tri_setup( struct brw_sf_compile *c )
+{
+ struct brw_compile *p = &c->func;
+ unsigned i;
+
+ debug_printf("%s START ==============\n", __FUNCTION__);
+
+ c->nr_verts = 3;
+ alloc_regs(c);
+ invert_det(c);
+ copy_z_inv_w(c);
+
+
+ for (i = 0; i < c->nr_setup_regs; i++)
+ {
+ /* Pair of incoming attributes:
+ */
+ struct brw_reg a0 = offset(c->vert[0], i);
+ struct brw_reg a1 = offset(c->vert[1], i);
+ struct brw_reg a2 = offset(c->vert[2], i);
+ ushort pc = 0, pc_persp = 0, pc_linear = 0;
+ boolean last = calculate_masks(c, i, &pc, &pc_persp, &pc_linear);
+
+ if (pc_persp)
+ {
+ brw_set_predicate_control_flag_value(p, pc_persp);
+ brw_MUL(p, a0, a0, c->inv_w[0]);
+ brw_MUL(p, a1, a1, c->inv_w[1]);
+ brw_MUL(p, a2, a2, c->inv_w[2]);
+ }
+
+
+ /* Calculate coefficients for interpolated values:
+ */
+ if (pc_linear)
+ {
+ brw_set_predicate_control_flag_value(p, pc_linear);
+
+ brw_ADD(p, c->a1_sub_a0, a1, negate(a0));
+ brw_ADD(p, c->a2_sub_a0, a2, negate(a0));
+
+ /* calculate dA/dx
+ */
+ brw_MUL(p, brw_null_reg(), c->a1_sub_a0, c->dy2);
+ brw_MAC(p, c->tmp, c->a2_sub_a0, negate(c->dy0));
+ brw_MUL(p, c->m1Cx, c->tmp, c->inv_det);
+
+ /* calculate dA/dy
+ */
+ brw_MUL(p, brw_null_reg(), c->a2_sub_a0, c->dx0);
+ brw_MAC(p, c->tmp, c->a1_sub_a0, negate(c->dx2));
+ brw_MUL(p, c->m2Cy, c->tmp, c->inv_det);
+ }
+
+ {
+ brw_set_predicate_control_flag_value(p, pc);
+ /* start point for interpolation
+ */
+ brw_MOV(p, c->m3C0, a0);
+
+ /* Copy m0..m3 to URB. m0 is implicitly copied from r0 in
+ * the send instruction:
+ */
+ brw_urb_WRITE(p,
+ brw_null_reg(),
+ 0,
+ brw_vec8_grf(0, 0), /* r0, will be copied to m0 */
+ 0, /* allocate */
+ 1, /* used */
+ 4, /* msg len */
+ 0, /* response len */
+ last, /* eot */
+ last, /* writes complete */
+ i*4, /* offset */
+ BRW_URB_SWIZZLE_TRANSPOSE); /* XXX: Swizzle control "SF to windower" */
+ }
+ }
+
+ debug_printf("%s DONE ==============\n", __FUNCTION__);
+
+}
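+/* For reference, a sketch of the math the loop above emits per attribute A
+ * (A0, A1, A2 are the values at the three vertices, already multiplied by
+ * 1/w when perspective-correct):
+ *
+ *    Cx = ((A1 - A0) * dy2 - (A2 - A0) * dy0) * inv_det   -> m1Cx
+ *    Cy = ((A2 - A0) * dx0 - (A1 - A0) * dx2) * inv_det   -> m2Cy
+ *    C0 = A0                                              -> m3C0
+ *
+ * The windower then evaluates something of the form C0 + Cx*dx + Cy*dy to
+ * interpolate A across the triangle.
+ */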
+
+
+
+void brw_emit_line_setup( struct brw_sf_compile *c )
+{
+ struct brw_compile *p = &c->func;
+ unsigned i;
+
+
+ c->nr_verts = 2;
+ alloc_regs(c);
+ invert_det(c);
+ copy_z_inv_w(c);
+
+ for (i = 0; i < c->nr_setup_regs; i++)
+ {
+ /* Pair of incoming attributes:
+ */
+ struct brw_reg a0 = offset(c->vert[0], i);
+ struct brw_reg a1 = offset(c->vert[1], i);
+ ushort pc, pc_persp, pc_linear;
+ boolean last = calculate_masks(c, i, &pc, &pc_persp, &pc_linear);
+
+ if (pc_persp)
+ {
+ brw_set_predicate_control_flag_value(p, pc_persp);
+ brw_MUL(p, a0, a0, c->inv_w[0]);
+ brw_MUL(p, a1, a1, c->inv_w[1]);
+ }
+
+ /* Calculate coefficients for position, color:
+ */
+ if (pc_linear) {
+ brw_set_predicate_control_flag_value(p, pc_linear);
+
+ brw_ADD(p, c->a1_sub_a0, a1, negate(a0));
+
+ brw_MUL(p, c->tmp, c->a1_sub_a0, c->dx0);
+ brw_MUL(p, c->m1Cx, c->tmp, c->inv_det);
+
+ brw_MUL(p, c->tmp, c->a1_sub_a0, c->dy0);
+ brw_MUL(p, c->m2Cy, c->tmp, c->inv_det);
+ }
+
+ {
+ brw_set_predicate_control_flag_value(p, pc);
+
+ /* start point for interpolation
+ */
+ brw_MOV(p, c->m3C0, a0);
+
+ /* Copy m0..m3 to URB.
+ */
+ brw_urb_WRITE(p,
+ brw_null_reg(),
+ 0,
+ brw_vec8_grf(0, 0),
+ 0, /* allocate */
+ 1, /* used */
+ 4, /* msg len */
+ 0, /* response len */
+ last, /* eot */
+ last, /* writes complete */
+ i*4, /* urb destination offset */
+ BRW_URB_SWIZZLE_TRANSPOSE);
+ }
+ }
+}
+
+
+/* Points setup - several simplifications as all attributes are
+ * constant across the face of the point (point sprites excluded!)
+ */
+void brw_emit_point_setup( struct brw_sf_compile *c )
+{
+ struct brw_compile *p = &c->func;
+ unsigned i;
+
+ c->nr_verts = 1;
+ alloc_regs(c);
+ copy_z_inv_w(c);
+
+ brw_MOV(p, c->m1Cx, brw_imm_ud(0)); /* zero - move out of loop */
+ brw_MOV(p, c->m2Cy, brw_imm_ud(0)); /* zero - move out of loop */
+
+ for (i = 0; i < c->nr_setup_regs; i++)
+ {
+ struct brw_reg a0 = offset(c->vert[0], i);
+ ushort pc, pc_persp, pc_linear;
+ boolean last = calculate_masks(c, i, &pc, &pc_persp, &pc_linear);
+
+ if (pc_persp)
+ {
+ /* This seems odd as the values are all constant, but the
+ * fragment shader will be expecting it:
+ */
+ brw_set_predicate_control_flag_value(p, pc_persp);
+ brw_MUL(p, a0, a0, c->inv_w[0]);
+ }
+
+
+ /* The delta values are always zero, just send the starting
+ * coordinate. Again, this is to fit in with the interpolation
+ * code in the fragment shader.
+ */
+ {
+ brw_set_predicate_control_flag_value(p, pc);
+
+ brw_MOV(p, c->m3C0, a0); /* constant value */
+
+ /* Copy m0..m3 to URB.
+ */
+ brw_urb_WRITE(p,
+ brw_null_reg(),
+ 0,
+ brw_vec8_grf(0, 0),
+ 0, /* allocate */
+ 1, /* used */
+ 4, /* msg len */
+ 0, /* response len */
+ last, /* eot */
+ last, /* writes complete */
+ i*4, /* urb destination offset */
+ BRW_URB_SWIZZLE_TRANSPOSE);
+ }
+ }
+}
diff --git a/src/gallium/drivers/i965simple/brw_sf_state.c b/src/gallium/drivers/i965simple/brw_sf_state.c
new file mode 100644
index 0000000000..2a5de61c21
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_sf_state.c
@@ -0,0 +1,181 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "brw_context.h"
+#include "brw_state.h"
+#include "brw_defines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+
+static void upload_sf_vp(struct brw_context *brw)
+{
+ struct brw_sf_viewport sfv;
+
+ memset(&sfv, 0, sizeof(sfv));
+
+
+ /* BRW_NEW_VIEWPORT */
+ {
+ const float *scale = brw->attribs.Viewport.scale;
+ const float *trans = brw->attribs.Viewport.translate;
+
+ sfv.viewport.m00 = scale[0];
+ sfv.viewport.m11 = scale[1];
+ sfv.viewport.m22 = scale[2];
+ sfv.viewport.m30 = trans[0];
+ sfv.viewport.m31 = trans[1];
+ sfv.viewport.m32 = trans[2];
+ }
+
+ /* _NEW_SCISSOR */
+ sfv.scissor.xmin = brw->attribs.Scissor.minx;
+ sfv.scissor.xmax = brw->attribs.Scissor.maxx - 1;
+ sfv.scissor.ymin = brw->attribs.Scissor.miny;
+ sfv.scissor.ymax = brw->attribs.Scissor.maxy - 1;
+
+ brw->sf.vp_gs_offset = brw_cache_data( &brw->cache[BRW_SF_VP], &sfv );
+}
+
+const struct brw_tracked_state brw_sf_vp = {
+ .dirty = {
+ .brw = (BRW_NEW_SCISSOR |
+ BRW_NEW_VIEWPORT),
+ .cache = 0
+ },
+ .update = upload_sf_vp
+};
+
+static void upload_sf_unit( struct brw_context *brw )
+{
+ struct brw_sf_unit_state sf;
+ memset(&sf, 0, sizeof(sf));
+
+ /* CACHE_NEW_SF_PROG */
+ sf.thread0.grf_reg_count = align(brw->sf.prog_data->total_grf, 16) / 16 - 1;
+ sf.thread0.kernel_start_pointer = brw->sf.prog_gs_offset >> 6;
+ sf.thread3.urb_entry_read_length = brw->sf.prog_data->urb_read_length;
+
+ sf.thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;
+ sf.thread3.dispatch_grf_start_reg = 3;
+ sf.thread3.urb_entry_read_offset = 1;
+
+ /* BRW_NEW_URB_FENCE */
+ sf.thread4.nr_urb_entries = brw->urb.nr_sf_entries;
+ sf.thread4.urb_entry_allocation_size = brw->urb.sfsize - 1;
+ sf.thread4.max_threads = MIN2(12, brw->urb.nr_sf_entries / 2) - 1;
+
+ if (BRW_DEBUG & DEBUG_SINGLE_THREAD)
+ sf.thread4.max_threads = 0;
+
+ if (BRW_DEBUG & DEBUG_STATS)
+ sf.thread4.stats_enable = 1;
+
+ /* CACHE_NEW_SF_VP */
+ sf.sf5.sf_viewport_state_offset = brw->sf.vp_gs_offset >> 5;
+ sf.sf5.viewport_transform = 1;
+
+ /* BRW_NEW_RASTER */
+ if (brw->attribs.Raster->scissor)
+ sf.sf6.scissor = 1;
+
+#if 0
+ if (brw->attribs.Polygon->FrontFace == GL_CCW)
+ sf.sf5.front_winding = BRW_FRONTWINDING_CCW;
+ else
+ sf.sf5.front_winding = BRW_FRONTWINDING_CW;
+
+
+ if (brw->attribs.Polygon->CullFlag) {
+ switch (brw->attribs.Polygon->CullFaceMode) {
+ case GL_FRONT:
+ sf.sf6.cull_mode = BRW_CULLMODE_FRONT;
+ break;
+ case GL_BACK:
+ sf.sf6.cull_mode = BRW_CULLMODE_BACK;
+ break;
+ case GL_FRONT_AND_BACK:
+ sf.sf6.cull_mode = BRW_CULLMODE_BOTH;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ }
+ else
+ sf.sf6.cull_mode = BRW_CULLMODE_NONE;
+#else
+ sf.sf5.front_winding = BRW_FRONTWINDING_CCW;
+ sf.sf6.cull_mode = BRW_CULLMODE_NONE;
+#endif
+
+ sf.sf6.line_width = CLAMP(brw->attribs.Raster->line_width, 1.0, 5.0) * (1<<1);
+
+ sf.sf6.line_endcap_aa_region_width = 1;
+ if (brw->attribs.Raster->line_smooth)
+ sf.sf6.aa_enable = 1;
+ else if (sf.sf6.line_width <= 0x2)
+ sf.sf6.line_width = 0;
+
+ sf.sf6.point_rast_rule = 1; /* OpenGL conventions */
+
+ sf.sf7.sprite_point = brw->attribs.Raster->point_sprite;
+ sf.sf7.point_size = CLAMP(brw->attribs.Raster->point_size, 1.0, 255.0) * (1<<3);
+ sf.sf7.use_point_size_state = !brw->attribs.Raster->point_size_per_vertex;
+
+ /* might be BRW_NEW_PRIMITIVE if we have to adjust pv for polygons:
+ */
+ sf.sf7.trifan_pv = 2;
+ sf.sf7.linestrip_pv = 1;
+ sf.sf7.tristrip_pv = 2;
+ sf.sf7.line_last_pixel_enable = 0;
+
+ /* Set bias for OpenGL rasterization rules:
+ */
+ sf.sf6.dest_org_vbias = 0x8;
+ sf.sf6.dest_org_hbias = 0x8;
+
+ brw->sf.state_gs_offset = brw_cache_data( &brw->cache[BRW_SF_UNIT], &sf );
+}
+
+
+const struct brw_tracked_state brw_sf_unit = {
+ .dirty = {
+ .brw = (BRW_NEW_RASTERIZER |
+ BRW_NEW_URB_FENCE),
+ .cache = (CACHE_NEW_SF_VP |
+ CACHE_NEW_SF_PROG)
+ },
+ .update = upload_sf_unit
+};
+
+
diff --git a/src/gallium/drivers/i965simple/brw_shader_info.c b/src/gallium/drivers/i965simple/brw_shader_info.c
new file mode 100644
index 0000000000..86d877d7ef
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_shader_info.c
@@ -0,0 +1,48 @@
+
+#include "brw_context.h"
+#include "brw_state.h"
+#include "util/u_memory.h"
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+
+
+/**
+ * XXX: this is obsolete now and no longer compiled.
+ */
+void brw_shader_info(const struct tgsi_token *tokens,
+ struct brw_shader_info *info )
+{
+ struct tgsi_parse_context parse;
+ int done = 0;
+
+ tgsi_parse_init( &parse, tokens );
+
+ while( !done &&
+ !tgsi_parse_end_of_tokens( &parse ) )
+ {
+ tgsi_parse_token( &parse );
+
+ switch( parse.FullToken.Token.Type ) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ {
+ const struct tgsi_full_declaration *decl = &parse.FullToken.FullDeclaration;
+ unsigned last = decl->DeclarationRange.Last;
+
+ // Broken by crazy wpos init:
+ //assert( info->nr_regs[decl->Declaration.File] <= last);
+
+ info->nr_regs[decl->Declaration.File] = MAX2(info->nr_regs[decl->Declaration.File],
+ last+1);
+ break;
+ }
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ default:
+ done = 1;
+ break;
+ }
+ }
+
+ tgsi_parse_free (&parse);
+
+}
diff --git a/src/gallium/drivers/i965simple/brw_state.c b/src/gallium/drivers/i965simple/brw_state.c
new file mode 100644
index 0000000000..b47f5373f3
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_state.c
@@ -0,0 +1,469 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Zack Rusin <zack@tungstengraphics.com>
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "pipe/internal/p_winsys_screen.h"
+#include "util/u_memory.h"
+#include "pipe/p_inlines.h"
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_dump.h"
+#include "tgsi/tgsi_parse.h"
+
+#include "brw_context.h"
+#include "brw_defines.h"
+#include "brw_state.h"
+#include "brw_draw.h"
+
+
+#define DUP( TYPE, VAL ) \
+do { \
+ struct TYPE *x = malloc(sizeof(*x)); \
+ memcpy(x, VAL, sizeof(*x) ); \
+ return x; \
+} while (0)
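+/* Note that DUP() contains the return statement for its caller, so e.g.
+ * brw_create_blend_state() below is roughly equivalent to:
+ *
+ *    struct pipe_blend_state *x = malloc(sizeof(*x));
+ *    memcpy(x, blend, sizeof(*x));
+ *    return x;
+ */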
+
+/************************************************************************
+ * Blend
+ */
+static void *
+brw_create_blend_state(struct pipe_context *pipe,
+ const struct pipe_blend_state *blend)
+{
+ DUP( pipe_blend_state, blend );
+}
+
+static void brw_bind_blend_state(struct pipe_context *pipe,
+ void *blend)
+{
+ struct brw_context *brw = brw_context(pipe);
+
+ brw->attribs.Blend = (struct pipe_blend_state*)blend;
+ brw->state.dirty.brw |= BRW_NEW_BLEND;
+}
+
+
+static void brw_delete_blend_state(struct pipe_context *pipe, void *blend)
+{
+ free(blend);
+}
+
+static void brw_set_blend_color( struct pipe_context *pipe,
+ const struct pipe_blend_color *blend_color )
+{
+ struct brw_context *brw = brw_context(pipe);
+
+ brw->attribs.BlendColor = *blend_color;
+
+ brw->state.dirty.brw |= BRW_NEW_BLEND;
+}
+
+/************************************************************************
+ * Sampler
+ */
+
+static void *
+brw_create_sampler_state(struct pipe_context *pipe,
+ const struct pipe_sampler_state *sampler)
+{
+ DUP( pipe_sampler_state, sampler );
+}
+
+static void brw_bind_sampler_states(struct pipe_context *pipe,
+ unsigned num, void **sampler)
+{
+ struct brw_context *brw = brw_context(pipe);
+
+ assert(num <= PIPE_MAX_SAMPLERS);
+
+ /* Check for no-op */
+ if (num == brw->num_samplers &&
+ !memcmp(brw->attribs.Samplers, sampler, num * sizeof(void *)))
+ return;
+
+ memcpy(brw->attribs.Samplers, sampler, num * sizeof(void *));
+ memset(&brw->attribs.Samplers[num], 0, (PIPE_MAX_SAMPLERS - num) *
+ sizeof(void *));
+
+ brw->num_samplers = num;
+
+ brw->state.dirty.brw |= BRW_NEW_SAMPLER;
+}
+
+static void brw_delete_sampler_state(struct pipe_context *pipe,
+ void *sampler)
+{
+ free(sampler);
+}
+
+
+/************************************************************************
+ * Depth stencil
+ */
+
+static void *
+brw_create_depth_stencil_state(struct pipe_context *pipe,
+ const struct pipe_depth_stencil_alpha_state *depth_stencil)
+{
+ DUP( pipe_depth_stencil_alpha_state, depth_stencil );
+}
+
+static void brw_bind_depth_stencil_state(struct pipe_context *pipe,
+ void *depth_stencil)
+{
+ struct brw_context *brw = brw_context(pipe);
+
+ brw->attribs.DepthStencil = (const struct pipe_depth_stencil_alpha_state *)depth_stencil;
+
+ brw->state.dirty.brw |= BRW_NEW_DEPTH_STENCIL;
+}
+
+static void brw_delete_depth_stencil_state(struct pipe_context *pipe,
+ void *depth_stencil)
+{
+ free(depth_stencil);
+}
+
+/************************************************************************
+ * Scissor
+ */
+static void brw_set_scissor_state( struct pipe_context *pipe,
+ const struct pipe_scissor_state *scissor )
+{
+ struct brw_context *brw = brw_context(pipe);
+
+ memcpy( &brw->attribs.Scissor, scissor, sizeof(*scissor) );
+ brw->state.dirty.brw |= BRW_NEW_SCISSOR;
+}
+
+
+/************************************************************************
+ * Stipple
+ */
+
+static void brw_set_polygon_stipple( struct pipe_context *pipe,
+ const struct pipe_poly_stipple *stipple )
+{
+}
+
+
+/************************************************************************
+ * Fragment shader
+ */
+
+static void * brw_create_fs_state(struct pipe_context *pipe,
+ const struct pipe_shader_state *shader)
+{
+ struct brw_fragment_program *brw_fp = CALLOC_STRUCT(brw_fragment_program);
+
+ brw_fp->program.tokens = tgsi_dup_tokens(shader->tokens);
+ brw_fp->id = brw_context(pipe)->program_id++;
+
+ tgsi_scan_shader(shader->tokens, &brw_fp->info);
+
+#if 0
+ brw_shader_info(shader->tokens,
+ &brw_fp->info2);
+#endif
+
+ tgsi_dump(shader->tokens, 0);
+
+
+ return (void *)brw_fp;
+}
+
+static void brw_bind_fs_state(struct pipe_context *pipe, void *shader)
+{
+ struct brw_context *brw = brw_context(pipe);
+
+ brw->attribs.FragmentProgram = (struct brw_fragment_program *)shader;
+ brw->state.dirty.brw |= BRW_NEW_FS;
+}
+
+static void brw_delete_fs_state(struct pipe_context *pipe, void *shader)
+{
+ struct brw_fragment_program *brw_fp = (struct brw_fragment_program *) shader;
+
+ FREE((void *) brw_fp->program.tokens);
+ FREE(brw_fp);
+}
+
+
+/************************************************************************
+ * Vertex shader and other TNL state
+ */
+
+static void *brw_create_vs_state(struct pipe_context *pipe,
+ const struct pipe_shader_state *shader)
+{
+ struct brw_vertex_program *brw_vp = CALLOC_STRUCT(brw_vertex_program);
+
+ brw_vp->program.tokens = tgsi_dup_tokens(shader->tokens);
+ brw_vp->id = brw_context(pipe)->program_id++;
+
+ tgsi_scan_shader(shader->tokens, &brw_vp->info);
+
+#if 0
+ brw_shader_info(shader->tokens,
+ &brw_vp->info2);
+#endif
+ tgsi_dump(shader->tokens, 0);
+
+ return (void *)brw_vp;
+}
+
+static void brw_bind_vs_state(struct pipe_context *pipe, void *vs)
+{
+ struct brw_context *brw = brw_context(pipe);
+
+ brw->attribs.VertexProgram = (struct brw_vertex_program *)vs;
+ brw->state.dirty.brw |= BRW_NEW_VS;
+
+ debug_printf("YYYYYYYYYYYYY BINDING VERTEX SHADER\n");
+}
+
+static void brw_delete_vs_state(struct pipe_context *pipe, void *shader)
+{
+ struct brw_vertex_program *brw_vp = (struct brw_vertex_program *) shader;
+
+ FREE((void *) brw_vp->program.tokens);
+ FREE(brw_vp);
+}
+
+
+static void brw_set_clip_state( struct pipe_context *pipe,
+ const struct pipe_clip_state *clip )
+{
+ struct brw_context *brw = brw_context(pipe);
+
+ brw->attribs.Clip = *clip;
+}
+
+
+static void brw_set_viewport_state( struct pipe_context *pipe,
+ const struct pipe_viewport_state *viewport )
+{
+ struct brw_context *brw = brw_context(pipe);
+
+ brw->attribs.Viewport = *viewport; /* struct copy */
+ brw->state.dirty.brw |= BRW_NEW_VIEWPORT;
+
+ /* pass the viewport info to the draw module */
+ //draw_set_viewport_state(brw->draw, viewport);
+}
+
+
+static void brw_set_vertex_buffers(struct pipe_context *pipe,
+ unsigned count,
+ const struct pipe_vertex_buffer *buffers)
+{
+ struct brw_context *brw = brw_context(pipe);
+ memcpy(brw->vb.vbo_array, buffers, count * sizeof(buffers[0]));
+}
+
+static void brw_set_vertex_elements(struct pipe_context *pipe,
+ unsigned count,
+ const struct pipe_vertex_element *elements)
+{
+ /* flush ? */
+ struct brw_context *brw = brw_context(pipe);
+ uint i;
+
+ assert(count <= PIPE_MAX_ATTRIBS);
+
+ for (i = 0; i < count; i++) {
+ struct brw_vertex_element_state el;
+ memset(&el, 0, sizeof(el));
+
+ el.ve0.src_offset = elements[i].src_offset;
+ el.ve0.src_format = brw_translate_surface_format(elements[i].src_format);
+ el.ve0.valid = 1;
+ el.ve0.vertex_buffer_index = elements[i].vertex_buffer_index;
+
+ el.ve1.dst_offset = i * 4;
+
+ el.ve1.vfcomponent3 = BRW_VFCOMPONENT_STORE_SRC;
+ el.ve1.vfcomponent2 = BRW_VFCOMPONENT_STORE_SRC;
+ el.ve1.vfcomponent1 = BRW_VFCOMPONENT_STORE_SRC;
+ el.ve1.vfcomponent0 = BRW_VFCOMPONENT_STORE_SRC;
+
+ /* Components not supplied by the vertex element are filled with the
+ * defaults (0, 0, 0, 1); the case fall-through is intentional.
+ */
+ switch (elements[i].nr_components) {
+ case 1: el.ve1.vfcomponent1 = BRW_VFCOMPONENT_STORE_0; /* fall through */
+ case 2: el.ve1.vfcomponent2 = BRW_VFCOMPONENT_STORE_0; /* fall through */
+ case 3: el.ve1.vfcomponent3 = BRW_VFCOMPONENT_STORE_1_FLT;
+ break;
+ }
+
+ brw->vb.inputs[i] = el;
+ }
+}
+
+
+
+/************************************************************************
+ * Constant buffers
+ */
+
+static void brw_set_constant_buffer(struct pipe_context *pipe,
+ uint shader, uint index,
+ const struct pipe_constant_buffer *buf)
+{
+ struct brw_context *brw = brw_context(pipe);
+
+ assert(buf == 0 || index == 0);
+
+ brw->attribs.Constants[shader] = buf;
+ brw->state.dirty.brw |= BRW_NEW_CONSTANTS;
+}
+
+
+/************************************************************************
+ * Texture surfaces
+ */
+
+
+static void brw_set_sampler_textures(struct pipe_context *pipe,
+ unsigned num,
+ struct pipe_texture **texture)
+{
+ struct brw_context *brw = brw_context(pipe);
+ uint i;
+
+ assert(num <= PIPE_MAX_SAMPLERS);
+
+ /* Check for no-op */
+ if (num == brw->num_textures &&
+ !memcmp(brw->attribs.Texture, texture, num *
+ sizeof(struct pipe_texture *)))
+ return;
+
+ for (i = 0; i < num; i++)
+ pipe_texture_reference((struct pipe_texture **) &brw->attribs.Texture[i],
+ texture[i]);
+
+ for (i = num; i < brw->num_textures; i++)
+ pipe_texture_reference((struct pipe_texture **) &brw->attribs.Texture[i],
+ NULL);
+
+ brw->num_textures = num;
+
+ brw->state.dirty.brw |= BRW_NEW_TEXTURE;
+}
+
+
+/************************************************************************
+ * Render targets, etc
+ */
+
+static void brw_set_framebuffer_state(struct pipe_context *pipe,
+ const struct pipe_framebuffer_state *fb)
+{
+ struct brw_context *brw = brw_context(pipe);
+
+ brw->attribs.FrameBuffer = *fb; /* struct copy */
+
+ brw->state.dirty.brw |= BRW_NEW_FRAMEBUFFER;
+}
+
+
+
+/************************************************************************
+ * Rasterizer state
+ */
+
+static void *
+brw_create_rasterizer_state(struct pipe_context *pipe,
+ const struct pipe_rasterizer_state *rasterizer)
+{
+ DUP(pipe_rasterizer_state, rasterizer);
+}
+
+static void brw_bind_rasterizer_state( struct pipe_context *pipe,
+ void *setup )
+{
+ struct brw_context *brw = brw_context(pipe);
+
+ brw->attribs.Raster = (struct pipe_rasterizer_state *)setup;
+
+ /* Also pass-through to draw module:
+ */
+ //draw_set_rasterizer_state(brw->draw, setup);
+
+ brw->state.dirty.brw |= BRW_NEW_RASTERIZER;
+}
+
+static void brw_delete_rasterizer_state(struct pipe_context *pipe,
+ void *setup)
+{
+ free(setup);
+}
+
+
+
+void
+brw_init_state_functions( struct brw_context *brw )
+{
+ brw->pipe.create_blend_state = brw_create_blend_state;
+ brw->pipe.bind_blend_state = brw_bind_blend_state;
+ brw->pipe.delete_blend_state = brw_delete_blend_state;
+
+ brw->pipe.create_sampler_state = brw_create_sampler_state;
+ brw->pipe.bind_sampler_states = brw_bind_sampler_states;
+ brw->pipe.delete_sampler_state = brw_delete_sampler_state;
+
+ brw->pipe.create_depth_stencil_alpha_state = brw_create_depth_stencil_state;
+ brw->pipe.bind_depth_stencil_alpha_state = brw_bind_depth_stencil_state;
+ brw->pipe.delete_depth_stencil_alpha_state = brw_delete_depth_stencil_state;
+
+ brw->pipe.create_rasterizer_state = brw_create_rasterizer_state;
+ brw->pipe.bind_rasterizer_state = brw_bind_rasterizer_state;
+ brw->pipe.delete_rasterizer_state = brw_delete_rasterizer_state;
+ brw->pipe.create_fs_state = brw_create_fs_state;
+ brw->pipe.bind_fs_state = brw_bind_fs_state;
+ brw->pipe.delete_fs_state = brw_delete_fs_state;
+ brw->pipe.create_vs_state = brw_create_vs_state;
+ brw->pipe.bind_vs_state = brw_bind_vs_state;
+ brw->pipe.delete_vs_state = brw_delete_vs_state;
+
+ brw->pipe.set_blend_color = brw_set_blend_color;
+ brw->pipe.set_clip_state = brw_set_clip_state;
+ brw->pipe.set_constant_buffer = brw_set_constant_buffer;
+ brw->pipe.set_framebuffer_state = brw_set_framebuffer_state;
+
+// brw->pipe.set_feedback_state = brw_set_feedback_state;
+// brw->pipe.set_feedback_buffer = brw_set_feedback_buffer;
+
+ brw->pipe.set_polygon_stipple = brw_set_polygon_stipple;
+ brw->pipe.set_scissor_state = brw_set_scissor_state;
+ brw->pipe.set_sampler_textures = brw_set_sampler_textures;
+ brw->pipe.set_viewport_state = brw_set_viewport_state;
+ brw->pipe.set_vertex_buffers = brw_set_vertex_buffers;
+ brw->pipe.set_vertex_elements = brw_set_vertex_elements;
+}
diff --git a/src/gallium/drivers/i965simple/brw_state.h b/src/gallium/drivers/i965simple/brw_state.h
new file mode 100644
index 0000000000..de0a6371b8
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_state.h
@@ -0,0 +1,151 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#ifndef BRW_STATE_H
+#define BRW_STATE_H
+
+#include "brw_context.h"
+#include "brw_winsys.h"
+
+
+/* Tracked-state atoms, defined in the corresponding brw_*.c files:
+ */
+extern const struct brw_tracked_state brw_blend_constant_color;
+extern const struct brw_tracked_state brw_cc_unit;
+extern const struct brw_tracked_state brw_cc_vp;
+extern const struct brw_tracked_state brw_clip_prog;
+extern const struct brw_tracked_state brw_clip_unit;
+extern const struct brw_tracked_state brw_constant_buffer_state;
+extern const struct brw_tracked_state brw_constant_buffer;
+extern const struct brw_tracked_state brw_curbe_offsets;
+extern const struct brw_tracked_state brw_invarient_state;
+extern const struct brw_tracked_state brw_gs_prog;
+extern const struct brw_tracked_state brw_gs_unit;
+extern const struct brw_tracked_state brw_drawing_rect;
+extern const struct brw_tracked_state brw_line_stipple;
+extern const struct brw_tracked_state brw_pipelined_state_pointers;
+extern const struct brw_tracked_state brw_binding_table_pointers;
+extern const struct brw_tracked_state brw_depthbuffer;
+extern const struct brw_tracked_state brw_polygon_stipple_offset;
+extern const struct brw_tracked_state brw_polygon_stipple;
+extern const struct brw_tracked_state brw_program_parameters;
+extern const struct brw_tracked_state brw_recalculate_urb_fence;
+extern const struct brw_tracked_state brw_sf_prog;
+extern const struct brw_tracked_state brw_sf_unit;
+extern const struct brw_tracked_state brw_sf_vp;
+extern const struct brw_tracked_state brw_state_base_address;
+extern const struct brw_tracked_state brw_urb_fence;
+extern const struct brw_tracked_state brw_vertex_state;
+extern const struct brw_tracked_state brw_vs_prog;
+extern const struct brw_tracked_state brw_vs_unit;
+extern const struct brw_tracked_state brw_wm_prog;
+extern const struct brw_tracked_state brw_wm_samplers;
+extern const struct brw_tracked_state brw_wm_surfaces;
+extern const struct brw_tracked_state brw_wm_unit;
+
+extern const struct brw_tracked_state brw_psp_urb_cbs;
+
+extern const struct brw_tracked_state brw_active_vertprog;
+extern const struct brw_tracked_state brw_tnl_vertprog;
+extern const struct brw_tracked_state brw_pipe_control;
+
+extern const struct brw_tracked_state brw_clear_surface_cache;
+extern const struct brw_tracked_state brw_clear_batch_cache;
+
+/***********************************************************************
+ * brw_state_cache.c
+ */
+unsigned brw_cache_data(struct brw_cache *cache,
+ const void *data );
+
+unsigned brw_cache_data_sz(struct brw_cache *cache,
+ const void *data,
+ unsigned data_sz);
+
+unsigned brw_upload_cache( struct brw_cache *cache,
+ const void *key,
+ unsigned key_sz,
+ const void *data,
+ unsigned data_sz,
+ const void *aux,
+ void *aux_return );
+
+boolean brw_search_cache( struct brw_cache *cache,
+ const void *key,
+ unsigned key_size,
+ void *aux_return,
+ unsigned *offset_return);
+
+void brw_init_caches( struct brw_context *brw );
+void brw_destroy_caches( struct brw_context *brw );
+
+static inline struct pipe_buffer *brw_cache_buffer(struct brw_context *brw,
+ enum brw_cache_id id)
+{
+ return brw->cache[id].pool->buffer;
+}
+
+/***********************************************************************
+ * brw_state_batch.c
+ */
+#define BRW_CACHED_BATCH_STRUCT(brw, s) brw_cached_batch_struct( brw, (s), sizeof(*(s)) )
+
+boolean brw_cached_batch_struct( struct brw_context *brw,
+ const void *data,
+ unsigned sz );
+
+void brw_destroy_batch_cache( struct brw_context *brw );
+
+
+/***********************************************************************
+ * brw_state_pool.c
+ */
+void brw_init_pools( struct brw_context *brw );
+void brw_destroy_pools( struct brw_context *brw );
+
+boolean brw_pool_alloc( struct brw_mem_pool *pool,
+ unsigned size,
+ unsigned alignment,
+ unsigned *offset_return);
+
+void brw_pool_fence( struct brw_context *brw,
+ struct brw_mem_pool *pool,
+ unsigned fence );
+
+
+void brw_pool_check_wrap( struct brw_context *brw,
+ struct brw_mem_pool *pool );
+
+void brw_clear_all_caches( struct brw_context *brw );
+void brw_invalidate_pools( struct brw_context *brw );
+void brw_clear_batch_cache_flush( struct brw_context *brw );
+
+
+#endif
diff --git a/src/gallium/drivers/i965simple/brw_state_batch.c b/src/gallium/drivers/i965simple/brw_state_batch.c
new file mode 100644
index 0000000000..43a1c89fc4
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_state_batch.c
@@ -0,0 +1,113 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "brw_state.h"
+#include "brw_winsys.h"
+
+#include "util/u_memory.h"
+
+/* A facility similar to the data caching code in brw_state_cache.c, which
+ * aims to prevent identical commands from being issued repeatedly.
+ */
+boolean brw_cached_batch_struct( struct brw_context *brw,
+ const void *data,
+ unsigned sz )
+{
+ struct brw_cached_batch_item *item = brw->cached_batch_items;
+ struct header *newheader = (struct header *)data;
+
+ if (brw->emit_state_always) {
+ brw_batchbuffer_data(brw->winsys, data, sz);
+ return TRUE;
+ }
+
+ while (item) {
+ if (item->header->opcode == newheader->opcode) {
+ if (item->sz == sz && memcmp(item->header, newheader, sz) == 0)
+ return FALSE;
+ if (item->sz != sz) {
+ FREE(item->header);
+ item->header = MALLOC(sz);
+ item->sz = sz;
+ }
+ goto emit;
+ }
+ item = item->next;
+ }
+
+ assert(!item);
+ item = CALLOC_STRUCT(brw_cached_batch_item);
+ item->header = MALLOC(sz);
+ item->sz = sz;
+ item->next = brw->cached_batch_items;
+ brw->cached_batch_items = item;
+
+emit:
+ memcpy(item->header, newheader, sz);
+ brw_batchbuffer_data(brw->winsys, data, sz);
+ return TRUE;
+}
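+/* Typical usage (a sketch; CMD_MI_FLUSH is a hypothetical opcode define,
+ * the other names come from brw_structs.h and brw_state.h): a state atom
+ * fills in a command struct and emits it through the wrapper macro, and the
+ * packet only reaches the batchbuffer when its contents actually changed:
+ *
+ *    struct brw_mi_flush flush;
+ *    memset(&flush, 0, sizeof(flush));
+ *    flush.opcode = CMD_MI_FLUSH;
+ *    flush.flags = BRW_FLUSH_STATE_CACHE;
+ *    BRW_CACHED_BATCH_STRUCT(brw, &flush);
+ */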
+
+static void clear_batch_cache( struct brw_context *brw )
+{
+ struct brw_cached_batch_item *item = brw->cached_batch_items;
+
+ while (item) {
+ struct brw_cached_batch_item *next = item->next;
+ free((void *)item->header);
+ free(item);
+ item = next;
+ }
+
+ brw->cached_batch_items = NULL;
+
+
+ brw_clear_all_caches(brw);
+
+ brw_invalidate_pools(brw);
+}
+
+void brw_clear_batch_cache_flush( struct brw_context *brw )
+{
+ clear_batch_cache(brw);
+
+/* brw_do_flush(brw, BRW_FLUSH_STATE_CACHE|BRW_FLUSH_READ_CACHE); */
+
+ brw->state.dirty.brw |= ~0;
+ brw->state.dirty.cache |= ~0;
+}
+
+
+
+void brw_destroy_batch_cache( struct brw_context *brw )
+{
+ clear_batch_cache(brw);
+}
diff --git a/src/gallium/drivers/i965simple/brw_state_cache.c b/src/gallium/drivers/i965simple/brw_state_cache.c
new file mode 100644
index 0000000000..094248fa69
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_state_cache.c
@@ -0,0 +1,443 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "brw_state.h"
+
+#include "brw_wm.h"
+#include "brw_vs.h"
+#include "brw_clip.h"
+#include "brw_sf.h"
+#include "brw_gs.h"
+
+#include "util/u_memory.h"
+
+
+
+/***********************************************************************
+ * Check cache for uploaded version of struct, else upload new one.
+ * Fail when memory is exhausted.
+ *
+ * XXX: FIXME: Currently search is so slow it would be quicker to
+ * regenerate the data every time...
+ */
+
+static unsigned hash_key( const void *key, unsigned key_size )
+{
+ unsigned *ikey = (unsigned *)key;
+ unsigned hash = 0, i;
+
+ assert(key_size % 4 == 0);
+
+ /* I'm sure this can be improved on:
+ */
+ for (i = 0; i < key_size/4; i++)
+ hash ^= ikey[i];
+
+ return hash;
+}
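+/* A quick worked example: for a 12-byte key containing the words
+ * { 0x1, 0x2, 0x3 }, hash_key() returns 0x1 ^ 0x2 ^ 0x3 == 0x0.  XOR
+ * folding is order-insensitive, so any permutation of the same words
+ * hashes to the same bucket.
+ */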
+
+static struct brw_cache_item *search_cache( struct brw_cache *cache,
+ unsigned hash,
+ const void *key,
+ unsigned key_size)
+{
+ struct brw_cache_item *c;
+
+ for (c = cache->items[hash % cache->size]; c; c = c->next) {
+ if (c->hash == hash &&
+ c->key_size == key_size &&
+ memcmp(c->key, key, key_size) == 0)
+ return c;
+ }
+
+ return NULL;
+}
+
+
+static void rehash( struct brw_cache *cache )
+{
+ struct brw_cache_item **items;
+ struct brw_cache_item *c, *next;
+ unsigned size, i;
+
+ size = cache->size * 3;
+ items = (struct brw_cache_item**) MALLOC(size * sizeof(*items));
+ memset(items, 0, size * sizeof(*items));
+
+ for (i = 0; i < cache->size; i++)
+ for (c = cache->items[i]; c; c = next) {
+ next = c->next;
+ c->next = items[c->hash % size];
+ items[c->hash % size] = c;
+ }
+
+ FREE(cache->items);
+ cache->items = items;
+ cache->size = size;
+}
+
+
+boolean brw_search_cache( struct brw_cache *cache,
+ const void *key,
+ unsigned key_size,
+ void *aux_return,
+ unsigned *offset_return)
+{
+ struct brw_cache_item *item;
+ unsigned addr = 0;
+ unsigned hash = hash_key(key, key_size);
+
+ item = search_cache(cache, hash, key, key_size);
+
+ if (item) {
+ if (aux_return)
+ *(void **)aux_return = (void *)((char *)item->key + item->key_size);
+
+ *offset_return = addr = item->offset;
+ }
+
+ if (item == NULL || addr != cache->last_addr) {
+ cache->brw->state.dirty.cache |= 1<<cache->id;
+ cache->last_addr = addr;
+ }
+
+ return item != NULL;
+}
+
+unsigned brw_upload_cache( struct brw_cache *cache,
+ const void *key,
+ unsigned key_size,
+ const void *data,
+ unsigned data_size,
+ const void *aux,
+ void *aux_return )
+{
+ unsigned offset;
+ struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
+ unsigned hash = hash_key(key, key_size);
+ void *tmp = MALLOC(key_size + cache->aux_size);
+
+ if (!brw_pool_alloc(cache->pool, data_size, 1 << 6, &offset)) {
+ /* Should not be possible:
+ */
+ debug_printf("brw_pool_alloc failed\n");
+ exit(1);
+ }
+
+ memcpy(tmp, key, key_size);
+
+ if (cache->aux_size)
+ memcpy((char *)tmp + key_size, aux, cache->aux_size);
+
+ item->key = tmp;
+ item->hash = hash;
+ item->key_size = key_size;
+ item->offset = offset;
+ item->data_size = data_size;
+
+ if (++cache->n_items > cache->size * 1.5)
+ rehash(cache);
+
+ hash %= cache->size;
+ item->next = cache->items[hash];
+ cache->items[hash] = item;
+
+ if (aux_return) {
+ assert(cache->aux_size);
+ *(void **)aux_return = (void *)((char *)item->key + item->key_size);
+ }
+
+ if (BRW_DEBUG & DEBUG_STATE)
+ debug_printf("upload %s: %d bytes to pool buffer %p offset %x\n",
+ cache->name,
+ data_size,
+ (void*)cache->pool->buffer,
+ offset);
+
+ /* Copy data to the buffer:
+ */
+ cache->brw->winsys->buffer_subdata_typed(cache->brw->winsys,
+ cache->pool->buffer,
+ offset,
+ data_size,
+ data,
+ cache->id);
+
+ cache->brw->state.dirty.cache |= 1<<cache->id;
+ cache->last_addr = offset;
+
+ return offset;
+}
+
+/* This doesn't really work with aux data. Use search/upload instead
+ */
+unsigned brw_cache_data_sz(struct brw_cache *cache,
+ const void *data,
+ unsigned data_size)
+{
+ unsigned addr;
+
+ if (!brw_search_cache(cache, data, data_size, NULL, &addr)) {
+ addr = brw_upload_cache(cache,
+ data, data_size,
+ data, data_size,
+ NULL, NULL);
+ }
+
+ return addr;
+}
+
+unsigned brw_cache_data(struct brw_cache *cache,
+ const void *data)
+{
+ return brw_cache_data_sz(cache, data, cache->key_size);
+}
+
+enum pool_type {
+ DW_SURFACE_STATE,
+ DW_GENERAL_STATE
+};
+
+static void brw_init_cache( struct brw_context *brw,
+ const char *name,
+ unsigned id,
+ unsigned key_size,
+ unsigned aux_size,
+ enum pool_type pool_type)
+{
+ struct brw_cache *cache = &brw->cache[id];
+ cache->brw = brw;
+ cache->id = id;
+ cache->name = name;
+ cache->items = NULL;
+
+ cache->size = 7;
+ cache->n_items = 0;
+ cache->items = (struct brw_cache_item **)
+ CALLOC(cache->size, sizeof(struct brw_cache_item *));
+
+
+ cache->key_size = key_size;
+ cache->aux_size = aux_size;
+ switch (pool_type) {
+ case DW_GENERAL_STATE: cache->pool = &brw->pool[BRW_GS_POOL]; break;
+ case DW_SURFACE_STATE: cache->pool = &brw->pool[BRW_SS_POOL]; break;
+ default: assert(0); break;
+ }
+}
+
+void brw_init_caches( struct brw_context *brw )
+{
+
+ brw_init_cache(brw,
+ "CC_VP",
+ BRW_CC_VP,
+ sizeof(struct brw_cc_viewport),
+ 0,
+ DW_GENERAL_STATE);
+
+ brw_init_cache(brw,
+ "CC_UNIT",
+ BRW_CC_UNIT,
+ sizeof(struct brw_cc_unit_state),
+ 0,
+ DW_GENERAL_STATE);
+
+ brw_init_cache(brw,
+ "WM_PROG",
+ BRW_WM_PROG,
+ sizeof(struct brw_wm_prog_key),
+ sizeof(struct brw_wm_prog_data),
+ DW_GENERAL_STATE);
+
+ brw_init_cache(brw,
+ "SAMPLER_DEFAULT_COLOR",
+ BRW_SAMPLER_DEFAULT_COLOR,
+ sizeof(struct brw_sampler_default_color),
+ 0,
+ DW_GENERAL_STATE);
+
+ brw_init_cache(brw,
+ "SAMPLER",
+ BRW_SAMPLER,
+ 0, /* variable key/data size */
+ 0,
+ DW_GENERAL_STATE);
+
+ brw_init_cache(brw,
+ "WM_UNIT",
+ BRW_WM_UNIT,
+ sizeof(struct brw_wm_unit_state),
+ 0,
+ DW_GENERAL_STATE);
+
+ brw_init_cache(brw,
+ "SF_PROG",
+ BRW_SF_PROG,
+ sizeof(struct brw_sf_prog_key),
+ sizeof(struct brw_sf_prog_data),
+ DW_GENERAL_STATE);
+
+ brw_init_cache(brw,
+ "SF_VP",
+ BRW_SF_VP,
+ sizeof(struct brw_sf_viewport),
+ 0,
+ DW_GENERAL_STATE);
+
+ brw_init_cache(brw,
+ "SF_UNIT",
+ BRW_SF_UNIT,
+ sizeof(struct brw_sf_unit_state),
+ 0,
+ DW_GENERAL_STATE);
+
+ brw_init_cache(brw,
+ "VS_UNIT",
+ BRW_VS_UNIT,
+ sizeof(struct brw_vs_unit_state),
+ 0,
+ DW_GENERAL_STATE);
+
+ brw_init_cache(brw,
+ "VS_PROG",
+ BRW_VS_PROG,
+ sizeof(struct brw_vs_prog_key),
+ sizeof(struct brw_vs_prog_data),
+ DW_GENERAL_STATE);
+
+ brw_init_cache(brw,
+ "CLIP_UNIT",
+ BRW_CLIP_UNIT,
+ sizeof(struct brw_clip_unit_state),
+ 0,
+ DW_GENERAL_STATE);
+
+ brw_init_cache(brw,
+ "CLIP_PROG",
+ BRW_CLIP_PROG,
+ sizeof(struct brw_clip_prog_key),
+ sizeof(struct brw_clip_prog_data),
+ DW_GENERAL_STATE);
+
+ brw_init_cache(brw,
+ "GS_UNIT",
+ BRW_GS_UNIT,
+ sizeof(struct brw_gs_unit_state),
+ 0,
+ DW_GENERAL_STATE);
+
+ brw_init_cache(brw,
+ "GS_PROG",
+ BRW_GS_PROG,
+ sizeof(struct brw_gs_prog_key),
+ sizeof(struct brw_gs_prog_data),
+ DW_GENERAL_STATE);
+
+ brw_init_cache(brw,
+ "SS_SURFACE",
+ BRW_SS_SURFACE,
+ sizeof(struct brw_surface_state),
+ 0,
+ DW_SURFACE_STATE);
+
+ brw_init_cache(brw,
+ "SS_SURF_BIND",
+ BRW_SS_SURF_BIND,
+ sizeof(struct brw_surface_binding_table),
+ 0,
+ DW_SURFACE_STATE);
+}
+
+
+/* When we lose the hardware context, we need to invalidate the surface
+ * cache, as these structs must be explicitly re-uploaded.  They are subject
+ * to fixup by the memory manager as they contain absolute AGP
+ * offsets, so we need to ensure there is a fresh version of the
+ * struct available to receive the fixup.
+ *
+ * XXX: Need to ensure that there aren't two versions of a surface or
+ * bufferobj with different backing data active in the same buffer at
+ * once? Otherwise the cache could confuse them. Maybe better not to
+ * cache at all?
+ *
+ * --> Isn't this the same as saying need to ensure batch is flushed
+ * before new data is uploaded to an existing buffer? We
+ * already try to make sure of that.
+ */
+static void clear_cache( struct brw_cache *cache )
+{
+ struct brw_cache_item *c, *next;
+ unsigned i;
+
+ for (i = 0; i < cache->size; i++) {
+ for (c = cache->items[i]; c; c = next) {
+ next = c->next;
+ free((void *)c->key);
+ free(c);
+ }
+ cache->items[i] = NULL;
+ }
+
+ cache->n_items = 0;
+}
+
+void brw_clear_all_caches( struct brw_context *brw )
+{
+ int i;
+
+ if (BRW_DEBUG & DEBUG_STATE)
+ debug_printf("%s\n", __FUNCTION__);
+
+ for (i = 0; i < BRW_MAX_CACHE; i++)
+ clear_cache(&brw->cache[i]);
+
+ if (brw->curbe.last_buf) {
+ FREE(brw->curbe.last_buf);
+ brw->curbe.last_buf = NULL;
+ }
+
+ brw->state.dirty.brw |= ~0;
+ brw->state.dirty.cache |= ~0;
+}
+
+
+
+
+
+void brw_destroy_caches( struct brw_context *brw )
+{
+ unsigned i;
+
+ for (i = 0; i < BRW_MAX_CACHE; i++)
+ clear_cache(&brw->cache[i]);
+}
diff --git a/src/gallium/drivers/i965simple/brw_state_pool.c b/src/gallium/drivers/i965simple/brw_state_pool.c
new file mode 100644
index 0000000000..e91263cb1f
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_state_pool.c
@@ -0,0 +1,138 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+/** @file brw_state_pool.c
+ * Implements the state pool allocator.
+ *
+ * For the 965, we create two state pools for state cache entries. Objects
+ * will be allocated into the pools depending on which state base address
+ * their pointer is relative to in other 965 state.
+ *
+ * The state pools are relatively simple: As objects are allocated, increment
+ * the offset to allocate space. When the pool is "full" (rather, close to
+ * full), we reset the pool and reset the state cache entries that point into
+ * the pool.
+ */
+
+#include "pipe/internal/p_winsys_screen.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "pipe/p_inlines.h"
+#include "brw_context.h"
+#include "brw_state.h"
+
+boolean brw_pool_alloc( struct brw_mem_pool *pool,
+ unsigned size,
+ unsigned alignment,
+ unsigned *offset_return)
+{
+ unsigned fixup = align(pool->offset, alignment) - pool->offset;
+
+ size = align(size, 4);
+
+ if (pool->offset + fixup + size >= pool->size) {
+ debug_printf("%s failed\n", __FUNCTION__);
+ assert(0);
+ exit(1);
+ }
+
+ pool->offset += fixup;
+ *offset_return = pool->offset;
+ pool->offset += size;
+
+ return TRUE;
+}
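+/* A worked example of the arithmetic above: with pool->offset == 100,
+ * alignment == 64 and size == 32, fixup == align(100, 64) - 100 == 28,
+ * so the object is placed at offset 128 and pool->offset advances to
+ * 128 + align(32, 4) == 160.
+ */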
+
+static
+void brw_invalidate_pool( struct brw_mem_pool *pool )
+{
+ if (BRW_DEBUG & DEBUG_STATE)
+ debug_printf("\n\n\n %s \n\n\n", __FUNCTION__);
+
+ pool->offset = 0;
+
+ brw_clear_all_caches(pool->brw);
+}
+
+
+static void brw_init_pool( struct brw_context *brw,
+ unsigned pool_id,
+ unsigned size )
+{
+ struct brw_mem_pool *pool = &brw->pool[pool_id];
+
+ pool->size = size;
+ pool->brw = brw;
+
+ pool->buffer = pipe_buffer_create(brw->pipe.screen,
+ 4096,
+ 0 /* DRM_BO_FLAG_MEM_TT */,
+ size);
+}
+
+static void brw_destroy_pool( struct brw_context *brw,
+ unsigned pool_id )
+{
+ struct brw_mem_pool *pool = &brw->pool[pool_id];
+
+ pipe_buffer_reference( pool->brw->pipe.screen,
+ &pool->buffer,
+ NULL );
+}
+
+
+void brw_pool_check_wrap( struct brw_context *brw,
+ struct brw_mem_pool *pool )
+{
+ if (pool->offset > (pool->size * 3) / 4) {
+ brw->state.dirty.brw |= BRW_NEW_SCENE;
+ }
+
+}
+
+void brw_init_pools( struct brw_context *brw )
+{
+ brw_init_pool(brw, BRW_GS_POOL, 0x80000);
+ brw_init_pool(brw, BRW_SS_POOL, 0x80000);
+}
+
+void brw_destroy_pools( struct brw_context *brw )
+{
+ brw_destroy_pool(brw, BRW_GS_POOL);
+ brw_destroy_pool(brw, BRW_SS_POOL);
+}
+
+
+void brw_invalidate_pools( struct brw_context *brw )
+{
+ brw_invalidate_pool(&brw->pool[BRW_GS_POOL]);
+ brw_invalidate_pool(&brw->pool[BRW_SS_POOL]);
+}
diff --git a/src/gallium/drivers/i965simple/brw_state_upload.c b/src/gallium/drivers/i965simple/brw_state_upload.c
new file mode 100644
index 0000000000..bac9161b5f
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_state_upload.c
@@ -0,0 +1,202 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "brw_context.h"
+#include "brw_state.h"
+
+#include "util/u_memory.h"
+
+/* This is used to initialize brw->state.atoms[].  We could use this list
+ * directly except for a single atom, brw_constant_buffer, whose .dirty
+ * value changes with the parameters of the current fragment and vertex
+ * programs and so cannot be static.
+ */
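+/* brw_sf_vp in brw_sf_state.c is a representative atom from the list below:
+ * it pairs a dirty mask (BRW_NEW_SCISSOR | BRW_NEW_VIEWPORT) with its
+ * upload_sf_vp() callback, and brw_validate_state() further down runs the
+ * callback whenever one of those bits is set.
+ */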
+const struct brw_tracked_state *atoms[] =
+{
+ &brw_vs_prog,
+ &brw_gs_prog,
+ &brw_clip_prog,
+ &brw_sf_prog,
+ &brw_wm_prog,
+
+ /* Once all the programs are done, we know how large urb entry
+ * sizes need to be and can decide if we need to change the urb
+ * layout.
+ */
+ &brw_curbe_offsets,
+ &brw_recalculate_urb_fence,
+
+
+ &brw_cc_vp,
+ &brw_cc_unit,
+
+ &brw_wm_surfaces, /* must do before samplers */
+ &brw_wm_samplers,
+
+ &brw_wm_unit,
+ &brw_sf_vp,
+ &brw_sf_unit,
+ &brw_vs_unit, /* always required, enabled or not */
+ &brw_clip_unit,
+ &brw_gs_unit,
+
+ /* Command packets:
+ */
+ &brw_invarient_state,
+ &brw_state_base_address,
+ &brw_pipe_control,
+
+ &brw_binding_table_pointers,
+ &brw_blend_constant_color,
+
+ &brw_drawing_rect,
+ &brw_depthbuffer,
+
+ &brw_polygon_stipple,
+ &brw_line_stipple,
+
+ &brw_psp_urb_cbs,
+
+ &brw_constant_buffer
+};
+
+
+void brw_init_state( struct brw_context *brw )
+{
+ brw_init_pools(brw);
+ brw_init_caches(brw);
+
+ brw->state.dirty.brw = ~0;
+ brw->emit_state_always = 0;
+}
+
+
+void brw_destroy_state( struct brw_context *brw )
+{
+ brw_destroy_caches(brw);
+ brw_destroy_batch_cache(brw);
+ brw_destroy_pools(brw);
+}
+
+/***********************************************************************
+ */
+
+static boolean check_state( const struct brw_state_flags *a,
+ const struct brw_state_flags *b )
+{
+ return ((a->brw & b->brw) ||
+ (a->cache & b->cache));
+}
+
+static void accumulate_state( struct brw_state_flags *a,
+ const struct brw_state_flags *b )
+{
+ a->brw |= b->brw;
+ a->cache |= b->cache;
+}
+
+
+static void xor_states( struct brw_state_flags *result,
+ const struct brw_state_flags *a,
+ const struct brw_state_flags *b )
+{
+ result->brw = a->brw ^ b->brw;
+ result->cache = a->cache ^ b->cache;
+}
+
+
+/***********************************************************************
+ * Emit all state:
+ */
+void brw_validate_state( struct brw_context *brw )
+{
+ struct brw_state_flags *state = &brw->state.dirty;
+ unsigned i;
+
+ if (brw->emit_state_always)
+ state->brw |= ~0;
+
+ if (state->cache == 0 &&
+ state->brw == 0)
+ return;
+
+ if (brw->state.dirty.brw & BRW_NEW_SCENE)
+ brw_clear_batch_cache_flush(brw);
+
+ if (BRW_DEBUG) {
+ /* Debug version which enforces various sanity checks on the
+ * state flags which are generated and checked to help ensure
+ * state atoms are ordered correctly in the list.
+ */
+ struct brw_state_flags examined, prev;
+ memset(&examined, 0, sizeof(examined));
+ prev = *state;
+
+ for (i = 0; i < Elements(atoms); i++) {
+ const struct brw_tracked_state *atom = atoms[i];
+ struct brw_state_flags generated;
+
+ assert(atom->dirty.brw ||
+ atom->dirty.cache);
+ assert(atom->update);
+
+ if (check_state(state, &atom->dirty)) {
+ atom->update( brw );
+ }
+
+ accumulate_state(&examined, &atom->dirty);
+
+ /* generated = (prev ^ state)
+ * if (examined & generated)
+ * fail;
+ */
+ xor_states(&generated, &prev, state);
+ assert(!check_state(&examined, &generated));
+ prev = *state;
+ }
+ }
+ else {
+ for (i = 0; i < Elements(atoms); i++) {
+ const struct brw_tracked_state *atom = atoms[i];
+
+ assert(atom->dirty.brw ||
+ atom->dirty.cache);
+ assert(atom->update);
+
+ if (check_state(state, &atom->dirty))
+ atom->update( brw );
+ }
+ }
+
+ memset(state, 0, sizeof(*state));
+}
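The xor/accumulate check in the debug path above enforces that no atom dirties a flag which an earlier atom in the list has already consumed during this pass. A standalone sketch of the same check, using plain bitmasks instead of struct brw_state_flags; the two flags and the two "atoms" are made up for illustration:

   #include <assert.h>
   #include <stdio.h>

   #define FLAG_A (1 << 0)   /* consumed by atom 0 */
   #define FLAG_B (1 << 1)   /* consumed by atom 1, produced by atom 0 */

   int main(void)
   {
      unsigned state = FLAG_A;     /* dirty flags at the start of validation */
      unsigned examined = 0;       /* union of flags already consumed */
      unsigned prev, generated;

      /* Atom 0: consumes FLAG_A and, as a side effect, produces FLAG_B. */
      prev = state;
      examined |= FLAG_A;
      state |= FLAG_B;                    /* the atom's update() ran */
      generated = prev ^ state;           /* flags it produced */
      assert(!(examined & generated));    /* ok: FLAG_B not consumed yet */

      /* Atom 1: consumes FLAG_B and produces nothing.  If it instead
       * produced FLAG_A, the check would fail, because an earlier atom
       * already consumed FLAG_A and will not run again this pass --
       * i.e. the list would be mis-ordered.
       */
      prev = state;
      examined |= FLAG_B;
      generated = prev ^ state;
      assert(!(examined & generated));

      printf("atom ordering ok\n");
      return 0;
   }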
diff --git a/src/gallium/drivers/i965simple/brw_structs.h b/src/gallium/drivers/i965simple/brw_structs.h
new file mode 100644
index 0000000000..bbb087e95d
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_structs.h
@@ -0,0 +1,1348 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#ifndef BRW_STRUCTS_H
+#define BRW_STRUCTS_H
+
+#include "pipe/p_compiler.h"
+
+/* Command packets:
+ */
+struct header
+{
+ unsigned length:16;
+ unsigned opcode:16;
+};
+
+
+union header_union
+{
+ struct header bits;
+ unsigned dword;
+};
+
+struct brw_3d_control
+{
+ struct
+ {
+ unsigned length:8;
+ unsigned notify_enable:1;
+ unsigned pad:3;
+ unsigned wc_flush_enable:1;
+ unsigned depth_stall_enable:1;
+ unsigned operation:2;
+ unsigned opcode:16;
+ } header;
+
+ struct
+ {
+ unsigned pad:2;
+ unsigned dest_addr_type:1;
+ unsigned dest_addr:29;
+ } dest;
+
+ unsigned dword2;
+ unsigned dword3;
+};
+
+
+struct brw_3d_primitive
+{
+ struct
+ {
+ unsigned length:8;
+ unsigned pad:2;
+ unsigned topology:5;
+ unsigned indexed:1;
+ unsigned opcode:16;
+ } header;
+
+ unsigned verts_per_instance;
+ unsigned start_vert_location;
+ unsigned instance_count;
+ unsigned start_instance_location;
+ unsigned base_vert_location;
+};
+
+/* These seem to be passed around as function args, so it works out
+ * better to keep them as #defines:
+ */
+#define BRW_FLUSH_READ_CACHE 0x1
+#define BRW_FLUSH_STATE_CACHE 0x2
+#define BRW_INHIBIT_FLUSH_RENDER_CACHE 0x4
+#define BRW_FLUSH_SNAPSHOT_COUNTERS 0x8
+
+struct brw_mi_flush
+{
+ unsigned flags:4;
+ unsigned pad:12;
+ unsigned opcode:16;
+};
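As the comment above notes, these flush flags are passed around as plain function arguments. A small sketch of how they might be OR-ed together and written into the brw_mi_flush packet; the helper and the CMD_MI_FLUSH opcode constant are assumptions, not taken from this patch:

   static void example_fill_flush( struct brw_mi_flush *f, unsigned flags )
   {
      f->flags = flags;             /* e.g. BRW_FLUSH_READ_CACHE | BRW_FLUSH_STATE_CACHE */
      f->pad = 0;
      f->opcode = CMD_MI_FLUSH;     /* assumed to come from brw_defines.h */
   }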
+
+struct brw_vf_statistics
+{
+ unsigned statistics_enable:1;
+ unsigned pad:15;
+ unsigned opcode:16;
+};
+
+
+
+struct brw_binding_table_pointers
+{
+ struct header header;
+ unsigned vs;
+ unsigned gs;
+ unsigned clp;
+ unsigned sf;
+ unsigned wm;
+};
+
+
+struct brw_blend_constant_color
+{
+ struct header header;
+ float blend_constant_color[4];
+};
+
+
+struct brw_depthbuffer
+{
+ union header_union header;
+
+ union {
+ struct {
+ unsigned pitch:18;
+ unsigned format:3;
+ unsigned pad:4;
+ unsigned depth_offset_disable:1;
+ unsigned tile_walk:1;
+ unsigned tiled_surface:1;
+ unsigned pad2:1;
+ unsigned surface_type:3;
+ } bits;
+ unsigned dword;
+ } dword1;
+
+ unsigned dword2_base_addr;
+
+ union {
+ struct {
+ unsigned pad:1;
+ unsigned mipmap_layout:1;
+ unsigned lod:4;
+ unsigned width:13;
+ unsigned height:13;
+ } bits;
+ unsigned dword;
+ } dword3;
+
+ union {
+ struct {
+ unsigned pad:12;
+ unsigned min_array_element:9;
+ unsigned depth:11;
+ } bits;
+ unsigned dword;
+ } dword4;
+};
+
+struct brw_drawrect
+{
+ struct header header;
+ unsigned xmin:16;
+ unsigned ymin:16;
+ unsigned xmax:16;
+ unsigned ymax:16;
+ unsigned xorg:16;
+ unsigned yorg:16;
+};
+
+
+
+
+struct brw_global_depth_offset_clamp
+{
+ struct header header;
+ float depth_offset_clamp;
+};
+
+struct brw_indexbuffer
+{
+ union {
+ struct
+ {
+ unsigned length:8;
+ unsigned index_format:2;
+ unsigned cut_index_enable:1;
+ unsigned pad:5;
+ unsigned opcode:16;
+ } bits;
+ unsigned dword;
+
+ } header;
+
+ unsigned buffer_start;
+ unsigned buffer_end;
+};
+
+
+struct brw_line_stipple
+{
+ struct header header;
+
+ struct
+ {
+ unsigned pattern:16;
+ unsigned pad:16;
+ } bits0;
+
+ struct
+ {
+ unsigned repeat_count:9;
+ unsigned pad:7;
+ unsigned inverse_repeat_count:16;
+ } bits1;
+};
+
+
+struct brw_pipelined_state_pointers
+{
+ struct header header;
+
+ struct {
+ unsigned pad:5;
+ unsigned offset:27;
+ } vs;
+
+ struct
+ {
+ unsigned enable:1;
+ unsigned pad:4;
+ unsigned offset:27;
+ } gs;
+
+ struct
+ {
+ unsigned enable:1;
+ unsigned pad:4;
+ unsigned offset:27;
+ } clp;
+
+ struct
+ {
+ unsigned pad:5;
+ unsigned offset:27;
+ } sf;
+
+ struct
+ {
+ unsigned pad:5;
+ unsigned offset:27;
+ } wm;
+
+ struct
+ {
+ unsigned pad:5;
+ unsigned offset:27; /* KW: check me! */
+ } cc;
+};
+
+
+struct brw_polygon_stipple_offset
+{
+ struct header header;
+
+ struct {
+ unsigned y_offset:5;
+ unsigned pad:3;
+ unsigned x_offset:5;
+ unsigned pad0:19;
+ } bits0;
+};
+
+
+
+struct brw_polygon_stipple
+{
+ struct header header;
+ unsigned stipple[32];
+};
+
+
+
+struct brw_pipeline_select
+{
+ struct
+ {
+ unsigned pipeline_select:1;
+ unsigned pad:15;
+ unsigned opcode:16;
+ } header;
+};
+
+
+struct brw_pipe_control
+{
+ struct
+ {
+ unsigned length:8;
+ unsigned notify_enable:1;
+ unsigned pad:2;
+ unsigned instruction_state_cache_flush_enable:1;
+ unsigned write_cache_flush_enable:1;
+ unsigned depth_stall_enable:1;
+ unsigned post_sync_operation:2;
+
+ unsigned opcode:16;
+ } header;
+
+ struct
+ {
+ unsigned pad:2;
+ unsigned dest_addr_type:1;
+ unsigned dest_addr:29;
+ } bits1;
+
+ unsigned data0;
+ unsigned data1;
+};
+
+
+struct brw_urb_fence
+{
+ struct
+ {
+ unsigned length:8;
+ unsigned vs_realloc:1;
+ unsigned gs_realloc:1;
+ unsigned clp_realloc:1;
+ unsigned sf_realloc:1;
+ unsigned vfe_realloc:1;
+ unsigned cs_realloc:1;
+ unsigned pad:2;
+ unsigned opcode:16;
+ } header;
+
+ struct
+ {
+ unsigned vs_fence:10;
+ unsigned gs_fence:10;
+ unsigned clp_fence:10;
+ unsigned pad:2;
+ } bits0;
+
+ struct
+ {
+ unsigned sf_fence:10;
+ unsigned vf_fence:10;
+ unsigned cs_fence:10;
+ unsigned pad:2;
+ } bits1;
+};
+
+struct brw_constant_buffer_state /* previously brw_command_streamer */
+{
+ struct header header;
+
+ struct
+ {
+ unsigned nr_urb_entries:3;
+ unsigned pad:1;
+ unsigned urb_entry_size:5;
+ unsigned pad0:23;
+ } bits0;
+};
+
+struct brw_constant_buffer
+{
+ struct
+ {
+ unsigned length:8;
+ unsigned valid:1;
+ unsigned pad:7;
+ unsigned opcode:16;
+ } header;
+
+ struct
+ {
+ unsigned buffer_length:6;
+ unsigned buffer_address:26;
+ } bits0;
+};
+
+struct brw_state_base_address
+{
+ struct header header;
+
+ struct
+ {
+ unsigned modify_enable:1;
+ unsigned pad:4;
+ unsigned general_state_address:27;
+ } bits0;
+
+ struct
+ {
+ unsigned modify_enable:1;
+ unsigned pad:4;
+ unsigned surface_state_address:27;
+ } bits1;
+
+ struct
+ {
+ unsigned modify_enable:1;
+ unsigned pad:4;
+ unsigned indirect_object_state_address:27;
+ } bits2;
+
+ struct
+ {
+ unsigned modify_enable:1;
+ unsigned pad:11;
+ unsigned general_state_upper_bound:20;
+ } bits3;
+
+ struct
+ {
+ unsigned modify_enable:1;
+ unsigned pad:11;
+ unsigned indirect_object_state_upper_bound:20;
+ } bits4;
+};
+
+struct brw_state_prefetch
+{
+ struct header header;
+
+ struct
+ {
+ unsigned prefetch_count:3;
+ unsigned pad:3;
+ unsigned prefetch_pointer:26;
+ } bits0;
+};
+
+struct brw_system_instruction_pointer
+{
+ struct header header;
+
+ struct
+ {
+ unsigned pad:4;
+ unsigned system_instruction_pointer:28;
+ } bits0;
+};
+
+
+
+
+/* State structs for the various fixed function units:
+ */
+
+
+struct thread0
+{
+ unsigned pad0:1;
+ unsigned grf_reg_count:3;
+ unsigned pad1:2;
+ unsigned kernel_start_pointer:26;
+};
+
+struct thread1
+{
+ unsigned ext_halt_exception_enable:1;
+ unsigned sw_exception_enable:1;
+ unsigned mask_stack_exception_enable:1;
+ unsigned timeout_exception_enable:1;
+ unsigned illegal_op_exception_enable:1;
+ unsigned pad0:3;
+ unsigned depth_coef_urb_read_offset:6; /* WM only */
+ unsigned pad1:2;
+ unsigned floating_point_mode:1;
+ unsigned thread_priority:1;
+ unsigned binding_table_entry_count:8;
+ unsigned pad3:5;
+ unsigned single_program_flow:1;
+};
+
+struct thread2
+{
+ unsigned per_thread_scratch_space:4;
+ unsigned pad0:6;
+ unsigned scratch_space_base_pointer:22;
+};
+
+
+struct thread3
+{
+ unsigned dispatch_grf_start_reg:4;
+ unsigned urb_entry_read_offset:6;
+ unsigned pad0:1;
+ unsigned urb_entry_read_length:6;
+ unsigned pad1:1;
+ unsigned const_urb_entry_read_offset:6;
+ unsigned pad2:1;
+ unsigned const_urb_entry_read_length:6;
+ unsigned pad3:1;
+};
+
+
+
+struct brw_clip_unit_state
+{
+ struct thread0 thread0;
+ struct
+ {
+ unsigned pad0:7;
+ unsigned sw_exception_enable:1;
+ unsigned pad1:3;
+ unsigned mask_stack_exception_enable:1;
+ unsigned pad2:1;
+ unsigned illegal_op_exception_enable:1;
+ unsigned pad3:2;
+ unsigned floating_point_mode:1;
+ unsigned thread_priority:1;
+ unsigned binding_table_entry_count:8;
+ unsigned pad4:5;
+ unsigned single_program_flow:1;
+ } thread1;
+
+ struct thread2 thread2;
+ struct thread3 thread3;
+
+ struct
+ {
+ unsigned pad0:9;
+ unsigned gs_output_stats:1; /* not always */
+ unsigned stats_enable:1;
+ unsigned nr_urb_entries:7;
+ unsigned pad1:1;
+ unsigned urb_entry_allocation_size:5;
+ unsigned pad2:1;
+ unsigned max_threads:1; /* may be less */
+ unsigned pad3:6;
+ } thread4;
+
+ struct
+ {
+ unsigned pad0:13;
+ unsigned clip_mode:3;
+ unsigned userclip_enable_flags:8;
+ unsigned userclip_must_clip:1;
+ unsigned pad1:1;
+ unsigned guard_band_enable:1;
+ unsigned viewport_z_clip_enable:1;
+ unsigned viewport_xy_clip_enable:1;
+ unsigned vertex_position_space:1;
+ unsigned api_mode:1;
+ unsigned pad2:1;
+ } clip5;
+
+ struct
+ {
+ unsigned pad0:5;
+ unsigned clipper_viewport_state_ptr:27;
+ } clip6;
+
+
+ float viewport_xmin;
+ float viewport_xmax;
+ float viewport_ymin;
+ float viewport_ymax;
+};
+
+
+
+struct brw_cc_unit_state
+{
+ struct
+ {
+ unsigned pad0:3;
+ unsigned bf_stencil_pass_depth_pass_op:3;
+ unsigned bf_stencil_pass_depth_fail_op:3;
+ unsigned bf_stencil_fail_op:3;
+ unsigned bf_stencil_func:3;
+ unsigned bf_stencil_enable:1;
+ unsigned pad1:2;
+ unsigned stencil_write_enable:1;
+ unsigned stencil_pass_depth_pass_op:3;
+ unsigned stencil_pass_depth_fail_op:3;
+ unsigned stencil_fail_op:3;
+ unsigned stencil_func:3;
+ unsigned stencil_enable:1;
+ } cc0;
+
+
+ struct
+ {
+ unsigned bf_stencil_ref:8;
+ unsigned stencil_write_mask:8;
+ unsigned stencil_test_mask:8;
+ unsigned stencil_ref:8;
+ } cc1;
+
+
+ struct
+ {
+ unsigned logicop_enable:1;
+ unsigned pad0:10;
+ unsigned depth_write_enable:1;
+ unsigned depth_test_function:3;
+ unsigned depth_test:1;
+ unsigned bf_stencil_write_mask:8;
+ unsigned bf_stencil_test_mask:8;
+ } cc2;
+
+
+ struct
+ {
+ unsigned pad0:8;
+ unsigned alpha_test_func:3;
+ unsigned alpha_test:1;
+ unsigned blend_enable:1;
+ unsigned ia_blend_enable:1;
+ unsigned pad1:1;
+ unsigned alpha_test_format:1;
+ unsigned pad2:16;
+ } cc3;
+
+ struct
+ {
+ unsigned pad0:5;
+ unsigned cc_viewport_state_offset:27;
+ } cc4;
+
+ struct
+ {
+ unsigned pad0:2;
+ unsigned ia_dest_blend_factor:5;
+ unsigned ia_src_blend_factor:5;
+ unsigned ia_blend_function:3;
+ unsigned statistics_enable:1;
+ unsigned logicop_func:4;
+ unsigned pad1:11;
+ unsigned dither_enable:1;
+ } cc5;
+
+ struct
+ {
+ unsigned clamp_post_alpha_blend:1;
+ unsigned clamp_pre_alpha_blend:1;
+ unsigned clamp_range:2;
+ unsigned pad0:11;
+ unsigned y_dither_offset:2;
+ unsigned x_dither_offset:2;
+ unsigned dest_blend_factor:5;
+ unsigned src_blend_factor:5;
+ unsigned blend_function:3;
+ } cc6;
+
+ struct {
+ union {
+ float f;
+ ubyte ub[4];
+ } alpha_ref;
+ } cc7;
+};
+
+
+
+struct brw_sf_unit_state
+{
+ struct thread0 thread0;
+ struct thread1 thread1;
+ struct thread2 thread2;
+ struct thread3 thread3;
+
+ struct
+ {
+ unsigned pad0:10;
+ unsigned stats_enable:1;
+ unsigned nr_urb_entries:7;
+ unsigned pad1:1;
+ unsigned urb_entry_allocation_size:5;
+ unsigned pad2:1;
+ unsigned max_threads:6;
+ unsigned pad3:1;
+ } thread4;
+
+ struct
+ {
+ unsigned front_winding:1;
+ unsigned viewport_transform:1;
+ unsigned pad0:3;
+ unsigned sf_viewport_state_offset:27;
+ } sf5;
+
+ struct
+ {
+ unsigned pad0:9;
+ unsigned dest_org_vbias:4;
+ unsigned dest_org_hbias:4;
+ unsigned scissor:1;
+ unsigned disable_2x2_trifilter:1;
+ unsigned disable_zero_pix_trifilter:1;
+ unsigned point_rast_rule:2;
+ unsigned line_endcap_aa_region_width:2;
+ unsigned line_width:4;
+ unsigned fast_scissor_disable:1;
+ unsigned cull_mode:2;
+ unsigned aa_enable:1;
+ } sf6;
+
+ struct
+ {
+ unsigned point_size:11;
+ unsigned use_point_size_state:1;
+ unsigned subpixel_precision:1;
+ unsigned sprite_point:1;
+ unsigned pad0:11;
+ unsigned trifan_pv:2;
+ unsigned linestrip_pv:2;
+ unsigned tristrip_pv:2;
+ unsigned line_last_pixel_enable:1;
+ } sf7;
+
+};
+
+
+struct brw_gs_unit_state
+{
+ struct thread0 thread0;
+ struct thread1 thread1;
+ struct thread2 thread2;
+ struct thread3 thread3;
+
+ struct
+ {
+ unsigned pad0:10;
+ unsigned stats_enable:1;
+ unsigned nr_urb_entries:7;
+ unsigned pad1:1;
+ unsigned urb_entry_allocation_size:5;
+ unsigned pad2:1;
+ unsigned max_threads:1;
+ unsigned pad3:6;
+ } thread4;
+
+ struct
+ {
+ unsigned sampler_count:3;
+ unsigned pad0:2;
+ unsigned sampler_state_pointer:27;
+ } gs5;
+
+
+ struct
+ {
+ unsigned max_vp_index:4;
+ unsigned pad0:26;
+ unsigned reorder_enable:1;
+ unsigned pad1:1;
+ } gs6;
+};
+
+
+struct brw_vs_unit_state
+{
+ struct thread0 thread0;
+ struct thread1 thread1;
+ struct thread2 thread2;
+ struct thread3 thread3;
+
+ struct
+ {
+ unsigned pad0:10;
+ unsigned stats_enable:1;
+ unsigned nr_urb_entries:7;
+ unsigned pad1:1;
+ unsigned urb_entry_allocation_size:5;
+ unsigned pad2:1;
+ unsigned max_threads:4;
+ unsigned pad3:3;
+ } thread4;
+
+ struct
+ {
+ unsigned sampler_count:3;
+ unsigned pad0:2;
+ unsigned sampler_state_pointer:27;
+ } vs5;
+
+ struct
+ {
+ unsigned vs_enable:1;
+ unsigned vert_cache_disable:1;
+ unsigned pad0:30;
+ } vs6;
+};
+
+
+struct brw_wm_unit_state
+{
+ struct thread0 thread0;
+ struct thread1 thread1;
+ struct thread2 thread2;
+ struct thread3 thread3;
+
+ struct {
+ unsigned stats_enable:1;
+ unsigned pad0:1;
+ unsigned sampler_count:3;
+ unsigned sampler_state_pointer:27;
+ } wm4;
+
+ struct
+ {
+ unsigned enable_8_pix:1;
+ unsigned enable_16_pix:1;
+ unsigned enable_32_pix:1;
+ unsigned pad0:7;
+ unsigned legacy_global_depth_bias:1;
+ unsigned line_stipple:1;
+ unsigned depth_offset:1;
+ unsigned polygon_stipple:1;
+ unsigned line_aa_region_width:2;
+ unsigned line_endcap_aa_region_width:2;
+ unsigned early_depth_test:1;
+ unsigned thread_dispatch_enable:1;
+ unsigned program_uses_depth:1;
+ unsigned program_computes_depth:1;
+ unsigned program_uses_killpixel:1;
+ unsigned legacy_line_rast: 1;
+ unsigned pad1:1;
+ unsigned max_threads:6;
+ unsigned pad2:1;
+ } wm5;
+
+ float global_depth_offset_constant;
+ float global_depth_offset_scale;
+};
+
+struct brw_sampler_default_color {
+ float color[4];
+};
+
+struct brw_sampler_state
+{
+
+ struct
+ {
+ unsigned shadow_function:3;
+ unsigned lod_bias:11;
+ unsigned min_filter:3;
+ unsigned mag_filter:3;
+ unsigned mip_filter:2;
+ unsigned base_level:5;
+ unsigned pad:1;
+ unsigned lod_preclamp:1;
+ unsigned default_color_mode:1;
+ unsigned pad0:1;
+ unsigned disable:1;
+ } ss0;
+
+ struct
+ {
+ unsigned r_wrap_mode:3;
+ unsigned t_wrap_mode:3;
+ unsigned s_wrap_mode:3;
+ unsigned pad:3;
+ unsigned max_lod:10;
+ unsigned min_lod:10;
+ } ss1;
+
+
+ struct
+ {
+ unsigned pad:5;
+ unsigned default_color_pointer:27;
+ } ss2;
+
+ struct
+ {
+ unsigned pad:19;
+ unsigned max_aniso:3;
+ unsigned chroma_key_mode:1;
+ unsigned chroma_key_index:2;
+ unsigned chroma_key_enable:1;
+ unsigned monochrome_filter_width:3;
+ unsigned monochrome_filter_height:3;
+ } ss3;
+};
+
+
+struct brw_clipper_viewport
+{
+ float xmin;
+ float xmax;
+ float ymin;
+ float ymax;
+};
+
+struct brw_cc_viewport
+{
+ float min_depth;
+ float max_depth;
+};
+
+struct brw_sf_viewport
+{
+ struct {
+ float m00;
+ float m11;
+ float m22;
+ float m30;
+ float m31;
+ float m32;
+ } viewport;
+
+ struct {
+ short xmin;
+ short ymin;
+ short xmax;
+ short ymax;
+ } scissor;
+};
+
+/* Documented in the subsystem/shared-functions/sampler chapter...
+ */
+struct brw_surface_state
+{
+ struct {
+ unsigned cube_pos_z:1;
+ unsigned cube_neg_z:1;
+ unsigned cube_pos_y:1;
+ unsigned cube_neg_y:1;
+ unsigned cube_pos_x:1;
+ unsigned cube_neg_x:1;
+ unsigned pad:4;
+ unsigned mipmap_layout_mode:1;
+ unsigned vert_line_stride_ofs:1;
+ unsigned vert_line_stride:1;
+ unsigned color_blend:1;
+ unsigned writedisable_blue:1;
+ unsigned writedisable_green:1;
+ unsigned writedisable_red:1;
+ unsigned writedisable_alpha:1;
+ unsigned surface_format:9;
+ unsigned data_return_format:1;
+ unsigned pad0:1;
+ unsigned surface_type:3;
+ } ss0;
+
+ struct {
+ unsigned base_addr;
+ } ss1;
+
+ struct {
+ unsigned pad:2;
+ unsigned mip_count:4;
+ unsigned width:13;
+ unsigned height:13;
+ } ss2;
+
+ struct {
+ unsigned tile_walk:1;
+ unsigned tiled_surface:1;
+ unsigned pad:1;
+ unsigned pitch:18;
+ unsigned depth:11;
+ } ss3;
+
+ struct {
+ unsigned pad:19;
+ unsigned min_array_elt:9;
+ unsigned min_lod:4;
+ } ss4;
+};
+
+
+
+struct brw_vertex_buffer_state
+{
+ struct {
+ unsigned pitch:11;
+ unsigned pad:15;
+ unsigned access_type:1;
+ unsigned vb_index:5;
+ } vb0;
+
+ unsigned start_addr;
+ unsigned max_index;
+#if 1
+ unsigned instance_data_step_rate; /* not included for sequential/random vertices? */
+#endif
+};
+
+#define BRW_VBP_MAX 17
+
+struct brw_vb_array_state {
+ struct header header;
+ struct brw_vertex_buffer_state vb[BRW_VBP_MAX];
+};
+
+
+struct brw_vertex_element_state
+{
+ struct
+ {
+ unsigned src_offset:11;
+ unsigned pad:5;
+ unsigned src_format:9;
+ unsigned pad0:1;
+ unsigned valid:1;
+ unsigned vertex_buffer_index:5;
+ } ve0;
+
+ struct
+ {
+ unsigned dst_offset:8;
+ unsigned pad:8;
+ unsigned vfcomponent3:4;
+ unsigned vfcomponent2:4;
+ unsigned vfcomponent1:4;
+ unsigned vfcomponent0:4;
+ } ve1;
+};
+
+#define BRW_VEP_MAX 18
+
+struct brw_vertex_element_packet {
+ struct header header;
+ struct brw_vertex_element_state ve[BRW_VEP_MAX]; /* note: less than _TNL_ATTRIB_MAX */
+};
+
+
+struct brw_urb_immediate {
+ unsigned opcode:4;
+ unsigned offset:6;
+ unsigned swizzle_control:2;
+ unsigned pad:1;
+ unsigned allocate:1;
+ unsigned used:1;
+ unsigned complete:1;
+ unsigned response_length:4;
+ unsigned msg_length:4;
+ unsigned msg_target:4;
+ unsigned pad1:3;
+ unsigned end_of_thread:1;
+};
+
+/* Instruction format for the execution units:
+ */
+
+struct brw_instruction
+{
+ struct
+ {
+ unsigned opcode:7;
+ unsigned pad:1;
+ unsigned access_mode:1;
+ unsigned mask_control:1;
+ unsigned dependency_control:2;
+ unsigned compression_control:2;
+ unsigned thread_control:2;
+ unsigned predicate_control:4;
+ unsigned predicate_inverse:1;
+ unsigned execution_size:3;
+ unsigned destreg__conditonalmod:4; /* destreg - send, conditionalmod - others */
+ unsigned pad0:2;
+ unsigned debug_control:1;
+ unsigned saturate:1;
+ } header;
+
+ union {
+ struct
+ {
+ unsigned dest_reg_file:2;
+ unsigned dest_reg_type:3;
+ unsigned src0_reg_file:2;
+ unsigned src0_reg_type:3;
+ unsigned src1_reg_file:2;
+ unsigned src1_reg_type:3;
+ unsigned pad:1;
+ unsigned dest_subreg_nr:5;
+ unsigned dest_reg_nr:8;
+ unsigned dest_horiz_stride:2;
+ unsigned dest_address_mode:1;
+ } da1;
+
+ struct
+ {
+ unsigned dest_reg_file:2;
+ unsigned dest_reg_type:3;
+ unsigned src0_reg_file:2;
+ unsigned src0_reg_type:3;
+ unsigned pad:6;
+ int dest_indirect_offset:10; /* offset against the deref'd address reg */
+ unsigned dest_subreg_nr:3; /* subnr for the address reg a0.x */
+ unsigned dest_horiz_stride:2;
+ unsigned dest_address_mode:1;
+ } ia1;
+
+ struct
+ {
+ unsigned dest_reg_file:2;
+ unsigned dest_reg_type:3;
+ unsigned src0_reg_file:2;
+ unsigned src0_reg_type:3;
+ unsigned src1_reg_file:2;
+ unsigned src1_reg_type:3;
+ unsigned pad0:1;
+ unsigned dest_writemask:4;
+ unsigned dest_subreg_nr:1;
+ unsigned dest_reg_nr:8;
+ unsigned pad1:2;
+ unsigned dest_address_mode:1;
+ } da16;
+
+ struct
+ {
+ unsigned dest_reg_file:2;
+ unsigned dest_reg_type:3;
+ unsigned src0_reg_file:2;
+ unsigned src0_reg_type:3;
+ unsigned pad0:6;
+ unsigned dest_writemask:4;
+ int dest_indirect_offset:6;
+ unsigned dest_subreg_nr:3;
+ unsigned pad1:2;
+ unsigned dest_address_mode:1;
+ } ia16;
+ } bits1;
+
+
+ union {
+ struct
+ {
+ unsigned src0_subreg_nr:5;
+ unsigned src0_reg_nr:8;
+ unsigned src0_abs:1;
+ unsigned src0_negate:1;
+ unsigned src0_address_mode:1;
+ unsigned src0_horiz_stride:2;
+ unsigned src0_width:3;
+ unsigned src0_vert_stride:4;
+ unsigned flag_reg_nr:1;
+ unsigned pad:6;
+ } da1;
+
+ struct
+ {
+ int src0_indirect_offset:10;
+ unsigned src0_subreg_nr:3;
+ unsigned src0_abs:1;
+ unsigned src0_negate:1;
+ unsigned src0_address_mode:1;
+ unsigned src0_horiz_stride:2;
+ unsigned src0_width:3;
+ unsigned src0_vert_stride:4;
+ unsigned flag_reg_nr:1;
+ unsigned pad:6;
+ } ia1;
+
+ struct
+ {
+ unsigned src0_swz_x:2;
+ unsigned src0_swz_y:2;
+ unsigned src0_subreg_nr:1;
+ unsigned src0_reg_nr:8;
+ unsigned src0_abs:1;
+ unsigned src0_negate:1;
+ unsigned src0_address_mode:1;
+ unsigned src0_swz_z:2;
+ unsigned src0_swz_w:2;
+ unsigned pad0:1;
+ unsigned src0_vert_stride:4;
+ unsigned flag_reg_nr:1;
+ unsigned pad1:6;
+ } da16;
+
+ struct
+ {
+ unsigned src0_swz_x:2;
+ unsigned src0_swz_y:2;
+ int src0_indirect_offset:6;
+ unsigned src0_subreg_nr:3;
+ unsigned src0_abs:1;
+ unsigned src0_negate:1;
+ unsigned src0_address_mode:1;
+ unsigned src0_swz_z:2;
+ unsigned src0_swz_w:2;
+ unsigned pad0:1;
+ unsigned src0_vert_stride:4;
+ unsigned flag_reg_nr:1;
+ unsigned pad1:6;
+ } ia16;
+
+ } bits2;
+
+ union
+ {
+ struct
+ {
+ unsigned src1_subreg_nr:5;
+ unsigned src1_reg_nr:8;
+ unsigned src1_abs:1;
+ unsigned src1_negate:1;
+ unsigned pad:1;
+ unsigned src1_horiz_stride:2;
+ unsigned src1_width:3;
+ unsigned src1_vert_stride:4;
+ unsigned pad0:7;
+ } da1;
+
+ struct
+ {
+ unsigned src1_swz_x:2;
+ unsigned src1_swz_y:2;
+ unsigned src1_subreg_nr:1;
+ unsigned src1_reg_nr:8;
+ unsigned src1_abs:1;
+ unsigned src1_negate:1;
+ unsigned pad0:1;
+ unsigned src1_swz_z:2;
+ unsigned src1_swz_w:2;
+ unsigned pad1:1;
+ unsigned src1_vert_stride:4;
+ unsigned pad2:7;
+ } da16;
+
+ struct
+ {
+ int src1_indirect_offset:10;
+ unsigned src1_subreg_nr:3;
+ unsigned src1_abs:1;
+ unsigned src1_negate:1;
+ unsigned pad0:1;
+ unsigned src1_horiz_stride:2;
+ unsigned src1_width:3;
+ unsigned src1_vert_stride:4;
+ unsigned flag_reg_nr:1;
+ unsigned pad1:6;
+ } ia1;
+
+ struct
+ {
+ unsigned src1_swz_x:2;
+ unsigned src1_swz_y:2;
+ int src1_indirect_offset:6;
+ unsigned src1_subreg_nr:3;
+ unsigned src1_abs:1;
+ unsigned src1_negate:1;
+ unsigned pad0:1;
+ unsigned src1_swz_z:2;
+ unsigned src1_swz_w:2;
+ unsigned pad1:1;
+ unsigned src1_vert_stride:4;
+ unsigned flag_reg_nr:1;
+ unsigned pad2:6;
+ } ia16;
+
+
+ struct
+ {
+ int jump_count:16; /* note: signed */
+ unsigned pop_count:4;
+ unsigned pad0:12;
+ } if_else;
+
+ struct {
+ unsigned function:4;
+ unsigned int_type:1;
+ unsigned precision:1;
+ unsigned saturate:1;
+ unsigned data_type:1;
+ unsigned pad0:8;
+ unsigned response_length:4;
+ unsigned msg_length:4;
+ unsigned msg_target:4;
+ unsigned pad1:3;
+ unsigned end_of_thread:1;
+ } math;
+
+ struct {
+ unsigned binding_table_index:8;
+ unsigned sampler:4;
+ unsigned return_format:2;
+ unsigned msg_type:2;
+ unsigned response_length:4;
+ unsigned msg_length:4;
+ unsigned msg_target:4;
+ unsigned pad1:3;
+ unsigned end_of_thread:1;
+ } sampler;
+
+ struct brw_urb_immediate urb;
+
+ struct {
+ unsigned binding_table_index:8;
+ unsigned msg_control:4;
+ unsigned msg_type:2;
+ unsigned target_cache:2;
+ unsigned response_length:4;
+ unsigned msg_length:4;
+ unsigned msg_target:4;
+ unsigned pad1:3;
+ unsigned end_of_thread:1;
+ } dp_read;
+
+ struct {
+ unsigned binding_table_index:8;
+ unsigned msg_control:3;
+ unsigned pixel_scoreboard_clear:1;
+ unsigned msg_type:3;
+ unsigned send_commit_msg:1;
+ unsigned response_length:4;
+ unsigned msg_length:4;
+ unsigned msg_target:4;
+ unsigned pad1:3;
+ unsigned end_of_thread:1;
+ } dp_write;
+
+ struct {
+ unsigned pad:16;
+ unsigned response_length:4;
+ unsigned msg_length:4;
+ unsigned msg_target:4;
+ unsigned pad1:3;
+ unsigned end_of_thread:1;
+ } generic;
+
+ int d;
+ unsigned ud;
+ } bits3;
+};
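Each of header, bits1, bits2 and bits3 above packs exactly 32 bits, so the whole struct describes one 128-bit (16-byte) execution-unit instruction. A compile-time size check makes that assumption explicit; it holds on the 32-bit-int ABIs this driver targets, and the typedef name is arbitrary:

   typedef char brw_instruction_is_16_bytes
      [sizeof(struct brw_instruction) == 16 ? 1 : -1];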
+
+
+#endif
diff --git a/src/gallium/drivers/i965simple/brw_surface.c b/src/gallium/drivers/i965simple/brw_surface.c
new file mode 100644
index 0000000000..724a69b2ee
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_surface.c
@@ -0,0 +1,126 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "brw_blit.h"
+#include "brw_context.h"
+#include "brw_state.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "util/u_tile.h"
+#include "util/u_rect.h"
+
+
+
+/* Assumes all values are within bounds -- no checking at this level;
+ * do it higher up if required.
+ */
+static void
+brw_surface_copy(struct pipe_context *pipe,
+ struct pipe_surface *dst,
+ unsigned dstx, unsigned dsty,
+ struct pipe_surface *src,
+ unsigned srcx, unsigned srcy, unsigned width, unsigned height)
+{
+ assert( dst != src );
+ assert( dst->block.size == src->block.size );
+ assert( dst->block.width == src->block.width );
+ assert( dst->block.height == src->block.height );
+
+ if (0) {
+ void *dst_map = pipe->screen->surface_map( pipe->screen,
+ dst,
+ PIPE_BUFFER_USAGE_CPU_WRITE );
+
+ const void *src_map = pipe->screen->surface_map( pipe->screen,
+ src,
+ PIPE_BUFFER_USAGE_CPU_READ );
+
+ util_copy_rect(dst_map,
+ &dst->block,
+ dst->stride,
+ dstx, dsty,
+ width, height,
+ src_map,
+ src->stride,
+ srcx, srcy);
+
+ pipe->screen->surface_unmap(pipe->screen, src);
+ pipe->screen->surface_unmap(pipe->screen, dst);
+ }
+ else {
+ struct brw_texture *dst_tex = (struct brw_texture *)dst->texture;
+ struct brw_texture *src_tex = (struct brw_texture *)src->texture;
+ assert(dst->block.width == 1);
+ assert(dst->block.height == 1);
+ brw_copy_blit(brw_context(pipe),
+ FALSE,
+ dst->block.size,
+ (short) (src->stride/src->block.size), src_tex->buffer, src->offset, FALSE,
+ (short) (dst->stride/dst->block.size), dst_tex->buffer, dst->offset, FALSE,
+ (short) srcx, (short) srcy, (short) dstx, (short) dsty,
+ (short) width, (short) height, PIPE_LOGICOP_COPY);
+ }
+}
+
+
+static void
+brw_surface_fill(struct pipe_context *pipe,
+ struct pipe_surface *dst,
+ unsigned dstx, unsigned dsty,
+ unsigned width, unsigned height, unsigned value)
+{
+ if (0) {
+ void *dst_map = pipe->screen->surface_map( pipe->screen,
+ dst,
+ PIPE_BUFFER_USAGE_CPU_WRITE );
+
+ util_fill_rect(dst_map, &dst->block, dst->stride, dstx, dsty, width, height, value);
+
+ pipe->screen->surface_unmap(pipe->screen, dst);
+ }
+ else {
+ struct brw_texture *tex = (struct brw_texture *)dst->texture;
+ assert(dst->block.width == 1);
+ assert(dst->block.height == 1);
+ brw_fill_blit(brw_context(pipe),
+ dst->block.size,
+ (short) (dst->stride/dst->block.size),
+ tex->buffer, dst->offset, FALSE,
+ (short) dstx, (short) dsty,
+ (short) width, (short) height,
+ value);
+ }
+}
+
+
+void
+brw_init_surface_functions(struct brw_context *brw)
+{
+ brw->pipe.surface_copy = brw_surface_copy;
+ brw->pipe.surface_fill = brw_surface_fill;
+}
diff --git a/src/gallium/drivers/i965simple/brw_tex_layout.c b/src/gallium/drivers/i965simple/brw_tex_layout.c
new file mode 100644
index 0000000000..998ffaeac4
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_tex_layout.c
@@ -0,0 +1,380 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+/* Code to layout images in a mipmap tree for i965.
+ */
+
+#include "pipe/p_state.h"
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "brw_context.h"
+#include "brw_tex_layout.h"
+
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+#if 0
+unsigned intel_compressed_alignment(unsigned internalFormat)
+{
+ unsigned alignment = 4;
+
+ switch (internalFormat) {
+ case GL_COMPRESSED_RGB_FXT1_3DFX:
+ case GL_COMPRESSED_RGBA_FXT1_3DFX:
+ alignment = 8;
+ break;
+
+ default:
+ break;
+ }
+
+ return alignment;
+}
+#endif
+
+
+static void intel_miptree_set_image_offset(struct brw_texture *tex,
+ unsigned level,
+ unsigned img,
+ unsigned x, unsigned y)
+{
+ struct pipe_texture *pt = &tex->base;
+ if (img == 0 && level == 0)
+ assert(x == 0 && y == 0);
+ assert(img < tex->nr_images[level]);
+
+ tex->image_offset[level][img] = y * tex->stride + x * pt->block.size;
+}
+
+static void intel_miptree_set_level_info(struct brw_texture *tex,
+ unsigned level,
+ unsigned nr_images,
+ unsigned x, unsigned y,
+ unsigned w, unsigned h, unsigned d)
+{
+ struct pipe_texture *pt = &tex->base;
+
+ assert(level < PIPE_MAX_TEXTURE_LEVELS);
+
+ pt->width[level] = w;
+ pt->height[level] = h;
+ pt->depth[level] = d;
+
+ pt->nblocksx[level] = pf_get_nblocksx(&pt->block, w);
+ pt->nblocksy[level] = pf_get_nblocksy(&pt->block, h);
+
+ tex->level_offset[level] = y * tex->stride + x * tex->base.block.size;
+ tex->nr_images[level] = nr_images;
+
+ /*
+ DBG("%s level %d size: %d,%d,%d offset %d,%d (0x%x)\n", __FUNCTION__,
+ level, w, h, d, x, y, tex->level_offset[level]);
+ */
+
+ /* Not sure when this would happen, but anyway:
+ */
+ if (tex->image_offset[level]) {
+ FREE(tex->image_offset[level]);
+ tex->image_offset[level] = NULL;
+ }
+
+ assert(nr_images);
+ assert(!tex->image_offset[level]);
+
+ tex->image_offset[level] = (unsigned *) MALLOC(nr_images * sizeof(unsigned));
+ tex->image_offset[level][0] = 0;
+}
+
+static void i945_miptree_layout_2d(struct brw_texture *tex)
+{
+ struct pipe_texture *pt = &tex->base;
+ const int align_x = 2, align_y = 4;
+ unsigned level;
+ unsigned x = 0;
+ unsigned y = 0;
+ unsigned width = pt->width[0];
+ unsigned height = pt->height[0];
+ unsigned nblocksx = pt->nblocksx[0];
+ unsigned nblocksy = pt->nblocksy[0];
+
+ tex->stride = align(pt->nblocksx[0] * pt->block.size, 4);
+
+ /* May need to adjust pitch to accommodate the placement of
+ * the 2nd mipmap level. This occurs when the alignment
+ * constraints of mipmap placement push the right edge of the
+ * 2nd mipmap level out past the width of its parent.
+ */
+ if (pt->last_level > 0) {
+ unsigned mip1_nblocksx
+ = align(pf_get_nblocksx(&pt->block, minify(width)), align_x)
+ + pf_get_nblocksx(&pt->block, minify(minify(width)));
+
+ if (mip1_nblocksx > nblocksx)
+ tex->stride = mip1_nblocksx * pt->block.size;
+ }
+
+ /* Pitch must be a whole number of dwords
+ */
+ tex->stride = align(tex->stride, 64);
+ tex->total_nblocksy = 0;
+
+ for (level = 0; level <= pt->last_level; level++) {
+ intel_miptree_set_level_info(tex, level, 1, x, y, width,
+ height, 1);
+
+ nblocksy = align(nblocksy, align_y);
+
+ /* Because the images are packed better, the final offset
+ * might not be the maximal one:
+ */
+ tex->total_nblocksy = MAX2(tex->total_nblocksy, y + nblocksy);
+
+ /* Layout_below: step right after second mipmap level.
+ */
+ if (level == 1) {
+ x += align(nblocksx, align_x);
+ }
+ else {
+ y += nblocksy;
+ }
+
+ width = minify(width);
+ height = minify(height);
+ nblocksx = pf_get_nblocksx(&pt->block, width);
+ nblocksy = pf_get_nblocksy(&pt->block, height);
+ }
+}
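A standalone worked example of the 2D "layout below" scheme implemented above: level 0 sits at the top, level 1 goes directly below it, and levels 2 and onward are placed to the right of level 1 and then stack downwards. It assumes an uncompressed format with 1x1 blocks of 4 bytes per texel (so nblocksx == width and nblocksy == height), skips the second-mip pitch adjustment, and uses made-up dimensions:

   #include <stdio.h>

   static unsigned minify(unsigned d) { return d > 1 ? d / 2 : 1; }
   static unsigned align_to(unsigned v, unsigned a) { return (v + a - 1) & ~(a - 1); }

   int main(void)
   {
      const unsigned align_x = 2, align_y = 4, cpp = 4;
      unsigned width = 128, height = 128, last_level = 7;
      unsigned stride = align_to(width * cpp, 64);   /* pitch in bytes, as above */
      unsigned x = 0, y = 0, level;

      for (level = 0; level <= last_level; level++) {
         printf("level %u: %ux%u at (%u,%u), offset %u bytes\n",
                level, width, height, x, y, y * stride + x * cpp);

         if (level == 1)
            x += align_to(width, align_x);   /* step right after the 2nd level */
         else
            y += align_to(height, align_y);  /* otherwise stack downwards */

         width = minify(width);
         height = minify(height);
      }
      return 0;
   }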
+
+static boolean brw_miptree_layout(struct brw_texture *tex)
+{
+ struct pipe_texture *pt = &tex->base;
+ /* XXX: these vary depending on image format:
+ */
+/* int align_w = 4; */
+
+ switch (pt->target) {
+ case PIPE_TEXTURE_CUBE:
+ case PIPE_TEXTURE_3D: {
+ unsigned width = pt->width[0];
+ unsigned height = pt->height[0];
+ unsigned depth = pt->depth[0];
+ unsigned nblocksx = pt->nblocksx[0];
+ unsigned nblocksy = pt->nblocksy[0];
+ unsigned pack_x_pitch, pack_x_nr;
+ unsigned pack_y_pitch;
+ unsigned level;
+ unsigned align_h = 2;
+ unsigned align_w = 4;
+
+ tex->total_nblocksy = 0;
+
+ tex->stride = align(pt->nblocksx[0], 4);
+ pack_y_pitch = align(pt->nblocksy[0], align_h);
+
+ pack_x_pitch = tex->stride / pt->block.size;
+ pack_x_nr = 1;
+
+ for (level = 0; level <= pt->last_level; level++) {
+ unsigned nr_images = pt->target == PIPE_TEXTURE_3D ? depth : 6;
+ int x = 0;
+ int y = 0;
+ uint q, j;
+
+ intel_miptree_set_level_info(tex, level, nr_images,
+ 0, tex->total_nblocksy,
+ width, height, depth);
+
+ for (q = 0; q < nr_images;) {
+ for (j = 0; j < pack_x_nr && q < nr_images; j++, q++) {
+ intel_miptree_set_image_offset(tex, level, q, x, y);
+ x += pack_x_pitch;
+ }
+
+ x = 0;
+ y += pack_y_pitch;
+ }
+
+
+ tex->total_nblocksy += y;
+ width = minify(width);
+ height = minify(height);
+ depth = minify(depth);
+ nblocksx = pf_get_nblocksx(&pt->block, width);
+ nblocksy = pf_get_nblocksy(&pt->block, height);
+
+ if (pf_is_compressed(pt->format)) {
+ pack_y_pitch = (height + 3) / 4;
+
+ if (pack_x_pitch > align(width, align_w)) {
+ pack_x_pitch = align(width, align_w);
+ pack_x_nr <<= 1;
+ }
+ } else {
+ if (pack_x_pitch > 4) {
+ pack_x_pitch >>= 1;
+ pack_x_nr <<= 1;
+ assert(pack_x_pitch * pack_x_nr * pt->block.size <= tex->stride);
+ }
+
+ if (pack_y_pitch > 2) {
+ pack_y_pitch >>= 1;
+ pack_y_pitch = align(pack_y_pitch, align_h);
+ }
+ }
+
+ }
+ break;
+ }
+
+ default:
+ i945_miptree_layout_2d(tex);
+ break;
+ }
+#if 0
+ PRINT("%s: %dx%dx%d - sz 0x%x\n", __FUNCTION__,
+ pt->pitch,
+ pt->total_nblocksy,
+ pt->block.size,
+ pt->stride * pt->total_nblocksy );
+#endif
+
+ return TRUE;
+}
+
+
+static struct pipe_texture *
+brw_texture_create_screen(struct pipe_screen *screen,
+ const struct pipe_texture *templat)
+{
+ struct brw_texture *tex = CALLOC_STRUCT(brw_texture);
+
+ if (tex) {
+ tex->base = *templat;
+ pipe_reference_init(&tex->base.reference, 1);
+
+ tex->base.nblocksx[0] = pf_get_nblocksx(&tex->base.block, tex->base.width[0]);
+ tex->base.nblocksy[0] = pf_get_nblocksy(&tex->base.block, tex->base.height[0]);
+
+ if (brw_miptree_layout(tex))
+ tex->buffer = screen->buffer_create(screen, 64,
+ PIPE_BUFFER_USAGE_PIXEL,
+ tex->stride *
+ tex->total_nblocksy);
+
+ if (!tex->buffer) {
+ FREE(tex);
+ return NULL;
+ }
+ }
+
+ return &tex->base;
+}
+
+
+static void
+brw_texture_destroy_screen(struct pipe_texture *pt)
+{
+ struct brw_texture *tex = (struct brw_texture *)pt;
+ uint i;
+
+ /*
+ DBG("%s deleting %p\n", __FUNCTION__, (void *) tex);
+ */
+
+ pipe_buffer_reference(&tex->buffer, NULL);
+
+ for (i = 0; i < PIPE_MAX_TEXTURE_LEVELS; i++)
+ if (tex->image_offset[i])
+ FREE(tex->image_offset[i]);
+
+ FREE(tex);
+}
+
+
+static struct pipe_surface *
+brw_get_tex_surface_screen(struct pipe_screen *screen,
+ struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice)
+{
+ struct brw_texture *tex = (struct brw_texture *)pt;
+ struct pipe_surface *ps;
+ unsigned offset; /* in bytes */
+
+ offset = tex->level_offset[level];
+
+ if (pt->target == PIPE_TEXTURE_CUBE) {
+ offset += tex->image_offset[level][face];
+ }
+ else if (pt->target == PIPE_TEXTURE_3D) {
+ offset += tex->image_offset[level][zslice];
+ }
+ else {
+ assert(face == 0);
+ assert(zslice == 0);
+ }
+
+ ps = CALLOC_STRUCT(pipe_surface);
+ if (ps) {
+ pipe_reference_init(&ps->reference, 1);
+ pipe_texture_reference(&ps->texture, pt);
+ ps->format = pt->format;
+ ps->width = pt->width[level];
+ ps->height = pt->height[level];
+ ps->block = pt->block;
+ ps->nblocksx = pt->nblocksx[level];
+ ps->nblocksy = pt->nblocksy[level];
+ ps->stride = tex->stride;
+ ps->offset = offset;
+ }
+ return ps;
+}
+
+
+void
+brw_init_texture_functions(struct brw_context *brw)
+{
+// brw->pipe.texture_update = brw_texture_update;
+}
+
+
+void
+brw_init_screen_texture_funcs(struct pipe_screen *screen)
+{
+ screen->texture_create = brw_texture_create_screen;
+ screen->texture_destroy = brw_texture_destroy_screen;
+ screen->get_tex_surface = brw_get_tex_surface_screen;
+}
+
diff --git a/src/gallium/drivers/i965simple/brw_tex_layout.h b/src/gallium/drivers/i965simple/brw_tex_layout.h
new file mode 100644
index 0000000000..a6b6ba8146
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_tex_layout.h
@@ -0,0 +1,44 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+
+
+#ifndef BRW_TEX_LAYOUT_H
+#define BRW_TEX_LAYOUT_H
+
+
+struct brw_context;
+struct pipe_screen;
+
+
+extern void
+brw_init_texture_functions(struct brw_context *brw);
+
+extern void
+brw_init_screen_texture_funcs(struct pipe_screen *screen);
+
+
+#endif
diff --git a/src/gallium/drivers/i965simple/brw_urb.c b/src/gallium/drivers/i965simple/brw_urb.c
new file mode 100644
index 0000000000..101a4367b9
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_urb.c
@@ -0,0 +1,186 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "brw_context.h"
+//#include "brw_state.h"
+#include "brw_batch.h"
+#include "brw_defines.h"
+
+#define VS 0
+#define GS 1
+#define CLP 2
+#define SF 3
+#define CS 4
+
+/* XXX: Are the min_entry_size numbers useful?
+ * XXX: Verify min_nr_entries, esp for VS.
+ * XXX: Verify SF min_entry_size.
+ */
+static const struct {
+ unsigned min_nr_entries;
+ unsigned preferred_nr_entries;
+ unsigned min_entry_size;
+ unsigned max_entry_size;
+} limits[CS+1] = {
+ { 8, 32, 1, 5 }, /* vs */
+ { 4, 8, 1, 5 }, /* gs */
+ { 6, 8, 1, 5 }, /* clp */
+ { 1, 8, 1, 12 }, /* sf */
+ { 1, 4, 1, 32 } /* cs */
+};
+
+
+static boolean check_urb_layout( struct brw_context *brw )
+{
+ brw->urb.vs_start = 0;
+ brw->urb.gs_start = brw->urb.nr_vs_entries * brw->urb.vsize;
+ brw->urb.clip_start = brw->urb.gs_start + brw->urb.nr_gs_entries * brw->urb.vsize;
+ brw->urb.sf_start = brw->urb.clip_start + brw->urb.nr_clip_entries * brw->urb.vsize;
+ brw->urb.cs_start = brw->urb.sf_start + brw->urb.nr_sf_entries * brw->urb.sfsize;
+
+ return brw->urb.cs_start + brw->urb.nr_cs_entries * brw->urb.csize <= 256;
+}
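A standalone worked example of the partitioning check_urb_layout() computes: the five regions are packed back to back and must fit in the 256 available URB rows. It uses the preferred_nr_entries values from the limits[] table above together with illustrative entry sizes; the sizes are assumptions, not values from the driver:

   #include <stdio.h>

   int main(void)
   {
      unsigned nr_vs = 32, nr_gs = 8, nr_clip = 8, nr_sf = 8, nr_cs = 4;
      unsigned vsize = 4, sfsize = 8, csize = 2;   /* assumed entry sizes, in rows */

      unsigned vs_start = 0;
      unsigned gs_start = vs_start + nr_vs * vsize;
      unsigned clip_start = gs_start + nr_gs * vsize;
      unsigned sf_start = clip_start + nr_clip * vsize;
      unsigned cs_start = sf_start + nr_sf * sfsize;
      unsigned end = cs_start + nr_cs * csize;

      printf("VS %u  GS %u  CLIP %u  SF %u  CS %u  end %u (limit 256)\n",
             vs_start, gs_start, clip_start, sf_start, cs_start, end);
      /* Prints 0, 128, 160, 192, 256, end 264 -- over the limit, so
       * recalculate_urb_fence() below would fall back to the
       * min_nr_entries values and mark the layout as constrained.
       */
      return 0;
   }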
+
+/* Most minimal update: forces a re-emit of the URB fence packet after
+ * the GS unit is turned on or off.
+ */
+static void recalculate_urb_fence( struct brw_context *brw )
+{
+ unsigned csize = brw->curbe.total_size;
+ unsigned vsize = brw->vs.prog_data->urb_entry_size;
+ unsigned sfsize = brw->sf.prog_data->urb_entry_size;
+
+ if (csize < limits[CS].min_entry_size)
+ csize = limits[CS].min_entry_size;
+
+ if (vsize < limits[VS].min_entry_size)
+ vsize = limits[VS].min_entry_size;
+
+ if (sfsize < limits[SF].min_entry_size)
+ sfsize = limits[SF].min_entry_size;
+
+ if (brw->urb.vsize < vsize ||
+ brw->urb.sfsize < sfsize ||
+ brw->urb.csize < csize ||
+ (brw->urb.constrained && (brw->urb.vsize > vsize ||
+ brw->urb.sfsize > sfsize ||
+ brw->urb.csize > csize))) {
+
+
+ brw->urb.csize = csize;
+ brw->urb.sfsize = sfsize;
+ brw->urb.vsize = vsize;
+
+ brw->urb.nr_vs_entries = limits[VS].preferred_nr_entries;
+ brw->urb.nr_gs_entries = limits[GS].preferred_nr_entries;
+ brw->urb.nr_clip_entries = limits[CLP].preferred_nr_entries;
+ brw->urb.nr_sf_entries = limits[SF].preferred_nr_entries;
+ brw->urb.nr_cs_entries = limits[CS].preferred_nr_entries;
+
+ if (!check_urb_layout(brw)) {
+ brw->urb.nr_vs_entries = limits[VS].min_nr_entries;
+ brw->urb.nr_gs_entries = limits[GS].min_nr_entries;
+ brw->urb.nr_clip_entries = limits[CLP].min_nr_entries;
+ brw->urb.nr_sf_entries = limits[SF].min_nr_entries;
+ brw->urb.nr_cs_entries = limits[CS].min_nr_entries;
+
+ brw->urb.constrained = 1;
+
+ if (!check_urb_layout(brw)) {
+ /* This is impossible, given the maximal sizes of urb
+ * entries and the values for minimum nr of entries
+ * provided above.
+ */
+ debug_printf("couldn't calculate URB layout!\n");
+ exit(1);
+ }
+
+ if (BRW_DEBUG & (DEBUG_URB|DEBUG_FALLBACKS))
+ debug_printf("URB CONSTRAINED\n");
+ }
+ else
+ brw->urb.constrained = 0;
+
+ if (BRW_DEBUG & DEBUG_URB)
+ debug_printf("URB fence: %d ..VS.. %d ..GS.. %d ..CLP.. %d ..SF.. %d ..CS.. %d\n",
+ brw->urb.vs_start,
+ brw->urb.gs_start,
+ brw->urb.clip_start,
+ brw->urb.sf_start,
+ brw->urb.cs_start,
+ 256);
+
+ brw->state.dirty.brw |= BRW_NEW_URB_FENCE;
+ }
+}
+
+
+const struct brw_tracked_state brw_recalculate_urb_fence = {
+ .dirty = {
+ .brw = BRW_NEW_CURBE_OFFSETS,
+ .cache = (CACHE_NEW_VS_PROG |
+ CACHE_NEW_SF_PROG)
+ },
+ .update = recalculate_urb_fence
+};
+
+
+
+
+
+void brw_upload_urb_fence(struct brw_context *brw)
+{
+ struct brw_urb_fence uf;
+ memset(&uf, 0, sizeof(uf));
+
+ uf.header.opcode = CMD_URB_FENCE;
+ uf.header.length = sizeof(uf)/4-2;
+ uf.header.vs_realloc = 1;
+ uf.header.gs_realloc = 1;
+ uf.header.clp_realloc = 1;
+ uf.header.sf_realloc = 1;
+ uf.header.vfe_realloc = 1;
+ uf.header.cs_realloc = 1;
+
+ /* The assignments below are correct even though they look shifted:
+ * each unit's fence is the end of its URB region, which is where the
+ * next unit's region starts.
+ *
+ * There are 256 urb reg pairs in total.
+ */
+ uf.bits0.vs_fence = brw->urb.gs_start;
+ uf.bits0.gs_fence = brw->urb.clip_start;
+ uf.bits0.clp_fence = brw->urb.sf_start;
+ uf.bits1.sf_fence = brw->urb.cs_start;
+ uf.bits1.cs_fence = 256;
+
+ BRW_BATCH_STRUCT(brw, &uf);
+}
diff --git a/src/gallium/drivers/i965simple/brw_util.c b/src/gallium/drivers/i965simple/brw_util.c
new file mode 100644
index 0000000000..42391d7c8c
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_util.c
@@ -0,0 +1,104 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "brw_util.h"
+#include "brw_defines.h"
+
+#include "pipe/p_defines.h"
+
+unsigned brw_count_bits( unsigned val )
+{
+ unsigned i;
+ for (i = 0; val ; val >>= 1)
+ if (val & 1)
+ i++;
+ return i;
+}
+
+
+unsigned brw_translate_blend_equation( int mode )
+{
+ switch (mode) {
+ case PIPE_BLEND_ADD:
+ return BRW_BLENDFUNCTION_ADD;
+ case PIPE_BLEND_MIN:
+ return BRW_BLENDFUNCTION_MIN;
+ case PIPE_BLEND_MAX:
+ return BRW_BLENDFUNCTION_MAX;
+ case PIPE_BLEND_SUBTRACT:
+ return BRW_BLENDFUNCTION_SUBTRACT;
+ case PIPE_BLEND_REVERSE_SUBTRACT:
+ return BRW_BLENDFUNCTION_REVERSE_SUBTRACT;
+ default:
+ assert(0);
+ return BRW_BLENDFUNCTION_ADD;
+ }
+}
+
+unsigned brw_translate_blend_factor( int factor )
+{
+ switch(factor) {
+ case PIPE_BLENDFACTOR_ZERO:
+ return BRW_BLENDFACTOR_ZERO;
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ return BRW_BLENDFACTOR_SRC_ALPHA;
+ case PIPE_BLENDFACTOR_ONE:
+ return BRW_BLENDFACTOR_ONE;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ return BRW_BLENDFACTOR_SRC_COLOR;
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ return BRW_BLENDFACTOR_INV_SRC_COLOR;
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ return BRW_BLENDFACTOR_DST_COLOR;
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ return BRW_BLENDFACTOR_INV_DST_COLOR;
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ return BRW_BLENDFACTOR_INV_SRC_ALPHA;
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ return BRW_BLENDFACTOR_DST_ALPHA;
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ return BRW_BLENDFACTOR_INV_DST_ALPHA;
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ return BRW_BLENDFACTOR_SRC_ALPHA_SATURATE;
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ return BRW_BLENDFACTOR_CONST_COLOR;
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ return BRW_BLENDFACTOR_INV_CONST_COLOR;
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ return BRW_BLENDFACTOR_CONST_ALPHA;
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ return BRW_BLENDFACTOR_INV_CONST_ALPHA;
+ default:
+ assert(0);
+ return BRW_BLENDFACTOR_ZERO;
+ }
+}
diff --git a/src/gallium/drivers/i965simple/brw_util.h b/src/gallium/drivers/i965simple/brw_util.h
new file mode 100644
index 0000000000..d60e5934db
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_util.h
@@ -0,0 +1,43 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#ifndef BRW_UTIL_H
+#define BRW_UTIL_H
+
+#include "pipe/p_state.h"
+
+extern unsigned brw_count_bits( unsigned val );
+extern unsigned brw_translate_blend_factor( int factor );
+extern unsigned brw_translate_blend_equation( int mode );
+
+
+#endif
diff --git a/src/gallium/drivers/i965simple/brw_vs.c b/src/gallium/drivers/i965simple/brw_vs.c
new file mode 100644
index 0000000000..92327e896d
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_vs.c
@@ -0,0 +1,120 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "brw_context.h"
+#include "brw_vs.h"
+#include "brw_util.h"
+#include "brw_state.h"
+
+
+static void do_vs_prog( struct brw_context *brw,
+ const struct brw_vertex_program *vp,
+ struct brw_vs_prog_key *key )
+{
+ unsigned program_size;
+ const unsigned *program;
+ struct brw_vs_compile c;
+
+ memset(&c, 0, sizeof(c));
+ memcpy(&c.key, key, sizeof(*key));
+
+ brw_init_compile(&c.func);
+ c.vp = vp;
+
+ c.prog_data.outputs_written = vp->info.num_outputs;
+ c.prog_data.inputs_read = vp->info.num_inputs;
+
+#if 0
+ if (c.key.copy_edgeflag) {
+ c.prog_data.outputs_written |= 1<<VERT_RESULT_EDGE;
+ c.prog_data.inputs_read |= 1<<VERT_ATTRIB_EDGEFLAG;
+ }
+#endif
+
+ /* Emit GEN4 code.
+ */
+ brw_vs_emit(&c);
+
+ /* get the program
+ */
+ program = brw_get_program(&c.func, &program_size);
+
+ /* Cache the compiled program and remember where it was uploaded:
+ */
+ brw->vs.prog_gs_offset = brw_upload_cache( &brw->cache[BRW_VS_PROG],
+ &c.key,
+ sizeof(c.key),
+ program,
+ program_size,
+ &c.prog_data,
+ &brw->vs.prog_data);
+}
+
+
+static void brw_upload_vs_prog( struct brw_context *brw )
+{
+ struct brw_vs_prog_key key;
+ const struct brw_vertex_program *vp = brw->attribs.VertexProgram;
+
+ assert(vp);
+
+ memset(&key, 0, sizeof(key));
+
+ /* Just upload the program verbatim for now. Always send it all
+ * the inputs it asks for, whether they are varying or not.
+ */
+ key.program_string_id = vp->id;
+ key.nr_userclip = brw->attribs.Clip.nr;
+ key.copy_edgeflag = (brw->attribs.Raster->fill_cw != PIPE_POLYGON_MODE_FILL ||
+ brw->attribs.Raster->fill_ccw != PIPE_POLYGON_MODE_FILL);
+
+ /* Check the cache first: if this key has already been compiled,
+ * reuse the cached program.
+ */
+ if (brw_search_cache(&brw->cache[BRW_VS_PROG],
+ &key, sizeof(key),
+ &brw->vs.prog_data,
+ &brw->vs.prog_gs_offset))
+ return;
+
+ do_vs_prog(brw, vp, &key);
+}
+
+
+/* See brw_vs.c:
+ */
+const struct brw_tracked_state brw_vs_prog = {
+ .dirty = {
+ .brw = BRW_NEW_VS,
+ .cache = 0
+ },
+ .update = brw_upload_vs_prog
+};
diff --git a/src/gallium/drivers/i965simple/brw_vs.h b/src/gallium/drivers/i965simple/brw_vs.h
new file mode 100644
index 0000000000..070f9dfcae
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_vs.h
@@ -0,0 +1,82 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#ifndef BRW_VS_H
+#define BRW_VS_H
+
+
+#include "brw_context.h"
+#include "brw_eu.h"
+
+
+struct brw_vs_prog_key {
+ unsigned program_string_id;
+ unsigned nr_userclip:4;
+ unsigned copy_edgeflag:1;
+ unsigned know_w_is_one:1;
+ unsigned pad:26;
+};
+
+
+struct brw_vs_compile {
+ struct brw_compile func;
+ struct brw_vs_prog_key key;
+ struct brw_vs_prog_data prog_data;
+
+ const struct brw_vertex_program *vp;
+
+ unsigned nr_inputs;
+
+ unsigned first_output;
+ unsigned nr_outputs;
+
+ unsigned first_tmp;
+ unsigned last_tmp;
+
+ struct brw_reg r0;
+ struct brw_reg r1;
+ struct brw_reg regs[12][128];
+ struct brw_reg tmp;
+ struct brw_reg stack;
+
+ struct {
+ boolean used_in_src;
+ struct brw_reg reg;
+ } output_regs[128];
+
+ struct brw_reg userplane[6];
+
+};
+
+void brw_vs_emit( struct brw_vs_compile *c );
+
+#endif
diff --git a/src/gallium/drivers/i965simple/brw_vs_emit.c b/src/gallium/drivers/i965simple/brw_vs_emit.c
new file mode 100644
index 0000000000..3ee82d95b3
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_vs_emit.c
@@ -0,0 +1,1330 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "brw_context.h"
+#include "brw_vs.h"
+
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+
+struct brw_prog_info {
+ unsigned num_temps;
+ unsigned num_addrs;
+ unsigned num_consts;
+
+ unsigned writes_psize;
+
+ unsigned pos_idx;
+ unsigned result_edge_idx;
+ unsigned edge_flag_idx;
+ unsigned psize_idx;
+};
+
+/* Do things as simply as possible. Allocate and populate all regs
+ * ahead of time.
+ */
+static void brw_vs_alloc_regs( struct brw_vs_compile *c,
+ struct brw_prog_info *info )
+{
+ unsigned i, reg = 0, mrf;
+ unsigned nr_params;
+
+ /* r0 -- reserved as usual
+ */
+ c->r0 = brw_vec8_grf(reg, 0); reg++;
+
+ /* User clip planes from curbe:
+ */
+ if (c->key.nr_userclip) {
+ for (i = 0; i < c->key.nr_userclip; i++) {
+ c->userplane[i] = stride( brw_vec4_grf(reg+3+i/2, (i%2) * 4), 0, 4, 1);
+ }
+
+ /* Deal with curbe alignment:
+ */
+ reg += ((6+c->key.nr_userclip+3)/4)*2;
+ }
+
+ /* Vertex program parameters from curbe:
+ */
+ nr_params = c->prog_data.max_const;
+ for (i = 0; i < nr_params; i++) {
+ c->regs[TGSI_FILE_CONSTANT][i] = stride(brw_vec4_grf(reg+i/2, (i%2) * 4), 0, 4, 1);
+ }
+ reg += (nr_params+1)/2;
+ c->prog_data.curb_read_length = reg - 1;
+
+
+
+ /* Allocate input regs:
+ */
+ c->nr_inputs = c->vp->info.num_inputs;
+ for (i = 0; i < c->nr_inputs; i++) {
+ c->regs[TGSI_FILE_INPUT][i] = brw_vec8_grf(reg, 0);
+ reg++;
+ }
+
+
+ /* Allocate outputs: TODO: could organize the non-position outputs
+ * to go straight into message regs.
+ */
+ c->nr_outputs = 0;
+ c->first_output = reg;
+ mrf = 4;
+ for (i = 0; i < c->vp->info.num_outputs; i++) {
+ c->nr_outputs++;
+#if 0
+ if (i == VERT_RESULT_HPOS) {
+ c->regs[TGSI_FILE_OUTPUT][i] = brw_vec8_grf(reg, 0);
+ reg++;
+ }
+ else if (i == VERT_RESULT_PSIZ) {
+ c->regs[TGSI_FILE_OUTPUT][i] = brw_vec8_grf(reg, 0);
+ reg++;
+ mrf++; /* just a placeholder? XXX fix later stages & remove this */
+ }
+ else {
+ c->regs[TGSI_FILE_OUTPUT][i] = brw_message_reg(mrf);
+ mrf++;
+ }
+#else
+ /*treat pos differently for now */
+ if (i == info->pos_idx) {
+ c->regs[TGSI_FILE_OUTPUT][i] = brw_vec8_grf(reg, 0);
+ reg++;
+ } else {
+ c->regs[TGSI_FILE_OUTPUT][i] = brw_message_reg(mrf);
+ mrf++;
+ }
+#endif
+ }
+
+ /* Allocate program temporaries:
+ */
+ for (i = 0; i < info->num_temps; i++) {
+ c->regs[TGSI_FILE_TEMPORARY][i] = brw_vec8_grf(reg, 0);
+ reg++;
+ }
+
+ /* Address reg(s). Don't try to use the internal address reg until
+ * deref time.
+ */
+ for (i = 0; i < info->num_addrs; i++) {
+ c->regs[TGSI_FILE_ADDRESS][i] = brw_reg(BRW_GENERAL_REGISTER_FILE,
+ reg,
+ 0,
+ BRW_REGISTER_TYPE_D,
+ BRW_VERTICAL_STRIDE_8,
+ BRW_WIDTH_8,
+ BRW_HORIZONTAL_STRIDE_1,
+ BRW_SWIZZLE_XXXX,
+ TGSI_WRITEMASK_X);
+ reg++;
+ }
+
+ for (i = 0; i < 128; i++) {
+ if (c->output_regs[i].used_in_src) {
+ c->output_regs[i].reg = brw_vec8_grf(reg, 0);
+ reg++;
+ }
+ }
+
+ c->stack = brw_uw16_reg(BRW_GENERAL_REGISTER_FILE, reg, 0);
+ reg += 2;
+
+
+ /* Some opcodes need an internal temporary:
+ */
+ c->first_tmp = reg;
+ c->last_tmp = reg; /* for allocation purposes */
+
+ /* Each input reg holds data from two vertices. The
+ * urb_read_length is the number of registers read from *each*
+ * vertex urb, so is half the amount:
+ */
+ c->prog_data.urb_read_length = (c->nr_inputs+1)/2;
+
+ c->prog_data.urb_entry_size = (c->nr_outputs+2+3)/4;
+ c->prog_data.total_grf = reg;
+}
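+
+/* A rough worked example of the counting above (illustrative numbers
+ * only, not part of the generated code): with nr_userclip = 2,
+ * max_const = 4, 4 inputs and the position as the only GRF output,
+ *
+ *    reg  = 1                            after reserving r0
+ *    reg += ((6 + 2 + 3) / 4) * 2 = 4    curbe space incl. clip planes
+ *    reg += (4 + 1) / 2 = 2              constants, two vec4s per GRF
+ *    curb_read_length = reg - 1 = 6
+ *    urb_read_length  = (4 + 1) / 2 = 2
+ *
+ * i.e. each curbe/input GRF carries two vec4 slots, which is why both
+ * read lengths come out at roughly half the vec4 counts.
+ */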
+
+
+static struct brw_reg get_tmp( struct brw_vs_compile *c )
+{
+ struct brw_reg tmp = brw_vec8_grf(c->last_tmp, 0);
+
+ if (++c->last_tmp > c->prog_data.total_grf)
+ c->prog_data.total_grf = c->last_tmp;
+
+ return tmp;
+}
+
+static void release_tmp( struct brw_vs_compile *c, struct brw_reg tmp )
+{
+ if (tmp.nr == c->last_tmp-1)
+ c->last_tmp--;
+}
+
+static void release_tmps( struct brw_vs_compile *c )
+{
+ c->last_tmp = c->first_tmp;
+}
+
+
+static void unalias1( struct brw_vs_compile *c,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ void (*func)( struct brw_vs_compile *,
+ struct brw_reg,
+ struct brw_reg ))
+{
+ if (dst.file == arg0.file && dst.nr == arg0.nr) {
+ struct brw_compile *p = &c->func;
+ struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
+ func(c, tmp, arg0);
+ brw_MOV(p, dst, tmp);
+ }
+ else {
+ func(c, dst, arg0);
+ }
+}
+
+static void unalias2( struct brw_vs_compile *c,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ struct brw_reg arg1,
+ void (*func)( struct brw_vs_compile *,
+ struct brw_reg,
+ struct brw_reg,
+ struct brw_reg ))
+{
+ if ((dst.file == arg0.file && dst.nr == arg0.nr) ||
+ (dst.file == arg1.file && dst.nr == arg1.nr)) {
+ struct brw_compile *p = &c->func;
+ struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
+ func(c, tmp, arg0, arg1);
+ brw_MOV(p, dst, tmp);
+ }
+ else {
+ func(c, dst, arg0, arg1);
+ }
+}
+
+static void emit_sop( struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ struct brw_reg arg1,
+ unsigned cond)
+{
+ brw_push_insn_state(p);
+   /* CMP with a null destination still updates the flag register, which
+    * then predicates the final MOV.  Write 0.0 unconditionally first and
+    * 1.0 only where the comparison holds, giving the 0.0/1.0 result the
+    * TGSI set-on opcodes expect.
+    */
+   brw_CMP(p, brw_null_reg(), cond, arg0, arg1);
+   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+   brw_MOV(p, dst, brw_imm_f(0.0f));
+   brw_set_predicate_control(p, BRW_PREDICATE_NORMAL);
+   brw_MOV(p, dst, brw_imm_f(1.0f));
+ brw_pop_insn_state(p);
+}
+
+static void emit_seq( struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ struct brw_reg arg1 )
+{
+ emit_sop(p, dst, arg0, arg1, BRW_CONDITIONAL_EQ);
+}
+
+static void emit_sne( struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ struct brw_reg arg1 )
+{
+ emit_sop(p, dst, arg0, arg1, BRW_CONDITIONAL_NEQ);
+}
+
+static void emit_slt( struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ struct brw_reg arg1 )
+{
+ emit_sop(p, dst, arg0, arg1, BRW_CONDITIONAL_L);
+}
+
+static void emit_sle( struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ struct brw_reg arg1 )
+{
+ emit_sop(p, dst, arg0, arg1, BRW_CONDITIONAL_LE);
+}
+
+static void emit_sgt( struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ struct brw_reg arg1 )
+{
+ emit_sop(p, dst, arg0, arg1, BRW_CONDITIONAL_G);
+}
+
+static void emit_sge( struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ struct brw_reg arg1 )
+{
+ emit_sop(p, dst, arg0, arg1, BRW_CONDITIONAL_GE);
+}
+
+static void emit_max( struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ struct brw_reg arg1 )
+{
+ brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, arg1);
+ brw_SEL(p, dst, arg1, arg0);
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+}
+
+static void emit_min( struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ struct brw_reg arg1 )
+{
+ brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, arg1);
+ brw_SEL(p, dst, arg0, arg1);
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+}
+
+
+static void emit_math1( struct brw_vs_compile *c,
+ unsigned function,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ unsigned precision)
+{
+ /* There are various odd behaviours with SEND on the simulator. In
+ * addition there are documented issues with the fact that the GEN4
+ * processor doesn't do dependency control properly on SEND
+ * results. So, on balance, this kludge to get around failures
+ * with writemasked math results looks like it might be necessary
+ * whether that turns out to be a simulator bug or not:
+ */
+ struct brw_compile *p = &c->func;
+ struct brw_reg tmp = dst;
+ boolean need_tmp = (dst.dw1.bits.writemask != 0xf ||
+ dst.file != BRW_GENERAL_REGISTER_FILE);
+
+ if (need_tmp)
+ tmp = get_tmp(c);
+
+ brw_math(p,
+ tmp,
+ function,
+ BRW_MATH_SATURATE_NONE,
+ 2,
+ arg0,
+ BRW_MATH_DATA_SCALAR,
+ precision);
+
+ if (need_tmp) {
+ brw_MOV(p, dst, tmp);
+ release_tmp(c, tmp);
+ }
+}
+
+static void emit_math2( struct brw_vs_compile *c,
+ unsigned function,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ struct brw_reg arg1,
+ unsigned precision)
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg tmp = dst;
+ boolean need_tmp = (dst.dw1.bits.writemask != 0xf ||
+ dst.file != BRW_GENERAL_REGISTER_FILE);
+
+ if (need_tmp)
+ tmp = get_tmp(c);
+
+ brw_MOV(p, brw_message_reg(3), arg1);
+
+ brw_math(p,
+ tmp,
+ function,
+ BRW_MATH_SATURATE_NONE,
+ 2,
+ arg0,
+ BRW_MATH_DATA_SCALAR,
+ precision);
+
+ if (need_tmp) {
+ brw_MOV(p, dst, tmp);
+ release_tmp(c, tmp);
+ }
+}
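+
+/* For two-operand mathbox functions such as POW, the second operand is
+ * not encoded in the instruction itself; it is passed through message
+ * register m3, which is what the extra MOV above sets up before
+ * brw_math() issues the send (rough description of the GEN4
+ * extended-math interface).
+ */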
+
+
+
+static void emit_exp_noalias( struct brw_vs_compile *c,
+ struct brw_reg dst,
+ struct brw_reg arg0 )
+{
+ struct brw_compile *p = &c->func;
+
+
+ if (dst.dw1.bits.writemask & TGSI_WRITEMASK_X) {
+ struct brw_reg tmp = get_tmp(c);
+ struct brw_reg tmp_d = retype(tmp, BRW_REGISTER_TYPE_D);
+
+ /* tmp_d = floor(arg0.x) */
+ brw_RNDD(p, tmp_d, brw_swizzle1(arg0, 0));
+
+ /* result[0] = 2.0 ^ tmp */
+
+ /* Adjust exponent for floating point:
+ * exp += 127
+ */
+ brw_ADD(p, brw_writemask(tmp_d, TGSI_WRITEMASK_X), tmp_d, brw_imm_d(127));
+
+ /* Install exponent and sign.
+ * Excess drops off the edge:
+ */
+ brw_SHL(p, brw_writemask(retype(dst, BRW_REGISTER_TYPE_D), TGSI_WRITEMASK_X),
+ tmp_d, brw_imm_d(23));
+
+ release_tmp(c, tmp);
+ }
+
+ if (dst.dw1.bits.writemask & TGSI_WRITEMASK_Y) {
+ /* result[1] = arg0.x - floor(arg0.x) */
+ brw_FRC(p, brw_writemask(dst, TGSI_WRITEMASK_Y), brw_swizzle1(arg0, 0));
+ }
+
+ if (dst.dw1.bits.writemask & TGSI_WRITEMASK_Z) {
+ /* As with the LOG instruction, we might be better off just
+ * doing a taylor expansion here, seeing as we have to do all
+ * the prep work.
+ *
+ * If mathbox partial precision is too low, consider also:
+ * result[3] = result[0] * EXP(result[1])
+ */
+ emit_math1(c,
+ BRW_MATH_FUNCTION_EXP,
+ brw_writemask(dst, TGSI_WRITEMASK_Z),
+ brw_swizzle1(arg0, 0),
+ BRW_MATH_PRECISION_PARTIAL);
+ }
+
+ if (dst.dw1.bits.writemask & TGSI_WRITEMASK_W) {
+ /* result[3] = 1.0; */
+ brw_MOV(p, brw_writemask(dst, TGSI_WRITEMASK_W), brw_imm_f(1));
+ }
+}
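+
+/* The X component above builds 2.0^floor(x) directly in the float bit
+ * pattern by shifting the biased exponent into place.  A scalar C sketch
+ * of the same idea (illustration only, not used by the compiler):
+ *
+ *    int   e    = (int) floorf(x) + 127;   // biased exponent
+ *    int   bits = e << 23;                 // zero mantissa -> 1.0 * 2^e
+ *    float r;
+ *    memcpy(&r, &bits, sizeof r);          // r == powf(2.0f, floorf(x))
+ *
+ * which holds as long as the biased exponent stays within 1..254.
+ */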
+
+
+static void emit_log_noalias( struct brw_vs_compile *c,
+ struct brw_reg dst,
+ struct brw_reg arg0 )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg tmp = dst;
+ struct brw_reg tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
+ struct brw_reg arg0_ud = retype(arg0, BRW_REGISTER_TYPE_UD);
+ boolean need_tmp = (dst.dw1.bits.writemask != 0xf ||
+ dst.file != BRW_GENERAL_REGISTER_FILE);
+
+ if (need_tmp) {
+ tmp = get_tmp(c);
+ tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
+ }
+
+   /* Perform mant = frexpf(fabsf(x), &exp), adjusting exp and mant
+    * according to the spec:
+    *
+    * These almost look like they could be joined up, but not really
+    * practical:
+    *
+    * result[0].f = ((x.i & ((1u<<31)-1)) >> 23) - 127
+    * result[1].i = (x.i & ((1<<23)-1)) | (127<<23)
+ */
+ if (dst.dw1.bits.writemask & TGSI_WRITEMASK_XZ) {
+ brw_AND(p,
+ brw_writemask(tmp_ud, TGSI_WRITEMASK_X),
+ brw_swizzle1(arg0_ud, 0),
+ brw_imm_ud((1U<<31)-1));
+
+ brw_SHR(p,
+ brw_writemask(tmp_ud, TGSI_WRITEMASK_X),
+ tmp_ud,
+ brw_imm_ud(23));
+
+ brw_ADD(p,
+ brw_writemask(tmp, TGSI_WRITEMASK_X),
+ retype(tmp_ud, BRW_REGISTER_TYPE_D), /* does it matter? */
+ brw_imm_d(-127));
+ }
+
+ if (dst.dw1.bits.writemask & TGSI_WRITEMASK_YZ) {
+ brw_AND(p,
+ brw_writemask(tmp_ud, TGSI_WRITEMASK_Y),
+ brw_swizzle1(arg0_ud, 0),
+ brw_imm_ud((1<<23)-1));
+
+ brw_OR(p,
+ brw_writemask(tmp_ud, TGSI_WRITEMASK_Y),
+ tmp_ud,
+ brw_imm_ud(127<<23));
+ }
+
+ if (dst.dw1.bits.writemask & TGSI_WRITEMASK_Z) {
+ /* result[2] = result[0] + LOG2(result[1]); */
+
+ /* Why bother? The above is just a hint how to do this with a
+ * taylor series. Maybe we *should* use a taylor series as by
+ * the time all the above has been done it's almost certainly
+ * quicker than calling the mathbox, even with low precision.
+ *
+ * Options are:
+ * - result[0] + mathbox.LOG2(result[1])
+ * - mathbox.LOG2(arg0.x)
+ * - result[0] + inline_taylor_approx(result[1])
+ */
+ emit_math1(c,
+ BRW_MATH_FUNCTION_LOG,
+ brw_writemask(tmp, TGSI_WRITEMASK_Z),
+ brw_swizzle1(tmp, 1),
+ BRW_MATH_PRECISION_FULL);
+
+ brw_ADD(p,
+ brw_writemask(tmp, TGSI_WRITEMASK_Z),
+ brw_swizzle1(tmp, 2),
+ brw_swizzle1(tmp, 0));
+ }
+
+ if (dst.dw1.bits.writemask & TGSI_WRITEMASK_W) {
+ /* result[3] = 1.0; */
+ brw_MOV(p, brw_writemask(tmp, TGSI_WRITEMASK_W), brw_imm_f(1));
+ }
+
+ if (need_tmp) {
+ brw_MOV(p, dst, tmp);
+ release_tmp(c, tmp);
+ }
+}
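+
+/* Scalar sketch of the bit manipulation above (illustration only):
+ *
+ *    memcpy(&i, &x, 4);                       // reinterpret float as int
+ *    expo = ((i & 0x7fffffff) >> 23) - 127;   // unbiased exponent -> result[0]
+ *    mant = (i & 0x007fffff) | (127 << 23);   // bits of a float in [1,2) -> result[1]
+ *
+ * so LOG2(x) ~= expo + LOG2(mant), which is what the Z write above
+ * assembles from the mathbox LOG result.
+ */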
+
+
+
+
+/* Need to unalias - consider swizzles: r0 = DST r0.xxxx r1
+ */
+static void emit_dst_noalias( struct brw_vs_compile *c,
+ struct brw_reg dst,
+ struct brw_reg arg0,
+ struct brw_reg arg1)
+{
+ struct brw_compile *p = &c->func;
+
+ /* There must be a better way to do this:
+ */
+ if (dst.dw1.bits.writemask & TGSI_WRITEMASK_X)
+ brw_MOV(p, brw_writemask(dst, TGSI_WRITEMASK_X), brw_imm_f(1.0));
+ if (dst.dw1.bits.writemask & TGSI_WRITEMASK_Y)
+ brw_MUL(p, brw_writemask(dst, TGSI_WRITEMASK_Y), arg0, arg1);
+ if (dst.dw1.bits.writemask & TGSI_WRITEMASK_Z)
+ brw_MOV(p, brw_writemask(dst, TGSI_WRITEMASK_Z), arg0);
+ if (dst.dw1.bits.writemask & TGSI_WRITEMASK_W)
+ brw_MOV(p, brw_writemask(dst, TGSI_WRITEMASK_W), arg1);
+}
+
+static void emit_xpd( struct brw_compile *p,
+ struct brw_reg dst,
+ struct brw_reg t,
+ struct brw_reg u)
+{
+ brw_MUL(p, brw_null_reg(), brw_swizzle(t, 1,2,0,3), brw_swizzle(u,2,0,1,3));
+ brw_MAC(p, dst, negate(brw_swizzle(t, 2,0,1,3)), brw_swizzle(u,1,2,0,3));
+}
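+
+/* The MUL/MAC pair above accumulates the usual cross product through the
+ * implicit accumulator:
+ *
+ *    dst.x = t.y*u.z - t.z*u.y
+ *    dst.y = t.z*u.x - t.x*u.z
+ *    dst.z = t.x*u.y - t.y*u.x
+ *
+ * (the W channel cancels to zero.)
+ */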
+
+
+
+static void emit_lit_noalias( struct brw_vs_compile *c,
+ struct brw_reg dst,
+ struct brw_reg arg0 )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_instruction *if_insn;
+ struct brw_reg tmp = dst;
+ boolean need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);
+
+ if (need_tmp)
+ tmp = get_tmp(c);
+
+ brw_MOV(p, brw_writemask(dst, TGSI_WRITEMASK_YZ), brw_imm_f(0));
+ brw_MOV(p, brw_writemask(dst, TGSI_WRITEMASK_XW), brw_imm_f(1));
+
+ /* Need to use BRW_EXECUTE_8 and also do an 8-wide compare in order
+ * to get all channels active inside the IF. In the clipping code
+ * we run with NoMask, so it's not an option and we can use
+    * BRW_EXECUTE_1 for all comparisons.
+ */
+ brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,0), brw_imm_f(0));
+ if_insn = brw_IF(p, BRW_EXECUTE_8);
+ {
+ brw_MOV(p, brw_writemask(dst, TGSI_WRITEMASK_Y), brw_swizzle1(arg0,0));
+
+ brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,1), brw_imm_f(0));
+ brw_MOV(p, brw_writemask(tmp, TGSI_WRITEMASK_Z), brw_swizzle1(arg0,1));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+
+ emit_math2(c,
+ BRW_MATH_FUNCTION_POW,
+ brw_writemask(dst, TGSI_WRITEMASK_Z),
+ brw_swizzle1(tmp, 2),
+ brw_swizzle1(arg0, 3),
+ BRW_MATH_PRECISION_PARTIAL);
+ }
+
+ brw_ENDIF(p, if_insn);
+}
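+
+/* Roughly, the sequence above computes (scalar sketch of the common case
+ * where tmp aliases dst):
+ *
+ *    dst = (1, 0, 0, 1);
+ *    if (arg0.x > 0) {
+ *       dst.y = arg0.x;
+ *       if (arg0.y > 0)
+ *          tmp.z = arg0.y;            // otherwise tmp.z stays 0
+ *       dst.z = pow(tmp.z, arg0.w);
+ *    }
+ *
+ * i.e. the usual LIT lighting-coefficient pattern.
+ */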
+
+
+
+
+
+/* TODO: relative addressing!
+ */
+static struct brw_reg get_reg( struct brw_vs_compile *c,
+ unsigned file,
+ unsigned index )
+{
+ switch (file) {
+ case TGSI_FILE_TEMPORARY:
+ case TGSI_FILE_INPUT:
+ case TGSI_FILE_OUTPUT:
+ assert(c->regs[file][index].nr != 0);
+ return c->regs[file][index];
+ case TGSI_FILE_CONSTANT:
+ assert(c->regs[TGSI_FILE_CONSTANT][index + c->prog_data.num_imm].nr != 0);
+ return c->regs[TGSI_FILE_CONSTANT][index + c->prog_data.num_imm];
+ case TGSI_FILE_IMMEDIATE:
+ assert(c->regs[TGSI_FILE_CONSTANT][index].nr != 0);
+ return c->regs[TGSI_FILE_CONSTANT][index];
+ case TGSI_FILE_ADDRESS:
+ assert(index == 0);
+ return c->regs[file][index];
+
+ case TGSI_FILE_NULL: /* undef values */
+ return brw_null_reg();
+
+ default:
+ assert(0);
+ return brw_null_reg();
+ }
+}
+
+
+
+static struct brw_reg deref( struct brw_vs_compile *c,
+ struct brw_reg arg,
+ int offset)
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg tmp = vec4(get_tmp(c));
+ struct brw_reg vp_address = retype(vec1(get_reg(c, TGSI_FILE_ADDRESS, 0)), BRW_REGISTER_TYPE_UW);
+ unsigned byte_offset = arg.nr * 32 + arg.subnr + offset * 16;
+ struct brw_reg indirect = brw_vec4_indirect(0,0);
+
+ {
+ brw_push_insn_state(p);
+ brw_set_access_mode(p, BRW_ALIGN_1);
+
+ /* This is pretty clunky - load the address register twice and
+ * fetch each 4-dword value in turn. There must be a way to do
+ * this in a single pass, but I couldn't get it to work.
+ */
+ brw_ADD(p, brw_address_reg(0), vp_address, brw_imm_d(byte_offset));
+ brw_MOV(p, tmp, indirect);
+
+ brw_ADD(p, brw_address_reg(0), suboffset(vp_address, 8), brw_imm_d(byte_offset));
+ brw_MOV(p, suboffset(tmp, 4), indirect);
+
+ brw_pop_insn_state(p);
+ }
+
+ return vec8(tmp);
+}
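+
+/* For reference, the byte_offset above follows the usual GEN register
+ * layout: each GRF is 32 bytes and holds two 16-byte vec4 slots, so
+ * element "offset" relative to a register starting at arg.nr lives at
+ * arg.nr*32 + arg.subnr + offset*16.
+ */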
+
+
+static void emit_arl( struct brw_vs_compile *c,
+ struct brw_reg dst,
+ struct brw_reg arg0 )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg tmp = dst;
+ boolean need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);
+
+ if (need_tmp)
+ tmp = get_tmp(c);
+
+ brw_RNDD(p, tmp, arg0);
+ brw_MUL(p, dst, tmp, brw_imm_d(16));
+
+ if (need_tmp)
+ release_tmp(c, tmp);
+}
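+
+/* ARL is floor() followed by a multiply by 16 because the address
+ * register is consumed as a byte offset (see deref() above) and each
+ * vec4 element occupies 16 bytes.
+ */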
+
+
+/* Will return mangled results for SWZ op. The emit_swz() function
+ * ignores this result and recalculates taking extended swizzles into
+ * account.
+ */
+static struct brw_reg get_arg( struct brw_vs_compile *c,
+ struct tgsi_src_register *src )
+{
+ struct brw_reg reg;
+
+ if (src->File == TGSI_FILE_NULL)
+ return brw_null_reg();
+
+#if 0
+ if (src->RelAddr)
+ reg = deref(c, c->regs[PROGRAM_STATE_VAR][0], src->Index);
+ else
+#endif
+ reg = get_reg(c, src->File, src->Index);
+
+ /* Convert 3-bit swizzle to 2-bit.
+ */
+ reg.dw1.bits.swizzle = BRW_SWIZZLE4(src->SwizzleX,
+ src->SwizzleY,
+ src->SwizzleZ,
+ src->SwizzleW);
+
+ /* Note this is ok for non-swizzle instructions:
+ */
+ reg.negate = src->Negate ? 1 : 0;
+
+ return reg;
+}
+
+
+static struct brw_reg get_dst( struct brw_vs_compile *c,
+ const struct tgsi_dst_register *dst )
+{
+ struct brw_reg reg = get_reg(c, dst->File, dst->Index);
+
+ reg.dw1.bits.writemask = dst->WriteMask;
+
+ return reg;
+}
+
+
+
+
+static void emit_swz( struct brw_vs_compile *c,
+ struct brw_reg dst,
+ struct tgsi_src_register src )
+{
+ struct brw_compile *p = &c->func;
+ unsigned zeros_mask = 0;
+ unsigned ones_mask = 0;
+ unsigned src_mask = 0;
+ ubyte src_swz[4];
+ boolean need_tmp = (src.Negate &&
+ dst.file != BRW_GENERAL_REGISTER_FILE);
+ struct brw_reg tmp = dst;
+ unsigned i;
+
+ if (need_tmp)
+ tmp = get_tmp(c);
+
+ for (i = 0; i < 4; i++) {
+ if (dst.dw1.bits.writemask & (1<<i)) {
+ ubyte s = 0;
+         switch (i) {
+         case 0:
+            s = src.SwizzleX;
+            break;
+         case 1:
+            s = src.SwizzleY;
+            break;
+         case 2:
+            s = src.SwizzleZ;
+            break;
+         case 3:
+            s = src.SwizzleW;
+            break;
+         }
+ switch (s) {
+ case TGSI_SWIZZLE_X:
+ case TGSI_SWIZZLE_Y:
+ case TGSI_SWIZZLE_Z:
+ case TGSI_SWIZZLE_W:
+ src_mask |= 1<<i;
+ src_swz[i] = s;
+ break;
+ case TGSI_EXTSWIZZLE_ZERO:
+ zeros_mask |= 1<<i;
+ break;
+ case TGSI_EXTSWIZZLE_ONE:
+ ones_mask |= 1<<i;
+ break;
+ }
+ }
+ }
+
+ /* Do src first, in case dst aliases src:
+ */
+ if (src_mask) {
+ struct brw_reg arg0;
+
+#if 0
+ if (src.RelAddr)
+ arg0 = deref(c, c->regs[PROGRAM_STATE_VAR][0], src.Index);
+ else
+#endif
+ arg0 = get_reg(c, src.File, src.Index);
+
+ arg0 = brw_swizzle(arg0,
+ src_swz[0], src_swz[1],
+ src_swz[2], src_swz[3]);
+
+ brw_MOV(p, brw_writemask(tmp, src_mask), arg0);
+ }
+
+ if (zeros_mask)
+ brw_MOV(p, brw_writemask(tmp, zeros_mask), brw_imm_f(0));
+
+ if (ones_mask)
+ brw_MOV(p, brw_writemask(tmp, ones_mask), brw_imm_f(1));
+
+ if (src.Negate)
+ brw_MOV(p, brw_writemask(tmp, src.Negate), negate(tmp));
+
+ if (need_tmp) {
+ brw_MOV(p, dst, tmp);
+ release_tmp(c, tmp);
+ }
+}
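+
+/* Illustrative example of what this handles: a TGSI extended swizzle such
+ * as  SWZ dst, src, x, 0, 1, w  decomposes into
+ *
+ *    src_mask   = X|W   with src_swz = { X, -, -, W }
+ *    zeros_mask = Y
+ *    ones_mask  = Z
+ *
+ * and is emitted as one swizzled MOV for the real components plus
+ * immediate MOVs of 0.0 and 1.0 for the rest.
+ */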
+
+
+
+/* Post-vertex-program processing. Send the results to the URB.
+ */
+static void emit_vertex_write( struct brw_vs_compile *c, struct brw_prog_info *info)
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg m0 = brw_message_reg(0);
+ struct brw_reg pos = c->regs[TGSI_FILE_OUTPUT][info->pos_idx];
+ struct brw_reg ndc;
+
+ if (c->key.copy_edgeflag) {
+ brw_MOV(p,
+ get_reg(c, TGSI_FILE_OUTPUT, info->result_edge_idx),
+ get_reg(c, TGSI_FILE_INPUT, info->edge_flag_idx));
+ }
+
+
+   /* Build ndc coords? TODO: Short-circuit when w is known to be one.
+ */
+ if (!c->key.know_w_is_one) {
+ ndc = get_tmp(c);
+ emit_math1(c, BRW_MATH_FUNCTION_INV, ndc, brw_swizzle1(pos, 3), BRW_MATH_PRECISION_FULL);
+ brw_MUL(p, brw_writemask(ndc, TGSI_WRITEMASK_XYZ), pos, ndc);
+ }
+ else {
+ ndc = pos;
+ }
+
+ /* This includes the workaround for -ve rhw, so is no longer an
+ * optional step:
+ */
+ if (info->writes_psize ||
+ c->key.nr_userclip ||
+ !c->key.know_w_is_one)
+ {
+ struct brw_reg header1 = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);
+ unsigned i;
+
+ brw_MOV(p, header1, brw_imm_ud(0));
+
+ brw_set_access_mode(p, BRW_ALIGN_16);
+
+ if (info->writes_psize) {
+ struct brw_reg psiz = c->regs[TGSI_FILE_OUTPUT][info->psize_idx];
+ brw_MUL(p, brw_writemask(header1, TGSI_WRITEMASK_W),
+ brw_swizzle1(psiz, 0), brw_imm_f(1<<11));
+ brw_AND(p, brw_writemask(header1, TGSI_WRITEMASK_W), header1,
+ brw_imm_ud(0x7ff<<8));
+ }
+
+
+ for (i = 0; i < c->key.nr_userclip; i++) {
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
+ brw_DP4(p, brw_null_reg(), pos, c->userplane[i]);
+ brw_OR(p, brw_writemask(header1, TGSI_WRITEMASK_W), header1, brw_imm_ud(1<<i));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ }
+
+
+ /* i965 clipping workaround:
+ * 1) Test for -ve rhw
+ * 2) If set,
+ * set ndc = (0,0,0,0)
+ * set ucp[6] = 1
+ *
+ * Later, clipping will detect ucp[6] and ensure the primitive is
+ * clipped against all fixed planes.
+ */
+ if (!c->key.know_w_is_one) {
+ brw_CMP(p,
+ vec8(brw_null_reg()),
+ BRW_CONDITIONAL_L,
+ brw_swizzle1(ndc, 3),
+ brw_imm_f(0));
+
+ brw_OR(p, brw_writemask(header1, TGSI_WRITEMASK_W), header1, brw_imm_ud(1<<6));
+ brw_MOV(p, ndc, brw_imm_f(0));
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ }
+
+ brw_set_access_mode(p, BRW_ALIGN_1); /* why? */
+ brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), header1);
+ brw_set_access_mode(p, BRW_ALIGN_16);
+
+ release_tmp(c, header1);
+ }
+ else {
+ brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
+ }
+
+
+ /* Emit the (interleaved) headers for the two vertices - an 8-reg
+ * of zeros followed by two sets of NDC coordinates:
+ */
+ brw_set_access_mode(p, BRW_ALIGN_1);
+ brw_MOV(p, offset(m0, 2), ndc);
+ brw_MOV(p, offset(m0, 3), pos);
+
+
+ brw_urb_WRITE(p,
+ brw_null_reg(), /* dest */
+ 0, /* starting mrf reg nr */
+ c->r0, /* src */
+ 0, /* allocate */
+ 1, /* used */
+ c->nr_outputs + 3, /* msg len */
+ 0, /* response len */
+ 1, /* eot */
+ 1, /* writes complete */
+ 0, /* urb destination offset */
+ BRW_URB_SWIZZLE_INTERLEAVE);
+
+}
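+
+/* The PSIZ path above scales the float point size by 1<<11 and masks the
+ * result with 0x7ff<<8, leaving an 11-bit fixed-point value in bits 8..18
+ * of header1.w -- the layout the later fixed-function stages appear to
+ * expect here (see the hardware documentation for the exact encoding).
+ */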
+
+static void
+post_vs_emit( struct brw_vs_compile *c, struct brw_instruction *end_inst )
+{
+ struct tgsi_parse_context parse;
+ const struct tgsi_token *tokens = c->vp->program.tokens;
+ tgsi_parse_init(&parse, tokens);
+ while (!tgsi_parse_end_of_tokens(&parse)) {
+ tgsi_parse_token(&parse);
+ if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_INSTRUCTION) {
+#if 0
+ struct brw_instruction *brw_inst1, *brw_inst2;
+ const struct tgsi_full_instruction *inst1, *inst2;
+ int offset;
+ inst1 = &parse.FullToken.FullInstruction;
+ brw_inst1 = inst1->Data;
+ switch (inst1->Opcode) {
+ case TGSI_OPCODE_CAL:
+ case TGSI_OPCODE_BRA:
+ target_insn = inst1->BranchTarget;
+ inst2 = &c->vp->program.Base.Instructions[target_insn];
+ brw_inst2 = inst2->Data;
+ offset = brw_inst2 - brw_inst1;
+ brw_set_src1(brw_inst1, brw_imm_d(offset*16));
+ break;
+ case TGSI_OPCODE_END:
+ offset = end_inst - brw_inst1;
+ brw_set_src1(brw_inst1, brw_imm_d(offset*16));
+ break;
+ default:
+ break;
+ }
+#endif
+ }
+ }
+ tgsi_parse_free(&parse);
+}
+
+static void process_declaration(const struct tgsi_full_declaration *decl,
+ struct brw_prog_info *info)
+{
+ int first = decl->DeclarationRange.First;
+ int last = decl->DeclarationRange.Last;
+
+ switch(decl->Declaration.File) {
+ case TGSI_FILE_CONSTANT:
+ info->num_consts += last - first + 1;
+ break;
+ case TGSI_FILE_INPUT: {
+ }
+ break;
+ case TGSI_FILE_OUTPUT: {
+ assert(last == first); /* for now */
+ if (decl->Declaration.Semantic) {
+ switch (decl->Semantic.SemanticName) {
+ case TGSI_SEMANTIC_POSITION: {
+ info->pos_idx = first;
+ }
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ break;
+ case TGSI_SEMANTIC_BCOLOR:
+ break;
+ case TGSI_SEMANTIC_FOG:
+ break;
+ case TGSI_SEMANTIC_PSIZE: {
+ info->writes_psize = TRUE;
+ info->psize_idx = first;
+ }
+ break;
+ case TGSI_SEMANTIC_GENERIC:
+ break;
+ }
+ }
+ }
+ break;
+ case TGSI_FILE_TEMPORARY: {
+ info->num_temps += (last - first) + 1;
+ }
+ break;
+ case TGSI_FILE_SAMPLER: {
+ }
+ break;
+ case TGSI_FILE_ADDRESS: {
+ info->num_addrs += (last - first) + 1;
+ }
+ break;
+ case TGSI_FILE_IMMEDIATE: {
+ }
+ break;
+ case TGSI_FILE_NULL: {
+ }
+ break;
+ }
+}
+
+static void process_instruction(struct brw_vs_compile *c,
+ struct tgsi_full_instruction *inst,
+ struct brw_prog_info *info)
+{
+ struct brw_reg args[3], dst;
+ struct brw_compile *p = &c->func;
+ /*struct brw_indirect stack_index = brw_indirect(0, 0);*/
+ unsigned i;
+ unsigned index;
+ unsigned file;
+ /*FIXME: might not be the only one*/
+ const struct tgsi_dst_register *dst_reg = &inst->FullDstRegisters[0].DstRegister;
+ /*
+ struct brw_instruction *if_inst[MAX_IFSN];
+ unsigned insn, if_insn = 0;
+ */
+
+ for (i = 0; i < 3; i++) {
+ struct tgsi_full_src_register *src = &inst->FullSrcRegisters[i];
+ index = src->SrcRegister.Index;
+ file = src->SrcRegister.File;
+ if (file == TGSI_FILE_OUTPUT && c->output_regs[index].used_in_src)
+ args[i] = c->output_regs[index].reg;
+ else
+ args[i] = get_arg(c, &src->SrcRegister);
+ }
+
+ /* Get dest regs. Note that it is possible for a reg to be both
+ * dst and arg, given the static allocation of registers. So
+ * care needs to be taken emitting multi-operation instructions.
+ */
+ index = dst_reg->Index;
+ file = dst_reg->File;
+ if (file == TGSI_FILE_OUTPUT && c->output_regs[index].used_in_src)
+ dst = c->output_regs[index].reg;
+ else
+ dst = get_dst(c, dst_reg);
+
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_ABS:
+ brw_MOV(p, dst, brw_abs(args[0]));
+ break;
+ case TGSI_OPCODE_ADD:
+ brw_ADD(p, dst, args[0], args[1]);
+ break;
+ case TGSI_OPCODE_DP3:
+ brw_DP3(p, dst, args[0], args[1]);
+ break;
+ case TGSI_OPCODE_DP4:
+ brw_DP4(p, dst, args[0], args[1]);
+ break;
+ case TGSI_OPCODE_DPH:
+ brw_DPH(p, dst, args[0], args[1]);
+ break;
+ case TGSI_OPCODE_DST:
+ unalias2(c, dst, args[0], args[1], emit_dst_noalias);
+ break;
+ case TGSI_OPCODE_EXP:
+ unalias1(c, dst, args[0], emit_exp_noalias);
+ break;
+ case TGSI_OPCODE_EX2:
+ emit_math1(c, BRW_MATH_FUNCTION_EXP, dst, args[0], BRW_MATH_PRECISION_FULL);
+ break;
+ case TGSI_OPCODE_ARL:
+ emit_arl(c, dst, args[0]);
+ break;
+ case TGSI_OPCODE_FLR:
+ brw_RNDD(p, dst, args[0]);
+ break;
+ case TGSI_OPCODE_FRC:
+ brw_FRC(p, dst, args[0]);
+ break;
+ case TGSI_OPCODE_LOG:
+ unalias1(c, dst, args[0], emit_log_noalias);
+ break;
+ case TGSI_OPCODE_LG2:
+ emit_math1(c, BRW_MATH_FUNCTION_LOG, dst, args[0], BRW_MATH_PRECISION_FULL);
+ break;
+ case TGSI_OPCODE_LIT:
+ unalias1(c, dst, args[0], emit_lit_noalias);
+ break;
+ case TGSI_OPCODE_MAD:
+ brw_MOV(p, brw_acc_reg(), args[2]);
+ brw_MAC(p, dst, args[0], args[1]);
+ break;
+ case TGSI_OPCODE_MAX:
+ emit_max(p, dst, args[0], args[1]);
+ break;
+ case TGSI_OPCODE_MIN:
+ emit_min(p, dst, args[0], args[1]);
+ break;
+ case TGSI_OPCODE_MOV:
+ case TGSI_OPCODE_SWZ:
+#if 0
+ /* The args[0] value can't be used here as it won't have
+ * correctly encoded the full swizzle:
+ */
+ emit_swz(c, dst, inst->SrcReg[0] );
+#endif
+ brw_MOV(p, dst, args[0]);
+ break;
+ case TGSI_OPCODE_MUL:
+ brw_MUL(p, dst, args[0], args[1]);
+ break;
+ case TGSI_OPCODE_POW:
+ emit_math2(c, BRW_MATH_FUNCTION_POW, dst, args[0], args[1], BRW_MATH_PRECISION_FULL);
+ break;
+ case TGSI_OPCODE_RCP:
+ emit_math1(c, BRW_MATH_FUNCTION_INV, dst, args[0], BRW_MATH_PRECISION_FULL);
+ break;
+ case TGSI_OPCODE_RSQ:
+ emit_math1(c, BRW_MATH_FUNCTION_RSQ, dst, args[0], BRW_MATH_PRECISION_FULL);
+ break;
+
+ case TGSI_OPCODE_SEQ:
+ emit_seq(p, dst, args[0], args[1]);
+ break;
+ case TGSI_OPCODE_SNE:
+ emit_sne(p, dst, args[0], args[1]);
+ break;
+ case TGSI_OPCODE_SGE:
+ emit_sge(p, dst, args[0], args[1]);
+ break;
+ case TGSI_OPCODE_SGT:
+ emit_sgt(p, dst, args[0], args[1]);
+ break;
+ case TGSI_OPCODE_SLT:
+ emit_slt(p, dst, args[0], args[1]);
+ break;
+ case TGSI_OPCODE_SLE:
+ emit_sle(p, dst, args[0], args[1]);
+ break;
+ case TGSI_OPCODE_SUB:
+ brw_ADD(p, dst, args[0], negate(args[1]));
+ break;
+ case TGSI_OPCODE_XPD:
+ emit_xpd(p, dst, args[0], args[1]);
+ break;
+#if 0
+ case TGSI_OPCODE_IF:
+ assert(if_insn < MAX_IFSN);
+ if_inst[if_insn++] = brw_IF(p, BRW_EXECUTE_8);
+ break;
+ case TGSI_OPCODE_ELSE:
+ if_inst[if_insn-1] = brw_ELSE(p, if_inst[if_insn-1]);
+ break;
+ case TGSI_OPCODE_ENDIF:
+ assert(if_insn > 0);
+ brw_ENDIF(p, if_inst[--if_insn]);
+ break;
+ case TGSI_OPCODE_BRA:
+ brw_set_predicate_control(p, BRW_PREDICATE_NORMAL);
+ brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
+ brw_set_predicate_control_flag_value(p, 0xff);
+ break;
+ case TGSI_OPCODE_CAL:
+ brw_set_access_mode(p, BRW_ALIGN_1);
+ brw_ADD(p, deref_1uw(stack_index, 0), brw_ip_reg(), brw_imm_d(3*16));
+ brw_set_access_mode(p, BRW_ALIGN_16);
+ brw_ADD(p, get_addr_reg(stack_index),
+ get_addr_reg(stack_index), brw_imm_d(4));
+ inst->Data = &p->store[p->nr_insn];
+ brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
+ break;
+#endif
+ case TGSI_OPCODE_RET:
+#if 0
+ brw_ADD(p, get_addr_reg(stack_index),
+ get_addr_reg(stack_index), brw_imm_d(-4));
+ brw_set_access_mode(p, BRW_ALIGN_1);
+ brw_MOV(p, brw_ip_reg(), deref_1uw(stack_index, 0));
+ brw_set_access_mode(p, BRW_ALIGN_16);
+#else
+ /*brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));*/
+#endif
+ break;
+ case TGSI_OPCODE_END:
+ brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
+ break;
+ case TGSI_OPCODE_BGNSUB:
+ case TGSI_OPCODE_ENDSUB:
+ break;
+ default:
+      debug_printf("Unsupported opcode %d in vertex shader\n", inst->Instruction.Opcode);
+ break;
+ }
+
+ if (dst_reg->File == TGSI_FILE_OUTPUT
+ && dst_reg->Index != info->pos_idx
+ && c->output_regs[dst_reg->Index].used_in_src)
+ brw_MOV(p, get_dst(c, dst_reg), dst);
+
+ release_tmps(c);
+}
+
+/* Emit the vertex program instructions here.
+ */
+void brw_vs_emit(struct brw_vs_compile *c)
+{
+#define MAX_IFSN 32
+ struct brw_compile *p = &c->func;
+ struct brw_instruction *end_inst;
+ struct tgsi_parse_context parse;
+ struct brw_indirect stack_index = brw_indirect(0, 0);
+ const struct tgsi_token *tokens = c->vp->program.tokens;
+ struct brw_prog_info prog_info;
+ unsigned allocated_registers = 0;
+ memset(&prog_info, 0, sizeof(struct brw_prog_info));
+
+ brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+ brw_set_access_mode(p, BRW_ALIGN_16);
+
+ tgsi_parse_init(&parse, tokens);
+   /* Message registers can't be read, so copy any output that is also
+    * used as a source operand into a GRF register first.
+    */
+   while (!tgsi_parse_end_of_tokens(&parse)) {
+      unsigned i;
+      tgsi_parse_token(&parse);
+ switch (parse.FullToken.Token.Type) {
+ case TGSI_TOKEN_TYPE_INSTRUCTION: {
+ const struct tgsi_full_instruction *inst = &parse.FullToken.FullInstruction;
+ for (i = 0; i < 3; ++i) {
+ const struct tgsi_src_register *src = &inst->FullSrcRegisters[i].SrcRegister;
+ unsigned index = src->Index;
+ unsigned file = src->File;
+ if (file == TGSI_FILE_OUTPUT)
+ c->output_regs[index].used_in_src = TRUE;
+ }
+ }
+ break;
+ default:
+ /* nothing */
+ break;
+ }
+ }
+ tgsi_parse_free(&parse);
+
+ tgsi_parse_init(&parse, tokens);
+
+ while (!tgsi_parse_end_of_tokens(&parse)) {
+ tgsi_parse_token(&parse);
+
+ switch (parse.FullToken.Token.Type) {
+ case TGSI_TOKEN_TYPE_DECLARATION: {
+ struct tgsi_full_declaration *decl = &parse.FullToken.FullDeclaration;
+ process_declaration(decl, &prog_info);
+ }
+ break;
+ case TGSI_TOKEN_TYPE_IMMEDIATE: {
+ struct tgsi_full_immediate *imm = &parse.FullToken.FullImmediate;
+ assert(imm->Immediate.NrTokens == 4 + 1);
+ c->prog_data.imm_buf[c->prog_data.num_imm][0] = imm->u[0].Float;
+ c->prog_data.imm_buf[c->prog_data.num_imm][1] = imm->u[1].Float;
+ c->prog_data.imm_buf[c->prog_data.num_imm][2] = imm->u[2].Float;
+ c->prog_data.imm_buf[c->prog_data.num_imm][3] = imm->u[3].Float;
+ c->prog_data.num_imm++;
+ }
+ break;
+ case TGSI_TOKEN_TYPE_INSTRUCTION: {
+ struct tgsi_full_instruction *inst = &parse.FullToken.FullInstruction;
+ if (!allocated_registers) {
+            /* First instruction (declarations are finished): now that we
+             * know which variables are used, allocate registers for them.
+             */
+ c->prog_data.num_consts = prog_info.num_consts;
+ c->prog_data.max_const = prog_info.num_consts + c->prog_data.num_imm;
+ brw_vs_alloc_regs(c, &prog_info);
+
+ brw_set_access_mode(p, BRW_ALIGN_1);
+ brw_MOV(p, get_addr_reg(stack_index), brw_address(c->stack));
+ brw_set_access_mode(p, BRW_ALIGN_16);
+ allocated_registers = 1;
+ }
+ process_instruction(c, inst, &prog_info);
+ }
+ break;
+ }
+ }
+
+ end_inst = &p->store[p->nr_insn];
+ emit_vertex_write(c, &prog_info);
+ post_vs_emit(c, end_inst);
+ tgsi_parse_free(&parse);
+
+}
diff --git a/src/gallium/drivers/i965simple/brw_vs_state.c b/src/gallium/drivers/i965simple/brw_vs_state.c
new file mode 100644
index 0000000000..1eaff87892
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_vs_state.c
@@ -0,0 +1,103 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "brw_context.h"
+#include "brw_state.h"
+#include "brw_defines.h"
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+static void upload_vs_unit( struct brw_context *brw )
+{
+ struct brw_vs_unit_state vs;
+
+ memset(&vs, 0, sizeof(vs));
+
+ /* CACHE_NEW_VS_PROG */
+ vs.thread0.kernel_start_pointer = brw->vs.prog_gs_offset >> 6;
+ vs.thread0.grf_reg_count = align(brw->vs.prog_data->total_grf, 16) / 16 - 1;
+ vs.thread3.urb_entry_read_length = brw->vs.prog_data->urb_read_length;
+ vs.thread3.const_urb_entry_read_length = brw->vs.prog_data->curb_read_length;
+ vs.thread3.dispatch_grf_start_reg = 1;
+
+
+ /* BRW_NEW_URB_FENCE */
+ vs.thread4.nr_urb_entries = brw->urb.nr_vs_entries;
+ vs.thread4.urb_entry_allocation_size = brw->urb.vsize - 1;
+ vs.thread4.max_threads = MIN2(
+ MAX2(0, (brw->urb.nr_vs_entries - 6) / 2 - 1),
+ 15);
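+
+   /* Illustrative numbers: with 32 VS URB entries the expression above
+    * evaluates to MIN2(MAX2(0, (32 - 6) / 2 - 1), 15) = 12, so the
+    * max_threads field is programmed to 12 for that URB budget.
+    */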
+
+
+
+ if (BRW_DEBUG & DEBUG_SINGLE_THREAD)
+ vs.thread4.max_threads = 0;
+
+ /* BRW_NEW_CURBE_OFFSETS, _NEW_TRANSFORM */
+ if (0 /*brw->attribs.Clip->ClipPlanesEnabled*/) {
+ /* Note that we read in the userclip planes as well, hence
+ * clip_start:
+ */
+ vs.thread3.const_urb_entry_read_offset = brw->curbe.clip_start * 2;
+ }
+ else {
+ vs.thread3.const_urb_entry_read_offset = brw->curbe.vs_start * 2;
+ }
+
+ vs.thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;
+ vs.thread3.urb_entry_read_offset = 0;
+
+ /* No samplers for ARB_vp programs:
+ */
+ vs.vs5.sampler_count = 0;
+
+ if (BRW_DEBUG & DEBUG_STATS)
+ vs.thread4.stats_enable = 1;
+
+ /* Vertex program always enabled:
+ */
+ vs.vs6.vs_enable = 1;
+
+ brw->vs.state_gs_offset = brw_cache_data( &brw->cache[BRW_VS_UNIT], &vs );
+}
+
+
+const struct brw_tracked_state brw_vs_unit = {
+ .dirty = {
+ .brw = (BRW_NEW_CLIP |
+ BRW_NEW_CURBE_OFFSETS |
+ BRW_NEW_URB_FENCE),
+ .cache = CACHE_NEW_VS_PROG
+ },
+ .update = upload_vs_unit
+};
diff --git a/src/gallium/drivers/i965simple/brw_winsys.h b/src/gallium/drivers/i965simple/brw_winsys.h
new file mode 100644
index 0000000000..ec1e400418
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_winsys.h
@@ -0,0 +1,209 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * This is the interface that i965simple requires any window system
+ * hosting it to implement. This is the only include file in i965simple
+ * which is public.
+ *
+ */
+
+#ifndef BRW_WINSYS_H
+#define BRW_WINSYS_H
+
+
+#include "pipe/p_defines.h"
+
+
+/* Pipe drivers are (meant to be!) independent of both GL and the
+ * window system. The window system provides a buffer manager and a
+ * set of additional hooks for things like command buffer submission,
+ * etc.
+ *
+ * There clearly has to be some agreement between the window system
+ * driver and the hardware driver about the format of command buffers,
+ * etc.
+ */
+
+struct pipe_buffer;
+struct pipe_fence_handle;
+struct pipe_winsys;
+struct pipe_screen;
+
+
+/* The pipe driver currently understands the following chipsets:
+ */
+#define PCI_CHIP_I965_G 0x29A2
+#define PCI_CHIP_I965_Q 0x2992
+#define PCI_CHIP_I965_G_1 0x2982
+#define PCI_CHIP_I965_GM 0x2A02
+#define PCI_CHIP_I965_GME 0x2A12
+
+
+/* These are the names of all the state caches managed by the driver.
+ *
+ * When data is uploaded to a buffer with buffer_subdata, we use the
+ * special version of that function below so that information about
+ * what type of data this is can be passed to the winsys backend.
+ * That in turn allows the correct flags to be set in the aub file
+ * dump to allow human-readable file dumps later on.
+ */
+
+enum brw_cache_id {
+ BRW_CC_VP,
+ BRW_CC_UNIT,
+ BRW_WM_PROG,
+ BRW_SAMPLER_DEFAULT_COLOR,
+ BRW_SAMPLER,
+ BRW_WM_UNIT,
+ BRW_SF_PROG,
+ BRW_SF_VP,
+ BRW_SF_UNIT,
+ BRW_VS_UNIT,
+ BRW_VS_PROG,
+ BRW_GS_UNIT,
+ BRW_GS_PROG,
+ BRW_CLIP_VP,
+ BRW_CLIP_UNIT,
+ BRW_CLIP_PROG,
+ BRW_SS_SURFACE,
+ BRW_SS_SURF_BIND,
+
+ BRW_MAX_CACHE
+};
+
+#define BRW_CONSTANT_BUFFER BRW_MAX_CACHE
+
+/**
+ * Additional winsys interface for i965simple.
+ *
+ * It is an overly simple batchbuffer mechanism.  We will want to improve
+ * its performance, perhaps based on the cmdstream stuff.  It would be
+ * pretty much impossible to implement swz on top of this
+ * interface.
+ *
+ * Will also need additions/changes to implement static/dynamic
+ * indirect state.
+ */
+struct brw_winsys {
+
+ void (*destroy)(struct brw_winsys *);
+
+ /**
+ * Reserve space on batch buffer.
+ *
+ * Returns a null pointer if there is insufficient space in the batch buffer
+ * to hold the requested number of dwords and relocations.
+ *
+ * The number of dwords should also include the number of relocations.
+ */
+ unsigned *(*batch_start)(struct brw_winsys *sws,
+ unsigned dwords,
+ unsigned relocs);
+
+ void (*batch_dword)(struct brw_winsys *sws,
+ unsigned dword);
+
+ /**
+ * Emit a relocation to a buffer.
+ *
+    * Used not only when the buffer addresses are not pinned, but also to
+    * ensure referenced buffers will not be destroyed until the current
+    * batch buffer execution is finished.
+    *
+    * The access flags are a combination of the BRW_BUFFER_ACCESS_WRITE
+    * and BRW_BUFFER_ACCESS_READ macros defined below.
+ */
+ void (*batch_reloc)(struct brw_winsys *sws,
+ struct pipe_buffer *buf,
+ unsigned access_flags,
+ unsigned delta);
+
+
+ /* Not used yet, but really want this:
+ */
+ void (*batch_end)( struct brw_winsys *sws );
+
+ /**
+ * Flush the batch buffer.
+ *
+ * Fence argument must point to NULL or to a previous fence, and the caller
+ * must call fence_reference when done with the fence.
+ */
+ void (*batch_flush)(struct brw_winsys *sws,
+ struct pipe_fence_handle **fence);
+
+
+ /* A version of buffer_subdata that includes information for the
+ * simulator:
+ */
+ void (*buffer_subdata_typed)(struct brw_winsys *sws,
+ struct pipe_buffer *buf,
+ unsigned long offset,
+ unsigned long size,
+ const void *data,
+ unsigned data_type);
+
+
+ /* A cheat so we don't have to think about relocations in a couple
+ * of places yet:
+ */
+ unsigned (*get_buffer_offset)( struct brw_winsys *sws,
+ struct pipe_buffer *buf,
+ unsigned flags );
+
+};
+
+#define BRW_BUFFER_ACCESS_WRITE 0x1
+#define BRW_BUFFER_ACCESS_READ 0x2
+
+#define BRW_BUFFER_USAGE_LIT_VERTEX (PIPE_BUFFER_USAGE_CUSTOM << 0)
+
+
+struct pipe_context *brw_create(struct pipe_screen *,
+ struct brw_winsys *,
+ unsigned pci_id);
+
+static inline boolean brw_batchbuffer_data(struct brw_winsys *winsys,
+ const void *data,
+ unsigned bytes)
+{
+ static const unsigned incr = sizeof(unsigned);
+ uint i;
+ const unsigned *udata = (const unsigned*)(data);
+   unsigned size = bytes/incr;
+
+   /* batch_start() returns NULL when there is not enough room left in the
+    * batch buffer for the requested dwords:
+    */
+   if (!winsys->batch_start(winsys, size, 0))
+      return FALSE;
+
+   for (i = 0; i < size; ++i) {
+ winsys->batch_dword(winsys, udata[i]);
+ }
+ winsys->batch_end(winsys);
+
+ return (i == size);
+}
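+
+/* Minimal usage sketch of the batch interface above (hypothetical winsys
+ * "sws" and buffer "buf"; CMD_FOO is a made-up command dword):
+ *
+ *    struct pipe_fence_handle *fence = NULL;
+ *    if (sws->batch_start(sws, 4, 1)) {
+ *       sws->batch_dword(sws, CMD_FOO);
+ *       sws->batch_dword(sws, 0);
+ *       sws->batch_reloc(sws, buf, BRW_BUFFER_ACCESS_READ, 0);
+ *       sws->batch_dword(sws, 0);
+ *       sws->batch_flush(sws, &fence);
+ *    }
+ */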
+#endif
diff --git a/src/gallium/drivers/i965simple/brw_wm.c b/src/gallium/drivers/i965simple/brw_wm.c
new file mode 100644
index 0000000000..10161f2d2f
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_wm.c
@@ -0,0 +1,209 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "brw_context.h"
+#include "brw_util.h"
+#include "brw_wm.h"
+#include "brw_eu.h"
+#include "brw_state.h"
+#include "util/u_memory.h"
+
+
+
+static void do_wm_prog( struct brw_context *brw,
+ struct brw_fragment_program *fp,
+ struct brw_wm_prog_key *key)
+{
+ struct brw_wm_compile *c = CALLOC_STRUCT(brw_wm_compile);
+ const unsigned *program;
+ unsigned program_size;
+
+ c->key = *key;
+ c->fp = fp;
+
+ c->delta_xy[0] = brw_null_reg();
+ c->delta_xy[1] = brw_null_reg();
+ c->pixel_xy[0] = brw_null_reg();
+ c->pixel_xy[1] = brw_null_reg();
+ c->pixel_w = brw_null_reg();
+
+
+ debug_printf("XXXXXXXX FP\n");
+
+ brw_wm_glsl_emit(c);
+
+ /* get the program
+ */
+ program = brw_get_program(&c->func, &program_size);
+
+   /* Upload the compiled program into the state cache:
+    */
+ brw->wm.prog_gs_offset = brw_upload_cache( &brw->cache[BRW_WM_PROG],
+ &c->key,
+ sizeof(c->key),
+ program,
+ program_size,
+ &c->prog_data,
+ &brw->wm.prog_data );
+
+ FREE(c);
+}
+
+
+
+static void brw_wm_populate_key( struct brw_context *brw,
+ struct brw_wm_prog_key *key )
+{
+ /* BRW_NEW_FRAGMENT_PROGRAM */
+ struct brw_fragment_program *fp =
+ (struct brw_fragment_program *)brw->attribs.FragmentProgram;
+ unsigned lookup = 0;
+ unsigned line_aa;
+
+ memset(key, 0, sizeof(*key));
+
+ /* Build the index for table lookup
+ */
+ /* BRW_NEW_DEPTH_STENCIL */
+ if (fp->info.uses_kill ||
+ brw->attribs.DepthStencil->alpha.enabled)
+ lookup |= IZ_PS_KILL_ALPHATEST_BIT;
+
+ if (fp->info.writes_z)
+ lookup |= IZ_PS_COMPUTES_DEPTH_BIT;
+
+ if (brw->attribs.DepthStencil->depth.enabled)
+ lookup |= IZ_DEPTH_TEST_ENABLE_BIT;
+
+ if (brw->attribs.DepthStencil->depth.enabled &&
+ brw->attribs.DepthStencil->depth.writemask) /* ?? */
+ lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;
+
+ if (brw->attribs.DepthStencil->stencil[0].enabled) {
+ lookup |= IZ_STENCIL_TEST_ENABLE_BIT;
+
+ if (brw->attribs.DepthStencil->stencil[0].writemask ||
+ brw->attribs.DepthStencil->stencil[1].writemask)
+ lookup |= IZ_STENCIL_WRITE_ENABLE_BIT;
+ }
+
+ /* XXX: when should this be disabled?
+ */
+ if (1)
+ lookup |= IZ_EARLY_DEPTH_TEST_BIT;
+
+
+ line_aa = AA_NEVER;
+
+ /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */
+ if (brw->attribs.Raster->line_smooth) {
+ if (brw->reduced_primitive == PIPE_PRIM_LINES) {
+ line_aa = AA_ALWAYS;
+ }
+ else if (brw->reduced_primitive == PIPE_PRIM_TRIANGLES) {
+ if (brw->attribs.Raster->fill_ccw == PIPE_POLYGON_MODE_LINE) {
+ line_aa = AA_SOMETIMES;
+
+ if (brw->attribs.Raster->fill_cw == PIPE_POLYGON_MODE_LINE ||
+ (brw->attribs.Raster->cull_mode == PIPE_WINDING_CW))
+ line_aa = AA_ALWAYS;
+ }
+ else if (brw->attribs.Raster->fill_cw == PIPE_POLYGON_MODE_LINE) {
+ line_aa = AA_SOMETIMES;
+
+ if (brw->attribs.Raster->cull_mode == PIPE_WINDING_CCW)
+ line_aa = AA_ALWAYS;
+ }
+ }
+ }
+
+ brw_wm_lookup_iz(line_aa,
+ lookup,
+ key);
+
+
+#if 0
+ /* BRW_NEW_SAMPLER
+ *
+ * Not doing any of this at the moment:
+ */
+ for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
+ const struct pipe_sampler_state *unit = brw->attribs.Samplers[i];
+
+ if (unit) {
+
+ if (unit->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) {
+ key->shadowtex_mask |= 1<<i;
+ }
+ if (t->Image[0][t->BaseLevel]->InternalFormat == GL_YCBCR_MESA)
+ key->yuvtex_mask |= 1<<i;
+ }
+ }
+#endif
+
+
+ /* Extra info:
+ */
+ key->program_string_id = fp->id;
+
+}
+
+
+static void brw_upload_wm_prog( struct brw_context *brw )
+{
+ struct brw_wm_prog_key key;
+ struct brw_fragment_program *fp = (struct brw_fragment_program *)
+ brw->attribs.FragmentProgram;
+
+ brw_wm_populate_key(brw, &key);
+
+ /* Make an early check for the key.
+ */
+ if (brw_search_cache(&brw->cache[BRW_WM_PROG],
+ &key, sizeof(key),
+ &brw->wm.prog_data,
+ &brw->wm.prog_gs_offset))
+ return;
+
+ do_wm_prog(brw, fp, &key);
+}
+
+
+const struct brw_tracked_state brw_wm_prog = {
+ .dirty = {
+ .brw = (BRW_NEW_FS |
+ BRW_NEW_REDUCED_PRIMITIVE),
+ .cache = 0
+ },
+ .update = brw_upload_wm_prog
+};
+
diff --git a/src/gallium/drivers/i965simple/brw_wm.h b/src/gallium/drivers/i965simple/brw_wm.h
new file mode 100644
index 0000000000..b29c4393f0
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_wm.h
@@ -0,0 +1,142 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#ifndef BRW_WM_H
+#define BRW_WM_H
+
+
+#include "brw_context.h"
+#include "brw_eu.h"
+
+/* A big lookup table is used to figure out which and how many
+ * additional regs will inserted before the main payload in the WM
+ * program execution. These mainly relate to depth and stencil
+ * processing and the early-depth-test optimization.
+ */
+#define IZ_PS_KILL_ALPHATEST_BIT 0x1
+#define IZ_PS_COMPUTES_DEPTH_BIT 0x2
+#define IZ_DEPTH_WRITE_ENABLE_BIT 0x4
+#define IZ_DEPTH_TEST_ENABLE_BIT 0x8
+#define IZ_STENCIL_WRITE_ENABLE_BIT 0x10
+#define IZ_STENCIL_TEST_ENABLE_BIT 0x20
+#define IZ_EARLY_DEPTH_TEST_BIT 0x40
+#define IZ_BIT_MAX 0x80
+
+#define AA_NEVER 0
+#define AA_SOMETIMES 1
+#define AA_ALWAYS 2
+
+struct brw_wm_prog_key {
+ unsigned source_depth_reg:3;
+ unsigned aa_dest_stencil_reg:3;
+ unsigned dest_depth_reg:3;
+ unsigned nr_depth_regs:3;
+ unsigned shadowtex_mask:8;
+ unsigned computes_depth:1; /* could be derived from program string */
+ unsigned source_depth_to_render_target:1;
+ unsigned runtime_check_aads_emit:1;
+
+ unsigned yuvtex_mask:8;
+
+ unsigned program_string_id;
+};
+
+
+
+
+
+#define PROGRAM_INTERNAL_PARAM
+#define MAX_NV_FRAGMENT_PROGRAM_INSTRUCTIONS 1024 /* 72 for GL_ARB_f_p */
+#define BRW_WM_MAX_INSN (MAX_NV_FRAGMENT_PROGRAM_INSTRUCTIONS*3 + PIPE_MAX_ATTRIBS + 3)
+#define BRW_WM_MAX_GRF 128 /* hardware limit */
+#define BRW_WM_MAX_VREG (BRW_WM_MAX_INSN * 4)
+#define BRW_WM_MAX_REF (BRW_WM_MAX_INSN * 12)
+#define BRW_WM_MAX_PARAM 256
+#define BRW_WM_MAX_CONST 256
+#define BRW_WM_MAX_KILLS MAX_NV_FRAGMENT_PROGRAM_INSTRUCTIONS
+
+#define PAYLOAD_DEPTH (PIPE_MAX_ATTRIBS)
+
+#define MAX_IFSN 32
+#define MAX_LOOP_DEPTH 32
+
+struct brw_wm_compile {
+ struct brw_compile func;
+ struct brw_wm_prog_key key;
+ struct brw_wm_prog_data prog_data; /* result */
+
+ struct brw_fragment_program *fp;
+
+ unsigned grf_limit;
+ unsigned max_wm_grf;
+
+
+ struct brw_reg pixel_xy[2];
+ struct brw_reg delta_xy[2];
+ struct brw_reg pixel_w;
+
+
+ struct brw_reg wm_regs[8][32][4];
+
+ struct brw_reg payload_depth[4];
+ struct brw_reg payload_coef[16];
+
+ struct brw_reg emit_mask_reg;
+
+ struct brw_instruction *if_inst[MAX_IFSN];
+ int if_insn;
+
+ struct brw_instruction *loop_inst[MAX_LOOP_DEPTH];
+ int loop_insn;
+
+ struct brw_instruction *inst0;
+ struct brw_instruction *inst1;
+
+ struct brw_reg stack;
+ struct brw_indirect stack_index;
+
+ unsigned reg_index;
+
+ unsigned tmp_start;
+ unsigned tmp_index;
+};
+
+
+
+void brw_wm_lookup_iz( unsigned line_aa,
+ unsigned lookup,
+ struct brw_wm_prog_key *key );
+
+void brw_wm_glsl_emit(struct brw_wm_compile *c);
+void brw_wm_emit_decls(struct brw_wm_compile *c);
+
+#endif
diff --git a/src/gallium/drivers/i965simple/brw_wm_decl.c b/src/gallium/drivers/i965simple/brw_wm_decl.c
new file mode 100644
index 0000000000..d50e66f613
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_wm_decl.c
@@ -0,0 +1,392 @@
+
+#include "brw_context.h"
+#include "brw_eu.h"
+#include "brw_wm.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+
+static struct brw_reg alloc_tmp(struct brw_wm_compile *c)
+{
+ c->tmp_index++;
+ c->reg_index = MAX2(c->reg_index, c->tmp_start + c->tmp_index);
+ return brw_vec8_grf(c->tmp_start + c->tmp_index, 0);
+}
+
+static void release_tmps(struct brw_wm_compile *c)
+{
+ c->tmp_index = 0;
+}
+
+
+
+static int is_null( struct brw_reg reg )
+{
+ return (reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
+ reg.nr == BRW_ARF_NULL);
+}
+
+static void emit_pixel_xy( struct brw_wm_compile *c )
+{
+ if (is_null(c->pixel_xy[0])) {
+
+ struct brw_compile *p = &c->func;
+ struct brw_reg r1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);
+
+ c->pixel_xy[0] = vec8(retype(alloc_tmp(c), BRW_REGISTER_TYPE_UW));
+ c->pixel_xy[1] = vec8(retype(alloc_tmp(c), BRW_REGISTER_TYPE_UW));
+
+ /* Calculate pixel centers by adding 1 or 0 to each of the
+ * micro-tile coordinates passed in r1.
+ */
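+ /* Note: brw_imm_v() packs eight signed 4-bit values into one immediate,
+ * so 0x10101010 supplies the per-channel X offsets {0,1,0,1,...} and
+ * 0x11001100 below supplies the Y offsets {0,0,1,1,...}, i.e. the pixel
+ * positions within each 2x2 subspan.
+ */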
+ brw_ADD(p,
+ c->pixel_xy[0],
+ stride(suboffset(r1_uw, 4), 2, 4, 0),
+ brw_imm_v(0x10101010));
+
+ brw_ADD(p,
+ c->pixel_xy[1],
+ stride(suboffset(r1_uw, 5), 2, 4, 0),
+ brw_imm_v(0x11001100));
+ }
+}
+
+
+
+
+
+
+static void emit_delta_xy( struct brw_wm_compile *c )
+{
+ if (is_null(c->delta_xy[0])) {
+ struct brw_compile *p = &c->func;
+ struct brw_reg r1 = brw_vec1_grf(1, 0);
+
+ emit_pixel_xy(c);
+
+ c->delta_xy[0] = alloc_tmp(c);
+ c->delta_xy[1] = alloc_tmp(c);
+
+ /* Calc delta X,Y by subtracting origin in r1 from the pixel
+ * centers.
+ */
+ brw_ADD(p,
+ c->delta_xy[0],
+ retype(c->pixel_xy[0], BRW_REGISTER_TYPE_UW),
+ negate(r1));
+
+ brw_ADD(p,
+ c->delta_xy[1],
+ retype(c->pixel_xy[1], BRW_REGISTER_TYPE_UW),
+ negate(suboffset(r1,1)));
+ }
+}
+
+
+
+#if 0
+static void emit_pixel_w( struct brw_wm_compile *c )
+{
+ if (is_null(c->pixel_w)) {
+ struct brw_compile *p = &c->func;
+
+ struct brw_reg interp_wpos = c->coef_wpos;
+
+ c->pixel_w = alloc_tmp(c);
+
+ emit_delta_xy(c);
+
+ /* Calc 1/w - just linterp wpos[3] optimized by putting the
+ * result straight into a message reg.
+ */
+ struct brw_reg interp3 = brw_vec1_grf(interp_wpos.nr+1, 4);
+ brw_LINE(p, brw_null_reg(), interp3, c->delta_xy[0]);
+ brw_MAC(p, brw_message_reg(2), suboffset(interp3, 1), c->delta_xy[1]);
+
+ /* Calc w */
+ brw_math_16( p,
+ c->pixel_w,
+ BRW_MATH_FUNCTION_INV,
+ BRW_MATH_SATURATE_NONE,
+ 2,
+ brw_null_reg(),
+ BRW_MATH_PRECISION_FULL);
+ }
+}
+#endif
+
+
+static void emit_cinterp(struct brw_wm_compile *c,
+ int idx,
+ int mask )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg interp[4];
+ struct brw_reg coef = c->payload_coef[idx];
+ int i;
+
+ interp[0] = brw_vec1_grf(coef.nr, 0);
+ interp[1] = brw_vec1_grf(coef.nr, 4);
+ interp[2] = brw_vec1_grf(coef.nr+1, 0);
+ interp[3] = brw_vec1_grf(coef.nr+1, 4);
+
+ for(i = 0; i < 4; i++ ) {
+ if (mask & (1<<i)) {
+ struct brw_reg dst = c->wm_regs[TGSI_FILE_INPUT][idx][i];
+ brw_MOV(p, dst, suboffset(interp[i],3));
+ }
+ }
+}
+
+static void emit_linterp(struct brw_wm_compile *c,
+ int idx,
+ int mask )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg interp[4];
+ struct brw_reg coef = c->payload_coef[idx];
+ int i;
+
+ emit_delta_xy(c);
+
+ interp[0] = brw_vec1_grf(coef.nr, 0);
+ interp[1] = brw_vec1_grf(coef.nr, 4);
+ interp[2] = brw_vec1_grf(coef.nr+1, 0);
+ interp[3] = brw_vec1_grf(coef.nr+1, 4);
+
+ for(i = 0; i < 4; i++ ) {
+ if (mask & (1<<i)) {
+ struct brw_reg dst = c->wm_regs[TGSI_FILE_INPUT][idx][i];
+ brw_LINE(p, brw_null_reg(), interp[i], c->delta_xy[0]);
+ brw_MAC(p, dst, suboffset(interp[i],1), c->delta_xy[1]);
+ }
+ }
+}
+
+#if 0
+static void emit_pinterp(struct brw_wm_compile *c,
+ int idx,
+ int mask )
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg interp[4];
+ struct brw_reg coef = c->payload_coef[idx];
+ int i;
+
+ get_delta_xy(c);
+ get_pixel_w(c);
+
+ interp[0] = brw_vec1_grf(coef.nr, 0);
+ interp[1] = brw_vec1_grf(coef.nr, 4);
+ interp[2] = brw_vec1_grf(coef.nr+1, 0);
+ interp[3] = brw_vec1_grf(coef.nr+1, 4);
+
+ for(i = 0; i < 4; i++ ) {
+ if (mask & (1<<i)) {
+ struct brw_reg dst = allocate_reg(c, TGSI_FILE_INPUT, idx, i);
+ brw_LINE(p, brw_null_reg(), interp[i], c->delta_xy[0]);
+ brw_MAC(p, dst, suboffset(interp[i],1), c->delta_xy[1]);
+ brw_MUL(p, dst, dst, c->pixel_w);
+ }
+ }
+}
+#endif
+
+
+
+#if 0
+static void emit_wpos( )
+{
+ struct prog_dst_register dst = dst_reg(PROGRAM_INPUT, idx);
+ struct tgsi_full_src_register interp = src_reg(PROGRAM_PAYLOAD, idx);
+ struct tgsi_full_src_register deltas = get_delta_xy(c);
+ struct tgsi_full_src_register arg2;
+ unsigned opcode;
+
+ opcode = WM_LINTERP;
+ arg2 = src_undef();
+
+ /* Have to treat wpos.xy specially:
+ */
+ emit_op(c,
+ WM_WPOSXY,
+ dst_mask(dst, WRITEMASK_XY),
+ 0, 0, 0,
+ get_pixel_xy(c),
+ src_undef(),
+ src_undef());
+
+ dst = dst_mask(dst, WRITEMASK_ZW);
+
+ /* PROGRAM_INPUT.attr.xyzw = INTERP payload.interp[attr].x, deltas.xyw
+ */
+ emit_op(c,
+ WM_LINTERP,
+ dst,
+ 0, 0, 0,
+ interp,
+ deltas,
+ arg2);
+}
+#endif
+
+
+
+
+/* Perform register allocation:
+ *
+ * -- r0???
+ * -- passthrough depth regs (and stencil/aa??)
+ * -- curbe ??
+ * -- inputs (coefficients)
+ *
+ * Use a totally static register allocation. This will perform poorly
+ * but is an easy way to get started (again).
+ */
+static void prealloc_reg(struct brw_wm_compile *c)
+{
+ int i, j;
+ int nr_curbe_regs = 0;
+
+ /* R0, then some depth related regs:
+ */
+ for (i = 0; i < c->key.nr_depth_regs; i++) {
+ c->payload_depth[i] = brw_vec8_grf(i*2, 0);
+ c->reg_index += 2;
+ }
+
+
+ /* Then a copy of our part of the CURBE entry:
+ */
+ {
+ int nr_constants = c->fp->info.file_max[TGSI_FILE_CONSTANT] + 1;
+ int index = 0;
+
+ /* XXX number of constants, or highest numbered constant? */
+ assert(nr_constants == c->fp->info.file_count[TGSI_FILE_CONSTANT]);
+
+ c->prog_data.max_const = 4*nr_constants;
+ for (i = 0; i < nr_constants; i++) {
+ for (j = 0; j < 4; j++, index++)
+ c->wm_regs[TGSI_FILE_CONSTANT][i][j] = brw_vec1_grf(c->reg_index + index/8,
+ index%8);
+ }
+
+ nr_curbe_regs = 2*((4*nr_constants+15)/16);
+ c->reg_index += nr_curbe_regs;
+ }
+
+ /* Adjust for parameter coefficients for position, which are
+ * currently always provided.
+ */
+// c->position_coef[i] = brw_vec8_grf(c->reg_index, 0);
+ c->reg_index += 2;
+
+ /* Next we receive the plane coefficients for parameter
+ * interpolation:
+ */
+ assert(c->fp->info.file_max[TGSI_FILE_INPUT] + 1 == c->fp->info.num_inputs);
+ for (i = 0; i < c->fp->info.file_max[TGSI_FILE_INPUT] + 1; i++) {
+ c->payload_coef[i] = brw_vec8_grf(c->reg_index, 0);
+ c->reg_index += 2;
+ }
+
+ c->prog_data.first_curbe_grf = c->key.nr_depth_regs * 2;
+ c->prog_data.urb_read_length = (c->fp->info.num_inputs + 1) * 2;
+ c->prog_data.curb_read_length = nr_curbe_regs;
+
+ /* That's the end of the payload, now we can start allocating registers.
+ */
+ c->emit_mask_reg = brw_uw1_reg(BRW_GENERAL_REGISTER_FILE, c->reg_index, 0);
+ c->reg_index++;
+
+ c->stack = brw_uw16_reg(BRW_GENERAL_REGISTER_FILE, c->reg_index, 0);
+ c->reg_index += 2;
+
+ /* Now allocate room for the interpolated inputs and staging
+ * registers for the outputs:
+ */
+ /* XXX do we want to loop over the _number_ of inputs/outputs or loop
+ * to the highest input/output index that's used?
+ * Probably the same, actually.
+ */
+ assert(c->fp->info.file_max[TGSI_FILE_INPUT] + 1 == c->fp->info.num_inputs);
+ assert(c->fp->info.file_max[TGSI_FILE_OUTPUT] + 1 == c->fp->info.num_outputs);
+ for (i = 0; i < c->fp->info.file_max[TGSI_FILE_INPUT] + 1; i++)
+ for (j = 0; j < 4; j++)
+ c->wm_regs[TGSI_FILE_INPUT][i][j] = brw_vec8_grf( c->reg_index++, 0 );
+
+ for (i = 0; i < c->fp->info.file_max[TGSI_FILE_OUTPUT] + 1; i++)
+ for (j = 0; j < 4; j++)
+ c->wm_regs[TGSI_FILE_OUTPUT][i][j] = brw_vec8_grf( c->reg_index++, 0 );
+
+ /* Beyond this we should only need registers for internal temporaries:
+ */
+ c->tmp_start = c->reg_index;
+}
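+
+/* Editor's illustration (not driver code) of the static layout produced
+ * above for a hypothetical shader with nr_depth_regs == 1, two constant
+ * vec4s and one input attribute:
+ *
+ *   g0..g1    r0 + depth payload             (reg_index = 2)
+ *   g2..g3    curbe copy of the constants    (reg_index = 4)
+ *   g4..g5    position coefficients          (reg_index = 6)
+ *   g6..g7    input 0 plane coefficients     (reg_index = 8)
+ *   g8        emit_mask_reg                  (reg_index = 9)
+ *   g9..g10   stack                          (reg_index = 11)
+ *   g11..g14  interpolated input 0 (x,y,z,w)
+ *   g15..g18  staged output 0 (x,y,z,w)
+ *   g19...    temporaries (tmp_start = 19)
+ */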
+
+
+
+
+
+/* Need to interpolate fragment program inputs in as a preamble to the
+ * shader. A more sophisticated compiler would do this on demand, but
+ * we'll do it up front:
+ */
+void brw_wm_emit_decls(struct brw_wm_compile *c)
+{
+ struct tgsi_parse_context parse;
+ int done = 0;
+
+ prealloc_reg(c);
+
+ tgsi_parse_init( &parse, c->fp->program.tokens );
+
+ while( !done &&
+ !tgsi_parse_end_of_tokens( &parse ) )
+ {
+ tgsi_parse_token( &parse );
+
+ switch( parse.FullToken.Token.Type ) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ {
+ const struct tgsi_full_declaration *decl = &parse.FullToken.FullDeclaration;
+ unsigned first = decl->DeclarationRange.First;
+ unsigned last = decl->DeclarationRange.Last;
+ unsigned mask = decl->Declaration.UsageMask; /* ? */
+ unsigned i;
+
+ if (decl->Declaration.File != TGSI_FILE_INPUT)
+ break;
+
+ for( i = first; i <= last; i++ ) {
+ switch (decl->Declaration.Interpolate) {
+ case TGSI_INTERPOLATE_CONSTANT:
+ emit_cinterp(c, i, mask);
+ break;
+
+ case TGSI_INTERPOLATE_LINEAR:
+ emit_linterp(c, i, mask);
+ break;
+
+ case TGSI_INTERPOLATE_PERSPECTIVE:
+ //emit_pinterp(c, i, mask);
+ emit_linterp(c, i, mask);
+ break;
+ }
+ }
+ break;
+ }
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ default:
+ done = 1;
+ break;
+ }
+ }
+
+ tgsi_parse_free (&parse);
+
+ release_tmps(c);
+}
diff --git a/src/gallium/drivers/i965simple/brw_wm_glsl.c b/src/gallium/drivers/i965simple/brw_wm_glsl.c
new file mode 100644
index 0000000000..db75963932
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_wm_glsl.c
@@ -0,0 +1,1076 @@
+
+#include "brw_context.h"
+#include "brw_eu.h"
+#include "brw_wm.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+
+
+
+static int get_scalar_dst_index(struct tgsi_full_instruction *inst)
+{
+ struct tgsi_dst_register dst = inst->FullDstRegisters[0].DstRegister;
+ int i;
+ for (i = 0; i < 4; i++)
+ if (dst.WriteMask & (1<<i))
+ break;
+ return i;
+}
+
+static struct brw_reg alloc_tmp(struct brw_wm_compile *c)
+{
+ c->tmp_index++;
+ c->reg_index = MAX2(c->reg_index, c->tmp_start + c->tmp_index); /* count from tmp_start so total_grf covers temporaries */
+ return brw_vec8_grf(c->tmp_start + c->tmp_index, 0);
+}
+
+static void release_tmps(struct brw_wm_compile *c)
+{
+ c->tmp_index = 0;
+}
+
+
+static struct brw_reg
+get_reg(struct brw_wm_compile *c, int file, int index, int component )
+{
+ switch (file) {
+ case TGSI_FILE_NULL:
+ return brw_null_reg();
+
+ case TGSI_FILE_SAMPLER:
+ /* Should never get here:
+ */
+ assert (0);
+ return brw_null_reg();
+
+ case TGSI_FILE_IMMEDIATE:
+ /* These need a different path:
+ */
+ assert(0);
+ return brw_null_reg();
+
+
+ case TGSI_FILE_CONSTANT:
+ case TGSI_FILE_INPUT:
+ case TGSI_FILE_OUTPUT:
+ case TGSI_FILE_TEMPORARY:
+ case TGSI_FILE_ADDRESS:
+ return c->wm_regs[file][index][component];
+
+ default:
+ assert(0);
+ return brw_null_reg();
+ }
+}
+
+
+static struct brw_reg get_dst_reg(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst,
+ int component)
+{
+ return get_reg(c,
+ inst->FullDstRegisters[0].DstRegister.File,
+ inst->FullDstRegisters[0].DstRegister.Index,
+ component);
+}
+
+static int get_swz( struct tgsi_src_register src, int index )
+{
+ switch (index & 3) {
+ case 0: return src.SwizzleX;
+ case 1: return src.SwizzleY;
+ case 2: return src.SwizzleZ;
+ case 3: return src.SwizzleW;
+ default: return 0;
+ }
+}
+
+static int get_ext_swz( struct tgsi_src_register_ext_swz src, int index )
+{
+ switch (index & 3) {
+ case 0: return src.ExtSwizzleX;
+ case 1: return src.ExtSwizzleY;
+ case 2: return src.ExtSwizzleZ;
+ case 3: return src.ExtSwizzleW;
+ default: return 0;
+ }
+}
+
+static struct brw_reg get_src_reg(struct brw_wm_compile *c,
+ struct tgsi_full_src_register *src,
+ int index)
+{
+ struct brw_reg reg;
+ int component = index;
+ int neg = 0;
+ int abs = 0;
+
+ if (src->SrcRegister.Negate)
+ neg = 1;
+
+ component = get_swz(src->SrcRegister, component);
+
+ /* Yes, there are multiple negates:
+ */
+ switch (component & 3) {
+ case 0: neg ^= src->SrcRegisterExtSwz.NegateX; break;
+ case 1: neg ^= src->SrcRegisterExtSwz.NegateY; break;
+ case 2: neg ^= src->SrcRegisterExtSwz.NegateZ; break;
+ case 3: neg ^= src->SrcRegisterExtSwz.NegateW; break;
+ }
+
+ /* And multiple swizzles, fun isn't it:
+ */
+ component = get_ext_swz(src->SrcRegisterExtSwz, component);
+
+ /* Not handling indirect lookups yet:
+ */
+ assert(src->SrcRegister.Indirect == 0);
+
+ /* Don't know what dimension means:
+ */
+ assert(src->SrcRegister.Dimension == 0);
+
+ /* Will never handle any of this stuff:
+ */
+ assert(src->SrcRegisterExtMod.Complement == 0);
+ assert(src->SrcRegisterExtMod.Bias == 0);
+ assert(src->SrcRegisterExtMod.Scale2X == 0);
+
+ if (src->SrcRegisterExtMod.Absolute)
+ abs = 1;
+
+ /* Another negate! This is a post-absolute negate, which we
+ * can't do. Need to clean the crap out of tgsi somehow.
+ */
+ assert(src->SrcRegisterExtMod.Negate == 0);
+
+ switch( component ) {
+ case TGSI_EXTSWIZZLE_X:
+ case TGSI_EXTSWIZZLE_Y:
+ case TGSI_EXTSWIZZLE_Z:
+ case TGSI_EXTSWIZZLE_W:
+ reg = get_reg(c,
+ src->SrcRegister.File,
+ src->SrcRegister.Index,
+ component );
+
+ if (neg)
+ reg = negate(reg);
+
+ if (abs)
+ reg = brw_abs(reg);
+
+ break;
+
+ /* XXX: this won't really work in the general case, but we know
+ * that the extended swizzle is only allowed in the SWZ
+ * instruction (right??), in which case using an immediate
+ * directly will work.
+ */
+ case TGSI_EXTSWIZZLE_ZERO:
+ reg = brw_imm_f(0);
+ break;
+
+ case TGSI_EXTSWIZZLE_ONE:
+ if (neg && !abs)
+ reg = brw_imm_f(-1.0);
+ else
+ reg = brw_imm_f(1.0);
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+
+
+ return reg;
+}
+
+static void emit_abs( struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst)
+{
+ unsigned mask = inst->FullDstRegisters[0].DstRegister.WriteMask;
+
+ int i;
+ struct brw_compile *p = &c->func;
+ brw_set_saturate(p, inst->Instruction.Saturate != TGSI_SAT_NONE);
+ for (i = 0; i < 4; i++) {
+ if (mask & (1<<i)) {
+ struct brw_reg src, dst;
+ dst = get_dst_reg(c, inst, i);
+ src = get_src_reg(c, &inst->FullSrcRegisters[0], i);
+ brw_MOV(p, dst, brw_abs(src)); /* NOTE */
+ }
+ }
+ brw_set_saturate(p, 0);
+}
+
+
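+/* Cross product: dst.i = src0[i1]*src1[i2] - src0[i2]*src1[i1].  The
+ * subtraction is done by MUL-ing the negated term into the accumulator
+ * and then MAC-ing the positive term into dst.
+ */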
+static void emit_xpd(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst)
+{
+ int i;
+ struct brw_compile *p = &c->func;
+ unsigned mask = inst->FullDstRegisters[0].DstRegister.WriteMask;
+ for (i = 0; i < 4; i++) {
+ unsigned i2 = (i+2)%3;
+ unsigned i1 = (i+1)%3;
+ if (mask & (1<<i)) {
+ struct brw_reg src0, src1, dst;
+ dst = get_dst_reg(c, inst, i);
+ src0 = negate(get_src_reg(c, &inst->FullSrcRegisters[0], i2));
+ src1 = get_src_reg(c, &inst->FullSrcRegisters[1], i1);
+ brw_MUL(p, brw_null_reg(), src0, src1);
+ src0 = get_src_reg(c, &inst->FullSrcRegisters[0], i1);
+ src1 = get_src_reg(c, &inst->FullSrcRegisters[1], i2);
+ brw_set_saturate(p, inst->Instruction.Saturate != TGSI_SAT_NONE);
+ brw_MAC(p, dst, src0, src1);
+ brw_set_saturate(p, 0);
+ }
+ }
+ brw_set_saturate(p, 0);
+}
+
+static void emit_dp3(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst)
+{
+ struct brw_reg src0[3], src1[3], dst;
+ int i;
+ struct brw_compile *p = &c->func;
+ for (i = 0; i < 3; i++) {
+ src0[i] = get_src_reg(c, &inst->FullSrcRegisters[0], i);
+ src1[i] = get_src_reg(c, &inst->FullSrcRegisters[1], i);
+ }
+
+ dst = get_dst_reg(c, inst, get_scalar_dst_index(inst));
+ brw_MUL(p, brw_null_reg(), src0[0], src1[0]);
+ brw_MAC(p, brw_null_reg(), src0[1], src1[1]);
+ brw_set_saturate(p, (inst->Instruction.Saturate != TGSI_SAT_NONE) ? 1 : 0);
+ brw_MAC(p, dst, src0[2], src1[2]);
+ brw_set_saturate(p, 0);
+}
+
+static void emit_dp4(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst)
+{
+ struct brw_reg src0[4], src1[4], dst;
+ int i;
+ struct brw_compile *p = &c->func;
+ for (i = 0; i < 4; i++) {
+ src0[i] = get_src_reg(c, &inst->FullSrcRegisters[0], i);
+ src1[i] = get_src_reg(c, &inst->FullSrcRegisters[1], i);
+ }
+ dst = get_dst_reg(c, inst, get_scalar_dst_index(inst));
+ brw_MUL(p, brw_null_reg(), src0[0], src1[0]);
+ brw_MAC(p, brw_null_reg(), src0[1], src1[1]);
+ brw_MAC(p, brw_null_reg(), src0[2], src1[2]);
+ brw_set_saturate(p, (inst->Instruction.Saturate != TGSI_SAT_NONE) ? 1 : 0);
+ brw_MAC(p, dst, src0[3], src1[3]);
+ brw_set_saturate(p, 0);
+}
+
+static void emit_dph(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst)
+{
+ struct brw_reg src0[4], src1[4], dst;
+ int i;
+ struct brw_compile *p = &c->func;
+ for (i = 0; i < 4; i++) {
+ src0[i] = get_src_reg(c, &inst->FullSrcRegisters[0], i);
+ src1[i] = get_src_reg(c, &inst->FullSrcRegisters[1], i);
+ }
+ dst = get_dst_reg(c, inst, get_scalar_dst_index(inst));
+ brw_MUL(p, brw_null_reg(), src0[0], src1[0]);
+ brw_MAC(p, brw_null_reg(), src0[1], src1[1]);
+ brw_MAC(p, dst, src0[2], src1[2]);
+ brw_set_saturate(p, (inst->Instruction.Saturate != TGSI_SAT_NONE) ? 1 : 0);
+ brw_ADD(p, dst, dst, src1[3]); /* DPH = dot(src0.xyz, src1.xyz) + src1.w */
+ brw_set_saturate(p, 0);
+}
+
+static void emit_math1(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst, unsigned func)
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg src0, dst;
+
+ src0 = get_src_reg(c, &inst->FullSrcRegisters[0], 0);
+ dst = get_dst_reg(c, inst, get_scalar_dst_index(inst));
+ brw_MOV(p, brw_message_reg(2), src0);
+ brw_math(p,
+ dst,
+ func,
+ ((inst->Instruction.Saturate != TGSI_SAT_NONE)
+ ? BRW_MATH_SATURATE_SATURATE
+ : BRW_MATH_SATURATE_NONE),
+ 2,
+ brw_null_reg(),
+ BRW_MATH_DATA_VECTOR,
+ BRW_MATH_PRECISION_FULL);
+}
+
+
+static void emit_alu2(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst,
+ unsigned opcode)
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg src0, src1, dst;
+ unsigned mask = inst->FullDstRegisters[0].DstRegister.WriteMask;
+ int i;
+ brw_set_saturate(p, (inst->Instruction.Saturate != TGSI_SAT_NONE) ? 1 : 0);
+ for (i = 0 ; i < 4; i++) {
+ if (mask & (1<<i)) {
+ dst = get_dst_reg(c, inst, i);
+ src0 = get_src_reg(c, &inst->FullSrcRegisters[0], i);
+ src1 = get_src_reg(c, &inst->FullSrcRegisters[1], i);
+ brw_alu2(p, opcode, dst, src0, src1);
+ }
+ }
+ brw_set_saturate(p, 0);
+}
+
+
+static void emit_alu1(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst,
+ unsigned opcode)
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg src0, dst;
+ unsigned mask = inst->FullDstRegisters[0].DstRegister.WriteMask;
+ int i;
+ brw_set_saturate(p, (inst->Instruction.Saturate != TGSI_SAT_NONE) ? 1 : 0);
+ for (i = 0 ; i < 4; i++) {
+ if (mask & (1<<i)) {
+ dst = get_dst_reg(c, inst, i);
+ src0 = get_src_reg(c, &inst->FullSrcRegisters[0], i);
+ brw_alu1(p, opcode, dst, src0);
+ }
+ }
+ if (inst->Instruction.Saturate != TGSI_SAT_NONE)
+ brw_set_saturate(p, 0);
+}
+
+
+static void emit_max(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst)
+{
+ struct brw_compile *p = &c->func;
+ unsigned mask = inst->FullDstRegisters[0].DstRegister.WriteMask;
+ struct brw_reg src0, src1, dst;
+ int i;
+ brw_push_insn_state(p);
+ for (i = 0; i < 4; i++) {
+ if (mask & (1<<i)) {
+ dst = get_dst_reg(c, inst, i);
+ src0 = get_src_reg(c, &inst->FullSrcRegisters[0], i);
+ src1 = get_src_reg(c, &inst->FullSrcRegisters[1], i);
+ brw_set_saturate(p, (inst->Instruction.Saturate != TGSI_SAT_NONE) ? 1 : 0);
+ brw_MOV(p, dst, src0);
+ brw_set_saturate(p, 0);
+
+ brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, src0, src1);
+ brw_set_saturate(p, (inst->Instruction.Saturate != TGSI_SAT_NONE) ? 1 : 0);
+ brw_set_predicate_control(p, BRW_PREDICATE_NORMAL);
+ brw_MOV(p, dst, src1);
+ brw_set_saturate(p, 0);
+ brw_set_predicate_control_flag_value(p, 0xff);
+ }
+ }
+ brw_pop_insn_state(p);
+}
+
+static void emit_min(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst)
+{
+ struct brw_compile *p = &c->func;
+ unsigned mask = inst->FullDstRegisters[0].DstRegister.WriteMask;
+ struct brw_reg src0, src1, dst;
+ int i;
+ brw_push_insn_state(p);
+ for (i = 0; i < 4; i++) {
+ if (mask & (1<<i)) {
+ dst = get_dst_reg(c, inst, i);
+ src0 = get_src_reg(c, &inst->FullSrcRegisters[0], i);
+ src1 = get_src_reg(c, &inst->FullSrcRegisters[1], i);
+ brw_set_saturate(p, (inst->Instruction.Saturate != TGSI_SAT_NONE) ? 1 : 0);
+ brw_MOV(p, dst, src0);
+ brw_set_saturate(p, 0);
+
+ brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, src1, src0);
+ brw_set_saturate(p, (inst->Instruction.Saturate != TGSI_SAT_NONE) ? 1 : 0);
+ brw_set_predicate_control(p, BRW_PREDICATE_NORMAL);
+ brw_MOV(p, dst, src1);
+ brw_set_saturate(p, 0);
+ brw_set_predicate_control_flag_value(p, 0xff);
+ }
+ }
+ brw_pop_insn_state(p);
+}
+
+static void emit_pow(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst)
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg dst, src0, src1;
+ dst = get_dst_reg(c, inst, get_scalar_dst_index(inst));
+ src0 = get_src_reg(c, &inst->FullSrcRegisters[0], 0);
+ src1 = get_src_reg(c, &inst->FullSrcRegisters[1], 0);
+
+ brw_MOV(p, brw_message_reg(2), src0);
+ brw_MOV(p, brw_message_reg(3), src1);
+
+ brw_math(p,
+ dst,
+ BRW_MATH_FUNCTION_POW,
+ (inst->Instruction.Saturate != TGSI_SAT_NONE
+ ? BRW_MATH_SATURATE_SATURATE
+ : BRW_MATH_SATURATE_NONE),
+ 2,
+ brw_null_reg(),
+ BRW_MATH_DATA_VECTOR,
+ BRW_MATH_PRECISION_FULL);
+}
+
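+/* LRP: dst = src0 * src1 + (1 - src0) * src2, per enabled channel.  The
+ * temporaries below guard against src1/src2 living in the same GRF as
+ * dst, since dst is overwritten before they are read.
+ */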
+static void emit_lrp(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst)
+{
+ struct brw_compile *p = &c->func;
+ unsigned mask = inst->FullDstRegisters[0].DstRegister.WriteMask;
+ struct brw_reg dst, tmp1, tmp2, src0, src1, src2;
+ int i;
+ for (i = 0; i < 4; i++) {
+ if (mask & (1<<i)) {
+ dst = get_dst_reg(c, inst, i);
+ src0 = get_src_reg(c, &inst->FullSrcRegisters[0], i);
+
+ src1 = get_src_reg(c, &inst->FullSrcRegisters[1], i);
+
+ if (src1.nr == dst.nr) {
+ tmp1 = alloc_tmp(c);
+ brw_MOV(p, tmp1, src1);
+ } else
+ tmp1 = src1;
+
+ src2 = get_src_reg(c, &inst->FullSrcRegisters[2], i);
+ if (src2.nr == dst.nr) {
+ tmp2 = alloc_tmp(c);
+ brw_MOV(p, tmp2, src2);
+ } else
+ tmp2 = src2;
+
+ brw_ADD(p, dst, negate(src0), brw_imm_f(1.0));
+ brw_MUL(p, brw_null_reg(), dst, tmp2);
+ brw_set_saturate(p, (inst->Instruction.Saturate != TGSI_SAT_NONE) ? 1 : 0);
+ brw_MAC(p, dst, src0, tmp1);
+ brw_set_saturate(p, 0);
+ }
+ release_tmps(c);
+ }
+}
+
+static void emit_kil(struct brw_wm_compile *c)
+{
+ struct brw_compile *p = &c->func;
+ struct brw_reg depth = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);
+ brw_push_insn_state(p);
+ brw_set_mask_control(p, BRW_MASK_DISABLE);
+ brw_NOT(p, c->emit_mask_reg, brw_mask_reg(1)); //IMASK
+ brw_AND(p, depth, c->emit_mask_reg, depth);
+ brw_pop_insn_state(p);
+}
+
+static void emit_mad(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst)
+{
+ struct brw_compile *p = &c->func;
+ unsigned mask = inst->FullDstRegisters[0].DstRegister.WriteMask;
+ struct brw_reg dst, src0, src1, src2;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (mask & (1<<i)) {
+ dst = get_dst_reg(c, inst, i);
+ src0 = get_src_reg(c, &inst->FullSrcRegisters[0], i);
+ src1 = get_src_reg(c, &inst->FullSrcRegisters[1], i);
+ src2 = get_src_reg(c, &inst->FullSrcRegisters[2], i);
+ brw_MUL(p, dst, src0, src1);
+
+ brw_set_saturate(p, (inst->Instruction.Saturate != TGSI_SAT_NONE) ? 1 : 0);
+ brw_ADD(p, dst, dst, src2);
+ brw_set_saturate(p, 0);
+ }
+ }
+}
+
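+/* SLT/SLE/SGT/SGE/SEQ/SNE: compare src0 against src1, write 0.0 to every
+ * enabled channel, then use the predicate to overwrite the channels that
+ * passed the comparison with 1.0.
+ */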
+static void emit_sop(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst, unsigned cond)
+{
+ struct brw_compile *p = &c->func;
+ unsigned mask = inst->FullDstRegisters[0].DstRegister.WriteMask;
+ struct brw_reg dst, src0, src1;
+ int i;
+
+ brw_push_insn_state(p);
+ for (i = 0; i < 4; i++) {
+ if (mask & (1<<i)) {
+ dst = get_dst_reg(c, inst, i);
+ src0 = get_src_reg(c, &inst->FullSrcRegisters[0], i);
+ src1 = get_src_reg(c, &inst->FullSrcRegisters[1], i);
+ brw_CMP(p, brw_null_reg(), cond, src0, src1);
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ brw_MOV(p, dst, brw_imm_f(0.0));
+ brw_set_predicate_control(p, BRW_PREDICATE_NORMAL);
+ brw_MOV(p, dst, brw_imm_f(1.0));
+ }
+ }
+ brw_pop_insn_state(p);
+}
+
+
+static void emit_ddx(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst)
+{
+ struct brw_compile *p = &c->func;
+ unsigned mask = inst->FullDstRegisters[0].DstRegister.WriteMask;
+ struct brw_reg interp[4];
+ struct brw_reg dst;
+ struct brw_reg src0, w;
+ unsigned nr, i;
+ src0 = get_src_reg(c, &inst->FullSrcRegisters[0], 0);
+ w = get_src_reg(c, &inst->FullSrcRegisters[1], 3);
+ nr = src0.nr;
+ interp[0] = brw_vec1_grf(nr, 0);
+ interp[1] = brw_vec1_grf(nr, 4);
+ interp[2] = brw_vec1_grf(nr+1, 0);
+ interp[3] = brw_vec1_grf(nr+1, 4);
+ brw_set_saturate(p, inst->Instruction.Saturate != TGSI_SAT_NONE);
+ for(i = 0; i < 4; i++ ) {
+ if (mask & (1<<i)) {
+ dst = get_dst_reg(c, inst, i);
+ brw_MOV(p, dst, interp[i]);
+ brw_MUL(p, dst, dst, w);
+ }
+ }
+ brw_set_saturate(p, 0);
+}
+
+static void emit_ddy(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst)
+{
+ struct brw_compile *p = &c->func;
+ unsigned mask = inst->FullDstRegisters[0].DstRegister.WriteMask;
+ struct brw_reg interp[4];
+ struct brw_reg dst;
+ struct brw_reg src0, w;
+ unsigned nr, i;
+
+ src0 = get_src_reg(c, &inst->FullSrcRegisters[0], 0);
+ nr = src0.nr;
+ w = get_src_reg(c, &inst->FullSrcRegisters[1], 3);
+ interp[0] = brw_vec1_grf(nr, 0);
+ interp[1] = brw_vec1_grf(nr, 4);
+ interp[2] = brw_vec1_grf(nr+1, 0);
+ interp[3] = brw_vec1_grf(nr+1, 4);
+ brw_set_saturate(p, inst->Instruction.Saturate != TGSI_SAT_NONE);
+ for(i = 0; i < 4; i++ ) {
+ if (mask & (1<<i)) {
+ dst = get_dst_reg(c, inst, i);
+ brw_MOV(p, dst, suboffset(interp[i], 1));
+ brw_MUL(p, dst, dst, w);
+ }
+ }
+ brw_set_saturate(p, 0);
+}
+
+/* TODO
+   BIAS on SIMD8 not working yet...
+*/
+static void emit_txb(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst)
+{
+#if 0
+ struct brw_compile *p = &c->func;
+ struct brw_reg payload_reg = c->payload_depth[0];
+ struct brw_reg dst[4], src[4];
+ unsigned i;
+ for (i = 0; i < 4; i++)
+ dst[i] = get_dst_reg(c, inst, i);
+ for (i = 0; i < 4; i++)
+ src[i] = get_src_reg(c, &inst->FullSrcRegisters[0], i);
+
+#if 0
+ switch (inst->TexSrcTarget) {
+ case TEXTURE_1D_INDEX:
+ brw_MOV(p, brw_message_reg(2), src[0]);
+ brw_MOV(p, brw_message_reg(3), brw_imm_f(0));
+ brw_MOV(p, brw_message_reg(4), brw_imm_f(0));
+ break;
+ case TEXTURE_2D_INDEX:
+ case TEXTURE_RECT_INDEX:
+ brw_MOV(p, brw_message_reg(2), src[0]);
+ brw_MOV(p, brw_message_reg(3), src[1]);
+ brw_MOV(p, brw_message_reg(4), brw_imm_f(0));
+ break;
+ default:
+ brw_MOV(p, brw_message_reg(2), src[0]);
+ brw_MOV(p, brw_message_reg(3), src[1]);
+ brw_MOV(p, brw_message_reg(4), src[2]);
+ break;
+ }
+#else
+ brw_MOV(p, brw_message_reg(2), src[0]);
+ brw_MOV(p, brw_message_reg(3), src[1]);
+ brw_MOV(p, brw_message_reg(4), brw_imm_f(0));
+#endif
+
+ brw_MOV(p, brw_message_reg(5), src[3]);
+ brw_MOV(p, brw_message_reg(6), brw_imm_f(0));
+ brw_SAMPLE(p,
+ retype(vec8(dst[0]), BRW_REGISTER_TYPE_UW),
+ 1,
+ retype(payload_reg, BRW_REGISTER_TYPE_UW),
+ inst->TexSrcUnit + 1, /* surface */
+ inst->TexSrcUnit, /* sampler */
+ inst->FullDstRegisters[0].DstRegister.WriteMask,
+ BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS,
+ 4,
+ 4,
+ 0);
+#endif
+}
+
+static void emit_tex(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst)
+{
+#if 0
+ struct brw_compile *p = &c->func;
+ struct brw_reg payload_reg = c->payload_depth[0];
+ struct brw_reg dst[4], src[4];
+ unsigned msg_len;
+ unsigned i, nr;
+ unsigned emit;
+ boolean shadow = (c->key.shadowtex_mask & (1<<inst->TexSrcUnit)) ? 1 : 0;
+
+ for (i = 0; i < 4; i++)
+ dst[i] = get_dst_reg(c, inst, i);
+ for (i = 0; i < 4; i++)
+ src[i] = get_src_reg(c, &inst->FullSrcRegisters[0], i);
+
+#if 0
+ switch (inst->TexSrcTarget) {
+ case TEXTURE_1D_INDEX:
+ emit = WRITEMASK_X;
+ nr = 1;
+ break;
+ case TEXTURE_2D_INDEX:
+ case TEXTURE_RECT_INDEX:
+ emit = WRITEMASK_XY;
+ nr = 2;
+ break;
+ default:
+ emit = WRITEMASK_XYZ;
+ nr = 3;
+ break;
+ }
+#else
+ emit = WRITEMASK_XY;
+ nr = 2;
+#endif
+
+ msg_len = 1;
+
+ for (i = 0; i < nr; i++) {
+ static const unsigned swz[4] = {0,1,2,2};
+ if (emit & (1<<i))
+ brw_MOV(p, brw_message_reg(msg_len+1), src[swz[i]]);
+ else
+ brw_MOV(p, brw_message_reg(msg_len+1), brw_imm_f(0));
+ msg_len += 1;
+ }
+
+ if (shadow) {
+ brw_MOV(p, brw_message_reg(5), brw_imm_f(0));
+ brw_MOV(p, brw_message_reg(6), src[2]);
+ }
+
+ brw_SAMPLE(p,
+ retype(vec8(dst[0]), BRW_REGISTER_TYPE_UW),
+ 1,
+ retype(payload_reg, BRW_REGISTER_TYPE_UW),
+ inst->TexSrcUnit + 1, /* surface */
+ inst->TexSrcUnit, /* sampler */
+ inst->FullDstRegisters[0].DstRegister.WriteMask,
+ BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE,
+ 4,
+ shadow ? 6 : 4,
+ 0);
+
+ if (shadow)
+ brw_MOV(p, dst[3], brw_imm_f(1.0));
+#endif
+}
+
+
+
+
+
+
+
+
+static void emit_fb_write(struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst)
+{
+ struct brw_compile *p = &c->func;
+ int nr = 2;
+ int channel;
+ int base_reg = 0;
+
+ // src0 = output color
+ // src1 = payload_depth[0]
+ // src2 = output depth
+ // dst = ???
+
+
+
+ /* Reserve a space for AA - may not be needed:
+ */
+ if (c->key.aa_dest_stencil_reg)
+ nr += 1;
+
+ {
+ brw_push_insn_state(p);
+ for (channel = 0; channel < 4; channel++) {
+ struct brw_reg src0 = c->wm_regs[TGSI_FILE_OUTPUT][0][channel];
+
+ /* mov (8) m2.0<1>:ud r28.0<8;8,1>:ud { Align1 } */
+ /* mov (8) m6.0<1>:ud r29.0<8;8,1>:ud { Align1 SecHalf } */
+ brw_MOV(p, brw_message_reg(nr + channel), src0);
+ }
+ /* skip over the regs populated above: */
+ nr += 8;
+ brw_pop_insn_state(p);
+ }
+
+
+ /* Pass through control information:
+ */
+ /* mov (8) m1.0<1>:ud r1.0<8;8,1>:ud { Align1 NoMask } */
+ {
+ brw_push_insn_state(p);
+ brw_set_mask_control(p, BRW_MASK_DISABLE); /* ? */
+ brw_MOV(p,
+ brw_message_reg(base_reg + 1),
+ brw_vec8_grf(1, 0));
+ brw_pop_insn_state(p);
+ }
+
+ /* Send framebuffer write message: */
+ brw_fb_WRITE(p,
+ retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW),
+ base_reg,
+ retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW),
+ 0, /* render surface always 0 */
+ nr,
+ 0,
+ 1);
+
+}
+
+
+static void brw_wm_emit_instruction( struct brw_wm_compile *c,
+ struct tgsi_full_instruction *inst )
+{
+ struct brw_compile *p = &c->func;
+
+#if 0
+ if (inst->CondUpdate)
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
+ else
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_NONE);
+#else
+ brw_set_conditionalmod(p, BRW_CONDITIONAL_NONE);
+#endif
+
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_ABS:
+ emit_abs(c, inst);
+ break;
+ case TGSI_OPCODE_ADD:
+ emit_alu2(c, inst, BRW_OPCODE_ADD);
+ break;
+ case TGSI_OPCODE_SUB:
+ assert(0);
+// emit_alu2(c, inst, BRW_OPCODE_SUB);
+ break;
+ case TGSI_OPCODE_FRC:
+ emit_alu1(c, inst, BRW_OPCODE_FRC);
+ break;
+ case TGSI_OPCODE_FLR:
+ assert(0);
+// emit_alu1(c, inst, BRW_OPCODE_FLR);
+ break;
+ case TGSI_OPCODE_LRP:
+ emit_lrp(c, inst);
+ break;
+ case TGSI_OPCODE_INT:
+ emit_alu1(c, inst, BRW_OPCODE_RNDD);
+ break;
+ case TGSI_OPCODE_MOV:
+ emit_alu1(c, inst, BRW_OPCODE_MOV);
+ break;
+ case TGSI_OPCODE_DP3:
+ emit_dp3(c, inst);
+ break;
+ case TGSI_OPCODE_DP4:
+ emit_dp4(c, inst);
+ break;
+ case TGSI_OPCODE_XPD:
+ emit_xpd(c, inst);
+ break;
+ case TGSI_OPCODE_DPH:
+ emit_dph(c, inst);
+ break;
+ case TGSI_OPCODE_RCP:
+ emit_math1(c, inst, BRW_MATH_FUNCTION_INV);
+ break;
+ case TGSI_OPCODE_RSQ:
+ emit_math1(c, inst, BRW_MATH_FUNCTION_RSQ);
+ break;
+ case TGSI_OPCODE_SIN:
+ emit_math1(c, inst, BRW_MATH_FUNCTION_SIN);
+ break;
+ case TGSI_OPCODE_COS:
+ emit_math1(c, inst, BRW_MATH_FUNCTION_COS);
+ break;
+ case TGSI_OPCODE_EX2:
+ emit_math1(c, inst, BRW_MATH_FUNCTION_EXP);
+ break;
+ case TGSI_OPCODE_LG2:
+ emit_math1(c, inst, BRW_MATH_FUNCTION_LOG);
+ break;
+ case TGSI_OPCODE_MAX:
+ emit_max(c, inst);
+ break;
+ case TGSI_OPCODE_MIN:
+ emit_min(c, inst);
+ break;
+ case TGSI_OPCODE_DDX:
+ emit_ddx(c, inst);
+ break;
+ case TGSI_OPCODE_DDY:
+ emit_ddy(c, inst);
+ break;
+ case TGSI_OPCODE_SLT:
+ emit_sop(c, inst, BRW_CONDITIONAL_L);
+ break;
+ case TGSI_OPCODE_SLE:
+ emit_sop(c, inst, BRW_CONDITIONAL_LE);
+ break;
+ case TGSI_OPCODE_SGT:
+ emit_sop(c, inst, BRW_CONDITIONAL_G);
+ break;
+ case TGSI_OPCODE_SGE:
+ emit_sop(c, inst, BRW_CONDITIONAL_GE);
+ break;
+ case TGSI_OPCODE_SEQ:
+ emit_sop(c, inst, BRW_CONDITIONAL_EQ);
+ break;
+ case TGSI_OPCODE_SNE:
+ emit_sop(c, inst, BRW_CONDITIONAL_NEQ);
+ break;
+ case TGSI_OPCODE_MUL:
+ emit_alu2(c, inst, BRW_OPCODE_MUL);
+ break;
+ case TGSI_OPCODE_POW:
+ emit_pow(c, inst);
+ break;
+ case TGSI_OPCODE_MAD:
+ emit_mad(c, inst);
+ break;
+ case TGSI_OPCODE_TEX:
+ emit_tex(c, inst);
+ break;
+ case TGSI_OPCODE_TXB:
+ emit_txb(c, inst);
+ break;
+ case TGSI_OPCODE_TEXKILL:
+ emit_kil(c);
+ break;
+ case TGSI_OPCODE_IF:
+ assert(c->if_insn < MAX_IFSN);
+ c->if_inst[c->if_insn++] = brw_IF(p, BRW_EXECUTE_8);
+ break;
+ case TGSI_OPCODE_ELSE:
+ c->if_inst[c->if_insn-1] = brw_ELSE(p, c->if_inst[c->if_insn-1]);
+ break;
+ case TGSI_OPCODE_ENDIF:
+ assert(c->if_insn > 0);
+ brw_ENDIF(p, c->if_inst[--c->if_insn]);
+ break;
+ case TGSI_OPCODE_BGNSUB:
+ case TGSI_OPCODE_ENDSUB:
+ break;
+ case TGSI_OPCODE_CAL:
+ brw_push_insn_state(p);
+ brw_set_mask_control(p, BRW_MASK_DISABLE);
+ brw_set_access_mode(p, BRW_ALIGN_1);
+ brw_ADD(p, deref_1ud(c->stack_index, 0), brw_ip_reg(), brw_imm_d(3*16));
+ brw_set_access_mode(p, BRW_ALIGN_16);
+ brw_ADD(p,
+ get_addr_reg(c->stack_index),
+ get_addr_reg(c->stack_index), brw_imm_d(4));
+// orig_inst = inst->Data;
+// orig_inst->Data = &p->store[p->nr_insn];
+ assert(0);
+ brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
+ brw_pop_insn_state(p);
+ break;
+
+ case TGSI_OPCODE_RET:
+#if 0
+ brw_push_insn_state(p);
+ brw_set_mask_control(p, BRW_MASK_DISABLE);
+ brw_ADD(p,
+ get_addr_reg(c->stack_index),
+ get_addr_reg(c->stack_index), brw_imm_d(-4));
+ brw_set_access_mode(p, BRW_ALIGN_1);
+ brw_MOV(p, brw_ip_reg(), deref_1ud(c->stack_index, 0));
+ brw_set_access_mode(p, BRW_ALIGN_16);
+ brw_pop_insn_state(p);
+#else
+ emit_fb_write(c, inst);
+#endif
+
+ break;
+ case TGSI_OPCODE_BGNFOR:
+ c->loop_inst[c->loop_insn++] = brw_DO(p, BRW_EXECUTE_8);
+ break;
+ case TGSI_OPCODE_BRK:
+ brw_BREAK(p);
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ break;
+ case TGSI_OPCODE_CONT:
+ brw_CONT(p);
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+ break;
+ case TGSI_OPCODE_ENDFOR:
+ c->loop_insn--;
+ c->inst0 = c->inst1 = brw_WHILE(p, c->loop_inst[c->loop_insn]);
+ /* patch all the BREAK instructions from
+ last BGNFOR */
+ while (c->inst0 > c->loop_inst[c->loop_insn]) {
+ c->inst0--;
+ if (c->inst0->header.opcode == BRW_OPCODE_BREAK) {
+ c->inst0->bits3.if_else.jump_count = c->inst1 - c->inst0 + 1;
+ c->inst0->bits3.if_else.pop_count = 0;
+ } else if (c->inst0->header.opcode == BRW_OPCODE_CONTINUE) {
+ c->inst0->bits3.if_else.jump_count = c->inst1 - c->inst0;
+ c->inst0->bits3.if_else.pop_count = 0;
+ }
+ }
+ break;
+ case TGSI_OPCODE_END:
+ emit_fb_write(c, inst);
+ break;
+
+ default:
+ debug_printf("unsupported IR in fragment shader %d\n",
+ inst->Instruction.Opcode);
+ }
+#if 0
+ if (inst->CondUpdate)
+ brw_set_predicate_control(p, BRW_PREDICATE_NORMAL);
+ else
+ brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+#endif
+}
+
+
+
+
+
+
+void brw_wm_glsl_emit(struct brw_wm_compile *c)
+{
+ struct tgsi_parse_context parse;
+ struct brw_compile *p = &c->func;
+
+ brw_init_compile(&c->func);
+ brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+
+ c->reg_index = 0;
+ c->if_insn = 0;
+ c->loop_insn = 0;
+ c->stack_index = brw_indirect(0,0);
+
+ /* Do static register allocation and parameter interpolation:
+ */
+ brw_wm_emit_decls( c );
+
+ /* Emit the actual program. All done with very direct translation,
+ * hopefully we can improve on this shortly...
+ */
+ brw_MOV(p, get_addr_reg(c->stack_index), brw_address(c->stack));
+
+ tgsi_parse_init( &parse, c->fp->program.tokens );
+
+ while( !tgsi_parse_end_of_tokens( &parse ) )
+ {
+ tgsi_parse_token( &parse );
+
+ switch( parse.FullToken.Token.Type ) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ /* already done */
+ break;
+
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ /* not handled yet */
+ assert(0);
+ break;
+
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ brw_wm_emit_instruction(c, &parse.FullToken.FullInstruction);
+ break;
+
+ default:
+ assert( 0 );
+ }
+ }
+
+ tgsi_parse_free (&parse);
+
+ /* Fix up call targets:
+ */
+#if 0
+ {
+ unsigned nr_insns = c->fp->program.Base.NumInstructions;
+ unsigned insn, target_insn;
+ struct tgsi_full_instruction *inst1, *inst2;
+ struct brw_instruction *brw_inst1, *brw_inst2;
+ int offset;
+ for (insn = 0; insn < nr_insns; insn++) {
+ inst1 = &c->fp->program.Base.Instructions[insn];
+ brw_inst1 = inst1->Data;
+ switch (inst1->Opcode) {
+ case TGSI_OPCODE_CAL:
+ target_insn = inst1->BranchTarget;
+ inst2 = &c->fp->program.Base.Instructions[target_insn];
+ brw_inst2 = inst2->Data;
+ offset = brw_inst2 - brw_inst1;
+ brw_set_src1(brw_inst1, brw_imm_d(offset*16));
+ break;
+ default:
+ break;
+ }
+ }
+ }
+#endif
+
+ c->prog_data.total_grf = c->reg_index;
+ c->prog_data.total_scratch = 0;
+}
diff --git a/src/gallium/drivers/i965simple/brw_wm_iz.c b/src/gallium/drivers/i965simple/brw_wm_iz.c
new file mode 100644
index 0000000000..6c5f25bf39
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_wm_iz.c
@@ -0,0 +1,214 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "brw_wm.h"
+
+
+#undef P /* promoted depth */
+#undef C /* computed */
+#undef N /* non-promoted? */
+
+#define P 0
+#define C 1
+#define N 2
+
+const struct {
+ unsigned mode:2;
+ unsigned sd_present:1;
+ unsigned sd_to_rt:1;
+ unsigned dd_present:1;
+ unsigned ds_present:1;
+} wm_iz_table[IZ_BIT_MAX] =
+{
+ { P, 0, 0, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { C, 0, 1, 0, 0 },
+ { C, 0, 1, 0, 0 },
+ { C, 1, 1, 0, 0 },
+ { C, 1, 1, 0, 0 },
+ { C, 0, 1, 0, 0 },
+ { C, 0, 1, 0, 0 },
+ { C, 1, 1, 1, 0 },
+ { C, 1, 1, 1, 0 },
+ { C, 0, 1, 1, 0 },
+ { C, 0, 1, 1, 0 },
+ { C, 1, 1, 1, 0 },
+ { C, 1, 1, 1, 0 },
+ { C, 0, 1, 1, 0 },
+ { C, 0, 1, 1, 0 },
+ { P, 0, 0, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { C, 0, 1, 0, 0 },
+ { C, 0, 1, 0, 0 },
+ { C, 1, 1, 0, 0 },
+ { C, 1, 1, 0, 0 },
+ { C, 0, 1, 0, 0 },
+ { C, 0, 1, 0, 0 },
+ { C, 1, 1, 1, 0 },
+ { C, 1, 1, 1, 0 },
+ { C, 0, 1, 1, 0 },
+ { C, 0, 1, 1, 0 },
+ { C, 1, 1, 1, 0 },
+ { C, 1, 1, 1, 0 },
+ { C, 0, 1, 1, 0 },
+ { C, 0, 1, 1, 0 },
+ { C, 0, 0, 0, 1 },
+ { C, 0, 0, 0, 1 },
+ { C, 0, 1, 0, 1 },
+ { C, 0, 1, 0, 1 },
+ { C, 1, 1, 0, 1 },
+ { C, 1, 1, 0, 1 },
+ { C, 0, 1, 0, 1 },
+ { C, 0, 1, 0, 1 },
+ { C, 1, 1, 1, 1 },
+ { C, 1, 1, 1, 1 },
+ { C, 0, 1, 1, 1 },
+ { C, 0, 1, 1, 1 },
+ { C, 1, 1, 1, 1 },
+ { C, 1, 1, 1, 1 },
+ { C, 0, 1, 1, 1 },
+ { C, 0, 1, 1, 1 },
+ { C, 0, 0, 0, 1 },
+ { C, 0, 0, 0, 1 },
+ { C, 0, 1, 0, 1 },
+ { C, 0, 1, 0, 1 },
+ { C, 1, 1, 0, 1 },
+ { C, 1, 1, 0, 1 },
+ { C, 0, 1, 0, 1 },
+ { C, 0, 1, 0, 1 },
+ { C, 1, 1, 1, 1 },
+ { C, 1, 1, 1, 1 },
+ { C, 0, 1, 1, 1 },
+ { C, 0, 1, 1, 1 },
+ { C, 1, 1, 1, 1 },
+ { C, 1, 1, 1, 1 },
+ { C, 0, 1, 1, 1 },
+ { C, 0, 1, 1, 1 },
+ { P, 0, 0, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { N, 1, 1, 0, 0 },
+ { N, 0, 1, 0, 0 },
+ { N, 0, 1, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { C, 0, 1, 1, 0 },
+ { C, 0, 1, 1, 0 },
+ { P, 0, 0, 0, 0 },
+ { N, 1, 1, 0, 0 },
+ { C, 0, 1, 1, 0 },
+ { C, 0, 1, 1, 0 },
+ { P, 0, 0, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { N, 1, 1, 0, 0 },
+ { N, 0, 1, 0, 0 },
+ { N, 0, 1, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { C, 0, 1, 1, 0 },
+ { C, 0, 1, 1, 0 },
+ { P, 0, 0, 0, 0 },
+ { N, 1, 1, 0, 0 },
+ { C, 0, 1, 1, 0 },
+ { C, 0, 1, 1, 0 },
+ { P, 0, 0, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { N, 1, 1, 0, 1 },
+ { N, 0, 1, 0, 1 },
+ { N, 0, 1, 0, 1 },
+ { P, 0, 0, 0, 0 },
+ { P, 0, 0, 0, 0 },
+ { C, 0, 1, 1, 1 },
+ { C, 0, 1, 1, 1 },
+ { P, 0, 0, 0, 0 },
+ { N, 1, 1, 0, 1 },
+ { C, 0, 1, 1, 1 },
+ { C, 0, 1, 1, 1 },
+ { P, 0, 0, 0, 0 },
+ { C, 0, 0, 0, 1 },
+ { P, 0, 0, 0, 0 },
+ { C, 0, 1, 0, 1 },
+ { P, 0, 0, 0, 0 },
+ { C, 1, 1, 0, 1 },
+ { C, 0, 1, 0, 1 },
+ { C, 0, 1, 0, 1 },
+ { P, 0, 0, 0, 0 },
+ { C, 1, 1, 1, 1 },
+ { C, 0, 1, 1, 1 },
+ { C, 0, 1, 1, 1 },
+ { P, 0, 0, 0, 0 },
+ { C, 1, 1, 1, 1 },
+ { C, 0, 1, 1, 1 },
+ { C, 0, 1, 1, 1 }
+};
+
+void brw_wm_lookup_iz( unsigned line_aa,
+ unsigned lookup,
+ struct brw_wm_prog_key *key )
+{
+ unsigned reg = 2;
+
+ assert (lookup < IZ_BIT_MAX);
+
+ if (lookup & IZ_PS_COMPUTES_DEPTH_BIT)
+ key->computes_depth = 1;
+
+ if (wm_iz_table[lookup].sd_present) {
+ key->source_depth_reg = reg;
+ reg += 2;
+ }
+
+ if (wm_iz_table[lookup].sd_to_rt)
+ key->source_depth_to_render_target = 1;
+
+ if (wm_iz_table[lookup].ds_present || line_aa != AA_NEVER) {
+ key->aa_dest_stencil_reg = reg;
+ key->runtime_check_aads_emit = (!wm_iz_table[lookup].ds_present &&
+ line_aa == AA_SOMETIMES);
+ reg++;
+ }
+
+ if (wm_iz_table[lookup].dd_present) {
+ key->dest_depth_reg = reg;
+ reg+=2;
+ }
+
+ key->nr_depth_regs = (reg+1)/2;
+}
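+
+/* Worked example (editor's note): lookup = IZ_DEPTH_TEST_ENABLE_BIT |
+ * IZ_DEPTH_WRITE_ENABLE_BIT (0x0c) selects table row 12, { C, 1, 1, 1, 0 }.
+ * With line_aa == AA_NEVER this yields source_depth_reg = 2,
+ * source_depth_to_render_target = 1, dest_depth_reg = 4 and
+ * nr_depth_regs = (6+1)/2 = 3.
+ */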
+
diff --git a/src/gallium/drivers/i965simple/brw_wm_sampler_state.c b/src/gallium/drivers/i965simple/brw_wm_sampler_state.c
new file mode 100644
index 0000000000..52b2909a65
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_wm_sampler_state.c
@@ -0,0 +1,275 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "brw_context.h"
+#include "brw_state.h"
+#include "brw_defines.h"
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+
+#define COMPAREFUNC_ALWAYS 0
+#define COMPAREFUNC_NEVER 0x1
+#define COMPAREFUNC_LESS 0x2
+#define COMPAREFUNC_EQUAL 0x3
+#define COMPAREFUNC_LEQUAL 0x4
+#define COMPAREFUNC_GREATER 0x5
+#define COMPAREFUNC_NOTEQUAL 0x6
+#define COMPAREFUNC_GEQUAL 0x7
+
+/* Samplers aren't strictly wm state from the hardware's perspective,
+ * but WM state is the only context in which this driver uses them.
+ */
+
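+/* The mapping below is intentionally "inverted": GL-style shadow
+ * comparisons pass when "ref <op> texel" holds, while the hardware
+ * sample_c message evaluates "texel <op> ref", so each function maps to
+ * the negation of its argument-swapped counterpart.
+ */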
+static int intel_translate_shadow_compare_func(unsigned func)
+{
+ switch(func) {
+ case PIPE_FUNC_NEVER:
+ return COMPAREFUNC_ALWAYS;
+ case PIPE_FUNC_LESS:
+ return COMPAREFUNC_LEQUAL;
+ case PIPE_FUNC_LEQUAL:
+ return COMPAREFUNC_LESS;
+ case PIPE_FUNC_GREATER:
+ return COMPAREFUNC_GEQUAL;
+ case PIPE_FUNC_GEQUAL:
+ return COMPAREFUNC_GREATER;
+ case PIPE_FUNC_NOTEQUAL:
+ return COMPAREFUNC_EQUAL;
+ case PIPE_FUNC_EQUAL:
+ return COMPAREFUNC_NOTEQUAL;
+ case PIPE_FUNC_ALWAYS:
+ return COMPAREFUNC_NEVER;
+ }
+
+ debug_printf("Unknown value in %s: %x\n", __FUNCTION__, func);
+ return COMPAREFUNC_NEVER;
+}
+
+/* The brw (and related graphics cores) do not support GL_CLAMP. The
+ * Intel drivers for "other operating systems" implement GL_CLAMP as
+ * GL_CLAMP_TO_EDGE, so the same is done here.
+ */
+static unsigned translate_wrap_mode( int wrap )
+{
+ switch( wrap ) {
+ case PIPE_TEX_WRAP_REPEAT:
+ return BRW_TEXCOORDMODE_WRAP;
+ case PIPE_TEX_WRAP_CLAMP:
+ return BRW_TEXCOORDMODE_CLAMP;
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ return BRW_TEXCOORDMODE_CLAMP; /* conform likes it this way */
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ return BRW_TEXCOORDMODE_CLAMP_BORDER;
+ case PIPE_TEX_WRAP_MIRROR_REPEAT:
+ return BRW_TEXCOORDMODE_MIRROR;
+ default:
+ return BRW_TEXCOORDMODE_WRAP;
+ }
+}
+
+
+static unsigned U_FIXED(float value, unsigned frac_bits)
+{
+ value *= (1<<frac_bits);
+ return value < 0 ? 0 : value;
+}
+
+static int S_FIXED(float value, unsigned frac_bits)
+{
+ return value * (1<<frac_bits);
+}
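+
+/* Editor's example of the 4.6 fixed-point encoding used for the LOD fields
+ * below: S_FIXED(-1.5f, 6) == -96 (-1.5 * 64), and U_FIXED(13.0f, 6) == 832
+ * (13 * 64; negative inputs clamp to 0).
+ */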
+
+
+static unsigned upload_default_color( struct brw_context *brw,
+ const float *color )
+{
+ struct brw_sampler_default_color sdc;
+
+ COPY_4V(sdc.color, color);
+
+ return brw_cache_data( &brw->cache[BRW_SAMPLER_DEFAULT_COLOR], &sdc );
+}
+
+
+/* Translate a gallium pipe_sampler_state into the hardware sampler state
+ * structure, given the offset of the already-uploaded default (border)
+ * color.
+ */
+static void brw_update_sampler_state( const struct pipe_sampler_state *pipe_sampler,
+ unsigned sdc_gs_offset,
+ struct brw_sampler_state *sampler)
+{
+ memset(sampler, 0, sizeof(*sampler));
+
+ switch (pipe_sampler->min_img_filter) {
+ case PIPE_TEX_FILTER_NEAREST:
+ sampler->ss0.min_filter = BRW_MAPFILTER_NEAREST;
+ break;
+ case PIPE_TEX_FILTER_LINEAR:
+ sampler->ss0.min_filter = BRW_MAPFILTER_LINEAR;
+ break;
+ case PIPE_TEX_FILTER_ANISO:
+ sampler->ss0.min_filter = BRW_MAPFILTER_ANISOTROPIC;
+ break;
+ default:
+ break;
+ }
+
+ switch (pipe_sampler->min_mip_filter) {
+ case PIPE_TEX_MIPFILTER_NEAREST:
+ sampler->ss0.mip_filter = BRW_MIPFILTER_NEAREST;
+ break;
+ case PIPE_TEX_MIPFILTER_LINEAR:
+ sampler->ss0.mip_filter = BRW_MIPFILTER_LINEAR;
+ break;
+ case PIPE_TEX_MIPFILTER_NONE:
+ sampler->ss0.mip_filter = BRW_MIPFILTER_NONE;
+ break;
+ default:
+ break;
+ }
+ /* Set the magnification filter (anisotropy is configured just below):
+ */
+ switch (pipe_sampler->mag_img_filter) {
+ case PIPE_TEX_FILTER_NEAREST:
+ sampler->ss0.mag_filter = BRW_MAPFILTER_NEAREST;
+ break;
+ case PIPE_TEX_FILTER_LINEAR:
+ sampler->ss0.mag_filter = BRW_MAPFILTER_LINEAR;
+ break;
+ case PIPE_TEX_FILTER_ANISO:
+ sampler->ss0.mag_filter = BRW_MAPFILTER_LINEAR;
+ break;
+ default:
+ break;
+ }
+
+ if (pipe_sampler->max_anisotropy > 2.0) {
+ sampler->ss3.max_aniso = MIN2((pipe_sampler->max_anisotropy - 2) / 2,
+ BRW_ANISORATIO_16);
+ }
+
+ sampler->ss1.s_wrap_mode = translate_wrap_mode(pipe_sampler->wrap_s);
+ sampler->ss1.r_wrap_mode = translate_wrap_mode(pipe_sampler->wrap_r);
+ sampler->ss1.t_wrap_mode = translate_wrap_mode(pipe_sampler->wrap_t);
+
+ /* Fulsim complains if I don't do this. Hardware doesn't mind:
+ */
+#if 0
+ if (texObj->Target == GL_TEXTURE_CUBE_MAP_ARB) {
+ sampler->ss1.r_wrap_mode = BRW_TEXCOORDMODE_CUBE;
+ sampler->ss1.s_wrap_mode = BRW_TEXCOORDMODE_CUBE;
+ sampler->ss1.t_wrap_mode = BRW_TEXCOORDMODE_CUBE;
+ }
+#endif
+
+ /* Set shadow function:
+ */
+ if (pipe_sampler->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) {
+ /* Shadowing is "enabled" by emitting a particular sampler
+ * message (sample_c). So need to recompile WM program when
+ * shadow comparison is enabled on each/any texture unit.
+ */
+ sampler->ss0.shadow_function = intel_translate_shadow_compare_func(pipe_sampler->compare_func);
+ }
+
+ /* Set LOD bias:
+ */
+ sampler->ss0.lod_bias = S_FIXED(CLAMP(pipe_sampler->lod_bias, -16, 15), 6);
+
+ sampler->ss0.lod_preclamp = 1; /* OpenGL mode */
+ sampler->ss0.default_color_mode = 0; /* OpenGL/DX10 mode */
+
+ /* Set BaseMipLevel, MaxLOD, MinLOD:
+ *
+ * XXX: I don't think that using firstLevel, lastLevel works,
+ * because we always setup the surface state as if firstLevel ==
+ * level zero. Probably have to subtract firstLevel from each of
+ * these:
+ */
+ sampler->ss0.base_level = U_FIXED(0, 1);
+
+ sampler->ss1.max_lod = U_FIXED(MIN2(MAX2(pipe_sampler->max_lod, 0), 13), 6);
+ sampler->ss1.min_lod = U_FIXED(MIN2(MAX2(pipe_sampler->min_lod, 0), 13), 6);
+
+ sampler->ss2.default_color_pointer = sdc_gs_offset >> 5;
+}
+
+
+
+/* All samplers must be uploaded in a single contiguous array, which
+ * complicates various things. However, this is still too confusing -
+ * FIXME: simplify all the different new texture state flags.
+ */
+static void upload_wm_samplers(struct brw_context *brw)
+{
+ unsigned unit;
+ unsigned sampler_count = 0;
+
+ /* BRW_NEW_SAMPLER */
+ for (unit = 0; unit < brw->num_textures && unit < brw->num_samplers;
+ unit++) {
+ /* determine unit enable/disable by looking for a bound texture */
+ if (brw->attribs.Texture[unit]) {
+ const struct pipe_sampler_state *sampler = brw->attribs.Samplers[unit];
+ unsigned sdc_gs_offset = upload_default_color(brw, sampler->border_color);
+
+ brw_update_sampler_state(sampler,
+ sdc_gs_offset,
+ &brw->wm.sampler[unit]);
+
+ sampler_count = unit + 1;
+ }
+ }
+
+ if (brw->wm.sampler_count != sampler_count) {
+ brw->wm.sampler_count = sampler_count;
+ brw->state.dirty.cache |= CACHE_NEW_SAMPLER;
+ }
+
+ brw->wm.sampler_gs_offset = 0;
+
+ if (brw->wm.sampler_count)
+ brw->wm.sampler_gs_offset =
+ brw_cache_data_sz(&brw->cache[BRW_SAMPLER],
+ brw->wm.sampler,
+ sizeof(struct brw_sampler_state) * brw->wm.sampler_count);
+}
+
+const struct brw_tracked_state brw_wm_samplers = {
+ .dirty = {
+ .brw = BRW_NEW_SAMPLER,
+ .cache = 0
+ },
+ .update = upload_wm_samplers
+};
+
diff --git a/src/gallium/drivers/i965simple/brw_wm_state.c b/src/gallium/drivers/i965simple/brw_wm_state.c
new file mode 100644
index 0000000000..37a9bf919c
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_wm_state.c
@@ -0,0 +1,195 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "brw_context.h"
+#include "brw_state.h"
+#include "brw_defines.h"
+#include "brw_wm.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+/***********************************************************************
+ * WM unit - fragment programs and rasterization
+ */
+static void upload_wm_unit(struct brw_context *brw )
+{
+ struct brw_wm_unit_state wm;
+ unsigned max_threads;
+ unsigned per_thread;
+
+ if (BRW_DEBUG & DEBUG_SINGLE_THREAD)
+ max_threads = 0;
+ else
+ max_threads = 31;
+
+
+ memset(&wm, 0, sizeof(wm));
+
+ /* CACHE_NEW_WM_PROG */
+ wm.thread0.grf_reg_count = align(brw->wm.prog_data->total_grf, 16) / 16 - 1;
+ wm.thread0.kernel_start_pointer = brw->wm.prog_gs_offset >> 6;
+ wm.thread3.dispatch_grf_start_reg = brw->wm.prog_data->first_curbe_grf;
+ wm.thread3.urb_entry_read_length = brw->wm.prog_data->urb_read_length;
+ wm.thread3.const_urb_entry_read_length = brw->wm.prog_data->curb_read_length;
+
+ wm.wm5.max_threads = max_threads;
+
+ per_thread = align(brw->wm.prog_data->total_scratch, 1024);
+ assert(per_thread <= 12 * 1024);
+
+#if 0
+ if (brw->wm.prog_data->total_scratch) {
+ unsigned total = per_thread * (max_threads + 1);
+
+ /* Scratch space -- just have to make sure there is sufficient
+ * allocated for the active program and current number of threads.
+ */
+ brw->wm.scratch_buffer_size = total;
+ if (brw->wm.scratch_buffer &&
+ brw->wm.scratch_buffer_size > brw->wm.scratch_buffer->size) {
+ dri_bo_unreference(brw->wm.scratch_buffer);
+ brw->wm.scratch_buffer = NULL;
+ }
+ if (!brw->wm.scratch_buffer) {
+ brw->wm.scratch_buffer = dri_bo_alloc(intel->intelScreen->bufmgr,
+ "wm scratch",
+ brw->wm.scratch_buffer_size,
+ 4096, DRM_BO_FLAG_MEM_TT);
+ }
+ }
+ /* XXX: Scratch buffers are not implemented correctly.
+ *
+ * The scratch offset to be programmed into wm is relative to the general
+ * state base address. However, using dri_bo_alloc/dri_bo_emit_reloc (or
+ * the previous bmGenBuffers scheme), we get an offset relative to the
+ * start of framebuffer. Even before then, it was broken in other ways,
+ * so just fail for now if we hit that path.
+ */
+ assert(brw->wm.prog_data->total_scratch == 0);
+#endif
+
+ /* CACHE_NEW_SURFACE */
+ wm.thread1.binding_table_entry_count = brw->wm.nr_surfaces;
+
+ /* BRW_NEW_CURBE_OFFSETS */
+ wm.thread3.const_urb_entry_read_offset = brw->curbe.wm_start * 2;
+
+ wm.thread3.urb_entry_read_offset = 0;
+ wm.thread1.depth_coef_urb_read_offset = 1;
+ wm.thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;
+
+ /* CACHE_NEW_SAMPLER */
+ wm.wm4.sampler_count = (brw->wm.sampler_count + 1) / 4;
+ wm.wm4.sampler_state_pointer = brw->wm.sampler_gs_offset >> 5;
+
+ /* BRW_NEW_FRAGMENT_PROGRAM */
+ {
+ const struct brw_fragment_program *fp = brw->attribs.FragmentProgram;
+
+ if (fp->UsesDepth)
+ wm.wm5.program_uses_depth = 1; /* as far as we can tell */
+
+ if (fp->info.writes_z)
+ wm.wm5.program_computes_depth = 1;
+
+ /* BRW_NEW_ALPHA_TEST */
+ if (fp->info.uses_kill ||
+ brw->attribs.DepthStencil->alpha.enabled)
+ wm.wm5.program_uses_killpixel = 1;
+
+ wm.wm5.enable_8_pix = 1;
+ }
+
+ wm.wm5.thread_dispatch_enable = 1; /* AKA: color_write */
+ wm.wm5.legacy_line_rast = 0;
+ wm.wm5.legacy_global_depth_bias = 0;
+ wm.wm5.early_depth_test = 1; /* never need to disable */
+ wm.wm5.line_aa_region_width = 0;
+ wm.wm5.line_endcap_aa_region_width = 1;
+
+ /* BRW_NEW_RASTERIZER */
+ if (brw->attribs.Raster->poly_stipple_enable)
+ wm.wm5.polygon_stipple = 1;
+
+#if 0
+ if (brw->attribs.Polygon->OffsetFill) {
+ wm.wm5.depth_offset = 1;
+ /* Something weird going on with legacy_global_depth_bias,
+ * offset_constant, scaling and MRD. This value passes glean
+ * but gives some odd results elsewhere (e.g. the
+ * quad-offset-units test).
+ */
+ wm.global_depth_offset_constant = brw->attribs.Polygon->OffsetUnits * 2;
+
+ /* This is the only value that passes glean:
+ */
+ wm.global_depth_offset_scale = brw->attribs.Polygon->OffsetFactor;
+ }
+#endif
+
+ if (brw->attribs.Raster->line_stipple_enable) {
+ wm.wm5.line_stipple = 1;
+ }
+
+ if (BRW_DEBUG & DEBUG_STATS)
+ wm.wm4.stats_enable = 1;
+
+ brw->wm.state_gs_offset = brw_cache_data( &brw->cache[BRW_WM_UNIT], &wm );
+
+ if (brw->wm.prog_data->total_scratch) {
+ /*
+ dri_emit_reloc(brw->cache[BRW_WM_UNIT].pool->buffer,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
+ (per_thread / 1024) - 1,
+ brw->wm.state_gs_offset +
+ ((char *)&wm.thread2 - (char *)&wm),
+ brw->wm.scratch_buffer);
+ */
+ } else {
+ wm.thread2.scratch_space_base_pointer = 0;
+ }
+}
+
+const struct brw_tracked_state brw_wm_unit = {
+ .dirty = {
+ .brw = (BRW_NEW_RASTERIZER |
+ BRW_NEW_ALPHA_TEST |
+ BRW_NEW_FS |
+ BRW_NEW_CURBE_OFFSETS),
+
+ .cache = (CACHE_NEW_SURFACE |
+ CACHE_NEW_WM_PROG |
+ CACHE_NEW_SAMPLER)
+ },
+ .update = upload_wm_unit
+};
+
diff --git a/src/gallium/drivers/i965simple/brw_wm_surface_state.c b/src/gallium/drivers/i965simple/brw_wm_surface_state.c
new file mode 100644
index 0000000000..b5b9e0e702
--- /dev/null
+++ b/src/gallium/drivers/i965simple/brw_wm_surface_state.c
@@ -0,0 +1,305 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "brw_context.h"
+#include "brw_state.h"
+#include "brw_defines.h"
+
+static unsigned translate_tex_target( enum pipe_texture_target target )
+{
+ switch (target) {
+ case PIPE_TEXTURE_1D:
+ return BRW_SURFACE_1D;
+
+ case PIPE_TEXTURE_2D:
+ return BRW_SURFACE_2D;
+
+ case PIPE_TEXTURE_3D:
+ return BRW_SURFACE_3D;
+
+ case PIPE_TEXTURE_CUBE:
+ return BRW_SURFACE_CUBE;
+
+ default:
+ assert(0);
+ return 0;
+ }
+}
+
+static unsigned translate_tex_format( enum pipe_format pipe_format )
+{
+ switch( pipe_format ) {
+ case PIPE_FORMAT_L8_UNORM:
+ return BRW_SURFACEFORMAT_L8_UNORM;
+
+ case PIPE_FORMAT_I8_UNORM:
+ return BRW_SURFACEFORMAT_I8_UNORM;
+
+ case PIPE_FORMAT_A8_UNORM:
+ return BRW_SURFACEFORMAT_A8_UNORM;
+
+ case PIPE_FORMAT_A8L8_UNORM:
+ return BRW_SURFACEFORMAT_L8A8_UNORM;
+
+ case PIPE_FORMAT_R8G8B8_UNORM:
+ assert(0); /* not supported for sampling */
+ return BRW_SURFACEFORMAT_R8G8B8_UNORM;
+
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ return BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
+
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ return BRW_SURFACEFORMAT_R8G8B8A8_UNORM;
+
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ return BRW_SURFACEFORMAT_B5G6R5_UNORM;
+
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ return BRW_SURFACEFORMAT_B5G5R5A1_UNORM;
+
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ return BRW_SURFACEFORMAT_B4G4R4A4_UNORM;
+
+ case PIPE_FORMAT_YCBCR_REV:
+ return BRW_SURFACEFORMAT_YCRCB_NORMAL;
+
+ case PIPE_FORMAT_YCBCR:
+ return BRW_SURFACEFORMAT_YCRCB_SWAPUVY;
+#if 0
+ case PIPE_FORMAT_RGB_FXT1:
+ case PIPE_FORMAT_RGBA_FXT1:
+ return BRW_SURFACEFORMAT_FXT1;
+#endif
+
+ case PIPE_FORMAT_Z16_UNORM:
+ return BRW_SURFACEFORMAT_I16_UNORM;
+#if 0
+ case PIPE_FORMAT_RGB_DXT1:
+ return BRW_SURFACEFORMAT_DXT1_RGB;
+
+ case PIPE_FORMAT_RGBA_DXT1:
+ return BRW_SURFACEFORMAT_BC1_UNORM;
+
+ case PIPE_FORMAT_RGBA_DXT3:
+ return BRW_SURFACEFORMAT_BC2_UNORM;
+
+ case PIPE_FORMAT_RGBA_DXT5:
+ return BRW_SURFACEFORMAT_BC3_UNORM;
+
+ case PIPE_FORMAT_SRGBA8:
+ return BRW_SURFACEFORMAT_R8G8B8A8_UNORM_SRGB;
+ case PIPE_FORMAT_SRGB_DXT1:
+ return BRW_SURFACEFORMAT_BC1_UNORM_SRGB;
+#endif
+
+ default:
+ assert(0);
+ return 0;
+ }
+}
+
+static unsigned brw_buffer_offset(struct brw_context *brw,
+ struct pipe_buffer *buffer)
+{
+ return brw->winsys->get_buffer_offset(brw->winsys,
+ buffer,
+ 0);
+}
+
+static
+void brw_update_texture_surface( struct brw_context *brw,
+ unsigned unit )
+{
+ const struct brw_texture *tObj = brw->attribs.Texture[unit];
+ struct brw_surface_state surf;
+
+ memset(&surf, 0, sizeof(surf));
+
+ surf.ss0.mipmap_layout_mode = BRW_SURFACE_MIPMAPLAYOUT_BELOW;
+ surf.ss0.surface_type = translate_tex_target(tObj->base.target);
+ surf.ss0.surface_format = translate_tex_format(tObj->base.format);
+
+ /* This is OK for all textures with a channel width of 8 bits or less:
+ */
+/* surf.ss0.data_return_format = BRW_SURFACERETURNFORMAT_S1; */
+
+ /* Updated in emit_reloc */
+ surf.ss1.base_addr = brw_buffer_offset( brw, tObj->buffer );
+
+ surf.ss2.mip_count = tObj->base.last_level;
+ surf.ss2.width = tObj->base.width[0] - 1;
+ surf.ss2.height = tObj->base.height[0] - 1;
+
+ surf.ss3.tile_walk = BRW_TILEWALK_XMAJOR;
+ surf.ss3.tiled_surface = 0; /* always zero */
+ surf.ss3.pitch = tObj->stride - 1;
+ surf.ss3.depth = tObj->base.depth[0] - 1;
+
+ surf.ss4.min_lod = 0;
+
+ if (tObj->base.target == PIPE_TEXTURE_CUBE) {
+ surf.ss0.cube_pos_x = 1;
+ surf.ss0.cube_pos_y = 1;
+ surf.ss0.cube_pos_z = 1;
+ surf.ss0.cube_neg_x = 1;
+ surf.ss0.cube_neg_y = 1;
+ surf.ss0.cube_neg_z = 1;
+ }
+
+ brw->wm.bind.surf_ss_offset[unit + 1] =
+ brw_cache_data( &brw->cache[BRW_SS_SURFACE], &surf );
+}
+
+
+
+#define OFFSET(TYPE, FIELD) ( (unsigned)&(((TYPE *)0)->FIELD) )
+
+
+static void upload_wm_surfaces(struct brw_context *brw )
+{
+ unsigned i;
+
+ {
+ struct brw_surface_state surf;
+
+ /* BRW_NEW_FRAMEBUFFER
+ */
+ struct pipe_surface *pipe_surface = brw->attribs.FrameBuffer.cbufs[0];/*fixme*/
+
+ memset(&surf, 0, sizeof(surf));
+
+ if (pipe_surface != NULL) {
+ struct brw_texture *tex = (struct brw_texture *)pipe_surface->texture;
+ if (pipe_surface->block.size == 4)
+ surf.ss0.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
+ else
+ surf.ss0.surface_format = BRW_SURFACEFORMAT_B5G6R5_UNORM;
+
+ surf.ss0.surface_type = BRW_SURFACE_2D;
+
+ surf.ss1.base_addr = brw_buffer_offset( brw, tex->buffer );
+
+ surf.ss2.width = pipe_surface->width - 1;
+ surf.ss2.height = pipe_surface->height - 1;
+ surf.ss3.tile_walk = BRW_TILEWALK_XMAJOR;
+ surf.ss3.tiled_surface = 0;
+ surf.ss3.pitch = pipe_surface->stride - 1;
+ } else {
+ surf.ss0.surface_format = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
+ surf.ss0.surface_type = BRW_SURFACE_NULL;
+ }
+
+ /* BRW_NEW_BLEND */
+ surf.ss0.color_blend = (!brw->attribs.Blend->logicop_enable &&
+ brw->attribs.Blend->blend_enable);
+
+
+ surf.ss0.writedisable_red = !(brw->attribs.Blend->colormask & PIPE_MASK_R);
+ surf.ss0.writedisable_green = !(brw->attribs.Blend->colormask & PIPE_MASK_G);
+ surf.ss0.writedisable_blue = !(brw->attribs.Blend->colormask & PIPE_MASK_B);
+ surf.ss0.writedisable_alpha = !(brw->attribs.Blend->colormask & PIPE_MASK_A);
+
+
+
+
+ brw->wm.bind.surf_ss_offset[0] = brw_cache_data( &brw->cache[BRW_SS_SURFACE], &surf );
+
+ brw->wm.nr_surfaces = 1;
+ }
+
+
+ /* BRW_NEW_TEXTURE
+ */
+ for (i = 0; i < brw->num_textures && i < brw->num_samplers; i++) {
+ const struct brw_texture *texUnit = brw->attribs.Texture[i];
+
+ if (texUnit &&
+ texUnit->base.reference.count/*(texUnit->reference.count > 0) == really used */) {
+
+ brw_update_texture_surface(brw, i);
+
+ brw->wm.nr_surfaces = i+2;
+ }
+ else {
+ brw->wm.bind.surf_ss_offset[i+1] = 0;
+ }
+ }
+
+ brw->wm.bind_ss_offset = brw_cache_data( &brw->cache[BRW_SS_SURF_BIND],
+ &brw->wm.bind );
+}
+
+
+/* KW: Will find a different way to achieve this, see for example the
+ * state caches with relocs in the i915 swz driver.
+ */
+#if 0
+static void emit_reloc_wm_surfaces(struct brw_context *brw)
+{
+ int unit;
+
+ if (brw->state.draw_region != NULL) {
+ /* Emit framebuffer relocation */
+ dri_emit_reloc(brw_cache_buffer(brw, BRW_SS_SURFACE),
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
+ 0,
+ brw->wm.bind.surf_ss_offset[0] +
+ offsetof(struct brw_surface_state, ss1),
+ brw->state.draw_region->buffer);
+ }
+
+ /* Emit relocations for texture buffers */
+ for (unit = 0; unit < BRW_MAX_TEX_UNIT; unit++) {
+ struct gl_texture_unit *texUnit = &brw->attribs.Texture->Unit[unit];
+ struct gl_texture_object *tObj = texUnit->_Current;
+ struct intel_texture_object *intelObj = intel_texture_object(tObj);
+
+ if (texUnit->_ReallyEnabled && intelObj->mt != NULL) {
+ dri_emit_reloc(brw_cache_buffer(brw, BRW_SS_SURFACE),
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+ 0,
+ brw->wm.bind.surf_ss_offset[unit + 1] +
+ offsetof(struct brw_surface_state, ss1),
+ intelObj->mt->region->buffer);
+ }
+ }
+}
+#endif
+
+const struct brw_tracked_state brw_wm_surfaces = {
+ .dirty = {
+ .brw = (BRW_NEW_FRAMEBUFFER |
+ BRW_NEW_BLEND |
+ BRW_NEW_TEXTURE),
+ .cache = 0
+ },
+ .update = upload_wm_surfaces,
+};
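The OFFSET() macro defined in brw_wm_surface_state.c above is the classic null-pointer form of the standard offsetof(); the (disabled) emit_reloc_wm_surfaces() code in the same file already uses offsetof() directly. A small, self-contained check of the equivalence, for illustration only and not part of the patch:

#include <assert.h>
#include <stddef.h>

/* Same null-pointer trick as the OFFSET() macro in brw_wm_surface_state.c. */
#define OFFSET(TYPE, FIELD) ((unsigned) &(((TYPE *) 0)->FIELD))

struct example {
   int   a;
   float b;
};

int
main(void)
{
   /* Both expressions yield the byte offset of 'b' within struct example. */
   assert(OFFSET(struct example, b) == offsetof(struct example, b));
   return 0;
}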
diff --git a/src/gallium/drivers/identity/Makefile b/src/gallium/drivers/identity/Makefile
new file mode 100644
index 0000000000..e32b9102e5
--- /dev/null
+++ b/src/gallium/drivers/identity/Makefile
@@ -0,0 +1,12 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = identity
+
+C_SOURCES = \
+ id_objects.c \
+ id_context.c \
+ id_screen.c \
+ id_drm.c
+
+include ../../Makefile.template
diff --git a/src/gallium/drivers/identity/SConscript b/src/gallium/drivers/identity/SConscript
new file mode 100644
index 0000000000..7f079dd0a8
--- /dev/null
+++ b/src/gallium/drivers/identity/SConscript
@@ -0,0 +1,13 @@
+Import('*')
+
+env = env.Clone()
+
+identity = env.ConvenienceLibrary(
+ target = 'identity',
+ source = [
+ 'id_screen.c',
+ 'id_context.c',
+ 'id_objects.c',
+ ])
+
+Export('identity')
diff --git a/src/gallium/drivers/identity/id_context.c b/src/gallium/drivers/identity/id_context.c
new file mode 100644
index 0000000000..4e700089e3
--- /dev/null
+++ b/src/gallium/drivers/identity/id_context.c
@@ -0,0 +1,719 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "pipe/p_context.h"
+#include "util/u_memory.h"
+
+#include "id_public.h"
+#include "id_context.h"
+#include "id_objects.h"
+
+
+static void
+identity_destroy(struct pipe_context *_pipe)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->destroy(pipe);
+
+ free(id_pipe);
+}
+
+static void
+identity_set_edgeflags(struct pipe_context *_pipe,
+ const unsigned *bitfield)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->set_edgeflags(pipe,
+ bitfield);
+}
+
+static boolean
+identity_draw_arrays(struct pipe_context *_pipe,
+ unsigned prim,
+ unsigned start,
+ unsigned count)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ return pipe->draw_arrays(pipe,
+ prim,
+ start,
+ count);
+}
+
+static boolean
+identity_draw_elements(struct pipe_context *_pipe,
+ struct pipe_buffer *_indexBuffer,
+ unsigned indexSize,
+ unsigned prim,
+ unsigned start,
+ unsigned count)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct identity_buffer *id_buffer = identity_buffer(_indexBuffer);
+ struct pipe_context *pipe = id_pipe->pipe;
+ struct pipe_buffer *indexBuffer = id_buffer->buffer;
+
+ return pipe->draw_elements(pipe,
+ indexBuffer,
+ indexSize,
+ prim,
+ start,
+ count);
+}
+
+static boolean
+identity_draw_range_elements(struct pipe_context *_pipe,
+ struct pipe_buffer *_indexBuffer,
+ unsigned indexSize,
+ unsigned minIndex,
+ unsigned maxIndex,
+ unsigned mode,
+ unsigned start,
+ unsigned count)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct identity_buffer *id_buffer = identity_buffer(_indexBuffer);
+ struct pipe_context *pipe = id_pipe->pipe;
+ struct pipe_buffer *indexBuffer = id_buffer->buffer;
+
+ return pipe->draw_range_elements(pipe,
+ indexBuffer,
+ indexSize,
+ minIndex,
+ maxIndex,
+ mode,
+ start,
+ count);
+}
+
+static struct pipe_query *
+identity_create_query(struct pipe_context *_pipe,
+ unsigned query_type)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ return pipe->create_query(pipe,
+ query_type);
+}
+
+static void
+identity_destroy_query(struct pipe_context *_pipe,
+ struct pipe_query *query)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->destroy_query(pipe,
+ query);
+}
+
+static void
+identity_begin_query(struct pipe_context *_pipe,
+ struct pipe_query *query)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->begin_query(pipe,
+ query);
+}
+
+static void
+identity_end_query(struct pipe_context *_pipe,
+ struct pipe_query *query)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->end_query(pipe,
+ query);
+}
+
+static boolean
+identity_get_query_result(struct pipe_context *_pipe,
+ struct pipe_query *query,
+ boolean wait,
+ uint64_t *result)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ return pipe->get_query_result(pipe,
+ query,
+ wait,
+ result);
+}
+
+static void *
+identity_create_blend_state(struct pipe_context *_pipe,
+ const struct pipe_blend_state *blend)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ return pipe->create_blend_state(pipe,
+ blend);
+}
+
+static void
+identity_bind_blend_state(struct pipe_context *_pipe,
+ void *blend)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->bind_blend_state(pipe,
+ blend);
+}
+
+static void
+identity_delete_blend_state(struct pipe_context *_pipe,
+ void *blend)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->delete_blend_state(pipe,
+ blend);
+}
+
+static void *
+identity_create_sampler_state(struct pipe_context *_pipe,
+ const struct pipe_sampler_state *sampler)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ return pipe->create_sampler_state(pipe,
+ sampler);
+}
+
+static void
+identity_bind_sampler_states(struct pipe_context *_pipe,
+ unsigned num,
+ void **samplers)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->bind_sampler_states(pipe,
+ num,
+ samplers);
+}
+
+static void
+identity_delete_sampler_state(struct pipe_context *_pipe,
+ void *sampler)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->delete_sampler_state(pipe,
+ sampler);
+}
+
+static void *
+identity_create_rasterizer_state(struct pipe_context *_pipe,
+ const struct pipe_rasterizer_state *rasterizer)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ return pipe->create_rasterizer_state(pipe,
+ rasterizer);
+}
+
+static void
+identity_bind_rasterizer_state(struct pipe_context *_pipe,
+ void *rasterizer)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->bind_rasterizer_state(pipe,
+ rasterizer);
+}
+
+static void
+identity_delete_rasterizer_state(struct pipe_context *_pipe,
+ void *rasterizer)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->delete_rasterizer_state(pipe,
+ rasterizer);
+}
+
+static void *
+identity_create_depth_stencil_alpha_state(struct pipe_context *_pipe,
+ const struct pipe_depth_stencil_alpha_state *depth_stencil_alpha)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ return pipe->create_depth_stencil_alpha_state(pipe,
+ depth_stencil_alpha);
+}
+
+static void
+identity_bind_depth_stencil_alpha_state(struct pipe_context *_pipe,
+ void *depth_stencil_alpha)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->bind_depth_stencil_alpha_state(pipe,
+ depth_stencil_alpha);
+}
+
+static void
+identity_delete_depth_stencil_alpha_state(struct pipe_context *_pipe,
+ void *depth_stencil_alpha)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->delete_depth_stencil_alpha_state(pipe,
+ depth_stencil_alpha);
+}
+
+static void *
+identity_create_fs_state(struct pipe_context *_pipe,
+ const struct pipe_shader_state *fs)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ return pipe->create_fs_state(pipe,
+ fs);
+}
+
+static void
+identity_bind_fs_state(struct pipe_context *_pipe,
+ void *fs)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->bind_fs_state(pipe,
+ fs);
+}
+
+static void
+identity_delete_fs_state(struct pipe_context *_pipe,
+ void *fs)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->delete_fs_state(pipe,
+ fs);
+}
+
+static void *
+identity_create_vs_state(struct pipe_context *_pipe,
+ const struct pipe_shader_state *vs)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ return pipe->create_vs_state(pipe,
+ vs);
+}
+
+static void
+identity_bind_vs_state(struct pipe_context *_pipe,
+ void *vs)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->bind_vs_state(pipe,
+ vs);
+}
+
+static void
+identity_delete_vs_state(struct pipe_context *_pipe,
+ void *vs)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->delete_vs_state(pipe,
+ vs);
+}
+
+static void
+identity_set_blend_color(struct pipe_context *_pipe,
+ const struct pipe_blend_color *blend_color)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->set_blend_color(pipe,
+ blend_color);
+}
+
+static void
+identity_set_clip_state(struct pipe_context *_pipe,
+ const struct pipe_clip_state *clip)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->set_clip_state(pipe,
+ clip);
+}
+
+static void
+identity_set_constant_buffer(struct pipe_context *_pipe,
+ uint shader,
+ uint index,
+ const struct pipe_constant_buffer *_buffer)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+ struct pipe_constant_buffer unwrapped_buffer;
+ struct pipe_constant_buffer *buffer = NULL;
+
+ /* unwrap the input state */
+ if (_buffer) {
+ unwrapped_buffer.buffer = identity_buffer_unwrap(_buffer->buffer);
+ buffer = &unwrapped_buffer;
+ }
+
+ pipe->set_constant_buffer(pipe,
+ shader,
+ index,
+ buffer);
+}
+
+static void
+identity_set_framebuffer_state(struct pipe_context *_pipe,
+ const struct pipe_framebuffer_state *_state)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+ struct pipe_framebuffer_state unwrapped_state;
+ struct pipe_framebuffer_state *state = NULL;
+ unsigned i;
+
+ /* unwrap the input state */
+ if (_state) {
+ memcpy(&unwrapped_state, _state, sizeof(unwrapped_state));
+ for(i = 0; i < _state->nr_cbufs; i++)
+ unwrapped_state.cbufs[i] = identity_surface_unwrap(_state->cbufs[i]);
+ for (; i < PIPE_MAX_COLOR_BUFS; i++)
+ unwrapped_state.cbufs[i] = NULL;
+ unwrapped_state.zsbuf = identity_surface_unwrap(_state->zsbuf);
+ state = &unwrapped_state;
+ }
+
+ pipe->set_framebuffer_state(pipe,
+ state);
+}
+
+static void
+identity_set_polygon_stipple(struct pipe_context *_pipe,
+ const struct pipe_poly_stipple *poly_stipple)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->set_polygon_stipple(pipe,
+ poly_stipple);
+}
+
+static void
+identity_set_scissor_state(struct pipe_context *_pipe,
+ const struct pipe_scissor_state *scissor)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->set_scissor_state(pipe,
+ scissor);
+}
+
+static void
+identity_set_viewport_state(struct pipe_context *_pipe,
+ const struct pipe_viewport_state *viewport)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->set_viewport_state(pipe,
+ viewport);
+}
+
+static void
+identity_set_sampler_textures(struct pipe_context *_pipe,
+ unsigned num_textures,
+ struct pipe_texture **_textures)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+ struct pipe_texture *unwrapped_textures[PIPE_MAX_SAMPLERS];
+ struct pipe_texture **textures = NULL;
+ unsigned i;
+
+ if (_textures) {
+ for (i = 0; i < num_textures; i++)
+ unwrapped_textures[i] = identity_texture_unwrap(_textures[i]);
+ for (; i < PIPE_MAX_SAMPLERS; i++)
+ unwrapped_textures[i] = NULL;
+
+ textures = unwrapped_textures;
+ }
+
+ pipe->set_sampler_textures(pipe,
+ num_textures,
+ textures);
+}
+
+static void
+identity_set_vertex_buffers(struct pipe_context *_pipe,
+ unsigned num_buffers,
+ const struct pipe_vertex_buffer *_buffers)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+ struct pipe_vertex_buffer unwrapped_buffers[PIPE_MAX_SHADER_INPUTS];
+ struct pipe_vertex_buffer *buffers = NULL;
+ unsigned i;
+
+ if (num_buffers) {
+ memcpy(unwrapped_buffers, _buffers, num_buffers * sizeof(*_buffers));
+ for (i = 0; i < num_buffers; i++)
+ unwrapped_buffers[i].buffer = identity_buffer_unwrap(_buffers[i].buffer);
+ buffers = unwrapped_buffers;
+ }
+
+ pipe->set_vertex_buffers(pipe,
+ num_buffers,
+ buffers);
+}
+
+static void
+identity_set_vertex_elements(struct pipe_context *_pipe,
+ unsigned num_elements,
+ const struct pipe_vertex_element *vertex_elements)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->set_vertex_elements(pipe,
+ num_elements,
+ vertex_elements);
+}
+
+static void
+identity_surface_copy(struct pipe_context *_pipe,
+ struct pipe_surface *_dst,
+ unsigned dstx,
+ unsigned dsty,
+ struct pipe_surface *_src,
+ unsigned srcx,
+ unsigned srcy,
+ unsigned width,
+ unsigned height)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct identity_surface *id_surface_dst = identity_surface(_dst);
+ struct identity_surface *id_surface_src = identity_surface(_src);
+ struct pipe_context *pipe = id_pipe->pipe;
+ struct pipe_surface *dst = id_surface_dst->surface;
+ struct pipe_surface *src = id_surface_src->surface;
+
+ pipe->surface_copy(pipe,
+ dst,
+ dstx,
+ dsty,
+ src,
+ srcx,
+ srcy,
+ width,
+ height);
+}
+
+static void
+identity_surface_fill(struct pipe_context *_pipe,
+ struct pipe_surface *_dst,
+ unsigned dstx,
+ unsigned dsty,
+ unsigned width,
+ unsigned height,
+ unsigned value)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct identity_surface *id_surface_dst = identity_surface(_dst);
+ struct pipe_context *pipe = id_pipe->pipe;
+ struct pipe_surface *dst = id_surface_dst->surface;
+
+ pipe->surface_fill(pipe,
+ dst,
+ dstx,
+ dsty,
+ width,
+ height,
+ value);
+}
+
+static void
+identity_clear(struct pipe_context *_pipe,
+ unsigned buffers,
+ const float *rgba,
+ double depth,
+ unsigned stencil)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->clear(pipe,
+ buffers,
+ rgba,
+ depth,
+ stencil);
+}
+
+static void
+identity_flush(struct pipe_context *_pipe,
+ unsigned flags,
+ struct pipe_fence_handle **fence)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct pipe_context *pipe = id_pipe->pipe;
+
+ pipe->flush(pipe,
+ flags,
+ fence);
+}
+
+static unsigned int
+identity_is_texture_referenced(struct pipe_context *_pipe,
+ struct pipe_texture *_texture,
+ unsigned face,
+ unsigned level)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct identity_texture *id_texture = identity_texture(_texture);
+ struct pipe_context *pipe = id_pipe->pipe;
+ struct pipe_texture *texture = id_texture->texture;
+
+ return pipe->is_texture_referenced(pipe,
+ texture,
+ face,
+ level);
+}
+
+static unsigned int
+identity_is_buffer_referenced(struct pipe_context *_pipe,
+ struct pipe_buffer *_buffer)
+{
+ struct identity_context *id_pipe = identity_context(_pipe);
+ struct identity_buffer *id_buffer = identity_buffer(_buffer);
+ struct pipe_context *pipe = id_pipe->pipe;
+ struct pipe_buffer *buffer = id_buffer->buffer;
+
+ return pipe->is_buffer_referenced(pipe,
+ buffer);
+}
+
+struct pipe_context *
+identity_context_create(struct pipe_screen *_screen, struct pipe_context *pipe)
+{
+ struct identity_context *id_pipe;
+ (void)identity_screen(_screen);
+
+ id_pipe = CALLOC_STRUCT(identity_context);
+ if (!id_pipe) {
+ return NULL;
+ }
+
+ id_pipe->base.winsys = NULL;
+ id_pipe->base.screen = _screen;
+ id_pipe->base.priv = pipe->priv;
+ id_pipe->base.draw = NULL;
+
+ id_pipe->base.destroy = identity_destroy;
+ id_pipe->base.set_edgeflags = identity_set_edgeflags;
+ id_pipe->base.draw_arrays = identity_draw_arrays;
+ id_pipe->base.draw_elements = identity_draw_elements;
+ id_pipe->base.draw_range_elements = identity_draw_range_elements;
+ id_pipe->base.create_query = identity_create_query;
+ id_pipe->base.destroy_query = identity_destroy_query;
+ id_pipe->base.begin_query = identity_begin_query;
+ id_pipe->base.end_query = identity_end_query;
+ id_pipe->base.get_query_result = identity_get_query_result;
+ id_pipe->base.create_blend_state = identity_create_blend_state;
+ id_pipe->base.bind_blend_state = identity_bind_blend_state;
+ id_pipe->base.delete_blend_state = identity_delete_blend_state;
+ id_pipe->base.create_sampler_state = identity_create_sampler_state;
+ id_pipe->base.bind_sampler_states = identity_bind_sampler_states;
+ id_pipe->base.delete_sampler_state = identity_delete_sampler_state;
+ id_pipe->base.create_rasterizer_state = identity_create_rasterizer_state;
+ id_pipe->base.bind_rasterizer_state = identity_bind_rasterizer_state;
+ id_pipe->base.delete_rasterizer_state = identity_delete_rasterizer_state;
+ id_pipe->base.create_depth_stencil_alpha_state = identity_create_depth_stencil_alpha_state;
+ id_pipe->base.bind_depth_stencil_alpha_state = identity_bind_depth_stencil_alpha_state;
+ id_pipe->base.delete_depth_stencil_alpha_state = identity_delete_depth_stencil_alpha_state;
+ id_pipe->base.create_fs_state = identity_create_fs_state;
+ id_pipe->base.bind_fs_state = identity_bind_fs_state;
+ id_pipe->base.delete_fs_state = identity_delete_fs_state;
+ id_pipe->base.create_vs_state = identity_create_vs_state;
+ id_pipe->base.bind_vs_state = identity_bind_vs_state;
+ id_pipe->base.delete_vs_state = identity_delete_vs_state;
+ id_pipe->base.set_blend_color = identity_set_blend_color;
+ id_pipe->base.set_clip_state = identity_set_clip_state;
+ id_pipe->base.set_constant_buffer = identity_set_constant_buffer;
+ id_pipe->base.set_framebuffer_state = identity_set_framebuffer_state;
+ id_pipe->base.set_polygon_stipple = identity_set_polygon_stipple;
+ id_pipe->base.set_scissor_state = identity_set_scissor_state;
+ id_pipe->base.set_viewport_state = identity_set_viewport_state;
+ id_pipe->base.set_sampler_textures = identity_set_sampler_textures;
+ id_pipe->base.set_vertex_buffers = identity_set_vertex_buffers;
+ id_pipe->base.set_vertex_elements = identity_set_vertex_elements;
+ id_pipe->base.surface_copy = identity_surface_copy;
+ id_pipe->base.surface_fill = identity_surface_fill;
+ id_pipe->base.clear = identity_clear;
+ id_pipe->base.flush = identity_flush;
+ id_pipe->base.is_texture_referenced = identity_is_texture_referenced;
+ id_pipe->base.is_buffer_referenced = identity_is_buffer_referenced;
+
+ id_pipe->pipe = pipe;
+
+ return &id_pipe->base;
+}
diff --git a/src/gallium/drivers/identity/id_context.h b/src/gallium/drivers/identity/id_context.h
new file mode 100644
index 0000000000..75b73fc7df
--- /dev/null
+++ b/src/gallium/drivers/identity/id_context.h
@@ -0,0 +1,48 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef ID_CONTEXT_H
+#define ID_CONTEXT_H
+
+#include "pipe/p_state.h"
+#include "pipe/p_context.h"
+
+
+struct identity_context {
+ struct pipe_context base; /**< base class */
+
+ struct pipe_context *pipe;
+};
+
+
+static INLINE struct identity_context *
+identity_context(struct pipe_context *pipe)
+{
+ return (struct identity_context *)pipe;
+}
+
+#endif /* ID_CONTEXT_H */
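A minimal usage sketch of the identity wrapper, assuming some winsys already provides a real screen and context: the get_real_screen()/get_real_context() helpers below are hypothetical placeholders and not part of this patch, while identity_screen_create() and identity_context_create() are the entry points declared in id_public.h.

#include "id_public.h"

/* Hypothetical helpers standing in for whatever winsys creates the
 * underlying screen and context; they are not part of this patch. */
struct pipe_screen *get_real_screen(void);
struct pipe_context *get_real_context(struct pipe_screen *screen);

static struct pipe_context *
wrap_with_identity(void)
{
   struct pipe_screen *real_screen = get_real_screen();
   struct pipe_screen *screen = identity_screen_create(real_screen);
   struct pipe_context *real_pipe = get_real_context(real_screen);

   /* Every method on the returned context unwraps its arguments and
    * forwards to real_pipe, as id_context.c above shows. */
   return identity_context_create(screen, real_pipe);
}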
diff --git a/src/gallium/drivers/identity/id_drm.c b/src/gallium/drivers/identity/id_drm.c
new file mode 100644
index 0000000000..14f68ac0d0
--- /dev/null
+++ b/src/gallium/drivers/identity/id_drm.c
@@ -0,0 +1,173 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "state_tracker/drm_api.h"
+
+#include "util/u_memory.h"
+#include "identity/id_drm.h"
+#include "identity/id_screen.h"
+#include "identity/id_public.h"
+#include "identity/id_screen.h"
+#include "identity/id_objects.h"
+
+struct identity_drm_api
+{
+ struct drm_api base;
+
+ struct drm_api *api;
+};
+
+static INLINE struct identity_drm_api *
+identity_drm_api(struct drm_api *_api)
+{
+ return (struct identity_drm_api *)_api;
+}
+
+static struct pipe_screen *
+identity_drm_create_screen(struct drm_api *_api, int fd,
+ struct drm_create_screen_arg *arg)
+{
+ struct identity_drm_api *id_api = identity_drm_api(_api);
+ struct drm_api *api = id_api->api;
+ struct pipe_screen *screen;
+
+ if (arg && arg->mode != DRM_CREATE_NORMAL)
+ return NULL;
+
+ screen = api->create_screen(api, fd, arg);
+
+ return identity_screen_create(screen);
+}
+
+static struct pipe_context *
+identity_drm_create_context(struct drm_api *_api,
+ struct pipe_screen *_screen)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct identity_drm_api *id_api = identity_drm_api(_api);
+ struct pipe_screen *screen = id_screen->screen;
+ struct drm_api *api = id_api->api;
+ struct pipe_context *pipe;
+
+ pipe = api->create_context(api, screen);
+
+ pipe = identity_context_create(_screen, pipe);
+
+ return pipe;
+}
+
+static struct pipe_texture *
+identity_drm_texture_from_shared_handle(struct drm_api *_api,
+ struct pipe_screen *_screen,
+ struct pipe_texture *templ,
+ const char *name,
+ unsigned stride,
+ unsigned handle)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct identity_drm_api *id_api = identity_drm_api(_api);
+ struct pipe_screen *screen = id_screen->screen;
+ struct drm_api *api = id_api->api;
+ struct pipe_texture *result;
+
+ result = api->texture_from_shared_handle(api, screen, templ, name, stride, handle);
+
+ result = identity_texture_create(identity_screen(_screen), result);
+
+ return result;
+}
+
+static boolean
+identity_drm_shared_handle_from_texture(struct drm_api *_api,
+ struct pipe_screen *_screen,
+ struct pipe_texture *_texture,
+ unsigned *stride,
+ unsigned *handle)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct identity_texture *id_texture = identity_texture(_texture);
+ struct identity_drm_api *id_api = identity_drm_api(_api);
+ struct pipe_screen *screen = id_screen->screen;
+ struct pipe_texture *texture = id_texture->texture;
+ struct drm_api *api = id_api->api;
+
+ return api->shared_handle_from_texture(api, screen, texture, stride, handle);
+}
+
+static boolean
+identity_drm_local_handle_from_texture(struct drm_api *_api,
+ struct pipe_screen *_screen,
+ struct pipe_texture *_texture,
+ unsigned *stride,
+ unsigned *handle)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct identity_texture *id_texture = identity_texture(_texture);
+ struct identity_drm_api *id_api = identity_drm_api(_api);
+ struct pipe_screen *screen = id_screen->screen;
+ struct pipe_texture *texture = id_texture->texture;
+ struct drm_api *api = id_api->api;
+
+ return api->local_handle_from_texture(api, screen, texture, stride, handle);
+}
+
+static void
+identity_drm_destroy(struct drm_api *_api)
+{
+ struct identity_drm_api *id_api = identity_drm_api(_api);
+ struct drm_api *api = id_api->api;
+ api->destroy(api);
+
+ free(id_api);
+}
+
+struct drm_api *
+identity_drm_create(struct drm_api *api)
+{
+ struct identity_drm_api *id_api;
+
+ if (!api)
+ goto error;
+
+ id_api = CALLOC_STRUCT(identity_drm_api);
+
+ if (!id_api)
+ goto error;
+
+ id_api->base.create_screen = identity_drm_create_screen;
+ id_api->base.create_context = identity_drm_create_context;
+ id_api->base.texture_from_shared_handle = identity_drm_texture_from_shared_handle;
+ id_api->base.shared_handle_from_texture = identity_drm_shared_handle_from_texture;
+ id_api->base.local_handle_from_texture = identity_drm_local_handle_from_texture;
+ id_api->base.destroy = identity_drm_destroy;
+ id_api->api = api;
+
+ return &id_api->base;
+
+error:
+ return api;
+}
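A sketch of how the drm_api wrapper above might be layered over an existing implementation; underlying_drm_api_create() is a hypothetical stand-in for the wrapped driver's own constructor, not a function from this patch.

#include "state_tracker/drm_api.h"
#include "identity/id_drm.h"

/* Hypothetical: the wrapped driver's own drm_api constructor. */
struct drm_api *underlying_drm_api_create(void);

struct drm_api *
debug_drm_api_create(void)
{
   /* identity_drm_create() returns the wrapped api unchanged when it is
    * passed NULL or when allocation fails, so it is safe to layer in. */
   return identity_drm_create(underlying_drm_api_create());
}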
diff --git a/src/gallium/drivers/identity/id_drm.h b/src/gallium/drivers/identity/id_drm.h
new file mode 100644
index 0000000000..cf2ad2ce07
--- /dev/null
+++ b/src/gallium/drivers/identity/id_drm.h
@@ -0,0 +1,35 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef ID_DRM_H
+#define ID_DRM_H
+
+struct drm_api;
+
+struct drm_api* identity_drm_create(struct drm_api *api);
+
+#endif /* ID_DRM_H */
diff --git a/src/gallium/drivers/identity/id_objects.c b/src/gallium/drivers/identity/id_objects.c
new file mode 100644
index 0000000000..e893e59940
--- /dev/null
+++ b/src/gallium/drivers/identity/id_objects.c
@@ -0,0 +1,182 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_memory.h"
+
+#include "id_public.h"
+#include "id_screen.h"
+#include "id_objects.h"
+
+struct pipe_buffer *
+identity_buffer_create(struct identity_screen *id_screen,
+ struct pipe_buffer *buffer)
+{
+ struct identity_buffer *id_buffer;
+
+ if(!buffer)
+ goto error;
+
+ assert(buffer->screen == id_screen->screen);
+
+ id_buffer = CALLOC_STRUCT(identity_buffer);
+ if(!id_buffer)
+ goto error;
+
+ memcpy(&id_buffer->base, buffer, sizeof(struct pipe_buffer));
+
+ pipe_reference_init(&id_buffer->base.reference, 1);
+ id_buffer->base.screen = &id_screen->base;
+ id_buffer->buffer = buffer;
+
+ return &id_buffer->base;
+
+error:
+ pipe_buffer_reference(&buffer, NULL);
+ return NULL;
+}
+
+void
+identity_buffer_destroy(struct identity_buffer *id_buffer)
+{
+ pipe_buffer_reference(&id_buffer->buffer, NULL);
+ FREE(id_buffer);
+}
+
+
+struct pipe_texture *
+identity_texture_create(struct identity_screen *id_screen,
+ struct pipe_texture *texture)
+{
+ struct identity_texture *id_texture;
+
+ if(!texture)
+ goto error;
+
+ assert(texture->screen == id_screen->screen);
+
+ id_texture = CALLOC_STRUCT(identity_texture);
+ if(!id_texture)
+ goto error;
+
+ memcpy(&id_texture->base, texture, sizeof(struct pipe_texture));
+
+ pipe_reference_init(&id_texture->base.reference, 1);
+ id_texture->base.screen = &id_screen->base;
+ id_texture->texture = texture;
+
+ return &id_texture->base;
+
+error:
+ pipe_texture_reference(&texture, NULL);
+ return NULL;
+}
+
+void
+identity_texture_destroy(struct identity_texture *id_texture)
+{
+ pipe_texture_reference(&id_texture->texture, NULL);
+ FREE(id_texture);
+}
+
+
+struct pipe_surface *
+identity_surface_create(struct identity_texture *id_texture,
+ struct pipe_surface *surface)
+{
+ struct identity_surface *id_surface;
+
+ if(!surface)
+ goto error;
+
+ assert(surface->texture == id_texture->texture);
+
+ id_surface = CALLOC_STRUCT(identity_surface);
+ if(!id_surface)
+ goto error;
+
+ memcpy(&id_surface->base, surface, sizeof(struct pipe_surface));
+
+ pipe_reference_init(&id_surface->base.reference, 1);
+ id_surface->base.texture = NULL;
+ pipe_texture_reference(&id_surface->base.texture, &id_texture->base);
+ id_surface->surface = surface;
+
+ return &id_surface->base;
+
+error:
+ pipe_surface_reference(&surface, NULL);
+ return NULL;
+}
+
+void
+identity_surface_destroy(struct identity_surface *id_surface)
+{
+ pipe_texture_reference(&id_surface->base.texture, NULL);
+ pipe_surface_reference(&id_surface->surface, NULL);
+ FREE(id_surface);
+}
+
+
+struct pipe_transfer *
+identity_transfer_create(struct identity_texture *id_texture,
+ struct pipe_transfer *transfer)
+{
+ struct identity_transfer *id_transfer;
+
+ if(!transfer)
+ goto error;
+
+ assert(transfer->texture == id_texture->texture);
+
+ id_transfer = CALLOC_STRUCT(identity_transfer);
+ if(!id_transfer)
+ goto error;
+
+ memcpy(&id_transfer->base, transfer, sizeof(struct pipe_transfer));
+
+ id_transfer->base.texture = NULL;
+ pipe_texture_reference(&id_transfer->base.texture, &id_texture->base);
+ id_transfer->transfer = transfer;
+ assert(id_transfer->base.texture == &id_texture->base);
+
+ return &id_transfer->base;
+
+error:
+ transfer->texture->screen->tex_transfer_destroy(transfer);
+ return NULL;
+}
+
+void
+identity_transfer_destroy(struct identity_transfer *id_transfer)
+{
+ struct identity_screen *id_screen = identity_screen(id_transfer->base.texture->screen);
+ struct pipe_screen *screen = id_screen->screen;
+
+ pipe_texture_reference(&id_transfer->base.texture, NULL);
+ screen->tex_transfer_destroy(id_transfer->transfer);
+ FREE(id_transfer);
+}
diff --git a/src/gallium/drivers/identity/id_objects.h b/src/gallium/drivers/identity/id_objects.h
new file mode 100644
index 0000000000..ce58faa3c7
--- /dev/null
+++ b/src/gallium/drivers/identity/id_objects.h
@@ -0,0 +1,169 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef ID_OBJECTS_H
+#define ID_OBJECTS_H
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_state.h"
+
+#include "id_screen.h"
+
+
+struct identity_buffer
+{
+ struct pipe_buffer base;
+
+ struct pipe_buffer *buffer;
+};
+
+
+struct identity_texture
+{
+ struct pipe_texture base;
+
+ struct pipe_texture *texture;
+};
+
+
+struct identity_surface
+{
+ struct pipe_surface base;
+
+ struct pipe_surface *surface;
+};
+
+
+struct identity_transfer
+{
+ struct pipe_transfer base;
+
+ struct pipe_transfer *transfer;
+};
+
+
+static INLINE struct identity_buffer *
+identity_buffer(struct pipe_buffer *_buffer)
+{
+ if(!_buffer)
+ return NULL;
+ (void)identity_screen(_buffer->screen);
+ return (struct identity_buffer *)_buffer;
+}
+
+static INLINE struct identity_texture *
+identity_texture(struct pipe_texture *_texture)
+{
+ if(!_texture)
+ return NULL;
+ (void)identity_screen(_texture->screen);
+ return (struct identity_texture *)_texture;
+}
+
+static INLINE struct identity_surface *
+identity_surface(struct pipe_surface *_surface)
+{
+ if(!_surface)
+ return NULL;
+ (void)identity_texture(_surface->texture);
+ return (struct identity_surface *)_surface;
+}
+
+static INLINE struct identity_transfer *
+identity_transfer(struct pipe_transfer *_transfer)
+{
+ if(!_transfer)
+ return NULL;
+ (void)identity_texture(_transfer->texture);
+ return (struct identity_transfer *)_transfer;
+}
+
+
+static INLINE struct pipe_buffer *
+identity_buffer_unwrap(struct pipe_buffer *_buffer)
+{
+ if(!_buffer)
+ return NULL;
+ return identity_buffer(_buffer)->buffer;
+}
+
+static INLINE struct pipe_texture *
+identity_texture_unwrap(struct pipe_texture *_texture)
+{
+ if(!_texture)
+ return NULL;
+ return identity_texture(_texture)->texture;
+}
+
+static INLINE struct pipe_surface *
+identity_surface_unwrap(struct pipe_surface *_surface)
+{
+ if(!_surface)
+ return NULL;
+ return identity_surface(_surface)->surface;
+}
+
+static INLINE struct pipe_transfer *
+identity_transfer_unwrap(struct pipe_transfer *_transfer)
+{
+ if(!_transfer)
+ return NULL;
+ return identity_transfer(_transfer)->transfer;
+}
+
+
+struct pipe_buffer *
+identity_buffer_create(struct identity_screen *id_screen,
+ struct pipe_buffer *buffer);
+
+void
+identity_buffer_destroy(struct identity_buffer *id_buffer);
+
+struct pipe_texture *
+identity_texture_create(struct identity_screen *id_screen,
+ struct pipe_texture *texture);
+
+void
+identity_texture_destroy(struct identity_texture *id_texture);
+
+struct pipe_surface *
+identity_surface_create(struct identity_texture *id_texture,
+ struct pipe_surface *surface);
+
+void
+identity_surface_destroy(struct identity_surface *id_surface);
+
+struct pipe_transfer *
+identity_transfer_create(struct identity_texture *id_texture,
+ struct pipe_transfer *transfer);
+
+void
+identity_transfer_destroy(struct identity_transfer *id_transfer);
+
+
+#endif /* ID_OBJECTS_H */
diff --git a/src/gallium/drivers/identity/id_public.h b/src/gallium/drivers/identity/id_public.h
new file mode 100644
index 0000000000..cac14cfd60
--- /dev/null
+++ b/src/gallium/drivers/identity/id_public.h
@@ -0,0 +1,40 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef ID_PUBLIC_H
+#define ID_PUBLIC_H
+
+struct pipe_screen;
+struct pipe_context;
+
+struct pipe_screen *
+identity_screen_create(struct pipe_screen *screen);
+
+struct pipe_context *
+identity_context_create(struct pipe_screen *screen, struct pipe_context *pipe);
+
+#endif /* ID_PUBLIC_H */
diff --git a/src/gallium/drivers/identity/id_screen.c b/src/gallium/drivers/identity/id_screen.c
new file mode 100644
index 0000000000..26439637d0
--- /dev/null
+++ b/src/gallium/drivers/identity/id_screen.c
@@ -0,0 +1,483 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "pipe/p_screen.h"
+#include "pipe/p_state.h"
+#include "util/u_memory.h"
+
+#include "id_public.h"
+#include "id_screen.h"
+#include "id_objects.h"
+
+
+static void
+identity_screen_destroy(struct pipe_screen *_screen)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct pipe_screen *screen = id_screen->screen;
+
+ screen->destroy(screen);
+
+ FREE(id_screen);
+}
+
+static const char *
+identity_screen_get_name(struct pipe_screen *_screen)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct pipe_screen *screen = id_screen->screen;
+
+ return screen->get_name(screen);
+}
+
+static const char *
+identity_screen_get_vendor(struct pipe_screen *_screen)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct pipe_screen *screen = id_screen->screen;
+
+ return screen->get_vendor(screen);
+}
+
+static int
+identity_screen_get_param(struct pipe_screen *_screen,
+ int param)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct pipe_screen *screen = id_screen->screen;
+
+ return screen->get_param(screen,
+ param);
+}
+
+static float
+identity_screen_get_paramf(struct pipe_screen *_screen,
+ int param)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct pipe_screen *screen = id_screen->screen;
+
+ return screen->get_paramf(screen,
+ param);
+}
+
+static boolean
+identity_screen_is_format_supported(struct pipe_screen *_screen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned tex_usage,
+ unsigned geom_flags)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct pipe_screen *screen = id_screen->screen;
+
+ return screen->is_format_supported(screen,
+ format,
+ target,
+ tex_usage,
+ geom_flags);
+}
+
+static struct pipe_texture *
+identity_screen_texture_create(struct pipe_screen *_screen,
+ const struct pipe_texture *templat)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct pipe_screen *screen = id_screen->screen;
+ struct pipe_texture *result;
+
+ result = screen->texture_create(screen,
+ templat);
+
+ if (result)
+ return identity_texture_create(id_screen, result);
+ return NULL;
+}
+
+static struct pipe_texture *
+identity_screen_texture_blanket(struct pipe_screen *_screen,
+ const struct pipe_texture *templat,
+ const unsigned *stride,
+ struct pipe_buffer *_buffer)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct identity_buffer *id_buffer = identity_buffer(_buffer);
+ struct pipe_screen *screen = id_screen->screen;
+ struct pipe_buffer *buffer = id_buffer->buffer;
+ struct pipe_texture *result;
+
+ result = screen->texture_blanket(screen,
+ templat,
+ stride,
+ buffer);
+
+ if (result)
+ return identity_texture_create(id_screen, result);
+ return NULL;
+}
+
+static void
+identity_screen_texture_destroy(struct pipe_texture *_texture)
+{
+ identity_texture_destroy(identity_texture(_texture));
+}
+
+static struct pipe_surface *
+identity_screen_get_tex_surface(struct pipe_screen *_screen,
+ struct pipe_texture *_texture,
+ unsigned face,
+ unsigned level,
+ unsigned zslice,
+ unsigned usage)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct identity_texture *id_texture = identity_texture(_texture);
+ struct pipe_screen *screen = id_screen->screen;
+ struct pipe_texture *texture = id_texture->texture;
+ struct pipe_surface *result;
+
+ result = screen->get_tex_surface(screen,
+ texture,
+ face,
+ level,
+ zslice,
+ usage);
+
+ if (result)
+ return identity_surface_create(id_texture, result);
+ return NULL;
+}
+
+static void
+identity_screen_tex_surface_destroy(struct pipe_surface *_surface)
+{
+ identity_surface_destroy(identity_surface(_surface));
+}
+
+static struct pipe_transfer *
+identity_screen_get_tex_transfer(struct pipe_screen *_screen,
+ struct pipe_texture *_texture,
+ unsigned face,
+ unsigned level,
+ unsigned zslice,
+ enum pipe_transfer_usage usage,
+ unsigned x,
+ unsigned y,
+ unsigned w,
+ unsigned h)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct identity_texture *id_texture = identity_texture(_texture);
+ struct pipe_screen *screen = id_screen->screen;
+ struct pipe_texture *texture = id_texture->texture;
+ struct pipe_transfer *result;
+
+ result = screen->get_tex_transfer(screen,
+ texture,
+ face,
+ level,
+ zslice,
+ usage,
+ x,
+ y,
+ w,
+ h);
+
+ if (result)
+ return identity_transfer_create(id_texture, result);
+ return NULL;
+}
+
+static void
+identity_screen_tex_transfer_destroy(struct pipe_transfer *_transfer)
+{
+ identity_transfer_destroy(identity_transfer(_transfer));
+}
+
+static void *
+identity_screen_transfer_map(struct pipe_screen *_screen,
+ struct pipe_transfer *_transfer)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct identity_transfer *id_transfer = identity_transfer(_transfer);
+ struct pipe_screen *screen = id_screen->screen;
+ struct pipe_transfer *transfer = id_transfer->transfer;
+
+ return screen->transfer_map(screen,
+ transfer);
+}
+
+static void
+identity_screen_transfer_unmap(struct pipe_screen *_screen,
+ struct pipe_transfer *_transfer)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct identity_transfer *id_transfer = identity_transfer(_transfer);
+ struct pipe_screen *screen = id_screen->screen;
+ struct pipe_transfer *transfer = id_transfer->transfer;
+
+ screen->transfer_unmap(screen,
+ transfer);
+}
+
+static struct pipe_buffer *
+identity_screen_buffer_create(struct pipe_screen *_screen,
+ unsigned alignment,
+ unsigned usage,
+ unsigned size)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct pipe_screen *screen = id_screen->screen;
+ struct pipe_buffer *result;
+
+ result = screen->buffer_create(screen,
+ alignment,
+ usage,
+ size);
+
+ if (result)
+ return identity_buffer_create(id_screen, result);
+ return NULL;
+}
+
+static struct pipe_buffer *
+identity_screen_user_buffer_create(struct pipe_screen *_screen,
+ void *ptr,
+ unsigned bytes)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct pipe_screen *screen = id_screen->screen;
+ struct pipe_buffer *result;
+
+ result = screen->user_buffer_create(screen,
+ ptr,
+ bytes);
+
+ if (result)
+ return identity_buffer_create(id_screen, result);
+ return NULL;
+}
+
+static struct pipe_buffer *
+identity_screen_surface_buffer_create(struct pipe_screen *_screen,
+ unsigned width,
+ unsigned height,
+ enum pipe_format format,
+ unsigned usage,
+ unsigned tex_usage,
+ unsigned *stride)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct pipe_screen *screen = id_screen->screen;
+ struct pipe_buffer *result;
+
+ result = screen->surface_buffer_create(screen,
+ width,
+ height,
+ format,
+ usage,
+ tex_usage,
+ stride);
+
+ if (result)
+ return identity_buffer_create(id_screen, result);
+ return NULL;
+}
+
+static void *
+identity_screen_buffer_map(struct pipe_screen *_screen,
+ struct pipe_buffer *_buffer,
+ unsigned usage)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct identity_buffer *id_buffer = identity_buffer(_buffer);
+ struct pipe_screen *screen = id_screen->screen;
+ struct pipe_buffer *buffer = id_buffer->buffer;
+
+ return screen->buffer_map(screen,
+ buffer,
+ usage);
+}
+
+static void *
+identity_screen_buffer_map_range(struct pipe_screen *_screen,
+ struct pipe_buffer *_buffer,
+ unsigned offset,
+ unsigned length,
+ unsigned usage)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct identity_buffer *id_buffer = identity_buffer(_buffer);
+ struct pipe_screen *screen = id_screen->screen;
+ struct pipe_buffer *buffer = id_buffer->buffer;
+
+ return screen->buffer_map_range(screen,
+ buffer,
+ offset,
+ length,
+ usage);
+}
+
+static void
+identity_screen_buffer_flush_mapped_range(struct pipe_screen *_screen,
+ struct pipe_buffer *_buffer,
+ unsigned offset,
+ unsigned length)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct identity_buffer *id_buffer = identity_buffer(_buffer);
+ struct pipe_screen *screen = id_screen->screen;
+ struct pipe_buffer *buffer = id_buffer->buffer;
+
+ screen->buffer_flush_mapped_range(screen,
+ buffer,
+ offset,
+ length);
+}
+
+static void
+identity_screen_buffer_unmap(struct pipe_screen *_screen,
+ struct pipe_buffer *_buffer)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct identity_buffer *id_buffer = identity_buffer(_buffer);
+ struct pipe_screen *screen = id_screen->screen;
+ struct pipe_buffer *buffer = id_buffer->buffer;
+
+ screen->buffer_unmap(screen,
+ buffer);
+}
+
+static void
+identity_screen_buffer_destroy(struct pipe_buffer *_buffer)
+{
+ identity_buffer_destroy(identity_buffer(_buffer));
+}
+
+static void
+identity_screen_flush_frontbuffer(struct pipe_screen *_screen,
+ struct pipe_surface *_surface,
+ void *context_private)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct identity_surface *id_surface = identity_surface(_surface);
+ struct pipe_screen *screen = id_screen->screen;
+ struct pipe_surface *surface = id_surface->surface;
+
+ screen->flush_frontbuffer(screen,
+ surface,
+ context_private);
+}
+
+static void
+identity_screen_fence_reference(struct pipe_screen *_screen,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct pipe_screen *screen = id_screen->screen;
+
+ screen->fence_reference(screen,
+ ptr,
+ fence);
+}
+
+static int
+identity_screen_fence_signalled(struct pipe_screen *_screen,
+ struct pipe_fence_handle *fence,
+ unsigned flags)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct pipe_screen *screen = id_screen->screen;
+
+ return screen->fence_signalled(screen,
+ fence,
+ flags);
+}
+
+static int
+identity_screen_fence_finish(struct pipe_screen *_screen,
+ struct pipe_fence_handle *fence,
+ unsigned flags)
+{
+ struct identity_screen *id_screen = identity_screen(_screen);
+ struct pipe_screen *screen = id_screen->screen;
+
+ return screen->fence_finish(screen,
+ fence,
+ flags);
+}
+
+struct pipe_screen *
+identity_screen_create(struct pipe_screen *screen)
+{
+ struct identity_screen *id_screen;
+
+ id_screen = CALLOC_STRUCT(identity_screen);
+ if (!id_screen) {
+ return NULL;
+ }
+
+ id_screen->base.winsys = NULL;
+
+ id_screen->base.destroy = identity_screen_destroy;
+ id_screen->base.get_name = identity_screen_get_name;
+ id_screen->base.get_vendor = identity_screen_get_vendor;
+ id_screen->base.get_param = identity_screen_get_param;
+ id_screen->base.get_paramf = identity_screen_get_paramf;
+ id_screen->base.is_format_supported = identity_screen_is_format_supported;
+ id_screen->base.texture_create = identity_screen_texture_create;
+ id_screen->base.texture_blanket = identity_screen_texture_blanket;
+ id_screen->base.texture_destroy = identity_screen_texture_destroy;
+ id_screen->base.get_tex_surface = identity_screen_get_tex_surface;
+ id_screen->base.tex_surface_destroy = identity_screen_tex_surface_destroy;
+ id_screen->base.get_tex_transfer = identity_screen_get_tex_transfer;
+ id_screen->base.tex_transfer_destroy = identity_screen_tex_transfer_destroy;
+ id_screen->base.transfer_map = identity_screen_transfer_map;
+ id_screen->base.transfer_unmap = identity_screen_transfer_unmap;
+ id_screen->base.buffer_create = identity_screen_buffer_create;
+ id_screen->base.user_buffer_create = identity_screen_user_buffer_create;
+ id_screen->base.surface_buffer_create = identity_screen_surface_buffer_create;
+ if (screen->buffer_map)
+ id_screen->base.buffer_map = identity_screen_buffer_map;
+ if (screen->buffer_map_range)
+ id_screen->base.buffer_map_range = identity_screen_buffer_map_range;
+ if (screen->buffer_flush_mapped_range)
+ id_screen->base.buffer_flush_mapped_range = identity_screen_buffer_flush_mapped_range;
+ if (screen->buffer_unmap)
+ id_screen->base.buffer_unmap = identity_screen_buffer_unmap;
+ id_screen->base.buffer_destroy = identity_screen_buffer_destroy;
+ id_screen->base.flush_frontbuffer = identity_screen_flush_frontbuffer;
+ id_screen->base.fence_reference = identity_screen_fence_reference;
+ id_screen->base.fence_signalled = identity_screen_fence_signalled;
+ id_screen->base.fence_finish = identity_screen_fence_finish;
+
+ id_screen->screen = screen;
+
+ return &id_screen->base;
+}
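+
+/*
+ * Illustrative usage sketch, not part of the driver: the identity screen is
+ * meant to be layered on top of a real screen by whoever creates it.  The
+ * "real_screen_create" call and the "ws" argument below are hypothetical
+ * stand-ins for whatever driver/winsys pair is actually in use.
+ */
+#if 0
+static struct pipe_screen *
+wrapped_screen_create(struct pipe_winsys *ws)
+{
+   struct pipe_screen *screen = real_screen_create(ws);
+   /* every pipe_screen call is now forwarded through the identity wrapper */
+   return screen ? identity_screen_create(screen) : NULL;
+}
+#endif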
diff --git a/src/gallium/drivers/identity/id_screen.h b/src/gallium/drivers/identity/id_screen.h
new file mode 100644
index 0000000000..2c4f129089
--- /dev/null
+++ b/src/gallium/drivers/identity/id_screen.h
@@ -0,0 +1,48 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef ID_SCREEN_H
+#define ID_SCREEN_H
+
+#include "pipe/p_screen.h"
+#include "pipe/p_defines.h"
+
+
+struct identity_screen {
+ struct pipe_screen base;
+
+ struct pipe_screen *screen;
+};
+
+
+static INLINE struct identity_screen *
+identity_screen(struct pipe_screen *screen)
+{
+ return (struct identity_screen *)screen;
+}
+
+#endif /* ID_SCREEN_H */
diff --git a/src/gallium/drivers/llvmpipe/Makefile b/src/gallium/drivers/llvmpipe/Makefile
new file mode 100644
index 0000000000..06c586e6bb
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/Makefile
@@ -0,0 +1,55 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = llvmpipe
+
+C_SOURCES = \
+ lp_bld_alpha.c \
+ lp_bld_arit.c \
+ lp_bld_blend_aos.c \
+ lp_bld_blend_logicop.c \
+ lp_bld_blend_soa.c \
+ lp_bld_const.c \
+ lp_bld_conv.c \
+ lp_bld_debug.c \
+ lp_bld_depth.c \
+ lp_bld_flow.c \
+ lp_bld_format_aos.c \
+ lp_bld_format_soa.c \
+ lp_bld_interp.c \
+ lp_bld_intr.c \
+ lp_bld_logic.c \
+ lp_bld_sample_soa.c \
+ lp_bld_swizzle.c \
+ lp_bld_struct.c \
+ lp_bld_tgsi_soa.c \
+ lp_bld_type.c \
+ lp_buffer.c \
+ lp_clear.c \
+ lp_context.c \
+ lp_draw_arrays.c \
+ lp_flush.c \
+ lp_jit.c \
+ lp_prim_setup.c \
+ lp_prim_vbuf.c \
+ lp_setup.c \
+ lp_query.c \
+ lp_screen.c \
+ lp_state_blend.c \
+ lp_state_clip.c \
+ lp_state_derived.c \
+ lp_state_fs.c \
+ lp_state_rasterizer.c \
+ lp_state_sampler.c \
+ lp_state_surface.c \
+ lp_state_vertex.c \
+ lp_state_vs.c \
+ lp_surface.c \
+ lp_tex_cache.c \
+ lp_tex_sample_c.c \
+ lp_tex_sample_llvm.c \
+ lp_texture.c \
+ lp_tile_cache.c \
+ lp_tile_soa.c
+
+include ../../Makefile.template
diff --git a/src/gallium/drivers/llvmpipe/README b/src/gallium/drivers/llvmpipe/README
new file mode 100644
index 0000000000..89d08834a3
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/README
@@ -0,0 +1,149 @@
+LLVMPIPE -- a fork of softpipe that employs LLVM for code generation.
+
+
+Status
+======
+
+Done so far:
+
+ - the whole fragment pipeline is code generated in a single function
+
+ - input interpolation
+
+ - depth testing
+
+ - texture sampling (not all state/formats are supported)
+
+ - fragment shader TGSI translation
+   - same level of support as the TGSI SSE2 exec machine, with the exception
+     that we don't fall back to TGSI interpretation when an unsupported opcode
+     is found, but simply ignore it
+ - done in SoA layout
+ - input interpolation also code generated
+
+ - alpha testing
+
+ - blend (including logic ops)
+ - both in SoA and AoS layouts, but only the former used for now
+
+ - code is generic
+ - intermediates can be vectors of floats, ubytes, fixed point, etc, and of
+ any width and length
+ - not all operations are implemented for these types yet though
+
+Most mesa/progs/demos/* work.
+
+To do (roughly in this order):
+
+ - code generate stipple and stencil testing
+
+ - translate the remaining bits of texture sampling state
+
+ - translate TGSI control flow instructions, and all other remaining opcodes
+
+ - integrate with the draw module for VS code generation
+
+ - code generate the triangle setup and rasterization
+
+
+Requirements
+============
+
+ - Linux
+
+ - udis86 (http://udis86.sourceforge.net/). Use my repository, which decodes
+   opcodes not yet supported by upstream.
+
+ git clone git://people.freedesktop.org/~jrfonseca/udis86
+ cd udis86
+ ./configure --with-pic
+ make
+ sudo make install
+
+ - LLVM 2.5. On Debian based distributions do:
+
+ aptitude install llvm-dev
+
+   There is a typo in one of the llvm-dev 2.5 headers that causes compilation
+ errors in the debug build:
+
+ --- /usr/include/llvm-c/Core.h.orig 2009-08-10 15:38:54.000000000 +0100
+ +++ /usr/include/llvm-c/Core.h 2009-08-10 15:38:25.000000000 +0100
+ @@ -831,7 +831,7 @@
+ template<typename T>
+ inline T **unwrap(LLVMValueRef *Vals, unsigned Length) {
+ #if DEBUG
+ - for (LLVMValueRef *I = Vals, E = Vals + Length; I != E; ++I)
+ + for (LLVMValueRef *I = Vals, *E = Vals + Length; I != E; ++I)
+ cast<T>(*I);
+ #endif
+ return reinterpret_cast<T**>(Vals);
+
+ - An x86 or amd64 processor with support for SSE2, SSE3, and SSE4.1 SIMD
+   instructions. This is necessary because we emit several SSE intrinsics for
+   convenience. See /proc/cpuinfo to check what your CPU supports.
+
+ - scons
+
+
+Building
+========
+
+To build everything invoke scons as:
+
+ scons debug=yes statetrackers=mesa drivers=llvmpipe winsys=xlib dri=false -k
+
+Alternatively, you can build it with GNU make, if you prefer, by invoking it as
+
+ make linux-llvm
+
+but the rest of these instructions assume that scons is used.
+
+
+Using
+=====
+
+Building will create a drop-in alternative for libGL.so. To use it set the
+environment variables:
+
+ export LD_LIBRARY_PATH=$PWD/build/linux-x86_64-debug/lib:$LD_LIBRARY_PATH
+
+or
+
+ export LD_LIBRARY_PATH=$PWD/build/linux-x86-debug/lib:$LD_LIBRARY_PATH
+
+For performance evaluation pass debug=no to scons, and use the corresponding
+lib directory without the "-debug" suffix.
+
+
+Unit testing
+============
+
+Building will also create several unit tests in
+build/linux-???-debug/gallium/drivers/llvmpipe:
+
+ - lp_test_blend: blending
+ - lp_test_conv: SIMD vector conversion
+ - lp_test_format: pixel unpacking/packing
+
+Some of these tests can output results and benchmarks to a tab-separated file
+for later analysis, e.g.:
+
+ build/linux-x86_64-debug/gallium/drivers/llvmpipe/lp_test_blend -o blend.tsv
+
+
+Development Notes
+=================
+
+- When looking at this code for the first time, start in lp_state_fs.c, then
+  skim through the lp_bld_* functions called from there, and the comments
+  at the top of the lp_bld_*.c files.
+
+- All lp_bld_*.[ch] are isolated from the rest of the driver, and could/may be
+ put in a stand-alone Gallium state -> LLVM IR translation module.
+
+- We use LLVM-C bindings for now. They are not documented, but follow the C++
+ interfaces very closely, and appear to be complete enough for code
+ generation. See
+ http://npcontemplation.blogspot.com/2008/06/secret-of-llvm-c-bindings.html
+ for a stand-alone example.
diff --git a/src/gallium/drivers/llvmpipe/SConscript b/src/gallium/drivers/llvmpipe/SConscript
new file mode 100644
index 0000000000..dea4b703c4
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/SConscript
@@ -0,0 +1,84 @@
+Import('*')
+
+env = env.Clone()
+
+env.Tool('llvm')
+if env.has_key('LLVM_VERSION') is False:
+ print 'warning: LLVM not found: not building llvmpipe'
+ Return()
+
+env.Tool('udis86')
+
+llvmpipe = env.ConvenienceLibrary(
+ target = 'llvmpipe',
+ source = [
+ 'lp_bld_alpha.c',
+ 'lp_bld_arit.c',
+ 'lp_bld_blend_aos.c',
+ 'lp_bld_blend_logicop.c',
+ 'lp_bld_blend_soa.c',
+ 'lp_bld_const.c',
+ 'lp_bld_conv.c',
+ 'lp_bld_debug.c',
+ 'lp_bld_depth.c',
+ 'lp_bld_flow.c',
+ 'lp_bld_format_aos.c',
+ 'lp_bld_format_soa.c',
+ 'lp_bld_interp.c',
+ 'lp_bld_intr.c',
+ 'lp_bld_sample_soa.c',
+ 'lp_bld_struct.c',
+ 'lp_bld_logic.c',
+ 'lp_bld_swizzle.c',
+ 'lp_bld_tgsi_soa.c',
+ 'lp_bld_type.c',
+ 'lp_buffer.c',
+ 'lp_clear.c',
+ 'lp_context.c',
+ 'lp_draw_arrays.c',
+ 'lp_flush.c',
+ 'lp_jit.c',
+ 'lp_prim_setup.c',
+ 'lp_prim_vbuf.c',
+ 'lp_setup.c',
+ 'lp_query.c',
+ 'lp_screen.c',
+ 'lp_state_blend.c',
+ 'lp_state_clip.c',
+ 'lp_state_derived.c',
+ 'lp_state_fs.c',
+ 'lp_state_rasterizer.c',
+ 'lp_state_sampler.c',
+ 'lp_state_surface.c',
+ 'lp_state_vertex.c',
+ 'lp_state_vs.c',
+ 'lp_surface.c',
+ 'lp_tex_cache.c',
+ 'lp_tex_sample_c.c',
+ 'lp_tex_sample_llvm.c',
+ 'lp_texture.c',
+ 'lp_tile_cache.c',
+ 'lp_tile_soa.c',
+ ])
+
+
+env = env.Clone()
+
+env.Prepend(LIBS = [llvmpipe] + auxiliaries)
+
+env.Program(
+ target = 'lp_test_format',
+ source = ['lp_test_format.c'],
+)
+
+env.Program(
+ target = 'lp_test_blend',
+ source = ['lp_test_blend.c', 'lp_test_main.c'],
+)
+
+env.Program(
+ target = 'lp_test_conv',
+ source = ['lp_test_conv.c', 'lp_test_main.c'],
+)
+
+Export('llvmpipe')
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_alpha.c b/src/gallium/drivers/llvmpipe/lp_bld_alpha.c
new file mode 100644
index 0000000000..49c2f911af
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_alpha.c
@@ -0,0 +1,64 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Alpha testing to LLVM IR translation.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#include "pipe/p_state.h"
+
+#include "lp_bld_type.h"
+#include "lp_bld_const.h"
+#include "lp_bld_arit.h"
+#include "lp_bld_logic.h"
+#include "lp_bld_flow.h"
+#include "lp_bld_debug.h"
+#include "lp_bld_alpha.h"
+
+
+void
+lp_build_alpha_test(LLVMBuilderRef builder,
+ const struct pipe_alpha_state *state,
+ union lp_type type,
+ struct lp_build_mask_context *mask,
+ LLVMValueRef alpha,
+ LLVMValueRef ref)
+{
+ struct lp_build_context bld;
+
+ lp_build_context_init(&bld, builder, type);
+
+ if(state->enabled) {
+ LLVMValueRef test = lp_build_cmp(&bld, state->func, alpha, ref);
+
+ lp_build_name(test, "alpha_mask");
+
+ lp_build_mask_update(mask, test);
+ }
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_alpha.h b/src/gallium/drivers/llvmpipe/lp_bld_alpha.h
new file mode 100644
index 0000000000..9dbcdb4daa
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_alpha.h
@@ -0,0 +1,54 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Alpha testing to LLVM IR translation.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef LP_BLD_ALPHA_H
+#define LP_BLD_ALPHA_H
+
+
+#include <llvm-c/Core.h>
+
+struct pipe_alpha_state;
+union lp_type;
+struct lp_build_mask_context;
+
+
+void
+lp_build_alpha_test(LLVMBuilderRef builder,
+ const struct pipe_alpha_state *state,
+ union lp_type type,
+ struct lp_build_mask_context *mask,
+ LLVMValueRef alpha,
+ LLVMValueRef ref);
+
+
+#endif /* !LP_BLD_ALPHA_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_arit.c b/src/gallium/drivers/llvmpipe/lp_bld_arit.c
new file mode 100644
index 0000000000..be7442d00a
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_arit.c
@@ -0,0 +1,1082 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * @file
+ * Helper arithmetic functions.
+ *
+ * LLVM IR doesn't support all basic arithmetic operations we care about (most
+ * notably min/max and saturated operations), and it is often necessary to
+ * resort to machine-specific intrinsics directly. The functions here hide all
+ * these implementation details from the other modules.
+ *
+ * We also do simple expression simplification here. The reasons are:
+ * - it is very easy given we have all necessary information readily available
+ * - LLVM optimization passes fail to simplify several vector expressions
+ * - We often know value constraints which the optimization passes have no way
+ * of knowing, such as when source arguments are known to be in [0, 1] range.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#include "util/u_memory.h"
+#include "util/u_debug.h"
+#include "util/u_string.h"
+
+#include "lp_bld_type.h"
+#include "lp_bld_const.h"
+#include "lp_bld_intr.h"
+#include "lp_bld_logic.h"
+#include "lp_bld_arit.h"
+
+
+/**
+ * Generate min(a, b)
+ * No checks for special case values of a or b = 1 or 0 are done.
+ */
+static LLVMValueRef
+lp_build_min_simple(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b)
+{
+ const union lp_type type = bld->type;
+ const char *intrinsic = NULL;
+ LLVMValueRef cond;
+
+ /* TODO: optimize the constant case */
+
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ if(type.width * type.length == 128) {
+ if(type.floating) {
+ if(type.width == 32)
+ intrinsic = "llvm.x86.sse.min.ps";
+ if(type.width == 64)
+ intrinsic = "llvm.x86.sse2.min.pd";
+ }
+ else {
+ if(type.width == 8 && !type.sign)
+ intrinsic = "llvm.x86.sse2.pminu.b";
+ if(type.width == 8 && type.sign)
+ intrinsic = "llvm.x86.sse41.pminsb";
+ if(type.width == 16 && !type.sign)
+ intrinsic = "llvm.x86.sse41.pminuw";
+ if(type.width == 16 && type.sign)
+ intrinsic = "llvm.x86.sse2.pmins.w";
+ if(type.width == 32 && !type.sign)
+ intrinsic = "llvm.x86.sse41.pminud";
+ if(type.width == 32 && type.sign)
+ intrinsic = "llvm.x86.sse41.pminsd";
+ }
+ }
+#endif
+
+ if(intrinsic)
+ return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
+
+ cond = lp_build_cmp(bld, PIPE_FUNC_LESS, a, b);
+ return lp_build_select(bld, cond, a, b);
+}
+
+
+/**
+ * Generate max(a, b)
+ * No checks for special case values of a or b = 1 or 0 are done.
+ */
+static LLVMValueRef
+lp_build_max_simple(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b)
+{
+ const union lp_type type = bld->type;
+ const char *intrinsic = NULL;
+ LLVMValueRef cond;
+
+ /* TODO: optimize the constant case */
+
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ if(type.width * type.length == 128) {
+ if(type.floating) {
+ if(type.width == 32)
+ intrinsic = "llvm.x86.sse.max.ps";
+ if(type.width == 64)
+ intrinsic = "llvm.x86.sse2.max.pd";
+ }
+ else {
+ if(type.width == 8 && !type.sign)
+ intrinsic = "llvm.x86.sse2.pmaxu.b";
+ if(type.width == 8 && type.sign)
+ intrinsic = "llvm.x86.sse41.pmaxsb";
+ if(type.width == 16 && !type.sign)
+ intrinsic = "llvm.x86.sse41.pmaxuw";
+ if(type.width == 16 && type.sign)
+ intrinsic = "llvm.x86.sse2.pmaxs.w";
+ if(type.width == 32 && !type.sign)
+ intrinsic = "llvm.x86.sse41.pmaxud";
+ if(type.width == 32 && type.sign)
+ intrinsic = "llvm.x86.sse41.pmaxsd";
+ }
+ }
+#endif
+
+ if(intrinsic)
+ return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
+
+ cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, b);
+ return lp_build_select(bld, cond, a, b);
+}
+
+
+/**
+ * Generate 1 - a, or ~a depending on bld->type.
+ */
+LLVMValueRef
+lp_build_comp(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const union lp_type type = bld->type;
+
+ if(a == bld->one)
+ return bld->zero;
+ if(a == bld->zero)
+ return bld->one;
+
+ if(type.norm && !type.floating && !type.fixed && !type.sign) {
+ if(LLVMIsConstant(a))
+ return LLVMConstNot(a);
+ else
+ return LLVMBuildNot(bld->builder, a, "");
+ }
+
+ if(LLVMIsConstant(a))
+ return LLVMConstSub(bld->one, a);
+ else
+ return LLVMBuildSub(bld->builder, bld->one, a, "");
+}
+
+
+/**
+ * Generate a + b
+ */
+LLVMValueRef
+lp_build_add(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b)
+{
+ const union lp_type type = bld->type;
+ LLVMValueRef res;
+
+ if(a == bld->zero)
+ return b;
+ if(b == bld->zero)
+ return a;
+ if(a == bld->undef || b == bld->undef)
+ return bld->undef;
+
+ if(bld->type.norm) {
+ const char *intrinsic = NULL;
+
+ if(a == bld->one || b == bld->one)
+ return bld->one;
+
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ if(type.width * type.length == 128 &&
+ !type.floating && !type.fixed) {
+ if(type.width == 8)
+ intrinsic = type.sign ? "llvm.x86.sse2.padds.b" : "llvm.x86.sse2.paddus.b";
+ if(type.width == 16)
+ intrinsic = type.sign ? "llvm.x86.sse2.padds.w" : "llvm.x86.sse2.paddus.w";
+ }
+#endif
+
+ if(intrinsic)
+ return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
+ }
+
+ if(LLVMIsConstant(a) && LLVMIsConstant(b))
+ res = LLVMConstAdd(a, b);
+ else
+ res = LLVMBuildAdd(bld->builder, a, b, "");
+
+ /* clamp to ceiling of 1.0 */
+ if(bld->type.norm && (bld->type.floating || bld->type.fixed))
+ res = lp_build_min_simple(bld, res, bld->one);
+
+ /* XXX clamp to floor of -1 or 0??? */
+
+ return res;
+}
+
+
+/**
+ * Generate a - b
+ */
+LLVMValueRef
+lp_build_sub(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b)
+{
+ const union lp_type type = bld->type;
+ LLVMValueRef res;
+
+ if(b == bld->zero)
+ return a;
+ if(a == bld->undef || b == bld->undef)
+ return bld->undef;
+ if(a == b)
+ return bld->zero;
+
+ if(bld->type.norm) {
+ const char *intrinsic = NULL;
+
+ if(b == bld->one)
+ return bld->zero;
+
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ if(type.width * type.length == 128 &&
+ !type.floating && !type.fixed) {
+ if(type.width == 8)
+ intrinsic = type.sign ? "llvm.x86.sse2.psubs.b" : "llvm.x86.sse2.psubus.b";
+ if(type.width == 16)
+ intrinsic = type.sign ? "llvm.x86.sse2.psubs.w" : "llvm.x86.sse2.psubus.w";
+ }
+#endif
+
+ if(intrinsic)
+ return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
+ }
+
+ if(LLVMIsConstant(a) && LLVMIsConstant(b))
+ res = LLVMConstSub(a, b);
+ else
+ res = LLVMBuildSub(bld->builder, a, b, "");
+
+ if(bld->type.norm && (bld->type.floating || bld->type.fixed))
+ res = lp_build_max_simple(bld, res, bld->zero);
+
+ return res;
+}
+
+
+/**
+ * Build shuffle vectors that match PUNPCKLxx and PUNPCKHxx instructions.
+ */
+static LLVMValueRef
+lp_build_unpack_shuffle(unsigned n, unsigned lo_hi)
+{
+ LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
+ unsigned i, j;
+
+ assert(n <= LP_MAX_VECTOR_LENGTH);
+ assert(lo_hi < 2);
+
+ for(i = 0, j = lo_hi*n/2; i < n; i += 2, ++j) {
+ elems[i + 0] = LLVMConstInt(LLVMInt32Type(), 0 + j, 0);
+ elems[i + 1] = LLVMConstInt(LLVMInt32Type(), n + j, 0);
+ }
+
+ return LLVMConstVector(elems, n);
+}
+
+
+/**
+ * Build constant int vector of width 'n' and value 'c'.
+ */
+static LLVMValueRef
+lp_build_const_vec(LLVMTypeRef type, unsigned n, long long c)
+{
+ LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
+ unsigned i;
+
+ assert(n <= LP_MAX_VECTOR_LENGTH);
+
+ for(i = 0; i < n; ++i)
+ elems[i] = LLVMConstInt(type, c, 0);
+
+ return LLVMConstVector(elems, n);
+}
+
+
+/**
+ * Normalized 8bit multiplication.
+ *
+ * - alpha plus one
+ *
+ * makes the following approximation to the division (Sree)
+ *
+ *     a*b/255 ~= (a*(b + 1)) >> 8
+ *
+ * which is the fastest method that satisfies the following OpenGL criteria
+ *
+ * 0*0 = 0 and 255*255 = 255
+ *
+ * - geometric series
+ *
+ * takes the geometric series approximation to the division
+ *
+ * t/255 = (t >> 8) + (t >> 16) + (t >> 24) ..
+ *
+ * in this case just the first two terms to fit in 16bit arithmetic
+ *
+ * t/255 ~= (t + (t >> 8)) >> 8
+ *
+ *     note that by itself it doesn't satisfy the OpenGL criteria, as it gives
+ *     255*255 = 254, so the special case b = 255 must be accounted for, or
+ *     rounding must be used
+ *
+ * - geometric series plus rounding
+ *
+ * when using a geometric series division instead of truncating the result
+ * use roundoff in the approximation (Jim Blinn)
+ *
+ * t/255 ~= (t + (t >> 8) + 0x80) >> 8
+ *
+ *     achieving exact results
+ *
+ * @sa Alvy Ray Smith, Image Compositing Fundamentals, Tech Memo 4, Aug 15, 1995,
+ * ftp://ftp.alvyray.com/Acrobat/4_Comp.pdf
+ * @sa Michael Herf, The "double blend trick", May 2000,
+ * http://www.stereopsis.com/doubleblend.html
+ */
+static LLVMValueRef
+lp_build_mul_u8n(LLVMBuilderRef builder,
+ LLVMValueRef a, LLVMValueRef b)
+{
+ static LLVMValueRef c01 = NULL;
+ static LLVMValueRef c08 = NULL;
+ static LLVMValueRef c80 = NULL;
+ LLVMValueRef ab;
+
+ if(!c01) c01 = lp_build_const_vec(LLVMInt16Type(), 8, 0x01);
+ if(!c08) c08 = lp_build_const_vec(LLVMInt16Type(), 8, 0x08);
+ if(!c80) c80 = lp_build_const_vec(LLVMInt16Type(), 8, 0x80);
+
+#if 0
+
+   /* a*b/255 ~= (a*(b + 1)) >> 8 */
+ b = LLVMBuildAdd(builder, b, c01, "");
+ ab = LLVMBuildMul(builder, a, b, "");
+
+#else
+
+ /* t/255 ~= (t + (t >> 8) + 0x80) >> 8 */
+ ab = LLVMBuildMul(builder, a, b, "");
+ ab = LLVMBuildAdd(builder, ab, LLVMBuildLShr(builder, ab, c08, ""), "");
+ ab = LLVMBuildAdd(builder, ab, c80, "");
+
+#endif
+
+ ab = LLVMBuildLShr(builder, ab, c08, "");
+
+ return ab;
+}
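+
+/*
+ * Scalar sketch of the rounding variant above, for illustration only (not
+ * used by the driver).  E.g. 255*255 gives 255 and 0*b gives 0, matching the
+ * OpenGL criteria.
+ */
+#if 0
+static unsigned
+mul_u8n_ref(unsigned a, unsigned b)
+{
+   unsigned t = a * b;               /* full product, fits in 16 bits */
+   t = t + (t >> 8) + 0x80;          /* geometric series plus rounding */
+   return t >> 8;                    /* ~= a*b/255, exact for 0 and 255 */
+}
+#endif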
+
+
+/**
+ * Generate a * b
+ */
+LLVMValueRef
+lp_build_mul(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b)
+{
+ const union lp_type type = bld->type;
+
+ if(a == bld->zero)
+ return bld->zero;
+ if(a == bld->one)
+ return b;
+ if(b == bld->zero)
+ return bld->zero;
+ if(b == bld->one)
+ return a;
+ if(a == bld->undef || b == bld->undef)
+ return bld->undef;
+
+ if(!type.floating && !type.fixed && type.norm) {
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ if(type.width == 8 && type.length == 16) {
+ LLVMTypeRef i16x8 = LLVMVectorType(LLVMInt16Type(), 8);
+ LLVMTypeRef i8x16 = LLVMVectorType(LLVMInt8Type(), 16);
+ static LLVMValueRef ml = NULL;
+ static LLVMValueRef mh = NULL;
+ LLVMValueRef al, ah, bl, bh;
+ LLVMValueRef abl, abh;
+ LLVMValueRef ab;
+
+ if(!ml) ml = lp_build_unpack_shuffle(16, 0);
+ if(!mh) mh = lp_build_unpack_shuffle(16, 1);
+
+ /* PUNPCKLBW, PUNPCKHBW */
+ al = LLVMBuildShuffleVector(bld->builder, a, bld->zero, ml, "");
+ bl = LLVMBuildShuffleVector(bld->builder, b, bld->zero, ml, "");
+ ah = LLVMBuildShuffleVector(bld->builder, a, bld->zero, mh, "");
+ bh = LLVMBuildShuffleVector(bld->builder, b, bld->zero, mh, "");
+
+ /* NOP */
+ al = LLVMBuildBitCast(bld->builder, al, i16x8, "");
+ bl = LLVMBuildBitCast(bld->builder, bl, i16x8, "");
+ ah = LLVMBuildBitCast(bld->builder, ah, i16x8, "");
+ bh = LLVMBuildBitCast(bld->builder, bh, i16x8, "");
+
+ /* PMULLW, PSRLW, PADDW */
+ abl = lp_build_mul_u8n(bld->builder, al, bl);
+ abh = lp_build_mul_u8n(bld->builder, ah, bh);
+
+ /* PACKUSWB */
+ ab = lp_build_intrinsic_binary(bld->builder, "llvm.x86.sse2.packuswb.128" , i16x8, abl, abh);
+
+ /* NOP */
+ ab = LLVMBuildBitCast(bld->builder, ab, i8x16, "");
+
+ return ab;
+ }
+#endif
+
+ /* FIXME */
+ assert(0);
+ }
+
+ if(LLVMIsConstant(a) && LLVMIsConstant(b))
+ return LLVMConstMul(a, b);
+
+ return LLVMBuildMul(bld->builder, a, b, "");
+}
+
+
+/**
+ * Generate a / b
+ */
+LLVMValueRef
+lp_build_div(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b)
+{
+ const union lp_type type = bld->type;
+
+ if(a == bld->zero)
+ return bld->zero;
+ if(a == bld->one)
+ return lp_build_rcp(bld, b);
+ if(b == bld->zero)
+ return bld->undef;
+ if(b == bld->one)
+ return a;
+ if(a == bld->undef || b == bld->undef)
+ return bld->undef;
+
+ if(LLVMIsConstant(a) && LLVMIsConstant(b))
+ return LLVMConstFDiv(a, b);
+
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ if(type.width == 32 && type.length == 4)
+ return lp_build_mul(bld, a, lp_build_rcp(bld, b));
+#endif
+
+ return LLVMBuildFDiv(bld->builder, a, b, "");
+}
+
+
+LLVMValueRef
+lp_build_lerp(struct lp_build_context *bld,
+ LLVMValueRef x,
+ LLVMValueRef v0,
+ LLVMValueRef v1)
+{
+ return lp_build_add(bld, v0, lp_build_mul(bld, x, lp_build_sub(bld, v1, v0)));
+}
+
+
+LLVMValueRef
+lp_build_lerp_2d(struct lp_build_context *bld,
+ LLVMValueRef x,
+ LLVMValueRef y,
+ LLVMValueRef v00,
+ LLVMValueRef v01,
+ LLVMValueRef v10,
+ LLVMValueRef v11)
+{
+ LLVMValueRef v0 = lp_build_lerp(bld, x, v00, v01);
+ LLVMValueRef v1 = lp_build_lerp(bld, x, v10, v11);
+ return lp_build_lerp(bld, y, v0, v1);
+}
+
+
+/**
+ * Generate min(a, b)
+ * Do checks for special cases.
+ */
+LLVMValueRef
+lp_build_min(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b)
+{
+ if(a == bld->undef || b == bld->undef)
+ return bld->undef;
+
+ if(a == b)
+ return a;
+
+ if(bld->type.norm) {
+ if(a == bld->zero || b == bld->zero)
+ return bld->zero;
+ if(a == bld->one)
+ return b;
+ if(b == bld->one)
+ return a;
+ }
+
+ return lp_build_min_simple(bld, a, b);
+}
+
+
+/**
+ * Generate max(a, b)
+ * Do checks for special cases.
+ */
+LLVMValueRef
+lp_build_max(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b)
+{
+ if(a == bld->undef || b == bld->undef)
+ return bld->undef;
+
+ if(a == b)
+ return a;
+
+ if(bld->type.norm) {
+ if(a == bld->one || b == bld->one)
+ return bld->one;
+ if(a == bld->zero)
+ return b;
+ if(b == bld->zero)
+ return a;
+ }
+
+ return lp_build_max_simple(bld, a, b);
+}
+
+
+/**
+ * Generate abs(a)
+ */
+LLVMValueRef
+lp_build_abs(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const union lp_type type = bld->type;
+ LLVMTypeRef vec_type = lp_build_vec_type(type);
+
+ if(!type.sign)
+ return a;
+
+ if(type.floating) {
+ /* Mask out the sign bit */
+ LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
+      LLVMValueRef mask = lp_build_int_const_scalar(type, ((unsigned long long)1 << (type.width - 1)) - 1);
+ a = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
+ a = LLVMBuildAnd(bld->builder, a, mask, "");
+ a = LLVMBuildBitCast(bld->builder, a, vec_type, "");
+ return a;
+ }
+
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ if(type.width*type.length == 128) {
+ switch(type.width) {
+ case 8:
+ return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.b.128", vec_type, a);
+ case 16:
+ return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.w.128", vec_type, a);
+ case 32:
+ return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.d.128", vec_type, a);
+ }
+ }
+#endif
+
+ return lp_build_max(bld, a, LLVMBuildNeg(bld->builder, a, ""));
+}
+
+
+enum lp_build_round_sse41_mode
+{
+ LP_BUILD_ROUND_SSE41_NEAREST = 0,
+ LP_BUILD_ROUND_SSE41_FLOOR = 1,
+ LP_BUILD_ROUND_SSE41_CEIL = 2,
+ LP_BUILD_ROUND_SSE41_TRUNCATE = 3
+};
+
+
+static INLINE LLVMValueRef
+lp_build_round_sse41(struct lp_build_context *bld,
+ LLVMValueRef a,
+ enum lp_build_round_sse41_mode mode)
+{
+ const union lp_type type = bld->type;
+ LLVMTypeRef vec_type = lp_build_vec_type(type);
+ const char *intrinsic;
+
+ assert(type.floating);
+ assert(type.width*type.length == 128);
+
+ switch(type.width) {
+ case 32:
+ intrinsic = "llvm.x86.sse41.round.ps";
+ break;
+ case 64:
+ intrinsic = "llvm.x86.sse41.round.pd";
+ break;
+ default:
+ assert(0);
+ return bld->undef;
+ }
+
+ return lp_build_intrinsic_binary(bld->builder, intrinsic, vec_type, a,
+ LLVMConstInt(LLVMInt32Type(), mode, 0));
+}
+
+
+LLVMValueRef
+lp_build_floor(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const union lp_type type = bld->type;
+
+ assert(type.floating);
+
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_FLOOR);
+#endif
+
+ /* FIXME */
+ assert(0);
+ return bld->undef;
+}
+
+
+/**
+ * Convert to integer, using whichever rounding method is fastest,
+ * typically truncation towards zero.
+ */
+LLVMValueRef
+lp_build_int(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const union lp_type type = bld->type;
+ LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
+
+ assert(type.floating);
+
+ return LLVMBuildFPToSI(bld->builder, a, int_vec_type, "");
+}
+
+
+LLVMValueRef
+lp_build_ifloor(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ a = lp_build_floor(bld, a);
+ a = lp_build_int(bld, a);
+ return a;
+}
+
+
+LLVMValueRef
+lp_build_sqrt(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const union lp_type type = bld->type;
+ LLVMTypeRef vec_type = lp_build_vec_type(type);
+ char intrinsic[32];
+
+ /* TODO: optimize the constant case */
+
+ assert(type.floating);
+ util_snprintf(intrinsic, sizeof intrinsic, "llvm.sqrt.v%uf%u", type.length, type.width);
+
+ return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
+}
+
+
+LLVMValueRef
+lp_build_rcp(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const union lp_type type = bld->type;
+
+ if(a == bld->zero)
+ return bld->undef;
+ if(a == bld->one)
+ return bld->one;
+ if(a == bld->undef)
+ return bld->undef;
+
+ assert(type.floating);
+
+ if(LLVMIsConstant(a))
+ return LLVMConstFDiv(bld->one, a);
+
+ /* XXX: is this really necessary? */
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ if(type.width == 32 && type.length == 4)
+ return lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rcp.ps", lp_build_vec_type(type), a);
+#endif
+
+ return LLVMBuildFDiv(bld->builder, bld->one, a, "");
+}
+
+
+/**
+ * Generate 1/sqrt(a)
+ */
+LLVMValueRef
+lp_build_rsqrt(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const union lp_type type = bld->type;
+
+ assert(type.floating);
+
+ /* XXX: is this really necessary? */
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ if(type.width == 32 && type.length == 4)
+ return lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rsqrt.ps", lp_build_vec_type(type), a);
+#endif
+
+ return lp_build_rcp(bld, lp_build_sqrt(bld, a));
+}
+
+
+/**
+ * Generate cos(a)
+ */
+LLVMValueRef
+lp_build_cos(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const union lp_type type = bld->type;
+ LLVMTypeRef vec_type = lp_build_vec_type(type);
+ char intrinsic[32];
+
+ /* TODO: optimize the constant case */
+
+ assert(type.floating);
+ util_snprintf(intrinsic, sizeof intrinsic, "llvm.cos.v%uf%u", type.length, type.width);
+
+ return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
+}
+
+
+/**
+ * Generate sin(a)
+ */
+LLVMValueRef
+lp_build_sin(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const union lp_type type = bld->type;
+ LLVMTypeRef vec_type = lp_build_vec_type(type);
+ char intrinsic[32];
+
+ /* TODO: optimize the constant case */
+
+ assert(type.floating);
+ util_snprintf(intrinsic, sizeof intrinsic, "llvm.sin.v%uf%u", type.length, type.width);
+
+ return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
+}
+
+
+/**
+ * Generate pow(x, y)
+ */
+LLVMValueRef
+lp_build_pow(struct lp_build_context *bld,
+ LLVMValueRef x,
+ LLVMValueRef y)
+{
+ /* TODO: optimize the constant case */
+ if(LLVMIsConstant(x) && LLVMIsConstant(y))
+      debug_printf("%s: inefficient/imprecise constant arithmetic\n", __FUNCTION__);
+
+ return lp_build_exp2(bld, lp_build_mul(bld, lp_build_log2(bld, x), y));
+}
+
+
+/**
+ * Generate exp(x)
+ */
+LLVMValueRef
+lp_build_exp(struct lp_build_context *bld,
+ LLVMValueRef x)
+{
+ /* log2(e) = 1/log(2) */
+ LLVMValueRef log2e = lp_build_const_scalar(bld->type, 1.4426950408889634);
+
+   return lp_build_exp2(bld, lp_build_mul(bld, log2e, x));
+}
+
+
+/**
+ * Generate log(x)
+ */
+LLVMValueRef
+lp_build_log(struct lp_build_context *bld,
+ LLVMValueRef x)
+{
+   /* log(2) */
+   LLVMValueRef log2 = lp_build_const_scalar(bld->type, 0.69314718055994529);
+
+   return lp_build_mul(bld, log2, lp_build_log2(bld, x));
+}
+
+
+#define EXP_POLY_DEGREE 3
+#define LOG_POLY_DEGREE 5
+
+
+/**
+ * Generate polynomial.
+ * Ex: coeffs[0] + x * coeffs[1] + x^2 * coeffs[2].
+ */
+static LLVMValueRef
+lp_build_polynomial(struct lp_build_context *bld,
+ LLVMValueRef x,
+ const double *coeffs,
+ unsigned num_coeffs)
+{
+ const union lp_type type = bld->type;
+ LLVMValueRef res = NULL;
+ unsigned i;
+
+ /* TODO: optimize the constant case */
+ if(LLVMIsConstant(x))
+      debug_printf("%s: inefficient/imprecise constant arithmetic\n", __FUNCTION__);
+
+ for (i = num_coeffs; i--; ) {
+ LLVMValueRef coeff = lp_build_const_scalar(type, coeffs[i]);
+ if(res)
+ res = lp_build_add(bld, coeff, lp_build_mul(bld, x, res));
+ else
+ res = coeff;
+ }
+
+ if(res)
+ return res;
+ else
+ return bld->undef;
+}
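+
+/*
+ * Scalar sketch of the evaluation order above (Horner's scheme), for
+ * illustration only: with three coefficients the result is
+ * coeffs[0] + x*(coeffs[1] + x*coeffs[2]).
+ */
+#if 0
+static double
+polynomial_ref(double x, const double *coeffs, unsigned num_coeffs)
+{
+   double res = 0.0;
+   unsigned i;
+   for (i = num_coeffs; i--; )
+      res = coeffs[i] + x * res;     /* same order as the loop above */
+   return res;
+}
+#endif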
+
+
+/**
+ * Minimax polynomial fit of 2**x, in range [-0.5, 0.5[
+ */
+const double lp_build_exp2_polynomial[] = {
+#if EXP_POLY_DEGREE == 5
+ 9.9999994e-1, 6.9315308e-1, 2.4015361e-1, 5.5826318e-2, 8.9893397e-3, 1.8775767e-3
+#elif EXP_POLY_DEGREE == 4
+ 1.0000026, 6.9300383e-1, 2.4144275e-1, 5.2011464e-2, 1.3534167e-2
+#elif EXP_POLY_DEGREE == 3
+ 9.9992520e-1, 6.9583356e-1, 2.2606716e-1, 7.8024521e-2
+#elif EXP_POLY_DEGREE == 2
+ 1.0017247, 6.5763628e-1, 3.3718944e-1
+#else
+#error
+#endif
+};
+
+
+void
+lp_build_exp2_approx(struct lp_build_context *bld,
+ LLVMValueRef x,
+ LLVMValueRef *p_exp2_int_part,
+ LLVMValueRef *p_frac_part,
+ LLVMValueRef *p_exp2)
+{
+ const union lp_type type = bld->type;
+ LLVMTypeRef vec_type = lp_build_vec_type(type);
+ LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
+ LLVMValueRef ipart = NULL;
+ LLVMValueRef fpart = NULL;
+ LLVMValueRef expipart = NULL;
+ LLVMValueRef expfpart = NULL;
+ LLVMValueRef res = NULL;
+
+ if(p_exp2_int_part || p_frac_part || p_exp2) {
+ /* TODO: optimize the constant case */
+ if(LLVMIsConstant(x))
+      debug_printf("%s: inefficient/imprecise constant arithmetic\n", __FUNCTION__);
+
+ assert(type.floating && type.width == 32);
+
+ x = lp_build_min(bld, x, lp_build_const_scalar(type, 129.0));
+ x = lp_build_max(bld, x, lp_build_const_scalar(type, -126.99999));
+
+ /* ipart = int(x - 0.5) */
+ ipart = LLVMBuildSub(bld->builder, x, lp_build_const_scalar(type, 0.5f), "");
+ ipart = LLVMBuildFPToSI(bld->builder, ipart, int_vec_type, "");
+
+ /* fpart = x - ipart */
+ fpart = LLVMBuildSIToFP(bld->builder, ipart, vec_type, "");
+ fpart = LLVMBuildSub(bld->builder, x, fpart, "");
+ }
+
+ if(p_exp2_int_part || p_exp2) {
+ /* expipart = (float) (1 << ipart) */
+ expipart = LLVMBuildAdd(bld->builder, ipart, lp_build_int_const_scalar(type, 127), "");
+ expipart = LLVMBuildShl(bld->builder, expipart, lp_build_int_const_scalar(type, 23), "");
+ expipart = LLVMBuildBitCast(bld->builder, expipart, vec_type, "");
+ }
+
+ if(p_exp2) {
+ expfpart = lp_build_polynomial(bld, fpart, lp_build_exp2_polynomial,
+ Elements(lp_build_exp2_polynomial));
+
+ res = LLVMBuildMul(bld->builder, expipart, expfpart, "");
+ }
+
+ if(p_exp2_int_part)
+ *p_exp2_int_part = expipart;
+
+ if(p_frac_part)
+ *p_frac_part = fpart;
+
+ if(p_exp2)
+ *p_exp2 = res;
+}
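+
+/*
+ * Scalar sketch of the approximation above, for illustration only (not used
+ * by the driver): split x into integer and fractional parts, build 2^ipart by
+ * writing (ipart + 127) into the float exponent field, and evaluate the
+ * minimax polynomial on the fractional part.
+ */
+#if 0
+static float
+exp2_ref(float x)
+{
+   union { float f; int i; } bits;
+   int ipart = (int)(x - 0.5f);               /* integer part, as in the IR above */
+   float fpart = x - (float)ipart;            /* fractional part fed to the polynomial */
+   float expfpart = 0.0f;
+   int n;
+
+   bits.i = (ipart + 127) << 23;              /* 2^ipart via the float exponent field */
+
+   for (n = Elements(lp_build_exp2_polynomial); n--; )
+      expfpart = lp_build_exp2_polynomial[n] + fpart * expfpart;
+
+   return bits.f * expfpart;
+}
+#endif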
+
+
+LLVMValueRef
+lp_build_exp2(struct lp_build_context *bld,
+ LLVMValueRef x)
+{
+ LLVMValueRef res;
+ lp_build_exp2_approx(bld, x, NULL, NULL, &res);
+ return res;
+}
+
+
+/**
+ * Minimax polynomial fit of log2(x)/(x - 1), for x in range [1, 2[
+ * These coefficients can be generated with
+ * http://www.boost.org/doc/libs/1_36_0/libs/math/doc/sf_and_dist/html/math_toolkit/toolkit/internals2/minimax.html
+ */
+const double lp_build_log2_polynomial[] = {
+#if LOG_POLY_DEGREE == 6
+ 3.11578814719469302614, -3.32419399085241980044, 2.59883907202499966007, -1.23152682416275988241, 0.318212422185251071475, -0.0344359067839062357313
+#elif LOG_POLY_DEGREE == 5
+ 2.8882704548164776201, -2.52074962577807006663, 1.48116647521213171641, -0.465725644288844778798, 0.0596515482674574969533
+#elif LOG_POLY_DEGREE == 4
+ 2.61761038894603480148, -1.75647175389045657003, 0.688243882994381274313, -0.107254423828329604454
+#elif LOG_POLY_DEGREE == 3
+ 2.28330284476918490682, -1.04913055217340124191, 0.204446009836232697516
+#else
+#error
+#endif
+};
+
+
+/**
+ * See http://www.devmaster.net/forums/showthread.php?p=43580
+ */
+void
+lp_build_log2_approx(struct lp_build_context *bld,
+ LLVMValueRef x,
+ LLVMValueRef *p_exp,
+ LLVMValueRef *p_floor_log2,
+ LLVMValueRef *p_log2)
+{
+ const union lp_type type = bld->type;
+ LLVMTypeRef vec_type = lp_build_vec_type(type);
+ LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
+
+ LLVMValueRef expmask = lp_build_int_const_scalar(type, 0x7f800000);
+ LLVMValueRef mantmask = lp_build_int_const_scalar(type, 0x007fffff);
+ LLVMValueRef one = LLVMConstBitCast(bld->one, int_vec_type);
+
+ LLVMValueRef i = NULL;
+ LLVMValueRef exp = NULL;
+ LLVMValueRef mant = NULL;
+ LLVMValueRef logexp = NULL;
+ LLVMValueRef logmant = NULL;
+ LLVMValueRef res = NULL;
+
+ if(p_exp || p_floor_log2 || p_log2) {
+ /* TODO: optimize the constant case */
+ if(LLVMIsConstant(x))
+      debug_printf("%s: inefficient/imprecise constant arithmetic\n", __FUNCTION__);
+
+ assert(type.floating && type.width == 32);
+
+ i = LLVMBuildBitCast(bld->builder, x, int_vec_type, "");
+
+ /* exp = (float) exponent(x) */
+ exp = LLVMBuildAnd(bld->builder, i, expmask, "");
+ }
+
+ if(p_floor_log2 || p_log2) {
+ logexp = LLVMBuildLShr(bld->builder, exp, lp_build_int_const_scalar(type, 23), "");
+ logexp = LLVMBuildSub(bld->builder, logexp, lp_build_int_const_scalar(type, 127), "");
+ logexp = LLVMBuildSIToFP(bld->builder, logexp, vec_type, "");
+ }
+
+ if(p_log2) {
+ /* mant = (float) mantissa(x) */
+ mant = LLVMBuildAnd(bld->builder, i, mantmask, "");
+ mant = LLVMBuildOr(bld->builder, mant, one, "");
+      mant = LLVMBuildBitCast(bld->builder, mant, vec_type, "");
+
+ logmant = lp_build_polynomial(bld, mant, lp_build_log2_polynomial,
+ Elements(lp_build_log2_polynomial));
+
+      /* This effectively increases the polynomial degree by one, but ensures that log2(1) == 0 */
+      logmant = LLVMBuildMul(bld->builder, logmant, LLVMBuildSub(bld->builder, mant, bld->one, ""), "");
+
+ res = LLVMBuildAdd(bld->builder, logmant, logexp, "");
+ }
+
+ if(p_exp)
+ *p_exp = exp;
+
+ if(p_floor_log2)
+ *p_floor_log2 = logexp;
+
+ if(p_log2)
+ *p_log2 = res;
+}
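+
+/*
+ * Scalar sketch of the approximation above, for illustration only (not used
+ * by the driver): the biased exponent field gives floor(log2(x)) directly,
+ * and the mantissa, with its exponent forced so the value lies in [1, 2[,
+ * goes through the minimax polynomial fit of log2(x)/(x - 1).
+ */
+#if 0
+static float
+log2_ref(float x)
+{
+   union { float f; int i; } bits;
+   float mant, logexp, logmant = 0.0f;
+   int n;
+
+   bits.f = x;
+   logexp = (float)(((bits.i >> 23) & 0xff) - 127);   /* floor(log2(x)) */
+
+   bits.i = (bits.i & 0x007fffff) | 0x3f800000;       /* mantissa in [1, 2[ */
+   mant = bits.f;
+
+   for (n = Elements(lp_build_log2_polynomial); n--; )
+      logmant = lp_build_log2_polynomial[n] + mant * logmant;
+
+   return logexp + logmant * (mant - 1.0f);           /* zero at mant == 1 */
+}
+#endif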
+
+
+LLVMValueRef
+lp_build_log2(struct lp_build_context *bld,
+ LLVMValueRef x)
+{
+ LLVMValueRef res;
+ lp_build_log2_approx(bld, x, NULL, NULL, &res);
+ return res;
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_arit.h b/src/gallium/drivers/llvmpipe/lp_bld_arit.h
new file mode 100644
index 0000000000..383c3c3313
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_arit.h
@@ -0,0 +1,175 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Helper arithmetic functions.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#ifndef LP_BLD_ARIT_H
+#define LP_BLD_ARIT_H
+
+
+#include <llvm-c/Core.h>
+
+
+union lp_type;
+struct lp_build_context;
+
+
+/**
+ * Complement, i.e., 1 - a.
+ */
+LLVMValueRef
+lp_build_comp(struct lp_build_context *bld,
+ LLVMValueRef a);
+
+LLVMValueRef
+lp_build_add(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b);
+
+LLVMValueRef
+lp_build_sub(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b);
+
+LLVMValueRef
+lp_build_mul(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b);
+
+LLVMValueRef
+lp_build_div(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b);
+
+LLVMValueRef
+lp_build_lerp(struct lp_build_context *bld,
+ LLVMValueRef x,
+ LLVMValueRef v0,
+ LLVMValueRef v1);
+
+/**
+ * Bilinear interpolation.
+ *
+ * Value indices are in v_{yx} order, i.e. v01 is the value at (x = 1, y = 0).
+ */
+LLVMValueRef
+lp_build_lerp_2d(struct lp_build_context *bld,
+ LLVMValueRef x,
+ LLVMValueRef y,
+ LLVMValueRef v00,
+ LLVMValueRef v01,
+ LLVMValueRef v10,
+ LLVMValueRef v11);
+
+LLVMValueRef
+lp_build_min(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b);
+
+LLVMValueRef
+lp_build_max(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b);
+
+LLVMValueRef
+lp_build_abs(struct lp_build_context *bld,
+ LLVMValueRef a);
+
+LLVMValueRef
+lp_build_floor(struct lp_build_context *bld,
+ LLVMValueRef a);
+
+LLVMValueRef
+lp_build_int(struct lp_build_context *bld,
+ LLVMValueRef a);
+
+LLVMValueRef
+lp_build_ifloor(struct lp_build_context *bld,
+ LLVMValueRef a);
+
+LLVMValueRef
+lp_build_sqrt(struct lp_build_context *bld,
+ LLVMValueRef a);
+
+LLVMValueRef
+lp_build_rcp(struct lp_build_context *bld,
+ LLVMValueRef a);
+
+LLVMValueRef
+lp_build_rsqrt(struct lp_build_context *bld,
+ LLVMValueRef a);
+
+LLVMValueRef
+lp_build_cos(struct lp_build_context *bld,
+ LLVMValueRef a);
+
+LLVMValueRef
+lp_build_sin(struct lp_build_context *bld,
+ LLVMValueRef a);
+
+LLVMValueRef
+lp_build_pow(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b);
+
+LLVMValueRef
+lp_build_exp(struct lp_build_context *bld,
+ LLVMValueRef a);
+
+LLVMValueRef
+lp_build_log(struct lp_build_context *bld,
+ LLVMValueRef a);
+
+LLVMValueRef
+lp_build_exp2(struct lp_build_context *bld,
+ LLVMValueRef a);
+
+LLVMValueRef
+lp_build_log2(struct lp_build_context *bld,
+ LLVMValueRef a);
+
+void
+lp_build_exp2_approx(struct lp_build_context *bld,
+ LLVMValueRef x,
+ LLVMValueRef *p_exp2_int_part,
+ LLVMValueRef *p_frac_part,
+ LLVMValueRef *p_exp2);
+
+void
+lp_build_log2_approx(struct lp_build_context *bld,
+ LLVMValueRef x,
+ LLVMValueRef *p_exp,
+ LLVMValueRef *p_floor_log2,
+ LLVMValueRef *p_log2);
+
+#endif /* !LP_BLD_ARIT_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_blend.h b/src/gallium/drivers/llvmpipe/lp_bld_blend.h
new file mode 100644
index 0000000000..d19e18846c
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_blend.h
@@ -0,0 +1,107 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef LP_BLD_BLEND_H
+#define LP_BLD_BLEND_H
+
+
+/**
+ * @file
+ * LLVM IR building helper interfaces.
+ *
+ * We use LLVM-C bindings for now. They are not documented, but follow the C++
+ * interfaces very closely, and appear to be complete enough for code
+ * generation. See
+ * http://npcontemplation.blogspot.com/2008/06/secret-of-llvm-c-bindings.html
+ * for a standalone example.
+ */
+
+#include <llvm-c/Core.h>
+
+#include "pipe/p_format.h"
+
+
+struct pipe_blend_state;
+union lp_type;
+struct lp_build_context;
+
+
+/**
+ * Whether the blending function is commutative or not.
+ */
+boolean
+lp_build_blend_func_commutative(unsigned func);
+
+
+/**
+ * Whether the blending functions are the reverse of each other.
+ */
+boolean
+lp_build_blend_func_reverse(unsigned rgb_func, unsigned alpha_func);
+
+
+LLVMValueRef
+lp_build_blend_func(struct lp_build_context *bld,
+ unsigned func,
+ LLVMValueRef term1,
+ LLVMValueRef term2);
+
+
+LLVMValueRef
+lp_build_blend_aos(LLVMBuilderRef builder,
+ const struct pipe_blend_state *blend,
+ union lp_type type,
+ LLVMValueRef src,
+ LLVMValueRef dst,
+ LLVMValueRef const_,
+ unsigned alpha_swizzle);
+
+
+void
+lp_build_blend_soa(LLVMBuilderRef builder,
+ const struct pipe_blend_state *blend,
+ union lp_type type,
+ LLVMValueRef src[4],
+ LLVMValueRef dst[4],
+ LLVMValueRef const_[4],
+ LLVMValueRef res[4]);
+
+
+/**
+ * Apply a logic op.
+ *
+ * src/dst parameters are packed values. It should work regardless of whether
+ * the inputs are scalars or vectors.
+ */
+LLVMValueRef
+lp_build_logicop(LLVMBuilderRef builder,
+ unsigned logicop_func,
+ LLVMValueRef src,
+ LLVMValueRef dst);
+
+
+#endif /* !LP_BLD_BLEND_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_blend_aos.c b/src/gallium/drivers/llvmpipe/lp_bld_blend_aos.c
new file mode 100644
index 0000000000..c11a9398f8
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_blend_aos.c
@@ -0,0 +1,356 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * @file
+ * Blend LLVM IR generation -- AoS layout.
+ *
+ * AoS blending is in general much slower than SoA, but there are some cases
+ * where it might be faster. In particular, if a pixel is rendered only once
+ * then the overhead of tiling and untiling will dominate the speedup that
+ * SoA gives. So we might want to detect such cases and fall back to AoS in the
+ * future, but for now this code is here for historical/benchmarking
+ * purposes.
+ *
+ * Run lp_blend_test after any change to this file.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#include "pipe/p_state.h"
+
+#include "lp_bld_type.h"
+#include "lp_bld_const.h"
+#include "lp_bld_arit.h"
+#include "lp_bld_logic.h"
+#include "lp_bld_swizzle.h"
+#include "lp_bld_blend.h"
+#include "lp_bld_debug.h"
+
+
+/**
+ * We may need the same values several times, so we keep them here to avoid
+ * recomputing them. Also, reusing the values allows us to do simplifications
+ * that LLVM optimization passes wouldn't normally be able to do.
+ */
+struct lp_build_blend_aos_context
+{
+ struct lp_build_context base;
+
+ LLVMValueRef src;
+ LLVMValueRef dst;
+ LLVMValueRef const_;
+
+ LLVMValueRef inv_src;
+ LLVMValueRef inv_dst;
+ LLVMValueRef inv_const;
+ LLVMValueRef saturate;
+
+ LLVMValueRef rgb_src_factor;
+ LLVMValueRef alpha_src_factor;
+ LLVMValueRef rgb_dst_factor;
+ LLVMValueRef alpha_dst_factor;
+};
+
+
+static LLVMValueRef
+lp_build_blend_factor_unswizzled(struct lp_build_blend_aos_context *bld,
+ unsigned factor,
+ boolean alpha)
+{
+ switch (factor) {
+ case PIPE_BLENDFACTOR_ZERO:
+ return bld->base.zero;
+ case PIPE_BLENDFACTOR_ONE:
+ return bld->base.one;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ return bld->src;
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ return bld->dst;
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ if(alpha)
+ return bld->base.one;
+ else {
+ if(!bld->inv_dst)
+ bld->inv_dst = lp_build_comp(&bld->base, bld->dst);
+ if(!bld->saturate)
+ bld->saturate = lp_build_min(&bld->base, bld->src, bld->inv_dst);
+ return bld->saturate;
+ }
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ return bld->const_;
+ case PIPE_BLENDFACTOR_SRC1_COLOR:
+ case PIPE_BLENDFACTOR_SRC1_ALPHA:
+ /* TODO */
+ assert(0);
+ return bld->base.zero;
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ if(!bld->inv_src)
+ bld->inv_src = lp_build_comp(&bld->base, bld->src);
+ return bld->inv_src;
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ if(!bld->inv_dst)
+ bld->inv_dst = lp_build_comp(&bld->base, bld->dst);
+ return bld->inv_dst;
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ if(!bld->inv_const)
+ bld->inv_const = lp_build_comp(&bld->base, bld->const_);
+ return bld->inv_const;
+ case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
+ case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
+ /* TODO */
+ assert(0);
+ return bld->base.zero;
+ default:
+ assert(0);
+ return bld->base.zero;
+ }
+}
+
+
+enum lp_build_blend_swizzle {
+ LP_BUILD_BLEND_SWIZZLE_RGBA = 0,
+ LP_BUILD_BLEND_SWIZZLE_AAAA = 1,
+};
+
+
+/**
+ * How the blend factor should be swizzled.
+ */
+static enum lp_build_blend_swizzle
+lp_build_blend_factor_swizzle(unsigned factor)
+{
+ switch (factor) {
+ case PIPE_BLENDFACTOR_ONE:
+ case PIPE_BLENDFACTOR_ZERO:
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ case PIPE_BLENDFACTOR_SRC1_COLOR:
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
+ return LP_BUILD_BLEND_SWIZZLE_RGBA;
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ case PIPE_BLENDFACTOR_SRC1_ALPHA:
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
+ return LP_BUILD_BLEND_SWIZZLE_AAAA;
+ default:
+ assert(0);
+ return LP_BUILD_BLEND_SWIZZLE_RGBA;
+ }
+}
+
+
+static LLVMValueRef
+lp_build_blend_swizzle(struct lp_build_blend_aos_context *bld,
+ LLVMValueRef rgb,
+ LLVMValueRef alpha,
+ enum lp_build_blend_swizzle rgb_swizzle,
+ unsigned alpha_swizzle)
+{
+ if(rgb == alpha) {
+ if(rgb_swizzle == LP_BUILD_BLEND_SWIZZLE_RGBA)
+ return rgb;
+ if(rgb_swizzle == LP_BUILD_BLEND_SWIZZLE_AAAA)
+ return lp_build_broadcast_aos(&bld->base, rgb, alpha_swizzle);
+ }
+ else {
+ if(rgb_swizzle == LP_BUILD_BLEND_SWIZZLE_RGBA) {
+ boolean cond[4] = {0, 0, 0, 0};
+ cond[alpha_swizzle] = 1;
+ return lp_build_select_aos(&bld->base, alpha, rgb, cond);
+ }
+ if(rgb_swizzle == LP_BUILD_BLEND_SWIZZLE_AAAA) {
+ unsigned char swizzle[4];
+ swizzle[0] = alpha_swizzle;
+ swizzle[1] = alpha_swizzle;
+ swizzle[2] = alpha_swizzle;
+ swizzle[3] = alpha_swizzle;
+ swizzle[alpha_swizzle] += 4;
+ return lp_build_swizzle2_aos(&bld->base, rgb, alpha, swizzle);
+ }
+ }
+ assert(0);
+ return bld->base.undef;
+}
+
+
+/**
+ * @sa http://www.opengl.org/sdk/docs/man/xhtml/glBlendFuncSeparate.xml
+ */
+static LLVMValueRef
+lp_build_blend_factor(struct lp_build_blend_aos_context *bld,
+ LLVMValueRef factor1,
+ unsigned rgb_factor,
+ unsigned alpha_factor,
+ unsigned alpha_swizzle)
+{
+ LLVMValueRef rgb_factor_;
+ LLVMValueRef alpha_factor_;
+ LLVMValueRef factor2;
+ enum lp_build_blend_swizzle rgb_swizzle;
+
+ rgb_factor_ = lp_build_blend_factor_unswizzled(bld, rgb_factor, FALSE);
+ alpha_factor_ = lp_build_blend_factor_unswizzled(bld, alpha_factor, TRUE);
+
+ rgb_swizzle = lp_build_blend_factor_swizzle(rgb_factor);
+
+ factor2 = lp_build_blend_swizzle(bld, rgb_factor_, alpha_factor_, rgb_swizzle, alpha_swizzle);
+
+ return lp_build_mul(&bld->base, factor1, factor2);
+}
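+
+/*
+ * For example, an informal sketch of what the helper above produces: with
+ * rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA and
+ * alpha_src_factor = PIPE_BLENDFACTOR_ONE the swizzled factor becomes
+ * (As, As, As, 1), with the alpha channel at position alpha_swizzle, so a
+ * call with factor1 = src yields src_term = (Rs*As, Gs*As, Bs*As, As).
+ */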
+
+
+boolean
+lp_build_blend_func_commutative(unsigned func)
+{
+ switch (func) {
+ case PIPE_BLEND_ADD:
+ case PIPE_BLEND_MIN:
+ case PIPE_BLEND_MAX:
+ return TRUE;
+ case PIPE_BLEND_SUBTRACT:
+ case PIPE_BLEND_REVERSE_SUBTRACT:
+ return FALSE;
+ default:
+ assert(0);
+ return TRUE;
+ }
+}
+
+
+boolean
+lp_build_blend_func_reverse(unsigned rgb_func, unsigned alpha_func)
+{
+ if(rgb_func == alpha_func)
+ return FALSE;
+ if(rgb_func == PIPE_BLEND_SUBTRACT && alpha_func == PIPE_BLEND_REVERSE_SUBTRACT)
+ return TRUE;
+ if(rgb_func == PIPE_BLEND_REVERSE_SUBTRACT && alpha_func == PIPE_BLEND_SUBTRACT)
+ return TRUE;
+ return FALSE;
+}
+
+
+/**
+ * @sa http://www.opengl.org/sdk/docs/man/xhtml/glBlendEquationSeparate.xml
+ */
+LLVMValueRef
+lp_build_blend_func(struct lp_build_context *bld,
+ unsigned func,
+ LLVMValueRef term1,
+ LLVMValueRef term2)
+{
+ switch (func) {
+ case PIPE_BLEND_ADD:
+ return lp_build_add(bld, term1, term2);
+ case PIPE_BLEND_SUBTRACT:
+ return lp_build_sub(bld, term1, term2);
+ case PIPE_BLEND_REVERSE_SUBTRACT:
+ return lp_build_sub(bld, term2, term1);
+ case PIPE_BLEND_MIN:
+ return lp_build_min(bld, term1, term2);
+ case PIPE_BLEND_MAX:
+ return lp_build_max(bld, term1, term2);
+ default:
+ assert(0);
+ return bld->zero;
+ }
+}
+
+
+LLVMValueRef
+lp_build_blend_aos(LLVMBuilderRef builder,
+ const struct pipe_blend_state *blend,
+ union lp_type type,
+ LLVMValueRef src,
+ LLVMValueRef dst,
+ LLVMValueRef const_,
+ unsigned alpha_swizzle)
+{
+ struct lp_build_blend_aos_context bld;
+ LLVMValueRef src_term;
+ LLVMValueRef dst_term;
+
+ /* FIXME */
+ assert(blend->colormask == 0xf);
+
+ if(!blend->blend_enable)
+ return src;
+
+ /* It makes no sense to blend unless values are normalized */
+ assert(type.norm);
+
+ /* Setup build context */
+ memset(&bld, 0, sizeof bld);
+ lp_build_context_init(&bld.base, builder, type);
+ bld.src = src;
+ bld.dst = dst;
+ bld.const_ = const_;
+
+ /* TODO: There are still a few optimization opportunities here. For certain
+    * combinations it is possible to reorder the operations and thereby save
+ * some instructions. */
+
+ src_term = lp_build_blend_factor(&bld, src, blend->rgb_src_factor, blend->alpha_src_factor, alpha_swizzle);
+ dst_term = lp_build_blend_factor(&bld, dst, blend->rgb_dst_factor, blend->alpha_dst_factor, alpha_swizzle);
+
+ lp_build_name(src_term, "src_term");
+ lp_build_name(dst_term, "dst_term");
+
+ if(blend->rgb_func == blend->alpha_func) {
+ return lp_build_blend_func(&bld.base, blend->rgb_func, src_term, dst_term);
+ }
+ else {
+      /* Separate RGB / A functions */
+
+ LLVMValueRef rgb;
+ LLVMValueRef alpha;
+
+ rgb = lp_build_blend_func(&bld.base, blend->rgb_func, src_term, dst_term);
+ alpha = lp_build_blend_func(&bld.base, blend->alpha_func, src_term, dst_term);
+
+ return lp_build_blend_swizzle(&bld, rgb, alpha, LP_BUILD_BLEND_SWIZZLE_RGBA, alpha_swizzle);
+ }
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_blend_logicop.c b/src/gallium/drivers/llvmpipe/lp_bld_blend_logicop.c
new file mode 100644
index 0000000000..88321f62a2
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_blend_logicop.c
@@ -0,0 +1,108 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * @file
+ * Blend LLVM IR generation -- logic ops.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#include "pipe/p_state.h"
+
+#include "lp_bld_blend.h"
+
+
+LLVMValueRef
+lp_build_logicop(LLVMBuilderRef builder,
+ unsigned logicop_func,
+ LLVMValueRef src,
+ LLVMValueRef dst)
+{
+ LLVMTypeRef type;
+ LLVMValueRef res;
+
+ type = LLVMTypeOf(src);
+
+ switch (logicop_func) {
+ case PIPE_LOGICOP_CLEAR:
+ res = LLVMConstNull(type);
+ break;
+ case PIPE_LOGICOP_NOR:
+ res = LLVMBuildNot(builder, LLVMBuildOr(builder, src, dst, ""), "");
+ break;
+ case PIPE_LOGICOP_AND_INVERTED:
+ res = LLVMBuildAnd(builder, LLVMBuildNot(builder, src, ""), dst, "");
+ break;
+ case PIPE_LOGICOP_COPY_INVERTED:
+ res = LLVMBuildNot(builder, src, "");
+ break;
+ case PIPE_LOGICOP_AND_REVERSE:
+ res = LLVMBuildAnd(builder, src, LLVMBuildNot(builder, dst, ""), "");
+ break;
+ case PIPE_LOGICOP_INVERT:
+ res = LLVMBuildNot(builder, dst, "");
+ break;
+ case PIPE_LOGICOP_XOR:
+ res = LLVMBuildXor(builder, src, dst, "");
+ break;
+ case PIPE_LOGICOP_NAND:
+ res = LLVMBuildNot(builder, LLVMBuildAnd(builder, src, dst, ""), "");
+ break;
+ case PIPE_LOGICOP_AND:
+ res = LLVMBuildAnd(builder, src, dst, "");
+ break;
+ case PIPE_LOGICOP_EQUIV:
+ res = LLVMBuildNot(builder, LLVMBuildXor(builder, src, dst, ""), "");
+ break;
+ case PIPE_LOGICOP_NOOP:
+ res = dst;
+ break;
+ case PIPE_LOGICOP_OR_INVERTED:
+ res = LLVMBuildOr(builder, LLVMBuildNot(builder, src, ""), dst, "");
+ break;
+ case PIPE_LOGICOP_COPY:
+ res = src;
+ break;
+ case PIPE_LOGICOP_OR_REVERSE:
+ res = LLVMBuildOr(builder, src, LLVMBuildNot(builder, dst, ""), "");
+ break;
+ case PIPE_LOGICOP_OR:
+ res = LLVMBuildOr(builder, src, dst, "");
+ break;
+ case PIPE_LOGICOP_SET:
+ res = LLVMConstAllOnes(type);
+ break;
+ default:
+ assert(0);
+ res = src;
+ }
+
+ return res;
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_blend_soa.c b/src/gallium/drivers/llvmpipe/lp_bld_blend_soa.c
new file mode 100644
index 0000000000..b92254a7d6
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_blend_soa.c
@@ -0,0 +1,298 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * @file
+ * Blend LLVM IR generation -- SoA layout.
+ *
+ * Blending in SoA is much faster than AoS, especially when separate rgb/alpha
+ * factors/functions are used, since no channel masking/shuffling is necessary
+ * and we can achieve the full throughput of the SIMD operations. Furthermore
+ * the fragment shader output is also in SoA, so it fits nicely with the rest of
+ * the fragment pipeline.
+ *
+ * The drawback is that to be displayed the color buffer needs to be in AoS
+ * layout, so we need to tile/untile the color buffer before/after rendering.
+ * A color buffer like
+ *
+ * R11 G11 B11 A11 R12 G12 B12 A12 R13 G13 B13 A13 R14 G14 B14 A14 ...
+ * R21 G21 B21 A21 R22 G22 B22 A22 R23 G23 B23 A23 R24 G24 B24 A24 ...
+ *
+ * R31 G31 B31 A31 R32 G32 B32 A32 R33 G33 B33 A33 R34 G34 B34 A34 ...
+ * R41 G41 B41 A41 R42 G42 B42 A42 R43 G43 B43 A43 R44 G44 B44 A44 ...
+ *
+ * ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
+ *
+ * will actually be stored in memory as
+ *
+ * R11 R12 R21 R22 R13 R14 R23 R24 ... G11 G12 G21 G22 G13 G14 G23 G24 ... B11 B12 B21 B22 B13 B14 B23 B24 ... A11 A12 A21 A22 A13 A14 A23 A24 ...
+ * R31 R32 R41 R42 R33 R34 R43 R44 ... G31 G32 G41 G42 G33 G34 G43 G44 ... B31 B32 B41 B42 B33 B34 B43 B44 ... A31 A32 A41 A42 A33 A34 A43 A44 ...
+ * ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
+ *
+ * NOTE: Run lp_blend_test after any change to this file.
+ *
+ * You can also run lp_blend_test to obtain AoS vs SoA benchmarks. Invoking it
+ * as:
+ *
+ * lp_blend_test -o blend.tsv
+ *
+ * will generate a tab-separated file with the test results and performance
+ * measurements.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
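+
+/*
+ * Informal sketch of the generated arithmetic: for the common case of
+ * PIPE_BLEND_ADD with SRC_ALPHA / INV_SRC_ALPHA factors, the code below ends
+ * up emitting, for each RGB channel i,
+ *
+ *   res[i] = src[i] * src[3] + dst[i] * (1 - src[3])
+ *
+ * where every term is a full SIMD vector covering several pixels at once.
+ */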
+
+
+#include "pipe/p_state.h"
+
+#include "lp_bld_type.h"
+#include "lp_bld_const.h"
+#include "lp_bld_arit.h"
+#include "lp_bld_blend.h"
+
+
+/**
+ * We may need the same values several times, so we keep them here to avoid
+ * recomputing them. Also, reusing the values allows us to do simplifications
+ * that LLVM optimization passes wouldn't normally be able to do.
+ */
+struct lp_build_blend_soa_context
+{
+ struct lp_build_context base;
+
+ LLVMValueRef src[4];
+ LLVMValueRef dst[4];
+ LLVMValueRef con[4];
+
+ LLVMValueRef inv_src[4];
+ LLVMValueRef inv_dst[4];
+ LLVMValueRef inv_con[4];
+
+ LLVMValueRef src_alpha_saturate;
+
+ /**
+ * We store all factors in a table in order to eliminate redundant
+ * multiplications later.
+ */
+ LLVMValueRef factor[2][2][4];
+
+ /**
+ * Table with all terms.
+ */
+ LLVMValueRef term[2][4];
+};
+
+
+static LLVMValueRef
+lp_build_blend_soa_factor(struct lp_build_blend_soa_context *bld,
+ unsigned factor, unsigned i)
+{
+ /*
+    * Compute the requested blend factor for channel i.
+ */
+ switch (factor) {
+ case PIPE_BLENDFACTOR_ONE:
+ return bld->base.one;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ return bld->src[i];
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ return bld->src[3];
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ return bld->dst[i];
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ return bld->dst[3];
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ if(i == 3)
+ return bld->base.one;
+ else {
+ if(!bld->inv_dst[3])
+ bld->inv_dst[3] = lp_build_comp(&bld->base, bld->dst[3]);
+ if(!bld->src_alpha_saturate)
+ bld->src_alpha_saturate = lp_build_min(&bld->base, bld->src[3], bld->inv_dst[3]);
+ return bld->src_alpha_saturate;
+ }
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ return bld->con[i];
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ return bld->con[3];
+ case PIPE_BLENDFACTOR_SRC1_COLOR:
+ /* TODO */
+ assert(0);
+ return bld->base.zero;
+ case PIPE_BLENDFACTOR_SRC1_ALPHA:
+ /* TODO */
+ assert(0);
+ return bld->base.zero;
+ case PIPE_BLENDFACTOR_ZERO:
+ return bld->base.zero;
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ if(!bld->inv_src[i])
+ bld->inv_src[i] = lp_build_comp(&bld->base, bld->src[i]);
+ return bld->inv_src[i];
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ if(!bld->inv_src[3])
+ bld->inv_src[3] = lp_build_comp(&bld->base, bld->src[3]);
+ return bld->inv_src[3];
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ if(!bld->inv_dst[i])
+ bld->inv_dst[i] = lp_build_comp(&bld->base, bld->dst[i]);
+ return bld->inv_dst[i];
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ if(!bld->inv_dst[3])
+ bld->inv_dst[3] = lp_build_comp(&bld->base, bld->dst[3]);
+ return bld->inv_dst[3];
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ if(!bld->inv_con[i])
+ bld->inv_con[i] = lp_build_comp(&bld->base, bld->con[i]);
+ return bld->inv_con[i];
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ if(!bld->inv_con[3])
+ bld->inv_con[3] = lp_build_comp(&bld->base, bld->con[3]);
+ return bld->inv_con[3];
+ case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
+ /* TODO */
+ assert(0);
+ return bld->base.zero;
+ case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
+ /* TODO */
+ assert(0);
+ return bld->base.zero;
+ default:
+ assert(0);
+ return bld->base.zero;
+ }
+}
+
+
+/**
+ * Generate blend code in SOA mode.
+ * \param src src/fragment color
+ * \param dst dst/framebuffer color
+ * \param con constant blend color
+ * \param res the result/output
+ */
+void
+lp_build_blend_soa(LLVMBuilderRef builder,
+ const struct pipe_blend_state *blend,
+ union lp_type type,
+ LLVMValueRef src[4],
+ LLVMValueRef dst[4],
+ LLVMValueRef con[4],
+ LLVMValueRef res[4])
+{
+ struct lp_build_blend_soa_context bld;
+ unsigned i, j, k;
+
+ /* Setup build context */
+ memset(&bld, 0, sizeof bld);
+ lp_build_context_init(&bld.base, builder, type);
+ for (i = 0; i < 4; ++i) {
+ bld.src[i] = src[i];
+ bld.dst[i] = dst[i];
+ bld.con[i] = con[i];
+ }
+
+ for (i = 0; i < 4; ++i) {
+ if (blend->colormask & (1 << i)) {
+ if (blend->logicop_enable) {
+ if(!type.floating) {
+ res[i] = lp_build_logicop(builder, blend->logicop_func, src[i], dst[i]);
+ }
+ else
+ res[i] = dst[i];
+ }
+ else if (blend->blend_enable) {
+ unsigned src_factor = i < 3 ? blend->rgb_src_factor : blend->alpha_src_factor;
+ unsigned dst_factor = i < 3 ? blend->rgb_dst_factor : blend->alpha_dst_factor;
+ unsigned func = i < 3 ? blend->rgb_func : blend->alpha_func;
+ boolean func_commutative = lp_build_blend_func_commutative(func);
+
+ /* It makes no sense to blend unless values are normalized */
+ assert(type.norm);
+
+ /*
+ * Compute src/dst factors.
+ */
+
+ bld.factor[0][0][i] = src[i];
+ bld.factor[0][1][i] = lp_build_blend_soa_factor(&bld, src_factor, i);
+ bld.factor[1][0][i] = dst[i];
+ bld.factor[1][1][i] = lp_build_blend_soa_factor(&bld, dst_factor, i);
+
+ /*
+ * Compute src/dst terms
+ */
+
+ for(k = 0; k < 2; ++k) {
+ /* See if this multiplication has been previously computed */
+ for(j = 0; j < i; ++j) {
+ if((bld.factor[k][0][j] == bld.factor[k][0][i] &&
+ bld.factor[k][1][j] == bld.factor[k][1][i]) ||
+ (bld.factor[k][0][j] == bld.factor[k][1][i] &&
+ bld.factor[k][1][j] == bld.factor[k][0][i]))
+ break;
+ }
+
+ if(j < i)
+ bld.term[k][i] = bld.term[k][j];
+ else
+ bld.term[k][i] = lp_build_mul(&bld.base, bld.factor[k][0][i], bld.factor[k][1][i]);
+ }
+
+ /*
+ * Combine terms
+ */
+
+ /* See if this function has been previously applied */
+ for(j = 0; j < i; ++j) {
+ unsigned prev_func = j < 3 ? blend->rgb_func : blend->alpha_func;
+ unsigned func_reverse = lp_build_blend_func_reverse(func, prev_func);
+
+ if((!func_reverse &&
+ bld.term[0][j] == bld.term[0][i] &&
+ bld.term[1][j] == bld.term[1][i]) ||
+ ((func_commutative || func_reverse) &&
+ bld.term[0][j] == bld.term[1][i] &&
+ bld.term[1][j] == bld.term[0][i]))
+ break;
+ }
+
+ if(j < i)
+ res[i] = res[j];
+ else
+ res[i] = lp_build_blend_func(&bld.base, func, bld.term[0][i], bld.term[1][i]);
+ }
+ else {
+ res[i] = src[i];
+ }
+ }
+ else {
+ res[i] = dst[i];
+ }
+ }
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_const.c b/src/gallium/drivers/llvmpipe/lp_bld_const.c
new file mode 100644
index 0000000000..21487365ea
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_const.c
@@ -0,0 +1,369 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * @file
+ * Helper functions for constant building.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#include <float.h>
+
+#include "util/u_debug.h"
+
+#include "lp_bld_type.h"
+#include "lp_bld_const.h"
+
+
+unsigned
+lp_mantissa(union lp_type type)
+{
+ assert(type.floating);
+
+ if(type.floating) {
+ switch(type.width) {
+ case 32:
+ return 23;
+ case 64:
+ return 53;
+ default:
+ assert(0);
+ return 0;
+ }
+ }
+ else {
+ if(type.sign)
+ return type.width - 1;
+ else
+ return type.width;
+ }
+}
+
+
+/**
+ * Shift of the value one (unity).
+ *
+ * Same as lp_const_scale(), but in terms of shifts.
+ */
+unsigned
+lp_const_shift(union lp_type type)
+{
+ if(type.floating)
+ return 0;
+ else if(type.fixed)
+ return type.width/2;
+ else if(type.norm)
+ return type.sign ? type.width - 1 : type.width;
+ else
+ return 0;
+}
+
+
+unsigned
+lp_const_offset(union lp_type type)
+{
+ if(type.floating || type.fixed)
+ return 0;
+ else if(type.norm)
+ return 1;
+ else
+ return 0;
+}
+
+
+/**
+ * Scaling factor between the LLVM native value and its interpretation.
+ *
+ * This is 1.0 for all floating types and unnormalized integers, and something
+ * else for fixed point types and normalized integers.
+ */
+double
+lp_const_scale(union lp_type type)
+{
+ unsigned long long llscale;
+ double dscale;
+
+ llscale = (unsigned long long)1 << lp_const_shift(type);
+ llscale -= lp_const_offset(type);
+ dscale = (double)llscale;
+ assert((unsigned long long)dscale == llscale);
+
+ return dscale;
+}
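+
+/*
+ * For instance (informal example): for an 8-bit unsigned normalized type the
+ * shift is 8 and the offset is 1, so the scale is (1 << 8) - 1 = 255, i.e. the
+ * value 1.0 is represented by the integer 255.
+ */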
+
+
+/**
+ * Minimum value representable by the type.
+ */
+double
+lp_const_min(union lp_type type)
+{
+ unsigned bits;
+
+ if(!type.sign)
+ return 0.0;
+
+ if(type.norm)
+ return -1.0;
+
+ if (type.floating) {
+ switch(type.width) {
+ case 32:
+ return -FLT_MAX;
+ case 64:
+ return -DBL_MAX;
+ default:
+ assert(0);
+ return 0.0;
+ }
+ }
+
+ if(type.fixed)
+ /* FIXME: consider the fractional bits? */
+ bits = type.width / 2 - 1;
+ else
+ bits = type.width - 1;
+
+ return (double)-((long long)1 << bits);
+}
+
+
+/**
+ * Maximum value representable by the type.
+ */
+double
+lp_const_max(union lp_type type)
+{
+ unsigned bits;
+
+ if(type.norm)
+ return 1.0;
+
+ if (type.floating) {
+ switch(type.width) {
+ case 32:
+ return FLT_MAX;
+ case 64:
+ return DBL_MAX;
+ default:
+ assert(0);
+ return 0.0;
+ }
+ }
+
+ if(type.fixed)
+ bits = type.width / 2;
+ else
+ bits = type.width;
+
+ if(type.sign)
+ bits -= 1;
+
+ return (double)(((unsigned long long)1 << bits) - 1);
+}
+
+
+double
+lp_const_eps(union lp_type type)
+{
+ if (type.floating) {
+ switch(type.width) {
+ case 32:
+ return FLT_EPSILON;
+ case 64:
+ return DBL_EPSILON;
+ default:
+ assert(0);
+ return 0.0;
+ }
+ }
+ else {
+ double scale = lp_const_scale(type);
+ return 1.0/scale;
+ }
+}
+
+
+LLVMValueRef
+lp_build_undef(union lp_type type)
+{
+ LLVMTypeRef vec_type = lp_build_vec_type(type);
+ return LLVMGetUndef(vec_type);
+}
+
+
+LLVMValueRef
+lp_build_zero(union lp_type type)
+{
+ LLVMTypeRef vec_type = lp_build_vec_type(type);
+ return LLVMConstNull(vec_type);
+}
+
+
+LLVMValueRef
+lp_build_one(union lp_type type)
+{
+ LLVMTypeRef elem_type;
+ LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
+ unsigned i;
+
+ assert(type.length <= LP_MAX_VECTOR_LENGTH);
+
+ elem_type = lp_build_elem_type(type);
+
+ if(type.floating)
+ elems[0] = LLVMConstReal(elem_type, 1.0);
+ else if(type.fixed)
+ elems[0] = LLVMConstInt(elem_type, 1LL << (type.width/2), 0);
+ else if(!type.norm)
+ elems[0] = LLVMConstInt(elem_type, 1, 0);
+ else if(type.sign)
+ elems[0] = LLVMConstInt(elem_type, (1LL << (type.width - 1)) - 1, 0);
+ else {
+      /* special case -- 1.0 for normalized types is more easily attained if
+ * we start with a vector consisting of all bits set */
+ LLVMTypeRef vec_type = LLVMVectorType(elem_type, type.length);
+ LLVMValueRef vec = LLVMConstAllOnes(vec_type);
+
+#if 0
+ if(type.sign)
+ /* TODO: Unfortunately this caused "Tried to create a shift operation
+ * on a non-integer type!" */
+ vec = LLVMConstLShr(vec, lp_build_int_const_scalar(type, 1));
+#endif
+
+ return vec;
+ }
+
+ for(i = 1; i < type.length; ++i)
+ elems[i] = elems[0];
+
+ return LLVMConstVector(elems, type.length);
+}
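+
+/*
+ * Example (sketch): for an 8-bit unsigned normalized type, 1.0 is 0xff, i.e.
+ * all bits set, which is why the all-ones vector shortcut above works; for the
+ * signed normalized case 1.0 is 0x7f and is built with LLVMConstInt instead.
+ */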
+
+
+LLVMValueRef
+lp_build_const_scalar(union lp_type type,
+ double val)
+{
+ LLVMTypeRef elem_type = lp_build_elem_type(type);
+ LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
+ unsigned i;
+
+ assert(type.length <= LP_MAX_VECTOR_LENGTH);
+
+ if(type.floating) {
+ elems[0] = LLVMConstReal(elem_type, val);
+ }
+ else {
+ double dscale = lp_const_scale(type);
+
+ elems[0] = LLVMConstInt(elem_type, val*dscale + 0.5, 0);
+ }
+
+ for(i = 1; i < type.length; ++i)
+ elems[i] = elems[0];
+
+ return LLVMConstVector(elems, type.length);
+}
+
+
+LLVMValueRef
+lp_build_int_const_scalar(union lp_type type,
+ long long val)
+{
+ LLVMTypeRef elem_type = lp_build_int_elem_type(type);
+ LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
+ unsigned i;
+
+ assert(type.length <= LP_MAX_VECTOR_LENGTH);
+
+ for(i = 0; i < type.length; ++i)
+ elems[i] = LLVMConstInt(elem_type, val, type.sign ? 1 : 0);
+
+ return LLVMConstVector(elems, type.length);
+}
+
+
+LLVMValueRef
+lp_build_const_aos(union lp_type type,
+ double r, double g, double b, double a,
+ const unsigned char *swizzle)
+{
+ const unsigned char default_swizzle[4] = {0, 1, 2, 3};
+ LLVMTypeRef elem_type;
+ LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
+ unsigned i;
+
+ assert(type.length % 4 == 0);
+ assert(type.length <= LP_MAX_VECTOR_LENGTH);
+
+ elem_type = lp_build_elem_type(type);
+
+ if(swizzle == NULL)
+ swizzle = default_swizzle;
+
+ if(type.floating) {
+ elems[swizzle[0]] = LLVMConstReal(elem_type, r);
+ elems[swizzle[1]] = LLVMConstReal(elem_type, g);
+ elems[swizzle[2]] = LLVMConstReal(elem_type, b);
+ elems[swizzle[3]] = LLVMConstReal(elem_type, a);
+ }
+ else {
+ double dscale = lp_const_scale(type);
+
+ elems[swizzle[0]] = LLVMConstInt(elem_type, r*dscale + 0.5, 0);
+ elems[swizzle[1]] = LLVMConstInt(elem_type, g*dscale + 0.5, 0);
+ elems[swizzle[2]] = LLVMConstInt(elem_type, b*dscale + 0.5, 0);
+ elems[swizzle[3]] = LLVMConstInt(elem_type, a*dscale + 0.5, 0);
+ }
+
+ for(i = 4; i < type.length; ++i)
+ elems[i] = elems[i % 4];
+
+ return LLVMConstVector(elems, type.length);
+}
+
+
+LLVMValueRef
+lp_build_const_mask_aos(union lp_type type,
+ boolean cond[4])
+{
+ LLVMTypeRef elem_type = LLVMIntType(type.width);
+ LLVMValueRef masks[LP_MAX_VECTOR_LENGTH];
+ unsigned i, j;
+
+ assert(type.length <= LP_MAX_VECTOR_LENGTH);
+
+ for(j = 0; j < type.length; j += 4)
+ for(i = 0; i < 4; ++i)
+ masks[j + i] = LLVMConstInt(elem_type, cond[i] ? ~0 : 0, 0);
+
+ return LLVMConstVector(masks, type.length);
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_const.h b/src/gallium/drivers/llvmpipe/lp_bld_const.h
new file mode 100644
index 0000000000..1934530ea3
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_const.h
@@ -0,0 +1,108 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Helper functions for constant building.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#ifndef LP_BLD_CONST_H
+#define LP_BLD_CONST_H
+
+
+#include <llvm-c/Core.h>
+
+#include <pipe/p_compiler.h>
+
+
+union lp_type;
+
+
+unsigned
+lp_mantissa(union lp_type type);
+
+
+unsigned
+lp_const_shift(union lp_type type);
+
+
+unsigned
+lp_const_offset(union lp_type type);
+
+
+double
+lp_const_scale(union lp_type type);
+
+double
+lp_const_min(union lp_type type);
+
+
+double
+lp_const_max(union lp_type type);
+
+
+double
+lp_const_eps(union lp_type type);
+
+
+LLVMValueRef
+lp_build_undef(union lp_type type);
+
+
+LLVMValueRef
+lp_build_zero(union lp_type type);
+
+
+LLVMValueRef
+lp_build_one(union lp_type type);
+
+
+LLVMValueRef
+lp_build_const_scalar(union lp_type type,
+ double val);
+
+
+LLVMValueRef
+lp_build_int_const_scalar(union lp_type type,
+ long long val);
+
+
+LLVMValueRef
+lp_build_const_aos(union lp_type type,
+ double r, double g, double b, double a,
+ const unsigned char *swizzle);
+
+
+LLVMValueRef
+lp_build_const_mask_aos(union lp_type type,
+ boolean cond[4]);
+
+
+#endif /* !LP_BLD_CONST_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_conv.c b/src/gallium/drivers/llvmpipe/lp_bld_conv.c
new file mode 100644
index 0000000000..cd01496d54
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_conv.c
@@ -0,0 +1,702 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * @file
+ * Helper functions for type conversions.
+ *
+ * We want to use the fastest type for a given computation whenever feasible.
+ * The other side of this is that we need to be able convert between several
+ * types accurately and efficiently.
+ *
+ * Conversion between types of different bit width is quite complex.
+ *
+ * There are a few invariants to keep in mind in type conversions:
+ *
+ * - register width must remain constant:
+ *
+ * src_type.width * src_type.length == dst_type.width * dst_type.length
+ *
+ * - total number of elements must remain constant:
+ *
+ * src_type.length * num_srcs == dst_type.length * num_dsts
+ *
+ * It is not always possible to do the conversion both accurately and
+ * efficiently, usually due to lack of adequate machine instructions. In these
+ * cases it is important not to take shortcuts here and sacrifice accuracy, as
+ * these functions can be used anywhere. In the future we might have a
+ * precision parameter which can gauge the accuracy vs efficiency compromise,
+ * but for now, if the data conversion between two stages happens to be the
+ * bottleneck, then most likely one should just avoid converting at all and run
+ * both stages with the same type.
+ *
+ * Make sure to run lp_test_conv unit test after any change to this file.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
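+
+/*
+ * A usage sketch (illustrative values only): packing four float32x4 vectors
+ * into a single 8-bit unorm vector of length 16 would use
+ *
+ *   src_type = { floating=1, width=32, length=4 },  num_srcs = 4
+ *   dst_type = { norm=1,     width=8,  length=16 }, num_dsts = 1
+ *
+ * which satisfies both invariants above: 32*4 == 8*16 and 4*4 == 16*1.
+ */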
+
+
+#include "util/u_debug.h"
+#include "util/u_math.h"
+
+#include "lp_bld_type.h"
+#include "lp_bld_const.h"
+#include "lp_bld_intr.h"
+#include "lp_bld_arit.h"
+#include "lp_bld_conv.h"
+
+
+/**
+ * Special case for converting clamped IEEE-754 floats to unsigned norms.
+ *
+ * The mathematical voodoo below may seem excessive but it is actually
+ * paramount we do it this way for several reasons. First, there is no
+ * single-precision FP to unsigned integer conversion Intel SSE instruction.
+ * Second, even if there was, since the FP's mantissa takes only a fraction of
+ * register bits, the typical scale-and-cast approach would require double
+ * precision for accurate results, and therefore half the throughput.
+ *
+ * Although the result values can be scaled to an arbitrary bit width specified
+ * by dst_width, the actual result type will have the same width as the source.
+ */
+LLVMValueRef
+lp_build_clamped_float_to_unsigned_norm(LLVMBuilderRef builder,
+ union lp_type src_type,
+ unsigned dst_width,
+ LLVMValueRef src)
+{
+ LLVMTypeRef int_vec_type = lp_build_int_vec_type(src_type);
+ LLVMValueRef res;
+ unsigned mantissa;
+ unsigned n;
+ unsigned long long ubound;
+ unsigned long long mask;
+ double scale;
+ double bias;
+
+ assert(src_type.floating);
+
+ mantissa = lp_mantissa(src_type);
+
+ /* We cannot carry more bits than the mantissa */
+ n = MIN2(mantissa, dst_width);
+
+   /* These magic coefficients will make the desired result appear in the
+    * least significant bits of the mantissa.
+ */
+ ubound = ((unsigned long long)1 << n);
+ mask = ubound - 1;
+ scale = (double)mask/ubound;
+ bias = (double)((unsigned long long)1 << (mantissa - n));
+
+ res = LLVMBuildMul(builder, src, lp_build_const_scalar(src_type, scale), "");
+ res = LLVMBuildAdd(builder, res, lp_build_const_scalar(src_type, bias), "");
+ res = LLVMBuildBitCast(builder, res, int_vec_type, "");
+
+ if(dst_width > n) {
+ int shift = dst_width - n;
+ res = LLVMBuildShl(builder, res, lp_build_int_const_scalar(src_type, shift), "");
+
+ /* TODO: Fill in the empty lower bits for additional precision? */
+#if 0
+ {
+ LLVMValueRef msb;
+ msb = LLVMBuildLShr(builder, res, lp_build_int_const_scalar(src_type, dst_width - 1), "");
+ msb = LLVMBuildShl(builder, msb, lp_build_int_const_scalar(src_type, shift), "");
+ msb = LLVMBuildSub(builder, msb, lp_build_int_const_scalar(src_type, 1), "");
+ res = LLVMBuildOr(builder, res, msb, "");
+ }
+#elif 0
+ while(shift > 0) {
+ res = LLVMBuildOr(builder, res, LLVMBuildLShr(builder, res, lp_build_int_const_scalar(src_type, n), ""), "");
+ shift -= n;
+ n *= 2;
+ }
+#endif
+ }
+ else
+ res = LLVMBuildAnd(builder, res, lp_build_int_const_scalar(src_type, mask), "");
+
+ return res;
+}
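+
+/*
+ * Worked example (informal sketch): for float32 (23-bit mantissa) and
+ * dst_width = 8 we get n = 8, mask = 255, scale = 255/256 and bias = 2^15.
+ * For x in [0,1], x*scale + bias is a float whose mantissa holds x*255 in its
+ * low 8 bits, so the bitcast plus mask above recovers the 8-bit unorm value
+ * without any float-to-int conversion instruction.
+ */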
+
+
+/**
+ * Inverse of lp_build_clamped_float_to_unsigned_norm above.
+ */
+LLVMValueRef
+lp_build_unsigned_norm_to_float(LLVMBuilderRef builder,
+ unsigned src_width,
+ union lp_type dst_type,
+ LLVMValueRef src)
+{
+ LLVMTypeRef vec_type = lp_build_vec_type(dst_type);
+ LLVMTypeRef int_vec_type = lp_build_int_vec_type(dst_type);
+ LLVMValueRef bias_;
+ LLVMValueRef res;
+ unsigned mantissa;
+ unsigned n;
+ unsigned long long ubound;
+ unsigned long long mask;
+ double scale;
+ double bias;
+
+ mantissa = lp_mantissa(dst_type);
+
+ n = MIN2(mantissa, src_width);
+
+ ubound = ((unsigned long long)1 << n);
+ mask = ubound - 1;
+ scale = (double)ubound/mask;
+ bias = (double)((unsigned long long)1 << (mantissa - n));
+
+ res = src;
+
+ if(src_width > mantissa) {
+ int shift = src_width - mantissa;
+ res = LLVMBuildLShr(builder, res, lp_build_int_const_scalar(dst_type, shift), "");
+ }
+
+ bias_ = lp_build_const_scalar(dst_type, bias);
+
+ res = LLVMBuildOr(builder,
+ res,
+ LLVMBuildBitCast(builder, bias_, int_vec_type, ""), "");
+
+ res = LLVMBuildBitCast(builder, res, vec_type, "");
+
+ res = LLVMBuildSub(builder, res, bias_, "");
+ res = LLVMBuildMul(builder, res, lp_build_const_scalar(dst_type, scale), "");
+
+ return res;
+}
+
+
+/**
+ * Build shuffle vectors that match PUNPCKLxx and PUNPCKHxx instructions.
+ */
+static LLVMValueRef
+lp_build_const_unpack_shuffle(unsigned n, unsigned lo_hi)
+{
+ LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
+ unsigned i, j;
+
+ assert(n <= LP_MAX_VECTOR_LENGTH);
+ assert(lo_hi < 2);
+
+ /* TODO: cache results in a static table */
+
+ for(i = 0, j = lo_hi*n/2; i < n; i += 2, ++j) {
+ elems[i + 0] = LLVMConstInt(LLVMInt32Type(), 0 + j, 0);
+ elems[i + 1] = LLVMConstInt(LLVMInt32Type(), n + j, 0);
+ }
+
+ return LLVMConstVector(elems, n);
+}
+
+
+/**
+ * Build shuffle vectors that match PACKxx instructions.
+ */
+static LLVMValueRef
+lp_build_const_pack_shuffle(unsigned n)
+{
+ LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
+ unsigned i;
+
+ assert(n <= LP_MAX_VECTOR_LENGTH);
+
+ /* TODO: cache results in a static table */
+
+ for(i = 0; i < n; ++i)
+ elems[i] = LLVMConstInt(LLVMInt32Type(), 2*i, 0);
+
+ return LLVMConstVector(elems, n);
+}
+
+
+/**
+ * Expand the bit width.
+ *
+ * This will only change the number of bits the values are represented with,
+ * not the values themselves.
+ */
+static void
+lp_build_expand(LLVMBuilderRef builder,
+ union lp_type src_type,
+ union lp_type dst_type,
+ LLVMValueRef src,
+ LLVMValueRef *dst, unsigned num_dsts)
+{
+ unsigned num_tmps;
+ unsigned i;
+
+ /* Register width must remain constant */
+ assert(src_type.width * src_type.length == dst_type.width * dst_type.length);
+
+   /* We must not lose or gain channels, only precision */
+ assert(src_type.length == dst_type.length * num_dsts);
+
+ num_tmps = 1;
+ dst[0] = src;
+
+ while(src_type.width < dst_type.width) {
+ union lp_type new_type = src_type;
+ LLVMTypeRef new_vec_type;
+
+ new_type.width *= 2;
+ new_type.length /= 2;
+ new_vec_type = lp_build_vec_type(new_type);
+
+ for(i = num_tmps; i--; ) {
+ LLVMValueRef zero;
+ LLVMValueRef shuffle_lo;
+ LLVMValueRef shuffle_hi;
+ LLVMValueRef lo;
+ LLVMValueRef hi;
+
+ zero = lp_build_zero(src_type);
+ shuffle_lo = lp_build_const_unpack_shuffle(src_type.length, 0);
+ shuffle_hi = lp_build_const_unpack_shuffle(src_type.length, 1);
+
+ /* PUNPCKLBW, PUNPCKHBW */
+ lo = LLVMBuildShuffleVector(builder, dst[i], zero, shuffle_lo, "");
+ hi = LLVMBuildShuffleVector(builder, dst[i], zero, shuffle_hi, "");
+
+ dst[2*i + 0] = LLVMBuildBitCast(builder, lo, new_vec_type, "");
+ dst[2*i + 1] = LLVMBuildBitCast(builder, hi, new_vec_type, "");
+ }
+
+ src_type = new_type;
+
+ num_tmps *= 2;
+ }
+
+ assert(num_tmps == num_dsts);
+}
+
+
+/**
+ * Non-interleaved pack.
+ *
+ * This will move values as
+ *
+ * lo = __ l0 __ l1 __ l2 __.. __ ln
+ * hi = __ h0 __ h1 __ h2 __.. __ hn
+ * res = l0 l1 l2 .. ln h0 h1 h2 .. hn
+ *
+ * TODO: handle saturation consistently.
+ */
+static LLVMValueRef
+lp_build_pack2(LLVMBuilderRef builder,
+ union lp_type src_type,
+ union lp_type dst_type,
+ boolean clamped,
+ LLVMValueRef lo,
+ LLVMValueRef hi)
+{
+ LLVMTypeRef src_vec_type = lp_build_vec_type(src_type);
+ LLVMTypeRef dst_vec_type = lp_build_vec_type(dst_type);
+ LLVMValueRef shuffle;
+ LLVMValueRef res;
+
+ /* Register width must remain constant */
+ assert(src_type.width * src_type.length == dst_type.width * dst_type.length);
+
+   /* We must not lose or gain channels, only precision */
+ assert(src_type.length * 2 == dst_type.length);
+
+ assert(!src_type.floating);
+ assert(!dst_type.floating);
+
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ if(src_type.width * src_type.length == 128) {
+      /* All X86 non-interleaved pack instructions take signed inputs and
+ * saturate them, so saturate beforehand. */
+ if(!src_type.sign && !clamped) {
+ struct lp_build_context bld;
+ unsigned dst_bits = dst_type.sign ? dst_type.width - 1 : dst_type.width;
+ LLVMValueRef dst_max = lp_build_int_const_scalar(src_type, ((unsigned long long)1 << dst_bits) - 1);
+ lp_build_context_init(&bld, builder, src_type);
+ lo = lp_build_min(&bld, lo, dst_max);
+ hi = lp_build_min(&bld, hi, dst_max);
+ }
+
+ switch(src_type.width) {
+ case 32:
+ if(dst_type.sign)
+ res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packssdw.128", src_vec_type, lo, hi);
+ else
+            /* PACKUSDW is the only intrinsic with a consistent signature */
+ return lp_build_intrinsic_binary(builder, "llvm.x86.sse41.packusdw", dst_vec_type, lo, hi);
+ break;
+
+ case 16:
+ if(dst_type.sign)
+ res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packsswb.128", src_vec_type, lo, hi);
+ else
+ res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packuswb.128", src_vec_type, lo, hi);
+ break;
+
+ default:
+ assert(0);
+ return LLVMGetUndef(dst_vec_type);
+ }
+
+ res = LLVMBuildBitCast(builder, res, dst_vec_type, "");
+ return res;
+ }
+#endif
+
+ lo = LLVMBuildBitCast(builder, lo, dst_vec_type, "");
+ hi = LLVMBuildBitCast(builder, hi, dst_vec_type, "");
+
+ shuffle = lp_build_const_pack_shuffle(dst_type.length);
+
+ res = LLVMBuildShuffleVector(builder, lo, hi, shuffle, "");
+
+ return res;
+}
+
+
+/**
+ * Truncate the bit width.
+ *
+ * TODO: Handle saturation consistently.
+ */
+static LLVMValueRef
+lp_build_trunc(LLVMBuilderRef builder,
+ union lp_type src_type,
+ union lp_type dst_type,
+ boolean clamped,
+ const LLVMValueRef *src, unsigned num_srcs)
+{
+ LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
+ unsigned i;
+
+ /* Register width must remain constant */
+ assert(src_type.width * src_type.length == dst_type.width * dst_type.length);
+
+   /* We must not lose or gain channels, only precision */
+ assert(src_type.length * num_srcs == dst_type.length);
+
+ for(i = 0; i < num_srcs; ++i)
+ tmp[i] = src[i];
+
+ while(src_type.width > dst_type.width) {
+ union lp_type new_type = src_type;
+
+ new_type.width /= 2;
+ new_type.length *= 2;
+
+      /* Take the sign changes into consideration only in the last step */
+ if(new_type.width == dst_type.width)
+ new_type.sign = dst_type.sign;
+
+ num_srcs /= 2;
+
+ for(i = 0; i < num_srcs; ++i)
+ tmp[i] = lp_build_pack2(builder, src_type, new_type, clamped,
+ tmp[2*i + 0], tmp[2*i + 1]);
+
+ src_type = new_type;
+ }
+
+ assert(num_srcs == 1);
+
+ return tmp[0];
+}
+
+
+/**
+ * Generic type conversion.
+ *
+ * TODO: Take a precision argument, or even better, add a new precision member
+ * to the lp_type union.
+ */
+void
+lp_build_conv(LLVMBuilderRef builder,
+ union lp_type src_type,
+ union lp_type dst_type,
+ const LLVMValueRef *src, unsigned num_srcs,
+ LLVMValueRef *dst, unsigned num_dsts)
+{
+ union lp_type tmp_type;
+ LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
+ unsigned num_tmps;
+ unsigned i;
+
+ /* Register width must remain constant */
+ assert(src_type.width * src_type.length == dst_type.width * dst_type.length);
+
+   /* We must not lose or gain channels, only precision */
+ assert(src_type.length * num_srcs == dst_type.length * num_dsts);
+
+ assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
+ assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
+
+ tmp_type = src_type;
+ for(i = 0; i < num_srcs; ++i)
+ tmp[i] = src[i];
+ num_tmps = num_srcs;
+
+ /*
+ * Clamp if necessary
+ */
+
+ if(src_type.value != dst_type.value) {
+ struct lp_build_context bld;
+ double src_min = lp_const_min(src_type);
+ double dst_min = lp_const_min(dst_type);
+ double src_max = lp_const_max(src_type);
+ double dst_max = lp_const_max(dst_type);
+ LLVMValueRef thres;
+
+ lp_build_context_init(&bld, builder, tmp_type);
+
+ if(src_min < dst_min) {
+ if(dst_min == 0.0)
+ thres = bld.zero;
+ else
+ thres = lp_build_const_scalar(src_type, dst_min);
+ for(i = 0; i < num_tmps; ++i)
+ tmp[i] = lp_build_max(&bld, tmp[i], thres);
+ }
+
+ if(src_max > dst_max) {
+ if(dst_max == 1.0)
+ thres = bld.one;
+ else
+ thres = lp_build_const_scalar(src_type, dst_max);
+ for(i = 0; i < num_tmps; ++i)
+ tmp[i] = lp_build_min(&bld, tmp[i], thres);
+ }
+ }
+
+ /*
+ * Scale to the narrowest range
+ */
+
+ if(dst_type.floating) {
+ /* Nothing to do */
+ }
+ else if(tmp_type.floating) {
+ if(!dst_type.fixed && !dst_type.sign && dst_type.norm) {
+ for(i = 0; i < num_tmps; ++i) {
+ tmp[i] = lp_build_clamped_float_to_unsigned_norm(builder,
+ tmp_type,
+ dst_type.width,
+ tmp[i]);
+ }
+ tmp_type.floating = FALSE;
+ }
+ else {
+ double dst_scale = lp_const_scale(dst_type);
+ LLVMTypeRef tmp_vec_type;
+
+ if (dst_scale != 1.0) {
+ LLVMValueRef scale = lp_build_const_scalar(tmp_type, dst_scale);
+ for(i = 0; i < num_tmps; ++i)
+ tmp[i] = LLVMBuildMul(builder, tmp[i], scale, "");
+ }
+
+ /* Use an equally sized integer for intermediate computations */
+ tmp_type.floating = FALSE;
+ tmp_vec_type = lp_build_vec_type(tmp_type);
+ for(i = 0; i < num_tmps; ++i) {
+#if 0
+ if(dst_type.sign)
+ tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
+ else
+ tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
+#else
+ /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
+ tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
+#endif
+ }
+ }
+ }
+ else {
+ unsigned src_shift = lp_const_shift(src_type);
+ unsigned dst_shift = lp_const_shift(dst_type);
+
+ /* FIXME: compensate different offsets too */
+ if(src_shift > dst_shift) {
+ LLVMValueRef shift = lp_build_int_const_scalar(tmp_type, src_shift - dst_shift);
+ for(i = 0; i < num_tmps; ++i)
+ if(src_type.sign)
+ tmp[i] = LLVMBuildAShr(builder, tmp[i], shift, "");
+ else
+ tmp[i] = LLVMBuildLShr(builder, tmp[i], shift, "");
+ }
+ }
+
+ /*
+ * Truncate or expand bit width
+ */
+
+ assert(!tmp_type.floating || tmp_type.width == dst_type.width);
+
+ if(tmp_type.width > dst_type.width) {
+ assert(num_dsts == 1);
+ tmp[0] = lp_build_trunc(builder, tmp_type, dst_type, TRUE, tmp, num_tmps);
+ tmp_type.width = dst_type.width;
+ tmp_type.length = dst_type.length;
+ num_tmps = 1;
+ }
+
+ if(tmp_type.width < dst_type.width) {
+ assert(num_tmps == 1);
+ lp_build_expand(builder, tmp_type, dst_type, tmp[0], tmp, num_dsts);
+ tmp_type.width = dst_type.width;
+ tmp_type.length = dst_type.length;
+ num_tmps = num_dsts;
+ }
+
+ assert(tmp_type.width == dst_type.width);
+ assert(tmp_type.length == dst_type.length);
+ assert(num_tmps == num_dsts);
+
+ /*
+ * Scale to the widest range
+ */
+
+ if(src_type.floating) {
+ /* Nothing to do */
+ }
+ else if(!src_type.floating && dst_type.floating) {
+ if(!src_type.fixed && !src_type.sign && src_type.norm) {
+ for(i = 0; i < num_tmps; ++i) {
+ tmp[i] = lp_build_unsigned_norm_to_float(builder,
+ src_type.width,
+ dst_type,
+ tmp[i]);
+ }
+ tmp_type.floating = TRUE;
+ }
+ else {
+ double src_scale = lp_const_scale(src_type);
+ LLVMTypeRef tmp_vec_type;
+
+         /* Use an equally sized floating point type for intermediate computations */
+ tmp_type.floating = TRUE;
+ tmp_type.sign = TRUE;
+ tmp_vec_type = lp_build_vec_type(tmp_type);
+ for(i = 0; i < num_tmps; ++i) {
+#if 0
+ if(dst_type.sign)
+ tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
+ else
+ tmp[i] = LLVMBuildUIToFP(builder, tmp[i], tmp_vec_type, "");
+#else
+ /* FIXME: there is no SSE counterpart for LLVMBuildUIToFP */
+ tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
+#endif
+ }
+
+ if (src_scale != 1.0) {
+ LLVMValueRef scale = lp_build_const_scalar(tmp_type, 1.0/src_scale);
+ for(i = 0; i < num_tmps; ++i)
+ tmp[i] = LLVMBuildMul(builder, tmp[i], scale, "");
+ }
+ }
+ }
+ else {
+ unsigned src_shift = lp_const_shift(src_type);
+ unsigned dst_shift = lp_const_shift(dst_type);
+
+ /* FIXME: compensate different offsets too */
+ if(src_shift < dst_shift) {
+ LLVMValueRef shift = lp_build_int_const_scalar(tmp_type, dst_shift - src_shift);
+ for(i = 0; i < num_tmps; ++i)
+ tmp[i] = LLVMBuildShl(builder, tmp[i], shift, "");
+ }
+ }
+
+ for(i = 0; i < num_dsts; ++i)
+ dst[i] = tmp[i];
+}
+
+
+/**
+ * Bit mask conversion.
+ *
+ * This will convert the integer masks that match the given types.
+ *
+ * The mask values should be 0 or -1, i.e., all bits either set to zero or one.
+ * Any other value will likely cause unpredictable results.
+ *
+ * This is basically a very trimmed down version of lp_build_conv.
+ */
+void
+lp_build_conv_mask(LLVMBuilderRef builder,
+ union lp_type src_type,
+ union lp_type dst_type,
+ const LLVMValueRef *src, unsigned num_srcs,
+ LLVMValueRef *dst, unsigned num_dsts)
+{
+ /* Register width must remain constant */
+ assert(src_type.width * src_type.length == dst_type.width * dst_type.length);
+
+   /* We must not lose or gain channels, only precision */
+ assert(src_type.length * num_srcs == dst_type.length * num_dsts);
+
+ /*
+ * Drop
+ *
+ * We assume all values are 0 or -1
+ */
+
+ src_type.floating = FALSE;
+ src_type.fixed = FALSE;
+ src_type.sign = TRUE;
+ src_type.norm = FALSE;
+
+ dst_type.floating = FALSE;
+ dst_type.fixed = FALSE;
+ dst_type.sign = TRUE;
+ dst_type.norm = FALSE;
+
+ /*
+ * Truncate or expand bit width
+ */
+
+ if(src_type.width > dst_type.width) {
+ assert(num_dsts == 1);
+ dst[0] = lp_build_trunc(builder, src_type, dst_type, TRUE, src, num_srcs);
+ }
+ else if(src_type.width < dst_type.width) {
+ assert(num_srcs == 1);
+ lp_build_expand(builder, src_type, dst_type, src[0], dst, num_dsts);
+ }
+ else {
+ assert(num_srcs == num_dsts);
+ memcpy(dst, src, num_dsts * sizeof *dst);
+ }
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_conv.h b/src/gallium/drivers/llvmpipe/lp_bld_conv.h
new file mode 100644
index 0000000000..05c1ef2a10
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_conv.h
@@ -0,0 +1,73 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Helper functions for type conversions.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#ifndef LP_BLD_CONV_H
+#define LP_BLD_CONV_H
+
+
+#include <llvm-c/Core.h>
+
+
+union lp_type;
+
+
+LLVMValueRef
+lp_build_clamped_float_to_unsigned_norm(LLVMBuilderRef builder,
+ union lp_type src_type,
+ unsigned dst_width,
+ LLVMValueRef src);
+
+LLVMValueRef
+lp_build_unsigned_norm_to_float(LLVMBuilderRef builder,
+ unsigned src_width,
+ union lp_type dst_type,
+ LLVMValueRef src);
+
+
+void
+lp_build_conv(LLVMBuilderRef builder,
+ union lp_type src_type,
+ union lp_type dst_type,
+ const LLVMValueRef *srcs, unsigned num_srcs,
+ LLVMValueRef *dsts, unsigned num_dsts);
+
+void
+lp_build_conv_mask(LLVMBuilderRef builder,
+ union lp_type src_type,
+ union lp_type dst_type,
+ const LLVMValueRef *src, unsigned num_srcs,
+ LLVMValueRef *dst, unsigned num_dsts);
+
+#endif /* !LP_BLD_CONV_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_debug.c b/src/gallium/drivers/llvmpipe/lp_bld_debug.c
new file mode 100644
index 0000000000..59d8f492e6
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_debug.c
@@ -0,0 +1,125 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifdef HAVE_UDIS86
+#include <udis86.h>
+#endif
+
+#include "util/u_math.h"
+#include "util/u_debug.h"
+#include "lp_bld_debug.h"
+
+
+/**
+ * Check alignment.
+ *
+ * It is important that this check is not implemented as a macro or inlined
+ * function, as the compiler's assumptions with respect to the alignment of
+ * global and stack variables would often make the check a no-op, defeating
+ * the whole purpose of the exercise.
+ */
+boolean
+lp_check_alignment(const void *ptr, unsigned alignment)
+{
+ assert(util_is_pot(alignment));
+ return ((uintptr_t)ptr & (alignment - 1)) == 0;
+}
+
+
+void
+lp_disassemble(const void* func)
+{
+#ifdef HAVE_UDIS86
+ ud_t ud_obj;
+ uint64_t max_jmp_pc;
+
+ ud_init(&ud_obj);
+
+ ud_set_input_buffer(&ud_obj, (void*)func, 0xffff);
+
+ max_jmp_pc = (uint64_t) (uintptr_t) func;
+ ud_set_pc(&ud_obj, max_jmp_pc);
+
+#ifdef PIPE_ARCH_X86
+ ud_set_mode(&ud_obj, 32);
+#endif
+#ifdef PIPE_ARCH_X86_64
+ ud_set_mode(&ud_obj, 64);
+#endif
+
+ ud_set_syntax(&ud_obj, UD_SYN_ATT);
+
+ while (ud_disassemble(&ud_obj)) {
+
+#ifdef PIPE_ARCH_X86
+ debug_printf("%08lx: ", (unsigned long)ud_insn_off(&ud_obj));
+#endif
+#ifdef PIPE_ARCH_X86_64
+ debug_printf("%016llx: ", (unsigned long long)ud_insn_off(&ud_obj));
+#endif
+
+#if 0
+ debug_printf("%-16s ", ud_insn_hex(&ud_obj));
+#endif
+
+ debug_printf("%s\n", ud_insn_asm(&ud_obj));
+
+ if(ud_obj.mnemonic != UD_Icall) {
+ unsigned i;
+ for(i = 0; i < 3; ++i) {
+ const struct ud_operand *op = &ud_obj.operand[i];
+ if (op->type == UD_OP_JIMM){
+ uint64_t pc = ud_obj.pc;
+
+ switch (op->size) {
+ case 8:
+ pc += op->lval.sbyte;
+ break;
+ case 16:
+ pc += op->lval.sword;
+ break;
+ case 32:
+ pc += op->lval.sdword;
+ break;
+ default:
+ break;
+ }
+ if(pc > max_jmp_pc)
+ max_jmp_pc = pc;
+ }
+ }
+ }
+
+ if (ud_insn_off(&ud_obj) >= max_jmp_pc && ud_obj.mnemonic == UD_Iret)
+ break;
+ }
+ debug_printf("\n");
+#else
+ (void)func;
+#endif
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_debug.h b/src/gallium/drivers/llvmpipe/lp_bld_debug.h
new file mode 100644
index 0000000000..583e6132b4
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_debug.h
@@ -0,0 +1,64 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef LP_BLD_DEBUG_H
+#define LP_BLD_DEBUG_H
+
+
+#include <llvm-c/Core.h>
+
+#include "pipe/p_compiler.h"
+#include "util/u_string.h"
+
+
+static INLINE void
+lp_build_name(LLVMValueRef val, const char *format, ...)
+{
+#ifdef DEBUG
+ char name[32];
+ va_list ap;
+ va_start(ap, format);
+ util_vsnprintf(name, sizeof name, format, ap);
+ va_end(ap);
+ LLVMSetValueName(val, name);
+#else
+ (void)val;
+ (void)format;
+#endif
+}
+
+
+boolean
+lp_check_alignment(const void *ptr, unsigned alignment);
+
+
+void
+lp_disassemble(const void* func);
+
+
+#endif /* !LP_BLD_DEBUG_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_depth.c b/src/gallium/drivers/llvmpipe/lp_bld_depth.c
new file mode 100644
index 0000000000..e5fe81193f
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_depth.c
@@ -0,0 +1,216 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Depth/stencil testing to LLVM IR translation.
+ *
+ * To be done accurately and efficiently, the depth/stencil test must be done
+ * with the same type/format as the depth/stencil buffer, which implies
+ * massaging the incoming depths to fit into place. Using a more
+ * straightforward type/format for depth/stencil values internally and only
+ * converting when flushing would avoid this, but it would most likely result
+ * in depth-fighting artifacts.
+ *
+ * We are free to use a different pixel layout though. Since our basic
+ * processing unit is a quad (2x2 pixel block) we store the depth/stencil
+ * values tiled, a quad at time. That is, a depth buffer containing
+ *
+ * Z11 Z12 Z13 Z14 ...
+ * Z21 Z22 Z23 Z24 ...
+ * Z31 Z32 Z33 Z34 ...
+ * Z41 Z42 Z43 Z44 ...
+ * ... ... ... ... ...
+ *
+ * will actually be stored in memory as
+ *
+ * Z11 Z12 Z21 Z22 Z13 Z14 Z23 Z24 ...
+ * Z31 Z32 Z41 Z42 Z33 Z34 Z43 Z44 ...
+ * ... ... ... ... ... ... ... ... ...
+ *
+ * FIXME: Code generate stencil test
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
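+/*
+ * As an illustration, the scalar addressing this tiled layout implies would
+ * look roughly like the hypothetical helper below (assuming one value per
+ * pixel and an even, quad-aligned buffer width; the helper is not part of
+ * the driver, it just restates the diagram above):
+ *
+ *    unsigned
+ *    quad_tiled_index(unsigned x, unsigned y, unsigned width)
+ *    {
+ *       unsigned quad = (y / 2) * (width / 2) + (x / 2);
+ *       unsigned sub = (y % 2) * 2 + (x % 2);
+ *       return quad * 4 + sub;
+ *    }
+ */
+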
+#include "pipe/p_state.h"
+#include "util/u_format.h"
+
+#include "lp_bld_type.h"
+#include "lp_bld_const.h"
+#include "lp_bld_logic.h"
+#include "lp_bld_flow.h"
+#include "lp_bld_debug.h"
+#include "lp_bld_depth.h"
+
+
+/**
+ * Return a type appropriate for depth/stencil testing.
+ */
+union lp_type
+lp_depth_type(const struct util_format_description *format_desc,
+ unsigned length)
+{
+ union lp_type type;
+ unsigned swizzle;
+
+ assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS);
+ assert(format_desc->block.width == 1);
+ assert(format_desc->block.height == 1);
+
+ swizzle = format_desc->swizzle[0];
+ assert(swizzle < 4);
+
+ type.value = 0;
+ type.width = format_desc->block.bits;
+
+ if(format_desc->channel[swizzle].type == UTIL_FORMAT_TYPE_FLOAT) {
+ type.floating = TRUE;
+ assert(swizzle == 0);
+ assert(format_desc->channel[swizzle].size == format_desc->block.bits);
+ }
+ else if(format_desc->channel[swizzle].type == UTIL_FORMAT_TYPE_UNSIGNED) {
+ assert(format_desc->block.bits <= 32);
+ if(format_desc->channel[swizzle].normalized)
+ type.norm = TRUE;
+ }
+ else
+ assert(0);
+
+ assert(type.width <= length);
+ type.length = length / type.width;
+
+ return type;
+}
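+
+/*
+ * For example (illustrative): given a 32-bit unsigned normalized depth
+ * format and a total vector width of 128 bits, the function above returns a
+ * 32-bit norm type with length 4, i.e. four depth values per vector.
+ */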
+
+
+/**
+ * Depth test.
+ */
+void
+lp_build_depth_test(LLVMBuilderRef builder,
+ const struct pipe_depth_state *state,
+ union lp_type type,
+ const struct util_format_description *format_desc,
+ struct lp_build_mask_context *mask,
+ LLVMValueRef src,
+ LLVMValueRef dst_ptr)
+{
+ struct lp_build_context bld;
+ unsigned z_swizzle;
+ LLVMValueRef dst;
+ LLVMValueRef z_bitmask = NULL;
+ LLVMValueRef test;
+
+ if(!state->enabled)
+ return;
+
+ assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS);
+ assert(format_desc->block.width == 1);
+ assert(format_desc->block.height == 1);
+
+ z_swizzle = format_desc->swizzle[0];
+ if(z_swizzle == UTIL_FORMAT_SWIZZLE_NONE)
+ return;
+
+ /* Sanity checking */
+ assert(z_swizzle < 4);
+ assert(format_desc->block.bits == type.width);
+ if(type.floating) {
+ assert(z_swizzle == 0);
+ assert(format_desc->channel[z_swizzle].type == UTIL_FORMAT_TYPE_FLOAT);
+ assert(format_desc->channel[z_swizzle].size == format_desc->block.bits);
+ }
+ else {
+ assert(format_desc->channel[z_swizzle].type == UTIL_FORMAT_TYPE_UNSIGNED);
+ assert(format_desc->channel[z_swizzle].normalized);
+ assert(!type.fixed);
+ assert(!type.sign);
+ assert(type.norm);
+ }
+
+ /* Setup build context */
+ lp_build_context_init(&bld, builder, type);
+
+ dst = LLVMBuildLoad(builder, dst_ptr, "");
+
+ lp_build_name(dst, "zsbuf");
+
+ /* Align the source depth bits with the destination's, and mask out any
+ * stencil or padding bits from both */
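+ /*
+ * Illustrative example: for a 32-bit block whose low 24 bits hold depth and
+ * whose top 8 bits hold stencil, padding_right is 0, padding_left is 8 and
+ * z_bitmask is 0x00ffffff; the incoming depth is shifted down by 8 bits and
+ * the destination has its stencil bits masked off, leaving both with the
+ * 24 depth bits in the same position.
+ */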
+ if(format_desc->channel[z_swizzle].size == format_desc->block.bits) {
+ assert(z_swizzle == 0);
+ /* nothing to do */
+ }
+ else {
+ unsigned padding_left;
+ unsigned padding_right;
+ unsigned chan;
+
+ assert(format_desc->layout == UTIL_FORMAT_LAYOUT_ARITH);
+ assert(format_desc->channel[z_swizzle].type == UTIL_FORMAT_TYPE_UNSIGNED);
+ assert(format_desc->channel[z_swizzle].size <= format_desc->block.bits);
+ assert(format_desc->channel[z_swizzle].normalized);
+
+ padding_right = 0;
+ for(chan = 0; chan < z_swizzle; ++chan)
+ padding_right += format_desc->channel[chan].size;
+ padding_left = format_desc->block.bits -
+ (padding_right + format_desc->channel[z_swizzle].size);
+
+ if(padding_left || padding_right) {
+ const unsigned long long mask_left = ((unsigned long long)1 << (format_desc->block.bits - padding_left)) - 1;
+ const unsigned long long mask_right = ((unsigned long long)1 << (padding_right)) - 1;
+ z_bitmask = lp_build_int_const_scalar(type, mask_left ^ mask_right);
+ }
+
+ if(padding_left)
+ src = LLVMBuildLShr(builder, src, lp_build_int_const_scalar(type, padding_left), "");
+ if(padding_right)
+ src = LLVMBuildAnd(builder, src, z_bitmask, "");
+ if(padding_left || padding_right)
+ dst = LLVMBuildAnd(builder, dst, z_bitmask, "");
+ }
+
+ lp_build_name(dst, "zsbuf.z");
+
+ test = lp_build_cmp(&bld, state->func, src, dst);
+ lp_build_mask_update(mask, test);
+
+ if(state->writemask) {
+ if(z_bitmask)
+ z_bitmask = LLVMBuildAnd(builder, mask->value, z_bitmask, "");
+ else
+ z_bitmask = mask->value;
+
+ dst = lp_build_select(&bld, z_bitmask, src, dst);
+ LLVMBuildStore(builder, dst, dst_ptr);
+ }
+
+ /* FIXME */
+ assert(!state->occlusion_count);
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_depth.h b/src/gallium/drivers/llvmpipe/lp_bld_depth.h
new file mode 100644
index 0000000000..5d2e042fcc
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_depth.h
@@ -0,0 +1,63 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * Depth/stencil testing to LLVM IR translation.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef LP_BLD_DEPTH_H
+#define LP_BLD_DEPTH_H
+
+
+#include <llvm-c/Core.h>
+
+
+struct pipe_depth_state;
+struct util_format_description;
+union lp_type;
+struct lp_build_mask_context;
+
+
+union lp_type
+lp_depth_type(const struct util_format_description *format_desc,
+ unsigned length);
+
+
+void
+lp_build_depth_test(LLVMBuilderRef builder,
+ const struct pipe_depth_state *state,
+ union lp_type type,
+ const struct util_format_description *format_desc,
+ struct lp_build_mask_context *mask,
+ LLVMValueRef src,
+ LLVMValueRef dst_ptr);
+
+
+#endif /* !LP_BLD_DEPTH_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_flow.c b/src/gallium/drivers/llvmpipe/lp_bld_flow.c
new file mode 100644
index 0000000000..69ed014ff3
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_flow.c
@@ -0,0 +1,493 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * LLVM control flow build helpers.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+
+#include "lp_bld_type.h"
+#include "lp_bld_flow.h"
+
+
+#define LP_BUILD_FLOW_MAX_VARIABLES 32
+#define LP_BUILD_FLOW_MAX_DEPTH 32
+
+
+/**
+ * Enumeration of all possible flow constructs.
+ */
+enum lp_build_flow_construct_kind {
+ LP_BUILD_FLOW_SCOPE,
+ LP_BUILD_FLOW_SKIP,
+};
+
+
+/**
+ * Variable declaration scope.
+ */
+struct lp_build_flow_scope
+{
+ /** Number of variables declared in this scope */
+ unsigned num_variables;
+};
+
+
+/**
+ * Early exit. Useful to skip to the end of a function or block when
+ * the execution mask becomes zero or when there is an error condition.
+ */
+struct lp_build_flow_skip
+{
+ /** Block to skip to */
+ LLVMBasicBlockRef block;
+
+ /** Number of variables declared at the beginning */
+ unsigned num_variables;
+
+ LLVMValueRef *phi;
+};
+
+
+/**
+ * Union of all possible flow constructs' data
+ */
+union lp_build_flow_construct_data
+{
+ struct lp_build_flow_scope scope;
+ struct lp_build_flow_skip skip;
+};
+
+
+/**
+ * Element of the flow construct stack.
+ */
+struct lp_build_flow_construct
+{
+ enum lp_build_flow_construct_kind kind;
+ union lp_build_flow_construct_data data;
+};
+
+
+/**
+ * All necessary data to generate LLVM control flow constructs.
+ *
+ * Besides keeping track of the control flow constructs themselves we also
+ * need to keep track of variables in order to generate SSA Phi values.
+ */
+struct lp_build_flow_context
+{
+ LLVMBuilderRef builder;
+
+ /**
+ * Control flow stack.
+ */
+ struct lp_build_flow_construct constructs[LP_BUILD_FLOW_MAX_DEPTH];
+ unsigned num_constructs;
+
+ /**
+ * Variable stack
+ */
+ LLVMValueRef *variables[LP_BUILD_FLOW_MAX_VARIABLES];
+ unsigned num_variables;
+};
+
+
+struct lp_build_flow_context *
+lp_build_flow_create(LLVMBuilderRef builder)
+{
+ struct lp_build_flow_context *flow;
+
+ flow = CALLOC_STRUCT(lp_build_flow_context);
+ if(!flow)
+ return NULL;
+
+ flow->builder = builder;
+
+ return flow;
+}
+
+
+void
+lp_build_flow_destroy(struct lp_build_flow_context *flow)
+{
+ assert(flow->num_constructs == 0);
+ assert(flow->num_variables == 0);
+ FREE(flow);
+}
+
+
+static union lp_build_flow_construct_data *
+lp_build_flow_push(struct lp_build_flow_context *flow,
+ enum lp_build_flow_construct_kind kind)
+{
+ assert(flow->num_constructs < LP_BUILD_FLOW_MAX_DEPTH);
+ if(flow->num_constructs >= LP_BUILD_FLOW_MAX_DEPTH)
+ return NULL;
+
+ flow->constructs[flow->num_constructs].kind = kind;
+ return &flow->constructs[flow->num_constructs++].data;
+}
+
+
+static union lp_build_flow_construct_data *
+lp_build_flow_peek(struct lp_build_flow_context *flow,
+ enum lp_build_flow_construct_kind kind)
+{
+ assert(flow->num_constructs);
+ if(!flow->num_constructs)
+ return NULL;
+
+ assert(flow->constructs[flow->num_constructs - 1].kind == kind);
+ if(flow->constructs[flow->num_constructs - 1].kind != kind)
+ return NULL;
+
+ return &flow->constructs[flow->num_constructs - 1].data;
+}
+
+
+static union lp_build_flow_construct_data *
+lp_build_flow_pop(struct lp_build_flow_context *flow,
+ enum lp_build_flow_construct_kind kind)
+{
+ assert(flow->num_constructs);
+ if(!flow->num_constructs)
+ return NULL;
+
+ assert(flow->constructs[flow->num_constructs - 1].kind == kind);
+ if(flow->constructs[flow->num_constructs - 1].kind != kind)
+ return NULL;
+
+ return &flow->constructs[--flow->num_constructs].data;
+}
+
+
+/**
+ * Begin a variable scope.
+ */
+void
+lp_build_flow_scope_begin(struct lp_build_flow_context *flow)
+{
+ struct lp_build_flow_scope *scope;
+
+ scope = &lp_build_flow_push(flow, LP_BUILD_FLOW_SCOPE)->scope;
+ if(!scope)
+ return;
+
+ scope->num_variables = 0;
+}
+
+
+/**
+ * Declare a variable.
+ *
+ * A variable is a named entity which can have different LLVMValueRef's at
+ * different points of the program. This is relevant for control flow because
+ * when there are multiple branches to the same location we need to replace
+ * the variable's value with a Phi function as explained in
+ * http://en.wikipedia.org/wiki/Static_single_assignment_form .
+ *
+ * We keep track of variables by keeping around a pointer to where their
+ * current value is stored.
+ *
+ * There are a few cautions to observe:
+ *
+ * - Variable's value must not be NULL. If there is no initial value then
+ * LLVMGetUndef() should be used.
+ *
+ * - Variable's value must be kept up-to-date. If the variable is going to be
+ * modified by a function then a pointer should be passed so that its value
+ * is accurate. Failure to do this will cause some of the variables'
+ * transient values to be lost, leading to wrong results.
+ *
+ * - A program should be written from top to bottom, by always appending
+ * instructions to the bottom with a single LLVMBuilderRef. Inserting and/or
+ * modifying existing statements will most likely lead to wrong results.
+ *
+ */
+void
+lp_build_flow_scope_declare(struct lp_build_flow_context *flow,
+ LLVMValueRef *variable)
+{
+ struct lp_build_flow_scope *scope;
+
+ scope = &lp_build_flow_peek(flow, LP_BUILD_FLOW_SCOPE)->scope;
+ if(!scope)
+ return;
+
+ assert(*variable);
+ if(!*variable)
+ return;
+
+ assert(flow->num_variables < LP_BUILD_FLOW_MAX_VARIABLES);
+ if(flow->num_variables >= LP_BUILD_FLOW_MAX_VARIABLES)
+ return;
+
+ flow->variables[flow->num_variables++] = variable;
+ ++scope->num_variables;
+}
+
+
+void
+lp_build_flow_scope_end(struct lp_build_flow_context *flow)
+{
+ struct lp_build_flow_scope *scope;
+
+ scope = &lp_build_flow_pop(flow, LP_BUILD_FLOW_SCOPE)->scope;
+ if(!scope)
+ return;
+
+ assert(flow->num_variables >= scope->num_variables);
+ if(flow->num_variables < scope->num_variables) {
+ flow->num_variables = 0;
+ return;
+ }
+
+ flow->num_variables -= scope->num_variables;
+}
+
+
+static LLVMBasicBlockRef
+lp_build_flow_insert_block(struct lp_build_flow_context *flow)
+{
+ LLVMBasicBlockRef current_block;
+ LLVMBasicBlockRef next_block;
+ LLVMBasicBlockRef new_block;
+
+ current_block = LLVMGetInsertBlock(flow->builder);
+
+ next_block = LLVMGetNextBasicBlock(current_block);
+ if(next_block) {
+ new_block = LLVMInsertBasicBlock(next_block, "");
+ }
+ else {
+ LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
+ new_block = LLVMAppendBasicBlock(function, "");
+ }
+
+ return new_block;
+}
+
+void
+lp_build_flow_skip_begin(struct lp_build_flow_context *flow)
+{
+ struct lp_build_flow_skip *skip;
+ LLVMBuilderRef builder;
+ unsigned i;
+
+ skip = &lp_build_flow_push(flow, LP_BUILD_FLOW_SKIP)->skip;
+ if(!skip)
+ return;
+
+ skip->block = lp_build_flow_insert_block(flow);
+ skip->num_variables = flow->num_variables;
+ if(!skip->num_variables) {
+ skip->phi = NULL;
+ return;
+ }
+
+ skip->phi = MALLOC(skip->num_variables * sizeof *skip->phi);
+ if(!skip->phi) {
+ skip->num_variables = 0;
+ return;
+ }
+
+ builder = LLVMCreateBuilder();
+ LLVMPositionBuilderAtEnd(builder, skip->block);
+
+ for(i = 0; i < skip->num_variables; ++i)
+ skip->phi[i] = LLVMBuildPhi(builder, LLVMTypeOf(*flow->variables[i]), "");
+
+ LLVMDisposeBuilder(builder);
+}
+
+
+void
+lp_build_flow_skip_cond_break(struct lp_build_flow_context *flow,
+ LLVMValueRef cond)
+{
+ struct lp_build_flow_skip *skip;
+ LLVMBasicBlockRef current_block;
+ LLVMBasicBlockRef new_block;
+ unsigned i;
+
+ skip = &lp_build_flow_peek(flow, LP_BUILD_FLOW_SKIP)->skip;
+ if(!skip)
+ return;
+
+ current_block = LLVMGetInsertBlock(flow->builder);
+
+ new_block = lp_build_flow_insert_block(flow);
+
+ for(i = 0; i < skip->num_variables; ++i) {
+ assert(*flow->variables[i]);
+ LLVMAddIncoming(skip->phi[i], flow->variables[i], &current_block, 1);
+ }
+
+ LLVMBuildCondBr(flow->builder, cond, skip->block, new_block);
+
+ LLVMPositionBuilderAtEnd(flow->builder, new_block);
+ }
+
+
+void
+lp_build_flow_skip_end(struct lp_build_flow_context *flow)
+{
+ struct lp_build_flow_skip *skip;
+ LLVMBasicBlockRef current_block;
+ unsigned i;
+
+ skip = &lp_build_flow_pop(flow, LP_BUILD_FLOW_SKIP)->skip;
+ if(!skip)
+ return;
+
+ current_block = LLVMGetInsertBlock(flow->builder);
+
+ for(i = 0; i < skip->num_variables; ++i) {
+ assert(*flow->variables[i]);
+ LLVMAddIncoming(skip->phi[i], flow->variables[i], &current_block, 1);
+ *flow->variables[i] = skip->phi[i];
+ }
+
+ LLVMBuildBr(flow->builder, skip->block);
+ LLVMPositionBuilderAtEnd(flow->builder, skip->block);
+
+ FREE(skip->phi);
+}
+
+
+static void
+lp_build_mask_check(struct lp_build_mask_context *mask)
+{
+ LLVMBuilderRef builder = mask->flow->builder;
+ LLVMValueRef cond;
+
+ cond = LLVMBuildICmp(builder,
+ LLVMIntEQ,
+ LLVMBuildBitCast(builder, mask->value, mask->reg_type, ""),
+ LLVMConstNull(mask->reg_type),
+ "");
+
+ lp_build_flow_skip_cond_break(mask->flow, cond);
+}
+
+
+void
+lp_build_mask_begin(struct lp_build_mask_context *mask,
+ struct lp_build_flow_context *flow,
+ union lp_type type,
+ LLVMValueRef value)
+{
+ memset(mask, 0, sizeof *mask);
+
+ mask->flow = flow;
+ mask->reg_type = LLVMIntType(type.width * type.length);
+ mask->value = value;
+
+ lp_build_flow_scope_begin(flow);
+ lp_build_flow_scope_declare(flow, &mask->value);
+ lp_build_flow_skip_begin(flow);
+
+ lp_build_mask_check(mask);
+}
+
+
+void
+lp_build_mask_update(struct lp_build_mask_context *mask,
+ LLVMValueRef value)
+{
+ mask->value = LLVMBuildAnd( mask->flow->builder, mask->value, value, "");
+
+ lp_build_mask_check(mask);
+}
+
+
+LLVMValueRef
+lp_build_mask_end(struct lp_build_mask_context *mask)
+{
+ lp_build_flow_skip_end(mask->flow);
+ lp_build_flow_scope_end(mask->flow);
+ return mask->value;
+}
+
+
+
+void
+lp_build_loop_begin(LLVMBuilderRef builder,
+ LLVMValueRef start,
+ struct lp_build_loop_state *state)
+{
+ LLVMBasicBlockRef block = LLVMGetInsertBlock(builder);
+ LLVMValueRef function = LLVMGetBasicBlockParent(block);
+
+ state->block = LLVMAppendBasicBlock(function, "loop");
+
+ LLVMBuildBr(builder, state->block);
+
+ LLVMPositionBuilderAtEnd(builder, state->block);
+
+ state->counter = LLVMBuildPhi(builder, LLVMTypeOf(start), "");
+
+ LLVMAddIncoming(state->counter, &start, &block, 1);
+
+}
+
+
+void
+lp_build_loop_end(LLVMBuilderRef builder,
+ LLVMValueRef end,
+ LLVMValueRef step,
+ struct lp_build_loop_state *state)
+{
+ LLVMBasicBlockRef block = LLVMGetInsertBlock(builder);
+ LLVMValueRef function = LLVMGetBasicBlockParent(block);
+ LLVMValueRef next;
+ LLVMValueRef cond;
+ LLVMBasicBlockRef after_block;
+
+ if (!step)
+ step = LLVMConstInt(LLVMTypeOf(end), 1, 0);
+
+ next = LLVMBuildAdd(builder, state->counter, step, "");
+
+ cond = LLVMBuildICmp(builder, LLVMIntNE, next, end, "");
+
+ after_block = LLVMAppendBasicBlock(function, "");
+
+ LLVMBuildCondBr(builder, cond, after_block, state->block);
+
+ LLVMAddIncoming(state->counter, &next, &block, 1);
+
+ LLVMPositionBuilderAtEnd(builder, after_block);
+}
+
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_flow.h b/src/gallium/drivers/llvmpipe/lp_bld_flow.h
new file mode 100644
index 0000000000..9d76e3064d
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_flow.h
@@ -0,0 +1,129 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * LLVM control flow build helpers.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef LP_BLD_FLOW_H
+#define LP_BLD_FLOW_H
+
+
+#include <llvm-c/Core.h>
+
+
+union lp_type;
+
+
+struct lp_build_flow_context;
+
+
+struct lp_build_flow_context *
+lp_build_flow_create(LLVMBuilderRef builder);
+
+void
+lp_build_flow_destroy(struct lp_build_flow_context *flow);
+
+void
+lp_build_flow_scope_begin(struct lp_build_flow_context *flow);
+
+void
+lp_build_flow_scope_declare(struct lp_build_flow_context *flow,
+ LLVMValueRef *variable);
+
+void
+lp_build_flow_scope_end(struct lp_build_flow_context *flow);
+
+void
+lp_build_flow_skip_begin(struct lp_build_flow_context *flow);
+
+void
+lp_build_flow_skip_cond_break(struct lp_build_flow_context *flow,
+ LLVMValueRef cond);
+
+void
+lp_build_flow_skip_end(struct lp_build_flow_context *flow);
+
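+/*
+ * A minimal usage sketch of the scope/skip helpers above (the variable
+ * names are hypothetical; this mirrors what lp_build_mask_begin/end do
+ * internally):
+ *
+ *    struct lp_build_flow_context *flow = lp_build_flow_create(builder);
+ *    LLVMValueRef color = initial_value;
+ *
+ *    lp_build_flow_scope_begin(flow);
+ *    lp_build_flow_scope_declare(flow, &color);
+ *    lp_build_flow_skip_begin(flow);
+ *
+ *    ... update color, and break out early when done_cond holds ...
+ *    lp_build_flow_skip_cond_break(flow, done_cond);
+ *
+ *    lp_build_flow_skip_end(flow);
+ *    lp_build_flow_scope_end(flow);
+ *    lp_build_flow_destroy(flow);
+ */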
+
+struct lp_build_mask_context
+{
+ struct lp_build_flow_context *flow;
+
+ LLVMTypeRef reg_type;
+
+ LLVMValueRef value;
+};
+
+
+void
+lp_build_mask_begin(struct lp_build_mask_context *mask,
+ struct lp_build_flow_context *flow,
+ union lp_type type,
+ LLVMValueRef value);
+
+/**
+ * Bitwise AND the mask with the given value, if a previous mask was set.
+ */
+void
+lp_build_mask_update(struct lp_build_mask_context *mask,
+ LLVMValueRef value);
+
+LLVMValueRef
+lp_build_mask_end(struct lp_build_mask_context *mask);
+
+
+/**
+ * LLVM's IR doesn't represent for-loops directly; building one requires
+ * creating basic blocks, branches and phi variables, so it takes a fair
+ * amount of code.
+ *
+ * @sa http://www.llvm.org/docs/tutorial/LangImpl5.html#for
+ */
+struct lp_build_loop_state
+{
+ LLVMBasicBlockRef block;
+ LLVMValueRef counter;
+};
+
+
+void
+lp_build_loop_begin(LLVMBuilderRef builder,
+ LLVMValueRef start,
+ struct lp_build_loop_state *state);
+
+
+void
+lp_build_loop_end(LLVMBuilderRef builder,
+ LLVMValueRef end,
+ LLVMValueRef step,
+ struct lp_build_loop_state *state);
+
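+/*
+ * Illustrative use of the loop helpers (the constants are hypothetical):
+ * emit the loop body between begin and end, indexing with state.counter;
+ * passing NULL as step defaults to an increment of one.
+ *
+ *    struct lp_build_loop_state loop;
+ *    LLVMValueRef zero = LLVMConstInt(LLVMInt32Type(), 0, 0);
+ *    LLVMValueRef count = LLVMConstInt(LLVMInt32Type(), 16, 0);
+ *
+ *    lp_build_loop_begin(builder, zero, &loop);
+ *    ... emit the loop body here ...
+ *    lp_build_loop_end(builder, count, NULL, &loop);
+ */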
+
+
+#endif /* !LP_BLD_FLOW_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_format.h b/src/gallium/drivers/llvmpipe/lp_bld_format.h
new file mode 100644
index 0000000000..5ee0656093
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_format.h
@@ -0,0 +1,119 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef LP_BLD_FORMAT_H
+#define LP_BLD_FORMAT_H
+
+
+/**
+ * @file
+ * Pixel format helpers.
+ */
+
+#include <llvm-c/Core.h>
+
+#include "pipe/p_format.h"
+
+struct util_format_description;
+union lp_type;
+
+
+/**
+ * Unpack a pixel into its RGBA components.
+ *
+ * @param packed the packed pixel value, as an integer.
+ *
+ * @return RGBA in a 4 floats vector.
+ */
+LLVMValueRef
+lp_build_unpack_rgba_aos(LLVMBuilderRef builder,
+ enum pipe_format format,
+ LLVMValueRef packed);
+
+
+/**
+ * Pack a pixel.
+ *
+ * @param rgba 4 float vector with the unpacked components.
+ */
+LLVMValueRef
+lp_build_pack_rgba_aos(LLVMBuilderRef builder,
+ enum pipe_format format,
+ LLVMValueRef rgba);
+
+
+/**
+ * Load a pixel into its RGBA components.
+ *
+ * @param ptr value with the pointer to the packed pixel. Pointer type is
+ * irrelevant.
+ *
+ * @return RGBA in a 4 floats vector.
+ */
+LLVMValueRef
+lp_build_load_rgba_aos(LLVMBuilderRef builder,
+ enum pipe_format format,
+ LLVMValueRef ptr);
+
+
+/**
+ * Store a pixel.
+ *
+ * @param rgba 4 float vector with the unpacked components.
+ */
+void
+lp_build_store_rgba_aos(LLVMBuilderRef builder,
+ enum pipe_format format,
+ LLVMValueRef ptr,
+ LLVMValueRef rgba);
+
+LLVMValueRef
+lp_build_gather(LLVMBuilderRef builder,
+ unsigned length,
+ unsigned src_width,
+ unsigned dst_width,
+ LLVMValueRef base_ptr,
+ LLVMValueRef offsets);
+
+
+void
+lp_build_unpack_rgba_soa(LLVMBuilderRef builder,
+ const struct util_format_description *format_desc,
+ union lp_type type,
+ LLVMValueRef packed,
+ LLVMValueRef *rgba);
+
+
+void
+lp_build_load_rgba_soa(LLVMBuilderRef builder,
+ const struct util_format_description *format_desc,
+ union lp_type type,
+ LLVMValueRef base_ptr,
+ LLVMValueRef offsets,
+ LLVMValueRef *rgba);
+
+#endif /* !LP_BLD_FORMAT_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_format_aos.c b/src/gallium/drivers/llvmpipe/lp_bld_format_aos.c
new file mode 100644
index 0000000000..b9b5d84bed
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_format_aos.c
@@ -0,0 +1,303 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "util/u_format.h"
+
+#include "lp_bld_format.h"
+
+
+LLVMValueRef
+lp_build_unpack_rgba_aos(LLVMBuilderRef builder,
+ enum pipe_format format,
+ LLVMValueRef packed)
+{
+ const struct util_format_description *desc;
+ LLVMTypeRef type;
+ LLVMValueRef shifted, casted, scaled, masked;
+ LLVMValueRef shifts[4];
+ LLVMValueRef masks[4];
+ LLVMValueRef scales[4];
+ LLVMValueRef swizzles[4];
+ LLVMValueRef aux[4];
+ boolean normalized;
+ int empty_channel;
+ unsigned shift;
+ unsigned i;
+
+ desc = util_format_description(format);
+
+ /* FIXME: Support more formats */
+ assert(desc->layout == UTIL_FORMAT_LAYOUT_ARITH);
+ assert(desc->block.width == 1);
+ assert(desc->block.height == 1);
+ assert(desc->block.bits <= 32);
+
+ type = LLVMIntType(desc->block.bits);
+
+ /* Do the intermediate integer computations with 32bit integers since it
+ * matches floating point size */
+ if (desc->block.bits < 32)
+ packed = LLVMBuildZExt(builder, packed, LLVMInt32Type(), "");
+
+ /* Broadcast the packed value to all four channels */
+ packed = LLVMBuildInsertElement(builder,
+ LLVMGetUndef(LLVMVectorType(LLVMInt32Type(), 4)),
+ packed,
+ LLVMConstNull(LLVMInt32Type()),
+ "");
+ packed = LLVMBuildShuffleVector(builder,
+ packed,
+ LLVMGetUndef(LLVMVectorType(LLVMInt32Type(), 4)),
+ LLVMConstNull(LLVMVectorType(LLVMInt32Type(), 4)),
+ "");
+
+ /* Initialize vector constants */
+ normalized = FALSE;
+ empty_channel = -1;
+ shift = 0;
+ for (i = 0; i < 4; ++i) {
+ unsigned bits = desc->channel[i].size;
+
+ if (desc->channel[i].type == UTIL_FORMAT_TYPE_VOID) {
+ shifts[i] = LLVMGetUndef(LLVMInt32Type());
+ masks[i] = LLVMConstNull(LLVMInt32Type());
+ scales[i] = LLVMConstNull(LLVMFloatType());
+ empty_channel = i;
+ }
+ else {
+ unsigned mask = (1 << bits) - 1;
+
+ assert(desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED);
+ assert(bits < 32);
+
+ shifts[i] = LLVMConstInt(LLVMInt32Type(), shift, 0);
+ masks[i] = LLVMConstInt(LLVMInt32Type(), mask, 0);
+
+ if (desc->channel[i].normalized) {
+ scales[i] = LLVMConstReal(LLVMFloatType(), 1.0/mask);
+ normalized = TRUE;
+ }
+ else
+ scales[i] = LLVMConstReal(LLVMFloatType(), 1.0);
+ }
+
+ shift += bits;
+ }
+
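+ /*
+ * For example (illustrative), with four 8-bit unsigned normalized channels
+ * packed into one dword the loop above yields shifts = {0, 8, 16, 24},
+ * masks = {0xff, 0xff, 0xff, 0xff} and scales = {1/255, ...}, so the
+ * shift/mask/scale sequence below turns the packed value into four floats
+ * in [0, 1].
+ */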
+ shifted = LLVMBuildLShr(builder, packed, LLVMConstVector(shifts, 4), "");
+ masked = LLVMBuildAnd(builder, shifted, LLVMConstVector(masks, 4), "");
+ /* UIToFP can't be expressed in SSE2 */
+ casted = LLVMBuildSIToFP(builder, masked, LLVMVectorType(LLVMFloatType(), 4), "");
+
+ if (normalized)
+ scaled = LLVMBuildMul(builder, casted, LLVMConstVector(scales, 4), "");
+ else
+ scaled = casted;
+
+ for (i = 0; i < 4; ++i)
+ aux[i] = LLVMGetUndef(LLVMFloatType());
+
+ for (i = 0; i < 4; ++i) {
+ enum util_format_swizzle swizzle = desc->swizzle[i];
+
+ switch (swizzle) {
+ case UTIL_FORMAT_SWIZZLE_X:
+ case UTIL_FORMAT_SWIZZLE_Y:
+ case UTIL_FORMAT_SWIZZLE_Z:
+ case UTIL_FORMAT_SWIZZLE_W:
+ swizzles[i] = LLVMConstInt(LLVMInt32Type(), swizzle, 0);
+ break;
+ case UTIL_FORMAT_SWIZZLE_0:
+ assert(empty_channel >= 0);
+ swizzles[i] = LLVMConstInt(LLVMInt32Type(), empty_channel, 0);
+ break;
+ case UTIL_FORMAT_SWIZZLE_1:
+ swizzles[i] = LLVMConstInt(LLVMInt32Type(), 4, 0);
+ aux[0] = LLVMConstReal(LLVMFloatType(), 1.0);
+ break;
+ case UTIL_FORMAT_SWIZZLE_NONE:
+ swizzles[i] = LLVMGetUndef(LLVMInt32Type());
+ assert(0);
+ break;
+ }
+ }
+
+ return LLVMBuildShuffleVector(builder, scaled, LLVMConstVector(aux, 4), LLVMConstVector(swizzles, 4), "");
+}
+
+
+LLVMValueRef
+lp_build_pack_rgba_aos(LLVMBuilderRef builder,
+ enum pipe_format format,
+ LLVMValueRef rgba)
+{
+ const struct util_format_description *desc;
+ LLVMTypeRef type;
+ LLVMValueRef packed = NULL;
+ LLVMValueRef swizzles[4];
+ LLVMValueRef shifted, casted, scaled, unswizzled;
+ LLVMValueRef shifts[4];
+ LLVMValueRef scales[4];
+ boolean normalized;
+ unsigned shift;
+ unsigned i, j;
+
+ desc = util_format_description(format);
+
+ assert(desc->layout == UTIL_FORMAT_LAYOUT_ARITH);
+ assert(desc->block.width == 1);
+ assert(desc->block.height == 1);
+
+ type = LLVMIntType(desc->block.bits);
+
+ /* Unswizzle the color components into the source vector. */
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 4; ++j) {
+ if (desc->swizzle[j] == i)
+ break;
+ }
+ if (j < 4)
+ swizzles[i] = LLVMConstInt(LLVMInt32Type(), j, 0);
+ else
+ swizzles[i] = LLVMGetUndef(LLVMInt32Type());
+ }
+
+ unswizzled = LLVMBuildShuffleVector(builder, rgba,
+ LLVMGetUndef(LLVMVectorType(LLVMFloatType(), 4)),
+ LLVMConstVector(swizzles, 4), "");
+
+ normalized = FALSE;
+ shift = 0;
+ for (i = 0; i < 4; ++i) {
+ unsigned bits = desc->channel[i].size;
+
+ if (desc->channel[i].type == UTIL_FORMAT_TYPE_VOID) {
+ shifts[i] = LLVMGetUndef(LLVMInt32Type());
+ scales[i] = LLVMGetUndef(LLVMFloatType());
+ }
+ else {
+ unsigned mask = (1 << bits) - 1;
+
+ assert(desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED);
+ assert(bits < 32);
+
+ shifts[i] = LLVMConstInt(LLVMInt32Type(), shift, 0);
+
+ if (desc->channel[i].normalized) {
+ scales[i] = LLVMConstReal(LLVMFloatType(), mask);
+ normalized = TRUE;
+ }
+ else
+ scales[i] = LLVMConstReal(LLVMFloatType(), 1.0);
+ }
+
+ shift += bits;
+ }
+
+ if (normalized)
+ scaled = LLVMBuildMul(builder, unswizzled, LLVMConstVector(scales, 4), "");
+ else
+ scaled = unswizzled;
+
+ casted = LLVMBuildFPToSI(builder, scaled, LLVMVectorType(LLVMInt32Type(), 4), "");
+
+ shifted = LLVMBuildShl(builder, casted, LLVMConstVector(shifts, 4), "");
+
+ /* Bitwise or all components */
+ for (i = 0; i < 4; ++i) {
+ if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
+ LLVMValueRef component = LLVMBuildExtractElement(builder, shifted, LLVMConstInt(LLVMInt32Type(), i, 0), "");
+ if (packed)
+ packed = LLVMBuildOr(builder, packed, component, "");
+ else
+ packed = component;
+ }
+ }
+
+ if (!packed)
+ packed = LLVMGetUndef(LLVMInt32Type());
+
+ if (desc->block.bits < 32)
+ packed = LLVMBuildTrunc(builder, packed, type, "");
+
+ return packed;
+}
+
+
+LLVMValueRef
+lp_build_load_rgba_aos(LLVMBuilderRef builder,
+ enum pipe_format format,
+ LLVMValueRef ptr)
+{
+ const struct util_format_description *desc;
+ LLVMTypeRef type;
+ LLVMValueRef packed;
+
+ desc = util_format_description(format);
+
+ /* FIXME: Support more formats */
+ assert(desc->layout == UTIL_FORMAT_LAYOUT_ARITH);
+ assert(desc->block.width == 1);
+ assert(desc->block.height == 1);
+ assert(desc->block.bits <= 32);
+
+ type = LLVMIntType(desc->block.bits);
+
+ ptr = LLVMBuildBitCast(builder, ptr, LLVMPointerType(type, 0), "");
+
+ packed = LLVMBuildLoad(builder, ptr, "");
+
+ return lp_build_unpack_rgba_aos(builder, format, packed);
+}
+
+
+void
+lp_build_store_rgba_aos(LLVMBuilderRef builder,
+ enum pipe_format format,
+ LLVMValueRef ptr,
+ LLVMValueRef rgba)
+{
+ const struct util_format_description *desc;
+ LLVMTypeRef type;
+ LLVMValueRef packed;
+
+ desc = util_format_description(format);
+
+ assert(desc->layout == UTIL_FORMAT_LAYOUT_ARITH);
+ assert(desc->block.width == 1);
+ assert(desc->block.height == 1);
+
+ type = LLVMIntType(desc->block.bits);
+
+ packed = lp_build_pack_rgba_aos(builder, format, rgba);
+
+ ptr = LLVMBuildBitCast(builder, ptr, LLVMPointerType(type, 0), "");
+
+ LLVMBuildStore(builder, packed, ptr);
+}
+
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_format_soa.c b/src/gallium/drivers/llvmpipe/lp_bld_format_soa.c
new file mode 100644
index 0000000000..569e8d10a3
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_format_soa.c
@@ -0,0 +1,208 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "util/u_format.h"
+
+#include "lp_bld_type.h"
+#include "lp_bld_const.h"
+#include "lp_bld_conv.h"
+#include "lp_bld_format.h"
+
+
+/**
+ * Gather elements from scatter positions in memory into a single vector.
+ *
+ * @param src_width src element width
+ * @param dst_width result element width (source will be expanded to fit)
+ * @param length length of the offsets vector (number of elements to gather)
+ * @param base_ptr base pointer, should be a i8 pointer type.
+ * @param offsets vector with offsets
+ */
+LLVMValueRef
+lp_build_gather(LLVMBuilderRef builder,
+ unsigned length,
+ unsigned src_width,
+ unsigned dst_width,
+ LLVMValueRef base_ptr,
+ LLVMValueRef offsets)
+{
+ LLVMTypeRef src_type = LLVMIntType(src_width);
+ LLVMTypeRef src_ptr_type = LLVMPointerType(src_type, 0);
+ LLVMTypeRef dst_elem_type = LLVMIntType(dst_width);
+ LLVMTypeRef dst_vec_type = LLVMVectorType(dst_elem_type, length);
+ LLVMValueRef res;
+ unsigned i;
+
+ res = LLVMGetUndef(dst_vec_type);
+ for(i = 0; i < length; ++i) {
+ LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0);
+ LLVMValueRef elem_offset;
+ LLVMValueRef elem_ptr;
+ LLVMValueRef elem;
+
+ elem_offset = LLVMBuildExtractElement(builder, offsets, index, "");
+ elem_ptr = LLVMBuildGEP(builder, base_ptr, &elem_offset, 1, "");
+ elem_ptr = LLVMBuildBitCast(builder, elem_ptr, src_ptr_type, "");
+ elem = LLVMBuildLoad(builder, elem_ptr, "");
+
+ assert(src_width <= dst_width);
+ if(src_width > dst_width)
+ elem = LLVMBuildTrunc(builder, elem, dst_elem_type, "");
+ if(src_width < dst_width)
+ elem = LLVMBuildZExt(builder, elem, dst_elem_type, "");
+
+ res = LLVMBuildInsertElement(builder, res, elem, index, "");
+ }
+
+ return res;
+}
+
+
+static LLVMValueRef
+lp_build_format_swizzle(union lp_type type,
+ const LLVMValueRef *inputs,
+ enum util_format_swizzle swizzle)
+{
+ switch (swizzle) {
+ case UTIL_FORMAT_SWIZZLE_X:
+ case UTIL_FORMAT_SWIZZLE_Y:
+ case UTIL_FORMAT_SWIZZLE_Z:
+ case UTIL_FORMAT_SWIZZLE_W:
+ return inputs[swizzle];
+ case UTIL_FORMAT_SWIZZLE_0:
+ return lp_build_zero(type);
+ case UTIL_FORMAT_SWIZZLE_1:
+ return lp_build_one(type);
+ case UTIL_FORMAT_SWIZZLE_NONE:
+ return lp_build_undef(type);
+ default:
+ assert(0);
+ return lp_build_undef(type);
+ }
+}
+
+
+void
+lp_build_unpack_rgba_soa(LLVMBuilderRef builder,
+ const struct util_format_description *format_desc,
+ union lp_type type,
+ LLVMValueRef packed,
+ LLVMValueRef *rgba)
+{
+ LLVMValueRef inputs[4];
+ unsigned start;
+ unsigned chan;
+
+ /* FIXME: Support more formats */
+ assert(format_desc->layout == UTIL_FORMAT_LAYOUT_ARITH);
+ assert(format_desc->block.width == 1);
+ assert(format_desc->block.height == 1);
+ assert(format_desc->block.bits <= 32);
+
+ /* Decode the input vector components */
+ start = 0;
+ for (chan = 0; chan < 4; ++chan) {
+ unsigned width = format_desc->channel[chan].size;
+ unsigned stop = start + width;
+ LLVMValueRef input;
+
+ input = packed;
+
+ switch(format_desc->channel[chan].type) {
+ case UTIL_FORMAT_TYPE_VOID:
+ input = NULL;
+ break;
+
+ case UTIL_FORMAT_TYPE_UNSIGNED:
+ if(type.floating) {
+ if(start)
+ input = LLVMBuildLShr(builder, input, lp_build_int_const_scalar(type, start), "");
+ if(stop < format_desc->block.bits) {
+ unsigned mask = ((unsigned long long)1 << width) - 1;
+ input = LLVMBuildAnd(builder, input, lp_build_int_const_scalar(type, mask), "");
+ }
+
+ if(format_desc->channel[chan].normalized)
+ input = lp_build_unsigned_norm_to_float(builder, width, type, input);
+ else
+ input = LLVMBuildFPToSI(builder, input, lp_build_vec_type(type), "");
+ }
+ else {
+ /* FIXME */
+ assert(0);
+ input = lp_build_undef(type);
+ }
+ break;
+
+ default:
+ /* fall through */
+ input = lp_build_undef(type);
+ break;
+ }
+
+ inputs[chan] = input;
+
+ start = stop;
+ }
+
+ if(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS) {
+ enum util_format_swizzle swizzle = format_desc->swizzle[0];
+ LLVMValueRef depth = lp_build_format_swizzle(type, inputs, swizzle);
+ rgba[2] = rgba[1] = rgba[0] = depth;
+ rgba[3] = lp_build_one(type);
+ }
+ else {
+ for (chan = 0; chan < 4; ++chan) {
+ enum util_format_swizzle swizzle = format_desc->swizzle[chan];
+ rgba[chan] = lp_build_format_swizzle(type, inputs, swizzle);
+ }
+ }
+}
+
+
+void
+lp_build_load_rgba_soa(LLVMBuilderRef builder,
+ const struct util_format_description *format_desc,
+ union lp_type type,
+ LLVMValueRef base_ptr,
+ LLVMValueRef offsets,
+ LLVMValueRef *rgba)
+{
+ LLVMValueRef packed;
+
+ assert(format_desc->layout == UTIL_FORMAT_LAYOUT_ARITH);
+ assert(format_desc->block.width == 1);
+ assert(format_desc->block.height == 1);
+ assert(format_desc->block.bits <= 32);
+
+ packed = lp_build_gather(builder,
+ type.length, format_desc->block.bits, type.width,
+ base_ptr, offsets);
+
+ lp_build_unpack_rgba_soa(builder, format_desc, type, packed, rgba);
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_interp.c b/src/gallium/drivers/llvmpipe/lp_bld_interp.c
new file mode 100644
index 0000000000..cfe20a0d75
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_interp.c
@@ -0,0 +1,377 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Position and shader input interpolation.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
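+/*
+ * The helpers below evaluate the usual plane equation per attribute channel;
+ * roughly, the generated IR computes
+ *
+ *    value(x, y) = a0 + dadx * x + dady * y            (linear)
+ *    value(x, y) = (a0 + dadx * x + dady * y) / w      (perspective)
+ *
+ * while constant interpolation just uses a0. Stepping a quad by
+ * (xstep, ystep) adds dadx * xstep and dady * ystep to the pre-divide value.
+ */
+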
+#include "pipe/p_shader_tokens.h"
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+#include "util/u_math.h"
+#include "tgsi/tgsi_parse.h"
+#include "lp_bld_debug.h"
+#include "lp_bld_const.h"
+#include "lp_bld_arit.h"
+#include "lp_bld_swizzle.h"
+#include "lp_bld_interp.h"
+
+
+static void
+attrib_name(LLVMValueRef val, unsigned attrib, unsigned chan, const char *suffix)
+{
+ if(attrib == 0)
+ lp_build_name(val, "pos.%c%s", "xyzw"[chan], suffix);
+ else
+ lp_build_name(val, "input%u.%c%s", attrib - 1, "xyzw"[chan], suffix);
+}
+
+
+static void
+coeffs_init(struct lp_build_interp_soa_context *bld,
+ LLVMValueRef a0_ptr,
+ LLVMValueRef dadx_ptr,
+ LLVMValueRef dady_ptr)
+{
+ LLVMBuilderRef builder = bld->base.builder;
+ unsigned attrib;
+ unsigned chan;
+
+ for(attrib = 0; attrib < bld->num_attribs; ++attrib) {
+ unsigned mask = bld->mask[attrib];
+ unsigned mode = bld->mode[attrib];
+ for(chan = 0; chan < NUM_CHANNELS; ++chan) {
+ if(mask & (1 << chan)) {
+ LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), attrib*NUM_CHANNELS + chan, 0);
+ LLVMValueRef a0 = NULL;
+ LLVMValueRef dadx = NULL;
+ LLVMValueRef dady = NULL;
+
+ switch( mode ) {
+ case TGSI_INTERPOLATE_PERSPECTIVE:
+ /* fall-through */
+
+ case TGSI_INTERPOLATE_LINEAR:
+ dadx = LLVMBuildLoad(builder, LLVMBuildGEP(builder, dadx_ptr, &index, 1, ""), "");
+ dady = LLVMBuildLoad(builder, LLVMBuildGEP(builder, dady_ptr, &index, 1, ""), "");
+ dadx = lp_build_broadcast_scalar(&bld->base, dadx);
+ dady = lp_build_broadcast_scalar(&bld->base, dady);
+ attrib_name(dadx, attrib, chan, ".dadx");
+ attrib_name(dady, attrib, chan, ".dady");
+ /* fall-through */
+
+ case TGSI_INTERPOLATE_CONSTANT:
+ a0 = LLVMBuildLoad(builder, LLVMBuildGEP(builder, a0_ptr, &index, 1, ""), "");
+ a0 = lp_build_broadcast_scalar(&bld->base, a0);
+ attrib_name(a0, attrib, chan, ".dady");
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+
+ bld->a0 [attrib][chan] = a0;
+ bld->dadx[attrib][chan] = dadx;
+ bld->dady[attrib][chan] = dady;
+ }
+ }
+ }
+}
+
+
+/**
+ * Small vector x scale multiplication optimization.
+ *
+ * TODO: Should be elsewhere.
+ */
+static LLVMValueRef
+coeff_multiply(struct lp_build_interp_soa_context *bld,
+ LLVMValueRef coeff,
+ int step)
+{
+ LLVMValueRef factor;
+
+ switch(step) {
+ case 0:
+ return bld->base.zero;
+ case 1:
+ return coeff;
+ case 2:
+ return lp_build_add(&bld->base, coeff, coeff);
+ default:
+ factor = lp_build_const_scalar(bld->base.type, (double)step);
+ return lp_build_mul(&bld->base, coeff, factor);
+ }
+}
+
+
+/**
+ * Multiply the dadx and dady with the xstep and ystep respectively.
+ */
+static void
+coeffs_update(struct lp_build_interp_soa_context *bld)
+{
+ unsigned attrib;
+ unsigned chan;
+
+ for(attrib = 0; attrib < bld->num_attribs; ++attrib) {
+ unsigned mask = bld->mask[attrib];
+ unsigned mode = bld->mode[attrib];
+ if (mode != TGSI_INTERPOLATE_CONSTANT) {
+ for(chan = 0; chan < NUM_CHANNELS; ++chan) {
+ if(mask & (1 << chan)) {
+ bld->dadx[attrib][chan] = coeff_multiply(bld, bld->dadx[attrib][chan], bld->xstep);
+ bld->dady[attrib][chan] = coeff_multiply(bld, bld->dady[attrib][chan], bld->ystep);
+ }
+ }
+ }
+ }
+}
+
+
+static void
+attribs_init(struct lp_build_interp_soa_context *bld)
+{
+ LLVMValueRef x = bld->pos[0];
+ LLVMValueRef y = bld->pos[1];
+ LLVMValueRef oow = NULL;
+ unsigned attrib;
+ unsigned chan;
+
+ for(attrib = 0; attrib < bld->num_attribs; ++attrib) {
+ unsigned mask = bld->mask[attrib];
+ unsigned mode = bld->mode[attrib];
+ for(chan = 0; chan < NUM_CHANNELS; ++chan) {
+ if(mask & (1 << chan)) {
+ LLVMValueRef a0 = bld->a0 [attrib][chan];
+ LLVMValueRef dadx = bld->dadx[attrib][chan];
+ LLVMValueRef dady = bld->dady[attrib][chan];
+ LLVMValueRef res;
+
+ res = a0;
+
+ if (mode != TGSI_INTERPOLATE_CONSTANT) {
+ res = lp_build_add(&bld->base, res, lp_build_mul(&bld->base, x, dadx));
+ res = lp_build_add(&bld->base, res, lp_build_mul(&bld->base, y, dady));
+ }
+
+ /* Keep the value of the attribute before the perspective divide for faster updates */
+ bld->attribs_pre[attrib][chan] = res;
+
+ if (mode == TGSI_INTERPOLATE_PERSPECTIVE) {
+ LLVMValueRef w = bld->pos[3];
+ assert(attrib != 0);
+ if(!oow)
+ oow = lp_build_rcp(&bld->base, w);
+ res = lp_build_mul(&bld->base, res, oow);
+ }
+
+ attrib_name(res, attrib, chan, "");
+
+ bld->attribs[attrib][chan] = res;
+ }
+ }
+ }
+}
+
+
+static void
+attribs_update(struct lp_build_interp_soa_context *bld)
+{
+ LLVMValueRef oow = NULL;
+ unsigned attrib;
+ unsigned chan;
+
+ for(attrib = 0; attrib < bld->num_attribs; ++attrib) {
+ unsigned mask = bld->mask[attrib];
+ unsigned mode = bld->mode[attrib];
+
+ if (mode != TGSI_INTERPOLATE_CONSTANT) {
+ for(chan = 0; chan < NUM_CHANNELS; ++chan) {
+ if(mask & (1 << chan)) {
+ LLVMValueRef dadx = bld->dadx[attrib][chan];
+ LLVMValueRef dady = bld->dady[attrib][chan];
+ LLVMValueRef res;
+
+ res = bld->attribs_pre[attrib][chan];
+
+ if(bld->xstep)
+ res = lp_build_add(&bld->base, res, dadx);
+
+ if(bld->ystep)
+ res = lp_build_add(&bld->base, res, dady);
+
+ bld->attribs_pre[attrib][chan] = res;
+
+ if (mode == TGSI_INTERPOLATE_PERSPECTIVE) {
+ LLVMValueRef w = bld->pos[3];
+ assert(attrib != 0);
+ if(!oow)
+ oow = lp_build_rcp(&bld->base, w);
+ res = lp_build_mul(&bld->base, res, oow);
+ }
+
+ attrib_name(res, attrib, chan, "");
+
+ bld->attribs[attrib][chan] = res;
+ }
+ }
+ }
+ }
+}
+
+
+/**
+ * Generate the position vectors.
+ *
+ * Parameters x0 and y0 are integer values holding the quad's upper-left coordinates.
+ */
+static void
+pos_init(struct lp_build_interp_soa_context *bld,
+ LLVMValueRef x0,
+ LLVMValueRef y0)
+{
+ lp_build_name(x0, "pos.x");
+ lp_build_name(y0, "pos.y");
+
+ bld->attribs[0][0] = x0;
+ bld->attribs[0][1] = y0;
+}
+
+
+static void
+pos_update(struct lp_build_interp_soa_context *bld)
+{
+ LLVMValueRef x = bld->attribs[0][0];
+ LLVMValueRef y = bld->attribs[0][1];
+
+ if(bld->xstep)
+ x = lp_build_add(&bld->base, x, lp_build_const_scalar(bld->base.type, bld->xstep));
+
+ if(bld->ystep)
+ y = lp_build_add(&bld->base, y, lp_build_const_scalar(bld->base.type, bld->ystep));
+
+ lp_build_name(x, "pos.x");
+ lp_build_name(y, "pos.y");
+
+ bld->attribs[0][0] = x;
+ bld->attribs[0][1] = y;
+}
+
+
+void
+lp_build_interp_soa_init(struct lp_build_interp_soa_context *bld,
+ const struct tgsi_token *tokens,
+ LLVMBuilderRef builder,
+ union lp_type type,
+ LLVMValueRef a0_ptr,
+ LLVMValueRef dadx_ptr,
+ LLVMValueRef dady_ptr,
+ LLVMValueRef x0,
+ LLVMValueRef y0,
+ int xstep,
+ int ystep)
+{
+ struct tgsi_parse_context parse;
+ struct tgsi_full_declaration *decl;
+
+ memset(bld, 0, sizeof *bld);
+
+ lp_build_context_init(&bld->base, builder, type);
+
+ /* For convenience */
+ bld->pos = bld->attribs[0];
+ bld->inputs = (const LLVMValueRef (*)[NUM_CHANNELS]) bld->attribs[1];
+
+ /* Position */
+ bld->num_attribs = 1;
+ bld->mask[0] = TGSI_WRITEMASK_ZW;
+ bld->mode[0] = TGSI_INTERPOLATE_LINEAR;
+
+ /* Inputs */
+ tgsi_parse_init( &parse, tokens );
+ while( !tgsi_parse_end_of_tokens( &parse ) ) {
+ tgsi_parse_token( &parse );
+
+ switch( parse.FullToken.Token.Type ) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ decl = &parse.FullToken.FullDeclaration;
+ if( decl->Declaration.File == TGSI_FILE_INPUT ) {
+ unsigned first, last, mask;
+ unsigned attrib;
+
+ first = decl->DeclarationRange.First;
+ last = decl->DeclarationRange.Last;
+ mask = decl->Declaration.UsageMask;
+
+ for( attrib = first; attrib <= last; ++attrib ) {
+ bld->mask[1 + attrib] = mask;
+ bld->mode[1 + attrib] = decl->Declaration.Interpolate;
+ }
+
+ bld->num_attribs = MAX2(bld->num_attribs, 1 + last + 1);
+ }
+ break;
+
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ break;
+
+ default:
+ assert( 0 );
+ }
+ }
+ tgsi_parse_free( &parse );
+
+ coeffs_init(bld, a0_ptr, dadx_ptr, dady_ptr);
+
+ pos_init(bld, x0, y0);
+
+ attribs_init(bld);
+
+ bld->xstep = xstep;
+ bld->ystep = ystep;
+
+ coeffs_update(bld);
+}
+
+
+/**
+ * Advance the position and inputs with the xstep and ystep.
+ */
+void
+lp_build_interp_soa_update(struct lp_build_interp_soa_context *bld)
+{
+ pos_update(bld);
+
+ attribs_update(bld);
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_interp.h b/src/gallium/drivers/llvmpipe/lp_bld_interp.h
new file mode 100644
index 0000000000..9194f6233a
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_interp.h
@@ -0,0 +1,99 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Position and shader input interpolation.
+ *
+ * Special attention is given to the interpolation of side-by-side quads:
+ * multiplications are done only for the first quad, while the inputs of
+ * subsequent quads are interpolated exclusively with additions, plus a
+ * perspective divide where necessary.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef LP_BLD_INTERP_H
+#define LP_BLD_INTERP_H
+
+
+#include <llvm-c/Core.h>
+
+#include "tgsi/tgsi_exec.h"
+
+#include "lp_bld_type.h"
+
+
+struct tgsi_token;
+
+
+struct lp_build_interp_soa_context
+{
+ struct lp_build_context base;
+
+ unsigned num_attribs;
+ unsigned mask[1 + PIPE_MAX_SHADER_INPUTS];
+ unsigned mode[1 + PIPE_MAX_SHADER_INPUTS];
+
+ LLVMValueRef a0 [1 + PIPE_MAX_SHADER_INPUTS][NUM_CHANNELS];
+ LLVMValueRef dadx[1 + PIPE_MAX_SHADER_INPUTS][NUM_CHANNELS];
+ LLVMValueRef dady[1 + PIPE_MAX_SHADER_INPUTS][NUM_CHANNELS];
+
+ int xstep;
+ int ystep;
+
+ /* Attribute values before perspective divide */
+ LLVMValueRef attribs_pre[1 + PIPE_MAX_SHADER_INPUTS][NUM_CHANNELS];
+
+ LLVMValueRef attribs[1 + PIPE_MAX_SHADER_INPUTS][NUM_CHANNELS];
+
+ /*
+ * Convenience pointers. Callers may access these directly.
+ */
+ const LLVMValueRef *pos;
+ const LLVMValueRef (*inputs)[NUM_CHANNELS];
+};
+
+
+void
+lp_build_interp_soa_init(struct lp_build_interp_soa_context *bld,
+ const struct tgsi_token *tokens,
+ LLVMBuilderRef builder,
+ union lp_type type,
+ LLVMValueRef a0_ptr,
+ LLVMValueRef dadx_ptr,
+ LLVMValueRef dady_ptr,
+ LLVMValueRef x0,
+ LLVMValueRef y0,
+ int xstep,
+ int ystep);
+
+void
+lp_build_interp_soa_update(struct lp_build_interp_soa_context *bld);
+
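+/*
+ * Illustrative usage sketch -- tokens, a0_ptr, dadx_ptr, dady_ptr, x0, y0,
+ * xstep, ystep, num_quads and emit_quad() are hypothetical values/helpers
+ * supplied by the caller's shader generation code.  Only the first quad pays
+ * for the multiplies; subsequent quads are advanced with additions:
+ *
+ *    struct lp_build_interp_soa_context interp;
+ *    unsigned q;
+ *
+ *    lp_build_interp_soa_init(&interp, tokens, builder, type,
+ *                             a0_ptr, dadx_ptr, dady_ptr,
+ *                             x0, y0, xstep, ystep);
+ *    for(q = 0; q < num_quads; ++q) {
+ *       emit_quad(builder, interp.pos, interp.inputs);
+ *       lp_build_interp_soa_update(&interp);
+ *    }
+ */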
+
+#endif /* LP_BLD_INTERP_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_intr.c b/src/gallium/drivers/llvmpipe/lp_bld_intr.c
new file mode 100644
index 0000000000..9895749d56
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_intr.c
@@ -0,0 +1,192 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * @file
+ * Helpers for emitting intrinsic calls.
+ *
+ * Vanilla LLVM IR doesn't represent all the basic arithmetic operations we
+ * care about, and it is often necessary to resort to target-specific
+ * intrinsics for performance or convenience.
+ *
+ * Ideally we would like to stay away from target-specific intrinsics and
+ * move all the instruction selection logic into upstream LLVM where it belongs.
+ *
+ * These functions are also used for calling C functions provided by us from
+ * generated LLVM code.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#include "util/u_debug.h"
+
+#include "lp_bld_intr.h"
+
+
+LLVMValueRef
+lp_declare_intrinsic(LLVMModuleRef module,
+ const char *name,
+ LLVMTypeRef ret_type,
+ LLVMTypeRef *arg_types,
+ unsigned num_args)
+{
+ LLVMTypeRef function_type;
+ LLVMValueRef function;
+
+ assert(!LLVMGetNamedFunction(module, name));
+
+ function_type = LLVMFunctionType(ret_type, arg_types, num_args, 0);
+ function = LLVMAddFunction(module, name, function_type);
+
+ LLVMSetFunctionCallConv(function, LLVMCCallConv);
+ LLVMSetLinkage(function, LLVMExternalLinkage);
+
+ assert(LLVMIsDeclaration(function));
+
+ if(name[0] == 'l' &&
+ name[1] == 'l' &&
+ name[2] == 'v' &&
+ name[3] == 'm' &&
+ name[4] == '.')
+ assert(LLVMGetIntrinsicID(function));
+
+ return function;
+}
+
+
+LLVMValueRef
+lp_build_intrinsic(LLVMBuilderRef builder,
+ const char *name,
+ LLVMTypeRef ret_type,
+ LLVMValueRef *args,
+ unsigned num_args)
+{
+ LLVMModuleRef module = LLVMGetGlobalParent(LLVMGetBasicBlockParent(LLVMGetInsertBlock(builder)));
+ LLVMValueRef function;
+
+ function = LLVMGetNamedFunction(module, name);
+ if(!function) {
+ LLVMTypeRef arg_types[LP_MAX_FUNC_ARGS];
+ unsigned i;
+
+ assert(num_args <= LP_MAX_FUNC_ARGS);
+
+ for(i = 0; i < num_args; ++i) {
+ assert(args[i]);
+ arg_types[i] = LLVMTypeOf(args[i]);
+ }
+
+ function = lp_declare_intrinsic(module, name, ret_type, arg_types, num_args);
+ }
+
+ return LLVMBuildCall(builder, function, args, num_args, "");
+}
+
+
+LLVMValueRef
+lp_build_intrinsic_unary(LLVMBuilderRef builder,
+ const char *name,
+ LLVMTypeRef ret_type,
+ LLVMValueRef a)
+{
+ return lp_build_intrinsic(builder, name, ret_type, &a, 1);
+}
+
+
+LLVMValueRef
+lp_build_intrinsic_binary(LLVMBuilderRef builder,
+ const char *name,
+ LLVMTypeRef ret_type,
+ LLVMValueRef a,
+ LLVMValueRef b)
+{
+ LLVMValueRef args[2];
+
+ args[0] = a;
+ args[1] = b;
+
+ return lp_build_intrinsic(builder, name, ret_type, args, 2);
+}
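+
+
+/*
+ * Illustrative usage sketch -- the a and b operands are hypothetical 4xfloat
+ * vectors supplied by the caller; "llvm.x86.sse.max.ps" is a standard LLVM
+ * intrinsic name:
+ *
+ *    LLVMTypeRef f32x4 = LLVMVectorType(LLVMFloatType(), 4);
+ *    LLVMValueRef max = lp_build_intrinsic_binary(builder,
+ *                                                 "llvm.x86.sse.max.ps",
+ *                                                 f32x4, a, b);
+ */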
+
+
+LLVMValueRef
+lp_build_intrinsic_map(LLVMBuilderRef builder,
+ const char *name,
+ LLVMTypeRef ret_type,
+ LLVMValueRef *args,
+ unsigned num_args)
+{
+ LLVMTypeRef ret_elem_type = LLVMGetElementType(ret_type);
+ unsigned n = LLVMGetVectorSize(ret_type);
+ unsigned i, j;
+ LLVMValueRef res;
+
+ assert(num_args <= LP_MAX_FUNC_ARGS);
+
+ res = LLVMGetUndef(ret_type);
+ for(i = 0; i < n; ++i) {
+ LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0);
+ LLVMValueRef arg_elems[LP_MAX_FUNC_ARGS];
+ LLVMValueRef res_elem;
+ for(j = 0; j < num_args; ++j)
+ arg_elems[j] = LLVMBuildExtractElement(builder, args[j], index, "");
+ res_elem = lp_build_intrinsic(builder, name, ret_elem_type, arg_elems, num_args);
+ res = LLVMBuildInsertElement(builder, res, res_elem, index, "");
+ }
+
+ return res;
+}
+
+
+LLVMValueRef
+lp_build_intrinsic_map_unary(LLVMBuilderRef builder,
+ const char *name,
+ LLVMTypeRef ret_type,
+ LLVMValueRef a)
+{
+ return lp_build_intrinsic_map(builder, name, ret_type, &a, 1);
+}
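+
+
+/*
+ * Illustrative usage sketch -- apply the scalar llvm.sqrt.f32 intrinsic to
+ * every element of a hypothetical 4xfloat vector a.  Note that ret_type is
+ * the vector return type; the scalar intrinsic is called per element:
+ *
+ *    LLVMTypeRef f32x4 = LLVMVectorType(LLVMFloatType(), 4);
+ *    LLVMValueRef r = lp_build_intrinsic_map_unary(builder, "llvm.sqrt.f32",
+ *                                                  f32x4, a);
+ */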
+
+
+LLVMValueRef
+lp_build_intrinsic_map_binary(LLVMBuilderRef builder,
+ const char *name,
+ LLVMTypeRef ret_type,
+ LLVMValueRef a,
+ LLVMValueRef b)
+{
+ LLVMValueRef args[2];
+
+ args[0] = a;
+ args[1] = b;
+
+ return lp_build_intrinsic_map(builder, name, ret_type, args, 2);
+}
+
+
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_intr.h b/src/gallium/drivers/llvmpipe/lp_bld_intr.h
new file mode 100644
index 0000000000..f813f27074
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_intr.h
@@ -0,0 +1,102 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Helper functions for calling intrinsics.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#ifndef LP_BLD_INTR_H
+#define LP_BLD_INTR_H
+
+
+#include <llvm-c/Core.h>
+
+
+/**
+ * Max number of arguments in an intrinsic.
+ */
+#define LP_MAX_FUNC_ARGS 32
+
+
+LLVMValueRef
+lp_declare_intrinsic(LLVMModuleRef module,
+ const char *name,
+ LLVMTypeRef ret_type,
+ LLVMTypeRef *arg_types,
+ unsigned num_args);
+
+LLVMValueRef
+lp_build_intrinsic(LLVMBuilderRef builder,
+ const char *name,
+ LLVMTypeRef ret_type,
+ LLVMValueRef *args,
+ unsigned num_args);
+
+
+LLVMValueRef
+lp_build_intrinsic_unary(LLVMBuilderRef builder,
+ const char *name,
+ LLVMTypeRef ret_type,
+ LLVMValueRef a);
+
+
+LLVMValueRef
+lp_build_intrinsic_binary(LLVMBuilderRef builder,
+ const char *name,
+ LLVMTypeRef ret_type,
+ LLVMValueRef a,
+ LLVMValueRef b);
+
+
+LLVMValueRef
+lp_build_intrinsic_map(LLVMBuilderRef builder,
+ const char *name,
+ LLVMTypeRef ret_type,
+ LLVMValueRef *args,
+ unsigned num_args);
+
+
+LLVMValueRef
+lp_build_intrinsic_map_unary(LLVMBuilderRef builder,
+ const char *name,
+ LLVMTypeRef ret_type,
+ LLVMValueRef a);
+
+
+LLVMValueRef
+lp_build_intrinsic_map_binary(LLVMBuilderRef builder,
+ const char *name,
+ LLVMTypeRef ret_type,
+ LLVMValueRef a,
+ LLVMValueRef b);
+
+
+#endif /* !LP_BLD_INTR_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_logic.c b/src/gallium/drivers/llvmpipe/lp_bld_logic.c
new file mode 100644
index 0000000000..d566780e5d
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_logic.c
@@ -0,0 +1,394 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Helper functions for logical operations.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#include "lp_bld_type.h"
+#include "lp_bld_const.h"
+#include "lp_bld_intr.h"
+#include "lp_bld_logic.h"
+
+
+LLVMValueRef
+lp_build_cmp(struct lp_build_context *bld,
+ unsigned func,
+ LLVMValueRef a,
+ LLVMValueRef b)
+{
+ const union lp_type type = bld->type;
+ LLVMTypeRef vec_type = lp_build_vec_type(type);
+ LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
+ LLVMValueRef zeros = LLVMConstNull(int_vec_type);
+ LLVMValueRef ones = LLVMConstAllOnes(int_vec_type);
+ LLVMValueRef cond;
+ LLVMValueRef res;
+ unsigned i;
+
+ if(func == PIPE_FUNC_NEVER)
+ return zeros;
+ if(func == PIPE_FUNC_ALWAYS)
+ return ones;
+
+ /* TODO: optimize the constant case */
+
+ /* XXX: It is not clear if we should use the ordered or unordered operators */
+
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ if(type.width * type.length == 128) {
+ if(type.floating) {
+ LLVMValueRef args[3];
+ unsigned cc;
+ boolean swap;
+
+ swap = FALSE;
+ switch(func) {
+ case PIPE_FUNC_EQUAL:
+ cc = 0;
+ break;
+ case PIPE_FUNC_NOTEQUAL:
+ cc = 4;
+ break;
+ case PIPE_FUNC_LESS:
+ cc = 1;
+ break;
+ case PIPE_FUNC_LEQUAL:
+ cc = 2;
+ break;
+ case PIPE_FUNC_GREATER:
+ cc = 1;
+ swap = TRUE;
+ break;
+ case PIPE_FUNC_GEQUAL:
+ cc = 2;
+ swap = TRUE;
+ break;
+ default:
+ assert(0);
+ return bld->undef;
+ }
+
+ if(swap) {
+ args[0] = b;
+ args[1] = a;
+ }
+ else {
+ args[0] = a;
+ args[1] = b;
+ }
+
+ args[2] = LLVMConstInt(LLVMInt8Type(), cc, 0);
+ res = lp_build_intrinsic(bld->builder,
+ "llvm.x86.sse.cmp.ps",
+ vec_type,
+ args, 3);
+ res = LLVMBuildBitCast(bld->builder, res, int_vec_type, "");
+ return res;
+ }
+ else {
+ static const struct {
+ unsigned swap:1;
+ unsigned eq:1;
+ unsigned gt:1;
+ unsigned not:1;
+ } table[] = {
+ {0, 0, 0, 1}, /* PIPE_FUNC_NEVER */
+ {1, 0, 1, 0}, /* PIPE_FUNC_LESS */
+ {0, 1, 0, 0}, /* PIPE_FUNC_EQUAL */
+ {0, 0, 1, 1}, /* PIPE_FUNC_LEQUAL */
+ {0, 0, 1, 0}, /* PIPE_FUNC_GREATER */
+ {0, 1, 0, 1}, /* PIPE_FUNC_NOTEQUAL */
+ {1, 0, 1, 1}, /* PIPE_FUNC_GEQUAL */
+ {0, 0, 0, 0} /* PIPE_FUNC_ALWAYS */
+ };
+ const char *pcmpeq;
+ const char *pcmpgt;
+ LLVMValueRef args[2];
+ LLVMValueRef res;
+
+ switch (type.width) {
+ case 8:
+ pcmpeq = "llvm.x86.sse2.pcmpeq.b";
+ pcmpgt = "llvm.x86.sse2.pcmpgt.b";
+ break;
+ case 16:
+ pcmpeq = "llvm.x86.sse2.pcmpeq.w";
+ pcmpgt = "llvm.x86.sse2.pcmpgt.w";
+ break;
+ case 32:
+ pcmpeq = "llvm.x86.sse2.pcmpeq.d";
+ pcmpgt = "llvm.x86.sse2.pcmpgt.d";
+ break;
+ default:
+ assert(0);
+ return bld->undef;
+ }
+
+ /* There are no signed byte and unsigned word/dword comparison
+ * instructions. So flip the sign bit so that the results match.
+ */
+ if(table[func].gt &&
+ ((type.width == 8 && type.sign) ||
+ (type.width != 8 && !type.sign))) {
+ LLVMValueRef msb = lp_build_int_const_scalar(type, (unsigned long long)1 << (type.width - 1));
+ a = LLVMBuildXor(bld->builder, a, msb, "");
+ b = LLVMBuildXor(bld->builder, b, msb, "");
+ }
+
+ if(table[func].swap) {
+ args[0] = b;
+ args[1] = a;
+ }
+ else {
+ args[0] = a;
+ args[1] = b;
+ }
+
+ if(table[func].eq)
+ res = lp_build_intrinsic(bld->builder, pcmpeq, vec_type, args, 2);
+ else if (table[func].gt)
+ res = lp_build_intrinsic(bld->builder, pcmpgt, vec_type, args, 2);
+ else
+ res = LLVMConstNull(vec_type);
+
+ if(table[func].not)
+ res = LLVMBuildNot(bld->builder, res, "");
+
+ return res;
+ }
+ }
+#endif
+
+ if(type.floating) {
+ LLVMRealPredicate op;
+ switch(func) {
+ case PIPE_FUNC_NEVER:
+ op = LLVMRealPredicateFalse;
+ break;
+ case PIPE_FUNC_ALWAYS:
+ op = LLVMRealPredicateTrue;
+ break;
+ case PIPE_FUNC_EQUAL:
+ op = LLVMRealUEQ;
+ break;
+ case PIPE_FUNC_NOTEQUAL:
+ op = LLVMRealUNE;
+ break;
+ case PIPE_FUNC_LESS:
+ op = LLVMRealULT;
+ break;
+ case PIPE_FUNC_LEQUAL:
+ op = LLVMRealULE;
+ break;
+ case PIPE_FUNC_GREATER:
+ op = LLVMRealUGT;
+ break;
+ case PIPE_FUNC_GEQUAL:
+ op = LLVMRealUGE;
+ break;
+ default:
+ assert(0);
+ return bld->undef;
+ }
+
+#if 0
+ /* XXX: Although valid IR, no LLVM target currently support this */
+ cond = LLVMBuildFCmp(bld->builder, op, a, b, "");
+ res = LLVMBuildSelect(bld->builder, cond, ones, zeros, "");
+#else
+ debug_printf("%s: warning: using slow element-wise vector comparison\n",
+ __FUNCTION__);
+ res = LLVMGetUndef(int_vec_type);
+ for(i = 0; i < type.length; ++i) {
+ LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0);
+ cond = LLVMBuildFCmp(bld->builder, op,
+ LLVMBuildExtractElement(bld->builder, a, index, ""),
+ LLVMBuildExtractElement(bld->builder, b, index, ""),
+ "");
+ cond = LLVMBuildSelect(bld->builder, cond,
+ LLVMConstExtractElement(ones, index),
+ LLVMConstExtractElement(zeros, index),
+ "");
+ res = LLVMBuildInsertElement(bld->builder, res, cond, index, "");
+ }
+#endif
+ }
+ else {
+ LLVMIntPredicate op;
+ switch(func) {
+ case PIPE_FUNC_EQUAL:
+ op = LLVMIntEQ;
+ break;
+ case PIPE_FUNC_NOTEQUAL:
+ op = LLVMIntNE;
+ break;
+ case PIPE_FUNC_LESS:
+ op = type.sign ? LLVMIntSLT : LLVMIntULT;
+ break;
+ case PIPE_FUNC_LEQUAL:
+ op = type.sign ? LLVMIntSLE : LLVMIntULE;
+ break;
+ case PIPE_FUNC_GREATER:
+ op = type.sign ? LLVMIntSGT : LLVMIntUGT;
+ break;
+ case PIPE_FUNC_GEQUAL:
+ op = type.sign ? LLVMIntSGE : LLVMIntUGE;
+ break;
+ default:
+ assert(0);
+ return bld->undef;
+ }
+
+#if 0
+ /* XXX: Although valid IR, no LLVM target currently support this */
+ cond = LLVMBuildICmp(bld->builder, op, a, b, "");
+ res = LLVMBuildSelect(bld->builder, cond, ones, zeros, "");
+#else
+ debug_printf("%s: warning: using slow element-wise vector comparison\n",
+ __FUNCTION__);
+ res = LLVMGetUndef(int_vec_type);
+ for(i = 0; i < type.length; ++i) {
+ LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0);
+ cond = LLVMBuildICmp(bld->builder, op,
+ LLVMBuildExtractElement(bld->builder, a, index, ""),
+ LLVMBuildExtractElement(bld->builder, b, index, ""),
+ "");
+ cond = LLVMBuildSelect(bld->builder, cond,
+ LLVMConstExtractElement(ones, index),
+ LLVMConstExtractElement(zeros, index),
+ "");
+ res = LLVMBuildInsertElement(bld->builder, res, cond, index, "");
+ }
+#endif
+ }
+
+ return res;
+}
+
+
+LLVMValueRef
+lp_build_select(struct lp_build_context *bld,
+ LLVMValueRef mask,
+ LLVMValueRef a,
+ LLVMValueRef b)
+{
+ union lp_type type = bld->type;
+ LLVMValueRef res;
+
+ if(a == b)
+ return a;
+
+ if(type.floating) {
+ LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
+ a = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
+ b = LLVMBuildBitCast(bld->builder, b, int_vec_type, "");
+ }
+
+ a = LLVMBuildAnd(bld->builder, a, mask, "");
+
+ /* This often gets translated to PANDN, but sometimes the NOT is
+ * pre-computed and stored in another constant. The best strategy depends
+ * on available registers, so it is not a big deal -- hopefully LLVM makes
+ * the right decision given the rest of the program.
+ */
+ b = LLVMBuildAnd(bld->builder, b, LLVMBuildNot(bld->builder, mask, ""), "");
+
+ res = LLVMBuildOr(bld->builder, a, b, "");
+
+ if(type.floating) {
+ LLVMTypeRef vec_type = lp_build_vec_type(type);
+ res = LLVMBuildBitCast(bld->builder, res, vec_type, "");
+ }
+
+ return res;
+}
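+
+
+/*
+ * Illustrative usage sketch -- lp_build_cmp yields an all-ones/all-zeros
+ * integer mask per element, which plugs directly into lp_build_select.
+ * E.g. a per-element minimum of two hypothetical vectors a and b:
+ *
+ *    LLVMValueRef lt  = lp_build_cmp(bld, PIPE_FUNC_LESS, a, b);
+ *    LLVMValueRef min = lp_build_select(bld, lt, a, b);
+ */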
+
+
+LLVMValueRef
+lp_build_select_aos(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b,
+ boolean cond[4])
+{
+ const union lp_type type = bld->type;
+ const unsigned n = type.length;
+ unsigned i, j;
+
+ if(a == b)
+ return a;
+ if(cond[0] && cond[1] && cond[2] && cond[3])
+ return a;
+ if(!cond[0] && !cond[1] && !cond[2] && !cond[3])
+ return b;
+ if(a == bld->undef || b == bld->undef)
+ return bld->undef;
+
+ /*
+ * There are three major ways of accomplishing this:
+ * - with a shuffle,
+ * - with a select,
+ * - or with a bit mask.
+ *
+ * Select isn't supported for vector types yet, so the actual choice is
+ * between the shuffle and the bit mask. The threshold for switching
+ * between them is empirical and might need to be tuned.
+ */
+ if (n <= 4) {
+ /*
+ * Shuffle.
+ */
+ LLVMTypeRef elem_type = LLVMInt32Type();
+ LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH];
+
+ for(j = 0; j < n; j += 4)
+ for(i = 0; i < 4; ++i)
+ shuffles[j + i] = LLVMConstInt(elem_type, (cond[i] ? 0 : n) + j + i, 0);
+
+ return LLVMBuildShuffleVector(bld->builder, a, b, LLVMConstVector(shuffles, n), "");
+ }
+ else {
+#if 0
+ /* XXX: Unfortunately select of vectors do not work */
+ /* Use a select */
+ LLVMTypeRef elem_type = LLVMInt1Type();
+ LLVMValueRef cond[LP_MAX_VECTOR_LENGTH];
+
+ for(j = 0; j < n; j += 4)
+ for(i = 0; i < 4; ++i)
+ cond[j + i] = LLVMConstInt(elem_type, cond[i] ? 1 : 0, 0);
+
+ return LLVMBuildSelect(bld->builder, LLVMConstVector(cond, n), a, b, "");
+#else
+ LLVMValueRef mask = lp_build_const_mask_aos(type, cond);
+ return lp_build_select(bld, mask, a, b);
+#endif
+ }
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_logic.h b/src/gallium/drivers/llvmpipe/lp_bld_logic.h
new file mode 100644
index 0000000000..29b9e1c45b
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_logic.h
@@ -0,0 +1,72 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Helper functions for logical operations.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#ifndef LP_BLD_LOGIC_H
+#define LP_BLD_LOGIC_H
+
+
+#include <llvm-c/Core.h>
+
+#include "pipe/p_defines.h" /* For PIPE_FUNC_xxx */
+
+
+union lp_type;
+struct lp_build_context;
+
+
+/**
+ * @param func is one of PIPE_FUNC_xxx
+ */
+LLVMValueRef
+lp_build_cmp(struct lp_build_context *bld,
+ unsigned func,
+ LLVMValueRef a,
+ LLVMValueRef b);
+
+
+LLVMValueRef
+lp_build_select(struct lp_build_context *bld,
+ LLVMValueRef mask,
+ LLVMValueRef a,
+ LLVMValueRef b);
+
+LLVMValueRef
+lp_build_select_aos(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b,
+ boolean cond[4]);
+
+
+#endif /* !LP_BLD_LOGIC_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_sample.h b/src/gallium/drivers/llvmpipe/lp_bld_sample.h
new file mode 100644
index 0000000000..6f565af76d
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_sample.h
@@ -0,0 +1,135 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Texture sampling.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef LP_BLD_SAMPLE_H
+#define LP_BLD_SAMPLE_H
+
+
+#include <llvm-c/Core.h>
+
+struct pipe_texture;
+struct pipe_sampler_state;
+union lp_type;
+
+
+/**
+ * Sampler static state.
+ *
+ * These are the bits of state from pipe_texture and pipe_sampler_state that
+ * are embedded in the generated code.
+ */
+struct lp_sampler_static_state
+{
+ /* pipe_texture's state */
+ enum pipe_format format;
+ unsigned target:2;
+ unsigned pot_width:1;
+ unsigned pot_height:1;
+ unsigned pot_depth:1;
+
+ /* pipe_sampler_state's state */
+ unsigned wrap_s:3;
+ unsigned wrap_t:3;
+ unsigned wrap_r:3;
+ unsigned min_img_filter:2;
+ unsigned min_mip_filter:2;
+ unsigned mag_img_filter:2;
+ unsigned compare_mode:1;
+ unsigned compare_func:3;
+ unsigned normalized_coords:1;
+ unsigned prefilter:4;
+};
+
+
+/**
+ * Sampler dynamic state.
+ *
+ * These are the bits of state from pipe_texture and pipe_sampler_state that
+ * are computed at runtime.
+ *
+ * They are obtained through callbacks, as we don't want to tie the texture
+ * sampling code generation logic to any particular texture layout or pipe
+ * driver.
+ */
+struct lp_sampler_dynamic_state
+{
+
+ /** Obtain the base texture width. */
+ LLVMValueRef
+ (*width)( struct lp_sampler_dynamic_state *state,
+ LLVMBuilderRef builder,
+ unsigned unit);
+
+ /** Obtain the base texture height. */
+ LLVMValueRef
+ (*height)( struct lp_sampler_dynamic_state *state,
+ LLVMBuilderRef builder,
+ unsigned unit);
+
+ LLVMValueRef
+ (*stride)( struct lp_sampler_dynamic_state *state,
+ LLVMBuilderRef builder,
+ unsigned unit);
+
+ LLVMValueRef
+ (*data_ptr)( struct lp_sampler_dynamic_state *state,
+ LLVMBuilderRef builder,
+ unsigned unit);
+
+};
+
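+/*
+ * Illustrative sketch of a callback implementation -- the simplest possible
+ * version just materializes a constant, whereas a real driver would load the
+ * value from some context structure it passes to the generated code.  The
+ * my_width() name and the width of 64 are hypothetical:
+ *
+ *    static LLVMValueRef
+ *    my_width(struct lp_sampler_dynamic_state *state,
+ *             LLVMBuilderRef builder,
+ *             unsigned unit)
+ *    {
+ *       return LLVMConstInt(LLVMInt32Type(), 64, 0);
+ *    }
+ */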
+
+/**
+ * Derive the sampler static state.
+ */
+void
+lp_sampler_static_state(struct lp_sampler_static_state *state,
+ const struct pipe_texture *texture,
+ const struct pipe_sampler_state *sampler);
+
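+/*
+ * Illustrative usage sketch -- the static state is derived once from the
+ * currently bound texture and sampler CSOs and can serve as (part of) the
+ * key for caching generated code:
+ *
+ *    struct lp_sampler_static_state key;
+ *    lp_sampler_static_state(&key, texture, sampler);
+ */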
+
+void
+lp_build_sample_soa(LLVMBuilderRef builder,
+ const struct lp_sampler_static_state *static_state,
+ struct lp_sampler_dynamic_state *dynamic_state,
+ union lp_type fp_type,
+ unsigned unit,
+ unsigned num_coords,
+ const LLVMValueRef *coords,
+ LLVMValueRef lodbias,
+ LLVMValueRef *texel);
+
+
+
+#endif /* LP_BLD_SAMPLE_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_sample_soa.c b/src/gallium/drivers/llvmpipe/lp_bld_sample_soa.c
new file mode 100644
index 0000000000..3ca25b0e76
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_sample_soa.c
@@ -0,0 +1,411 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Texture sampling.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+#include "util/u_math.h"
+#include "util/u_format.h"
+#include "lp_bld_debug.h"
+#include "lp_bld_type.h"
+#include "lp_bld_const.h"
+#include "lp_bld_arit.h"
+#include "lp_bld_logic.h"
+#include "lp_bld_swizzle.h"
+#include "lp_bld_format.h"
+#include "lp_bld_sample.h"
+
+
+void
+lp_sampler_static_state(struct lp_sampler_static_state *state,
+ const struct pipe_texture *texture,
+ const struct pipe_sampler_state *sampler)
+{
+ memset(state, 0, sizeof *state);
+
+ if(!texture)
+ return;
+
+ if(!sampler)
+ return;
+
+ state->format = texture->format;
+ state->target = texture->target;
+ state->pot_width = util_is_pot(texture->width[0]);
+ state->pot_height = util_is_pot(texture->height[0]);
+ state->pot_depth = util_is_pot(texture->depth[0]);
+
+ state->wrap_s = sampler->wrap_s;
+ state->wrap_t = sampler->wrap_t;
+ state->wrap_r = sampler->wrap_r;
+ state->min_img_filter = sampler->min_img_filter;
+ state->min_mip_filter = sampler->min_mip_filter;
+ state->mag_img_filter = sampler->mag_img_filter;
+ if(sampler->compare_mode) {
+ state->compare_mode = sampler->compare_mode;
+ state->compare_func = sampler->compare_func;
+ }
+ state->normalized_coords = sampler->normalized_coords;
+ state->prefilter = sampler->prefilter;
+}
+
+
+
+/**
+ * Keep all information for sampling code generation in a single place.
+ */
+struct lp_build_sample_context
+{
+ LLVMBuilderRef builder;
+
+ const struct lp_sampler_static_state *static_state;
+
+ struct lp_sampler_dynamic_state *dynamic_state;
+
+ const struct util_format_description *format_desc;
+
+ /** Incoming coordinates type and build context */
+ union lp_type coord_type;
+ struct lp_build_context coord_bld;
+
+ /** Integer coordinates */
+ union lp_type int_coord_type;
+ struct lp_build_context int_coord_bld;
+
+ /** Output texels type and build context */
+ union lp_type texel_type;
+ struct lp_build_context texel_bld;
+};
+
+
+static void
+lp_build_sample_texel(struct lp_build_sample_context *bld,
+ LLVMValueRef x,
+ LLVMValueRef y,
+ LLVMValueRef y_stride,
+ LLVMValueRef data_ptr,
+ LLVMValueRef *texel)
+{
+ struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
+ LLVMValueRef x_stride;
+ LLVMValueRef offset;
+
+ x_stride = lp_build_const_scalar(bld->int_coord_type, bld->format_desc->block.bits/8);
+
+ if(bld->format_desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS) {
+ LLVMValueRef x_lo, x_hi;
+ LLVMValueRef y_lo, y_hi;
+ LLVMValueRef x_stride_lo, x_stride_hi;
+ LLVMValueRef y_stride_lo, y_stride_hi;
+ LLVMValueRef x_offset_lo, x_offset_hi;
+ LLVMValueRef y_offset_lo, y_offset_hi;
+ LLVMValueRef offset_lo, offset_hi;
+
+ x_lo = LLVMBuildAnd(bld->builder, x, int_coord_bld->one, "");
+ y_lo = LLVMBuildAnd(bld->builder, y, int_coord_bld->one, "");
+
+ x_hi = LLVMBuildLShr(bld->builder, x, int_coord_bld->one, "");
+ y_hi = LLVMBuildLShr(bld->builder, y, int_coord_bld->one, "");
+
+ x_stride_lo = x_stride;
+ y_stride_lo = lp_build_const_scalar(bld->int_coord_type, 2*bld->format_desc->block.bits/8);
+
+ x_stride_hi = lp_build_const_scalar(bld->int_coord_type, 4*bld->format_desc->block.bits/8);
+ y_stride_hi = LLVMBuildShl(bld->builder, y_stride, int_coord_bld->one, "");
+
+ x_offset_lo = lp_build_mul(int_coord_bld, x_lo, x_stride_lo);
+ y_offset_lo = lp_build_mul(int_coord_bld, y_lo, y_stride_lo);
+ offset_lo = lp_build_add(int_coord_bld, x_offset_lo, y_offset_lo);
+
+ x_offset_hi = lp_build_mul(int_coord_bld, x_hi, x_stride_hi);
+ y_offset_hi = lp_build_mul(int_coord_bld, y_hi, y_stride_hi);
+ offset_hi = lp_build_add(int_coord_bld, x_offset_hi, y_offset_hi);
+
+ offset = lp_build_add(int_coord_bld, offset_hi, offset_lo);
+ }
+ else {
+ LLVMValueRef x_offset;
+ LLVMValueRef y_offset;
+
+ x_offset = lp_build_mul(int_coord_bld, x, x_stride);
+ y_offset = lp_build_mul(int_coord_bld, y, y_stride);
+
+ offset = lp_build_add(int_coord_bld, x_offset, y_offset);
+ }
+
+ lp_build_load_rgba_soa(bld->builder,
+ bld->format_desc,
+ bld->texel_type,
+ data_ptr,
+ offset,
+ texel);
+}
+
+
+static LLVMValueRef
+lp_build_sample_wrap(struct lp_build_sample_context *bld,
+ LLVMValueRef coord,
+ LLVMValueRef length,
+ boolean is_pot,
+ unsigned wrap_mode)
+{
+ struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
+ LLVMValueRef length_minus_one;
+
+ length_minus_one = lp_build_sub(int_coord_bld, length, int_coord_bld->one);
+
+ switch(wrap_mode) {
+ case PIPE_TEX_WRAP_REPEAT:
+ if(is_pot)
+ coord = LLVMBuildAnd(bld->builder, coord, length_minus_one, "");
+ else
+ /* Signed remainder won't give the right results for negative
+ * dividends, but unsigned remainder does. */
+ coord = LLVMBuildURem(bld->builder, coord, length, "");
+ break;
+
+ case PIPE_TEX_WRAP_CLAMP:
+ coord = lp_build_max(int_coord_bld, coord, int_coord_bld->zero);
+ coord = lp_build_min(int_coord_bld, coord, length_minus_one);
+ break;
+
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ case PIPE_TEX_WRAP_MIRROR_REPEAT:
+ case PIPE_TEX_WRAP_MIRROR_CLAMP:
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
+ /* FIXME */
+ default:
+ assert(0);
+ }
+
+ return coord;
+}
+
+
+static void
+lp_build_sample_2d_nearest_soa(struct lp_build_sample_context *bld,
+ LLVMValueRef s,
+ LLVMValueRef t,
+ LLVMValueRef width,
+ LLVMValueRef height,
+ LLVMValueRef stride,
+ LLVMValueRef data_ptr,
+ LLVMValueRef *texel)
+{
+ LLVMValueRef x;
+ LLVMValueRef y;
+
+ x = lp_build_ifloor(&bld->coord_bld, s);
+ y = lp_build_ifloor(&bld->coord_bld, t);
+
+ x = lp_build_sample_wrap(bld, x, width, bld->static_state->pot_width, bld->static_state->wrap_s);
+ y = lp_build_sample_wrap(bld, y, height, bld->static_state->pot_height, bld->static_state->wrap_t);
+
+ lp_build_sample_texel(bld, x, y, stride, data_ptr, texel);
+}
+
+
+static void
+lp_build_sample_2d_linear_soa(struct lp_build_sample_context *bld,
+ LLVMValueRef s,
+ LLVMValueRef t,
+ LLVMValueRef width,
+ LLVMValueRef height,
+ LLVMValueRef stride,
+ LLVMValueRef data_ptr,
+ LLVMValueRef *texel)
+{
+ LLVMValueRef half;
+ LLVMValueRef s_ipart;
+ LLVMValueRef t_ipart;
+ LLVMValueRef s_fpart;
+ LLVMValueRef t_fpart;
+ LLVMValueRef x0, x1;
+ LLVMValueRef y0, y1;
+ LLVMValueRef neighbors[2][2][4];
+ unsigned chan;
+
+ half = lp_build_const_scalar(bld->coord_type, 0.5);
+ s = lp_build_sub(&bld->coord_bld, s, half);
+ t = lp_build_sub(&bld->coord_bld, t, half);
+
+ s_ipart = lp_build_floor(&bld->coord_bld, s);
+ t_ipart = lp_build_floor(&bld->coord_bld, t);
+
+ s_fpart = lp_build_sub(&bld->coord_bld, s, s_ipart);
+ t_fpart = lp_build_sub(&bld->coord_bld, t, t_ipart);
+
+ x0 = lp_build_int(&bld->coord_bld, s_ipart);
+ y0 = lp_build_int(&bld->coord_bld, t_ipart);
+
+ x0 = lp_build_sample_wrap(bld, x0, width, bld->static_state->pot_width, bld->static_state->wrap_s);
+ y0 = lp_build_sample_wrap(bld, y0, height, bld->static_state->pot_height, bld->static_state->wrap_t);
+
+ x1 = lp_build_add(&bld->int_coord_bld, x0, bld->int_coord_bld.one);
+ y1 = lp_build_add(&bld->int_coord_bld, y0, bld->int_coord_bld.one);
+
+ x1 = lp_build_sample_wrap(bld, x1, width, bld->static_state->pot_width, bld->static_state->wrap_s);
+ y1 = lp_build_sample_wrap(bld, y1, height, bld->static_state->pot_height, bld->static_state->wrap_t);
+
+ lp_build_sample_texel(bld, x0, y0, stride, data_ptr, neighbors[0][0]);
+ lp_build_sample_texel(bld, x1, y0, stride, data_ptr, neighbors[0][1]);
+ lp_build_sample_texel(bld, x0, y1, stride, data_ptr, neighbors[1][0]);
+ lp_build_sample_texel(bld, x1, y1, stride, data_ptr, neighbors[1][1]);
+
+ /* TODO: Don't interpolate missing channels */
+ for(chan = 0; chan < 4; ++chan) {
+ texel[chan] = lp_build_lerp_2d(&bld->texel_bld,
+ s_fpart, t_fpart,
+ neighbors[0][0][chan],
+ neighbors[0][1][chan],
+ neighbors[1][0][chan],
+ neighbors[1][1][chan]);
+ }
+}
+
+
+static void
+lp_build_sample_compare(struct lp_build_sample_context *bld,
+ LLVMValueRef p,
+ LLVMValueRef *texel)
+{
+ struct lp_build_context *texel_bld = &bld->texel_bld;
+ LLVMValueRef res;
+ unsigned chan;
+
+ if(!bld->static_state->compare_mode)
+ return;
+
+ /* TODO: Compare before swizzling, to avoid redundant computations */
+ res = NULL;
+ for(chan = 0; chan < 4; ++chan) {
+ LLVMValueRef cmp;
+ cmp = lp_build_cmp(texel_bld, bld->static_state->compare_func, p, texel[chan]);
+ cmp = lp_build_select(texel_bld, cmp, texel_bld->one, texel_bld->zero);
+
+ if(res)
+ res = lp_build_add(texel_bld, res, cmp);
+ else
+ res = cmp;
+ }
+
+ assert(res);
+ res = lp_build_mul(texel_bld, res, lp_build_const_scalar(texel_bld->type, 0.25));
+
+ /* XXX returning result for default GL_DEPTH_TEXTURE_MODE = GL_LUMINANCE */
+ for(chan = 0; chan < 3; ++chan)
+ texel[chan] = res;
+ texel[3] = texel_bld->one;
+}
+
+
+void
+lp_build_sample_soa(LLVMBuilderRef builder,
+ const struct lp_sampler_static_state *static_state,
+ struct lp_sampler_dynamic_state *dynamic_state,
+ union lp_type type,
+ unsigned unit,
+ unsigned num_coords,
+ const LLVMValueRef *coords,
+ LLVMValueRef lodbias,
+ LLVMValueRef *texel)
+{
+ struct lp_build_sample_context bld;
+ LLVMValueRef width;
+ LLVMValueRef height;
+ LLVMValueRef stride;
+ LLVMValueRef data_ptr;
+ LLVMValueRef s;
+ LLVMValueRef t;
+ LLVMValueRef p;
+
+ /* Setup our build context */
+ memset(&bld, 0, sizeof bld);
+ bld.builder = builder;
+ bld.static_state = static_state;
+ bld.dynamic_state = dynamic_state;
+ bld.format_desc = util_format_description(static_state->format);
+ bld.coord_type = type;
+ bld.int_coord_type = lp_int_type(type);
+ bld.texel_type = type;
+ lp_build_context_init(&bld.coord_bld, builder, bld.coord_type);
+ lp_build_context_init(&bld.int_coord_bld, builder, bld.int_coord_type);
+ lp_build_context_init(&bld.texel_bld, builder, bld.texel_type);
+
+ /* Get the dynamic state */
+ width = dynamic_state->width(dynamic_state, builder, unit);
+ height = dynamic_state->height(dynamic_state, builder, unit);
+ stride = dynamic_state->stride(dynamic_state, builder, unit);
+ data_ptr = dynamic_state->data_ptr(dynamic_state, builder, unit);
+
+ s = coords[0];
+ t = coords[1];
+ p = coords[2];
+
+ width = lp_build_broadcast_scalar(&bld.int_coord_bld, width);
+ height = lp_build_broadcast_scalar(&bld.int_coord_bld, height);
+ stride = lp_build_broadcast_scalar(&bld.int_coord_bld, stride);
+
+ if(static_state->target == PIPE_TEXTURE_1D)
+ t = bld.coord_bld.zero;
+
+ if(static_state->normalized_coords) {
+ LLVMTypeRef coord_vec_type = lp_build_vec_type(bld.coord_type);
+ LLVMValueRef fp_width = LLVMBuildSIToFP(builder, width, coord_vec_type, "");
+ LLVMValueRef fp_height = LLVMBuildSIToFP(builder, height, coord_vec_type, "");
+ s = lp_build_mul(&bld.coord_bld, s, fp_width);
+ t = lp_build_mul(&bld.coord_bld, t, fp_height);
+ }
+
+ switch (static_state->min_img_filter) {
+ case PIPE_TEX_FILTER_NEAREST:
+ lp_build_sample_2d_nearest_soa(&bld, s, t, width, height, stride, data_ptr, texel);
+ break;
+ case PIPE_TEX_FILTER_LINEAR:
+ case PIPE_TEX_FILTER_ANISO:
+ lp_build_sample_2d_linear_soa(&bld, s, t, width, height, stride, data_ptr, texel);
+ break;
+ default:
+ assert(0);
+ }
+
+ /* FIXME: respect static_state->min_mip_filter */
+ /* FIXME: respect static_state->mag_img_filter */
+ /* FIXME: respect static_state->prefilter */
+
+ lp_build_sample_compare(&bld, p, texel);
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_struct.c b/src/gallium/drivers/llvmpipe/lp_bld_struct.c
new file mode 100644
index 0000000000..3998ac374f
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_struct.c
@@ -0,0 +1,72 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * @file
+ * Helper functions for manipulating structures.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+
+#include "lp_bld_debug.h"
+#include "lp_bld_struct.h"
+
+
+LLVMValueRef
+lp_build_struct_get_ptr(LLVMBuilderRef builder,
+ LLVMValueRef ptr,
+ unsigned member,
+ const char *name)
+{
+ LLVMValueRef indices[2];
+ LLVMValueRef member_ptr;
+ indices[0] = LLVMConstInt(LLVMInt32Type(), 0, 0);
+ indices[1] = LLVMConstInt(LLVMInt32Type(), member, 0);
+ member_ptr = LLVMBuildGEP(builder, ptr, indices, Elements(indices), "");
+ lp_build_name(member_ptr, "%s.%s_ptr", LLVMGetValueName(ptr), name);
+ return member_ptr;
+}
+
+
+LLVMValueRef
+lp_build_struct_get(LLVMBuilderRef builder,
+ LLVMValueRef ptr,
+ unsigned member,
+ const char *name)
+{
+ LLVMValueRef member_ptr;
+ LLVMValueRef res;
+ member_ptr = lp_build_struct_get_ptr(builder, ptr, member, name);
+ res = LLVMBuildLoad(builder, member_ptr, "");
+ lp_build_name(res, "%s.%s", LLVMGetValueName(ptr), name);
+ return res;
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_struct.h b/src/gallium/drivers/llvmpipe/lp_bld_struct.h
new file mode 100644
index 0000000000..740392f561
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_struct.h
@@ -0,0 +1,75 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Helper functions for manipulating structures.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#ifndef LP_BLD_STRUCT_H
+#define LP_BLD_STRUCT_H
+
+
+#include <llvm-c/Core.h>
+#include <llvm-c/Target.h>
+
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+
+
+#define LP_CHECK_STRUCT_SIZE(_ctype, _ltarget, _ltype) \
+ assert(LLVMABISizeOfType(_ltarget, _ltype) == \
+ sizeof(_ctype))
+
+#define LP_CHECK_MEMBER_OFFSET(_ctype, _cmember, _ltarget, _ltype, _lindex) \
+ assert(LLVMOffsetOfElement(_ltarget, _ltype, _lindex) == \
+ offsetof(_ctype, _cmember))
+
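+/*
+ * Illustrative usage sketch -- after building an LLVM struct type that
+ * mirrors a C struct, the layout can be cross-checked and members fetched
+ * by index.  "struct my_jit_context", target, context_type and context_ptr
+ * are hypothetical names:
+ *
+ *    LP_CHECK_MEMBER_OFFSET(struct my_jit_context, alpha_ref,
+ *                           target, context_type, 0);
+ *    LP_CHECK_STRUCT_SIZE(struct my_jit_context, target, context_type);
+ *
+ *    alpha_ref = lp_build_struct_get(builder, context_ptr, 0, "alpha_ref");
+ */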
+
+/**
+ * Get value pointer to a structure member.
+ */
+LLVMValueRef
+lp_build_struct_get_ptr(LLVMBuilderRef builder,
+ LLVMValueRef ptr,
+ unsigned member,
+ const char *name);
+
+/**
+ * Get the value of a structure member.
+ */
+LLVMValueRef
+lp_build_struct_get(LLVMBuilderRef builder,
+ LLVMValueRef ptr,
+ unsigned member,
+ const char *name);
+
+
+#endif /* !LP_BLD_STRUCT_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_swizzle.c b/src/gallium/drivers/llvmpipe/lp_bld_swizzle.c
new file mode 100644
index 0000000000..ac7eed9379
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_swizzle.c
@@ -0,0 +1,238 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Helper functions for swizzling/shuffling.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#include "util/u_debug.h"
+
+#include "lp_bld_type.h"
+#include "lp_bld_const.h"
+#include "lp_bld_logic.h"
+#include "lp_bld_swizzle.h"
+
+
+LLVMValueRef
+lp_build_broadcast(LLVMBuilderRef builder,
+ LLVMTypeRef vec_type,
+ LLVMValueRef scalar)
+{
+ const unsigned n = LLVMGetVectorSize(vec_type);
+ LLVMValueRef res;
+ unsigned i;
+
+ res = LLVMGetUndef(vec_type);
+ for(i = 0; i < n; ++i) {
+ LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0);
+ res = LLVMBuildInsertElement(builder, res, scalar, index, "");
+ }
+
+ return res;
+}
+
+
+LLVMValueRef
+lp_build_broadcast_scalar(struct lp_build_context *bld,
+ LLVMValueRef scalar)
+{
+ const union lp_type type = bld->type;
+ LLVMValueRef res;
+ unsigned i;
+
+ res = bld->undef;
+ for(i = 0; i < type.length; ++i) {
+ LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0);
+ res = LLVMBuildInsertElement(bld->builder, res, scalar, index, "");
+ }
+
+ return res;
+}
+
+
+LLVMValueRef
+lp_build_broadcast_aos(struct lp_build_context *bld,
+ LLVMValueRef a,
+ unsigned channel)
+{
+ const union lp_type type = bld->type;
+ const unsigned n = type.length;
+ unsigned i, j;
+
+ if(a == bld->undef || a == bld->zero || a == bld->one)
+ return a;
+
+ /* XXX: SSSE3 has PSHUFB which should be better than bitmasks, but forcing
+ * the use of shuffles here actually causes worse results. More
+ * investigation is needed. */
+ if (n <= 4) {
+ /*
+ * Shuffle.
+ */
+ LLVMTypeRef elem_type = LLVMInt32Type();
+ LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH];
+
+ for(j = 0; j < n; j += 4)
+ for(i = 0; i < 4; ++i)
+ shuffles[j + i] = LLVMConstInt(elem_type, j + channel, 0);
+
+ return LLVMBuildShuffleVector(bld->builder, a, bld->undef, LLVMConstVector(shuffles, n), "");
+ }
+ else {
+ /*
+ * Bit mask and recursive shifts
+ *
+ * XYZW XYZW .... XYZW <= input
+ * 0Y00 0Y00 .... 0Y00
+ * YY00 YY00 .... YY00
+ * YYYY YYYY .... YYYY <= output
+ */
+ union lp_type type4 = type;
+ const char shifts[4][2] = {
+ { 1, 2},
+ {-1, 2},
+ { 1, -2},
+ {-1, -2}
+ };
+ boolean cond[4];
+ unsigned i;
+
+ memset(cond, 0, sizeof cond);
+ cond[channel] = 1;
+
+ a = LLVMBuildAnd(bld->builder, a, lp_build_const_mask_aos(type, cond), "");
+
+ type4.width *= 4;
+ type4.length /= 4;
+
+ a = LLVMBuildBitCast(bld->builder, a, lp_build_vec_type(type4), "");
+
+ for(i = 0; i < 2; ++i) {
+ LLVMValueRef tmp = NULL;
+ int shift = shifts[channel][i];
+
+#ifdef PIPE_ARCH_LITTLE_ENDIAN
+ shift = -shift;
+#endif
+
+ if(shift > 0)
+ tmp = LLVMBuildLShr(bld->builder, a, lp_build_int_const_scalar(type4, shift*type.width), "");
+ if(shift < 0)
+ tmp = LLVMBuildShl(bld->builder, a, lp_build_int_const_scalar(type4, -shift*type.width), "");
+
+ assert(tmp);
+ if(tmp)
+ a = LLVMBuildOr(bld->builder, a, tmp, "");
+ }
+
+ return LLVMBuildBitCast(bld->builder, a, lp_build_vec_type(type), "");
+ }
+}
+
+
+LLVMValueRef
+lp_build_swizzle1_aos(struct lp_build_context *bld,
+ LLVMValueRef a,
+ unsigned char swizzle[4])
+{
+ const unsigned n = bld->type.length;
+ unsigned i, j;
+
+ if(a == bld->undef || a == bld->zero || a == bld->one)
+ return a;
+
+ if(swizzle[0] == swizzle[1] && swizzle[1] == swizzle[2] && swizzle[2] == swizzle[3])
+ return lp_build_broadcast_aos(bld, a, swizzle[0]);
+
+ {
+ /*
+ * Shuffle.
+ */
+ LLVMTypeRef elem_type = LLVMInt32Type();
+ LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH];
+
+ for(j = 0; j < n; j += 4)
+ for(i = 0; i < 4; ++i)
+ shuffles[j + i] = LLVMConstInt(elem_type, j + swizzle[i], 0);
+
+ return LLVMBuildShuffleVector(bld->builder, a, bld->undef, LLVMConstVector(shuffles, n), "");
+ }
+}
+
+
+LLVMValueRef
+lp_build_swizzle2_aos(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b,
+ unsigned char swizzle[4])
+{
+ const unsigned n = bld->type.length;
+ unsigned i, j;
+
+ if(swizzle[0] < 4 && swizzle[1] < 4 && swizzle[2] < 4 && swizzle[3] < 4)
+ return lp_build_swizzle1_aos(bld, a, swizzle);
+
+ if(a == b) {
+ swizzle[0] %= 4;
+ swizzle[1] %= 4;
+ swizzle[2] %= 4;
+ swizzle[3] %= 4;
+ return lp_build_swizzle1_aos(bld, a, swizzle);
+ }
+
+ if(swizzle[0] % 4 == 0 &&
+ swizzle[1] % 4 == 1 &&
+ swizzle[2] % 4 == 2 &&
+ swizzle[3] % 4 == 3) {
+ boolean cond[4];
+ cond[0] = swizzle[0] / 4;
+ cond[1] = swizzle[1] / 4;
+ cond[2] = swizzle[2] / 4;
+ cond[3] = swizzle[3] / 4;
+ return lp_build_select_aos(bld, a, b, cond);
+ }
+
+ {
+ /*
+ * Shuffle.
+ */
+ LLVMTypeRef elem_type = LLVMInt32Type();
+ LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH];
+
+ for(j = 0; j < n; j += 4)
+ for(i = 0; i < 4; ++i)
+ shuffles[j + i] = LLVMConstInt(elem_type, j + (swizzle[i] % 4) + (swizzle[i] / 4 * n), 0);
+
+ return LLVMBuildShuffleVector(bld->builder, a, b, LLVMConstVector(shuffles, n), "");
+ }
+}
+
+
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_swizzle.h b/src/gallium/drivers/llvmpipe/lp_bld_swizzle.h
new file mode 100644
index 0000000000..d7dd6a8a60
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_swizzle.h
@@ -0,0 +1,91 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Helper functions for swizzling/shuffling.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#ifndef LP_BLD_SWIZZLE_H
+#define LP_BLD_SWIZZLE_H
+
+
+#include <llvm-c/Core.h>
+
+
+union lp_type;
+struct lp_build_context;
+
+
+LLVMValueRef
+lp_build_broadcast(LLVMBuilderRef builder,
+ LLVMTypeRef vec_type,
+ LLVMValueRef scalar);
+
+
+LLVMValueRef
+lp_build_broadcast_scalar(struct lp_build_context *bld,
+ LLVMValueRef scalar);
+
+
+/**
+ * Broadcast one channel of a vector composed of arrays of XYZW structures into
+ * all four channels.
+ */
+LLVMValueRef
+lp_build_broadcast_aos(struct lp_build_context *bld,
+ LLVMValueRef a,
+ unsigned channel);
+
+
+/**
+ * Swizzle a vector consisting of an array of XYZW structs.
+ *
+ * @param swizzle gives the channel indices, each in the [0,4[ range.
+ */
+LLVMValueRef
+lp_build_swizzle1_aos(struct lp_build_context *bld,
+ LLVMValueRef a,
+ unsigned char swizzle[4]);
+
+
+/**
+ * Swizzle two vectors, each consisting of an array of XYZW structs.
+ *
+ * @param swizzle gives the channel indices, each in the [0,8[ range; values in the [4,8[ range select channels from b.
+ */
+LLVMValueRef
+lp_build_swizzle2_aos(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b,
+ unsigned char swizzle[4]);
+
+
+#endif /* !LP_BLD_SWIZZLE_H */
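
For illustration, here is a minimal sketch of how a caller is expected to fill the swizzle arrays declared above. It assumes an already-initialized struct lp_build_context *bld and two AoS vectors a and b; the function name is hypothetical and only demonstrates the index conventions.

/* Hypothetical caller of the swizzle helpers, for illustration only. */
static LLVMValueRef
swizzle_usage_sketch(struct lp_build_context *bld, LLVMValueRef a, LLVMValueRef b)
{
   /* a.yyyy: every index below 4 picks a channel of a */
   unsigned char yyyy[4] = { 1, 1, 1, 1 };
   LLVMValueRef a_yyyy = lp_build_swizzle1_aos(bld, a, yyyy);

   /* a.xy / b.zw: indices in [4,8[ pick the corresponding channel of b */
   unsigned char mix[4] = { 0, 1, 4 + 2, 4 + 3 };
   LLVMValueRef mixed = lp_build_swizzle2_aos(bld, a, b, mix);

   (void)a_yyyy;
   return mixed;
}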
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_tgsi.h b/src/gallium/drivers/llvmpipe/lp_bld_tgsi.h
new file mode 100644
index 0000000000..10c251c416
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_tgsi.h
@@ -0,0 +1,84 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * TGSI to LLVM IR translation.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef LP_BLD_TGSI_H
+#define LP_BLD_TGSI_H
+
+#include <llvm-c/Core.h>
+
+
+struct tgsi_token;
+union lp_type;
+struct lp_build_context;
+struct lp_build_mask_context;
+
+
+/**
+ * Sampler code generation interface.
+ *
+ * Although texture sampling is a requirement for TGSI translation, it is
+ * a very different problem, with several possible approaches. This
+ * structure establishes an interface for texture sampling code generation, so
+ * that we can easily use different texture sampling strategies.
+ */
+struct lp_build_sampler_soa
+{
+ void
+ (*destroy)( struct lp_build_sampler_soa *sampler );
+
+ void
+ (*emit_fetch_texel)( struct lp_build_sampler_soa *sampler,
+ LLVMBuilderRef builder,
+ union lp_type type,
+ unsigned unit,
+ unsigned num_coords,
+ const LLVMValueRef *coords,
+ LLVMValueRef lodbias,
+ LLVMValueRef *texel);
+};
+
+
+void
+lp_build_tgsi_soa(LLVMBuilderRef builder,
+ const struct tgsi_token *tokens,
+ union lp_type type,
+ struct lp_build_mask_context *mask,
+ LLVMValueRef consts_ptr,
+ const LLVMValueRef *pos,
+ const LLVMValueRef (*inputs)[4],
+ LLVMValueRef (*outputs)[4],
+ struct lp_build_sampler_soa *sampler);
+
+
+#endif /* LP_BLD_TGSI_H */
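
To make the pluggable-sampler idea concrete, the sketch below shows a do-nothing lp_build_sampler_soa implementation that returns zero texels. It is illustrative only: the "dummy_" names are invented, while CALLOC_STRUCT, FREE and lp_build_zero are the helpers used elsewhere in this series.

/* Hypothetical no-op sampler, for illustration of the interface above. */
#include "util/u_memory.h"
#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_tgsi.h"

struct dummy_sampler_soa
{
   struct lp_build_sampler_soa base;
};

static void
dummy_sampler_destroy(struct lp_build_sampler_soa *sampler)
{
   FREE(sampler);
}

static void
dummy_sampler_emit_fetch_texel(struct lp_build_sampler_soa *sampler,
                               LLVMBuilderRef builder,
                               union lp_type type,
                               unsigned unit,
                               unsigned num_coords,
                               const LLVMValueRef *coords,
                               LLVMValueRef lodbias,
                               LLVMValueRef *texel)
{
   unsigned chan;
   /* Every texture fetch yields (0, 0, 0, 0). */
   for (chan = 0; chan < 4; ++chan)
      texel[chan] = lp_build_zero(type);
}

struct lp_build_sampler_soa *
dummy_sampler_soa_create(void)
{
   struct dummy_sampler_soa *sampler = CALLOC_STRUCT(dummy_sampler_soa);
   if (!sampler)
      return NULL;
   sampler->base.destroy = dummy_sampler_destroy;
   sampler->base.emit_fetch_texel = dummy_sampler_emit_fetch_texel;
   return &sampler->base;
}

The resulting pointer can then be passed as the sampler argument of lp_build_tgsi_soa() and released afterwards through its destroy callback.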
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_tgsi_soa.c b/src/gallium/drivers/llvmpipe/lp_bld_tgsi_soa.c
new file mode 100644
index 0000000000..4756811dc3
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_tgsi_soa.c
@@ -0,0 +1,1339 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * TGSI to LLVM IR translation -- SoA.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ *
+ * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
+ * Brian Paul, and others.
+ */
+
+#include "pipe/p_config.h"
+#include "pipe/p_shader_tokens.h"
+#include "util/u_debug.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "tgsi/tgsi_info.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_util.h"
+#include "tgsi/tgsi_exec.h"
+#include "lp_bld_type.h"
+#include "lp_bld_const.h"
+#include "lp_bld_intr.h"
+#include "lp_bld_arit.h"
+#include "lp_bld_logic.h"
+#include "lp_bld_swizzle.h"
+#include "lp_bld_flow.h"
+#include "lp_bld_tgsi.h"
+#include "lp_bld_debug.h"
+
+
+#define LP_MAX_TEMPS 256
+#define LP_MAX_IMMEDIATES 256
+
+
+#define FOR_EACH_CHANNEL( CHAN )\
+ for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)
+
+#define IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
+ ((INST)->FullDstRegisters[0].DstRegister.WriteMask & (1 << (CHAN)))
+
+#define IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
+ if (IS_DST0_CHANNEL_ENABLED( INST, CHAN ))
+
+#define FOR_EACH_DST0_ENABLED_CHANNEL( INST, CHAN )\
+ FOR_EACH_CHANNEL( CHAN )\
+ IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )
+
+#define CHAN_X 0
+#define CHAN_Y 1
+#define CHAN_Z 2
+#define CHAN_W 3
+
+
+struct lp_build_tgsi_soa_context
+{
+ struct lp_build_context base;
+
+ LLVMValueRef consts_ptr;
+ const LLVMValueRef *pos;
+ const LLVMValueRef (*inputs)[NUM_CHANNELS];
+ LLVMValueRef (*outputs)[NUM_CHANNELS];
+
+ struct lp_build_sampler_soa *sampler;
+
+ LLVMValueRef immediates[LP_MAX_IMMEDIATES][NUM_CHANNELS];
+ LLVMValueRef temps[LP_MAX_TEMPS][NUM_CHANNELS];
+
+ struct lp_build_mask_context *mask;
+};
+
+
+/**
+ * Register fetch.
+ */
+static LLVMValueRef
+emit_fetch(
+ struct lp_build_tgsi_soa_context *bld,
+ const struct tgsi_full_instruction *inst,
+ unsigned index,
+ const unsigned chan_index )
+{
+ const struct tgsi_full_src_register *reg = &inst->FullSrcRegisters[index];
+ unsigned swizzle = tgsi_util_get_full_src_register_extswizzle( reg, chan_index );
+ LLVMValueRef res;
+
+ switch (swizzle) {
+ case TGSI_EXTSWIZZLE_X:
+ case TGSI_EXTSWIZZLE_Y:
+ case TGSI_EXTSWIZZLE_Z:
+ case TGSI_EXTSWIZZLE_W:
+
+ switch (reg->SrcRegister.File) {
+ case TGSI_FILE_CONSTANT: {
+ LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), reg->SrcRegister.Index*4 + swizzle, 0);
+ LLVMValueRef scalar_ptr = LLVMBuildGEP(bld->base.builder, bld->consts_ptr, &index, 1, "");
+ LLVMValueRef scalar = LLVMBuildLoad(bld->base.builder, scalar_ptr, "");
+ res = lp_build_broadcast_scalar(&bld->base, scalar);
+ break;
+ }
+
+ case TGSI_FILE_IMMEDIATE:
+ res = bld->immediates[reg->SrcRegister.Index][swizzle];
+ assert(res);
+ break;
+
+ case TGSI_FILE_INPUT:
+ res = bld->inputs[reg->SrcRegister.Index][swizzle];
+ assert(res);
+ break;
+
+ case TGSI_FILE_TEMPORARY:
+ res = bld->temps[reg->SrcRegister.Index][swizzle];
+ if(!res)
+ return bld->base.undef;
+ break;
+
+ default:
+ assert( 0 );
+ return bld->base.undef;
+ }
+ break;
+
+ case TGSI_EXTSWIZZLE_ZERO:
+ res = bld->base.zero;
+ break;
+
+ case TGSI_EXTSWIZZLE_ONE:
+ res = bld->base.one;
+ break;
+
+ default:
+ assert( 0 );
+ return bld->base.undef;
+ }
+
+ switch( tgsi_util_get_full_src_register_sign_mode( reg, chan_index ) ) {
+ case TGSI_UTIL_SIGN_CLEAR:
+ res = lp_build_abs( &bld->base, res );
+ break;
+
+ case TGSI_UTIL_SIGN_SET:
+ /* TODO: Use bitwise OR for floating point */
+ res = lp_build_abs( &bld->base, res );
+ res = LLVMBuildNeg( bld->base.builder, res, "" );
+ break;
+
+ case TGSI_UTIL_SIGN_TOGGLE:
+ res = LLVMBuildNeg( bld->base.builder, res, "" );
+ break;
+
+ case TGSI_UTIL_SIGN_KEEP:
+ break;
+ }
+
+ return res;
+}
+
+
+/**
+ * Register store.
+ */
+static void
+emit_store(
+ struct lp_build_tgsi_soa_context *bld,
+ const struct tgsi_full_instruction *inst,
+ unsigned index,
+ unsigned chan_index,
+ LLVMValueRef value)
+{
+ const struct tgsi_full_dst_register *reg = &inst->FullDstRegisters[index];
+
+ switch( inst->Instruction.Saturate ) {
+ case TGSI_SAT_NONE:
+ break;
+
+ case TGSI_SAT_ZERO_ONE:
+ value = lp_build_max(&bld->base, value, bld->base.zero);
+ value = lp_build_min(&bld->base, value, bld->base.one);
+ break;
+
+ case TGSI_SAT_MINUS_PLUS_ONE:
+ value = lp_build_max(&bld->base, value, lp_build_const_scalar(bld->base.type, -1.0));
+ value = lp_build_min(&bld->base, value, bld->base.one);
+ break;
+
+ default:
+ assert(0);
+ }
+
+ switch( reg->DstRegister.File ) {
+ case TGSI_FILE_OUTPUT:
+ bld->outputs[reg->DstRegister.Index][chan_index] = value;
+ break;
+
+ case TGSI_FILE_TEMPORARY:
+ bld->temps[reg->DstRegister.Index][chan_index] = value;
+ break;
+
+ case TGSI_FILE_ADDRESS:
+ /* FIXME */
+ assert(0);
+ break;
+
+ default:
+ assert( 0 );
+ }
+}
+
+
+/**
+ * High-level instruction translators.
+ */
+
+static void
+emit_tex( struct lp_build_tgsi_soa_context *bld,
+ const struct tgsi_full_instruction *inst,
+ boolean apply_lodbias,
+ boolean projected)
+{
+ const uint unit = inst->FullSrcRegisters[1].SrcRegister.Index;
+ LLVMValueRef lodbias;
+ LLVMValueRef oow;
+ LLVMValueRef coords[3];
+ LLVMValueRef texel[4];
+ unsigned num_coords;
+ unsigned i;
+
+ switch (inst->InstructionExtTexture.Texture) {
+ case TGSI_TEXTURE_1D:
+ num_coords = 1;
+ break;
+ case TGSI_TEXTURE_2D:
+ case TGSI_TEXTURE_RECT:
+ num_coords = 2;
+ break;
+ case TGSI_TEXTURE_SHADOW1D:
+ case TGSI_TEXTURE_SHADOW2D:
+ case TGSI_TEXTURE_SHADOWRECT:
+ case TGSI_TEXTURE_3D:
+ case TGSI_TEXTURE_CUBE:
+ num_coords = 3;
+ break;
+ default:
+ assert(0);
+ return;
+ }
+
+ if(apply_lodbias)
+ lodbias = emit_fetch( bld, inst, 0, 3 );
+ else
+ lodbias = bld->base.zero;
+
+ if (projected) {
+ oow = emit_fetch( bld, inst, 0, 3 );
+ oow = lp_build_rcp(&bld->base, oow);
+ }
+
+ for (i = 0; i < num_coords; i++) {
+ coords[i] = emit_fetch( bld, inst, 0, i );
+ if (projected)
+ coords[i] = lp_build_mul(&bld->base, coords[i], oow);
+ }
+
+ bld->sampler->emit_fetch_texel(bld->sampler,
+ bld->base.builder,
+ bld->base.type,
+ unit, num_coords, coords, lodbias,
+ texel);
+
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, i ) {
+ emit_store( bld, inst, 0, i, texel[i] );
+ }
+}
+
+
+static void
+emit_kil(
+ struct lp_build_tgsi_soa_context *bld,
+ const struct tgsi_full_instruction *inst )
+{
+ const struct tgsi_full_src_register *reg = &inst->FullSrcRegisters[0];
+ LLVMValueRef terms[NUM_CHANNELS];
+ LLVMValueRef mask;
+ unsigned chan_index;
+
+ memset(&terms, 0, sizeof terms);
+
+ FOR_EACH_CHANNEL( chan_index ) {
+ unsigned swizzle;
+
+ /* Unswizzle channel */
+ swizzle = tgsi_util_get_full_src_register_extswizzle( reg, chan_index );
+
+ /* Note that we test if the value is less than zero, so 1.0 and 0.0 need
+ * not be tested. */
+ if(swizzle == TGSI_EXTSWIZZLE_ZERO || swizzle == TGSI_EXTSWIZZLE_ONE)
+ continue;
+
+ /* Check if the component has not been already tested. */
+ assert(swizzle < NUM_CHANNELS);
+ if( !terms[swizzle] )
+ /* TODO: change the comparison operator instead of setting the sign */
+ terms[swizzle] = emit_fetch(bld, inst, 0, chan_index );
+ }
+
+ mask = NULL;
+ FOR_EACH_CHANNEL( chan_index ) {
+ if(terms[chan_index]) {
+ LLVMValueRef chan_mask;
+
+ chan_mask = lp_build_cmp(&bld->base, PIPE_FUNC_GEQUAL, terms[chan_index], bld->base.zero);
+
+ if(mask)
+ mask = LLVMBuildAnd(bld->base.builder, mask, chan_mask, "");
+ else
+ mask = chan_mask;
+ }
+ }
+
+ if(mask)
+ lp_build_mask_update(bld->mask, mask);
+}
+
+
+/**
+ * Check if inst src/dest regs use indirect addressing into temporary
+ * register file.
+ */
+static boolean
+indirect_temp_reference(const struct tgsi_full_instruction *inst)
+{
+ uint i;
+ for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
+ const struct tgsi_full_src_register *reg = &inst->FullSrcRegisters[i];
+ if (reg->SrcRegister.File == TGSI_FILE_TEMPORARY &&
+ reg->SrcRegister.Indirect)
+ return TRUE;
+ }
+ for (i = 0; i < inst->Instruction.NumDstRegs; i++) {
+ const struct tgsi_full_dst_register *reg = &inst->FullDstRegisters[i];
+ if (reg->DstRegister.File == TGSI_FILE_TEMPORARY &&
+ reg->DstRegister.Indirect)
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
+static int
+emit_instruction(
+ struct lp_build_tgsi_soa_context *bld,
+ struct tgsi_full_instruction *inst )
+{
+ unsigned chan_index;
+ LLVMValueRef src0, src1, src2;
+ LLVMValueRef tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ LLVMValueRef dst0;
+
+ /* we can't handle indirect addressing into temp register file yet */
+ if (indirect_temp_reference(inst))
+ return FALSE;
+
+ switch (inst->Instruction.Opcode) {
+#if 0
+ case TGSI_OPCODE_ARL:
+ /* FIXME */
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ tmp0 = emit_fetch( bld, inst, 0, chan_index );
+ emit_flr(bld, 0, 0);
+ emit_f2it( bld, 0 );
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+#endif
+
+ case TGSI_OPCODE_MOV:
+ case TGSI_OPCODE_SWZ:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ tmp0 = emit_fetch( bld, inst, 0, chan_index );
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+
+ case TGSI_OPCODE_LIT:
+ if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ) {
+ emit_store( bld, inst, 0, CHAN_X, bld->base.one);
+ }
+ if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ) {
+ src0 = emit_fetch( bld, inst, 0, CHAN_X );
+ dst0 = lp_build_max( &bld->base, src0, bld->base.zero);
+ emit_store( bld, inst, 0, CHAN_Y, dst0);
+ }
+ if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) ) {
+ /* XMM[1] = SrcReg[0].yyyy */
+ tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
+ /* XMM[1] = max(XMM[1], 0) */
+ tmp1 = lp_build_max( &bld->base, tmp1, bld->base.zero);
+ /* XMM[2] = SrcReg[0].wwww */
+ tmp2 = emit_fetch( bld, inst, 0, CHAN_W );
+ tmp1 = lp_build_pow( &bld->base, tmp1, tmp2);
+ tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
+ tmp2 = lp_build_cmp(&bld->base, PIPE_FUNC_GREATER, tmp0, bld->base.zero);
+ dst0 = lp_build_select(&bld->base, tmp2, tmp1, bld->base.zero);
+ emit_store( bld, inst, 0, CHAN_Z, dst0);
+ }
+ if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) ) {
+ emit_store( bld, inst, 0, CHAN_W, bld->base.one);
+ }
+ break;
+
+ case TGSI_OPCODE_RCP:
+ /* TGSI_OPCODE_RECIP */
+ src0 = emit_fetch( bld, inst, 0, CHAN_X );
+ dst0 = lp_build_rcp(&bld->base, src0);
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ emit_store( bld, inst, 0, chan_index, dst0 );
+ }
+ break;
+
+ case TGSI_OPCODE_RSQ:
+ /* TGSI_OPCODE_RECIPSQRT */
+ src0 = emit_fetch( bld, inst, 0, CHAN_X );
+ src0 = lp_build_abs(&bld->base, src0);
+ dst0 = lp_build_rsqrt(&bld->base, src0);
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ emit_store( bld, inst, 0, chan_index, dst0 );
+ }
+ break;
+
+ case TGSI_OPCODE_EXP:
+ if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
+ IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ||
+ IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z )) {
+ LLVMValueRef *p_exp2_int_part = NULL;
+ LLVMValueRef *p_frac_part = NULL;
+ LLVMValueRef *p_exp2 = NULL;
+
+ src0 = emit_fetch( bld, inst, 0, CHAN_X );
+
+ if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
+ p_exp2_int_part = &tmp0;
+ if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ))
+ p_frac_part = &tmp1;
+ if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
+ p_exp2 = &tmp2;
+
+ lp_build_exp2_approx(&bld->base, src0, p_exp2_int_part, p_frac_part, p_exp2);
+
+ if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
+ emit_store( bld, inst, 0, CHAN_X, tmp0);
+ if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ))
+ emit_store( bld, inst, 0, CHAN_Y, tmp1);
+ if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
+ emit_store( bld, inst, 0, CHAN_Z, tmp2);
+ }
+ /* dst.w = 1.0 */
+ if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_W )) {
+ tmp0 = bld->base.one;
+ emit_store( bld, inst, 0, CHAN_W, tmp0);
+ }
+ break;
+
+ case TGSI_OPCODE_LOG:
+ if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
+ IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ||
+ IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z )) {
+ LLVMValueRef *p_floor_log2 = NULL;
+ LLVMValueRef *p_exp = NULL;
+ LLVMValueRef *p_log2 = NULL;
+
+ src0 = emit_fetch( bld, inst, 0, CHAN_X );
+ src0 = lp_build_abs( &bld->base, src0 );
+
+ if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
+ p_floor_log2 = &tmp0;
+ if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ))
+ p_exp = &tmp1;
+ if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
+ p_log2 = &tmp2;
+
+ lp_build_log2_approx(&bld->base, src0, p_exp, p_floor_log2, p_log2);
+
+ /* dst.x = floor(lg2(abs(src.x))) */
+ if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ))
+ emit_store( bld, inst, 0, CHAN_X, tmp0);
+ /* dst.y = abs(src)/ex2(floor(lg2(abs(src.x)))) */
+ if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y )) {
+ tmp1 = lp_build_div( &bld->base, src0, tmp1);
+ emit_store( bld, inst, 0, CHAN_Y, tmp1);
+ }
+ /* dst.z = lg2(abs(src.x)) */
+ if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ))
+ emit_store( bld, inst, 0, CHAN_Z, tmp2);
+ }
+ /* dst.w = 1.0 */
+ if (IS_DST0_CHANNEL_ENABLED( inst, CHAN_W )) {
+ tmp0 = bld->base.one;
+ emit_store( bld, inst, 0, CHAN_W, tmp0);
+ }
+ break;
+
+ case TGSI_OPCODE_MUL:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ src0 = emit_fetch( bld, inst, 0, chan_index );
+ src1 = emit_fetch( bld, inst, 1, chan_index );
+ dst0 = lp_build_mul(&bld->base, src0, src1);
+ emit_store( bld, inst, 0, chan_index, dst0);
+ }
+ break;
+
+ case TGSI_OPCODE_ADD:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ src0 = emit_fetch( bld, inst, 0, chan_index );
+ src1 = emit_fetch( bld, inst, 1, chan_index );
+ dst0 = lp_build_add(&bld->base, src0, src1);
+ emit_store( bld, inst, 0, chan_index, dst0);
+ }
+ break;
+
+ case TGSI_OPCODE_DP3:
+ /* TGSI_OPCODE_DOT3 */
+ tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
+ tmp1 = emit_fetch( bld, inst, 1, CHAN_X );
+ tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
+ tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
+ tmp2 = emit_fetch( bld, inst, 1, CHAN_Y );
+ tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
+ tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
+ tmp1 = emit_fetch( bld, inst, 0, CHAN_Z );
+ tmp2 = emit_fetch( bld, inst, 1, CHAN_Z );
+ tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
+ tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+
+ case TGSI_OPCODE_DP4:
+ /* TGSI_OPCODE_DOT4 */
+ tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
+ tmp1 = emit_fetch( bld, inst, 1, CHAN_X );
+ tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
+ tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
+ tmp2 = emit_fetch( bld, inst, 1, CHAN_Y );
+ tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
+ tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
+ tmp1 = emit_fetch( bld, inst, 0, CHAN_Z );
+ tmp2 = emit_fetch( bld, inst, 1, CHAN_Z );
+ tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
+ tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
+ tmp1 = emit_fetch( bld, inst, 0, CHAN_W );
+ tmp2 = emit_fetch( bld, inst, 1, CHAN_W );
+ tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
+ tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+
+ case TGSI_OPCODE_DST:
+ IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) {
+ tmp0 = bld->base.one;
+ emit_store( bld, inst, 0, CHAN_X, tmp0);
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) {
+ tmp0 = emit_fetch( bld, inst, 0, CHAN_Y );
+ tmp1 = emit_fetch( bld, inst, 1, CHAN_Y );
+ tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
+ emit_store( bld, inst, 0, CHAN_Y, tmp0);
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) {
+ tmp0 = emit_fetch( bld, inst, 0, CHAN_Z );
+ emit_store( bld, inst, 0, CHAN_Z, tmp0);
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) {
+ tmp0 = emit_fetch( bld, inst, 1, CHAN_W );
+ emit_store( bld, inst, 0, CHAN_W, tmp0);
+ }
+ break;
+
+ case TGSI_OPCODE_MIN:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ src0 = emit_fetch( bld, inst, 0, chan_index );
+ src1 = emit_fetch( bld, inst, 1, chan_index );
+ dst0 = lp_build_min( &bld->base, src0, src1 );
+ emit_store( bld, inst, 0, chan_index, dst0);
+ }
+ break;
+
+ case TGSI_OPCODE_MAX:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ src0 = emit_fetch( bld, inst, 0, chan_index );
+ src1 = emit_fetch( bld, inst, 1, chan_index );
+ dst0 = lp_build_max( &bld->base, src0, src1 );
+ emit_store( bld, inst, 0, chan_index, dst0);
+ }
+ break;
+
+ case TGSI_OPCODE_SLT:
+ /* TGSI_OPCODE_SETLT */
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ src0 = emit_fetch( bld, inst, 0, chan_index );
+ src1 = emit_fetch( bld, inst, 1, chan_index );
+ tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_LESS, src0, src1 );
+ dst0 = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
+ emit_store( bld, inst, 0, chan_index, dst0);
+ }
+ break;
+
+ case TGSI_OPCODE_SGE:
+ /* TGSI_OPCODE_SETGE */
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ src0 = emit_fetch( bld, inst, 0, chan_index );
+ src1 = emit_fetch( bld, inst, 1, chan_index );
+ tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_GEQUAL, src0, src1 );
+ dst0 = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
+ emit_store( bld, inst, 0, chan_index, dst0);
+ }
+ break;
+
+ case TGSI_OPCODE_MAD:
+ /* TGSI_OPCODE_MADD */
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ tmp0 = emit_fetch( bld, inst, 0, chan_index );
+ tmp1 = emit_fetch( bld, inst, 1, chan_index );
+ tmp2 = emit_fetch( bld, inst, 2, chan_index );
+ tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
+ tmp0 = lp_build_add( &bld->base, tmp0, tmp2);
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+
+ case TGSI_OPCODE_SUB:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ tmp0 = emit_fetch( bld, inst, 0, chan_index );
+ tmp1 = emit_fetch( bld, inst, 1, chan_index );
+ tmp0 = lp_build_sub( &bld->base, tmp0, tmp1);
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+
+ case TGSI_OPCODE_LRP:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ src0 = emit_fetch( bld, inst, 0, chan_index );
+ src1 = emit_fetch( bld, inst, 1, chan_index );
+ src2 = emit_fetch( bld, inst, 2, chan_index );
+ tmp0 = lp_build_sub( &bld->base, src1, src2 );
+ tmp0 = lp_build_mul( &bld->base, src0, tmp0 );
+ dst0 = lp_build_add( &bld->base, tmp0, src2 );
+ emit_store( bld, inst, 0, chan_index, dst0 );
+ }
+ break;
+
+ case TGSI_OPCODE_CND:
+ /* FIXME */
+ return 0;
+ break;
+
+ case TGSI_OPCODE_DP2A:
+ tmp0 = emit_fetch( bld, inst, 0, CHAN_X ); /* xmm0 = src[0].x */
+ tmp1 = emit_fetch( bld, inst, 1, CHAN_X ); /* xmm1 = src[1].x */
+ tmp0 = lp_build_mul( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 * xmm1 */
+ tmp1 = emit_fetch( bld, inst, 0, CHAN_Y ); /* xmm1 = src[0].y */
+ tmp2 = emit_fetch( bld, inst, 1, CHAN_Y ); /* xmm2 = src[1].y */
+ tmp1 = lp_build_mul( &bld->base, tmp1, tmp2); /* xmm1 = xmm1 * xmm2 */
+ tmp0 = lp_build_add( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 + xmm1 */
+ tmp1 = emit_fetch( bld, inst, 2, CHAN_X ); /* xmm1 = src[2].x */
+ tmp0 = lp_build_add( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 + xmm1 */
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ emit_store( bld, inst, 0, chan_index, tmp0); /* dest[ch] = xmm0 */
+ }
+ break;
+
+#if 0
+ case TGSI_OPCODE_FRC:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ tmp0 = emit_fetch( bld, inst, 0, chan_index );
+ emit_frc( bld, 0, 0 );
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+
+ case TGSI_OPCODE_CLAMP:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_FLR:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ tmp0 = emit_fetch( bld, inst, 0, chan_index );
+ emit_flr( bld, 0, 0 );
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+
+ case TGSI_OPCODE_ROUND:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ tmp0 = emit_fetch( bld, inst, 0, chan_index );
+ emit_rnd( bld, 0, 0 );
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+#endif
+
+ case TGSI_OPCODE_EX2: {
+ tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
+ tmp0 = lp_build_exp2( &bld->base, tmp0);
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+ }
+
+ case TGSI_OPCODE_LG2:
+ tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
+ tmp0 = lp_build_log2( &bld->base, tmp0);
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+
+ case TGSI_OPCODE_POW:
+ src0 = emit_fetch( bld, inst, 0, CHAN_X );
+ src1 = emit_fetch( bld, inst, 1, CHAN_X );
+ dst0 = lp_build_pow( &bld->base, src0, src1 );
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ emit_store( bld, inst, 0, chan_index, dst0 );
+ }
+ break;
+
+ case TGSI_OPCODE_XPD:
+ if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
+ IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ) {
+ tmp1 = emit_fetch( bld, inst, 1, CHAN_Z );
+ tmp3 = emit_fetch( bld, inst, 0, CHAN_Z );
+ }
+ if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) ||
+ IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) ) {
+ tmp0 = emit_fetch( bld, inst, 0, CHAN_Y );
+ tmp4 = emit_fetch( bld, inst, 1, CHAN_Y );
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) {
+ tmp2 = tmp0;
+ tmp2 = lp_build_mul( &bld->base, tmp2, tmp1);
+ tmp5 = tmp3;
+ tmp5 = lp_build_mul( &bld->base, tmp5, tmp4);
+ tmp2 = lp_build_sub( &bld->base, tmp2, tmp5);
+ emit_store( bld, inst, 0, CHAN_X, tmp2);
+ }
+ if( IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) ||
+ IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) ) {
+ tmp2 = emit_fetch( bld, inst, 1, CHAN_X );
+ tmp5 = emit_fetch( bld, inst, 0, CHAN_X );
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) {
+ tmp3 = lp_build_mul( &bld->base, tmp3, tmp2);
+ tmp1 = lp_build_mul( &bld->base, tmp1, tmp5);
+ tmp3 = lp_build_sub( &bld->base, tmp3, tmp1);
+ emit_store( bld, inst, 0, CHAN_Y, tmp3);
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) {
+ tmp5 = lp_build_mul( &bld->base, tmp5, tmp4);
+ tmp0 = lp_build_mul( &bld->base, tmp0, tmp2);
+ tmp5 = lp_build_sub( &bld->base, tmp5, tmp0);
+ emit_store( bld, inst, 0, CHAN_Z, tmp5);
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) {
+ tmp0 = bld->base.one;
+ emit_store( bld, inst, 0, CHAN_W, tmp0);
+ }
+ break;
+
+ case TGSI_OPCODE_ABS:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ tmp0 = emit_fetch( bld, inst, 0, chan_index );
+ tmp0 = lp_build_abs( &bld->base, tmp0 ) ;
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+
+ case TGSI_OPCODE_RCC:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_DPH:
+ tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
+ tmp1 = emit_fetch( bld, inst, 1, CHAN_X );
+ tmp0 = lp_build_mul( &bld->base, tmp0, tmp1);
+ tmp1 = emit_fetch( bld, inst, 0, CHAN_Y );
+ tmp2 = emit_fetch( bld, inst, 1, CHAN_Y );
+ tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
+ tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
+ tmp1 = emit_fetch( bld, inst, 0, CHAN_Z );
+ tmp2 = emit_fetch( bld, inst, 1, CHAN_Z );
+ tmp1 = lp_build_mul( &bld->base, tmp1, tmp2);
+ tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
+ tmp1 = emit_fetch( bld, inst, 1, CHAN_W );
+ tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+
+ case TGSI_OPCODE_COS:
+ tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
+ tmp0 = lp_build_cos( &bld->base, tmp0 );
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+
+ case TGSI_OPCODE_DDX:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_DDY:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_KILP:
+ /* predicated kill */
+ /* FIXME */
+ return 0;
+ break;
+
+ case TGSI_OPCODE_KIL:
+ /* conditional kill */
+ emit_kil( bld, inst );
+ break;
+
+ case TGSI_OPCODE_PK2H:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_PK2US:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_PK4B:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_PK4UB:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_RFL:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_SEQ:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ src0 = emit_fetch( bld, inst, 0, chan_index );
+ src1 = emit_fetch( bld, inst, 1, chan_index );
+ tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_EQUAL, src0, src1 );
+ dst0 = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
+ emit_store( bld, inst, 0, chan_index, dst0);
+ }
+ break;
+
+ case TGSI_OPCODE_SFL:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_SGT:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ src0 = emit_fetch( bld, inst, 0, chan_index );
+ src1 = emit_fetch( bld, inst, 1, chan_index );
+ tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_GREATER, src0, src1 );
+ dst0 = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
+ emit_store( bld, inst, 0, chan_index, dst0);
+ }
+ break;
+
+ case TGSI_OPCODE_SIN:
+ tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
+ tmp0 = lp_build_sin( &bld->base, tmp0 );
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+
+ case TGSI_OPCODE_SLE:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ src0 = emit_fetch( bld, inst, 0, chan_index );
+ src1 = emit_fetch( bld, inst, 1, chan_index );
+ tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_LEQUAL, src0, src1 );
+ dst0 = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
+ emit_store( bld, inst, 0, chan_index, dst0);
+ }
+ break;
+
+ case TGSI_OPCODE_SNE:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ src0 = emit_fetch( bld, inst, 0, chan_index );
+ src1 = emit_fetch( bld, inst, 1, chan_index );
+ tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_NOTEQUAL, src0, src1 );
+ dst0 = lp_build_select( &bld->base, tmp0, bld->base.one, bld->base.zero );
+ emit_store( bld, inst, 0, chan_index, dst0);
+ }
+ break;
+
+ case TGSI_OPCODE_STR:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_TEX:
+ emit_tex( bld, inst, FALSE, FALSE );
+ break;
+
+ case TGSI_OPCODE_TXD:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_UP2H:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_UP2US:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_UP4B:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_UP4UB:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_X2D:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_ARA:
+ return 0;
+ break;
+
+#if 0
+ case TGSI_OPCODE_ARR:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ tmp0 = emit_fetch( bld, inst, 0, chan_index );
+ emit_rnd( bld, 0, 0 );
+ emit_f2it( bld, 0 );
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+#endif
+
+ case TGSI_OPCODE_BRA:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_CAL:
+ return 0;
+ break;
+
+#if 0
+ case TGSI_OPCODE_RET:
+ emit_ret( bld );
+ break;
+#endif
+
+ case TGSI_OPCODE_END:
+ break;
+
+#if 0
+ case TGSI_OPCODE_SSG:
+ /* TGSI_OPCODE_SGN */
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ tmp0 = emit_fetch( bld, inst, 0, chan_index );
+ emit_sgn( bld, 0, 0 );
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+#endif
+
+ case TGSI_OPCODE_CMP:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ src0 = emit_fetch( bld, inst, 0, chan_index );
+ src1 = emit_fetch( bld, inst, 1, chan_index );
+ src2 = emit_fetch( bld, inst, 2, chan_index );
+ tmp0 = lp_build_cmp( &bld->base, PIPE_FUNC_LESS, src0, bld->base.zero );
+ dst0 = lp_build_select( &bld->base, tmp0, src1, src2);
+ emit_store( bld, inst, 0, chan_index, dst0);
+ }
+ break;
+
+ case TGSI_OPCODE_SCS:
+ IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_X ) {
+ tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
+ tmp0 = lp_build_cos( &bld->base, tmp0 );
+ emit_store( bld, inst, 0, CHAN_X, tmp0);
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Y ) {
+ tmp0 = emit_fetch( bld, inst, 0, CHAN_X );
+ tmp0 = lp_build_sin( &bld->base, tmp0 );
+ emit_store( bld, inst, 0, CHAN_Y, tmp0);
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_Z ) {
+ tmp0 = bld->base.zero;
+ emit_store( bld, inst, 0, CHAN_Z, tmp0);
+ }
+ IF_IS_DST0_CHANNEL_ENABLED( inst, CHAN_W ) {
+ tmp0 = bld->base.one;
+ emit_store( bld, inst, 0, CHAN_W, tmp0);
+ }
+ break;
+
+ case TGSI_OPCODE_TXB:
+ emit_tex( bld, inst, TRUE, FALSE );
+ break;
+
+ case TGSI_OPCODE_NRM:
+ /* fall-through */
+ case TGSI_OPCODE_NRM4:
+ /* 3 or 4-component normalization */
+ {
+ uint dims = (inst->Instruction.Opcode == TGSI_OPCODE_NRM) ? 3 : 4;
+
+ if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X) ||
+ IS_DST0_CHANNEL_ENABLED(inst, CHAN_Y) ||
+ IS_DST0_CHANNEL_ENABLED(inst, CHAN_Z) ||
+ (IS_DST0_CHANNEL_ENABLED(inst, CHAN_W) && dims == 4)) {
+
+ /* NOTE: the xmm register constraints from the original tgsi_sse2.c code do not apply here. */
+
+ /* xmm4 = src.x */
+ /* xmm0 = src.x * src.x */
+ tmp0 = emit_fetch(bld, inst, 0, CHAN_X);
+ if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X)) {
+ tmp4 = tmp0;
+ }
+ tmp0 = lp_build_mul( &bld->base, tmp0, tmp0);
+
+ /* xmm5 = src.y */
+ /* xmm0 = xmm0 + src.y * src.y */
+ tmp1 = emit_fetch(bld, inst, 0, CHAN_Y);
+ if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Y)) {
+ tmp5 = tmp1;
+ }
+ tmp1 = lp_build_mul( &bld->base, tmp1, tmp1);
+ tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
+
+ /* xmm6 = src.z */
+ /* xmm0 = xmm0 + src.z * src.z */
+ tmp1 = emit_fetch(bld, inst, 0, CHAN_Z);
+ if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Z)) {
+ tmp6 = tmp1;
+ }
+ tmp1 = lp_build_mul( &bld->base, tmp1, tmp1);
+ tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
+
+ if (dims == 4) {
+ /* xmm7 = src.w */
+ /* xmm0 = xmm0 + src.w * src.w */
+ tmp1 = emit_fetch(bld, inst, 0, CHAN_W);
+ if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_W)) {
+ tmp7 = tmp1;
+ }
+ tmp1 = lp_build_mul( &bld->base, tmp1, tmp1);
+ tmp0 = lp_build_add( &bld->base, tmp0, tmp1);
+ }
+
+ /* xmm1 = 1 / sqrt(xmm0) */
+ tmp1 = lp_build_rsqrt( &bld->base, tmp0);
+
+ /* dst.x = xmm1 * src.x */
+ if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_X)) {
+ tmp4 = lp_build_mul( &bld->base, tmp4, tmp1);
+ emit_store(bld, inst, 0, CHAN_X, tmp4);
+ }
+
+ /* dst.y = xmm1 * src.y */
+ if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Y)) {
+ tmp5 = lp_build_mul( &bld->base, tmp5, tmp1);
+ emit_store(bld, inst, 0, CHAN_Y, tmp5);
+ }
+
+ /* dst.z = xmm1 * src.z */
+ if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_Z)) {
+ tmp6 = lp_build_mul( &bld->base, tmp6, tmp1);
+ emit_store(bld, inst, 0, CHAN_Z, tmp6);
+ }
+
+ /* dst.w = xmm1 * src.w */
+ if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_W) && dims == 4) {
+ tmp7 = lp_build_mul( &bld->base, tmp7, tmp1);
+ emit_store(bld, inst, 0, CHAN_W, tmp7);
+ }
+ }
+
+ /* dst0.w = 1.0 */
+ if (IS_DST0_CHANNEL_ENABLED(inst, CHAN_W) && dims == 3) {
+ tmp0 = bld->base.one;
+ emit_store(bld, inst, 0, CHAN_W, tmp0);
+ }
+ }
+ break;
+
+ case TGSI_OPCODE_DIV:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_DP2:
+ tmp0 = emit_fetch( bld, inst, 0, CHAN_X ); /* xmm0 = src[0].x */
+ tmp1 = emit_fetch( bld, inst, 1, CHAN_X ); /* xmm1 = src[1].x */
+ tmp0 = lp_build_mul( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 * xmm1 */
+ tmp1 = emit_fetch( bld, inst, 0, CHAN_Y ); /* xmm1 = src[0].y */
+ tmp2 = emit_fetch( bld, inst, 1, CHAN_Y ); /* xmm2 = src[1].y */
+ tmp1 = lp_build_mul( &bld->base, tmp1, tmp2); /* xmm1 = xmm1 * xmm2 */
+ tmp0 = lp_build_add( &bld->base, tmp0, tmp1); /* xmm0 = xmm0 + xmm1 */
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ emit_store( bld, inst, 0, chan_index, tmp0); /* dest[ch] = xmm0 */
+ }
+ break;
+
+ case TGSI_OPCODE_TXL:
+ emit_tex( bld, inst, TRUE, FALSE );
+ break;
+
+ case TGSI_OPCODE_TXP:
+ emit_tex( bld, inst, FALSE, TRUE );
+ break;
+
+ case TGSI_OPCODE_BRK:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_IF:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_BGNFOR:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_REP:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_ELSE:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_ENDIF:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_ENDFOR:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_ENDREP:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_PUSHA:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_POPA:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_CEIL:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_I2F:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_NOT:
+ return 0;
+ break;
+
+#if 0
+ case TGSI_OPCODE_TRUNC:
+ FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
+ tmp0 = emit_fetch( bld, inst, 0, chan_index );
+ emit_f2it( bld, 0 );
+ emit_i2f( bld, 0 );
+ emit_store( bld, inst, 0, chan_index, tmp0);
+ }
+ break;
+#endif
+
+ case TGSI_OPCODE_SHL:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_SHR:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_AND:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_OR:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_MOD:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_XOR:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_SAD:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_TXF:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_TXQ:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_CONT:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_EMIT:
+ return 0;
+ break;
+
+ case TGSI_OPCODE_ENDPRIM:
+ return 0;
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+
+void
+lp_build_tgsi_soa(LLVMBuilderRef builder,
+ const struct tgsi_token *tokens,
+ union lp_type type,
+ struct lp_build_mask_context *mask,
+ LLVMValueRef consts_ptr,
+ const LLVMValueRef *pos,
+ const LLVMValueRef (*inputs)[NUM_CHANNELS],
+ LLVMValueRef (*outputs)[NUM_CHANNELS],
+ struct lp_build_sampler_soa *sampler)
+{
+ struct lp_build_tgsi_soa_context bld;
+ struct tgsi_parse_context parse;
+ uint num_immediates = 0;
+ unsigned i;
+
+ /* Setup build context */
+ memset(&bld, 0, sizeof bld);
+ lp_build_context_init(&bld.base, builder, type);
+ bld.mask = mask;
+ bld.pos = pos;
+ bld.inputs = inputs;
+ bld.outputs = outputs;
+ bld.consts_ptr = consts_ptr;
+ bld.sampler = sampler;
+
+ tgsi_parse_init( &parse, tokens );
+
+ while( !tgsi_parse_end_of_tokens( &parse ) ) {
+ tgsi_parse_token( &parse );
+
+ switch( parse.FullToken.Token.Type ) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ /* Inputs already interpolated */
+ break;
+
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ if (!emit_instruction( &bld, &parse.FullToken.FullInstruction )) {
+ unsigned opcode = parse.FullToken.FullInstruction.Instruction.Opcode;
+ const struct tgsi_opcode_info *info = tgsi_get_opcode_info(opcode);
+ _debug_printf("warning: failed to translate tgsi opcode %s to LLVM\n",
+ info ? info->mnemonic : "<invalid>");
+ }
+ break;
+
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ /* simply copy the immediate values into the next immediates[] slot */
+ {
+ const uint size = parse.FullToken.FullImmediate.Immediate.NrTokens - 1;
+ assert(size <= 4);
+ assert(num_immediates < LP_MAX_IMMEDIATES);
+ for( i = 0; i < size; ++i )
+ bld.immediates[num_immediates][i] =
+ lp_build_const_scalar(type, parse.FullToken.FullImmediate.u[i].Float);
+ for( i = size; i < 4; ++i )
+ bld.immediates[num_immediates][i] = bld.base.undef;
+ num_immediates++;
+ }
+ break;
+
+ default:
+ assert( 0 );
+ }
+ }
+
+ tgsi_parse_free( &parse );
+}
+
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_type.c b/src/gallium/drivers/llvmpipe/lp_bld_type.c
new file mode 100644
index 0000000000..577644b7ab
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_type.c
@@ -0,0 +1,181 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "util/u_debug.h"
+
+#include "lp_bld_type.h"
+#include "lp_bld_const.h"
+
+
+LLVMTypeRef
+lp_build_elem_type(union lp_type type)
+{
+ if (type.floating) {
+ switch(type.width) {
+ case 32:
+ return LLVMFloatType();
+ break;
+ case 64:
+ return LLVMDoubleType();
+ break;
+ default:
+ assert(0);
+ return LLVMFloatType();
+ }
+ }
+ else {
+ return LLVMIntType(type.width);
+ }
+}
+
+
+LLVMTypeRef
+lp_build_vec_type(union lp_type type)
+{
+ LLVMTypeRef elem_type = lp_build_elem_type(type);
+ return LLVMVectorType(elem_type, type.length);
+}
+
+
+/**
+ * This function is a mirror of lp_build_elem_type() above.
+ *
+ * XXX: It might be easier/more efficient to just recreate the type and
+ * check for identity.
+ */
+boolean
+lp_check_elem_type(union lp_type type, LLVMTypeRef elem_type)
+{
+ LLVMTypeKind elem_kind;
+
+ assert(elem_type);
+ if(!elem_type)
+ return FALSE;
+
+ elem_kind = LLVMGetTypeKind(elem_type);
+
+ if (type.floating) {
+ switch(type.width) {
+ case 32:
+ if(elem_kind != LLVMFloatTypeKind)
+ return FALSE;
+ break;
+ case 64:
+ if(elem_kind != LLVMDoubleTypeKind)
+ return FALSE;
+ break;
+ default:
+ assert(0);
+ return FALSE;
+ }
+ }
+ else {
+ if(elem_kind != LLVMIntegerTypeKind)
+ return FALSE;
+
+ if(LLVMGetIntTypeWidth(elem_type) != type.width)
+ return FALSE;
+ }
+
+ return TRUE;
+}
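
The XXX note above suggests a simpler formulation: since LLVM uniques (interns) its types, rebuilding the element type and comparing pointers is an identity test. A hedged sketch of that alternative, not used anywhere in this patch:

/* Hypothetical alternative to lp_check_elem_type(): rebuild and compare.
 * LLVM types are uniqued, so pointer equality means type identity. */
static boolean
lp_check_elem_type_by_identity(union lp_type type, LLVMTypeRef elem_type)
{
   return elem_type == lp_build_elem_type(type) ? TRUE : FALSE;
}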
+
+
+boolean
+lp_check_vec_type(union lp_type type, LLVMTypeRef vec_type)
+{
+ LLVMTypeRef elem_type;
+
+ assert(vec_type);
+ if(!vec_type)
+ return FALSE;
+
+ if(LLVMGetTypeKind(vec_type) != LLVMVectorTypeKind)
+ return FALSE;
+
+ if(LLVMGetVectorSize(vec_type) != type.length)
+ return FALSE;
+
+ elem_type = LLVMGetElementType(vec_type);
+
+ return lp_check_elem_type(type, elem_type);
+}
+
+
+boolean
+lp_check_value(union lp_type type, LLVMValueRef val)
+{
+ LLVMTypeRef vec_type;
+
+ assert(val);
+ if(!val)
+ return FALSE;
+
+ vec_type = LLVMTypeOf(val);
+
+ return lp_check_vec_type(type, vec_type);
+}
+
+
+LLVMTypeRef
+lp_build_int_elem_type(union lp_type type)
+{
+ return LLVMIntType(type.width);
+}
+
+
+LLVMTypeRef
+lp_build_int_vec_type(union lp_type type)
+{
+ LLVMTypeRef elem_type = lp_build_int_elem_type(type);
+ return LLVMVectorType(elem_type, type.length);
+}
+
+
+union lp_type
+lp_int_type(union lp_type type)
+{
+ union lp_type int_type;
+ int_type.value = 0;
+ int_type.width = type.width;
+ int_type.length = type.length;
+ return int_type;
+}
+
+
+void
+lp_build_context_init(struct lp_build_context *bld,
+ LLVMBuilderRef builder,
+ union lp_type type)
+{
+ bld->builder = builder;
+ bld->type = type;
+ bld->undef = lp_build_undef(type);
+ bld->zero = lp_build_zero(type);
+ bld->one = lp_build_one(type);
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_bld_type.h b/src/gallium/drivers/llvmpipe/lp_bld_type.h
new file mode 100644
index 0000000000..9933e0b45c
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_bld_type.h
@@ -0,0 +1,178 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Convenient representation of SIMD types.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#ifndef LP_BLD_TYPE_H
+#define LP_BLD_TYPE_H
+
+
+#include <llvm-c/Core.h>
+
+#include <pipe/p_compiler.h>
+
+
+/**
+ * Several functions can only cope with vectors of length up to this value.
+ * You may need to increase that value if you want to represent bigger vectors.
+ */
+#define LP_MAX_VECTOR_LENGTH 16
+
+#define LP_MAX_TYPE_WIDTH 64
+
+
+/**
+ * The LLVM type system can't conveniently express all the things we care about
+ * on the types used for intermediate computations, such as signed vs unsigned,
+ * normalized values, or fixed point.
+ */
+union lp_type {
+ struct {
+ /**
+ * Floating-point. Cannot be used together with fixed. Plain integer
+ * types leave both this and fixed at zero.
+ */
+ unsigned floating:1;
+
+ /**
+ * Fixed-point. Cannot be used together with floating. Plain integer
+ * types leave both this and floating at zero.
+ */
+ unsigned fixed:1;
+
+ /**
+ * Whether it can represent negative values or not.
+ *
+ * If this is not set for floating point, it means that all values are
+ * assumed to be positive.
+ */
+ unsigned sign:1;
+
+ /**
+ * Whether values are normalized to fit [0, 1] interval, or [-1, 1]
+ * interval for signed types.
+ *
+ * For integer types it means the representable integer range should be
+ * interpreted as the interval above.
+ *
+ * For floating and fixed point formats it means the values should be
+ * clamped to the interval above.
+ */
+ unsigned norm:1;
+
+ /**
+ * Element width.
+ *
+ * For fixed point values, the fixed point is assumed to be at half the
+ * width.
+ */
+ unsigned width:14;
+
+ /**
+ * Vector length.
+ *
+ * width*length should be a power of two, greater than or equal to eight.
+ *
+ * @sa LP_MAX_VECTOR_LENGTH
+ */
+ unsigned length:14;
+ };
+ uint32_t value;
+};
+
+
+/**
+ * We need most of the information here in order to correctly and efficiently
+ * translate an arithmetic operation into LLVM IR. Putting it here avoids the
+ * trouble of passing it as parameters.
+ */
+struct lp_build_context
+{
+ LLVMBuilderRef builder;
+
+ /**
+ * This not only describes the input/output LLVM types, but also whether
+ * to normalize/clamp the results.
+ */
+ union lp_type type;
+
+ /** Same as lp_build_undef(type) */
+ LLVMValueRef undef;
+
+ /** Same as lp_build_zero(type) */
+ LLVMValueRef zero;
+
+ /** Same as lp_build_one(type) */
+ LLVMValueRef one;
+};
+
+
+LLVMTypeRef
+lp_build_elem_type(union lp_type type);
+
+
+LLVMTypeRef
+lp_build_vec_type(union lp_type type);
+
+
+boolean
+lp_check_elem_type(union lp_type type, LLVMTypeRef elem_type);
+
+
+boolean
+lp_check_vec_type(union lp_type type, LLVMTypeRef vec_type);
+
+
+boolean
+lp_check_value(union lp_type type, LLVMValueRef val);
+
+
+LLVMTypeRef
+lp_build_int_elem_type(union lp_type type);
+
+
+LLVMTypeRef
+lp_build_int_vec_type(union lp_type type);
+
+
+union lp_type
+lp_int_type(union lp_type type);
+
+
+void
+lp_build_context_init(struct lp_build_context *bld,
+ LLVMBuilderRef builder,
+ union lp_type type);
+
+
+#endif /* !LP_BLD_TYPE_H */
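
As a usage illustration of the type union and build context declared above, the following sketch describes a vector of four 32-bit floats and initializes a context for it. The function name is hypothetical; lp_build_context_init() and the bit-fields are as declared in this header.

/* Illustrative sketch: a 4 x float32 SIMD type and its build context. */
static void
init_float4_context_sketch(struct lp_build_context *bld,
                           LLVMBuilderRef builder)
{
   union lp_type type;

   type.value = 0;        /* start with all bit-fields cleared */
   type.floating = TRUE;  /* 32-bit IEEE floating-point elements */
   type.sign = TRUE;      /* floating-point values are signed */
   type.norm = FALSE;     /* no clamping to [0,1] or [-1,1] */
   type.width = 32;
   type.length = 4;       /* width*length == 128 bits */

   lp_build_context_init(bld, builder, type);
}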
diff --git a/src/gallium/drivers/llvmpipe/lp_buffer.c b/src/gallium/drivers/llvmpipe/lp_buffer.c
new file mode 100644
index 0000000000..66f1f8e138
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_buffer.c
@@ -0,0 +1,150 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "util/u_memory.h"
+#include "util/u_math.h"
+
+#include "lp_winsys.h"
+#include "lp_screen.h"
+#include "lp_texture.h"
+#include "lp_buffer.h"
+
+
+static void *
+llvmpipe_buffer_map(struct pipe_screen *screen,
+ struct pipe_buffer *buf,
+ unsigned flags)
+{
+ struct llvmpipe_buffer *llvmpipe_buf = llvmpipe_buffer(buf);
+ return llvmpipe_buf->data;
+}
+
+
+static void
+llvmpipe_buffer_unmap(struct pipe_screen *screen,
+ struct pipe_buffer *buf)
+{
+}
+
+
+static void
+llvmpipe_buffer_destroy(struct pipe_buffer *buf)
+{
+ struct llvmpipe_buffer *sbuf = llvmpipe_buffer(buf);
+
+ if (!sbuf->userBuffer)
+ align_free(sbuf->data);
+
+ FREE(sbuf);
+}
+
+
+static struct pipe_buffer *
+llvmpipe_buffer_create(struct pipe_screen *screen,
+ unsigned alignment,
+ unsigned usage,
+ unsigned size)
+{
+ struct llvmpipe_buffer *buffer = CALLOC_STRUCT(llvmpipe_buffer);
+
+ pipe_reference_init(&buffer->base.reference, 1);
+ buffer->base.screen = screen;
+ buffer->base.alignment = MAX2(alignment, 16);
+ buffer->base.usage = usage;
+ buffer->base.size = size;
+
+ buffer->data = align_malloc(size, alignment);
+
+ return &buffer->base;
+}
+
+
+/**
+ * Create buffer which wraps user-space data.
+ */
+static struct pipe_buffer *
+llvmpipe_user_buffer_create(struct pipe_screen *screen,
+ void *ptr,
+ unsigned bytes)
+{
+ struct llvmpipe_buffer *buffer;
+
+ buffer = CALLOC_STRUCT(llvmpipe_buffer);
+ if(!buffer)
+ return NULL;
+
+ pipe_reference_init(&buffer->base.reference, 1);
+ buffer->base.screen = screen;
+ buffer->base.size = bytes;
+ buffer->userBuffer = TRUE;
+ buffer->data = ptr;
+
+ return &buffer->base;
+}
+
+
+static void
+llvmpipe_fence_reference(struct pipe_screen *screen,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+}
+
+
+static int
+llvmpipe_fence_signalled(struct pipe_screen *screen,
+ struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ return 0;
+}
+
+
+static int
+llvmpipe_fence_finish(struct pipe_screen *screen,
+ struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ return 0;
+}
+
+
+void
+llvmpipe_init_screen_buffer_funcs(struct pipe_screen *screen)
+{
+ screen->buffer_create = llvmpipe_buffer_create;
+ screen->user_buffer_create = llvmpipe_user_buffer_create;
+ screen->buffer_map = llvmpipe_buffer_map;
+ screen->buffer_unmap = llvmpipe_buffer_unmap;
+ screen->buffer_destroy = llvmpipe_buffer_destroy;
+
+ screen->fence_reference = llvmpipe_fence_reference;
+ screen->fence_signalled = llvmpipe_fence_signalled;
+ screen->fence_finish = llvmpipe_fence_finish;
+
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_buffer.h b/src/gallium/drivers/llvmpipe/lp_buffer.h
new file mode 100644
index 0000000000..d6b8184a0b
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_buffer.h
@@ -0,0 +1,55 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef LP_BUFFER_H
+#define LP_BUFFER_H
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_state.h"
+
+
+struct llvmpipe_buffer
+{
+ struct pipe_buffer base;
+ boolean userBuffer; /**< Is this a user-space buffer? */
+ void *data;
+};
+
+
+/** Cast wrapper */
+static INLINE struct llvmpipe_buffer *
+llvmpipe_buffer( struct pipe_buffer *buf )
+{
+ return (struct llvmpipe_buffer *)buf;
+}
+
+
+void
+llvmpipe_init_screen_buffer_funcs(struct pipe_screen *screen);
+
+
+#endif /* LP_BUFFER_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_clear.c b/src/gallium/drivers/llvmpipe/lp_clear.c
new file mode 100644
index 0000000000..bdcff94b9b
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_clear.c
@@ -0,0 +1,81 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Author:
+ * Brian Paul
+ * Michel Dänzer
+ */
+
+
+#include "pipe/p_defines.h"
+#include "util/u_pack_color.h"
+#include "lp_clear.h"
+#include "lp_context.h"
+#include "lp_surface.h"
+#include "lp_state.h"
+#include "lp_tile_cache.h"
+
+
+/**
+ * Clear the given buffers to the specified values.
+ * No masking, no scissor (clear entire buffer).
+ */
+void
+llvmpipe_clear(struct pipe_context *pipe, unsigned buffers, const float *rgba,
+ double depth, unsigned stencil)
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+ unsigned cv;
+ uint i;
+
+ if (llvmpipe->no_rast)
+ return;
+
+#if 0
+ llvmpipe_update_derived(llvmpipe); /* not needed?? */
+#endif
+
+ if (buffers & PIPE_CLEAR_COLOR) {
+ for (i = 0; i < llvmpipe->framebuffer.nr_cbufs; i++) {
+ struct pipe_surface *ps = llvmpipe->framebuffer.cbufs[i];
+
+ util_pack_color(rgba, ps->format, &cv);
+ lp_tile_cache_clear(llvmpipe->cbuf_cache[i], rgba, cv);
+ }
+ llvmpipe->dirty_render_cache = TRUE;
+ }
+
+ if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
+ struct pipe_surface *ps = llvmpipe->framebuffer.zsbuf;
+
+ cv = util_pack_z_stencil(ps->format, depth, stencil);
+
+ /* non-cached surface */
+ pipe->surface_fill(pipe, ps, 0, 0, ps->width, ps->height, cv);
+ }
+}
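
util_pack_color converts the float RGBA clear color into the color buffer's actual pixel format before the packed value and the float color are handed to the tile cache. A minimal, self-contained sketch of that packing for one assumed layout (8-bit BGRA in a 32-bit word); the real helper dispatches on ps->format, so this stands in for just one case:

    #include <stdint.h>
    #include <stdio.h>

    /* Clamp a float in [0,1] to an 8-bit channel. */
    static uint8_t float_to_ubyte(float f)
    {
       if (f <= 0.0f) return 0;
       if (f >= 1.0f) return 255;
       return (uint8_t)(f * 255.0f + 0.5f);
    }

    /* Pack RGBA floats as B8G8R8A8, A in the top byte (an assumed layout). */
    static uint32_t pack_b8g8r8a8(const float rgba[4])
    {
       return ((uint32_t)float_to_ubyte(rgba[3]) << 24) |
              ((uint32_t)float_to_ubyte(rgba[0]) << 16) |
              ((uint32_t)float_to_ubyte(rgba[1]) <<  8) |
               (uint32_t)float_to_ubyte(rgba[2]);
    }

    int main(void)
    {
       const float green[4] = { 0.0f, 1.0f, 0.0f, 1.0f };
       printf("packed clear value: 0x%08x\n", pack_b8g8r8a8(green));
       return 0;
    }
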
diff --git a/src/gallium/drivers/llvmpipe/lp_clear.h b/src/gallium/drivers/llvmpipe/lp_clear.h
new file mode 100644
index 0000000000..6d4ffccdf4
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_clear.h
@@ -0,0 +1,43 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Author:
+ * Brian Paul
+ */
+
+#ifndef LP_CLEAR_H
+#define LP_CLEAR_H
+
+#include "pipe/p_state.h"
+struct pipe_context;
+
+extern void
+llvmpipe_clear(struct pipe_context *pipe, unsigned buffers, const float *rgba,
+ double depth, unsigned stencil);
+
+
+#endif /* LP_CLEAR_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_context.c b/src/gallium/drivers/llvmpipe/lp_context.c
new file mode 100644
index 0000000000..a4b2bd8c2a
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_context.c
@@ -0,0 +1,295 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ * Copyright 2008 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Author:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "draw/draw_context.h"
+#include "pipe/p_defines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "lp_clear.h"
+#include "lp_context.h"
+#include "lp_flush.h"
+#include "lp_prim_setup.h"
+#include "lp_prim_vbuf.h"
+#include "lp_state.h"
+#include "lp_surface.h"
+#include "lp_tile_cache.h"
+#include "lp_tex_cache.h"
+#include "lp_texture.h"
+#include "lp_winsys.h"
+#include "lp_query.h"
+
+
+
+/**
+ * Map any drawing surfaces which aren't already mapped
+ */
+void
+llvmpipe_map_transfers(struct llvmpipe_context *lp)
+{
+ struct pipe_screen *screen = lp->pipe.screen;
+ struct pipe_surface *zsbuf = lp->framebuffer.zsbuf;
+ unsigned i;
+
+ for (i = 0; i < lp->framebuffer.nr_cbufs; i++) {
+ lp_tile_cache_map_transfers(lp->cbuf_cache[i]);
+ }
+
+ if(zsbuf) {
+ if(!lp->zsbuf_transfer)
+ lp->zsbuf_transfer = screen->get_tex_transfer(screen, zsbuf->texture,
+ zsbuf->face, zsbuf->level, zsbuf->zslice,
+ PIPE_TRANSFER_READ_WRITE,
+ 0, 0, zsbuf->width, zsbuf->height);
+ if(lp->zsbuf_transfer && !lp->zsbuf_map)
+ lp->zsbuf_map = screen->transfer_map(screen, lp->zsbuf_transfer);
+
+ }
+}
+
+
+/**
+ * Unmap any mapped drawing surfaces
+ */
+void
+llvmpipe_unmap_transfers(struct llvmpipe_context *lp)
+{
+ uint i;
+
+ for (i = 0; i < lp->framebuffer.nr_cbufs; i++) {
+ lp_tile_cache_unmap_transfers(lp->cbuf_cache[i]);
+ }
+
+ if(lp->zsbuf_transfer) {
+ struct pipe_screen *screen = lp->pipe.screen;
+
+ if(lp->zsbuf_map) {
+ screen->transfer_unmap(screen, lp->zsbuf_transfer);
+ lp->zsbuf_map = NULL;
+ }
+ }
+}
+
+
+static void llvmpipe_destroy( struct pipe_context *pipe )
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context( pipe );
+ uint i;
+
+ if (llvmpipe->draw)
+ draw_destroy( llvmpipe->draw );
+
+ for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
+ lp_destroy_tile_cache(llvmpipe->cbuf_cache[i]);
+
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
+ lp_destroy_tex_tile_cache(llvmpipe->tex_cache[i]);
+
+ for (i = 0; i < Elements(llvmpipe->constants); i++) {
+ if (llvmpipe->constants[i].buffer) {
+ pipe_buffer_reference(&llvmpipe->constants[i].buffer, NULL);
+ }
+ }
+
+ align_free( llvmpipe );
+}
+
+static unsigned int
+llvmpipe_is_texture_referenced( struct pipe_context *pipe,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level)
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context( pipe );
+ unsigned i;
+
+ if(llvmpipe->dirty_render_cache) {
+ for (i = 0; i < llvmpipe->framebuffer.nr_cbufs; i++) {
+ if(llvmpipe->framebuffer.cbufs[i] &&
+ llvmpipe->framebuffer.cbufs[i]->texture == texture)
+ return PIPE_REFERENCED_FOR_WRITE;
+ }
+ if(llvmpipe->framebuffer.zsbuf &&
+ llvmpipe->framebuffer.zsbuf->texture == texture)
+ return PIPE_REFERENCED_FOR_WRITE;
+ }
+
+ return PIPE_UNREFERENCED;
+}
+
+static unsigned int
+llvmpipe_is_buffer_referenced( struct pipe_context *pipe,
+ struct pipe_buffer *buf)
+{
+ return PIPE_UNREFERENCED;
+}
+
+struct pipe_context *
+llvmpipe_create( struct pipe_screen *screen )
+{
+ struct llvmpipe_context *llvmpipe;
+ uint i;
+
+ llvmpipe = align_malloc(sizeof(struct llvmpipe_context), 16);
+ if (!llvmpipe)
+ return NULL;
+
+ util_init_math();
+
+ memset(llvmpipe, 0, sizeof *llvmpipe);
+
+ llvmpipe->pipe.winsys = screen->winsys;
+ llvmpipe->pipe.screen = screen;
+ llvmpipe->pipe.destroy = llvmpipe_destroy;
+
+ /* state setters */
+ llvmpipe->pipe.create_blend_state = llvmpipe_create_blend_state;
+ llvmpipe->pipe.bind_blend_state = llvmpipe_bind_blend_state;
+ llvmpipe->pipe.delete_blend_state = llvmpipe_delete_blend_state;
+
+ llvmpipe->pipe.create_sampler_state = llvmpipe_create_sampler_state;
+ llvmpipe->pipe.bind_sampler_states = llvmpipe_bind_sampler_states;
+ llvmpipe->pipe.delete_sampler_state = llvmpipe_delete_sampler_state;
+
+ llvmpipe->pipe.create_depth_stencil_alpha_state = llvmpipe_create_depth_stencil_state;
+ llvmpipe->pipe.bind_depth_stencil_alpha_state = llvmpipe_bind_depth_stencil_state;
+ llvmpipe->pipe.delete_depth_stencil_alpha_state = llvmpipe_delete_depth_stencil_state;
+
+ llvmpipe->pipe.create_rasterizer_state = llvmpipe_create_rasterizer_state;
+ llvmpipe->pipe.bind_rasterizer_state = llvmpipe_bind_rasterizer_state;
+ llvmpipe->pipe.delete_rasterizer_state = llvmpipe_delete_rasterizer_state;
+
+ llvmpipe->pipe.create_fs_state = llvmpipe_create_fs_state;
+ llvmpipe->pipe.bind_fs_state = llvmpipe_bind_fs_state;
+ llvmpipe->pipe.delete_fs_state = llvmpipe_delete_fs_state;
+
+ llvmpipe->pipe.create_vs_state = llvmpipe_create_vs_state;
+ llvmpipe->pipe.bind_vs_state = llvmpipe_bind_vs_state;
+ llvmpipe->pipe.delete_vs_state = llvmpipe_delete_vs_state;
+
+ llvmpipe->pipe.set_blend_color = llvmpipe_set_blend_color;
+ llvmpipe->pipe.set_clip_state = llvmpipe_set_clip_state;
+ llvmpipe->pipe.set_constant_buffer = llvmpipe_set_constant_buffer;
+ llvmpipe->pipe.set_framebuffer_state = llvmpipe_set_framebuffer_state;
+ llvmpipe->pipe.set_polygon_stipple = llvmpipe_set_polygon_stipple;
+ llvmpipe->pipe.set_scissor_state = llvmpipe_set_scissor_state;
+ llvmpipe->pipe.set_sampler_textures = llvmpipe_set_sampler_textures;
+ llvmpipe->pipe.set_viewport_state = llvmpipe_set_viewport_state;
+
+ llvmpipe->pipe.set_vertex_buffers = llvmpipe_set_vertex_buffers;
+ llvmpipe->pipe.set_vertex_elements = llvmpipe_set_vertex_elements;
+
+ llvmpipe->pipe.draw_arrays = llvmpipe_draw_arrays;
+ llvmpipe->pipe.draw_elements = llvmpipe_draw_elements;
+ llvmpipe->pipe.draw_range_elements = llvmpipe_draw_range_elements;
+ llvmpipe->pipe.set_edgeflags = llvmpipe_set_edgeflags;
+
+
+ llvmpipe->pipe.clear = llvmpipe_clear;
+ llvmpipe->pipe.flush = llvmpipe_flush;
+
+ llvmpipe->pipe.is_texture_referenced = llvmpipe_is_texture_referenced;
+ llvmpipe->pipe.is_buffer_referenced = llvmpipe_is_buffer_referenced;
+
+ llvmpipe_init_query_funcs( llvmpipe );
+ llvmpipe_init_texture_funcs( llvmpipe );
+
+ /*
+ * Alloc caches for accessing drawing surfaces and textures.
+ */
+ for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
+ llvmpipe->cbuf_cache[i] = lp_create_tile_cache( screen );
+
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
+ llvmpipe->tex_cache[i] = lp_create_tex_tile_cache( screen );
+
+
+ /* vertex shader samplers */
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
+ llvmpipe->tgsi.vert_samplers[i].base.get_samples = lp_get_samples;
+ llvmpipe->tgsi.vert_samplers[i].processor = TGSI_PROCESSOR_VERTEX;
+ llvmpipe->tgsi.vert_samplers[i].cache = llvmpipe->tex_cache[i];
+ llvmpipe->tgsi.vert_samplers_list[i] = &llvmpipe->tgsi.vert_samplers[i];
+ }
+
+ /* fragment shader samplers */
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
+ llvmpipe->tgsi.frag_samplers[i].base.get_samples = lp_get_samples;
+ llvmpipe->tgsi.frag_samplers[i].processor = TGSI_PROCESSOR_FRAGMENT;
+ llvmpipe->tgsi.frag_samplers[i].cache = llvmpipe->tex_cache[i];
+ llvmpipe->tgsi.frag_samplers_list[i] = &llvmpipe->tgsi.frag_samplers[i];
+ }
+
+ /*
+ * Create drawing context and plug our rendering stage into it.
+ */
+ llvmpipe->draw = draw_create();
+ if (!llvmpipe->draw)
+ goto fail;
+
+ draw_texture_samplers(llvmpipe->draw,
+ PIPE_MAX_SAMPLERS,
+ (struct tgsi_sampler **)
+ llvmpipe->tgsi.vert_samplers_list);
+
+ llvmpipe->setup = lp_draw_render_stage(llvmpipe);
+ if (!llvmpipe->setup)
+ goto fail;
+
+ if (debug_get_bool_option( "LP_NO_RAST", FALSE ))
+ llvmpipe->no_rast = TRUE;
+
+ if (debug_get_bool_option( "LP_NO_VBUF", FALSE )) {
+ /* Deprecated path -- vbuf is the intended interface to the draw module:
+ */
+ draw_set_rasterize_stage(llvmpipe->draw, llvmpipe->setup);
+ }
+ else {
+ lp_init_vbuf(llvmpipe);
+ }
+
+ /* plug in AA line/point stages */
+ draw_install_aaline_stage(llvmpipe->draw, &llvmpipe->pipe);
+ draw_install_aapoint_stage(llvmpipe->draw, &llvmpipe->pipe);
+
+#if USE_DRAW_STAGE_PSTIPPLE
+ /* Do polygon stipple w/ texture map + frag prog? */
+ draw_install_pstipple_stage(llvmpipe->draw, &llvmpipe->pipe);
+#endif
+
+ lp_init_surface_functions(llvmpipe);
+
+ return &llvmpipe->pipe;
+
+ fail:
+ llvmpipe_destroy(&llvmpipe->pipe);
+ return NULL;
+}
+
diff --git a/src/gallium/drivers/llvmpipe/lp_context.h b/src/gallium/drivers/llvmpipe/lp_context.h
new file mode 100644
index 0000000000..8d5a0d4f1f
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_context.h
@@ -0,0 +1,155 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef LP_CONTEXT_H
+#define LP_CONTEXT_H
+
+#include "pipe/p_context.h"
+
+#include "draw/draw_vertex.h"
+
+#include "lp_tex_sample.h"
+#include "lp_jit.h"
+
+
+struct llvmpipe_vbuf_render;
+struct draw_context;
+struct draw_stage;
+struct llvmpipe_tile_cache;
+struct llvmpipe_tex_tile_cache;
+struct lp_fragment_shader;
+struct lp_vertex_shader;
+struct lp_blend_state;
+
+
+struct llvmpipe_context {
+ struct pipe_context pipe; /**< base class */
+
+ /** Constant state objects */
+ const struct pipe_blend_state *blend;
+ const struct pipe_sampler_state *sampler[PIPE_MAX_SAMPLERS];
+ const struct pipe_depth_stencil_alpha_state *depth_stencil;
+ const struct pipe_rasterizer_state *rasterizer;
+ struct lp_fragment_shader *fs;
+ const struct lp_vertex_shader *vs;
+
+ /** Other rendering state */
+ struct pipe_blend_color blend_color[4][16];
+ struct pipe_clip_state clip;
+ struct pipe_constant_buffer constants[PIPE_SHADER_TYPES];
+ struct pipe_framebuffer_state framebuffer;
+ struct pipe_poly_stipple poly_stipple;
+ struct pipe_scissor_state scissor;
+ struct pipe_texture *texture[PIPE_MAX_SAMPLERS];
+ struct pipe_viewport_state viewport;
+ struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
+ struct pipe_vertex_element vertex_element[PIPE_MAX_ATTRIBS];
+
+ unsigned num_samplers;
+ unsigned num_textures;
+ unsigned num_vertex_elements;
+ unsigned num_vertex_buffers;
+
+ unsigned dirty; /**< Mask of LP_NEW_x flags */
+
+ /* Counter for occlusion queries. Note this supports overlapping
+ * queries.
+ */
+ uint64_t occlusion_count;
+ unsigned active_query_count;
+
+ /** Mapped vertex buffers */
+ ubyte *mapped_vbuffer[PIPE_MAX_ATTRIBS];
+
+ /** Mapped constant buffers */
+ void *mapped_constants[PIPE_SHADER_TYPES];
+
+ /** Vertex format */
+ struct vertex_info vertex_info;
+ struct vertex_info vertex_info_vbuf;
+
+ /** Which vertex shader output slot contains point size */
+ int psize_slot;
+
+ /* The reduced version of the primitive supplied by the state
+ * tracker.
+ */
+ unsigned reduced_api_prim;
+
+ /* The reduced primitive after unfilled triangles, wide-line
+ * decomposition, etc, are taken into account. This is the
+ * primitive actually rasterized.
+ */
+ unsigned reduced_prim;
+
+ /** Derived from scissor and surface bounds: */
+ struct pipe_scissor_state cliprect;
+
+ unsigned line_stipple_counter;
+
+ /** TGSI exec things */
+ struct {
+ struct lp_shader_sampler vert_samplers[PIPE_MAX_SAMPLERS];
+ struct lp_shader_sampler *vert_samplers_list[PIPE_MAX_SAMPLERS];
+ struct lp_shader_sampler frag_samplers[PIPE_MAX_SAMPLERS];
+ struct lp_shader_sampler *frag_samplers_list[PIPE_MAX_SAMPLERS];
+ } tgsi;
+
+ /** The primitive drawing context */
+ struct draw_context *draw;
+ struct draw_stage *setup;
+ struct draw_stage *vbuf;
+ struct llvmpipe_vbuf_render *vbuf_render;
+
+ boolean dirty_render_cache;
+
+ struct llvmpipe_tile_cache *cbuf_cache[PIPE_MAX_COLOR_BUFS];
+
+ /* TODO: we shouldn't be using external interfaces internally like this */
+ struct pipe_transfer *zsbuf_transfer;
+ uint8_t *zsbuf_map;
+
+ unsigned tex_timestamp;
+ struct llvmpipe_tex_tile_cache *tex_cache[PIPE_MAX_SAMPLERS];
+
+ unsigned no_rast : 1;
+
+ struct lp_jit_context jit_context;
+};
+
+
+static INLINE struct llvmpipe_context *
+llvmpipe_context( struct pipe_context *pipe )
+{
+ return (struct llvmpipe_context *)pipe;
+}
+
+#endif /* LP_CONTEXT_H */
+
diff --git a/src/gallium/drivers/llvmpipe/lp_draw_arrays.c b/src/gallium/drivers/llvmpipe/lp_draw_arrays.c
new file mode 100644
index 0000000000..89772e62d3
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_draw_arrays.c
@@ -0,0 +1,192 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors:
+ * Brian Paul
+ * Keith Whitwell
+ */
+
+
+#include "pipe/p_defines.h"
+#include "pipe/p_context.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_inlines.h"
+#include "util/u_prim.h"
+
+#include "lp_buffer.h"
+#include "lp_context.h"
+#include "lp_state.h"
+
+#include "draw/draw_context.h"
+
+
+
+static void
+llvmpipe_map_constant_buffers(struct llvmpipe_context *lp)
+{
+ struct pipe_screen *screen = lp->pipe.screen;
+ uint i, size;
+
+ for (i = 0; i < PIPE_SHADER_TYPES; i++) {
+ if (lp->constants[i].buffer && lp->constants[i].buffer->size)
+ lp->mapped_constants[i] = screen->buffer_map(screen, lp->constants[i].buffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ }
+
+ if (lp->constants[PIPE_SHADER_VERTEX].buffer)
+ size = lp->constants[PIPE_SHADER_VERTEX].buffer->size;
+ else
+ size = 0;
+
+ lp->jit_context.constants = lp->mapped_constants[PIPE_SHADER_FRAGMENT];
+
+ draw_set_mapped_constant_buffer(lp->draw,
+ lp->mapped_constants[PIPE_SHADER_VERTEX],
+ size);
+}
+
+
+static void
+llvmpipe_unmap_constant_buffers(struct llvmpipe_context *lp)
+{
+ struct pipe_screen *screen = lp->pipe.screen;
+ uint i;
+
+   /* Really need to flush all prims, since the vert/frag shaders' constant
+    * buffers are going away now.
+    */
+ draw_flush(lp->draw);
+
+ draw_set_mapped_constant_buffer(lp->draw, NULL, 0);
+
+ lp->jit_context.constants = NULL;
+
+ for (i = 0; i < 2; i++) {
+ if (lp->constants[i].buffer && lp->constants[i].buffer->size)
+ screen->buffer_unmap(screen, lp->constants[i].buffer);
+ lp->mapped_constants[i] = NULL;
+ }
+}
+
+
+boolean
+llvmpipe_draw_arrays(struct pipe_context *pipe, unsigned mode,
+ unsigned start, unsigned count)
+{
+ return llvmpipe_draw_elements(pipe, NULL, 0, mode, start, count);
+}
+
+
+/**
+ * Draw vertex arrays, with optional indexing.
+ * Basically, map the vertex buffers (and drawing surfaces), then hand off
+ * the drawing to the 'draw' module.
+ */
+boolean
+llvmpipe_draw_range_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned min_index,
+ unsigned max_index,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct llvmpipe_context *lp = llvmpipe_context(pipe);
+ struct draw_context *draw = lp->draw;
+ unsigned i;
+
+ lp->reduced_api_prim = u_reduced_prim(mode);
+
+ if (lp->dirty)
+ llvmpipe_update_derived( lp );
+
+ llvmpipe_map_transfers(lp);
+ llvmpipe_map_constant_buffers(lp);
+
+ /*
+ * Map vertex buffers
+ */
+ for (i = 0; i < lp->num_vertex_buffers; i++) {
+ void *buf = llvmpipe_buffer(lp->vertex_buffer[i].buffer)->data;
+ draw_set_mapped_vertex_buffer(draw, i, buf);
+ }
+
+ /* Map index buffer, if present */
+ if (indexBuffer) {
+ void *mapped_indexes = llvmpipe_buffer(indexBuffer)->data;
+ draw_set_mapped_element_buffer_range(draw, indexSize,
+ min_index,
+ max_index,
+ mapped_indexes);
+ }
+ else {
+ /* no index/element buffer */
+ draw_set_mapped_element_buffer_range(draw, 0, start,
+ start + count - 1, NULL);
+ }
+
+ /* draw! */
+ draw_arrays(draw, mode, start, count);
+
+ /*
+ * unmap vertex/index buffers - will cause draw module to flush
+ */
+ for (i = 0; i < lp->num_vertex_buffers; i++) {
+ draw_set_mapped_vertex_buffer(draw, i, NULL);
+ }
+ if (indexBuffer) {
+ draw_set_mapped_element_buffer(draw, 0, NULL);
+ }
+
+
+ /* Note: leave drawing surfaces mapped */
+ llvmpipe_unmap_constant_buffers(lp);
+
+ lp->dirty_render_cache = TRUE;
+
+ return TRUE;
+}
+
+
+boolean
+llvmpipe_draw_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned mode, unsigned start, unsigned count)
+{
+ return llvmpipe_draw_range_elements( pipe, indexBuffer,
+ indexSize,
+ 0, 0xffffffff,
+ mode, start, count );
+}
+
+
+void
+llvmpipe_set_edgeflags(struct pipe_context *pipe, const unsigned *edgeflags)
+{
+ struct llvmpipe_context *lp = llvmpipe_context(pipe);
+ draw_set_edgeflags(lp->draw, edgeflags);
+}
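
Both entry points above funnel into llvmpipe_draw_range_elements(): a plain draw_arrays becomes an element draw with no index buffer, and the element range handed to the draw module is either the conservative [0, 0xffffffff] (indices present, range unknown) or [start, start+count-1] (no indices). A small self-contained sketch of that range bookkeeping, using only plain arithmetic:

    #include <stdio.h>

    /* Referenced vertex range for a non-indexed draw of [start, start+count). */
    static void vertex_range(unsigned start, unsigned count,
                             unsigned *min_index, unsigned *max_index)
    {
       *min_index = start;
       *max_index = start + count - 1;
    }

    int main(void)
    {
       unsigned min_index, max_index;

       vertex_range(100, 36, &min_index, &max_index);
       printf("non-indexed draw touches vertices [%u, %u]\n", min_index, max_index);

       /* With an index buffer the range is unknown up front, so
        * llvmpipe_draw_elements() passes the conservative full range. */
       printf("indexed draw without range info: [0, 0x%x]\n", 0xffffffffu);
       return 0;
    }
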
diff --git a/src/gallium/drivers/llvmpipe/lp_flush.c b/src/gallium/drivers/llvmpipe/lp_flush.c
new file mode 100644
index 0000000000..b5c1c95bb7
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_flush.c
@@ -0,0 +1,98 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Author:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_defines.h"
+#include "draw/draw_context.h"
+#include "lp_flush.h"
+#include "lp_context.h"
+#include "lp_surface.h"
+#include "lp_state.h"
+#include "lp_tile_cache.h"
+#include "lp_tex_cache.h"
+#include "lp_winsys.h"
+
+
+void
+llvmpipe_flush( struct pipe_context *pipe,
+ unsigned flags,
+ struct pipe_fence_handle **fence )
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+ uint i;
+
+ draw_flush(llvmpipe->draw);
+
+ if (flags & PIPE_FLUSH_SWAPBUFFERS) {
+ /* If this is a swapbuffers, just flush color buffers.
+ *
+ * The zbuffer changes are not discarded, but held in the cache
+ * in the hope that a later clear will wipe them out.
+ */
+ for (i = 0; i < llvmpipe->framebuffer.nr_cbufs; i++)
+ if (llvmpipe->cbuf_cache[i])
+ lp_flush_tile_cache(llvmpipe->cbuf_cache[i]);
+
+ /* Need this call for hardware buffers before swapbuffers.
+ *
+       * There should probably be another/different flush-type function
+ * that's called before swapbuffers because we don't always want
+ * to unmap surfaces when flushing.
+ */
+ llvmpipe_unmap_transfers(llvmpipe);
+ }
+ else if (flags & PIPE_FLUSH_RENDER_CACHE) {
+ for (i = 0; i < llvmpipe->framebuffer.nr_cbufs; i++)
+ if (llvmpipe->cbuf_cache[i])
+ lp_flush_tile_cache(llvmpipe->cbuf_cache[i]);
+
+ /* FIXME: untile zsbuf! */
+
+ llvmpipe->dirty_render_cache = FALSE;
+ }
+
+ /* Enable to dump BMPs of the color/depth buffers each frame */
+#if 0
+ if(flags & PIPE_FLUSH_FRAME) {
+ static unsigned frame_no = 1;
+ static char filename[256];
+ util_snprintf(filename, sizeof(filename), "cbuf_%u.bmp", frame_no);
+ debug_dump_surface_bmp(filename, llvmpipe->framebuffer.cbufs[0]);
+ util_snprintf(filename, sizeof(filename), "zsbuf_%u.bmp", frame_no);
+ debug_dump_surface_bmp(filename, llvmpipe->framebuffer.zsbuf);
+ ++frame_no;
+ }
+#endif
+
+ if (fence)
+ *fence = NULL;
+}
+
diff --git a/src/gallium/drivers/llvmpipe/lp_flush.h b/src/gallium/drivers/llvmpipe/lp_flush.h
new file mode 100644
index 0000000000..10b2b52583
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_flush.h
@@ -0,0 +1,37 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef LP_FLUSH_H
+#define LP_FLUSH_H
+
+struct pipe_context;
+struct pipe_fence_handle;
+
+void llvmpipe_flush(struct pipe_context *pipe, unsigned flags,
+ struct pipe_fence_handle **fence);
+
+#endif
diff --git a/src/gallium/drivers/llvmpipe/lp_jit.c b/src/gallium/drivers/llvmpipe/lp_jit.c
new file mode 100644
index 0000000000..9465f763d5
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_jit.c
@@ -0,0 +1,173 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * C - JIT interfaces
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#include <llvm-c/Transforms/Scalar.h>
+
+#include "util/u_memory.h"
+#include "lp_screen.h"
+#include "lp_bld_intr.h"
+#include "lp_jit.h"
+
+
+static void
+lp_jit_init_globals(struct llvmpipe_screen *screen)
+{
+ LLVMTypeRef texture_type;
+
+ /* struct lp_jit_texture */
+ {
+ LLVMTypeRef elem_types[4];
+
+ elem_types[LP_JIT_TEXTURE_WIDTH] = LLVMInt32Type();
+ elem_types[LP_JIT_TEXTURE_HEIGHT] = LLVMInt32Type();
+ elem_types[LP_JIT_TEXTURE_STRIDE] = LLVMInt32Type();
+ elem_types[LP_JIT_TEXTURE_DATA] = LLVMPointerType(LLVMInt8Type(), 0);
+
+ texture_type = LLVMStructType(elem_types, Elements(elem_types), 0);
+
+ LP_CHECK_MEMBER_OFFSET(struct lp_jit_texture, width,
+ screen->target, texture_type,
+ LP_JIT_TEXTURE_WIDTH);
+ LP_CHECK_MEMBER_OFFSET(struct lp_jit_texture, height,
+ screen->target, texture_type,
+ LP_JIT_TEXTURE_HEIGHT);
+ LP_CHECK_MEMBER_OFFSET(struct lp_jit_texture, stride,
+ screen->target, texture_type,
+ LP_JIT_TEXTURE_STRIDE);
+ LP_CHECK_MEMBER_OFFSET(struct lp_jit_texture, data,
+ screen->target, texture_type,
+ LP_JIT_TEXTURE_DATA);
+ LP_CHECK_STRUCT_SIZE(struct lp_jit_texture,
+ screen->target, texture_type);
+
+ LLVMAddTypeName(screen->module, "texture", texture_type);
+ }
+
+ /* struct lp_jit_context */
+ {
+ LLVMTypeRef elem_types[5];
+ LLVMTypeRef context_type;
+
+ elem_types[0] = LLVMPointerType(LLVMFloatType(), 0); /* constants */
+ elem_types[1] = LLVMPointerType(LLVMInt8Type(), 0); /* samplers */
+ elem_types[2] = LLVMFloatType(); /* alpha_ref_value */
+ elem_types[3] = LLVMPointerType(LLVMInt8Type(), 0); /* blend_color */
+ elem_types[4] = LLVMArrayType(texture_type, PIPE_MAX_SAMPLERS); /* textures */
+
+ context_type = LLVMStructType(elem_types, Elements(elem_types), 0);
+
+ LP_CHECK_MEMBER_OFFSET(struct lp_jit_context, constants,
+ screen->target, context_type, 0);
+ LP_CHECK_MEMBER_OFFSET(struct lp_jit_context, samplers,
+ screen->target, context_type, 1);
+ LP_CHECK_MEMBER_OFFSET(struct lp_jit_context, alpha_ref_value,
+ screen->target, context_type, 2);
+ LP_CHECK_MEMBER_OFFSET(struct lp_jit_context, blend_color,
+ screen->target, context_type, 3);
+ LP_CHECK_MEMBER_OFFSET(struct lp_jit_context, textures,
+ screen->target, context_type,
+ LP_JIT_CONTEXT_TEXTURES_INDEX);
+ LP_CHECK_STRUCT_SIZE(struct lp_jit_context,
+ screen->target, context_type);
+
+ LLVMAddTypeName(screen->module, "context", context_type);
+
+ screen->context_ptr_type = LLVMPointerType(context_type, 0);
+ }
+
+ /* fetch_texel
+ */
+ {
+ LLVMTypeRef ret_type;
+ LLVMTypeRef arg_types[3];
+ LLVMValueRef fetch_texel;
+
+ ret_type = LLVMVoidType();
+ arg_types[0] = LLVMPointerType(LLVMInt8Type(), 0); /* samplers */
+ arg_types[1] = LLVMInt32Type(); /* unit */
+ arg_types[2] = LLVMPointerType(LLVMVectorType(LLVMFloatType(), 4), 0); /* store */
+
+ fetch_texel = lp_declare_intrinsic(screen->module, "fetch_texel",
+ ret_type, arg_types, Elements(arg_types));
+
+ LLVMAddGlobalMapping(screen->engine, fetch_texel, lp_fetch_texel_soa);
+ }
+
+#ifdef DEBUG
+ LLVMDumpModule(screen->module);
+#endif
+}
+
+
+void
+lp_jit_screen_cleanup(struct llvmpipe_screen *screen)
+{
+ if(screen->engine)
+ LLVMDisposeExecutionEngine(screen->engine);
+
+ if(screen->pass)
+ LLVMDisposePassManager(screen->pass);
+}
+
+
+void
+lp_jit_screen_init(struct llvmpipe_screen *screen)
+{
+ char *error = NULL;
+
+ screen->module = LLVMModuleCreateWithName("llvmpipe");
+
+ screen->provider = LLVMCreateModuleProviderForExistingModule(screen->module);
+
+ if (LLVMCreateJITCompiler(&screen->engine, screen->provider, 1, &error)) {
+ fprintf(stderr, "%s\n", error);
+ LLVMDisposeMessage(error);
+ abort();
+ }
+
+ screen->target = LLVMGetExecutionEngineTargetData(screen->engine);
+
+ screen->pass = LLVMCreateFunctionPassManager(screen->provider);
+ LLVMAddTargetData(screen->target, screen->pass);
+ /* These are the passes currently listed in llvm-c/Transforms/Scalar.h,
+ * but there are more on SVN. */
+ LLVMAddConstantPropagationPass(screen->pass);
+ LLVMAddInstructionCombiningPass(screen->pass);
+ LLVMAddPromoteMemoryToRegisterPass(screen->pass);
+ LLVMAddGVNPass(screen->pass);
+ LLVMAddCFGSimplificationPass(screen->pass);
+
+ lp_jit_init_globals(screen);
+}
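
The LP_CHECK_MEMBER_OFFSET / LP_CHECK_STRUCT_SIZE calls in lp_jit_init_globals() guard against the hand-built LLVM struct types drifting out of sync with the C structs the generated code is handed at run time. A self-contained sketch of the same idea, comparing C offsets against an independently assumed layout (the expected offsets below assume a typical ABI with 4-byte ints and are not values taken from the driver, which queries LLVM's target data instead):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct jit_texture_like {      /* mirrors the shape of lp_jit_texture */
       uint32_t width;
       uint32_t height;
       uint32_t stride;
       const void *data;
    };

    int main(void)
    {
       /* Offsets the "other side" believes the members live at; here they are
        * hand-computed, in the driver they come from LLVM's target data. */
       const size_t expected_width  = 0;
       const size_t expected_height = 4;
       const size_t expected_stride = 8;

       assert(offsetof(struct jit_texture_like, width)  == expected_width);
       assert(offsetof(struct jit_texture_like, height) == expected_height);
       assert(offsetof(struct jit_texture_like, stride) == expected_stride);
       /* The data pointer lands wherever the ABI's pointer alignment puts it;
        * the real macros ask LLVM for that offset instead of guessing. */
       assert(offsetof(struct jit_texture_like, data) >= 12);
       return 0;
    }
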
diff --git a/src/gallium/drivers/llvmpipe/lp_jit.h b/src/gallium/drivers/llvmpipe/lp_jit.h
new file mode 100644
index 0000000000..58f716ede2
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_jit.h
@@ -0,0 +1,135 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * C - JIT interfaces
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef LP_JIT_H
+#define LP_JIT_H
+
+
+#include "lp_bld_struct.h"
+
+#include "pipe/p_state.h"
+
+
+struct tgsi_sampler;
+struct llvmpipe_screen;
+
+
+struct lp_jit_texture
+{
+ uint32_t width;
+ uint32_t height;
+ uint32_t stride;
+ const void *data;
+};
+
+
+enum {
+ LP_JIT_TEXTURE_WIDTH = 0,
+ LP_JIT_TEXTURE_HEIGHT,
+ LP_JIT_TEXTURE_STRIDE,
+ LP_JIT_TEXTURE_DATA
+};
+
+
+
+/**
+ * This structure is passed directly to the generated fragment shader.
+ *
+ * It contains the derived state.
+ *
+ * Changes here must be reflected in the lp_jit_context_* macros and the
+ * lp_jit_init_globals function. Changes to the ordering should be avoided.
+ *
+ * Only use types with a clear size and padding here, in particular prefer the
+ * stdint.h types to the basic integer types.
+ */
+struct lp_jit_context
+{
+ const float *constants;
+
+ struct tgsi_sampler **samplers;
+
+ float alpha_ref_value;
+
+ /* FIXME: store (also?) in floats */
+ uint8_t *blend_color;
+
+ struct lp_jit_texture textures[PIPE_MAX_SAMPLERS];
+};
+
+
+#define lp_jit_context_constants(_builder, _ptr) \
+ lp_build_struct_get(_builder, _ptr, 0, "constants")
+
+#define lp_jit_context_samplers(_builder, _ptr) \
+ lp_build_struct_get(_builder, _ptr, 1, "samplers")
+
+#define lp_jit_context_alpha_ref_value(_builder, _ptr) \
+ lp_build_struct_get(_builder, _ptr, 2, "alpha_ref_value")
+
+#define lp_jit_context_blend_color(_builder, _ptr) \
+ lp_build_struct_get(_builder, _ptr, 3, "blend_color")
+
+#define LP_JIT_CONTEXT_TEXTURES_INDEX 4
+
+#define lp_jit_context_textures(_builder, _ptr) \
+ lp_build_struct_get_ptr(_builder, _ptr, LP_JIT_CONTEXT_TEXTURES_INDEX, "textures")
+
+
+typedef void
+(*lp_jit_frag_func)(struct lp_jit_context *context,
+ uint32_t x,
+ uint32_t y,
+ const void *a0,
+ const void *dadx,
+ const void *dady,
+ uint32_t *mask,
+ void *color,
+ void *depth);
+
+void PIPE_CDECL
+lp_fetch_texel_soa( struct tgsi_sampler **samplers,
+ uint32_t unit,
+ float *store );
+
+
+void
+lp_jit_screen_cleanup(struct llvmpipe_screen *screen);
+
+
+void
+lp_jit_screen_init(struct llvmpipe_screen *screen);
+
+
+#endif /* LP_JIT_H */
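
lp_jit_frag_func is the whole contract between the driver and the generated code: once LLVM has emitted a function with this signature, the rasterizer simply calls it through a C function pointer. A self-contained sketch of that calling pattern with stand-in types; the context layout and argument meanings below are simplified placeholders, not the real lp_jit_context:

    #include <stdint.h>
    #include <stdio.h>

    struct ctx_like { const float *constants; };   /* stand-in for lp_jit_context */

    typedef void (*frag_func)(struct ctx_like *ctx,
                              uint32_t x, uint32_t y,
                              uint32_t *mask, float *color);

    /* In the driver this body would come out of the JIT; here it is plain C. */
    static void example_frag(struct ctx_like *ctx,
                             uint32_t x, uint32_t y,
                             uint32_t *mask, float *color)
    {
       (void)x; (void)y;
       if (*mask)
          color[0] = ctx->constants[0];   /* write the red channel from a constant */
    }

    int main(void)
    {
       const float constants[1] = { 0.25f };
       struct ctx_like ctx = { constants };
       uint32_t mask = 1;
       float color[4] = { 0, 0, 0, 0 };

       frag_func fn = example_frag;       /* would be the JIT-compiled entry point */
       fn(&ctx, 0, 0, &mask, color);
       printf("red = %g\n", color[0]);
       return 0;
    }
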
diff --git a/src/gallium/drivers/llvmpipe/lp_prim_setup.c b/src/gallium/drivers/llvmpipe/lp_prim_setup.c
new file mode 100644
index 0000000000..b14f8fb99d
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_prim_setup.c
@@ -0,0 +1,190 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \brief A draw stage that drives our triangle setup routines from
+ * within the draw pipeline. One of two ways to drive setup, the
+ * other being in lp_prim_vbuf.c.
+ *
+ * \author Keith Whitwell <keith@tungstengraphics.com>
+ * \author Brian Paul
+ */
+
+
+#include "lp_context.h"
+#include "lp_setup.h"
+#include "lp_state.h"
+#include "lp_prim_setup.h"
+#include "draw/draw_pipe.h"
+#include "draw/draw_vertex.h"
+#include "util/u_memory.h"
+
+/**
+ * Triangle setup info (derived from draw_stage).
+ * Also used for line drawing (taking some liberties).
+ */
+struct setup_stage {
+ struct draw_stage stage; /**< This must be first (base class) */
+
+ struct setup_context *setup;
+};
+
+
+
+/**
+ * Basically a cast wrapper.
+ */
+static INLINE struct setup_stage *setup_stage( struct draw_stage *stage )
+{
+ return (struct setup_stage *)stage;
+}
+
+
+typedef const float (*cptrf4)[4];
+
+static void
+do_tri(struct draw_stage *stage, struct prim_header *prim)
+{
+ struct setup_stage *setup = setup_stage( stage );
+
+ llvmpipe_setup_tri( setup->setup,
+ (cptrf4)prim->v[0]->data,
+ (cptrf4)prim->v[1]->data,
+ (cptrf4)prim->v[2]->data );
+}
+
+static void
+do_line(struct draw_stage *stage, struct prim_header *prim)
+{
+ struct setup_stage *setup = setup_stage( stage );
+
+ llvmpipe_setup_line( setup->setup,
+ (cptrf4)prim->v[0]->data,
+ (cptrf4)prim->v[1]->data );
+}
+
+static void
+do_point(struct draw_stage *stage, struct prim_header *prim)
+{
+ struct setup_stage *setup = setup_stage( stage );
+
+ llvmpipe_setup_point( setup->setup,
+ (cptrf4)prim->v[0]->data );
+}
+
+
+
+
+static void setup_begin( struct draw_stage *stage )
+{
+ struct setup_stage *setup = setup_stage(stage);
+
+ llvmpipe_setup_prepare( setup->setup );
+
+ stage->point = do_point;
+ stage->line = do_line;
+ stage->tri = do_tri;
+}
+
+
+static void setup_first_point( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ setup_begin(stage);
+ stage->point( stage, header );
+}
+
+static void setup_first_line( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ setup_begin(stage);
+ stage->line( stage, header );
+}
+
+
+static void setup_first_tri( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ setup_begin(stage);
+ stage->tri( stage, header );
+}
+
+
+
+static void setup_flush( struct draw_stage *stage,
+ unsigned flags )
+{
+ stage->point = setup_first_point;
+ stage->line = setup_first_line;
+ stage->tri = setup_first_tri;
+}
+
+
+static void reset_stipple_counter( struct draw_stage *stage )
+{
+}
+
+
+static void render_destroy( struct draw_stage *stage )
+{
+ struct setup_stage *ssetup = setup_stage(stage);
+ llvmpipe_setup_destroy_context(ssetup->setup);
+ FREE( stage );
+}
+
+
+/**
+ * Create a new primitive setup/render stage.
+ */
+struct draw_stage *lp_draw_render_stage( struct llvmpipe_context *llvmpipe )
+{
+ struct setup_stage *sstage = CALLOC_STRUCT(setup_stage);
+
+ sstage->setup = llvmpipe_setup_create_context(llvmpipe);
+ sstage->stage.draw = llvmpipe->draw;
+ sstage->stage.point = setup_first_point;
+ sstage->stage.line = setup_first_line;
+ sstage->stage.tri = setup_first_tri;
+ sstage->stage.flush = setup_flush;
+ sstage->stage.reset_stipple_counter = reset_stipple_counter;
+ sstage->stage.destroy = render_destroy;
+
+ return (struct draw_stage *)sstage;
+}
+
+struct setup_context *
+lp_draw_setup_context( struct draw_stage *stage )
+{
+ struct setup_stage *ssetup = setup_stage(stage);
+ return ssetup->setup;
+}
+
+void
+lp_draw_flush( struct draw_stage *stage )
+{
+ stage->flush( stage, 0 );
+}
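
The stage above uses a small lazy-initialization trick: the point/line/tri hooks initially point at setup_first_*(), which run llvmpipe_setup_prepare() once and then swap in the real handlers, while setup_flush() swaps the "first" handlers back in for the next batch. A self-contained sketch of that pattern with invented names:

    #include <stdio.h>

    struct stage {
       void (*tri)(struct stage *st, int id);
    };

    static void do_tri(struct stage *st, int id)
    {
       (void)st;
       printf("tri %d\n", id);
    }

    static void first_tri(struct stage *st, int id)
    {
       printf("prepare once\n");      /* analogous to llvmpipe_setup_prepare() */
       st->tri = do_tri;              /* swap in the fast path */
       st->tri(st, id);               /* handle the triangle that triggered us */
    }

    static void flush(struct stage *st)
    {
       st->tri = first_tri;           /* next batch re-runs the prepare step */
    }

    int main(void)
    {
       struct stage st = { first_tri };
       st.tri(&st, 0);
       st.tri(&st, 1);
       flush(&st);
       st.tri(&st, 2);
       return 0;
    }
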
diff --git a/src/gallium/drivers/llvmpipe/lp_prim_setup.h b/src/gallium/drivers/llvmpipe/lp_prim_setup.h
new file mode 100644
index 0000000000..da6cae6375
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_prim_setup.h
@@ -0,0 +1,85 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef LP_PRIM_SETUP_H
+#define LP_PRIM_SETUP_H
+
+
+/**
+ * vbuf is a special stage to gather the stream of triangles, lines, points
+ * together and reconstruct vertex buffers for hardware upload.
+ *
+ * First attempt, work in progress.
+ *
+ * TODO:
+ *   - separate out vertex buffer building and primitive emit, i.e. >1 draw per vb.
+ *   - tell vbuf stage how to build hw vertices directly
+ *   - pass vbuf stage a buffer pointer for direct emit to AGP/VRAM.
+ *
+ *
+ *
+ * Vertices are just an array of floats, with all the attributes
+ * packed. We currently assume a layout like:
+ *
+ * attr[0][0..3] - window position
+ * attr[1..n][0..3] - remaining attributes.
+ *
+ * Attributes are assumed to be 4 floats wide but are packed so that
+ * all the enabled attributes run contiguously.
+ */
+
+
+struct draw_stage;
+struct llvmpipe_context;
+
+
+typedef void (*vbuf_draw_func)( struct pipe_context *pipe,
+ unsigned prim,
+ const ushort *elements,
+ unsigned nr_elements,
+ const void *vertex_buffer,
+ unsigned nr_vertices );
+
+
+extern struct draw_stage *
+lp_draw_render_stage( struct llvmpipe_context *llvmpipe );
+
+extern struct setup_context *
+lp_draw_setup_context( struct draw_stage * );
+
+extern void
+lp_draw_flush( struct draw_stage * );
+
+
+extern struct draw_stage *
+lp_draw_vbuf_stage( struct draw_context *draw_context,
+ struct pipe_context *pipe,
+ vbuf_draw_func draw );
+
+
+#endif /* LP_PRIM_SETUP_H */
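
The layout described above, where each vertex is a flat run of 4-float attributes with attr[0] holding the window position, means a vertex is addressed purely as base pointer plus index times stride. A self-contained sketch of that addressing, mirroring the get_vert() helper in lp_prim_vbuf.c (two attributes per vertex is an assumption made only for the example):

    #include <stdio.h>

    typedef const float (*cptrf4)[4];   /* same shape as the driver's cptrf4 */

    /* analogous to get_vert() in lp_prim_vbuf.c */
    static cptrf4 get_vertex(const void *buffer, int index, int stride_bytes)
    {
       return (cptrf4)((const char *)buffer + index * stride_bytes);
    }

    int main(void)
    {
       /* 2 vertices, 2 attributes each: attr 0 = position, attr 1 = color */
       const float verts[2][2][4] = {
          { { 10.0f, 20.0f, 0.5f, 1.0f }, { 1.0f, 0.0f, 0.0f, 1.0f } },
          { { 30.0f, 40.0f, 0.5f, 1.0f }, { 0.0f, 1.0f, 0.0f, 1.0f } },
       };
       const int stride = sizeof(verts[0]);      /* bytes per packed vertex */

       cptrf4 v1 = get_vertex(verts, 1, stride);
       printf("vertex 1 position: %g %g\n", v1[0][0], v1[0][1]);
       printf("vertex 1 color:    %g %g %g\n", v1[1][0], v1[1][1], v1[1][2]);
       return 0;
    }
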
diff --git a/src/gallium/drivers/llvmpipe/lp_prim_vbuf.c b/src/gallium/drivers/llvmpipe/lp_prim_vbuf.c
new file mode 100644
index 0000000000..c394dcb61d
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_prim_vbuf.c
@@ -0,0 +1,607 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Interface between 'draw' module's output and the llvmpipe rasterizer/setup
+ * code. When the 'draw' module has finished filling a vertex buffer, the
+ * draw_arrays() functions below will be called. Loop over the vertices and
+ * call the point/line/tri setup functions.
+ *
+ * Authors:
+ * Brian Paul
+ */
+
+
+#include "lp_context.h"
+#include "lp_state.h"
+#include "lp_prim_vbuf.h"
+#include "lp_prim_setup.h"
+#include "lp_setup.h"
+#include "draw/draw_context.h"
+#include "draw/draw_vbuf.h"
+#include "util/u_memory.h"
+#include "util/u_prim.h"
+
+
+#define LP_MAX_VBUF_INDEXES 1024
+#define LP_MAX_VBUF_SIZE 4096
+
+typedef const float (*cptrf4)[4];
+
+/**
+ * Subclass of vbuf_render.
+ */
+struct llvmpipe_vbuf_render
+{
+ struct vbuf_render base;
+ struct llvmpipe_context *llvmpipe;
+ uint prim;
+ uint vertex_size;
+ uint nr_vertices;
+ uint vertex_buffer_size;
+ void *vertex_buffer;
+};
+
+
+/** cast wrapper */
+static struct llvmpipe_vbuf_render *
+llvmpipe_vbuf_render(struct vbuf_render *vbr)
+{
+ return (struct llvmpipe_vbuf_render *) vbr;
+}
+
+
+static const struct vertex_info *
+lp_vbuf_get_vertex_info(struct vbuf_render *vbr)
+{
+ struct llvmpipe_vbuf_render *cvbr = llvmpipe_vbuf_render(vbr);
+ return llvmpipe_get_vbuf_vertex_info(cvbr->llvmpipe);
+}
+
+
+static boolean
+lp_vbuf_allocate_vertices(struct vbuf_render *vbr,
+ ushort vertex_size, ushort nr_vertices)
+{
+ struct llvmpipe_vbuf_render *cvbr = llvmpipe_vbuf_render(vbr);
+ unsigned size = vertex_size * nr_vertices;
+
+ if (cvbr->vertex_buffer_size < size) {
+ align_free(cvbr->vertex_buffer);
+ cvbr->vertex_buffer = align_malloc(size, 16);
+ cvbr->vertex_buffer_size = size;
+ }
+
+ cvbr->vertex_size = vertex_size;
+ cvbr->nr_vertices = nr_vertices;
+
+ return cvbr->vertex_buffer != NULL;
+}
+
+static void
+lp_vbuf_release_vertices(struct vbuf_render *vbr)
+{
+#if 0
+ {
+ struct llvmpipe_vbuf_render *cvbr = llvmpipe_vbuf_render(vbr);
+ const struct vertex_info *info =
+ llvmpipe_get_vbuf_vertex_info(cvbr->llvmpipe);
+ const float *vtx = (const float *) cvbr->vertex_buffer;
+ uint i, j;
+ debug_printf("%s (vtx_size = %u, vtx_used = %u)\n",
+ __FUNCTION__, cvbr->vertex_size, cvbr->nr_vertices);
+ for (i = 0; i < cvbr->nr_vertices; i++) {
+ for (j = 0; j < info->num_attribs; j++) {
+ uint k;
+ switch (info->attrib[j].emit) {
+ case EMIT_4F: k = 4; break;
+ case EMIT_3F: k = 3; break;
+ case EMIT_2F: k = 2; break;
+ case EMIT_1F: k = 1; break;
+ default: assert(0);
+ }
+ debug_printf("Vert %u attr %u: ", i, j);
+ while (k-- > 0) {
+ debug_printf("%g ", vtx[0]);
+ vtx++;
+ }
+ debug_printf("\n");
+ }
+ }
+ }
+#endif
+
+ /* keep the old allocation for next time */
+}
+
+static void *
+lp_vbuf_map_vertices(struct vbuf_render *vbr)
+{
+ struct llvmpipe_vbuf_render *cvbr = llvmpipe_vbuf_render(vbr);
+ return cvbr->vertex_buffer;
+}
+
+static void
+lp_vbuf_unmap_vertices(struct vbuf_render *vbr,
+ ushort min_index,
+ ushort max_index )
+{
+ struct llvmpipe_vbuf_render *cvbr = llvmpipe_vbuf_render(vbr);
+ assert( cvbr->vertex_buffer_size >= (max_index+1) * cvbr->vertex_size );
+ /* do nothing */
+}
+
+
+static boolean
+lp_vbuf_set_primitive(struct vbuf_render *vbr, unsigned prim)
+{
+ struct llvmpipe_vbuf_render *cvbr = llvmpipe_vbuf_render(vbr);
+
+ /* XXX: break this dependency - make setup_context live under
+ * llvmpipe, rename the old "setup" draw stage to something else.
+ */
+ struct setup_context *setup_ctx = lp_draw_setup_context(cvbr->llvmpipe->setup);
+
+ llvmpipe_setup_prepare( setup_ctx );
+
+ cvbr->llvmpipe->reduced_prim = u_reduced_prim(prim);
+ cvbr->prim = prim;
+ return TRUE;
+
+}
+
+
+static INLINE cptrf4 get_vert( const void *vertex_buffer,
+ int index,
+ int stride )
+{
+ return (cptrf4)((char *)vertex_buffer + index * stride);
+}
+
+
+/**
+ * draw elements / indexed primitives
+ */
+static void
+lp_vbuf_draw(struct vbuf_render *vbr, const ushort *indices, uint nr)
+{
+ struct llvmpipe_vbuf_render *cvbr = llvmpipe_vbuf_render(vbr);
+ struct llvmpipe_context *llvmpipe = cvbr->llvmpipe;
+ const unsigned stride = llvmpipe->vertex_info_vbuf.size * sizeof(float);
+ const void *vertex_buffer = cvbr->vertex_buffer;
+ unsigned i;
+
+ /* XXX: break this dependency - make setup_context live under
+ * llvmpipe, rename the old "setup" draw stage to something else.
+ */
+ struct draw_stage *setup = llvmpipe->setup;
+ struct setup_context *setup_ctx = lp_draw_setup_context(setup);
+
+ switch (cvbr->prim) {
+ case PIPE_PRIM_POINTS:
+ for (i = 0; i < nr; i++) {
+ llvmpipe_setup_point( setup_ctx,
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ break;
+
+ case PIPE_PRIM_LINES:
+ for (i = 1; i < nr; i += 2) {
+ llvmpipe_setup_line( setup_ctx,
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ break;
+
+ case PIPE_PRIM_LINE_STRIP:
+ for (i = 1; i < nr; i ++) {
+ llvmpipe_setup_line( setup_ctx,
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ break;
+
+ case PIPE_PRIM_LINE_LOOP:
+ for (i = 1; i < nr; i ++) {
+ llvmpipe_setup_line( setup_ctx,
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ if (nr) {
+ llvmpipe_setup_line( setup_ctx,
+ get_vert(vertex_buffer, indices[nr-1], stride),
+ get_vert(vertex_buffer, indices[0], stride) );
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLES:
+ if (llvmpipe->rasterizer->flatshade_first) {
+ for (i = 2; i < nr; i += 3) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride),
+ get_vert(vertex_buffer, indices[i-2], stride) );
+ }
+ }
+ else {
+ for (i = 2; i < nr; i += 3) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-2], stride),
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ if (llvmpipe->rasterizer->flatshade_first) {
+ for (i = 2; i < nr; i += 1) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i+(i&1)-1], stride),
+ get_vert(vertex_buffer, indices[i-(i&1)], stride),
+ get_vert(vertex_buffer, indices[i-2], stride) );
+ }
+ }
+ else {
+ for (i = 2; i < nr; i += 1) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i+(i&1)-2], stride),
+ get_vert(vertex_buffer, indices[i-(i&1)-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLE_FAN:
+ if (llvmpipe->rasterizer->flatshade_first) {
+ for (i = 2; i < nr; i += 1) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-0], stride),
+ get_vert(vertex_buffer, indices[0], stride),
+ get_vert(vertex_buffer, indices[i-1], stride) );
+ }
+ }
+ else {
+ for (i = 2; i < nr; i += 1) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[0], stride),
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_QUADS:
+ if (llvmpipe->rasterizer->flatshade_first) {
+ for (i = 3; i < nr; i += 4) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-2], stride),
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-3], stride) );
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride),
+ get_vert(vertex_buffer, indices[i-3], stride) );
+ }
+ }
+ else {
+ for (i = 3; i < nr; i += 4) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-3], stride),
+ get_vert(vertex_buffer, indices[i-2], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-2], stride),
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_QUAD_STRIP:
+ if (llvmpipe->rasterizer->flatshade_first) {
+ for (i = 3; i < nr; i += 2) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-0], stride),
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-3], stride));
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-2], stride),
+ get_vert(vertex_buffer, indices[i-0], stride),
+ get_vert(vertex_buffer, indices[i-3], stride) );
+ }
+ }
+ else {
+ for (i = 3; i < nr; i += 2) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-3], stride),
+ get_vert(vertex_buffer, indices[i-2], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-3], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_POLYGON:
+      /* Almost the same as a tri fan, but the _first_ vertex specifies the flat
+ * shading color. Note that the first polygon vertex is passed as
+ * the last triangle vertex here.
+ * flatshade_first state makes no difference.
+ */
+ for (i = 2; i < nr; i += 1) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-0], stride),
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[0], stride) );
+ }
+ break;
+
+ default:
+ assert(0);
+ }
+
+ /* XXX: why are we calling this??? If we had to call something, it
+ * would be a function in lp_setup.c:
+ */
+ lp_draw_flush( setup );
+}
+
+
+/**
+ * This function is hit when the draw module is working in pass-through mode.
+ * It's up to us to convert the vertex array into point/line/tri prims.
+ */
+static void
+lp_vbuf_draw_arrays(struct vbuf_render *vbr, uint start, uint nr)
+{
+ struct llvmpipe_vbuf_render *cvbr = llvmpipe_vbuf_render(vbr);
+ struct llvmpipe_context *llvmpipe = cvbr->llvmpipe;
+ const unsigned stride = llvmpipe->vertex_info_vbuf.size * sizeof(float);
+ const void *vertex_buffer =
+ (void *) get_vert(cvbr->vertex_buffer, start, stride);
+ unsigned i;
+
+ /* XXX: break this dependency - make setup_context live under
+ * llvmpipe, rename the old "setup" draw stage to something else.
+ */
+ struct draw_stage *setup = llvmpipe->setup;
+ struct setup_context *setup_ctx = lp_draw_setup_context(setup);
+
+ switch (cvbr->prim) {
+ case PIPE_PRIM_POINTS:
+ for (i = 0; i < nr; i++) {
+ llvmpipe_setup_point( setup_ctx,
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ break;
+
+ case PIPE_PRIM_LINES:
+ for (i = 1; i < nr; i += 2) {
+ llvmpipe_setup_line( setup_ctx,
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ break;
+
+ case PIPE_PRIM_LINE_STRIP:
+ for (i = 1; i < nr; i ++) {
+ llvmpipe_setup_line( setup_ctx,
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ break;
+
+ case PIPE_PRIM_LINE_LOOP:
+ for (i = 1; i < nr; i ++) {
+ llvmpipe_setup_line( setup_ctx,
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ if (nr) {
+ llvmpipe_setup_line( setup_ctx,
+ get_vert(vertex_buffer, nr-1, stride),
+ get_vert(vertex_buffer, 0, stride) );
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLES:
+ if (llvmpipe->rasterizer->flatshade_first) {
+ for (i = 2; i < nr; i += 3) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride),
+ get_vert(vertex_buffer, i-2, stride) );
+ }
+ }
+ else {
+ for (i = 2; i < nr; i += 3) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-2, stride),
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ if (llvmpipe->rasterizer->flatshade_first) {
+ for (i = 2; i < nr; i++) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i+(i&1)-1, stride),
+ get_vert(vertex_buffer, i-(i&1), stride),
+ get_vert(vertex_buffer, i-2, stride) );
+ }
+ }
+ else {
+ for (i = 2; i < nr; i++) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i+(i&1)-2, stride),
+ get_vert(vertex_buffer, i-(i&1)-1, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLE_FAN:
+ if (llvmpipe->rasterizer->flatshade_first) {
+ for (i = 2; i < nr; i += 1) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-0, stride),
+ get_vert(vertex_buffer, 0, stride),
+ get_vert(vertex_buffer, i-1, stride) );
+ }
+ }
+ else {
+ for (i = 2; i < nr; i += 1) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, 0, stride),
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_QUADS:
+ if (llvmpipe->rasterizer->flatshade_first) {
+ for (i = 3; i < nr; i += 4) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-2, stride),
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-3, stride) );
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride),
+ get_vert(vertex_buffer, i-3, stride) );
+ }
+ }
+ else {
+ for (i = 3; i < nr; i += 4) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-3, stride),
+ get_vert(vertex_buffer, i-2, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-2, stride),
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_QUAD_STRIP:
+ if (llvmpipe->rasterizer->flatshade_first) {
+ for (i = 3; i < nr; i += 2) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-0, stride),
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-3, stride) );
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-2, stride),
+ get_vert(vertex_buffer, i-0, stride),
+ get_vert(vertex_buffer, i-3, stride) );
+ }
+ }
+ else {
+ for (i = 3; i < nr; i += 2) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-3, stride),
+ get_vert(vertex_buffer, i-2, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-3, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_POLYGON:
+      /* Almost the same as a tri fan, but the _first_ vertex specifies the flat
+ * shading color. Note that the first polygon vertex is passed as
+ * the last triangle vertex here.
+ * flatshade_first state makes no difference.
+ */
+ for (i = 2; i < nr; i += 1) {
+ llvmpipe_setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride),
+ get_vert(vertex_buffer, 0, stride) );
+ }
+ break;
+
+ default:
+ assert(0);
+ }
+}
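+
+/* Worked example (for illustration) of the (i&1) index trick used by the
+ * triangle-strip loops above: with flatshade_first disabled, the arrays path
+ * emits
+ *
+ *   i = 2 (even): (i+(i&1)-2, i-(i&1)-1, i-0) -> vertices (0, 1, 2)
+ *   i = 3 (odd):  (i+(i&1)-2, i-(i&1)-1, i-0) -> vertices (2, 1, 3)
+ *   i = 4 (even): (i+(i&1)-2, i-(i&1)-1, i-0) -> vertices (2, 3, 4)
+ *
+ * i.e. the usual strip decomposition, with the first two vertices of every
+ * odd triangle swapped so all emitted triangles keep the same winding.
+ */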
+
+
+
+static void
+lp_vbuf_destroy(struct vbuf_render *vbr)
+{
+ struct llvmpipe_vbuf_render *cvbr = llvmpipe_vbuf_render(vbr);
+ cvbr->llvmpipe->vbuf_render = NULL;
+ FREE(cvbr);
+}
+
+
+/**
+ * Initialize the post-transform vertex buffer information for the given
+ * context.
+ */
+void
+lp_init_vbuf(struct llvmpipe_context *lp)
+{
+ assert(lp->draw);
+
+ lp->vbuf_render = CALLOC_STRUCT(llvmpipe_vbuf_render);
+
+ lp->vbuf_render->base.max_indices = LP_MAX_VBUF_INDEXES;
+ lp->vbuf_render->base.max_vertex_buffer_bytes = LP_MAX_VBUF_SIZE;
+
+ lp->vbuf_render->base.get_vertex_info = lp_vbuf_get_vertex_info;
+ lp->vbuf_render->base.allocate_vertices = lp_vbuf_allocate_vertices;
+ lp->vbuf_render->base.map_vertices = lp_vbuf_map_vertices;
+ lp->vbuf_render->base.unmap_vertices = lp_vbuf_unmap_vertices;
+ lp->vbuf_render->base.set_primitive = lp_vbuf_set_primitive;
+ lp->vbuf_render->base.draw = lp_vbuf_draw;
+ lp->vbuf_render->base.draw_arrays = lp_vbuf_draw_arrays;
+ lp->vbuf_render->base.release_vertices = lp_vbuf_release_vertices;
+ lp->vbuf_render->base.destroy = lp_vbuf_destroy;
+
+ lp->vbuf_render->llvmpipe = lp;
+
+ lp->vbuf = draw_vbuf_stage(lp->draw, &lp->vbuf_render->base);
+
+ draw_set_rasterize_stage(lp->draw, lp->vbuf);
+
+ draw_set_render(lp->draw, &lp->vbuf_render->base);
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_prim_vbuf.h b/src/gallium/drivers/llvmpipe/lp_prim_vbuf.h
new file mode 100644
index 0000000000..6c4e6063e6
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_prim_vbuf.h
@@ -0,0 +1,38 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef LP_VBUF_H
+#define LP_VBUF_H
+
+
+struct llvmpipe_context;
+
+extern void
+lp_init_vbuf(struct llvmpipe_context *llvmpipe);
+
+
+#endif /* LP_VBUF_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_quad.h b/src/gallium/drivers/llvmpipe/lp_quad.h
new file mode 100644
index 0000000000..7eb05de77a
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_quad.h
@@ -0,0 +1,114 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef LP_QUAD_H
+#define LP_QUAD_H
+
+#include "pipe/p_state.h"
+#include "tgsi/tgsi_exec.h"
+
+
+#define QUAD_PRIM_POINT 1
+#define QUAD_PRIM_LINE 2
+#define QUAD_PRIM_TRI 3
+
+
+/* The rasterizer generates 2x2 quads of fragments and feeds them to
+ * the current fp_machine (see below).
+ * Remember that Y=0=top with Y increasing down the window.
+ */
+#define QUAD_TOP_LEFT 0
+#define QUAD_TOP_RIGHT 1
+#define QUAD_BOTTOM_LEFT 2
+#define QUAD_BOTTOM_RIGHT 3
+
+#define MASK_TOP_LEFT (1 << QUAD_TOP_LEFT)
+#define MASK_TOP_RIGHT (1 << QUAD_TOP_RIGHT)
+#define MASK_BOTTOM_LEFT (1 << QUAD_BOTTOM_LEFT)
+#define MASK_BOTTOM_RIGHT (1 << QUAD_BOTTOM_RIGHT)
+#define MASK_ALL 0xf
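+
+/* For illustration: a quad positioned at even window coords (x0, y0) covers
+ * the 2x2 pixel block
+ *
+ *    (x0,   y0  )   (x0+1, y0  )     <- TOP_LEFT,    TOP_RIGHT
+ *    (x0,   y0+1)   (x0+1, y0+1)     <- BOTTOM_LEFT, BOTTOM_RIGHT
+ *
+ * so a mask of (MASK_TOP_LEFT | MASK_BOTTOM_RIGHT) == 0x9 means only the
+ * (x0, y0) and (x0+1, y0+1) fragments are live.
+ */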
+
+
+/**
+ * Quad stage inputs (pos, coverage, front/back face, etc)
+ */
+struct quad_header_input
+{
+ int x0, y0; /**< quad window pos, always even */
+ float coverage[QUAD_SIZE]; /**< fragment coverage for antialiasing */
+ unsigned facing:1; /**< Front (0) or back (1) facing? */
+ unsigned prim:2; /**< QUAD_PRIM_POINT, LINE, TRI */
+};
+
+
+/**
+ * Quad stage inputs/outputs.
+ */
+struct quad_header_inout
+{
+ unsigned mask:4;
+};
+
+
+/**
+ * Quad stage outputs (color & depth).
+ */
+struct quad_header_output
+{
+ /** colors in SOA format (rrrr, gggg, bbbb, aaaa) */
+ float ALIGN16_ATTRIB color[PIPE_MAX_COLOR_BUFS][NUM_CHANNELS][QUAD_SIZE];
+};
+
+
+/**
+ * Input interpolation coefficients
+ */
+struct quad_interp_coef
+{
+ float ALIGN16_ATTRIB a0[1 + PIPE_MAX_SHADER_INPUTS][NUM_CHANNELS];
+ float ALIGN16_ATTRIB dadx[1 + PIPE_MAX_SHADER_INPUTS][NUM_CHANNELS];
+ float ALIGN16_ATTRIB dady[1 + PIPE_MAX_SHADER_INPUTS][NUM_CHANNELS];
+};
+
+
+/**
+ * Encodes everything we need to know about a 2x2 pixel block. Uses
+ * "Channel-Serial" or "SoA" layout.
+ */
+struct quad_header {
+ struct quad_header_input input;
+ struct quad_header_inout inout;
+
+ /* Redundant/duplicated:
+ */
+ const struct quad_interp_coef *coef;
+};
+
+#endif /* LP_QUAD_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_query.c b/src/gallium/drivers/llvmpipe/lp_query.c
new file mode 100644
index 0000000000..5554285425
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_query.c
@@ -0,0 +1,111 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Author:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "draw/draw_context.h"
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "lp_context.h"
+#include "lp_query.h"
+#include "lp_state.h"
+
+struct llvmpipe_query {
+ uint64_t start;
+ uint64_t end;
+};
+
+
+static struct llvmpipe_query *llvmpipe_query( struct pipe_query *p )
+{
+ return (struct llvmpipe_query *)p;
+}
+
+static struct pipe_query *
+llvmpipe_create_query(struct pipe_context *pipe,
+ unsigned type)
+{
+ assert(type == PIPE_QUERY_OCCLUSION_COUNTER);
+ return (struct pipe_query *)CALLOC_STRUCT( llvmpipe_query );
+}
+
+
+static void
+llvmpipe_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
+{
+ FREE(q);
+}
+
+
+static void
+llvmpipe_begin_query(struct pipe_context *pipe, struct pipe_query *q)
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context( pipe );
+ struct llvmpipe_query *sq = llvmpipe_query(q);
+
+ sq->start = llvmpipe->occlusion_count;
+ llvmpipe->active_query_count++;
+ llvmpipe->dirty |= LP_NEW_QUERY;
+}
+
+
+static void
+llvmpipe_end_query(struct pipe_context *pipe, struct pipe_query *q)
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context( pipe );
+ struct llvmpipe_query *sq = llvmpipe_query(q);
+
+ llvmpipe->active_query_count--;
+ sq->end = llvmpipe->occlusion_count;
+ llvmpipe->dirty |= LP_NEW_QUERY;
+}
+
+
+static boolean
+llvmpipe_get_query_result(struct pipe_context *pipe,
+ struct pipe_query *q,
+ boolean wait,
+ uint64_t *result )
+{
+ struct llvmpipe_query *sq = llvmpipe_query(q);
+ *result = sq->end - sq->start;
+ return TRUE;
+}
+
+
+void llvmpipe_init_query_funcs(struct llvmpipe_context *llvmpipe )
+{
+ llvmpipe->pipe.create_query = llvmpipe_create_query;
+ llvmpipe->pipe.destroy_query = llvmpipe_destroy_query;
+ llvmpipe->pipe.begin_query = llvmpipe_begin_query;
+ llvmpipe->pipe.end_query = llvmpipe_end_query;
+ llvmpipe->pipe.get_query_result = llvmpipe_get_query_result;
+}
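+
+/* Usage sketch (hypothetical caller, for illustration): an occlusion query
+ * simply snapshots llvmpipe->occlusion_count at begin/end time, so the
+ * reported result is the number of samples that passed in between:
+ *
+ *   struct pipe_query *q = pipe->create_query(pipe, PIPE_QUERY_OCCLUSION_COUNTER);
+ *   uint64_t samples;
+ *   pipe->begin_query(pipe, q);
+ *   ... issue draws ...
+ *   pipe->end_query(pipe, q);
+ *   pipe->get_query_result(pipe, q, TRUE, &samples);
+ *   pipe->destroy_query(pipe, q);
+ */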
+
+
diff --git a/src/gallium/drivers/llvmpipe/lp_query.h b/src/gallium/drivers/llvmpipe/lp_query.h
new file mode 100644
index 0000000000..fa9fcd8713
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_query.h
@@ -0,0 +1,39 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Author:
+ * Keith Whitwell
+ */
+
+#ifndef LP_QUERY_H
+#define LP_QUERY_H
+
+struct llvmpipe_context;
+extern void llvmpipe_init_query_funcs(struct llvmpipe_context * );
+
+
+#endif /* LP_QUERY_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_screen.c b/src/gallium/drivers/llvmpipe/lp_screen.c
new file mode 100644
index 0000000000..125035771e
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_screen.c
@@ -0,0 +1,243 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "util/u_memory.h"
+#include "util/u_simple_screen.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_screen.h"
+
+#include "lp_texture.h"
+#include "lp_buffer.h"
+#include "lp_winsys.h"
+#include "lp_jit.h"
+#include "lp_screen.h"
+
+
+static const char *
+llvmpipe_get_vendor(struct pipe_screen *screen)
+{
+ return "VMware, Inc.";
+}
+
+
+static const char *
+llvmpipe_get_name(struct pipe_screen *screen)
+{
+ return "llvmpipe";
+}
+
+
+static int
+llvmpipe_get_param(struct pipe_screen *screen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_TEXTURE_IMAGE_UNITS:
+ return PIPE_MAX_SAMPLERS;
+ case PIPE_CAP_MAX_VERTEX_TEXTURE_UNITS:
+ return PIPE_MAX_SAMPLERS;
+ case PIPE_CAP_NPOT_TEXTURES:
+ return 1;
+ case PIPE_CAP_TWO_SIDED_STENCIL:
+ return 1;
+ case PIPE_CAP_GLSL:
+ return 1;
+ case PIPE_CAP_S3TC:
+ return 0;
+ case PIPE_CAP_ANISOTROPIC_FILTER:
+ return 0;
+ case PIPE_CAP_POINT_SPRITE:
+ return 1;
+ case PIPE_CAP_MAX_RENDER_TARGETS:
+ return PIPE_MAX_COLOR_BUFS;
+ case PIPE_CAP_OCCLUSION_QUERY:
+ return 1;
+ case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
+ return 1;
+ case PIPE_CAP_TEXTURE_MIRROR_REPEAT:
+ return 1;
+ case PIPE_CAP_TEXTURE_SHADOW_MAP:
+ return 1;
+ case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
+ return 13; /* max 4Kx4K */
+ case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
+ return 8; /* max 128x128x128 */
+ case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
+ return 13; /* max 4Kx4K */
+ case PIPE_CAP_TGSI_CONT_SUPPORTED:
+ return 1;
+ case PIPE_CAP_BLEND_EQUATION_SEPARATE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+
+static float
+llvmpipe_get_paramf(struct pipe_screen *screen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_LINE_WIDTH:
+ /* fall-through */
+ case PIPE_CAP_MAX_LINE_WIDTH_AA:
+ return 255.0; /* arbitrary */
+ case PIPE_CAP_MAX_POINT_WIDTH:
+ /* fall-through */
+ case PIPE_CAP_MAX_POINT_WIDTH_AA:
+ return 255.0; /* arbitrary */
+ case PIPE_CAP_MAX_TEXTURE_ANISOTROPY:
+      return 16.0; /* not actually significant at this time */
+ case PIPE_CAP_MAX_TEXTURE_LOD_BIAS:
+ return 16.0; /* arbitrary */
+ default:
+ return 0;
+ }
+}
+
+
+/**
+ * Query format support for creating a texture, drawing surface, etc.
+ * \param format the format to test
+ * \param target one of the PIPE_TEXTURE_x targets (1D/2D/3D/CUBE)
+ */
+static boolean
+llvmpipe_is_format_supported( struct pipe_screen *_screen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned tex_usage,
+ unsigned geom_flags )
+{
+ struct llvmpipe_screen *screen = llvmpipe_screen(_screen);
+ struct llvmpipe_winsys *winsys = screen->winsys;
+
+ assert(target == PIPE_TEXTURE_1D ||
+ target == PIPE_TEXTURE_2D ||
+ target == PIPE_TEXTURE_3D ||
+ target == PIPE_TEXTURE_CUBE);
+
+ if(format == PIPE_FORMAT_Z16_UNORM)
+ return FALSE;
+ if(format == PIPE_FORMAT_S8_UNORM)
+ return FALSE;
+
+ switch(format) {
+ case PIPE_FORMAT_DXT1_RGB:
+ case PIPE_FORMAT_DXT1_RGBA:
+ case PIPE_FORMAT_DXT3_RGBA:
+ case PIPE_FORMAT_DXT5_RGBA:
+ return FALSE;
+ default:
+ break;
+ }
+
+ if(tex_usage & PIPE_TEXTURE_USAGE_DISPLAY_TARGET)
+ return winsys->is_displaytarget_format_supported(winsys, format);
+
+ return TRUE;
+}
+
+
+static struct pipe_buffer *
+llvmpipe_surface_buffer_create(struct pipe_screen *screen,
+ unsigned width, unsigned height,
+ enum pipe_format format,
+ unsigned tex_usage,
+ unsigned usage,
+ unsigned *stride)
+{
+ /* This function should never be used */
+ assert(0);
+ return NULL;
+}
+
+
+static void
+llvmpipe_flush_frontbuffer(struct pipe_screen *_screen,
+ struct pipe_surface *surface,
+ void *context_private)
+{
+ struct llvmpipe_screen *screen = llvmpipe_screen(_screen);
+ struct llvmpipe_winsys *winsys = screen->winsys;
+ struct llvmpipe_texture *texture = llvmpipe_texture(surface->texture);
+
+ assert(texture->dt);
+ if (texture->dt)
+ winsys->displaytarget_display(winsys, texture->dt, context_private);
+}
+
+
+static void
+llvmpipe_destroy_screen( struct pipe_screen *_screen )
+{
+ struct llvmpipe_screen *screen = llvmpipe_screen(_screen);
+
+ struct pipe_winsys *winsys = _screen->winsys;
+
+ lp_jit_screen_cleanup(screen);
+
+ if(winsys->destroy)
+ winsys->destroy(winsys);
+
+ FREE(screen);
+}
+
+
+
+/**
+ * Create a new pipe_screen object.
+ * Note: pipe_screen is subclassed here as llvmpipe_screen (see lp_screen.h).
+ */
+struct pipe_screen *
+llvmpipe_create_screen(struct llvmpipe_winsys *winsys)
+{
+ struct llvmpipe_screen *screen = CALLOC_STRUCT(llvmpipe_screen);
+
+ if (!screen)
+ return NULL;
+
+ screen->winsys = winsys;
+
+ screen->base.destroy = llvmpipe_destroy_screen;
+
+ screen->base.get_name = llvmpipe_get_name;
+ screen->base.get_vendor = llvmpipe_get_vendor;
+ screen->base.get_param = llvmpipe_get_param;
+ screen->base.get_paramf = llvmpipe_get_paramf;
+ screen->base.is_format_supported = llvmpipe_is_format_supported;
+
+ screen->base.surface_buffer_create = llvmpipe_surface_buffer_create;
+ screen->base.flush_frontbuffer = llvmpipe_flush_frontbuffer;
+
+ llvmpipe_init_screen_texture_funcs(&screen->base);
+ llvmpipe_init_screen_buffer_funcs(&screen->base);
+
+ lp_jit_screen_init(screen);
+
+ return &screen->base;
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_screen.h b/src/gallium/drivers/llvmpipe/lp_screen.h
new file mode 100644
index 0000000000..4a1b4d6f3e
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_screen.h
@@ -0,0 +1,79 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ * @author Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef LP_SCREEN_H
+#define LP_SCREEN_H
+
+#include <llvm-c/Core.h>
+#include <llvm-c/Analysis.h>
+#include <llvm-c/Target.h>
+#include <llvm-c/ExecutionEngine.h>
+
+#include "pipe/p_screen.h"
+#include "pipe/p_defines.h"
+
+
+struct llvmpipe_winsys;
+
+
+struct llvmpipe_screen
+{
+ struct pipe_screen base;
+
+ struct llvmpipe_winsys *winsys;
+
+ LLVMModuleRef module;
+ LLVMExecutionEngineRef engine;
+ LLVMModuleProviderRef provider;
+ LLVMTargetDataRef target;
+ LLVMPassManagerRef pass;
+
+ LLVMTypeRef context_ptr_type;
+
+ /* Increments whenever textures are modified. Contexts can track
+ * this.
+ */
+ unsigned timestamp;
+};
+
+
+
+
+static INLINE struct llvmpipe_screen *
+llvmpipe_screen( struct pipe_screen *pipe )
+{
+ return (struct llvmpipe_screen *)pipe;
+}
+
+
+#endif /* LP_SCREEN_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_setup.c b/src/gallium/drivers/llvmpipe/lp_setup.c
new file mode 100644
index 0000000000..2d2fc19a65
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_setup.c
@@ -0,0 +1,1483 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \brief Primitive rasterization/rendering (points, lines, triangles)
+ *
+ * \author Keith Whitwell <keith@tungstengraphics.com>
+ * \author Brian Paul
+ */
+
+#include "lp_context.h"
+#include "lp_prim_setup.h"
+#include "lp_quad.h"
+#include "lp_setup.h"
+#include "lp_state.h"
+#include "draw/draw_context.h"
+#include "draw/draw_private.h"
+#include "draw/draw_vertex.h"
+#include "pipe/p_shader_tokens.h"
+#include "pipe/p_thread.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "lp_bld_debug.h"
+#include "lp_tile_cache.h"
+#include "lp_tile_soa.h"
+
+
+#define DEBUG_VERTS 0
+#define DEBUG_FRAGS 0
+
+/**
+ * Triangle edge info
+ */
+struct edge {
+ float dx; /**< X(v1) - X(v0), used only during setup */
+ float dy; /**< Y(v1) - Y(v0), used only during setup */
+ float dxdy; /**< dx/dy */
+ float sx, sy; /**< first sample point coord */
+ int lines; /**< number of lines on this edge */
+};
+
+
+#define MAX_QUADS 16
+
+
+/**
+ * Triangle setup info (derived from draw_stage).
+ * Also used for line drawing (taking some liberties).
+ */
+struct setup_context {
+ struct llvmpipe_context *llvmpipe;
+
+ /* Vertices are just an array of floats making up each attribute in
+ * turn. Currently fixed at 4 floats, but should change in time.
+ * Codegen will help cope with this.
+ */
+ const float (*vmax)[4];
+ const float (*vmid)[4];
+ const float (*vmin)[4];
+ const float (*vprovoke)[4];
+
+ struct edge ebot;
+ struct edge etop;
+ struct edge emaj;
+
+ float oneoverarea;
+ int facing;
+
+ struct quad_header quad[MAX_QUADS];
+ struct quad_header *quad_ptrs[MAX_QUADS];
+ unsigned count;
+
+ struct quad_interp_coef coef;
+
+ struct {
+ int left[2]; /**< [0] = row0, [1] = row1 */
+ int right[2];
+ int y;
+ } span;
+
+#if DEBUG_FRAGS
+ uint numFragsEmitted; /**< per primitive */
+ uint numFragsWritten; /**< per primitive */
+#endif
+
+ unsigned winding; /* which winding to cull */
+};
+
+
+
+/**
+ * Execute fragment shader for the four fragments in the quad.
+ */
+static void
+shade_quads(struct llvmpipe_context *llvmpipe,
+ struct quad_header *quads[],
+ unsigned nr)
+{
+ struct lp_fragment_shader *fs = llvmpipe->fs;
+ struct quad_header *quad = quads[0];
+ const unsigned x = quad->input.x0;
+ const unsigned y = quad->input.y0;
+ uint8_t *tile = lp_get_cached_tile(llvmpipe->cbuf_cache[0], x, y);
+ uint8_t *color;
+ void *depth;
+ uint32_t ALIGN16_ATTRIB mask[4][NUM_CHANNELS];
+ unsigned chan_index;
+ unsigned q;
+
+ assert(fs->current);
+ if(!fs->current)
+ return;
+
+ /* Sanity checks */
+ assert(nr * QUAD_SIZE == TILE_VECTOR_HEIGHT * TILE_VECTOR_WIDTH);
+ assert(x % TILE_VECTOR_WIDTH == 0);
+ assert(y % TILE_VECTOR_HEIGHT == 0);
+ for (q = 0; q < nr; ++q) {
+ assert(quads[q]->input.x0 == x + q*2);
+ assert(quads[q]->input.y0 == y);
+ }
+
+ /* mask */
+ for (q = 0; q < 4; ++q)
+ for (chan_index = 0; chan_index < NUM_CHANNELS; ++chan_index)
+ mask[q][chan_index] = quads[q]->inout.mask & (1 << chan_index) ? ~0 : 0;
+
+ /* color buffer */
+ color = &TILE_PIXEL(tile, x & (TILE_SIZE-1), y & (TILE_SIZE-1), 0);
+
+ /* depth buffer */
+ if(llvmpipe->zsbuf_map) {
+ assert((x % 2) == 0);
+ assert((y % 2) == 0);
+ depth = llvmpipe->zsbuf_map +
+ y*llvmpipe->zsbuf_transfer->stride +
+ 2*x*llvmpipe->zsbuf_transfer->block.size;
+ }
+ else
+ depth = NULL;
+
+ /* XXX: This will most likely fail on 32bit x86 without -mstackrealign */
+ assert(lp_check_alignment(mask, 16));
+
+ assert(lp_check_alignment(depth, 16));
+ assert(lp_check_alignment(color, 16));
+ assert(lp_check_alignment(llvmpipe->jit_context.blend_color, 16));
+
+ /* run shader */
+ fs->current->jit_function( &llvmpipe->jit_context,
+ x, y,
+ quad->coef->a0,
+ quad->coef->dadx,
+ quad->coef->dady,
+ &mask[0][0],
+ color,
+ depth);
+}
+
+
+
+
+/**
+ * Do triangle cull test using tri determinant (sign indicates orientation)
+ * \return true if triangle is to be culled.
+ */
+static INLINE boolean
+cull_tri(const struct setup_context *setup, float det)
+{
+ if (det != 0) {
+      /* If det < 0 then Z points toward the camera and the triangle has
+       * counter-clockwise winding.
+ */
+ unsigned winding = (det < 0) ? PIPE_WINDING_CCW : PIPE_WINDING_CW;
+
+ if ((winding & setup->winding) == 0)
+ return FALSE;
+ }
+
+ /* Culled:
+ */
+ return TRUE;
+}
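+
+/* For example, with setup->winding == PIPE_WINDING_CW a triangle whose
+ * determinant is positive (clockwise) is culled, while det == 0 (a
+ * degenerate, zero-area triangle) is culled regardless of the cull mode.
+ */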
+
+
+
+/**
+ * Clip setup->quad against the scissor/surface bounds.
+ */
+static INLINE void
+quad_clip( struct setup_context *setup, struct quad_header *quad )
+{
+ const struct pipe_scissor_state *cliprect = &setup->llvmpipe->cliprect;
+ const int minx = (int) cliprect->minx;
+ const int maxx = (int) cliprect->maxx;
+ const int miny = (int) cliprect->miny;
+ const int maxy = (int) cliprect->maxy;
+
+ if (quad->input.x0 >= maxx ||
+ quad->input.y0 >= maxy ||
+ quad->input.x0 + 1 < minx ||
+ quad->input.y0 + 1 < miny) {
+ /* totally clipped */
+ quad->inout.mask = 0x0;
+ return;
+ }
+ if (quad->input.x0 < minx)
+ quad->inout.mask &= (MASK_BOTTOM_RIGHT | MASK_TOP_RIGHT);
+ if (quad->input.y0 < miny)
+ quad->inout.mask &= (MASK_BOTTOM_LEFT | MASK_BOTTOM_RIGHT);
+ if (quad->input.x0 == maxx - 1)
+ quad->inout.mask &= (MASK_BOTTOM_LEFT | MASK_TOP_LEFT);
+ if (quad->input.y0 == maxy - 1)
+ quad->inout.mask &= (MASK_TOP_LEFT | MASK_TOP_RIGHT);
+}
+
+
+
+/**
+ * Given an X or Y coordinate, return the block/quad coordinate that it
+ * belongs to.
+ */
+static INLINE int block( int x )
+{
+ return x & ~(2-1);
+}
+
+static INLINE int block_x( int x )
+{
+ return x & ~(TILE_VECTOR_WIDTH - 1);
+}
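+
+/* For example, block(5) == 4 and block(7) == 6 (2-pixel quad granularity);
+ * block_x() rounds the same way but to TILE_VECTOR_WIDTH (assumed to be a
+ * power of two, since it is used as a mask here).
+ */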
+
+
+/**
+ * Emit a quad (pass to next stage) with clipping.
+ */
+static INLINE void
+clip_emit_quad( struct setup_context *setup, struct quad_header *quad )
+{
+ quad_clip( setup, quad );
+
+ if (quad->inout.mask) {
+ struct llvmpipe_context *lp = setup->llvmpipe;
+
+#if 1
+ /* XXX: The blender expects 4 quads. This is far from efficient, but
+ * until we codegenerate single-quad variants of the fragment pipeline
+ * we need this hack. */
+ const unsigned nr_quads = TILE_VECTOR_HEIGHT*TILE_VECTOR_WIDTH/QUAD_SIZE;
+ struct quad_header quads[nr_quads];
+ struct quad_header *quad_ptrs[nr_quads];
+ int x0 = block_x(quad->input.x0);
+ unsigned i;
+
+ for(i = 0; i < nr_quads; ++i) {
+ int x = x0 + 2*i;
+ if(x == quad->input.x0)
+ memcpy(&quads[i], quad, sizeof quads[i]);
+ else {
+ memset(&quads[i], 0, sizeof quads[i]);
+ quads[i].input.x0 = x;
+ quads[i].input.y0 = quad->input.y0;
+ quads[i].coef = quad->coef;
+ }
+ quad_ptrs[i] = &quads[i];
+ }
+
+ shade_quads( lp, quad_ptrs, nr_quads );
+#else
+ shade_quads( lp, &quad, 1 );
+#endif
+ }
+}
+
+
+/**
+ * Render a horizontal span of quads
+ */
+static void flush_spans( struct setup_context *setup )
+{
+ const int step = TILE_VECTOR_WIDTH;
+ const int xleft0 = setup->span.left[0];
+ const int xleft1 = setup->span.left[1];
+ const int xright0 = setup->span.right[0];
+ const int xright1 = setup->span.right[1];
+
+
+ int minleft = block_x(MIN2(xleft0, xleft1));
+ int maxright = MAX2(xright0, xright1);
+ int x;
+
+ for (x = minleft; x < maxright; x += step) {
+ unsigned skip_left0 = CLAMP(xleft0 - x, 0, step);
+ unsigned skip_left1 = CLAMP(xleft1 - x, 0, step);
+ unsigned skip_right0 = CLAMP(x + step - xright0, 0, step);
+ unsigned skip_right1 = CLAMP(x + step - xright1, 0, step);
+ unsigned lx = x;
+ const unsigned nr_quads = TILE_VECTOR_HEIGHT*TILE_VECTOR_WIDTH/QUAD_SIZE;
+ unsigned q = 0;
+
+ unsigned skipmask_left0 = (1U << skip_left0) - 1U;
+ unsigned skipmask_left1 = (1U << skip_left1) - 1U;
+
+ /* These calculations fail when step == 32 and skip_right == 0.
+ */
+ unsigned skipmask_right0 = ~0U << (unsigned)(step - skip_right0);
+ unsigned skipmask_right1 = ~0U << (unsigned)(step - skip_right1);
+
+ unsigned mask0 = ~skipmask_left0 & ~skipmask_right0;
+ unsigned mask1 = ~skipmask_left1 & ~skipmask_right1;
+
+ if (mask0 | mask1) {
+ for(q = 0; q < nr_quads; ++q) {
+ unsigned quadmask = (mask0 & 3) | ((mask1 & 3) << 2);
+ setup->quad[q].input.x0 = lx;
+ setup->quad[q].input.y0 = setup->span.y;
+ setup->quad[q].inout.mask = quadmask;
+ setup->quad_ptrs[q] = &setup->quad[q];
+ mask0 >>= 2;
+ mask1 >>= 2;
+ lx += 2;
+ }
+ assert(!(mask0 | mask1));
+
+ shade_quads(setup->llvmpipe, setup->quad_ptrs, nr_quads );
+ }
+ }
+
+
+ setup->span.y = 0;
+ setup->span.right[0] = 0;
+ setup->span.right[1] = 0;
+ setup->span.left[0] = 1000000; /* greater than right[0] */
+ setup->span.left[1] = 1000000; /* greater than right[1] */
+}
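+
+/* Worked example of the skip-mask logic above, assuming (for illustration
+ * only) step == 8: a block starting at x == 0 with xleft0 == 3 and
+ * xright0 == 6 gives skip_left0 == 3 and skip_right0 == 2, hence (low 8 bits)
+ *
+ *   skipmask_left0  = 00000111
+ *   skipmask_right0 = 11000000
+ *   mask0           = 00111000
+ *
+ * i.e. bit i of mask0 is set exactly when pixel x+i lies in [xleft0, xright0).
+ */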
+
+
+#if DEBUG_VERTS
+static void print_vertex(const struct setup_context *setup,
+ const float (*v)[4])
+{
+ int i;
+ debug_printf(" Vertex: (%p)\n", v);
+ for (i = 0; i < setup->quad[0].nr_attrs; i++) {
+ debug_printf(" %d: %f %f %f %f\n", i,
+ v[i][0], v[i][1], v[i][2], v[i][3]);
+ if (util_is_inf_or_nan(v[i][0])) {
+ debug_printf(" NaN!\n");
+ }
+ }
+}
+#endif
+
+/**
+ * Sort the vertices from top to bottom order, setting up the triangle
+ * edge fields (ebot, emaj, etop).
+ * \return FALSE if coords are inf/nan (cull the tri), TRUE otherwise
+ */
+static boolean setup_sort_vertices( struct setup_context *setup,
+ float det,
+ const float (*v0)[4],
+ const float (*v1)[4],
+ const float (*v2)[4] )
+{
+ setup->vprovoke = v2;
+
+ /* determine bottom to top order of vertices */
+ {
+ float y0 = v0[0][1];
+ float y1 = v1[0][1];
+ float y2 = v2[0][1];
+ if (y0 <= y1) {
+ if (y1 <= y2) {
+ /* y0<=y1<=y2 */
+ setup->vmin = v0;
+ setup->vmid = v1;
+ setup->vmax = v2;
+ }
+ else if (y2 <= y0) {
+ /* y2<=y0<=y1 */
+ setup->vmin = v2;
+ setup->vmid = v0;
+ setup->vmax = v1;
+ }
+ else {
+ /* y0<=y2<=y1 */
+ setup->vmin = v0;
+ setup->vmid = v2;
+ setup->vmax = v1;
+ }
+ }
+ else {
+ if (y0 <= y2) {
+ /* y1<=y0<=y2 */
+ setup->vmin = v1;
+ setup->vmid = v0;
+ setup->vmax = v2;
+ }
+ else if (y2 <= y1) {
+ /* y2<=y1<=y0 */
+ setup->vmin = v2;
+ setup->vmid = v1;
+ setup->vmax = v0;
+ }
+ else {
+ /* y1<=y2<=y0 */
+ setup->vmin = v1;
+ setup->vmid = v2;
+ setup->vmax = v0;
+ }
+ }
+ }
+
+ setup->ebot.dx = setup->vmid[0][0] - setup->vmin[0][0];
+ setup->ebot.dy = setup->vmid[0][1] - setup->vmin[0][1];
+ setup->emaj.dx = setup->vmax[0][0] - setup->vmin[0][0];
+ setup->emaj.dy = setup->vmax[0][1] - setup->vmin[0][1];
+ setup->etop.dx = setup->vmax[0][0] - setup->vmid[0][0];
+ setup->etop.dy = setup->vmax[0][1] - setup->vmid[0][1];
+
+ /*
+ * Compute triangle's area. Use 1/area to compute partial
+ * derivatives of attributes later.
+ *
+ * The area will be the same as prim->det, but the sign may be
+ * different depending on how the vertices get sorted above.
+ *
+ * To determine whether the primitive is front or back facing we
+ * use the prim->det value because its sign is correct.
+ */
+ {
+ const float area = (setup->emaj.dx * setup->ebot.dy -
+ setup->ebot.dx * setup->emaj.dy);
+
+ setup->oneoverarea = 1.0f / area;
+
+ /*
+ debug_printf("%s one-over-area %f area %f det %f\n",
+ __FUNCTION__, setup->oneoverarea, area, det );
+ */
+ if (util_is_inf_or_nan(setup->oneoverarea))
+ return FALSE;
+ }
+
+ /* We need to know if this is a front or back-facing triangle for:
+ * - the GLSL gl_FrontFacing fragment attribute (bool)
+ * - two-sided stencil test
+ */
+ setup->facing =
+ ((det > 0.0) ^
+ (setup->llvmpipe->rasterizer->front_winding == PIPE_WINDING_CW));
+
+ return TRUE;
+}
+
+
+/**
+ * Compute a0, dadx and dady for a linearly interpolated coefficient,
+ * for a triangle.
+ */
+static void tri_pos_coeff( struct setup_context *setup,
+ uint vertSlot, unsigned i)
+{
+ float botda = setup->vmid[vertSlot][i] - setup->vmin[vertSlot][i];
+ float majda = setup->vmax[vertSlot][i] - setup->vmin[vertSlot][i];
+ float a = setup->ebot.dy * majda - botda * setup->emaj.dy;
+ float b = setup->emaj.dx * botda - majda * setup->ebot.dx;
+ float dadx = a * setup->oneoverarea;
+ float dady = b * setup->oneoverarea;
+
+ assert(i <= 3);
+
+ setup->coef.dadx[0][i] = dadx;
+ setup->coef.dady[0][i] = dady;
+
+ /* calculate a0 as the value which would be sampled for the
+ * fragment at (0,0), taking into account that we want to sample at
+ * pixel centers, in other words (0.5, 0.5).
+ *
+ * this is neat but unfortunately not a good way to do things for
+ * triangles with very large values of dadx or dady as it will
+ * result in the subtraction and re-addition from a0 of a very
+    * large number, which means we'll end up losing a lot of the
+ * fractional bits and precision from a0. the way to fix this is
+ * to define a0 as the sample at a pixel center somewhere near vmin
+ * instead - i'll switch to this later.
+ */
+ setup->coef.a0[0][i] = (setup->vmin[vertSlot][i] -
+ (dadx * (setup->vmin[0][0] - 0.5f) +
+ dady * (setup->vmin[0][1] - 0.5f)));
+
+ /*
+ debug_printf("attr[%d].%c: %f dx:%f dy:%f\n",
+ slot, "xyzw"[i],
+ setup->coef[slot].a0[i],
+ setup->coef[slot].dadx[i],
+ setup->coef[slot].dady[i]);
+ */
+}
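+
+/* In other words (illustrative): once a0/dadx/dady are set up, the
+ * interpolated value at the pixel whose center sits at window coords
+ * (px + 0.5, py + 0.5) is simply
+ *
+ *   value(px, py) = a0 + dadx * px + dady * py
+ *
+ * which is the form shade_quads() above hands to the JIT-compiled fragment
+ * shader (the a0/dadx/dady arrays plus the quad's x, y position).
+ */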
+
+
+/**
+ * Compute a0 for a constant-valued coefficient (GL_FLAT shading).
+ * The value comes from vertex[slot][i].
+ * The result will be put into setup->coef[slot].a0[i].
+ * \param slot which attribute slot
+ * \param i which component of the slot (0..3)
+ */
+static void const_pos_coeff( struct setup_context *setup,
+ uint vertSlot, unsigned i)
+{
+ setup->coef.dadx[0][i] = 0;
+ setup->coef.dady[0][i] = 0;
+
+ /* need provoking vertex info!
+ */
+ setup->coef.a0[0][i] = setup->vprovoke[vertSlot][i];
+}
+
+
+/**
+ * Compute a0 for a constant-valued coefficient (GL_FLAT shading).
+ * The value comes from vertex[slot][i].
+ * The result will be put into setup->coef[slot].a0[i].
+ * \param slot which attribute slot
+ * \param i which component of the slot (0..3)
+ */
+static void const_coeff( struct setup_context *setup,
+ unsigned attrib,
+ uint vertSlot)
+{
+ unsigned i;
+ for (i = 0; i < NUM_CHANNELS; ++i) {
+ setup->coef.dadx[1 + attrib][i] = 0;
+ setup->coef.dady[1 + attrib][i] = 0;
+
+ /* need provoking vertex info!
+ */
+ setup->coef.a0[1 + attrib][i] = setup->vprovoke[vertSlot][i];
+ }
+}
+
+
+/**
+ * Compute a0, dadx and dady for a linearly interpolated coefficient,
+ * for a triangle.
+ */
+static void tri_linear_coeff( struct setup_context *setup,
+ unsigned attrib,
+ uint vertSlot)
+{
+ unsigned i;
+ for (i = 0; i < NUM_CHANNELS; ++i) {
+ float botda = setup->vmid[vertSlot][i] - setup->vmin[vertSlot][i];
+ float majda = setup->vmax[vertSlot][i] - setup->vmin[vertSlot][i];
+ float a = setup->ebot.dy * majda - botda * setup->emaj.dy;
+ float b = setup->emaj.dx * botda - majda * setup->ebot.dx;
+ float dadx = a * setup->oneoverarea;
+ float dady = b * setup->oneoverarea;
+
+ assert(i <= 3);
+
+ setup->coef.dadx[1 + attrib][i] = dadx;
+ setup->coef.dady[1 + attrib][i] = dady;
+
+ /* calculate a0 as the value which would be sampled for the
+ * fragment at (0,0), taking into account that we want to sample at
+ * pixel centers, in other words (0.5, 0.5).
+ *
+ * this is neat but unfortunately not a good way to do things for
+ * triangles with very large values of dadx or dady as it will
+ * result in the subtraction and re-addition from a0 of a very
+       * large number, which means we'll end up losing a lot of the
+ * fractional bits and precision from a0. the way to fix this is
+ * to define a0 as the sample at a pixel center somewhere near vmin
+ * instead - i'll switch to this later.
+ */
+ setup->coef.a0[1 + attrib][i] = (setup->vmin[vertSlot][i] -
+ (dadx * (setup->vmin[0][0] - 0.5f) +
+ dady * (setup->vmin[0][1] - 0.5f)));
+
+ /*
+ debug_printf("attr[%d].%c: %f dx:%f dy:%f\n",
+ slot, "xyzw"[i],
+ setup->coef[slot].a0[i],
+ setup->coef[slot].dadx[i],
+ setup->coef[slot].dady[i]);
+ */
+ }
+}
+
+
+/**
+ * Compute a0, dadx and dady for a perspective-corrected interpolant,
+ * for a triangle.
+ * We basically multiply the vertex value by 1/w before computing
+ * the plane coefficients (a0, dadx, dady).
+ * Later, when we compute the value at a particular fragment position we'll
+ * divide the interpolated value by the interpolated W at that fragment.
+ */
+static void tri_persp_coeff( struct setup_context *setup,
+ unsigned attrib,
+ uint vertSlot)
+{
+ unsigned i;
+ for (i = 0; i < NUM_CHANNELS; ++i) {
+ /* premultiply by 1/w (v[0][3] is always W):
+ */
+ float mina = setup->vmin[vertSlot][i] * setup->vmin[0][3];
+ float mida = setup->vmid[vertSlot][i] * setup->vmid[0][3];
+ float maxa = setup->vmax[vertSlot][i] * setup->vmax[0][3];
+ float botda = mida - mina;
+ float majda = maxa - mina;
+ float a = setup->ebot.dy * majda - botda * setup->emaj.dy;
+ float b = setup->emaj.dx * botda - majda * setup->ebot.dx;
+ float dadx = a * setup->oneoverarea;
+ float dady = b * setup->oneoverarea;
+
+ /*
+ debug_printf("tri persp %d,%d: %f %f %f\n", vertSlot, i,
+ setup->vmin[vertSlot][i],
+ setup->vmid[vertSlot][i],
+ setup->vmax[vertSlot][i]
+ );
+ */
+ assert(i <= 3);
+
+ setup->coef.dadx[1 + attrib][i] = dadx;
+ setup->coef.dady[1 + attrib][i] = dady;
+ setup->coef.a0[1 + attrib][i] = (mina -
+ (dadx * (setup->vmin[0][0] - 0.5f) +
+ dady * (setup->vmin[0][1] - 0.5f)));
+ }
+}
+
+
+/**
+ * Special coefficient setup for gl_FragCoord.
+ * X and Y are trivial, though Y has to be inverted for OpenGL.
+ * Z and W are copied from posCoef which should have already been computed.
+ * We could do a bit less work if we'd examine gl_FragCoord's swizzle mask.
+ */
+static void
+setup_fragcoord_coeff(struct setup_context *setup, uint slot)
+{
+ /*X*/
+ setup->coef.a0[1 + slot][0] = 0;
+ setup->coef.dadx[1 + slot][0] = 1.0;
+ setup->coef.dady[1 + slot][0] = 0.0;
+ /*Y*/
+ setup->coef.a0[1 + slot][1] = 0.0;
+ setup->coef.dadx[1 + slot][1] = 0.0;
+ setup->coef.dady[1 + slot][1] = 1.0;
+ /*Z*/
+ setup->coef.a0[1 + slot][2] = setup->coef.a0[0][2];
+ setup->coef.dadx[1 + slot][2] = setup->coef.dadx[0][2];
+ setup->coef.dady[1 + slot][2] = setup->coef.dady[0][2];
+ /*W*/
+ setup->coef.a0[1 + slot][3] = setup->coef.a0[0][3];
+ setup->coef.dadx[1 + slot][3] = setup->coef.dadx[0][3];
+ setup->coef.dady[1 + slot][3] = setup->coef.dady[0][3];
+}
+
+
+
+/**
+ * Compute the setup->coef[] array dadx, dady, a0 values.
+ * Must be called after setup->vmin,vmid,vmax,vprovoke are initialized.
+ */
+static void setup_tri_coefficients( struct setup_context *setup )
+{
+ struct llvmpipe_context *llvmpipe = setup->llvmpipe;
+ const struct lp_fragment_shader *lpfs = llvmpipe->fs;
+ const struct vertex_info *vinfo = llvmpipe_get_vertex_info(llvmpipe);
+ uint fragSlot;
+
+ /* z and w are done by linear interpolation:
+ */
+ tri_pos_coeff(setup, 0, 2);
+ tri_pos_coeff(setup, 0, 3);
+
+ /* setup interpolation for all the remaining attributes:
+ */
+ for (fragSlot = 0; fragSlot < lpfs->info.num_inputs; fragSlot++) {
+ const uint vertSlot = vinfo->attrib[fragSlot].src_index;
+
+ switch (vinfo->attrib[fragSlot].interp_mode) {
+ case INTERP_CONSTANT:
+ const_coeff(setup, fragSlot, vertSlot);
+ break;
+ case INTERP_LINEAR:
+ tri_linear_coeff(setup, fragSlot, vertSlot);
+ break;
+ case INTERP_PERSPECTIVE:
+ tri_persp_coeff(setup, fragSlot, vertSlot);
+ break;
+ case INTERP_POS:
+ setup_fragcoord_coeff(setup, fragSlot);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (lpfs->info.input_semantic_name[fragSlot] == TGSI_SEMANTIC_FACE) {
+ setup->coef.a0[1 + fragSlot][0] = 1.0f - setup->facing;
+ setup->coef.dadx[1 + fragSlot][0] = 0.0;
+ setup->coef.dady[1 + fragSlot][0] = 0.0;
+ }
+ }
+}
+
+
+
+static void setup_tri_edges( struct setup_context *setup )
+{
+ float vmin_x = setup->vmin[0][0] + 0.5f;
+ float vmid_x = setup->vmid[0][0] + 0.5f;
+
+ float vmin_y = setup->vmin[0][1] - 0.5f;
+ float vmid_y = setup->vmid[0][1] - 0.5f;
+ float vmax_y = setup->vmax[0][1] - 0.5f;
+
+ setup->emaj.sy = ceilf(vmin_y);
+ setup->emaj.lines = (int) ceilf(vmax_y - setup->emaj.sy);
+ setup->emaj.dxdy = setup->emaj.dx / setup->emaj.dy;
+ setup->emaj.sx = vmin_x + (setup->emaj.sy - vmin_y) * setup->emaj.dxdy;
+
+ setup->etop.sy = ceilf(vmid_y);
+ setup->etop.lines = (int) ceilf(vmax_y - setup->etop.sy);
+ setup->etop.dxdy = setup->etop.dx / setup->etop.dy;
+ setup->etop.sx = vmid_x + (setup->etop.sy - vmid_y) * setup->etop.dxdy;
+
+ setup->ebot.sy = ceilf(vmin_y);
+ setup->ebot.lines = (int) ceilf(vmid_y - setup->ebot.sy);
+ setup->ebot.dxdy = setup->ebot.dx / setup->ebot.dy;
+ setup->ebot.sx = vmin_x + (setup->ebot.sy - vmin_y) * setup->ebot.dxdy;
+}
+
+
+/**
+ * Render the upper or lower half of a triangle.
+ * Scissoring/cliprect is applied here too.
+ */
+static void subtriangle( struct setup_context *setup,
+ struct edge *eleft,
+ struct edge *eright,
+ unsigned lines )
+{
+ const struct pipe_scissor_state *cliprect = &setup->llvmpipe->cliprect;
+ const int minx = (int) cliprect->minx;
+ const int maxx = (int) cliprect->maxx;
+ const int miny = (int) cliprect->miny;
+ const int maxy = (int) cliprect->maxy;
+ int y, start_y, finish_y;
+ int sy = (int)eleft->sy;
+
+ assert((int)eleft->sy == (int) eright->sy);
+
+ /* clip top/bottom */
+ start_y = sy;
+ if (start_y < miny)
+ start_y = miny;
+
+ finish_y = sy + lines;
+ if (finish_y > maxy)
+ finish_y = maxy;
+
+ start_y -= sy;
+ finish_y -= sy;
+
+ /*
+ debug_printf("%s %d %d\n", __FUNCTION__, start_y, finish_y);
+ */
+
+ for (y = start_y; y < finish_y; y++) {
+
+ /* avoid accumulating adds as floats don't have the precision to
+ * accurately iterate large triangle edges that way. luckily we
+ * can just multiply these days.
+ *
+ * this is all drowned out by the attribute interpolation anyway.
+ */
+ int left = (int)(eleft->sx + y * eleft->dxdy);
+ int right = (int)(eright->sx + y * eright->dxdy);
+
+ /* clip left/right */
+ if (left < minx)
+ left = minx;
+ if (right > maxx)
+ right = maxx;
+
+ if (left < right) {
+ int _y = sy + y;
+ if (block(_y) != setup->span.y) {
+ flush_spans(setup);
+ setup->span.y = block(_y);
+ }
+
+ setup->span.left[_y&1] = left;
+ setup->span.right[_y&1] = right;
+ }
+ }
+
+
+ /* save the values so that emaj can be restarted:
+ */
+ eleft->sx += lines * eleft->dxdy;
+ eright->sx += lines * eright->dxdy;
+ eleft->sy += lines;
+ eright->sy += lines;
+}
+
+
+/**
+ * Recalculate the prim's determinant.  This is needed because we don't
+ * get this information through the vbuf_render interface, so we must
+ * calculate it here.
+ */
+static float
+calc_det( const float (*v0)[4],
+ const float (*v1)[4],
+ const float (*v2)[4] )
+{
+ /* edge vectors e = v0 - v2, f = v1 - v2 */
+ const float ex = v0[0][0] - v2[0][0];
+ const float ey = v0[0][1] - v2[0][1];
+ const float fx = v1[0][0] - v2[0][0];
+ const float fy = v1[0][1] - v2[0][1];
+
+ /* det = cross(e,f).z */
+ return ex * fy - ey * fx;
+}
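+
+/* Worked example (window coords, Y increasing downwards): for v0 = (0, 0),
+ * v1 = (1, 0), v2 = (0, 1) we get e = (0, -1), f = (1, -1) and
+ * det = 0*(-1) - (-1)*1 = 1 > 0, i.e. clockwise on screen; swapping any two
+ * vertices flips the sign.
+ */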
+
+
+/**
+ * Do setup for triangle rasterization, then render the triangle.
+ */
+void llvmpipe_setup_tri( struct setup_context *setup,
+ const float (*v0)[4],
+ const float (*v1)[4],
+ const float (*v2)[4] )
+{
+ float det;
+
+#if DEBUG_VERTS
+ debug_printf("Setup triangle:\n");
+ print_vertex(setup, v0);
+ print_vertex(setup, v1);
+ print_vertex(setup, v2);
+#endif
+
+ if (setup->llvmpipe->no_rast)
+ return;
+
+ det = calc_det(v0, v1, v2);
+ /*
+ debug_printf("%s\n", __FUNCTION__ );
+ */
+
+#if DEBUG_FRAGS
+ setup->numFragsEmitted = 0;
+ setup->numFragsWritten = 0;
+#endif
+
+ if (cull_tri( setup, det ))
+ return;
+
+ if (!setup_sort_vertices( setup, det, v0, v1, v2 ))
+ return;
+ setup_tri_coefficients( setup );
+ setup_tri_edges( setup );
+
+ assert(setup->llvmpipe->reduced_prim == PIPE_PRIM_TRIANGLES);
+
+ setup->span.y = 0;
+ setup->span.right[0] = 0;
+ setup->span.right[1] = 0;
+ /* setup->span.z_mode = tri_z_mode( setup->ctx ); */
+
+ /* init_constant_attribs( setup ); */
+
+ if (setup->oneoverarea < 0.0) {
+ /* emaj on left:
+ */
+ subtriangle( setup, &setup->emaj, &setup->ebot, setup->ebot.lines );
+ subtriangle( setup, &setup->emaj, &setup->etop, setup->etop.lines );
+ }
+ else {
+ /* emaj on right:
+ */
+ subtriangle( setup, &setup->ebot, &setup->emaj, setup->ebot.lines );
+ subtriangle( setup, &setup->etop, &setup->emaj, setup->etop.lines );
+ }
+
+ flush_spans( setup );
+
+#if DEBUG_FRAGS
+ printf("Tri: %u frags emitted, %u written\n",
+ setup->numFragsEmitted,
+ setup->numFragsWritten);
+#endif
+}
+
+
+
+/**
+ * Compute a0, dadx and dady for a linearly interpolated coefficient,
+ * for a line.
+ */
+static void
+linear_pos_coeff(struct setup_context *setup,
+ uint vertSlot, uint i)
+{
+ const float da = setup->vmax[vertSlot][i] - setup->vmin[vertSlot][i];
+ const float dadx = da * setup->emaj.dx * setup->oneoverarea;
+ const float dady = da * setup->emaj.dy * setup->oneoverarea;
+ setup->coef.dadx[0][i] = dadx;
+ setup->coef.dady[0][i] = dady;
+ setup->coef.a0[0][i] = (setup->vmin[vertSlot][i] -
+ (dadx * (setup->vmin[0][0] - 0.5f) +
+ dady * (setup->vmin[0][1] - 0.5f)));
+}
+
+
+/**
+ * Compute a0, dadx and dady for a linearly interpolated coefficient,
+ * for a line.
+ */
+static void
+line_linear_coeff(struct setup_context *setup,
+ unsigned attrib,
+ uint vertSlot)
+{
+ unsigned i;
+ for (i = 0; i < NUM_CHANNELS; ++i) {
+ const float da = setup->vmax[vertSlot][i] - setup->vmin[vertSlot][i];
+ const float dadx = da * setup->emaj.dx * setup->oneoverarea;
+ const float dady = da * setup->emaj.dy * setup->oneoverarea;
+ setup->coef.dadx[1 + attrib][i] = dadx;
+ setup->coef.dady[1 + attrib][i] = dady;
+ setup->coef.a0[1 + attrib][i] = (setup->vmin[vertSlot][i] -
+ (dadx * (setup->vmin[0][0] - 0.5f) +
+ dady * (setup->vmin[0][1] - 0.5f)));
+ }
+}
+
+
+/**
+ * Compute a0, dadx and dady for a perspective-corrected interpolant,
+ * for a line.
+ */
+static void
+line_persp_coeff(struct setup_context *setup,
+ unsigned attrib,
+ uint vertSlot)
+{
+ unsigned i;
+ for (i = 0; i < NUM_CHANNELS; ++i) {
+ /* XXX double-check/verify this arithmetic */
+ const float a0 = setup->vmin[vertSlot][i] * setup->vmin[0][3];
+ const float a1 = setup->vmax[vertSlot][i] * setup->vmax[0][3];
+ const float da = a1 - a0;
+ const float dadx = da * setup->emaj.dx * setup->oneoverarea;
+ const float dady = da * setup->emaj.dy * setup->oneoverarea;
+ setup->coef.dadx[1 + attrib][i] = dadx;
+ setup->coef.dady[1 + attrib][i] = dady;
+ setup->coef.a0[1 + attrib][i] = (setup->vmin[vertSlot][i] -
+ (dadx * (setup->vmin[0][0] - 0.5f) +
+ dady * (setup->vmin[0][1] - 0.5f)));
+ }
+}
+
+
+/**
+ * Compute the setup->coef[] array dadx, dady, a0 values.
+ * Must be called after setup->vmin,vmax are initialized.
+ */
+static INLINE boolean
+setup_line_coefficients(struct setup_context *setup,
+ const float (*v0)[4],
+ const float (*v1)[4])
+{
+ struct llvmpipe_context *llvmpipe = setup->llvmpipe;
+ const struct lp_fragment_shader *lpfs = llvmpipe->fs;
+ const struct vertex_info *vinfo = llvmpipe_get_vertex_info(llvmpipe);
+ uint fragSlot;
+ float area;
+
+ /* use setup->vmin, vmax to point to vertices */
+ if (llvmpipe->rasterizer->flatshade_first)
+ setup->vprovoke = v0;
+ else
+ setup->vprovoke = v1;
+ setup->vmin = v0;
+ setup->vmax = v1;
+
+ setup->emaj.dx = setup->vmax[0][0] - setup->vmin[0][0];
+ setup->emaj.dy = setup->vmax[0][1] - setup->vmin[0][1];
+
+ /* NOTE: this is not really area but something proportional to it */
+ area = setup->emaj.dx * setup->emaj.dx + setup->emaj.dy * setup->emaj.dy;
+ if (area == 0.0f || util_is_inf_or_nan(area))
+ return FALSE;
+ setup->oneoverarea = 1.0f / area;
+
+ /* z and w are done by linear interpolation:
+ */
+ linear_pos_coeff(setup, 0, 2);
+ linear_pos_coeff(setup, 0, 3);
+
+ /* setup interpolation for all the remaining attributes:
+ */
+ for (fragSlot = 0; fragSlot < lpfs->info.num_inputs; fragSlot++) {
+ const uint vertSlot = vinfo->attrib[fragSlot].src_index;
+
+ switch (vinfo->attrib[fragSlot].interp_mode) {
+ case INTERP_CONSTANT:
+ const_coeff(setup, fragSlot, vertSlot);
+ break;
+ case INTERP_LINEAR:
+ line_linear_coeff(setup, fragSlot, vertSlot);
+ break;
+ case INTERP_PERSPECTIVE:
+ line_persp_coeff(setup, fragSlot, vertSlot);
+ break;
+ case INTERP_POS:
+ setup_fragcoord_coeff(setup, fragSlot);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (lpfs->info.input_semantic_name[fragSlot] == TGSI_SEMANTIC_FACE) {
+ setup->coef.a0[1 + fragSlot][0] = 1.0f - setup->facing;
+ setup->coef.dadx[1 + fragSlot][0] = 0.0;
+ setup->coef.dady[1 + fragSlot][0] = 0.0;
+ }
+ }
+ return TRUE;
+}
+
+
+/**
+ * Plot a pixel in a line segment.
+ */
+static INLINE void
+plot(struct setup_context *setup, int x, int y)
+{
+ const int iy = y & 1;
+ const int ix = x & 1;
+ const int quadX = x - ix;
+ const int quadY = y - iy;
+ const int mask = (1 << ix) << (2 * iy);
+
+ if (quadX != setup->quad[0].input.x0 ||
+ quadY != setup->quad[0].input.y0)
+ {
+ /* flush prev quad, start new quad */
+
+ if (setup->quad[0].input.x0 != -1)
+ clip_emit_quad( setup, &setup->quad[0] );
+
+ setup->quad[0].input.x0 = quadX;
+ setup->quad[0].input.y0 = quadY;
+ setup->quad[0].inout.mask = 0x0;
+ }
+
+ setup->quad[0].inout.mask |= mask;
+}
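+/*
+ * Sketch of the mask layout assumed above: a quad covers a 2x2 pixel block
+ * and (1 << ix) << (2 * iy) selects one bit per pixel: bit 0 = top-left,
+ * bit 1 = top-right, bit 2 = bottom-left, bit 3 = bottom-right, which
+ * appears to match the MASK_* values used for points further below.
+ * E.g. plot(5, 7) gives ix = iy = 1, quad origin (4, 6) and mask bit 3.
+ */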
+
+
+/**
+ * Do setup for line rasterization, then render the line.
+ * Single-pixel width, no stipple, etc. We rely on the 'draw' module
+ * to handle stippling and wide lines.
+ */
+void
+llvmpipe_setup_line(struct setup_context *setup,
+ const float (*v0)[4],
+ const float (*v1)[4])
+{
+ int x0 = (int) v0[0][0];
+ int x1 = (int) v1[0][0];
+ int y0 = (int) v0[0][1];
+ int y1 = (int) v1[0][1];
+ int dx = x1 - x0;
+ int dy = y1 - y0;
+ int xstep, ystep;
+
+#if DEBUG_VERTS
+ debug_printf("Setup line:\n");
+ print_vertex(setup, v0);
+ print_vertex(setup, v1);
+#endif
+
+ if (setup->llvmpipe->no_rast)
+ return;
+
+ if (dx == 0 && dy == 0)
+ return;
+
+ if (!setup_line_coefficients(setup, v0, v1))
+ return;
+
+ assert(v0[0][0] < 1.0e9);
+ assert(v0[0][1] < 1.0e9);
+ assert(v1[0][0] < 1.0e9);
+ assert(v1[0][1] < 1.0e9);
+
+ if (dx < 0) {
+ dx = -dx; /* make positive */
+ xstep = -1;
+ }
+ else {
+ xstep = 1;
+ }
+
+ if (dy < 0) {
+ dy = -dy; /* make positive */
+ ystep = -1;
+ }
+ else {
+ ystep = 1;
+ }
+
+ assert(dx >= 0);
+ assert(dy >= 0);
+ assert(setup->llvmpipe->reduced_prim == PIPE_PRIM_LINES);
+
+ setup->quad[0].input.x0 = setup->quad[0].input.y0 = -1;
+ setup->quad[0].inout.mask = 0x0;
+
+ /* XXX temporary: set coverage to 1.0 so the line appears
+ * if AA mode happens to be enabled.
+ */
+ setup->quad[0].input.coverage[0] =
+ setup->quad[0].input.coverage[1] =
+ setup->quad[0].input.coverage[2] =
+ setup->quad[0].input.coverage[3] = 1.0;
+
+ if (dx > dy) {
+ /*** X-major line ***/
+ int i;
+ const int errorInc = dy + dy;
+ int error = errorInc - dx;
+ const int errorDec = error - dx;
+
+ for (i = 0; i < dx; i++) {
+ plot(setup, x0, y0);
+
+ x0 += xstep;
+ if (error < 0) {
+ error += errorInc;
+ }
+ else {
+ error += errorDec;
+ y0 += ystep;
+ }
+ }
+ }
+ else {
+ /*** Y-major line ***/
+ int i;
+ const int errorInc = dx + dx;
+ int error = errorInc - dy;
+ const int errorDec = error - dy;
+
+ for (i = 0; i < dy; i++) {
+ plot(setup, x0, y0);
+
+ y0 += ystep;
+ if (error < 0) {
+ error += errorInc;
+ }
+ else {
+ error += errorDec;
+ x0 += xstep;
+ }
+ }
+ }
+
+ /* draw final quad */
+ if (setup->quad[0].inout.mask) {
+ clip_emit_quad( setup, &setup->quad[0] );
+ }
+}
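+/*
+ * Worked example of the error terms above (illustration only): for an
+ * X-major line with dx = 5, dy = 2 we get errorInc = 4, error = -1 and
+ * errorDec = -6.  Stepping x five times, error becomes 3, -3, 1, -5, -1,
+ * so y is stepped on the 2nd and 4th iterations -- two y steps over five
+ * x steps, matching the slope dy/dx = 2/5.
+ */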
+
+
+static void
+point_persp_coeff(struct setup_context *setup,
+ const float (*vert)[4],
+ unsigned attrib,
+ uint vertSlot)
+{
+ unsigned i;
+ for(i = 0; i < NUM_CHANNELS; ++i) {
+ setup->coef.dadx[1 + attrib][i] = 0.0F;
+ setup->coef.dady[1 + attrib][i] = 0.0F;
+ setup->coef.a0[1 + attrib][i] = vert[vertSlot][i] * vert[0][3];
+ }
+}
+
+
+/**
+ * Do setup for point rasterization, then render the point.
+ * Round or square points...
+ * XXX could optimize a lot for 1-pixel points.
+ */
+void
+llvmpipe_setup_point( struct setup_context *setup,
+ const float (*v0)[4] )
+{
+ struct llvmpipe_context *llvmpipe = setup->llvmpipe;
+ const struct lp_fragment_shader *lpfs = llvmpipe->fs;
+ const int sizeAttr = setup->llvmpipe->psize_slot;
+ const float size
+ = sizeAttr > 0 ? v0[sizeAttr][0]
+ : setup->llvmpipe->rasterizer->point_size;
+ const float halfSize = 0.5F * size;
+ const boolean round = (boolean) setup->llvmpipe->rasterizer->point_smooth;
+ const float x = v0[0][0]; /* Note: data[0] is always position */
+ const float y = v0[0][1];
+ const struct vertex_info *vinfo = llvmpipe_get_vertex_info(llvmpipe);
+ uint fragSlot;
+
+#if DEBUG_VERTS
+ debug_printf("Setup point:\n");
+ print_vertex(setup, v0);
+#endif
+
+ if (llvmpipe->no_rast)
+ return;
+
+ assert(setup->llvmpipe->reduced_prim == PIPE_PRIM_POINTS);
+
+ /* For points, all interpolants are constant-valued.
+ * However, for point sprites, we'll need to set up texcoords appropriately.
+ * XXX: which coefficients are the texcoords???
+ * We may do point sprites as textured quads...
+ *
+ * KW: We don't know which coefficients are texcoords - ultimately
+ * the choice of what interpolation mode to use for each attribute
+ * should be determined by the fragment program, using
+ * per-attribute declaration statements that include interpolation
+ * mode as a parameter.  So either the fragment program will have
+ * to be adjusted for pointsprite vs. normal point behaviour, or
+ * a special interpolation mode will have to be defined which
+ * matches the required behaviour for point sprites.  But the
+ * latter is not a feature of normal hardware, and as such should
+ * probably be ruled out on that basis.
+ */
+ setup->vprovoke = v0;
+
+ /* setup Z, W */
+ const_pos_coeff(setup, 0, 2);
+ const_pos_coeff(setup, 0, 3);
+
+ for (fragSlot = 0; fragSlot < lpfs->info.num_inputs; fragSlot++) {
+ const uint vertSlot = vinfo->attrib[fragSlot].src_index;
+
+ switch (vinfo->attrib[fragSlot].interp_mode) {
+ case INTERP_CONSTANT:
+ /* fall-through */
+ case INTERP_LINEAR:
+ const_coeff(setup, fragSlot, vertSlot);
+ break;
+ case INTERP_PERSPECTIVE:
+ point_persp_coeff(setup, setup->vprovoke, fragSlot, vertSlot);
+ break;
+ case INTERP_POS:
+ setup_fragcoord_coeff(setup, fragSlot);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (lpfs->info.input_semantic_name[fragSlot] == TGSI_SEMANTIC_FACE) {
+ setup->coef.a0[1 + fragSlot][0] = 1.0f - setup->facing;
+ setup->coef.dadx[1 + fragSlot][0] = 0.0;
+ setup->coef.dady[1 + fragSlot][0] = 0.0;
+ }
+ }
+
+
+ if (halfSize <= 0.5 && !round) {
+ /* special case for 1-pixel points */
+ const int ix = ((int) x) & 1;
+ const int iy = ((int) y) & 1;
+ setup->quad[0].input.x0 = (int) x - ix;
+ setup->quad[0].input.y0 = (int) y - iy;
+ setup->quad[0].inout.mask = (1 << ix) << (2 * iy);
+ clip_emit_quad( setup, &setup->quad[0] );
+ }
+ else {
+ if (round) {
+ /* rounded points */
+ const int ixmin = block((int) (x - halfSize));
+ const int ixmax = block((int) (x + halfSize));
+ const int iymin = block((int) (y - halfSize));
+ const int iymax = block((int) (y + halfSize));
+ const float rmin = halfSize - 0.7071F; /* 0.7071 = sqrt(2)/2 */
+ const float rmax = halfSize + 0.7071F;
+ const float rmin2 = MAX2(0.0F, rmin * rmin);
+ const float rmax2 = rmax * rmax;
+ const float cscale = 1.0F / (rmax2 - rmin2);
+ int ix, iy;
+
+ for (iy = iymin; iy <= iymax; iy += 2) {
+ for (ix = ixmin; ix <= ixmax; ix += 2) {
+ float dx, dy, dist2, cover;
+
+ setup->quad[0].inout.mask = 0x0;
+
+ dx = (ix + 0.5f) - x;
+ dy = (iy + 0.5f) - y;
+ dist2 = dx * dx + dy * dy;
+ if (dist2 <= rmax2) {
+ cover = 1.0F - (dist2 - rmin2) * cscale;
+ setup->quad[0].input.coverage[QUAD_TOP_LEFT] = MIN2(cover, 1.0f);
+ setup->quad[0].inout.mask |= MASK_TOP_LEFT;
+ }
+
+ dx = (ix + 1.5f) - x;
+ dy = (iy + 0.5f) - y;
+ dist2 = dx * dx + dy * dy;
+ if (dist2 <= rmax2) {
+ cover = 1.0F - (dist2 - rmin2) * cscale;
+ setup->quad[0].input.coverage[QUAD_TOP_RIGHT] = MIN2(cover, 1.0f);
+ setup->quad[0].inout.mask |= MASK_TOP_RIGHT;
+ }
+
+ dx = (ix + 0.5f) - x;
+ dy = (iy + 1.5f) - y;
+ dist2 = dx * dx + dy * dy;
+ if (dist2 <= rmax2) {
+ cover = 1.0F - (dist2 - rmin2) * cscale;
+ setup->quad[0].input.coverage[QUAD_BOTTOM_LEFT] = MIN2(cover, 1.0f);
+ setup->quad[0].inout.mask |= MASK_BOTTOM_LEFT;
+ }
+
+ dx = (ix + 1.5f) - x;
+ dy = (iy + 1.5f) - y;
+ dist2 = dx * dx + dy * dy;
+ if (dist2 <= rmax2) {
+ cover = 1.0F - (dist2 - rmin2) * cscale;
+ setup->quad[0].input.coverage[QUAD_BOTTOM_RIGHT] = MIN2(cover, 1.0f);
+ setup->quad[0].inout.mask |= MASK_BOTTOM_RIGHT;
+ }
+
+ if (setup->quad[0].inout.mask) {
+ setup->quad[0].input.x0 = ix;
+ setup->quad[0].input.y0 = iy;
+ clip_emit_quad( setup, &setup->quad[0] );
+ }
+ }
+ }
+ }
+ else {
+ /* square points */
+ const int xmin = (int) (x + 0.75 - halfSize);
+ const int ymin = (int) (y + 0.25 - halfSize);
+ const int xmax = xmin + (int) size;
+ const int ymax = ymin + (int) size;
+ /* XXX could apply scissor to xmin,ymin,xmax,ymax now */
+ const int ixmin = block(xmin);
+ const int ixmax = block(xmax - 1);
+ const int iymin = block(ymin);
+ const int iymax = block(ymax - 1);
+ int ix, iy;
+
+ /*
+ debug_printf("(%f, %f) -> X:%d..%d Y:%d..%d\n", x, y, xmin, xmax,ymin,ymax);
+ */
+ for (iy = iymin; iy <= iymax; iy += 2) {
+ uint rowMask = 0xf;
+ if (iy < ymin) {
+ /* above the top edge */
+ rowMask &= (MASK_BOTTOM_LEFT | MASK_BOTTOM_RIGHT);
+ }
+ if (iy + 1 >= ymax) {
+ /* below the bottom edge */
+ rowMask &= (MASK_TOP_LEFT | MASK_TOP_RIGHT);
+ }
+
+ for (ix = ixmin; ix <= ixmax; ix += 2) {
+ uint mask = rowMask;
+
+ if (ix < xmin) {
+ /* fragment is past left edge of point, turn off left bits */
+ mask &= (MASK_BOTTOM_RIGHT | MASK_TOP_RIGHT);
+ }
+ if (ix + 1 >= xmax) {
+ /* past the right edge */
+ mask &= (MASK_BOTTOM_LEFT | MASK_TOP_LEFT);
+ }
+
+ setup->quad[0].inout.mask = mask;
+ setup->quad[0].input.x0 = ix;
+ setup->quad[0].input.y0 = iy;
+ clip_emit_quad( setup, &setup->quad[0] );
+ }
+ }
+ }
+ }
+}
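+/*
+ * Illustration of the round-point coverage ramp above: coverage falls from
+ * 1.0 at distance rmin to 0.0 at distance rmax from the point center
+ * (linearly in squared distance).  E.g. for size = 4.0 (halfSize = 2.0):
+ * rmin ~= 1.293, rmax ~= 2.707, rmin2 ~= 1.672, rmax2 ~= 7.328 and
+ * cscale ~= 0.177, so a pixel 2.0 pixels from the center gets
+ * cover ~= 1 - (4.0 - 1.672) * 0.177 ~= 0.59.
+ */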
+
+void llvmpipe_setup_prepare( struct setup_context *setup )
+{
+ struct llvmpipe_context *lp = setup->llvmpipe;
+
+ if (lp->dirty) {
+ llvmpipe_update_derived(lp);
+ }
+
+ if (lp->reduced_api_prim == PIPE_PRIM_TRIANGLES &&
+ lp->rasterizer->fill_cw == PIPE_POLYGON_MODE_FILL &&
+ lp->rasterizer->fill_ccw == PIPE_POLYGON_MODE_FILL) {
+ /* we'll do culling */
+ setup->winding = lp->rasterizer->cull_mode;
+ }
+ else {
+ /* 'draw' will do culling */
+ setup->winding = PIPE_WINDING_NONE;
+ }
+}
+
+
+
+void llvmpipe_setup_destroy_context( struct setup_context *setup )
+{
+ align_free( setup );
+}
+
+
+/**
+ * Create a new primitive setup/render stage.
+ */
+struct setup_context *llvmpipe_setup_create_context( struct llvmpipe_context *llvmpipe )
+{
+ struct setup_context *setup;
+ unsigned i;
+
+ setup = align_malloc(sizeof(struct setup_context), 16);
+ if (!setup)
+ return NULL;
+
+ memset(setup, 0, sizeof *setup);
+ setup->llvmpipe = llvmpipe;
+
+ for (i = 0; i < MAX_QUADS; i++) {
+ setup->quad[i].coef = &setup->coef;
+ }
+
+ setup->span.left[0] = 1000000; /* greater than right[0] */
+ setup->span.left[1] = 1000000; /* greater than right[1] */
+
+ return setup;
+}
+
diff --git a/src/gallium/drivers/llvmpipe/lp_setup.h b/src/gallium/drivers/llvmpipe/lp_setup.h
new file mode 100644
index 0000000000..89c43da046
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_setup.h
@@ -0,0 +1,53 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef LP_SETUP_H
+#define LP_SETUP_H
+
+struct setup_context;
+struct llvmpipe_context;
+
+void
+llvmpipe_setup_tri( struct setup_context *setup,
+ const float (*v0)[4],
+ const float (*v1)[4],
+ const float (*v2)[4] );
+
+void
+llvmpipe_setup_line(struct setup_context *setup,
+ const float (*v0)[4],
+ const float (*v1)[4]);
+
+void
+llvmpipe_setup_point( struct setup_context *setup,
+ const float (*v0)[4] );
+
+
+struct setup_context *llvmpipe_setup_create_context( struct llvmpipe_context *llvmpipe );
+void llvmpipe_setup_prepare( struct setup_context *setup );
+void llvmpipe_setup_destroy_context( struct setup_context *setup );
+
+#endif
diff --git a/src/gallium/drivers/llvmpipe/lp_state.h b/src/gallium/drivers/llvmpipe/lp_state.h
new file mode 100644
index 0000000000..7b26ce61a3
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_state.h
@@ -0,0 +1,230 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef LP_STATE_H
+#define LP_STATE_H
+
+#include <llvm-c/Core.h>
+
+#include "pipe/p_state.h"
+#include "tgsi/tgsi_scan.h"
+#include "lp_jit.h"
+#include "lp_bld_sample.h" /* for struct lp_sampler_static_state */
+
+
+#define LP_NEW_VIEWPORT 0x1
+#define LP_NEW_RASTERIZER 0x2
+#define LP_NEW_FS 0x4
+#define LP_NEW_BLEND 0x8
+#define LP_NEW_CLIP 0x10
+#define LP_NEW_SCISSOR 0x20
+#define LP_NEW_STIPPLE 0x40
+#define LP_NEW_FRAMEBUFFER 0x80
+#define LP_NEW_DEPTH_STENCIL_ALPHA 0x100
+#define LP_NEW_CONSTANTS 0x200
+#define LP_NEW_SAMPLER 0x400
+#define LP_NEW_TEXTURE 0x800
+#define LP_NEW_VERTEX 0x1000
+#define LP_NEW_VS 0x2000
+#define LP_NEW_QUERY 0x4000
+
+
+struct tgsi_sampler;
+struct vertex_info;
+struct pipe_context;
+struct llvmpipe_context;
+
+struct lp_fragment_shader;
+
+
+struct lp_fragment_shader_variant_key
+{
+ enum pipe_format zsbuf_format;
+ struct pipe_depth_state depth;
+ struct pipe_alpha_state alpha;
+ struct pipe_blend_state blend;
+
+ struct lp_sampler_static_state sampler[PIPE_MAX_SAMPLERS];
+};
+
+
+struct lp_fragment_shader_variant
+{
+ struct lp_fragment_shader *shader;
+
+ struct lp_fragment_shader_variant_key key;
+
+ LLVMValueRef function;
+
+ lp_jit_frag_func jit_function;
+
+ struct lp_fragment_shader_variant *next;
+};
+
+
+/**
+ * Subclass of pipe_shader_state (though it doesn't really need to be).
+ *
+ * This is starting to look an awful lot like a quad pipeline stage...
+ */
+struct lp_fragment_shader
+{
+ struct pipe_shader_state base;
+
+ struct tgsi_shader_info info;
+
+ struct lp_fragment_shader_variant *variants;
+
+ struct lp_fragment_shader_variant *current;
+};
+
+
+/** Subclass of pipe_shader_state */
+struct lp_vertex_shader {
+ struct pipe_shader_state shader;
+ struct draw_vertex_shader *draw_data;
+};
+
+
+
+void *
+llvmpipe_create_blend_state(struct pipe_context *,
+ const struct pipe_blend_state *);
+void llvmpipe_bind_blend_state(struct pipe_context *,
+ void *);
+void llvmpipe_delete_blend_state(struct pipe_context *,
+ void *);
+
+void *
+llvmpipe_create_sampler_state(struct pipe_context *,
+ const struct pipe_sampler_state *);
+void llvmpipe_bind_sampler_states(struct pipe_context *, unsigned, void **);
+void llvmpipe_delete_sampler_state(struct pipe_context *, void *);
+
+void *
+llvmpipe_create_depth_stencil_state(struct pipe_context *,
+ const struct pipe_depth_stencil_alpha_state *);
+void llvmpipe_bind_depth_stencil_state(struct pipe_context *, void *);
+void llvmpipe_delete_depth_stencil_state(struct pipe_context *, void *);
+
+void *
+llvmpipe_create_rasterizer_state(struct pipe_context *,
+ const struct pipe_rasterizer_state *);
+void llvmpipe_bind_rasterizer_state(struct pipe_context *, void *);
+void llvmpipe_delete_rasterizer_state(struct pipe_context *, void *);
+
+void llvmpipe_set_framebuffer_state( struct pipe_context *,
+ const struct pipe_framebuffer_state * );
+
+void llvmpipe_set_blend_color( struct pipe_context *pipe,
+ const struct pipe_blend_color *blend_color );
+
+void llvmpipe_set_clip_state( struct pipe_context *,
+ const struct pipe_clip_state * );
+
+void llvmpipe_set_constant_buffer(struct pipe_context *,
+ uint shader, uint index,
+ const struct pipe_constant_buffer *buf);
+
+void *llvmpipe_create_fs_state(struct pipe_context *,
+ const struct pipe_shader_state *);
+void llvmpipe_bind_fs_state(struct pipe_context *, void *);
+void llvmpipe_delete_fs_state(struct pipe_context *, void *);
+void *llvmpipe_create_vs_state(struct pipe_context *,
+ const struct pipe_shader_state *);
+void llvmpipe_bind_vs_state(struct pipe_context *, void *);
+void llvmpipe_delete_vs_state(struct pipe_context *, void *);
+
+void llvmpipe_set_polygon_stipple( struct pipe_context *,
+ const struct pipe_poly_stipple * );
+
+void llvmpipe_set_scissor_state( struct pipe_context *,
+ const struct pipe_scissor_state * );
+
+void llvmpipe_set_sampler_textures( struct pipe_context *,
+ unsigned num,
+ struct pipe_texture ** );
+
+void llvmpipe_set_viewport_state( struct pipe_context *,
+ const struct pipe_viewport_state * );
+
+void llvmpipe_set_vertex_elements(struct pipe_context *,
+ unsigned count,
+ const struct pipe_vertex_element *);
+
+void llvmpipe_set_vertex_buffers(struct pipe_context *,
+ unsigned count,
+ const struct pipe_vertex_buffer *);
+
+void llvmpipe_update_fs(struct llvmpipe_context *lp);
+
+void llvmpipe_update_derived( struct llvmpipe_context *llvmpipe );
+
+
+boolean llvmpipe_draw_arrays(struct pipe_context *pipe, unsigned mode,
+ unsigned start, unsigned count);
+
+boolean llvmpipe_draw_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned mode, unsigned start, unsigned count);
+boolean
+llvmpipe_draw_range_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned min_index,
+ unsigned max_index,
+ unsigned mode, unsigned start, unsigned count);
+
+void
+llvmpipe_set_edgeflags(struct pipe_context *pipe, const unsigned *edgeflags);
+
+
+void
+llvmpipe_map_transfers(struct llvmpipe_context *lp);
+
+void
+llvmpipe_unmap_transfers(struct llvmpipe_context *lp);
+
+void
+llvmpipe_map_texture_surfaces(struct llvmpipe_context *lp);
+
+void
+llvmpipe_unmap_texture_surfaces(struct llvmpipe_context *lp);
+
+
+struct vertex_info *
+llvmpipe_get_vertex_info(struct llvmpipe_context *llvmpipe);
+
+struct vertex_info *
+llvmpipe_get_vbuf_vertex_info(struct llvmpipe_context *llvmpipe);
+
+
+#endif
diff --git a/src/gallium/drivers/llvmpipe/lp_state_blend.c b/src/gallium/drivers/llvmpipe/lp_state_blend.c
new file mode 100644
index 0000000000..3f03bd0057
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_state_blend.c
@@ -0,0 +1,114 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ * @author Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "util/u_memory.h"
+#include "util/u_math.h"
+#include "util/u_debug_dump.h"
+#include "lp_screen.h"
+#include "lp_context.h"
+#include "lp_state.h"
+
+
+void *
+llvmpipe_create_blend_state(struct pipe_context *pipe,
+ const struct pipe_blend_state *blend)
+{
+ return mem_dup(blend, sizeof(*blend));
+}
+
+void llvmpipe_bind_blend_state( struct pipe_context *pipe,
+ void *blend )
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+
+ llvmpipe->blend = blend;
+
+ llvmpipe->dirty |= LP_NEW_BLEND;
+}
+
+void llvmpipe_delete_blend_state(struct pipe_context *pipe,
+ void *blend)
+{
+ FREE( blend );
+}
+
+
+void llvmpipe_set_blend_color( struct pipe_context *pipe,
+ const struct pipe_blend_color *blend_color )
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+ unsigned i, j;
+
+ memcpy(&llvmpipe->blend_color, blend_color, sizeof *blend_color);
+
+ if(!llvmpipe->jit_context.blend_color)
+ llvmpipe->jit_context.blend_color = align_malloc(4 * 16, 16);
+ for (i = 0; i < 4; ++i) {
+ uint8_t c = float_to_ubyte(blend_color->color[i]);
+ /* splat each channel across its own 16-byte vector slot */
+ for (j = 0; j < 16; ++j)
+ llvmpipe->jit_context.blend_color[i*16 + j] = c;
+ }
+}
+
+
+/** XXX move someday? Or consolidate all these simple state setters
+ * into one file.
+ */
+
+
+void *
+llvmpipe_create_depth_stencil_state(struct pipe_context *pipe,
+ const struct pipe_depth_stencil_alpha_state *depth_stencil)
+{
+ return mem_dup(depth_stencil, sizeof(*depth_stencil));
+}
+
+void
+llvmpipe_bind_depth_stencil_state(struct pipe_context *pipe,
+ void *depth_stencil)
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+
+ llvmpipe->depth_stencil = (const struct pipe_depth_stencil_alpha_state *)depth_stencil;
+
+ if(llvmpipe->depth_stencil)
+ llvmpipe->jit_context.alpha_ref_value = llvmpipe->depth_stencil->alpha.ref_value;
+
+ llvmpipe->dirty |= LP_NEW_DEPTH_STENCIL_ALPHA;
+}
+
+void
+llvmpipe_delete_depth_stencil_state(struct pipe_context *pipe, void *depth)
+{
+ FREE( depth );
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_state_clip.c b/src/gallium/drivers/llvmpipe/lp_state_clip.c
new file mode 100644
index 0000000000..df68f27acc
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_state_clip.c
@@ -0,0 +1,79 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+#include "lp_context.h"
+#include "lp_state.h"
+#include "draw/draw_context.h"
+
+
+void llvmpipe_set_clip_state( struct pipe_context *pipe,
+ const struct pipe_clip_state *clip )
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+
+ /* pass the clip state to the draw module */
+ draw_set_clip_state(llvmpipe->draw, clip);
+}
+
+
+void llvmpipe_set_viewport_state( struct pipe_context *pipe,
+ const struct pipe_viewport_state *viewport )
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+
+ /* pass the viewport info to the draw module */
+ draw_set_viewport_state(llvmpipe->draw, viewport);
+
+ llvmpipe->viewport = *viewport; /* struct copy */
+ llvmpipe->dirty |= LP_NEW_VIEWPORT;
+}
+
+
+void llvmpipe_set_scissor_state( struct pipe_context *pipe,
+ const struct pipe_scissor_state *scissor )
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+
+ draw_flush(llvmpipe->draw);
+
+ llvmpipe->scissor = *scissor; /* struct copy */
+ llvmpipe->dirty |= LP_NEW_SCISSOR;
+}
+
+
+void llvmpipe_set_polygon_stipple( struct pipe_context *pipe,
+ const struct pipe_poly_stipple *stipple )
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+
+ draw_flush(llvmpipe->draw);
+
+ llvmpipe->poly_stipple = *stipple; /* struct copy */
+ llvmpipe->dirty |= LP_NEW_STIPPLE;
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_state_derived.c b/src/gallium/drivers/llvmpipe/lp_state_derived.c
new file mode 100644
index 0000000000..e87976b9f3
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_state_derived.c
@@ -0,0 +1,260 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "pipe/p_shader_tokens.h"
+#include "draw/draw_context.h"
+#include "draw/draw_vertex.h"
+#include "draw/draw_private.h"
+#include "lp_context.h"
+#include "lp_screen.h"
+#include "lp_tex_cache.h"
+#include "lp_state.h"
+
+
+/**
+ * Mark the current vertex layout as "invalid".
+ * We'll validate the vertex layout later, when we start to actually
+ * render a point or line or tri.
+ */
+static void
+invalidate_vertex_layout(struct llvmpipe_context *llvmpipe)
+{
+ llvmpipe->vertex_info.num_attribs = 0;
+}
+
+
+/**
+ * The vertex info describes how to convert the post-transformed vertices
+ * (simple float[][4]) used by the 'draw' module into vertices for
+ * rasterization.
+ *
+ * This function validates the vertex layout and returns a pointer to a
+ * vertex_info object.
+ */
+struct vertex_info *
+llvmpipe_get_vertex_info(struct llvmpipe_context *llvmpipe)
+{
+ struct vertex_info *vinfo = &llvmpipe->vertex_info;
+
+ if (vinfo->num_attribs == 0) {
+ /* compute vertex layout now */
+ const struct lp_fragment_shader *lpfs = llvmpipe->fs;
+ const enum interp_mode colorInterp
+ = llvmpipe->rasterizer->flatshade ? INTERP_CONSTANT : INTERP_LINEAR;
+ uint i;
+
+ if (llvmpipe->vbuf) {
+ /* if using the post-transform vertex buffer, tell draw_vbuf to
+ * simply emit the whole post-xform vertex as-is:
+ */
+ struct vertex_info *vinfo_vbuf = &llvmpipe->vertex_info_vbuf;
+ const uint num = draw_num_vs_outputs(llvmpipe->draw);
+ uint i;
+
+ /* No longer any need to try and emit draw vertex_header info.
+ */
+ vinfo_vbuf->num_attribs = 0;
+ for (i = 0; i < num; i++) {
+ draw_emit_vertex_attr(vinfo_vbuf, EMIT_4F, INTERP_PERSPECTIVE, i);
+ }
+ draw_compute_vertex_size(vinfo_vbuf);
+ }
+
+ /*
+ * Loop over fragment shader inputs, searching for the matching output
+ * from the vertex shader.
+ */
+ vinfo->num_attribs = 0;
+ for (i = 0; i < lpfs->info.num_inputs; i++) {
+ int src;
+ switch (lpfs->info.input_semantic_name[i]) {
+ case TGSI_SEMANTIC_POSITION:
+ src = draw_find_vs_output(llvmpipe->draw,
+ TGSI_SEMANTIC_POSITION, 0);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_POS, src);
+ break;
+
+ case TGSI_SEMANTIC_COLOR:
+ src = draw_find_vs_output(llvmpipe->draw, TGSI_SEMANTIC_COLOR,
+ lpfs->info.input_semantic_index[i]);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, colorInterp, src);
+ break;
+
+ case TGSI_SEMANTIC_FOG:
+ src = draw_find_vs_output(llvmpipe->draw, TGSI_SEMANTIC_FOG, 0);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_PERSPECTIVE, src);
+ break;
+
+ case TGSI_SEMANTIC_GENERIC:
+ case TGSI_SEMANTIC_FACE:
+ /* this includes texcoords and varying vars */
+ src = draw_find_vs_output(llvmpipe->draw, TGSI_SEMANTIC_GENERIC,
+ lpfs->info.input_semantic_index[i]);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_PERSPECTIVE, src);
+ break;
+
+ default:
+ assert(0);
+ }
+ }
+
+ llvmpipe->psize_slot = draw_find_vs_output(llvmpipe->draw,
+ TGSI_SEMANTIC_PSIZE, 0);
+ if (llvmpipe->psize_slot > 0) {
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_CONSTANT,
+ llvmpipe->psize_slot);
+ }
+
+ draw_compute_vertex_size(vinfo);
+ }
+
+ return vinfo;
+}
+
+
+/**
+ * Called from vbuf module.
+ *
+ * Note that there are actually two different vertex layouts in llvmpipe.
+ *
+ * The normal one is computed in llvmpipe_get_vertex_info() above and is
+ * used by the point/line/tri "setup" code.
+ *
+ * The other one (this one) is only used by the vbuf module (which is not
+ * used by default, but is used for testing).  For the vbuf module,
+ * we basically want to pass-through the draw module's vertex layout as-is.
+ * When the llvmpipe vbuf code begins drawing, the normal vertex layout
+ * will come into play again.
+ */
+struct vertex_info *
+llvmpipe_get_vbuf_vertex_info(struct llvmpipe_context *llvmpipe)
+{
+ (void) llvmpipe_get_vertex_info(llvmpipe);
+ return &llvmpipe->vertex_info_vbuf;
+}
+
+
+/**
+ * Recompute cliprect from scissor bounds, scissor enable and surface size.
+ */
+static void
+compute_cliprect(struct llvmpipe_context *lp)
+{
+ /* LP_NEW_FRAMEBUFFER
+ */
+ uint surfWidth = lp->framebuffer.width;
+ uint surfHeight = lp->framebuffer.height;
+
+ /* LP_NEW_RASTERIZER
+ */
+ if (lp->rasterizer->scissor) {
+
+ /* LP_NEW_SCISSOR
+ *
+ * clip to scissor rect:
+ */
+ lp->cliprect.minx = MAX2(lp->scissor.minx, 0);
+ lp->cliprect.miny = MAX2(lp->scissor.miny, 0);
+ lp->cliprect.maxx = MIN2(lp->scissor.maxx, surfWidth);
+ lp->cliprect.maxy = MIN2(lp->scissor.maxy, surfHeight);
+ }
+ else {
+ /* clip to surface bounds */
+ lp->cliprect.minx = 0;
+ lp->cliprect.miny = 0;
+ lp->cliprect.maxx = surfWidth;
+ lp->cliprect.maxy = surfHeight;
+ }
+}
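+/*
+ * Example of the clamping above: with a 64x64 framebuffer, scissor enabled
+ * and a scissor rect of (10,10)-(100,100), the cliprect becomes
+ * (10,10)-(64,64); with the scissor disabled it is simply (0,0)-(64,64).
+ */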
+
+
+static void
+update_tgsi_samplers( struct llvmpipe_context *llvmpipe )
+{
+ unsigned i;
+
+ /* vertex shader samplers */
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
+ llvmpipe->tgsi.vert_samplers[i].sampler = llvmpipe->sampler[i];
+ llvmpipe->tgsi.vert_samplers[i].texture = llvmpipe->texture[i];
+ llvmpipe->tgsi.vert_samplers[i].base.get_samples = lp_get_samples;
+ }
+
+ /* fragment shader samplers */
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
+ llvmpipe->tgsi.frag_samplers[i].sampler = llvmpipe->sampler[i];
+ llvmpipe->tgsi.frag_samplers[i].texture = llvmpipe->texture[i];
+ llvmpipe->tgsi.frag_samplers[i].base.get_samples = lp_get_samples;
+ }
+
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
+ lp_tex_tile_cache_validate_texture( llvmpipe->tex_cache[i] );
+ }
+
+ llvmpipe->jit_context.samplers = (struct tgsi_sampler **)llvmpipe->tgsi.frag_samplers_list;
+}
+
+/* Hopefully this will remain quite simple, otherwise we will need to pull in
+ * something like the state tracker mechanism.
+ */
+void llvmpipe_update_derived( struct llvmpipe_context *llvmpipe )
+{
+ struct llvmpipe_screen *lp_screen = llvmpipe_screen(llvmpipe->pipe.screen);
+
+ /* Check for updated textures.
+ */
+ if (llvmpipe->tex_timestamp != lp_screen->timestamp) {
+ llvmpipe->tex_timestamp = lp_screen->timestamp;
+ llvmpipe->dirty |= LP_NEW_TEXTURE;
+ }
+
+ if (llvmpipe->dirty & (LP_NEW_SAMPLER |
+ LP_NEW_TEXTURE))
+ update_tgsi_samplers( llvmpipe );
+
+ if (llvmpipe->dirty & (LP_NEW_RASTERIZER |
+ LP_NEW_FS |
+ LP_NEW_VS))
+ invalidate_vertex_layout( llvmpipe );
+
+ if (llvmpipe->dirty & (LP_NEW_SCISSOR |
+ LP_NEW_RASTERIZER |
+ LP_NEW_FRAMEBUFFER))
+ compute_cliprect(llvmpipe);
+
+ if (llvmpipe->dirty & (LP_NEW_FS |
+ LP_NEW_BLEND |
+ LP_NEW_DEPTH_STENCIL_ALPHA |
+ LP_NEW_SAMPLER |
+ LP_NEW_TEXTURE))
+ llvmpipe_update_fs( llvmpipe );
+
+
+ llvmpipe->dirty = 0;
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_state_fs.c b/src/gallium/drivers/llvmpipe/lp_state_fs.c
new file mode 100644
index 0000000000..618cf1ffb8
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_state_fs.c
@@ -0,0 +1,762 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Code generate the whole fragment pipeline.
+ *
+ * The fragment pipeline consists of the following stages:
+ * - stipple (TBI)
+ * - early depth test
+ * - fragment shader
+ * - alpha test
+ * - depth/stencil test (stencil TBI)
+ * - blending
+ *
+ * This file has only the glue to assemble the fragment pipeline.  The actual
+ * plumbing of converting Gallium state into LLVM IR is done elsewhere, in the
+ * lp_bld_*.[ch] files, in a completely generic and reusable way.  Here we
+ * merely drive the LLVM JIT execution engine to create a function that
+ * follows an established binary interface and that can be called from C
+ * directly.
+ *
+ * A big source of complexity here is that we often want to run different
+ * stages with different data types and precisions.  For example, the
+ * fragment shader typically needs to be done in floats, but the
+ * depth/stencil test and blending are better done in the type that most
+ * closely matches the depth/stencil and color buffer respectively.
+ *
+ * Since the width of a SIMD vector register stays the same regardless of the
+ * element type, different types imply different numbers of elements, so we must
+ * code generate more instances of the stages with larger types to be able to
+ * feed/consume the stages with smaller types.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
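+
+/*
+ * To make the above concrete (a sketch of the numbers used further below):
+ * with 128-bit SIMD vectors the fragment shader runs on vectors of 4 floats
+ * (one quad per vector) while blending runs on vectors of 16 ubytes, so
+ * four shader iterations are generated and their outputs converted/packed
+ * to feed a single blend iteration.
+ */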
+
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "util/u_format.h"
+#include "util/u_debug_dump.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_shader_tokens.h"
+#include "draw/draw_context.h"
+#include "tgsi/tgsi_dump.h"
+#include "tgsi/tgsi_scan.h"
+#include "tgsi/tgsi_parse.h"
+#include "lp_bld_type.h"
+#include "lp_bld_const.h"
+#include "lp_bld_conv.h"
+#include "lp_bld_intr.h"
+#include "lp_bld_logic.h"
+#include "lp_bld_depth.h"
+#include "lp_bld_interp.h"
+#include "lp_bld_tgsi.h"
+#include "lp_bld_alpha.h"
+#include "lp_bld_blend.h"
+#include "lp_bld_swizzle.h"
+#include "lp_bld_flow.h"
+#include "lp_bld_debug.h"
+#include "lp_screen.h"
+#include "lp_context.h"
+#include "lp_state.h"
+#include "lp_quad.h"
+#include "lp_tex_sample.h"
+
+
+static const unsigned char quad_offset_x[4] = {0, 1, 0, 1};
+static const unsigned char quad_offset_y[4] = {0, 0, 1, 1};
+
+
+/*
+ * Derive the coordinates of all other quad pixels from the quad's
+ * upper-left scalar coordinates.
+ */
+static void
+generate_pos0(LLVMBuilderRef builder,
+ LLVMValueRef x,
+ LLVMValueRef y,
+ LLVMValueRef *x0,
+ LLVMValueRef *y0)
+{
+ LLVMTypeRef int_elem_type = LLVMInt32Type();
+ LLVMTypeRef int_vec_type = LLVMVectorType(int_elem_type, QUAD_SIZE);
+ LLVMTypeRef elem_type = LLVMFloatType();
+ LLVMTypeRef vec_type = LLVMVectorType(elem_type, QUAD_SIZE);
+ LLVMValueRef x_offsets[QUAD_SIZE];
+ LLVMValueRef y_offsets[QUAD_SIZE];
+ unsigned i;
+
+ x = lp_build_broadcast(builder, int_vec_type, x);
+ y = lp_build_broadcast(builder, int_vec_type, y);
+
+ for(i = 0; i < QUAD_SIZE; ++i) {
+ x_offsets[i] = LLVMConstInt(int_elem_type, quad_offset_x[i], 0);
+ y_offsets[i] = LLVMConstInt(int_elem_type, quad_offset_y[i], 0);
+ }
+
+ x = LLVMBuildAdd(builder, x, LLVMConstVector(x_offsets, QUAD_SIZE), "");
+ y = LLVMBuildAdd(builder, y, LLVMConstVector(y_offsets, QUAD_SIZE), "");
+
+ *x0 = LLVMBuildSIToFP(builder, x, vec_type, "");
+ *y0 = LLVMBuildSIToFP(builder, y, vec_type, "");
+}
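+/*
+ * E.g. for the quad at integer position (x, y) = (10, 20) this produces
+ * x0 = {10, 11, 10, 11} and y0 = {20, 20, 21, 21} as float vectors, i.e.
+ * the per-pixel coordinates of the 2x2 block in the order given by
+ * quad_offset_x/quad_offset_y.
+ */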
+
+
+/**
+ * Generate the depth test.
+ */
+static void
+generate_depth(LLVMBuilderRef builder,
+ const struct lp_fragment_shader_variant_key *key,
+ union lp_type src_type,
+ struct lp_build_mask_context *mask,
+ LLVMValueRef src,
+ LLVMValueRef dst_ptr)
+{
+ const struct util_format_description *format_desc;
+ union lp_type dst_type;
+
+ if(!key->depth.enabled)
+ return;
+
+ format_desc = util_format_description(key->zsbuf_format);
+ assert(format_desc);
+
+ /* Pick the depth type. */
+ dst_type = lp_depth_type(format_desc, src_type.width*src_type.length);
+
+ /* FIXME: Cope with a depth test type with a different bit width. */
+ assert(dst_type.width == src_type.width);
+ assert(dst_type.length == src_type.length);
+
+#if 1
+ src = lp_build_clamped_float_to_unsigned_norm(builder,
+ src_type,
+ dst_type.width,
+ src);
+#else
+ lp_build_conv(builder, src_type, dst_type, &src, 1, &src, 1);
+#endif
+
+ lp_build_depth_test(builder,
+ &key->depth,
+ dst_type,
+ format_desc,
+ mask,
+ src,
+ dst_ptr);
+}
+
+
+/**
+ * Generate the fragment shader, depth/stencil test, and alpha tests.
+ */
+static void
+generate_fs(struct llvmpipe_context *lp,
+ struct lp_fragment_shader *shader,
+ const struct lp_fragment_shader_variant_key *key,
+ LLVMBuilderRef builder,
+ union lp_type type,
+ LLVMValueRef context_ptr,
+ unsigned i,
+ const struct lp_build_interp_soa_context *interp,
+ struct lp_build_sampler_soa *sampler,
+ LLVMValueRef *pmask,
+ LLVMValueRef *color,
+ LLVMValueRef depth_ptr)
+{
+ const struct tgsi_token *tokens = shader->base.tokens;
+ LLVMTypeRef elem_type;
+ LLVMTypeRef vec_type;
+ LLVMTypeRef int_vec_type;
+ LLVMValueRef consts_ptr;
+ LLVMValueRef outputs[PIPE_MAX_SHADER_OUTPUTS][NUM_CHANNELS];
+ LLVMValueRef z = interp->pos[2];
+ struct lp_build_flow_context *flow;
+ struct lp_build_mask_context mask;
+ boolean early_depth_test;
+ unsigned attrib;
+ unsigned chan;
+
+ elem_type = lp_build_elem_type(type);
+ vec_type = lp_build_vec_type(type);
+ int_vec_type = lp_build_int_vec_type(type);
+
+ consts_ptr = lp_jit_context_constants(builder, context_ptr);
+
+ flow = lp_build_flow_create(builder);
+
+ memset(outputs, 0, sizeof outputs);
+
+ lp_build_flow_scope_begin(flow);
+
+ /* Declare the color and z variables */
+ for(chan = 0; chan < NUM_CHANNELS; ++chan) {
+ color[chan] = LLVMGetUndef(vec_type);
+ lp_build_flow_scope_declare(flow, &color[chan]);
+ }
+ lp_build_flow_scope_declare(flow, &z);
+
+ lp_build_mask_begin(&mask, flow, type, *pmask);
+
+ early_depth_test =
+ key->depth.enabled &&
+ !key->alpha.enabled &&
+ !shader->info.uses_kill &&
+ !shader->info.writes_z;
+
+ if(early_depth_test)
+ generate_depth(builder, key,
+ type, &mask,
+ z, depth_ptr);
+
+ lp_build_tgsi_soa(builder, tokens, type, &mask,
+ consts_ptr, interp->pos, interp->inputs,
+ outputs, sampler);
+
+ for (attrib = 0; attrib < shader->info.num_outputs; ++attrib) {
+ for(chan = 0; chan < NUM_CHANNELS; ++chan) {
+ if(outputs[attrib][chan]) {
+ lp_build_name(outputs[attrib][chan], "output%u.%u.%c", i, attrib, "xyzw"[chan]);
+
+ switch (shader->info.output_semantic_name[attrib]) {
+ case TGSI_SEMANTIC_COLOR:
+ {
+ unsigned cbuf = shader->info.output_semantic_index[attrib];
+
+ lp_build_name(outputs[attrib][chan], "color%u.%u.%c", i, attrib, "rgba"[chan]);
+
+ /* Alpha test */
+ /* XXX: should the alpha reference value be passed separately? */
+ if(cbuf == 0 && chan == 3) {
+ LLVMValueRef alpha = outputs[attrib][chan];
+ LLVMValueRef alpha_ref_value;
+ alpha_ref_value = lp_jit_context_alpha_ref_value(builder, context_ptr);
+ alpha_ref_value = lp_build_broadcast(builder, vec_type, alpha_ref_value);
+ lp_build_alpha_test(builder, &key->alpha, type,
+ &mask, alpha, alpha_ref_value);
+ }
+
+ if(cbuf == 0)
+ color[chan] = outputs[attrib][chan];
+
+ break;
+ }
+
+ case TGSI_SEMANTIC_POSITION:
+ if(chan == 2)
+ z = outputs[attrib][chan];
+ break;
+ }
+ }
+ }
+ }
+
+ if(!early_depth_test)
+ generate_depth(builder, key,
+ type, &mask,
+ z, depth_ptr);
+
+ lp_build_mask_end(&mask);
+
+ lp_build_flow_scope_end(flow);
+
+ lp_build_flow_destroy(flow);
+
+ *pmask = mask.value;
+
+}
+
+
+/**
+ * Generate color blending and color output.
+ */
+static void
+generate_blend(const struct pipe_blend_state *blend,
+ LLVMBuilderRef builder,
+ union lp_type type,
+ LLVMValueRef context_ptr,
+ LLVMValueRef mask,
+ LLVMValueRef *src,
+ LLVMValueRef dst_ptr)
+{
+ struct lp_build_context bld;
+ struct lp_build_flow_context *flow;
+ struct lp_build_mask_context mask_ctx;
+ LLVMTypeRef vec_type;
+ LLVMTypeRef int_vec_type;
+ LLVMValueRef const_ptr;
+ LLVMValueRef con[4];
+ LLVMValueRef dst[4];
+ LLVMValueRef res[4];
+ unsigned chan;
+
+ lp_build_context_init(&bld, builder, type);
+
+ flow = lp_build_flow_create(builder);
+ lp_build_mask_begin(&mask_ctx, flow, type, mask);
+
+ vec_type = lp_build_vec_type(type);
+ int_vec_type = lp_build_int_vec_type(type);
+
+ const_ptr = lp_jit_context_blend_color(builder, context_ptr);
+ const_ptr = LLVMBuildBitCast(builder, const_ptr,
+ LLVMPointerType(vec_type, 0), "");
+
+ for(chan = 0; chan < 4; ++chan) {
+ LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), chan, 0);
+ con[chan] = LLVMBuildLoad(builder, LLVMBuildGEP(builder, const_ptr, &index, 1, ""), "");
+
+ dst[chan] = LLVMBuildLoad(builder, LLVMBuildGEP(builder, dst_ptr, &index, 1, ""), "");
+
+ lp_build_name(con[chan], "con.%c", "rgba"[chan]);
+ lp_build_name(dst[chan], "dst.%c", "rgba"[chan]);
+ }
+
+ lp_build_blend_soa(builder, blend, type, src, dst, con, res);
+
+ for(chan = 0; chan < 4; ++chan) {
+ if(blend->colormask & (1 << chan)) {
+ LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), chan, 0);
+ lp_build_name(res[chan], "res.%c", "rgba"[chan]);
+ res[chan] = lp_build_select(&bld, mask, res[chan], dst[chan]);
+ LLVMBuildStore(builder, res[chan], LLVMBuildGEP(builder, dst_ptr, &index, 1, ""));
+ }
+ }
+
+ lp_build_mask_end(&mask_ctx);
+ lp_build_flow_destroy(flow);
+}
+
+
+/**
+ * Generate the runtime callable function for the whole fragment pipeline.
+ */
+static struct lp_fragment_shader_variant *
+generate_fragment(struct llvmpipe_context *lp,
+ struct lp_fragment_shader *shader,
+ const struct lp_fragment_shader_variant_key *key)
+{
+ struct llvmpipe_screen *screen = llvmpipe_screen(lp->pipe.screen);
+ struct lp_fragment_shader_variant *variant;
+ union lp_type fs_type;
+ union lp_type blend_type;
+ LLVMTypeRef fs_elem_type;
+ LLVMTypeRef fs_vec_type;
+ LLVMTypeRef fs_int_vec_type;
+ LLVMTypeRef blend_vec_type;
+ LLVMTypeRef blend_int_vec_type;
+ LLVMTypeRef arg_types[9];
+ LLVMTypeRef func_type;
+ LLVMValueRef context_ptr;
+ LLVMValueRef x;
+ LLVMValueRef y;
+ LLVMValueRef a0_ptr;
+ LLVMValueRef dadx_ptr;
+ LLVMValueRef dady_ptr;
+ LLVMValueRef mask_ptr;
+ LLVMValueRef color_ptr;
+ LLVMValueRef depth_ptr;
+ LLVMBasicBlockRef block;
+ LLVMBuilderRef builder;
+ LLVMValueRef x0;
+ LLVMValueRef y0;
+ struct lp_build_sampler_soa *sampler;
+ struct lp_build_interp_soa_context interp;
+ LLVMValueRef fs_mask[LP_MAX_VECTOR_LENGTH];
+ LLVMValueRef fs_out_color[NUM_CHANNELS][LP_MAX_VECTOR_LENGTH];
+ LLVMValueRef blend_mask;
+ LLVMValueRef blend_in_color[NUM_CHANNELS];
+ unsigned num_fs;
+ unsigned i;
+ unsigned chan;
+
+#ifdef DEBUG
+ tgsi_dump(shader->base.tokens, 0);
+ if(key->depth.enabled) {
+ debug_printf("depth.func = %s\n", debug_dump_func(key->depth.func, TRUE));
+ debug_printf("depth.writemask = %u\n", key->depth.writemask);
+ debug_printf("depth.occlusion_count = %u\n", key->depth.occlusion_count);
+ }
+ if(key->alpha.enabled) {
+ debug_printf("alpha.func = %s\n", debug_dump_func(key->alpha.func, TRUE));
+ debug_printf("alpha.ref_value = %f\n", key->alpha.ref_value);
+ }
+ if(key->blend.logicop_enable) {
+ debug_printf("blend.logicop_func = %u\n", key->blend.logicop_func);
+ }
+ else if(key->blend.blend_enable) {
+ debug_printf("blend.rgb_func = %s\n", debug_dump_blend_func (key->blend.rgb_func, TRUE));
+ debug_printf("rgb_src_factor = %s\n", debug_dump_blend_factor(key->blend.rgb_src_factor, TRUE));
+ debug_printf("rgb_dst_factor = %s\n", debug_dump_blend_factor(key->blend.rgb_dst_factor, TRUE));
+ debug_printf("alpha_func = %s\n", debug_dump_blend_func (key->blend.alpha_func, TRUE));
+ debug_printf("alpha_src_factor = %s\n", debug_dump_blend_factor(key->blend.alpha_src_factor, TRUE));
+ debug_printf("alpha_dst_factor = %s\n", debug_dump_blend_factor(key->blend.alpha_dst_factor, TRUE));
+ }
+ debug_printf("blend.colormask = 0x%x\n", key->blend.colormask);
+#endif
+
+ variant = CALLOC_STRUCT(lp_fragment_shader_variant);
+ if(!variant)
+ return NULL;
+
+ variant->shader = shader;
+ memcpy(&variant->key, key, sizeof *key);
+
+ /* TODO: actually pick these based on the fs and color buffer
+ * characteristics. */
+
+ fs_type.value = 0;
+ fs_type.floating = TRUE; /* floating point values */
+ fs_type.sign = TRUE; /* values are signed */
+ fs_type.norm = FALSE; /* values are not limited to [0,1] or [-1,1] */
+ fs_type.width = 32; /* 32-bit float */
+ fs_type.length = 4; /* 4 element per vector */
+ num_fs = 4;
+
+ blend_type.value = 0;
+ blend_type.floating = FALSE; /* values are integers */
+ blend_type.sign = FALSE; /* values are unsigned */
+ blend_type.norm = TRUE; /* values are in [0,1] or [-1,1] */
+ blend_type.width = 8; /* 8-bit ubyte values */
+ blend_type.length = 16; /* 16 elements per vector */
+
+ /*
+ * Generate the function prototype. Any change here must be reflected in
+ * lp_jit.h's lp_jit_frag_func function pointer type, and vice-versa.
+ */
+
+ fs_elem_type = lp_build_elem_type(fs_type);
+ fs_vec_type = lp_build_vec_type(fs_type);
+ fs_int_vec_type = lp_build_int_vec_type(fs_type);
+
+ blend_vec_type = lp_build_vec_type(blend_type);
+ blend_int_vec_type = lp_build_int_vec_type(blend_type);
+
+ arg_types[0] = screen->context_ptr_type; /* context */
+ arg_types[1] = LLVMInt32Type(); /* x */
+ arg_types[2] = LLVMInt32Type(); /* y */
+ arg_types[3] = LLVMPointerType(fs_elem_type, 0); /* a0 */
+ arg_types[4] = LLVMPointerType(fs_elem_type, 0); /* dadx */
+ arg_types[5] = LLVMPointerType(fs_elem_type, 0); /* dady */
+ arg_types[6] = LLVMPointerType(fs_int_vec_type, 0); /* mask */
+ arg_types[7] = LLVMPointerType(blend_vec_type, 0); /* color */
+ arg_types[8] = LLVMPointerType(fs_int_vec_type, 0); /* depth */
+
+ func_type = LLVMFunctionType(LLVMVoidType(), arg_types, Elements(arg_types), 0);
+
+ variant->function = LLVMAddFunction(screen->module, "shader", func_type);
+ LLVMSetFunctionCallConv(variant->function, LLVMCCallConv);
+ for(i = 0; i < Elements(arg_types); ++i)
+ if(LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind)
+ LLVMAddAttribute(LLVMGetParam(variant->function, i), LLVMNoAliasAttribute);
+
+ context_ptr = LLVMGetParam(variant->function, 0);
+ x = LLVMGetParam(variant->function, 1);
+ y = LLVMGetParam(variant->function, 2);
+ a0_ptr = LLVMGetParam(variant->function, 3);
+ dadx_ptr = LLVMGetParam(variant->function, 4);
+ dady_ptr = LLVMGetParam(variant->function, 5);
+ mask_ptr = LLVMGetParam(variant->function, 6);
+ color_ptr = LLVMGetParam(variant->function, 7);
+ depth_ptr = LLVMGetParam(variant->function, 8);
+
+ lp_build_name(context_ptr, "context");
+ lp_build_name(x, "x");
+ lp_build_name(y, "y");
+ lp_build_name(a0_ptr, "a0");
+ lp_build_name(dadx_ptr, "dadx");
+ lp_build_name(dady_ptr, "dady");
+ lp_build_name(mask_ptr, "mask");
+ lp_build_name(color_ptr, "color");
+ lp_build_name(depth_ptr, "depth");
+
+ /*
+ * Function body
+ */
+
+ block = LLVMAppendBasicBlock(variant->function, "entry");
+ builder = LLVMCreateBuilder();
+ LLVMPositionBuilderAtEnd(builder, block);
+
+ generate_pos0(builder, x, y, &x0, &y0);
+
+ lp_build_interp_soa_init(&interp, shader->base.tokens, builder, fs_type,
+ a0_ptr, dadx_ptr, dady_ptr,
+ x0, y0, 2, 0);
+
+#if 0
+ /* C texture sampling */
+ sampler = lp_c_sampler_soa_create(context_ptr);
+#else
+ /* code generated texture sampling */
+ sampler = lp_llvm_sampler_soa_create(key->sampler, context_ptr);
+#endif
+
+ for(i = 0; i < num_fs; ++i) {
+ LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0);
+ LLVMValueRef out_color[NUM_CHANNELS];
+ LLVMValueRef depth_ptr_i;
+
+ if(i != 0)
+ lp_build_interp_soa_update(&interp);
+
+ fs_mask[i] = LLVMBuildLoad(builder, LLVMBuildGEP(builder, mask_ptr, &index, 1, ""), "");
+ depth_ptr_i = LLVMBuildGEP(builder, depth_ptr, &index, 1, "");
+
+ generate_fs(lp, shader, key,
+ builder,
+ fs_type,
+ context_ptr,
+ i,
+ &interp,
+ sampler,
+ &fs_mask[i],
+ out_color,
+ depth_ptr_i);
+
+ for(chan = 0; chan < NUM_CHANNELS; ++chan)
+ fs_out_color[chan][i] = out_color[chan];
+ }
+
+ sampler->destroy(sampler);
+
+ /*
+ * Convert the fs's output color and mask to fit to the blending type.
+ */
+
+ for(chan = 0; chan < NUM_CHANNELS; ++chan) {
+ lp_build_conv(builder, fs_type, blend_type,
+ fs_out_color[chan], num_fs,
+ &blend_in_color[chan], 1);
+ lp_build_name(blend_in_color[chan], "color.%c", "rgba"[chan]);
+
+ }
+
+ lp_build_conv_mask(builder, fs_type, blend_type,
+ fs_mask, num_fs,
+ &blend_mask, 1);
+
+ /*
+ * Blending.
+ */
+
+ generate_blend(&key->blend,
+ builder,
+ blend_type,
+ context_ptr,
+ blend_mask,
+ blend_in_color,
+ color_ptr);
+
+ LLVMBuildRetVoid(builder);
+
+ LLVMDisposeBuilder(builder);
+
+ /*
+ * Translate the LLVM IR into machine code.
+ */
+
+ LLVMRunFunctionPassManager(screen->pass, variant->function);
+
+#ifdef DEBUG
+ LLVMDumpValue(variant->function);
+ debug_printf("\n");
+#endif
+
+ if(LLVMVerifyFunction(variant->function, LLVMPrintMessageAction)) {
+ LLVMDumpValue(variant->function);
+ abort();
+ }
+
+ variant->jit_function = (lp_jit_frag_func)LLVMGetPointerToGlobal(screen->engine, variant->function);
+
+#ifdef DEBUG
+ lp_disassemble(variant->jit_function);
+#endif
+
+ variant->next = shader->variants;
+ shader->variants = variant;
+
+ return variant;
+}
+
+
+void *
+llvmpipe_create_fs_state(struct pipe_context *pipe,
+ const struct pipe_shader_state *templ)
+{
+ struct lp_fragment_shader *shader;
+
+ shader = CALLOC_STRUCT(lp_fragment_shader);
+ if (!shader)
+ return NULL;
+
+ /* get/save the summary info for this shader */
+ tgsi_scan_shader(templ->tokens, &shader->info);
+
+ /* we need to keep a local copy of the tokens */
+ shader->base.tokens = tgsi_dup_tokens(templ->tokens);
+
+ return shader;
+}
+
+
+void
+llvmpipe_bind_fs_state(struct pipe_context *pipe, void *fs)
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+
+ llvmpipe->fs = (struct lp_fragment_shader *) fs;
+
+ llvmpipe->dirty |= LP_NEW_FS;
+}
+
+
+void
+llvmpipe_delete_fs_state(struct pipe_context *pipe, void *fs)
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+ struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
+ struct lp_fragment_shader *shader = fs;
+ struct lp_fragment_shader_variant *variant;
+
+ assert(fs != llvmpipe->fs);
+
+ variant = shader->variants;
+ while(variant) {
+ struct lp_fragment_shader_variant *next = variant->next;
+
+ if(variant->function) {
+ if(variant->jit_function)
+ LLVMFreeMachineCodeForFunction(screen->engine, variant->function);
+ LLVMDeleteFunction(variant->function);
+ }
+
+ FREE(variant);
+
+ variant = next;
+ }
+
+ FREE((void *) shader->base.tokens);
+ FREE(shader);
+}
+
+
+
+void
+llvmpipe_set_constant_buffer(struct pipe_context *pipe,
+ uint shader, uint index,
+ const struct pipe_constant_buffer *buf)
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+
+ assert(shader < PIPE_SHADER_TYPES);
+ assert(index == 0);
+
+ /* note: reference counting */
+ pipe_buffer_reference(&llvmpipe->constants[shader].buffer,
+ buf ? buf->buffer : NULL);
+
+ llvmpipe->dirty |= LP_NEW_CONSTANTS;
+}
+
+
+/**
+ * We need to generate several variants of the fragment pipeline to match
+ * all the combinations of the contributing state atoms.
+ *
+ * TODO: there is actually no reason to tie this to context state -- the
+ * generated code could be cached globally in the screen.
+ */
+static void
+make_variant_key(struct llvmpipe_context *lp,
+ struct lp_fragment_shader *shader,
+ struct lp_fragment_shader_variant_key *key)
+{
+ unsigned i;
+
+ memset(key, 0, sizeof *key);
+
+ if(lp->framebuffer.zsbuf &&
+ lp->depth_stencil->depth.enabled) {
+ key->zsbuf_format = lp->framebuffer.zsbuf->format;
+ memcpy(&key->depth, &lp->depth_stencil->depth, sizeof key->depth);
+ }
+
+ key->alpha.enabled = lp->depth_stencil->alpha.enabled;
+ if(key->alpha.enabled)
+ key->alpha.func = lp->depth_stencil->alpha.func;
+ /* alpha.ref_value is passed in jit_context */
+
+ if(lp->framebuffer.cbufs[0]) {
+ const struct util_format_description *format_desc;
+ unsigned chan;
+
+ memcpy(&key->blend, lp->blend, sizeof key->blend);
+
+ format_desc = util_format_description(lp->framebuffer.cbufs[0]->format);
+ assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB ||
+ format_desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB);
+
+ /* mask out color channels not present in the color buffer */
+ for(chan = 0; chan < 4; ++chan) {
+ enum util_format_swizzle swizzle = format_desc->swizzle[chan];
+ if(swizzle > 4)
+ key->blend.colormask &= ~(1 << chan);
+ }
+ }
+
+ for(i = 0; i < PIPE_MAX_SAMPLERS; ++i)
+ if(shader->info.file_mask[TGSI_FILE_SAMPLER] & (1 << i))
+ lp_sampler_static_state(&key->sampler[i], lp->texture[i], lp->sampler[i]);
+}
+
+
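+/**
+ * Pick the shader variant matching the current state, generating it on
+ * first use.
+ *
+ * This is expected to run as part of state validation (whenever LP_NEW_FS or
+ * any of the state that feeds make_variant_key() is dirty), so that
+ * shader->current points at code specialized for the bound state before
+ * rasterization starts.
+ */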
+void
+llvmpipe_update_fs(struct llvmpipe_context *lp)
+{
+ struct lp_fragment_shader *shader = lp->fs;
+ struct lp_fragment_shader_variant_key key;
+ struct lp_fragment_shader_variant *variant;
+
+ make_variant_key(lp, shader, &key);
+
+ variant = shader->variants;
+ while(variant) {
+ if(memcmp(&variant->key, &key, sizeof key) == 0)
+ break;
+
+ variant = variant->next;
+ }
+
+ if(!variant)
+ variant = generate_fragment(lp, shader, &key);
+
+ shader->current = variant;
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_state_rasterizer.c b/src/gallium/drivers/llvmpipe/lp_state_rasterizer.c
new file mode 100644
index 0000000000..4561c6b845
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_state_rasterizer.c
@@ -0,0 +1,62 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "lp_context.h"
+#include "lp_state.h"
+#include "draw/draw_context.h"
+
+
+
+void *
+llvmpipe_create_rasterizer_state(struct pipe_context *pipe,
+ const struct pipe_rasterizer_state *rast)
+{
+ return mem_dup(rast, sizeof(*rast));
+}
+
+void llvmpipe_bind_rasterizer_state(struct pipe_context *pipe,
+ void *setup)
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+
+ /* pass-through to draw module */
+ draw_set_rasterizer_state(llvmpipe->draw, setup);
+
+ llvmpipe->rasterizer = (struct pipe_rasterizer_state *)setup;
+
+ llvmpipe->dirty |= LP_NEW_RASTERIZER;
+}
+
+void llvmpipe_delete_rasterizer_state(struct pipe_context *pipe,
+ void *rasterizer)
+{
+ FREE( rasterizer );
+}
+
+
diff --git a/src/gallium/drivers/llvmpipe/lp_state_sampler.c b/src/gallium/drivers/llvmpipe/lp_state_sampler.c
new file mode 100644
index 0000000000..c69d90c723
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_state_sampler.c
@@ -0,0 +1,127 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors:
+ * Brian Paul
+ */
+
+#include "util/u_memory.h"
+
+#include "draw/draw_context.h"
+
+#include "lp_context.h"
+#include "lp_context.h"
+#include "lp_state.h"
+#include "lp_texture.h"
+#include "lp_tex_cache.h"
+#include "draw/draw_context.h"
+
+
+
+void *
+llvmpipe_create_sampler_state(struct pipe_context *pipe,
+ const struct pipe_sampler_state *sampler)
+{
+ return mem_dup(sampler, sizeof(*sampler));
+}
+
+
+void
+llvmpipe_bind_sampler_states(struct pipe_context *pipe,
+ unsigned num, void **sampler)
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+ unsigned i;
+
+ assert(num <= PIPE_MAX_SAMPLERS);
+
+ /* Check for no-op */
+ if (num == llvmpipe->num_samplers &&
+ !memcmp(llvmpipe->sampler, sampler, num * sizeof(void *)))
+ return;
+
+ draw_flush(llvmpipe->draw);
+
+ for (i = 0; i < num; ++i)
+ llvmpipe->sampler[i] = sampler[i];
+ for (i = num; i < PIPE_MAX_SAMPLERS; ++i)
+ llvmpipe->sampler[i] = NULL;
+
+ llvmpipe->num_samplers = num;
+
+ llvmpipe->dirty |= LP_NEW_SAMPLER;
+}
+
+
+void
+llvmpipe_set_sampler_textures(struct pipe_context *pipe,
+ unsigned num, struct pipe_texture **texture)
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+ uint i;
+
+ assert(num <= PIPE_MAX_SAMPLERS);
+
+ /* Check for no-op */
+ if (num == llvmpipe->num_textures &&
+ !memcmp(llvmpipe->texture, texture, num * sizeof(struct pipe_texture *)))
+ return;
+
+ draw_flush(llvmpipe->draw);
+
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
+ struct pipe_texture *tex = i < num ? texture[i] : NULL;
+
+ pipe_texture_reference(&llvmpipe->texture[i], tex);
+ lp_tex_tile_cache_set_texture(llvmpipe->tex_cache[i], tex);
+
+ if(tex) {
+ struct llvmpipe_texture *lp_tex = llvmpipe_texture(tex);
+ struct lp_jit_texture *jit_tex = &llvmpipe->jit_context.textures[i];
+ jit_tex->width = tex->width[0];
+ jit_tex->height = tex->height[0];
+ jit_tex->stride = lp_tex->stride[0];
+ if(!lp_tex->dt)
+ jit_tex->data = lp_tex->data;
+ }
+ }
+
+ llvmpipe->num_textures = num;
+
+ llvmpipe->dirty |= LP_NEW_TEXTURE;
+}
+
+
+void
+llvmpipe_delete_sampler_state(struct pipe_context *pipe,
+ void *sampler)
+{
+ FREE( sampler );
+}
+
+
+
diff --git a/src/gallium/drivers/llvmpipe/lp_state_surface.c b/src/gallium/drivers/llvmpipe/lp_state_surface.c
new file mode 100644
index 0000000000..177a26b7b1
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_state_surface.c
@@ -0,0 +1,106 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "lp_context.h"
+#include "lp_state.h"
+#include "lp_surface.h"
+#include "lp_tile_cache.h"
+
+#include "draw/draw_context.h"
+
+
+/**
+ * Set the framebuffer surface info: color buffers, zbuffer, stencil buffer.
+ * Here we flush the old surfaces and update the tile cache to point to the
+ * new surfaces.
+ *
+ * XXX this might get moved someday
+ */
+void
+llvmpipe_set_framebuffer_state(struct pipe_context *pipe,
+ const struct pipe_framebuffer_state *fb)
+{
+ struct llvmpipe_context *lp = llvmpipe_context(pipe);
+ uint i;
+
+ for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
+ /* check if changing cbuf */
+ if (lp->framebuffer.cbufs[i] != fb->cbufs[i]) {
+ /* flush old */
+ lp_flush_tile_cache(lp->cbuf_cache[i]);
+
+ /* assign new */
+ lp->framebuffer.cbufs[i] = fb->cbufs[i];
+
+ /* update cache */
+ lp_tile_cache_set_surface(lp->cbuf_cache[i], fb->cbufs[i]);
+ }
+ }
+
+ lp->framebuffer.nr_cbufs = fb->nr_cbufs;
+
+ /* zbuf changing? */
+ if (lp->framebuffer.zsbuf != fb->zsbuf) {
+
+ if(lp->zsbuf_transfer) {
+ struct pipe_screen *screen = pipe->screen;
+
+ if(lp->zsbuf_map) {
+ screen->transfer_unmap(screen, lp->zsbuf_transfer);
+ lp->zsbuf_map = NULL;
+ }
+
+ screen->tex_transfer_destroy(lp->zsbuf_transfer);
+ lp->zsbuf_transfer = NULL;
+ }
+
+ /* assign new */
+ lp->framebuffer.zsbuf = fb->zsbuf;
+
+ /* Tell draw module how deep the Z/depth buffer is */
+ if (lp->framebuffer.zsbuf) {
+ int depth_bits;
+ double mrd;
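+      /* The minimum resolvable depth is roughly one LSB of the Z buffer:
+       * about 1/2^24 ~= 6e-8 for Z buffers deeper than 16 bits, and
+       * 1/2^16 ~= 1.5e-5 for 16-bit Z -- hence the slightly padded
+       * constants below.
+       */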
+ depth_bits = pf_get_component_bits(lp->framebuffer.zsbuf->format,
+ PIPE_FORMAT_COMP_Z);
+ if (depth_bits > 16) {
+ mrd = 0.0000001;
+ }
+ else {
+ mrd = 0.00002;
+ }
+ draw_set_mrd(lp->draw, mrd);
+ }
+ }
+
+ lp->framebuffer.width = fb->width;
+ lp->framebuffer.height = fb->height;
+
+ lp->dirty |= LP_NEW_FRAMEBUFFER;
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_state_vertex.c b/src/gallium/drivers/llvmpipe/lp_state_vertex.c
new file mode 100644
index 0000000000..1a17631a4c
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_state_vertex.c
@@ -0,0 +1,73 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "lp_context.h"
+#include "lp_state.h"
+#include "lp_surface.h"
+
+#include "draw/draw_context.h"
+
+
+void
+llvmpipe_set_vertex_elements(struct pipe_context *pipe,
+ unsigned count,
+ const struct pipe_vertex_element *attribs)
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+
+ assert(count <= PIPE_MAX_ATTRIBS);
+
+ memcpy(llvmpipe->vertex_element, attribs,
+ count * sizeof(struct pipe_vertex_element));
+ llvmpipe->num_vertex_elements = count;
+
+ llvmpipe->dirty |= LP_NEW_VERTEX;
+
+ draw_set_vertex_elements(llvmpipe->draw, count, attribs);
+}
+
+
+void
+llvmpipe_set_vertex_buffers(struct pipe_context *pipe,
+ unsigned count,
+ const struct pipe_vertex_buffer *buffers)
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+
+ assert(count <= PIPE_MAX_ATTRIBS);
+
+ memcpy(llvmpipe->vertex_buffer, buffers, count * sizeof(buffers[0]));
+ llvmpipe->num_vertex_buffers = count;
+
+ llvmpipe->dirty |= LP_NEW_VERTEX;
+
+ draw_set_vertex_buffers(llvmpipe->draw, count, buffers);
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_state_vs.c b/src/gallium/drivers/llvmpipe/lp_state_vs.c
new file mode 100644
index 0000000000..15c3029614
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_state_vs.c
@@ -0,0 +1,96 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "pipe/p_defines.h"
+#include "tgsi/tgsi_parse.h"
+#include "util/u_memory.h"
+#include "draw/draw_context.h"
+
+#include "lp_context.h"
+#include "lp_state.h"
+
+
+void *
+llvmpipe_create_vs_state(struct pipe_context *pipe,
+ const struct pipe_shader_state *templ)
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+ struct lp_vertex_shader *state;
+
+ state = CALLOC_STRUCT(lp_vertex_shader);
+ if (state == NULL )
+ goto fail;
+
+ /* copy shader tokens, the ones passed in will go away.
+ */
+ state->shader.tokens = tgsi_dup_tokens(templ->tokens);
+ if (state->shader.tokens == NULL)
+ goto fail;
+
+ state->draw_data = draw_create_vertex_shader(llvmpipe->draw, templ);
+ if (state->draw_data == NULL)
+ goto fail;
+
+ return state;
+
+fail:
+ if (state) {
+ FREE( (void *)state->shader.tokens );
+ FREE( state->draw_data );
+ FREE( state );
+ }
+ return NULL;
+}
+
+
+void
+llvmpipe_bind_vs_state(struct pipe_context *pipe, void *vs)
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+
+ llvmpipe->vs = (const struct lp_vertex_shader *)vs;
+
+ draw_bind_vertex_shader(llvmpipe->draw,
+ (llvmpipe->vs ? llvmpipe->vs->draw_data : NULL));
+
+ llvmpipe->dirty |= LP_NEW_VS;
+}
+
+
+void
+llvmpipe_delete_vs_state(struct pipe_context *pipe, void *vs)
+{
+ struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
+
+ struct lp_vertex_shader *state =
+ (struct lp_vertex_shader *)vs;
+
+ draw_delete_vertex_shader(llvmpipe->draw, state->draw_data);
+ FREE( state );
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_surface.c b/src/gallium/drivers/llvmpipe/lp_surface.c
new file mode 100644
index 0000000000..6110b0a193
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_surface.c
@@ -0,0 +1,50 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_rect.h"
+#include "lp_context.h"
+#include "lp_surface.h"
+
+
+static void
+lp_surface_copy(struct pipe_context *pipe,
+ struct pipe_surface *dest, unsigned destx, unsigned desty,
+ struct pipe_surface *src, unsigned srcx, unsigned srcy,
+ unsigned width, unsigned height)
+{
+ util_surface_copy(pipe, FALSE,
+ dest, destx, desty,
+ src, srcx, srcy,
+ width, height);
+}
+
+void
+lp_init_surface_functions(struct llvmpipe_context *lp)
+{
+ lp->pipe.surface_copy = lp_surface_copy;
+ lp->pipe.surface_fill = util_surface_fill;
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_surface.h b/src/gallium/drivers/llvmpipe/lp_surface.h
new file mode 100644
index 0000000000..4d78a53c4f
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_surface.h
@@ -0,0 +1,42 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef LP_SURFACE_H
+#define LP_SURFACE_H
+
+
+struct llvmpipe_context;
+
+
+extern void
+lp_init_surface_functions(struct llvmpipe_context *lp);
+
+
+#endif /* LP_SURFACE_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_test.h b/src/gallium/drivers/llvmpipe/lp_test.h
new file mode 100644
index 0000000000..69aaae26e0
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_test.h
@@ -0,0 +1,128 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Shared testing code.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#ifndef LP_TEST_H
+#define LP_TEST_H
+
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <float.h>
+
+#include <llvm-c/Core.h>
+#include <llvm-c/Analysis.h>
+#include <llvm-c/ExecutionEngine.h>
+#include <llvm-c/Target.h>
+#include <llvm-c/BitWriter.h>
+#include <llvm-c/Transforms/Scalar.h>
+
+#include "pipe/p_state.h"
+#include "util/u_format.h"
+#include "util/u_math.h"
+#include "util/u_debug_dump.h"
+
+#include "lp_bld_type.h"
+
+
+void
+write_tsv_header(FILE *fp);
+
+
+boolean
+test_some(unsigned verbose, FILE *fp, unsigned long n);
+
+
+boolean
+test_all(unsigned verbose, FILE *fp);
+
+
+static INLINE uint64_t
+rdtsc(void)
+{
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ uint32_t hi, lo;
+ __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
+ return ((uint64_t)lo) | (((uint64_t)hi) << 32);
+#else
+ return 0;
+#endif
+}
+
+
+float
+random_float(void);
+
+
+void
+dump_type(FILE *fp, union lp_type type);
+
+
+double
+read_elem(union lp_type type, const void *src, unsigned index);
+
+
+void
+write_elem(union lp_type type, void *dst, unsigned index, double src);
+
+
+void
+random_elem(union lp_type type, void *dst, unsigned index);
+
+
+void
+read_vec(union lp_type type, const void *src, double *dst);
+
+
+void
+write_vec(union lp_type type, void *dst, const double *src);
+
+
+void
+random_vec(union lp_type type, void *dst);
+
+
+boolean
+compare_vec_with_eps(union lp_type type, const void *res, const void *ref, double eps);
+
+
+boolean
+compare_vec(union lp_type type, const void *res, const void *ref);
+
+
+void
+dump_vec(FILE *fp, union lp_type type, const void *src);
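+
+
+/*
+ * Each test module (e.g. lp_test_blend.c, lp_test_conv.c) implements
+ * write_tsv_header(), test_some() and test_all() declared above; a shared
+ * driver (not part of this diff) is expected to dispatch to them.  Below is
+ * a minimal, purely illustrative sketch of such a driver -- the output file
+ * name and the absence of option parsing are assumptions, not the actual
+ * driver:
+ */
+#if 0
+int
+main(void)
+{
+   unsigned verbose = 0;
+   FILE *fp = fopen("result.tsv", "w");   /* hypothetical output file */
+   boolean success;
+
+   if (!fp)
+      return 1;
+
+   write_tsv_header(fp);
+   success = test_all(verbose, fp);
+   fclose(fp);
+
+   return success ? 0 : 1;
+}
+#endif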
+
+
+#endif /* !LP_TEST_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_test_blend.c b/src/gallium/drivers/llvmpipe/lp_test_blend.c
new file mode 100644
index 0000000000..8dfad468e3
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_test_blend.c
@@ -0,0 +1,881 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * @file
+ * Unit tests for blend LLVM IR generation
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ *
+ * Blend computation code derived from code written by
+ * @author Brian Paul <brian@vmware.com>
+ */
+
+
+#include "lp_bld_type.h"
+#include "lp_bld_arit.h"
+#include "lp_bld_blend.h"
+#include "lp_bld_debug.h"
+#include "lp_test.h"
+
+
+enum vector_mode
+{
+ AoS = 0,
+ SoA = 1
+};
+
+
+typedef void (*blend_test_ptr_t)(const void *src, const void *dst, const void *con, void *res);
+
+
+void
+write_tsv_header(FILE *fp)
+{
+ fprintf(fp,
+ "result\t"
+ "cycles_per_channel\t"
+ "mode\t"
+ "type\t"
+ "sep_func\t"
+ "sep_src_factor\t"
+ "sep_dst_factor\t"
+ "rgb_func\t"
+ "rgb_src_factor\t"
+ "rgb_dst_factor\t"
+ "alpha_func\t"
+ "alpha_src_factor\t"
+ "alpha_dst_factor\n");
+
+ fflush(fp);
+}
+
+
+static void
+write_tsv_row(FILE *fp,
+ const struct pipe_blend_state *blend,
+ enum vector_mode mode,
+ union lp_type type,
+ double cycles,
+ boolean success)
+{
+ fprintf(fp, "%s\t", success ? "pass" : "fail");
+
+ if (mode == AoS) {
+ fprintf(fp, "%.1f\t", cycles / type.length);
+ fprintf(fp, "aos\t");
+ }
+
+ if (mode == SoA) {
+ fprintf(fp, "%.1f\t", cycles / (4 * type.length));
+ fprintf(fp, "soa\t");
+ }
+
+ fprintf(fp, "%s%u%sx%u\t",
+ type.floating ? "f" : (type.fixed ? "h" : (type.sign ? "s" : "u")),
+ type.width,
+ type.norm ? "n" : "",
+ type.length);
+
+ fprintf(fp,
+ "%s\t%s\t%s\t",
+ blend->rgb_func != blend->alpha_func ? "true" : "false",
+ blend->rgb_src_factor != blend->alpha_src_factor ? "true" : "false",
+ blend->rgb_dst_factor != blend->alpha_dst_factor ? "true" : "false");
+
+ fprintf(fp,
+ "%s\t%s\t%s\t%s\t%s\t%s\n",
+ debug_dump_blend_func(blend->rgb_func, TRUE),
+ debug_dump_blend_factor(blend->rgb_src_factor, TRUE),
+ debug_dump_blend_factor(blend->rgb_dst_factor, TRUE),
+ debug_dump_blend_func(blend->alpha_func, TRUE),
+ debug_dump_blend_factor(blend->alpha_src_factor, TRUE),
+ debug_dump_blend_factor(blend->alpha_dst_factor, TRUE));
+
+ fflush(fp);
+}
+
+
+static void
+dump_blend_type(FILE *fp,
+ const struct pipe_blend_state *blend,
+ enum vector_mode mode,
+ union lp_type type)
+{
+ fprintf(fp, "%s", mode ? "soa" : "aos");
+
+ fprintf(fp, " type=%s%u%sx%u",
+ type.floating ? "f" : (type.fixed ? "h" : (type.sign ? "s" : "u")),
+ type.width,
+ type.norm ? "n" : "",
+ type.length);
+
+ fprintf(fp,
+ " %s=%s %s=%s %s=%s %s=%s %s=%s %s=%s",
+ "rgb_func", debug_dump_blend_func(blend->rgb_func, TRUE),
+ "rgb_src_factor", debug_dump_blend_factor(blend->rgb_src_factor, TRUE),
+ "rgb_dst_factor", debug_dump_blend_factor(blend->rgb_dst_factor, TRUE),
+ "alpha_func", debug_dump_blend_func(blend->alpha_func, TRUE),
+ "alpha_src_factor", debug_dump_blend_factor(blend->alpha_src_factor, TRUE),
+ "alpha_dst_factor", debug_dump_blend_factor(blend->alpha_dst_factor, TRUE));
+
+ fprintf(fp, " ...\n");
+ fflush(fp);
+}
+
+
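+/**
+ * Build a module-level "test" function with the blend_test_ptr_t signature:
+ * pointers to src, dst, constant-color and result vectors.  In SoA mode each
+ * pointer addresses four consecutive vectors, one per R/G/B/A channel.
+ */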
+static LLVMValueRef
+add_blend_test(LLVMModuleRef module,
+ const struct pipe_blend_state *blend,
+ enum vector_mode mode,
+ union lp_type type)
+{
+   LLVMTypeRef vec_type;
+ LLVMTypeRef args[4];
+ LLVMValueRef func;
+ LLVMValueRef src_ptr;
+ LLVMValueRef dst_ptr;
+ LLVMValueRef const_ptr;
+ LLVMValueRef res_ptr;
+ LLVMBasicBlockRef block;
+ LLVMBuilderRef builder;
+
+   vec_type = lp_build_vec_type(type);
+
+ args[3] = args[2] = args[1] = args[0] = LLVMPointerType(vec_type, 0);
+ func = LLVMAddFunction(module, "test", LLVMFunctionType(LLVMVoidType(), args, 4, 0));
+ LLVMSetFunctionCallConv(func, LLVMCCallConv);
+ src_ptr = LLVMGetParam(func, 0);
+ dst_ptr = LLVMGetParam(func, 1);
+ const_ptr = LLVMGetParam(func, 2);
+ res_ptr = LLVMGetParam(func, 3);
+
+ block = LLVMAppendBasicBlock(func, "entry");
+ builder = LLVMCreateBuilder();
+ LLVMPositionBuilderAtEnd(builder, block);
+
+ if (mode == AoS) {
+ LLVMValueRef src;
+ LLVMValueRef dst;
+ LLVMValueRef con;
+ LLVMValueRef res;
+
+ src = LLVMBuildLoad(builder, src_ptr, "src");
+ dst = LLVMBuildLoad(builder, dst_ptr, "dst");
+ con = LLVMBuildLoad(builder, const_ptr, "const");
+
+ res = lp_build_blend_aos(builder, blend, type, src, dst, con, 3);
+
+ lp_build_name(res, "res");
+
+ LLVMBuildStore(builder, res, res_ptr);
+ }
+
+ if (mode == SoA) {
+ LLVMValueRef src[4];
+ LLVMValueRef dst[4];
+ LLVMValueRef con[4];
+ LLVMValueRef res[4];
+ unsigned i;
+
+ for(i = 0; i < 4; ++i) {
+ LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0);
+ src[i] = LLVMBuildLoad(builder, LLVMBuildGEP(builder, src_ptr, &index, 1, ""), "");
+ dst[i] = LLVMBuildLoad(builder, LLVMBuildGEP(builder, dst_ptr, &index, 1, ""), "");
+ con[i] = LLVMBuildLoad(builder, LLVMBuildGEP(builder, const_ptr, &index, 1, ""), "");
+ lp_build_name(src[i], "src.%c", "rgba"[i]);
+ lp_build_name(con[i], "con.%c", "rgba"[i]);
+ lp_build_name(dst[i], "dst.%c", "rgba"[i]);
+ }
+
+ lp_build_blend_soa(builder, blend, type, src, dst, con, res);
+
+ for(i = 0; i < 4; ++i) {
+ LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0);
+ lp_build_name(res[i], "res.%c", "rgba"[i]);
+ LLVMBuildStore(builder, res[i], LLVMBuildGEP(builder, res_ptr, &index, 1, ""));
+ }
+ }
+
+   LLVMBuildRetVoid(builder);
+
+ LLVMDisposeBuilder(builder);
+ return func;
+}
+
+
+/** Add and limit result to ceiling of 1.0 */
+#define ADD_SAT(R, A, B) \
+do { \
+ R = (A) + (B); if (R > 1.0f) R = 1.0f; \
+} while (0)
+
+/** Subtract and limit result to floor of 0.0 */
+#define SUB_SAT(R, A, B) \
+do { \
+ R = (A) - (B); if (R < 0.0f) R = 0.0f; \
+} while (0)
+
+
+static void
+compute_blend_ref_term(unsigned rgb_factor,
+ unsigned alpha_factor,
+ const double *factor,
+ const double *src,
+ const double *dst,
+ const double *con,
+ double *term)
+{
+ double temp;
+
+ switch (rgb_factor) {
+ case PIPE_BLENDFACTOR_ONE:
+ term[0] = factor[0]; /* R */
+ term[1] = factor[1]; /* G */
+ term[2] = factor[2]; /* B */
+ break;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ term[0] = factor[0] * src[0]; /* R */
+ term[1] = factor[1] * src[1]; /* G */
+ term[2] = factor[2] * src[2]; /* B */
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ term[0] = factor[0] * src[3]; /* R */
+ term[1] = factor[1] * src[3]; /* G */
+ term[2] = factor[2] * src[3]; /* B */
+ break;
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ term[0] = factor[0] * dst[0]; /* R */
+ term[1] = factor[1] * dst[1]; /* G */
+ term[2] = factor[2] * dst[2]; /* B */
+ break;
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ term[0] = factor[0] * dst[3]; /* R */
+ term[1] = factor[1] * dst[3]; /* G */
+ term[2] = factor[2] * dst[3]; /* B */
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ temp = MIN2(src[3], 1.0f - dst[3]);
+ term[0] = factor[0] * temp; /* R */
+ term[1] = factor[1] * temp; /* G */
+ term[2] = factor[2] * temp; /* B */
+ break;
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ term[0] = factor[0] * con[0]; /* R */
+ term[1] = factor[1] * con[1]; /* G */
+ term[2] = factor[2] * con[2]; /* B */
+ break;
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ term[0] = factor[0] * con[3]; /* R */
+ term[1] = factor[1] * con[3]; /* G */
+ term[2] = factor[2] * con[3]; /* B */
+ break;
+ case PIPE_BLENDFACTOR_SRC1_COLOR:
+ assert(0); /* to do */
+ break;
+ case PIPE_BLENDFACTOR_SRC1_ALPHA:
+ assert(0); /* to do */
+ break;
+ case PIPE_BLENDFACTOR_ZERO:
+ term[0] = 0.0f; /* R */
+ term[1] = 0.0f; /* G */
+ term[2] = 0.0f; /* B */
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ term[0] = factor[0] * (1.0f - src[0]); /* R */
+ term[1] = factor[1] * (1.0f - src[1]); /* G */
+ term[2] = factor[2] * (1.0f - src[2]); /* B */
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ term[0] = factor[0] * (1.0f - src[3]); /* R */
+ term[1] = factor[1] * (1.0f - src[3]); /* G */
+ term[2] = factor[2] * (1.0f - src[3]); /* B */
+ break;
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ term[0] = factor[0] * (1.0f - dst[3]); /* R */
+ term[1] = factor[1] * (1.0f - dst[3]); /* G */
+ term[2] = factor[2] * (1.0f - dst[3]); /* B */
+ break;
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ term[0] = factor[0] * (1.0f - dst[0]); /* R */
+ term[1] = factor[1] * (1.0f - dst[1]); /* G */
+ term[2] = factor[2] * (1.0f - dst[2]); /* B */
+ break;
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ term[0] = factor[0] * (1.0f - con[0]); /* R */
+ term[1] = factor[1] * (1.0f - con[1]); /* G */
+ term[2] = factor[2] * (1.0f - con[2]); /* B */
+ break;
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ term[0] = factor[0] * (1.0f - con[3]); /* R */
+ term[1] = factor[1] * (1.0f - con[3]); /* G */
+ term[2] = factor[2] * (1.0f - con[3]); /* B */
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
+ assert(0); /* to do */
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
+ assert(0); /* to do */
+ break;
+ default:
+ assert(0);
+ }
+
+   /*
+    * Compute the alpha (A) component of this term
+    */
+ switch (alpha_factor) {
+ case PIPE_BLENDFACTOR_ONE:
+ term[3] = factor[3]; /* A */
+ break;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ term[3] = factor[3] * src[3]; /* A */
+ break;
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ term[3] = factor[3] * dst[3]; /* A */
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ term[3] = src[3]; /* A */
+ break;
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ term[3] = factor[3] * con[3]; /* A */
+ break;
+ case PIPE_BLENDFACTOR_ZERO:
+ term[3] = 0.0f; /* A */
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ term[3] = factor[3] * (1.0f - src[3]); /* A */
+ break;
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ term[3] = factor[3] * (1.0f - dst[3]); /* A */
+ break;
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ term[3] = factor[3] * (1.0f - con[3]);
+ break;
+ default:
+ assert(0);
+ }
+}
+
+
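+/**
+ * Reference blend computation the generated LLVM code is checked against.
+ *
+ * For example, with rgb_func = PIPE_BLEND_ADD, rgb_src_factor =
+ * PIPE_BLENDFACTOR_SRC_ALPHA and rgb_dst_factor =
+ * PIPE_BLENDFACTOR_INV_SRC_ALPHA this reduces to the familiar
+ *
+ *    res.rgb = saturate(src.rgb * src.a + dst.rgb * (1 - src.a))
+ */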
+static void
+compute_blend_ref(const struct pipe_blend_state *blend,
+ const double *src,
+ const double *dst,
+ const double *con,
+ double *res)
+{
+ double src_term[4];
+ double dst_term[4];
+
+ compute_blend_ref_term(blend->rgb_src_factor, blend->alpha_src_factor, src, src, dst, con, src_term);
+ compute_blend_ref_term(blend->rgb_dst_factor, blend->alpha_dst_factor, dst, src, dst, con, dst_term);
+
+ /*
+ * Combine RGB terms
+ */
+ switch (blend->rgb_func) {
+ case PIPE_BLEND_ADD:
+ ADD_SAT(res[0], src_term[0], dst_term[0]); /* R */
+ ADD_SAT(res[1], src_term[1], dst_term[1]); /* G */
+ ADD_SAT(res[2], src_term[2], dst_term[2]); /* B */
+ break;
+ case PIPE_BLEND_SUBTRACT:
+ SUB_SAT(res[0], src_term[0], dst_term[0]); /* R */
+ SUB_SAT(res[1], src_term[1], dst_term[1]); /* G */
+ SUB_SAT(res[2], src_term[2], dst_term[2]); /* B */
+ break;
+ case PIPE_BLEND_REVERSE_SUBTRACT:
+ SUB_SAT(res[0], dst_term[0], src_term[0]); /* R */
+ SUB_SAT(res[1], dst_term[1], src_term[1]); /* G */
+ SUB_SAT(res[2], dst_term[2], src_term[2]); /* B */
+ break;
+ case PIPE_BLEND_MIN:
+ res[0] = MIN2(src_term[0], dst_term[0]); /* R */
+ res[1] = MIN2(src_term[1], dst_term[1]); /* G */
+ res[2] = MIN2(src_term[2], dst_term[2]); /* B */
+ break;
+ case PIPE_BLEND_MAX:
+ res[0] = MAX2(src_term[0], dst_term[0]); /* R */
+ res[1] = MAX2(src_term[1], dst_term[1]); /* G */
+ res[2] = MAX2(src_term[2], dst_term[2]); /* B */
+ break;
+ default:
+ assert(0);
+ }
+
+ /*
+ * Combine A terms
+ */
+ switch (blend->alpha_func) {
+ case PIPE_BLEND_ADD:
+ ADD_SAT(res[3], src_term[3], dst_term[3]); /* A */
+ break;
+ case PIPE_BLEND_SUBTRACT:
+ SUB_SAT(res[3], src_term[3], dst_term[3]); /* A */
+ break;
+ case PIPE_BLEND_REVERSE_SUBTRACT:
+ SUB_SAT(res[3], dst_term[3], src_term[3]); /* A */
+ break;
+ case PIPE_BLEND_MIN:
+ res[3] = MIN2(src_term[3], dst_term[3]); /* A */
+ break;
+ case PIPE_BLEND_MAX:
+ res[3] = MAX2(src_term[3], dst_term[3]); /* A */
+ break;
+ default:
+ assert(0);
+ }
+}
+
+
+static boolean
+test_one(unsigned verbose,
+ FILE *fp,
+ const struct pipe_blend_state *blend,
+ enum vector_mode mode,
+ union lp_type type)
+{
+ LLVMModuleRef module = NULL;
+ LLVMValueRef func = NULL;
+ LLVMExecutionEngineRef engine = NULL;
+ LLVMModuleProviderRef provider = NULL;
+ LLVMPassManagerRef pass = NULL;
+ char *error = NULL;
+ blend_test_ptr_t blend_test_ptr;
+ boolean success;
+ const unsigned n = 32;
+ int64_t cycles[n];
+ double cycles_avg = 0.0;
+ unsigned i, j;
+
+ if(verbose >= 1)
+ dump_blend_type(stdout, blend, mode, type);
+
+ module = LLVMModuleCreateWithName("test");
+
+ func = add_blend_test(module, blend, mode, type);
+
+ if(LLVMVerifyModule(module, LLVMPrintMessageAction, &error)) {
+ LLVMDumpModule(module);
+ abort();
+ }
+ LLVMDisposeMessage(error);
+
+ provider = LLVMCreateModuleProviderForExistingModule(module);
+ if (LLVMCreateJITCompiler(&engine, provider, 1, &error)) {
+ if(verbose < 1)
+ dump_blend_type(stderr, blend, mode, type);
+ fprintf(stderr, "%s\n", error);
+ LLVMDisposeMessage(error);
+ abort();
+ }
+
+#if 0
+ pass = LLVMCreatePassManager();
+ LLVMAddTargetData(LLVMGetExecutionEngineTargetData(engine), pass);
+ /* These are the passes currently listed in llvm-c/Transforms/Scalar.h,
+ * but there are more on SVN. */
+ LLVMAddConstantPropagationPass(pass);
+ LLVMAddInstructionCombiningPass(pass);
+ LLVMAddPromoteMemoryToRegisterPass(pass);
+ LLVMAddGVNPass(pass);
+ LLVMAddCFGSimplificationPass(pass);
+ LLVMRunPassManager(pass, module);
+#else
+ (void)pass;
+#endif
+
+ if(verbose >= 2)
+ LLVMDumpModule(module);
+
+ blend_test_ptr = (blend_test_ptr_t)LLVMGetPointerToGlobal(engine, func);
+
+ if(verbose >= 2)
+ lp_disassemble(blend_test_ptr);
+
+ success = TRUE;
+ for(i = 0; i < n && success; ++i) {
+ if(mode == AoS) {
+ uint8_t src[LP_MAX_VECTOR_LENGTH*LP_MAX_TYPE_WIDTH/8];
+ uint8_t dst[LP_MAX_VECTOR_LENGTH*LP_MAX_TYPE_WIDTH/8];
+ uint8_t con[LP_MAX_VECTOR_LENGTH*LP_MAX_TYPE_WIDTH/8];
+ uint8_t res[LP_MAX_VECTOR_LENGTH*LP_MAX_TYPE_WIDTH/8];
+ uint8_t ref[LP_MAX_VECTOR_LENGTH*LP_MAX_TYPE_WIDTH/8];
+ int64_t start_counter = 0;
+ int64_t end_counter = 0;
+
+ random_vec(type, src);
+ random_vec(type, dst);
+ random_vec(type, con);
+
+ {
+ double fsrc[LP_MAX_VECTOR_LENGTH];
+ double fdst[LP_MAX_VECTOR_LENGTH];
+ double fcon[LP_MAX_VECTOR_LENGTH];
+ double fref[LP_MAX_VECTOR_LENGTH];
+
+ read_vec(type, src, fsrc);
+ read_vec(type, dst, fdst);
+ read_vec(type, con, fcon);
+
+ for(j = 0; j < type.length; j += 4)
+ compute_blend_ref(blend, fsrc + j, fdst + j, fcon + j, fref + j);
+
+ write_vec(type, ref, fref);
+ }
+
+ start_counter = rdtsc();
+ blend_test_ptr(src, dst, con, res);
+ end_counter = rdtsc();
+
+ cycles[i] = end_counter - start_counter;
+
+ if(!compare_vec(type, res, ref)) {
+ success = FALSE;
+
+ if(verbose < 1)
+ dump_blend_type(stderr, blend, mode, type);
+ fprintf(stderr, "MISMATCH\n");
+
+ fprintf(stderr, " Src: ");
+ dump_vec(stderr, type, src);
+ fprintf(stderr, "\n");
+
+ fprintf(stderr, " Dst: ");
+ dump_vec(stderr, type, dst);
+ fprintf(stderr, "\n");
+
+ fprintf(stderr, " Con: ");
+ dump_vec(stderr, type, con);
+ fprintf(stderr, "\n");
+
+ fprintf(stderr, " Res: ");
+ dump_vec(stderr, type, res);
+ fprintf(stderr, "\n");
+
+ fprintf(stderr, " Ref: ");
+ dump_vec(stderr, type, ref);
+ fprintf(stderr, "\n");
+ }
+ }
+
+ if(mode == SoA) {
+ const unsigned stride = type.length*type.width/8;
+ uint8_t src[4*LP_MAX_VECTOR_LENGTH*LP_MAX_TYPE_WIDTH/8];
+ uint8_t dst[4*LP_MAX_VECTOR_LENGTH*LP_MAX_TYPE_WIDTH/8];
+ uint8_t con[4*LP_MAX_VECTOR_LENGTH*LP_MAX_TYPE_WIDTH/8];
+ uint8_t res[4*LP_MAX_VECTOR_LENGTH*LP_MAX_TYPE_WIDTH/8];
+ uint8_t ref[4*LP_MAX_VECTOR_LENGTH*LP_MAX_TYPE_WIDTH/8];
+ int64_t start_counter = 0;
+ int64_t end_counter = 0;
+ boolean mismatch;
+
+ for(j = 0; j < 4; ++j) {
+ random_vec(type, src + j*stride);
+ random_vec(type, dst + j*stride);
+ random_vec(type, con + j*stride);
+ }
+
+ {
+ double fsrc[4];
+ double fdst[4];
+ double fcon[4];
+ double fref[4];
+ unsigned k;
+
+ for(k = 0; k < type.length; ++k) {
+ for(j = 0; j < 4; ++j) {
+ fsrc[j] = read_elem(type, src + j*stride, k);
+ fdst[j] = read_elem(type, dst + j*stride, k);
+ fcon[j] = read_elem(type, con + j*stride, k);
+ }
+
+ compute_blend_ref(blend, fsrc, fdst, fcon, fref);
+
+ for(j = 0; j < 4; ++j)
+ write_elem(type, ref + j*stride, k, fref[j]);
+ }
+ }
+
+ start_counter = rdtsc();
+ blend_test_ptr(src, dst, con, res);
+ end_counter = rdtsc();
+
+ cycles[i] = end_counter - start_counter;
+
+ mismatch = FALSE;
+ for (j = 0; j < 4; ++j)
+ if(!compare_vec(type, res + j*stride, ref + j*stride))
+ mismatch = TRUE;
+
+ if (mismatch) {
+ success = FALSE;
+
+ if(verbose < 1)
+ dump_blend_type(stderr, blend, mode, type);
+ fprintf(stderr, "MISMATCH\n");
+ for(j = 0; j < 4; ++j) {
+ char channel = "RGBA"[j];
+ fprintf(stderr, " Src%c: ", channel);
+ dump_vec(stderr, type, src + j*stride);
+ fprintf(stderr, "\n");
+
+ fprintf(stderr, " Dst%c: ", channel);
+ dump_vec(stderr, type, dst + j*stride);
+ fprintf(stderr, "\n");
+
+ fprintf(stderr, " Con%c: ", channel);
+ dump_vec(stderr, type, con + j*stride);
+ fprintf(stderr, "\n");
+
+ fprintf(stderr, " Res%c: ", channel);
+ dump_vec(stderr, type, res + j*stride);
+ fprintf(stderr, "\n");
+
+ fprintf(stderr, " Ref%c: ", channel);
+ dump_vec(stderr, type, ref + j*stride);
+ fprintf(stderr, "\n");
+ }
+ }
+ }
+ }
+
+   /*
+    * Unfortunately the output of the cycle counter is not very reliable as it
+    * comes -- sometimes we get outliers (due to IRQs perhaps?) which are
+    * better removed to avoid random or biased data: samples more than four
+    * standard deviations from the mean are discarded and the rest averaged.
+    */
+ {
+ double sum = 0.0, sum2 = 0.0;
+ double avg, std;
+ unsigned m;
+
+ for(i = 0; i < n; ++i) {
+ sum += cycles[i];
+ sum2 += cycles[i]*cycles[i];
+ }
+
+ avg = sum/n;
+ std = sqrtf((sum2 - n*avg*avg)/n);
+
+ m = 0;
+ sum = 0.0;
+ for(i = 0; i < n; ++i) {
+ if(fabs(cycles[i] - avg) <= 4.0*std) {
+ sum += cycles[i];
+ ++m;
+ }
+ }
+
+ cycles_avg = sum/m;
+
+ }
+
+ if(fp)
+ write_tsv_row(fp, blend, mode, type, cycles_avg, success);
+
+ if (!success) {
+ if(verbose < 2)
+ LLVMDumpModule(module);
+ LLVMWriteBitcodeToFile(module, "blend.bc");
+ fprintf(stderr, "blend.bc written\n");
+ fprintf(stderr, "Invoke as \"llc -o - blend.bc\"\n");
+ abort();
+ }
+
+ LLVMFreeMachineCodeForFunction(engine, func);
+
+ LLVMDisposeExecutionEngine(engine);
+ if(pass)
+ LLVMDisposePassManager(pass);
+
+ return success;
+}
+
+
+const unsigned
+blend_factors[] = {
+ PIPE_BLENDFACTOR_ZERO,
+ PIPE_BLENDFACTOR_ONE,
+ PIPE_BLENDFACTOR_SRC_COLOR,
+ PIPE_BLENDFACTOR_SRC_ALPHA,
+ PIPE_BLENDFACTOR_DST_COLOR,
+ PIPE_BLENDFACTOR_DST_ALPHA,
+ PIPE_BLENDFACTOR_CONST_COLOR,
+ PIPE_BLENDFACTOR_CONST_ALPHA,
+#if 0
+ PIPE_BLENDFACTOR_SRC1_COLOR,
+ PIPE_BLENDFACTOR_SRC1_ALPHA,
+#endif
+ PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE,
+ PIPE_BLENDFACTOR_INV_SRC_COLOR,
+ PIPE_BLENDFACTOR_INV_SRC_ALPHA,
+ PIPE_BLENDFACTOR_INV_DST_COLOR,
+ PIPE_BLENDFACTOR_INV_DST_ALPHA,
+ PIPE_BLENDFACTOR_INV_CONST_COLOR,
+ PIPE_BLENDFACTOR_INV_CONST_ALPHA,
+#if 0
+ PIPE_BLENDFACTOR_INV_SRC1_COLOR,
+ PIPE_BLENDFACTOR_INV_SRC1_ALPHA,
+#endif
+};
+
+
+const unsigned
+blend_funcs[] = {
+ PIPE_BLEND_ADD,
+ PIPE_BLEND_SUBTRACT,
+ PIPE_BLEND_REVERSE_SUBTRACT,
+ PIPE_BLEND_MIN,
+ PIPE_BLEND_MAX
+};
+
+
+const union lp_type blend_types[] = {
+ /* float, fixed, sign, norm, width, len */
+ {{ TRUE, FALSE, FALSE, TRUE, 32, 4 }}, /* f32 x 4 */
+ {{ FALSE, FALSE, FALSE, TRUE, 8, 16 }}, /* u8n x 16 */
+};
+
+
+const unsigned num_funcs = sizeof(blend_funcs)/sizeof(blend_funcs[0]);
+const unsigned num_factors = sizeof(blend_factors)/sizeof(blend_factors[0]);
+const unsigned num_types = sizeof(blend_types)/sizeof(blend_types[0]);
+
+
+boolean
+test_all(unsigned verbose, FILE *fp)
+{
+ const unsigned *rgb_func;
+ const unsigned *rgb_src_factor;
+ const unsigned *rgb_dst_factor;
+ const unsigned *alpha_func;
+ const unsigned *alpha_src_factor;
+ const unsigned *alpha_dst_factor;
+ struct pipe_blend_state blend;
+ enum vector_mode mode;
+ const union lp_type *type;
+   boolean success = TRUE;
+
+ for(rgb_func = blend_funcs; rgb_func < &blend_funcs[num_funcs]; ++rgb_func) {
+ for(alpha_func = blend_funcs; alpha_func < &blend_funcs[num_funcs]; ++alpha_func) {
+ for(rgb_src_factor = blend_factors; rgb_src_factor < &blend_factors[num_factors]; ++rgb_src_factor) {
+ for(rgb_dst_factor = blend_factors; rgb_dst_factor <= rgb_src_factor; ++rgb_dst_factor) {
+ for(alpha_src_factor = blend_factors; alpha_src_factor < &blend_factors[num_factors]; ++alpha_src_factor) {
+ for(alpha_dst_factor = blend_factors; alpha_dst_factor <= alpha_src_factor; ++alpha_dst_factor) {
+ for(mode = 0; mode < 2; ++mode) {
+ for(type = blend_types; type < &blend_types[num_types]; ++type) {
+
+ if(*rgb_dst_factor == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE ||
+ *alpha_dst_factor == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE)
+ continue;
+
+ memset(&blend, 0, sizeof blend);
+ blend.blend_enable = 1;
+ blend.rgb_func = *rgb_func;
+ blend.rgb_src_factor = *rgb_src_factor;
+ blend.rgb_dst_factor = *rgb_dst_factor;
+ blend.alpha_func = *alpha_func;
+ blend.alpha_src_factor = *alpha_src_factor;
+ blend.alpha_dst_factor = *alpha_dst_factor;
+ blend.colormask = PIPE_MASK_RGBA;
+
+ if(!test_one(verbose, fp, &blend, mode, *type))
+ success = FALSE;
+
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return success;
+}
+
+
+boolean
+test_some(unsigned verbose, FILE *fp, unsigned long n)
+{
+ const unsigned *rgb_func;
+ const unsigned *rgb_src_factor;
+ const unsigned *rgb_dst_factor;
+ const unsigned *alpha_func;
+ const unsigned *alpha_src_factor;
+ const unsigned *alpha_dst_factor;
+ struct pipe_blend_state blend;
+ enum vector_mode mode;
+ const union lp_type *type;
+ unsigned long i;
+   boolean success = TRUE;
+
+ for(i = 0; i < n; ++i) {
+ rgb_func = &blend_funcs[random() % num_funcs];
+ alpha_func = &blend_funcs[random() % num_funcs];
+ rgb_src_factor = &blend_factors[random() % num_factors];
+ alpha_src_factor = &blend_factors[random() % num_factors];
+
+ do {
+ rgb_dst_factor = &blend_factors[random() % num_factors];
+ } while(*rgb_dst_factor == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE);
+
+ do {
+ alpha_dst_factor = &blend_factors[random() % num_factors];
+ } while(*alpha_dst_factor == PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE);
+
+ mode = random() & 1;
+
+ type = &blend_types[random() % num_types];
+
+ memset(&blend, 0, sizeof blend);
+ blend.blend_enable = 1;
+ blend.rgb_func = *rgb_func;
+ blend.rgb_src_factor = *rgb_src_factor;
+ blend.rgb_dst_factor = *rgb_dst_factor;
+ blend.alpha_func = *alpha_func;
+ blend.alpha_src_factor = *alpha_src_factor;
+ blend.alpha_dst_factor = *alpha_dst_factor;
+ blend.colormask = PIPE_MASK_RGBA;
+
+ if(!test_one(verbose, fp, &blend, mode, *type))
+ success = FALSE;
+ }
+
+ return success;
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_test_conv.c b/src/gallium/drivers/llvmpipe/lp_test_conv.c
new file mode 100644
index 0000000000..e6489834af
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_test_conv.c
@@ -0,0 +1,427 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * @file
+ * Unit tests for type conversion.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#include "lp_bld_type.h"
+#include "lp_bld_const.h"
+#include "lp_bld_conv.h"
+#include "lp_bld_debug.h"
+#include "lp_test.h"
+
+
+typedef void (*conv_test_ptr_t)(const void *src, const void *dst);
+
+
+void
+write_tsv_header(FILE *fp)
+{
+ fprintf(fp,
+ "result\t"
+ "cycles_per_channel\t"
+ "src_type\t"
+ "dst_type\n");
+
+ fflush(fp);
+}
+
+
+static void
+write_tsv_row(FILE *fp,
+ union lp_type src_type,
+ union lp_type dst_type,
+ double cycles,
+ boolean success)
+{
+ fprintf(fp, "%s\t", success ? "pass" : "fail");
+
+ fprintf(fp, "%.1f\t", cycles / MAX2(src_type.length, dst_type.length));
+
+ dump_type(fp, src_type);
+ fprintf(fp, "\t");
+
+ dump_type(fp, dst_type);
+ fprintf(fp, "\n");
+
+ fflush(fp);
+}
+
+
+static void
+dump_conv_types(FILE *fp,
+ union lp_type src_type,
+ union lp_type dst_type)
+{
+ fprintf(fp, "src_type=");
+ dump_type(fp, src_type);
+
+ fprintf(fp, " dst_type=");
+ dump_type(fp, dst_type);
+
+ fprintf(fp, " ...\n");
+ fflush(fp);
+}
+
+
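+/**
+ * Build a module-level "test" function with the conv_test_ptr_t signature:
+ * it loads num_srcs vectors of src_type from the first pointer, converts
+ * them with lp_build_conv(), and stores num_dsts vectors of dst_type through
+ * the second pointer.
+ */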
+static LLVMValueRef
+add_conv_test(LLVMModuleRef module,
+ union lp_type src_type, unsigned num_srcs,
+ union lp_type dst_type, unsigned num_dsts)
+{
+ LLVMTypeRef args[2];
+ LLVMValueRef func;
+ LLVMValueRef src_ptr;
+ LLVMValueRef dst_ptr;
+ LLVMBasicBlockRef block;
+ LLVMBuilderRef builder;
+ LLVMValueRef src[LP_MAX_VECTOR_LENGTH];
+ LLVMValueRef dst[LP_MAX_VECTOR_LENGTH];
+ unsigned i;
+
+ args[0] = LLVMPointerType(lp_build_vec_type(src_type), 0);
+ args[1] = LLVMPointerType(lp_build_vec_type(dst_type), 0);
+
+ func = LLVMAddFunction(module, "test", LLVMFunctionType(LLVMVoidType(), args, 2, 0));
+ LLVMSetFunctionCallConv(func, LLVMCCallConv);
+ src_ptr = LLVMGetParam(func, 0);
+ dst_ptr = LLVMGetParam(func, 1);
+
+ block = LLVMAppendBasicBlock(func, "entry");
+ builder = LLVMCreateBuilder();
+ LLVMPositionBuilderAtEnd(builder, block);
+
+ for(i = 0; i < num_srcs; ++i) {
+ LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0);
+ LLVMValueRef ptr = LLVMBuildGEP(builder, src_ptr, &index, 1, "");
+ src[i] = LLVMBuildLoad(builder, ptr, "");
+ }
+
+ lp_build_conv(builder, src_type, dst_type, src, num_srcs, dst, num_dsts);
+
+ for(i = 0; i < num_dsts; ++i) {
+ LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0);
+ LLVMValueRef ptr = LLVMBuildGEP(builder, dst_ptr, &index, 1, "");
+ LLVMBuildStore(builder, dst[i], ptr);
+ }
+
+   LLVMBuildRetVoid(builder);
+
+ LLVMDisposeBuilder(builder);
+ return func;
+}
+
+
+static boolean
+test_one(unsigned verbose,
+ FILE *fp,
+ union lp_type src_type,
+ union lp_type dst_type)
+{
+ LLVMModuleRef module = NULL;
+ LLVMValueRef func = NULL;
+ LLVMExecutionEngineRef engine = NULL;
+ LLVMModuleProviderRef provider = NULL;
+ LLVMPassManagerRef pass = NULL;
+ char *error = NULL;
+ conv_test_ptr_t conv_test_ptr;
+ boolean success;
+ const unsigned n = 32;
+ int64_t cycles[n];
+ double cycles_avg = 0.0;
+ unsigned num_srcs;
+ unsigned num_dsts;
+ double eps;
+ unsigned i, j;
+
+ if(verbose >= 1)
+ dump_conv_types(stdout, src_type, dst_type);
+
+ if(src_type.length > dst_type.length) {
+ num_srcs = 1;
+ num_dsts = src_type.length/dst_type.length;
+ }
+ else {
+ num_dsts = 1;
+ num_srcs = dst_type.length/src_type.length;
+ }
+
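+   /*
+    * E.g. converting f32x4 -> u8nx16 gives num_srcs = 4 and num_dsts = 1:
+    * sixteen channels in, sixteen channels out, only the precision changes.
+    */
+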
+ assert(src_type.width * src_type.length == dst_type.width * dst_type.length);
+
+   /* We must not lose or gain channels, only precision */
+ assert(src_type.length * num_srcs == dst_type.length * num_dsts);
+
+ eps = MAX2(lp_const_eps(src_type), lp_const_eps(dst_type));
+
+ module = LLVMModuleCreateWithName("test");
+
+ func = add_conv_test(module, src_type, num_srcs, dst_type, num_dsts);
+
+ if(LLVMVerifyModule(module, LLVMPrintMessageAction, &error)) {
+ LLVMDumpModule(module);
+ abort();
+ }
+ LLVMDisposeMessage(error);
+
+ provider = LLVMCreateModuleProviderForExistingModule(module);
+ if (LLVMCreateJITCompiler(&engine, provider, 1, &error)) {
+ if(verbose < 1)
+ dump_conv_types(stderr, src_type, dst_type);
+ fprintf(stderr, "%s\n", error);
+ LLVMDisposeMessage(error);
+ abort();
+ }
+
+#if 0
+ pass = LLVMCreatePassManager();
+ LLVMAddTargetData(LLVMGetExecutionEngineTargetData(engine), pass);
+ /* These are the passes currently listed in llvm-c/Transforms/Scalar.h,
+ * but there are more on SVN. */
+ LLVMAddConstantPropagationPass(pass);
+ LLVMAddInstructionCombiningPass(pass);
+ LLVMAddPromoteMemoryToRegisterPass(pass);
+ LLVMAddGVNPass(pass);
+ LLVMAddCFGSimplificationPass(pass);
+ LLVMRunPassManager(pass, module);
+#else
+ (void)pass;
+#endif
+
+ if(verbose >= 2)
+ LLVMDumpModule(module);
+
+ conv_test_ptr = (conv_test_ptr_t)LLVMGetPointerToGlobal(engine, func);
+
+ if(verbose >= 2)
+ lp_disassemble(conv_test_ptr);
+
+ success = TRUE;
+ for(i = 0; i < n && success; ++i) {
+ unsigned src_stride = src_type.length*src_type.width/8;
+ unsigned dst_stride = dst_type.length*dst_type.width/8;
+ uint8_t src[LP_MAX_VECTOR_LENGTH*LP_MAX_VECTOR_LENGTH];
+ uint8_t dst[LP_MAX_VECTOR_LENGTH*LP_MAX_VECTOR_LENGTH];
+ double fref[LP_MAX_VECTOR_LENGTH*LP_MAX_VECTOR_LENGTH];
+ uint8_t ref[LP_MAX_VECTOR_LENGTH*LP_MAX_VECTOR_LENGTH];
+ int64_t start_counter = 0;
+ int64_t end_counter = 0;
+
+ for(j = 0; j < num_srcs; ++j) {
+ random_vec(src_type, src + j*src_stride);
+ read_vec(src_type, src + j*src_stride, fref + j*src_type.length);
+ }
+
+ for(j = 0; j < num_dsts; ++j) {
+ write_vec(dst_type, ref + j*dst_stride, fref + j*dst_type.length);
+ }
+
+ start_counter = rdtsc();
+ conv_test_ptr(src, dst);
+ end_counter = rdtsc();
+
+ cycles[i] = end_counter - start_counter;
+
+ for(j = 0; j < num_dsts; ++j) {
+ if(!compare_vec_with_eps(dst_type, dst + j*dst_stride, ref + j*dst_stride, eps))
+ success = FALSE;
+ }
+
+ if (!success) {
+ if(verbose < 1)
+ dump_conv_types(stderr, src_type, dst_type);
+ fprintf(stderr, "MISMATCH\n");
+
+ for(j = 0; j < num_srcs; ++j) {
+ fprintf(stderr, " Src%u: ", j);
+ dump_vec(stderr, src_type, src + j*src_stride);
+ fprintf(stderr, "\n");
+ }
+
+#if 1
+ fprintf(stderr, " Ref: ");
+ for(j = 0; j < src_type.length*num_srcs; ++j)
+ fprintf(stderr, " %f", fref[j]);
+ fprintf(stderr, "\n");
+#endif
+
+ for(j = 0; j < num_dsts; ++j) {
+ fprintf(stderr, " Dst%u: ", j);
+ dump_vec(stderr, dst_type, dst + j*dst_stride);
+ fprintf(stderr, "\n");
+
+ fprintf(stderr, " Ref%u: ", j);
+ dump_vec(stderr, dst_type, ref + j*dst_stride);
+ fprintf(stderr, "\n");
+ }
+ }
+ }
+
+   /*
+    * Unfortunately the output of the cycle counter is not very reliable as it
+    * comes -- sometimes we get outliers (due to IRQs perhaps?) which are
+    * better removed to avoid random or biased data: samples more than four
+    * standard deviations from the mean are discarded and the rest averaged.
+    */
+ {
+ double sum = 0.0, sum2 = 0.0;
+ double avg, std;
+ unsigned m;
+
+ for(i = 0; i < n; ++i) {
+ sum += cycles[i];
+ sum2 += cycles[i]*cycles[i];
+ }
+
+ avg = sum/n;
+ std = sqrtf((sum2 - n*avg*avg)/n);
+
+ m = 0;
+ sum = 0.0;
+ for(i = 0; i < n; ++i) {
+ if(fabs(cycles[i] - avg) <= 4.0*std) {
+ sum += cycles[i];
+ ++m;
+ }
+ }
+
+ cycles_avg = sum/m;
+
+ }
+
+ if(fp)
+ write_tsv_row(fp, src_type, dst_type, cycles_avg, success);
+
+ if (!success) {
+ static boolean firsttime = TRUE;
+ if(firsttime) {
+ if(verbose < 2)
+ LLVMDumpModule(module);
+ LLVMWriteBitcodeToFile(module, "conv.bc");
+ fprintf(stderr, "conv.bc written\n");
+ fprintf(stderr, "Invoke as \"llc -o - conv.bc\"\n");
+ firsttime = FALSE;
+ //abort();
+ }
+ }
+
+ LLVMFreeMachineCodeForFunction(engine, func);
+
+ LLVMDisposeExecutionEngine(engine);
+ if(pass)
+ LLVMDisposePassManager(pass);
+
+ return success;
+}
+
+
+const union lp_type conv_types[] = {
+ /* float, fixed, sign, norm, width, len */
+
+ {{ TRUE, FALSE, TRUE, TRUE, 32, 4 }},
+ {{ TRUE, FALSE, TRUE, FALSE, 32, 4 }},
+ {{ TRUE, FALSE, FALSE, TRUE, 32, 4 }},
+ {{ TRUE, FALSE, FALSE, FALSE, 32, 4 }},
+
+ /* TODO: test fixed formats too */
+
+ {{ FALSE, FALSE, TRUE, TRUE, 16, 8 }},
+ {{ FALSE, FALSE, TRUE, FALSE, 16, 8 }},
+ {{ FALSE, FALSE, FALSE, TRUE, 16, 8 }},
+ {{ FALSE, FALSE, FALSE, FALSE, 16, 8 }},
+
+ {{ FALSE, FALSE, TRUE, TRUE, 32, 4 }},
+ {{ FALSE, FALSE, TRUE, FALSE, 32, 4 }},
+ {{ FALSE, FALSE, FALSE, TRUE, 32, 4 }},
+ {{ FALSE, FALSE, FALSE, FALSE, 32, 4 }},
+
+ {{ FALSE, FALSE, TRUE, TRUE, 16, 8 }},
+ {{ FALSE, FALSE, TRUE, FALSE, 16, 8 }},
+ {{ FALSE, FALSE, FALSE, TRUE, 16, 8 }},
+ {{ FALSE, FALSE, FALSE, FALSE, 16, 8 }},
+
+ {{ FALSE, FALSE, TRUE, TRUE, 8, 16 }},
+ {{ FALSE, FALSE, TRUE, FALSE, 8, 16 }},
+ {{ FALSE, FALSE, FALSE, TRUE, 8, 16 }},
+ {{ FALSE, FALSE, FALSE, FALSE, 8, 16 }},
+};
+
+
+const unsigned num_types = sizeof(conv_types)/sizeof(conv_types[0]);
+
+
+boolean
+test_all(unsigned verbose, FILE *fp)
+{
+ const union lp_type *src_type;
+ const union lp_type *dst_type;
+ boolean success = TRUE;
+
+ for(src_type = conv_types; src_type < &conv_types[num_types]; ++src_type) {
+ for(dst_type = conv_types; dst_type < &conv_types[num_types]; ++dst_type) {
+
+ if(src_type == dst_type)
+ continue;
+
+ if(src_type->norm != dst_type->norm)
+ continue;
+
+ if(!test_one(verbose, fp, *src_type, *dst_type))
+ success = FALSE;
+
+ }
+ }
+
+ return success;
+}
+
+
+boolean
+test_some(unsigned verbose, FILE *fp, unsigned long n)
+{
+ const union lp_type *src_type;
+ const union lp_type *dst_type;
+ unsigned long i;
+ boolean success = TRUE;
+
+ for(i = 0; i < n; ++i) {
+ src_type = &conv_types[random() % num_types];
+
+ do {
+ dst_type = &conv_types[random() % num_types];
+ } while (src_type == dst_type || src_type->norm != dst_type->norm);
+
+ if(!test_one(verbose, fp, *src_type, *dst_type))
+ success = FALSE;
+ }
+
+ return success;
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_test_format.c b/src/gallium/drivers/llvmpipe/lp_test_format.c
new file mode 100644
index 0000000000..d8455e5649
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_test_format.c
@@ -0,0 +1,272 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <llvm-c/Core.h>
+#include <llvm-c/Analysis.h>
+#include <llvm-c/ExecutionEngine.h>
+#include <llvm-c/Target.h>
+#include <llvm-c/Transforms/Scalar.h>
+
+#include "util/u_format.h"
+
+#include "lp_bld_flow.h"
+#include "lp_bld_format.h"
+
+
+struct pixel_test_case
+{
+ enum pipe_format format;
+ uint32_t packed;
+ double unpacked[4];
+};
+
+
+struct pixel_test_case test_cases[] =
+{
+ {PIPE_FORMAT_R5G6B5_UNORM, 0x0000, {0.0, 0.0, 0.0, 1.0}},
+ {PIPE_FORMAT_R5G6B5_UNORM, 0x001f, {0.0, 0.0, 1.0, 1.0}},
+ {PIPE_FORMAT_R5G6B5_UNORM, 0x07e0, {0.0, 1.0, 0.0, 1.0}},
+ {PIPE_FORMAT_R5G6B5_UNORM, 0xf800, {1.0, 0.0, 0.0, 1.0}},
+ {PIPE_FORMAT_R5G6B5_UNORM, 0xffff, {1.0, 1.0, 1.0, 1.0}},
+
+ {PIPE_FORMAT_A1R5G5B5_UNORM, 0x0000, {0.0, 0.0, 0.0, 0.0}},
+ {PIPE_FORMAT_A1R5G5B5_UNORM, 0x001f, {0.0, 0.0, 1.0, 0.0}},
+ {PIPE_FORMAT_A1R5G5B5_UNORM, 0x03e0, {0.0, 1.0, 0.0, 0.0}},
+ {PIPE_FORMAT_A1R5G5B5_UNORM, 0x7c00, {1.0, 0.0, 0.0, 0.0}},
+ {PIPE_FORMAT_A1R5G5B5_UNORM, 0x8000, {0.0, 0.0, 0.0, 1.0}},
+ {PIPE_FORMAT_A1R5G5B5_UNORM, 0xffff, {1.0, 1.0, 1.0, 1.0}},
+
+ {PIPE_FORMAT_A8R8G8B8_UNORM, 0x00000000, {0.0, 0.0, 0.0, 0.0}},
+ {PIPE_FORMAT_A8R8G8B8_UNORM, 0x000000ff, {0.0, 0.0, 1.0, 0.0}},
+ {PIPE_FORMAT_A8R8G8B8_UNORM, 0x0000ff00, {0.0, 1.0, 0.0, 0.0}},
+ {PIPE_FORMAT_A8R8G8B8_UNORM, 0x00ff0000, {1.0, 0.0, 0.0, 0.0}},
+ {PIPE_FORMAT_A8R8G8B8_UNORM, 0xff000000, {0.0, 0.0, 0.0, 1.0}},
+ {PIPE_FORMAT_A8R8G8B8_UNORM, 0xffffffff, {1.0, 1.0, 1.0, 1.0}},
+
+#if 0
+ {PIPE_FORMAT_R8G8B8A8_UNORM, 0x00000000, {0.0, 0.0, 0.0, 0.0}},
+ {PIPE_FORMAT_R8G8B8A8_UNORM, 0x000000ff, {0.0, 0.0, 0.0, 1.0}},
+ {PIPE_FORMAT_R8G8B8A8_UNORM, 0x0000ff00, {0.0, 0.0, 1.0, 0.0}},
+ {PIPE_FORMAT_R8G8B8A8_UNORM, 0x00ff0000, {0.0, 1.0, 0.0, 0.0}},
+ {PIPE_FORMAT_R8G8B8A8_UNORM, 0xff000000, {1.0, 0.0, 0.0, 0.0}},
+ {PIPE_FORMAT_R8G8B8A8_UNORM, 0xffffffff, {1.0, 1.0, 1.0, 1.0}},
+#endif
+
+ {PIPE_FORMAT_B8G8R8A8_UNORM, 0x00000000, {0.0, 0.0, 0.0, 0.0}},
+ {PIPE_FORMAT_B8G8R8A8_UNORM, 0x000000ff, {0.0, 0.0, 0.0, 1.0}},
+ {PIPE_FORMAT_B8G8R8A8_UNORM, 0x0000ff00, {1.0, 0.0, 0.0, 0.0}},
+ {PIPE_FORMAT_B8G8R8A8_UNORM, 0x00ff0000, {0.0, 1.0, 0.0, 0.0}},
+ {PIPE_FORMAT_B8G8R8A8_UNORM, 0xff000000, {0.0, 0.0, 1.0, 0.0}},
+ {PIPE_FORMAT_B8G8R8A8_UNORM, 0xffffffff, {1.0, 1.0, 1.0, 1.0}},
+};
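+
+/*
+ * Informal sanity check on the packed values above, as the table's own
+ * entries show: for PIPE_FORMAT_R5G6B5_UNORM the green channel occupies
+ * bits [10:5], so 0x07e0 has all six green bits set and unpacks to
+ * (0.0, 1.0, 0.0, 1.0).
+ */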
+
+
+typedef void (*load_ptr_t)(const void *, float *);
+
+
+static LLVMValueRef
+add_load_rgba_test(LLVMModuleRef module,
+ enum pipe_format format)
+{
+ LLVMTypeRef args[2];
+ LLVMValueRef func;
+ LLVMValueRef ptr;
+ LLVMValueRef rgba_ptr;
+ LLVMBasicBlockRef block;
+ LLVMBuilderRef builder;
+ LLVMValueRef rgba;
+ struct lp_build_loop_state loop;
+
+ args[0] = LLVMPointerType(LLVMInt8Type(), 0);
+ args[1] = LLVMPointerType(LLVMVectorType(LLVMFloatType(), 4), 0);
+
+ func = LLVMAddFunction(module, "load", LLVMFunctionType(LLVMVoidType(), args, 2, 0));
+ LLVMSetFunctionCallConv(func, LLVMCCallConv);
+ ptr = LLVMGetParam(func, 0);
+ rgba_ptr = LLVMGetParam(func, 1);
+
+ block = LLVMAppendBasicBlock(func, "entry");
+ builder = LLVMCreateBuilder();
+ LLVMPositionBuilderAtEnd(builder, block);
+
+ lp_build_loop_begin(builder, LLVMConstInt(LLVMInt32Type(), 1, 0), &loop);
+
+ rgba = lp_build_load_rgba_aos(builder, format, ptr);
+ LLVMBuildStore(builder, rgba, rgba_ptr);
+
+ lp_build_loop_end(builder, LLVMConstInt(LLVMInt32Type(), 4, 0), NULL, &loop);
+
+ LLVMBuildRetVoid(builder);
+
+ LLVMDisposeBuilder(builder);
+ return func;
+}
+
+
+typedef void (*store_ptr_t)(void *, const float *);
+
+
+static LLVMValueRef
+add_store_rgba_test(LLVMModuleRef module,
+ enum pipe_format format)
+{
+ LLVMTypeRef args[2];
+ LLVMValueRef func;
+ LLVMValueRef ptr;
+ LLVMValueRef rgba_ptr;
+ LLVMBasicBlockRef block;
+ LLVMBuilderRef builder;
+ LLVMValueRef rgba;
+
+ args[0] = LLVMPointerType(LLVMInt8Type(), 0);
+ args[1] = LLVMPointerType(LLVMVectorType(LLVMFloatType(), 4), 0);
+
+ func = LLVMAddFunction(module, "store", LLVMFunctionType(LLVMVoidType(), args, 2, 0));
+ LLVMSetFunctionCallConv(func, LLVMCCallConv);
+ ptr = LLVMGetParam(func, 0);
+ rgba_ptr = LLVMGetParam(func, 1);
+
+ block = LLVMAppendBasicBlock(func, "entry");
+ builder = LLVMCreateBuilder();
+ LLVMPositionBuilderAtEnd(builder, block);
+
+ rgba = LLVMBuildLoad(builder, rgba_ptr, "");
+
+ lp_build_store_rgba_aos(builder, format, ptr, rgba);
+
+ LLVMBuildRetVoid(builder);
+
+ LLVMDisposeBuilder(builder);
+ return func;
+}
+
+
+static boolean
+test_format(const struct pixel_test_case *test)
+{
+ LLVMModuleRef module = NULL;
+ LLVMValueRef load = NULL;
+ LLVMValueRef store = NULL;
+ LLVMExecutionEngineRef engine = NULL;
+ LLVMModuleProviderRef provider = NULL;
+ LLVMPassManagerRef pass = NULL;
+ char *error = NULL;
+ const struct util_format_description *desc;
+ load_ptr_t load_ptr;
+ store_ptr_t store_ptr;
+ float unpacked[4];
+ unsigned packed;
+ boolean success;
+ unsigned i;
+
+ desc = util_format_description(test->format);
+ fprintf(stderr, "%s\n", desc->name);
+
+ module = LLVMModuleCreateWithName("test");
+
+ load = add_load_rgba_test(module, test->format);
+ store = add_store_rgba_test(module, test->format);
+
+ if(LLVMVerifyModule(module, LLVMPrintMessageAction, &error)) {
+ LLVMDumpModule(module);
+ abort();
+ }
+ LLVMDisposeMessage(error);
+
+ provider = LLVMCreateModuleProviderForExistingModule(module);
+ if (LLVMCreateJITCompiler(&engine, provider, 1, &error)) {
+ fprintf(stderr, "%s\n", error);
+ LLVMDisposeMessage(error);
+ abort();
+ }
+
+#if 0
+ pass = LLVMCreatePassManager();
+ LLVMAddTargetData(LLVMGetExecutionEngineTargetData(engine), pass);
+ /* These are the passes currently listed in llvm-c/Transforms/Scalar.h,
+ * but there are more on SVN. */
+ LLVMAddConstantPropagationPass(pass);
+ LLVMAddInstructionCombiningPass(pass);
+ LLVMAddPromoteMemoryToRegisterPass(pass);
+ LLVMAddGVNPass(pass);
+ LLVMAddCFGSimplificationPass(pass);
+ LLVMRunPassManager(pass, module);
+#else
+ (void)pass;
+#endif
+
+ load_ptr = (load_ptr_t) LLVMGetPointerToGlobal(engine, load);
+ store_ptr = (store_ptr_t)LLVMGetPointerToGlobal(engine, store);
+
+ memset(unpacked, 0, sizeof unpacked);
+ packed = 0;
+
+ load_ptr(&test->packed, unpacked);
+ store_ptr(&packed, unpacked);
+
+ success = TRUE;
+ if(test->packed != packed)
+ success = FALSE;
+ for(i = 0; i < 4; ++i)
+ if(test->unpacked[i] != unpacked[i])
+ success = FALSE;
+
+ if (!success) {
+ printf("FAILED\n");
+ printf(" Packed: %08x\n", test->packed);
+ printf(" %08x\n", packed);
+ printf(" Unpacked: %f %f %f %f\n", unpacked[0], unpacked[1], unpacked[2], unpacked[3]);
+ printf(" %f %f %f %f\n", test->unpacked[0], test->unpacked[1], test->unpacked[2], test->unpacked[3]);
+ LLVMDumpModule(module);
+ }
+
+ LLVMFreeMachineCodeForFunction(engine, store);
+ LLVMFreeMachineCodeForFunction(engine, load);
+
+ LLVMDisposeExecutionEngine(engine);
+ if(pass)
+ LLVMDisposePassManager(pass);
+
+ return success;
+}
+
+
+int main(int argc, char **argv)
+{
+ unsigned i;
+ int ret = 0;
+
+ for (i = 0; i < sizeof(test_cases)/sizeof(test_cases[0]); ++i)
+ if(!test_format(&test_cases[i]))
+ ret = 1;
+
+ return ret;
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_test_main.c b/src/gallium/drivers/llvmpipe/lp_test_main.c
new file mode 100644
index 0000000000..49213fb4f0
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_test_main.c
@@ -0,0 +1,384 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * @file
+ * Shared testing code.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+
+#include "lp_bld_const.h"
+#include "lp_test.h"
+
+
+void
+dump_type(FILE *fp,
+ union lp_type type)
+{
+ fprintf(fp, "%s%s%u%sx%u",
+ type.sign ? (type.floating || type.fixed ? "" : "s") : "u",
+ type.floating ? "f" : (type.fixed ? "h" : "i"),
+ type.width,
+ type.norm ? "n" : "",
+ type.length);
+}
+
+
+double
+read_elem(union lp_type type, const void *src, unsigned index)
+{
+ double scale = lp_const_scale(type);
+ double value;
+ assert(index < type.length);
+ if (type.floating) {
+ switch(type.width) {
+ case 32:
+ value = *((const float *)src + index);
+ break;
+ case 64:
+ value = *((const double *)src + index);
+ break;
+ default:
+ assert(0);
+ return 0.0;
+ }
+ }
+ else {
+ if(type.sign) {
+ switch(type.width) {
+ case 8:
+ value = *((const int8_t *)src + index);
+ break;
+ case 16:
+ value = *((const int16_t *)src + index);
+ break;
+ case 32:
+ value = *((const int32_t *)src + index);
+ break;
+ case 64:
+ value = *((const int64_t *)src + index);
+ break;
+ default:
+ assert(0);
+ return 0.0;
+ }
+ }
+ else {
+ switch(type.width) {
+ case 8:
+ value = *((const uint8_t *)src + index);
+ break;
+ case 16:
+ value = *((const uint16_t *)src + index);
+ break;
+ case 32:
+ value = *((const uint32_t *)src + index);
+ break;
+ case 64:
+ value = *((const uint64_t *)src + index);
+ break;
+ default:
+ assert(0);
+ return 0.0;
+ }
+ }
+ }
+ return value/scale;
+}
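+
+/*
+ * Presumably lp_const_scale() returns the maximum representable magnitude
+ * for normalized types (e.g. 255 for an 8-bit unsigned normalized type), so
+ * read_elem() maps raw storage back into the type's nominal [0,1] or [-1,1]
+ * range; the exact scale comes from lp_bld_const.h.
+ */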
+
+
+void
+write_elem(union lp_type type, void *dst, unsigned index, double value)
+{
+ assert(index < type.length);
+ if(!type.sign && value < 0.0)
+ value = 0.0;
+ if(type.norm && value < -1.0)
+ value = -1.0;
+ if(type.norm && value > 1.0)
+ value = 1.0;
+ if (type.floating) {
+ switch(type.width) {
+ case 32:
+ *((float *)dst + index) = (float)(value);
+ break;
+ case 64:
+ *((double *)dst + index) = value;
+ break;
+ default:
+ assert(0);
+ }
+ }
+ else {
+ double scale = lp_const_scale(type);
+ value = round(value*scale);
+ if(type.sign) {
+ long long lvalue = (long long)value;
+ lvalue = MIN2(lvalue, ((long long)1 << (type.width - 1)) - 1);
+ switch(type.width) {
+ case 8:
+ *((int8_t *)dst + index) = (int8_t)lvalue;
+ break;
+ case 16:
+ *((int16_t *)dst + index) = (int16_t)lvalue;
+ break;
+ case 32:
+ *((int32_t *)dst + index) = (int32_t)lvalue;
+ break;
+ case 64:
+ *((int64_t *)dst + index) = (int64_t)lvalue;
+ break;
+ default:
+ assert(0);
+ }
+ }
+ else {
+ unsigned long long lvalue = (unsigned long long)value;
+ lvalue = MIN2(lvalue, ((unsigned long long)1 << type.width) - 1);
+ switch(type.width) {
+ case 8:
+ *((uint8_t *)dst + index) = (uint8_t)lvalue;
+ break;
+ case 16:
+ *((uint16_t *)dst + index) = (uint16_t)lvalue;
+ break;
+ case 32:
+ *((uint32_t *)dst + index) = (uint32_t)lvalue;
+ break;
+ case 64:
+ *((uint64_t *)dst + index) = (uint64_t)lvalue;
+ break;
+ default:
+ assert(0);
+ }
+ }
+ }
+}
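+
+/*
+ * Example (assuming the 255 scale noted above): writing 1.5 into an 8-bit
+ * unsigned normalized element clamps to 1.0 and stores 0xff, while negative
+ * values written to an unsigned type clamp to 0.
+ */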
+
+
+void
+random_elem(union lp_type type, void *dst, unsigned index)
+{
+ double value;
+ assert(index < type.length);
+ value = (double)random()/(double)RAND_MAX;
+ if(!type.norm) {
+ unsigned long long mask;
+ if (type.floating)
+ mask = ~(unsigned long long)0;
+ else if (type.fixed)
+ mask = ((unsigned long long)1 << (type.width / 2)) - 1;
+ else if (type.sign)
+ mask = ((unsigned long long)1 << (type.width - 1)) - 1;
+ else
+ mask = ((unsigned long long)1 << type.width) - 1;
+ value += (double)(mask & random());
+ }
+ if(!type.sign)
+ if(random() & 1)
+ value = -value;
+ write_elem(type, dst, index, value);
+}
+
+
+void
+read_vec(union lp_type type, const void *src, double *dst)
+{
+ unsigned i;
+ for (i = 0; i < type.length; ++i)
+ dst[i] = read_elem(type, src, i);
+}
+
+
+void
+write_vec(union lp_type type, void *dst, const double *src)
+{
+ unsigned i;
+ for (i = 0; i < type.length; ++i)
+ write_elem(type, dst, i, src[i]);
+}
+
+
+float
+random_float(void)
+{
+ return (float)((double)random()/(double)RAND_MAX);
+}
+
+
+void
+random_vec(union lp_type type, void *dst)
+{
+ unsigned i;
+ for (i = 0; i < type.length; ++i)
+ random_elem(type, dst, i);
+}
+
+
+boolean
+compare_vec_with_eps(union lp_type type, const void *res, const void *ref, double eps)
+{
+ unsigned i;
+ for (i = 0; i < type.length; ++i) {
+ double res_elem = read_elem(type, res, i);
+ double ref_elem = read_elem(type, ref, i);
+ double delta = fabs(res_elem - ref_elem);
+ if(delta >= 2.0*eps)
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+
+boolean
+compare_vec(union lp_type type, const void *res, const void *ref)
+{
+ double eps = lp_const_eps(type);
+ return compare_vec_with_eps(type, res, ref, eps);
+}
+
+
+void
+dump_vec(FILE *fp, union lp_type type, const void *src)
+{
+ unsigned i;
+ for (i = 0; i < type.length; ++i) {
+ if(i)
+ fprintf(fp, " ");
+ if (type.floating) {
+ double value;
+ switch(type.width) {
+ case 32:
+ value = *((const float *)src + i);
+ break;
+ case 64:
+ value = *((const double *)src + i);
+ break;
+ default:
+ assert(0);
+ value = 0.0;
+ }
+ fprintf(fp, "%f", value);
+ }
+ else {
+ if(type.sign && !type.norm) {
+ long long value;
+ const char *format;
+ switch(type.width) {
+ case 8:
+ value = *((const int8_t *)src + i);
+ format = "%3lli";
+ break;
+ case 16:
+ value = *((const int16_t *)src + i);
+ format = "%5lli";
+ break;
+ case 32:
+ value = *((const int32_t *)src + i);
+ format = "%10lli";
+ break;
+ case 64:
+ value = *((const int64_t *)src + i);
+ format = "%20lli";
+ break;
+ default:
+ assert(0);
+ value = 0;
+ format = "?";
+ }
+ fprintf(fp, format, value);
+ }
+ else {
+ unsigned long long value;
+ const char *format;
+ switch(type.width) {
+ case 8:
+ value = *((const uint8_t *)src + i);
+ format = type.norm ? "%2x" : "%4llu";
+ break;
+ case 16:
+ value = *((const uint16_t *)src + i);
+ format = type.norm ? "%4x" : "%6llx";
+ break;
+ case 32:
+ value = *((const uint32_t *)src + i);
+ format = type.norm ? "%8x" : "%11llx";
+ break;
+ case 64:
+ value = *((const uint64_t *)src + i);
+ format = type.norm ? "%16x" : "%21llx";
+ break;
+ default:
+ assert(0);
+ value = 0;
+ format = "?";
+ }
+ fprintf(fp, format, value);
+ }
+ }
+ }
+}
+
+
+int main(int argc, char **argv)
+{
+ unsigned verbose = 0;
+ FILE *fp = NULL;
+ unsigned long n = 1000;
+ unsigned i;
+ boolean success;
+
+ for(i = 1; i < argc; ++i) {
+ if(strcmp(argv[i], "-v") == 0)
+ ++verbose;
+ else if(strcmp(argv[i], "-o") == 0)
+ fp = fopen(argv[++i], "wt");
+ else
+ n = atoi(argv[i]);
+ }
+
+ if(fp) {
+ /* Warm up the caches */
+ test_some(0, NULL, 100);
+
+ write_tsv_header(fp);
+ }
+
+ if(n)
+ success = test_some(verbose, fp, n);
+ else
+ success = test_all(verbose, fp);
+
+ if(fp)
+ fclose(fp);
+
+ return success ? 0 : 1;
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_tex_cache.c b/src/gallium/drivers/llvmpipe/lp_tex_cache.c
new file mode 100644
index 0000000000..773e848242
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_tex_cache.c
@@ -0,0 +1,304 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Texture tile caching.
+ *
+ * Author:
+ * Brian Paul
+ */
+
+#include "pipe/p_inlines.h"
+#include "util/u_memory.h"
+#include "util/u_tile.h"
+#include "util/u_format.h"
+#include "lp_context.h"
+#include "lp_surface.h"
+#include "lp_texture.h"
+#include "lp_tex_cache.h"
+
+
+
+/**
+ * Return the position in the cache for the tile that contains win pos (x,y).
+ * We currently use a direct-mapped cache, so this is essentially a hash key.
+ * At some point we should investigate something more sophisticated, such as
+ * an LRU replacement policy.
+ */
+#define CACHE_POS(x, y) \
+ (((x) + (y) * 5) % NUM_ENTRIES)
+
+
+
+/**
+ * Is the tile at (x,y) in cleared state?
+ */
+static INLINE uint
+is_clear_flag_set(const uint *bitvec, union tex_tile_address addr)
+{
+ int pos, bit;
+ pos = addr.bits.y * (MAX_TEX_WIDTH / TEX_TILE_SIZE) + addr.bits.x;
+ assert(pos / 32 < (MAX_TEX_WIDTH / TEX_TILE_SIZE) * (MAX_TEX_HEIGHT / TEX_TILE_SIZE) / 32);
+ bit = bitvec[pos / 32] & (1 << (pos & 31));
+ return bit;
+}
+
+
+/**
+ * Mark the tile at (x,y) as not cleared.
+ */
+static INLINE void
+clear_clear_flag(uint *bitvec, union tex_tile_address addr)
+{
+ int pos;
+ pos = addr.bits.y * (MAX_TEX_WIDTH / TEX_TILE_SIZE) + addr.bits.x;
+ assert(pos / 32 < (MAX_TEX_WIDTH / TEX_TILE_SIZE) * (MAX_TEX_HEIGHT / TEX_TILE_SIZE) / 32);
+ bitvec[pos / 32] &= ~(1 << (pos & 31));
+}
+
+
+struct llvmpipe_tex_tile_cache *
+lp_create_tex_tile_cache( struct pipe_screen *screen )
+{
+ struct llvmpipe_tex_tile_cache *tc;
+ uint pos;
+
+ tc = CALLOC_STRUCT( llvmpipe_tex_tile_cache );
+ if (tc) {
+ tc->screen = screen;
+ for (pos = 0; pos < NUM_ENTRIES; pos++) {
+ tc->entries[pos].addr.bits.invalid = 1;
+ }
+ tc->last_tile = &tc->entries[0]; /* any tile */
+ }
+ return tc;
+}
+
+
+void
+lp_destroy_tex_tile_cache(struct llvmpipe_tex_tile_cache *tc)
+{
+ struct pipe_screen *screen;
+ uint pos;
+
+ for (pos = 0; pos < NUM_ENTRIES; pos++) {
+ /*assert(tc->entries[pos].x < 0);*/
+ }
+ if (tc->transfer) {
+ screen = tc->transfer->texture->screen;
+ screen->tex_transfer_destroy(tc->transfer);
+ }
+ if (tc->tex_trans) {
+ screen = tc->tex_trans->texture->screen;
+ screen->tex_transfer_destroy(tc->tex_trans);
+ }
+
+ FREE( tc );
+}
+
+
+void
+lp_tex_tile_cache_map_transfers(struct llvmpipe_tex_tile_cache *tc)
+{
+ if (tc->transfer && !tc->transfer_map)
+ tc->transfer_map = tc->screen->transfer_map(tc->screen, tc->transfer);
+
+ if (tc->tex_trans && !tc->tex_trans_map)
+ tc->tex_trans_map = tc->screen->transfer_map(tc->screen, tc->tex_trans);
+}
+
+
+void
+lp_tex_tile_cache_unmap_transfers(struct llvmpipe_tex_tile_cache *tc)
+{
+ if (tc->transfer_map) {
+ tc->screen->transfer_unmap(tc->screen, tc->transfer);
+ tc->transfer_map = NULL;
+ }
+
+ if (tc->tex_trans_map) {
+ tc->screen->transfer_unmap(tc->screen, tc->tex_trans);
+ tc->tex_trans_map = NULL;
+ }
+}
+
+void
+lp_tex_tile_cache_validate_texture(struct llvmpipe_tex_tile_cache *tc)
+{
+ if (tc->texture) {
+ struct llvmpipe_texture *lpt = llvmpipe_texture(tc->texture);
+ if (lpt->timestamp != tc->timestamp) {
+ /* texture was modified, invalidate all cached tiles */
+ uint i;
+ debug_printf("INV %d %d\n", tc->timestamp, lpt->timestamp);
+ for (i = 0; i < NUM_ENTRIES; i++) {
+ tc->entries[i].addr.bits.invalid = 1;
+ }
+
+ tc->timestamp = lpt->timestamp;
+ }
+ }
+}
+
+/**
+ * Specify the texture to cache.
+ */
+void
+lp_tex_tile_cache_set_texture(struct llvmpipe_tex_tile_cache *tc,
+ struct pipe_texture *texture)
+{
+ uint i;
+
+ assert(!tc->transfer);
+
+ if (tc->texture != texture) {
+ pipe_texture_reference(&tc->texture, texture);
+
+ if (tc->tex_trans) {
+ struct pipe_screen *screen = tc->tex_trans->texture->screen;
+
+ if (tc->tex_trans_map) {
+ screen->transfer_unmap(screen, tc->tex_trans);
+ tc->tex_trans_map = NULL;
+ }
+
+ screen->tex_transfer_destroy(tc->tex_trans);
+ tc->tex_trans = NULL;
+ }
+
+ /* mark all entries as invalid/empty */
+ /* XXX we should try to avoid this when the teximage hasn't changed */
+ for (i = 0; i < NUM_ENTRIES; i++) {
+ tc->entries[i].addr.bits.invalid = 1;
+ }
+
+ tc->tex_face = -1; /* any invalid value here */
+ }
+}
+
+
+/**
+ * Given the texture face, level, zslice, x and y values, compute
+ * the cache entry position/index where we'd hope to find the
+ * cached texture tile.
+ * This is basically a direct-mapped cache.
+ * XXX There are probably lots of ways in which we can improve this.
+ */
+static INLINE uint
+tex_cache_pos( union tex_tile_address addr )
+{
+ uint entry = (addr.bits.x +
+ addr.bits.y * 9 +
+ addr.bits.z * 3 +
+ addr.bits.face +
+ addr.bits.level * 7);
+
+ return entry % NUM_ENTRIES;
+}
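+
+/*
+ * For example, a tile address with x=2, y=1 and z/face/level all zero maps
+ * to entry (2 + 1*9) % NUM_ENTRIES = 11. Distinct addresses may collide;
+ * a collision simply shows up as a miss on the next lookup of the evicted
+ * tile.
+ */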
+
+/**
+ * Similar to lp_get_cached_tile() but for textures.
+ * Tiles are read-only and indexed with more params.
+ */
+const struct llvmpipe_cached_tex_tile *
+lp_find_cached_tex_tile(struct llvmpipe_tex_tile_cache *tc,
+ union tex_tile_address addr )
+{
+ struct pipe_screen *screen = tc->screen;
+ struct llvmpipe_cached_tex_tile *tile;
+
+ tile = tc->entries + tex_cache_pos( addr );
+
+ if (addr.value != tile->addr.value) {
+
+ /* cache miss. Most misses are because we've invalidated the
+ * texture cache previously -- most commonly on binding a new
+ * texture. Currently we effectively flush the cache on texture
+ * bind.
+ */
+#if 0
+ _debug_printf("miss at %u: x=%d y=%d z=%d face=%d level=%d\n"
+ " tile %u: x=%d y=%d z=%d face=%d level=%d\n",
+ pos, x/TEX_TILE_SIZE, y/TEX_TILE_SIZE, z, face, level,
+ pos, tile->addr.bits.x, tile->addr.bits.y, tile->z, tile->face, tile->level);
+#endif
+
+ /* check if we need to get a new transfer */
+ if (!tc->tex_trans ||
+ tc->tex_face != addr.bits.face ||
+ tc->tex_level != addr.bits.level ||
+ tc->tex_z != addr.bits.z) {
+ /* get new transfer (view into texture) */
+
+ if (tc->tex_trans) {
+ if (tc->tex_trans_map) {
+ tc->screen->transfer_unmap(tc->screen, tc->tex_trans);
+ tc->tex_trans_map = NULL;
+ }
+
+ screen->tex_transfer_destroy(tc->tex_trans);
+ tc->tex_trans = NULL;
+ }
+
+ tc->tex_trans =
+ screen->get_tex_transfer(screen, tc->texture,
+ addr.bits.face,
+ addr.bits.level,
+ addr.bits.z,
+ PIPE_TRANSFER_READ, 0, 0,
+ tc->texture->width[addr.bits.level],
+ tc->texture->height[addr.bits.level]);
+
+ tc->tex_trans_map = screen->transfer_map(screen, tc->tex_trans);
+
+ tc->tex_face = addr.bits.face;
+ tc->tex_level = addr.bits.level;
+ tc->tex_z = addr.bits.z;
+ }
+
+ {
+ unsigned x = addr.bits.x * TEX_TILE_SIZE;
+ unsigned y = addr.bits.y * TEX_TILE_SIZE;
+ unsigned w = TEX_TILE_SIZE;
+ unsigned h = TEX_TILE_SIZE;
+
+ if (pipe_clip_tile(x, y, &w, &h, tc->tex_trans)) {
+ assert(0);
+ }
+
+ util_format_read_4ub(tc->tex_trans->format,
+ (uint8_t *)tile->color, sizeof tile->color[0],
+ tc->tex_trans_map, tc->tex_trans->stride,
+ x, y, w, h);
+ }
+
+ tile->addr = addr;
+ }
+
+ tc->last_tile = tile;
+ return tile;
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_tex_cache.h b/src/gallium/drivers/llvmpipe/lp_tex_cache.h
new file mode 100644
index 0000000000..9fa6c36812
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_tex_cache.h
@@ -0,0 +1,151 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef LP_TEX_CACHE_H
+#define LP_TEX_CACHE_H
+
+
+#include "pipe/p_compiler.h"
+
+
+struct llvmpipe_context;
+struct llvmpipe_tex_tile_cache;
+
+
+/**
+ * Cache tile size (width and height). This needs to be a power of two.
+ */
+#define TEX_TILE_SIZE 64
+
+
+/* If we need to support > 4096, just expand this to be a 64 bit
+ * union, or consider tiling in Z as well.
+ */
+union tex_tile_address {
+ struct {
+ unsigned x:6; /* 4096 / TEX_TILE_SIZE */
+ unsigned y:6; /* 4096 / TEX_TILE_SIZE */
+ unsigned z:12; /* 4096 -- z not tiled */
+ unsigned face:3;
+ unsigned level:4;
+ unsigned invalid:1;
+ } bits;
+ unsigned value;
+};
+
+
+struct llvmpipe_cached_tex_tile
+{
+ union tex_tile_address addr;
+ uint8_t color[TEX_TILE_SIZE][TEX_TILE_SIZE][4];
+};
+
+#define NUM_ENTRIES 50
+
+
+/** XXX move these */
+#define MAX_TEX_WIDTH 2048
+#define MAX_TEX_HEIGHT 2048
+
+
+struct llvmpipe_tex_tile_cache
+{
+ struct pipe_screen *screen;
+ struct pipe_surface *surface; /**< the surface we're caching */
+ struct pipe_transfer *transfer;
+ void *transfer_map;
+
+ struct pipe_texture *texture; /**< if caching a texture */
+ unsigned timestamp;
+
+ struct llvmpipe_cached_tex_tile entries[NUM_ENTRIES];
+
+ struct pipe_transfer *tex_trans;
+ void *tex_trans_map;
+ int tex_face, tex_level, tex_z;
+
+ struct llvmpipe_cached_tex_tile *last_tile; /**< most recently retrieved tile */
+};
+
+
+extern struct llvmpipe_tex_tile_cache *
+lp_create_tex_tile_cache( struct pipe_screen *screen );
+
+extern void
+lp_destroy_tex_tile_cache(struct llvmpipe_tex_tile_cache *tc);
+
+extern void
+lp_tex_tile_cache_map_transfers(struct llvmpipe_tex_tile_cache *tc);
+
+extern void
+lp_tex_tile_cache_unmap_transfers(struct llvmpipe_tex_tile_cache *tc);
+
+extern void
+lp_tex_tile_cache_set_texture(struct llvmpipe_tex_tile_cache *tc,
+ struct pipe_texture *texture);
+
+void
+lp_tex_tile_cache_validate_texture(struct llvmpipe_tex_tile_cache *tc);
+
+extern const struct llvmpipe_cached_tex_tile *
+lp_find_cached_tex_tile(struct llvmpipe_tex_tile_cache *tc,
+ union tex_tile_address addr );
+
+static INLINE const union tex_tile_address
+tex_tile_address( unsigned x,
+ unsigned y,
+ unsigned z,
+ unsigned face,
+ unsigned level )
+{
+ union tex_tile_address addr;
+
+ addr.value = 0;
+ addr.bits.x = x / TEX_TILE_SIZE;
+ addr.bits.y = y / TEX_TILE_SIZE;
+ addr.bits.z = z;
+ addr.bits.face = face;
+ addr.bits.level = level;
+
+ return addr;
+}
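+
+/*
+ * Example: with TEX_TILE_SIZE = 64, a texel at (x=130, y=70) in face 0,
+ * level 0 gives addr.bits.x = 2 and addr.bits.y = 1, i.e. the tile whose
+ * origin is texel (128, 64).
+ */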
+
+/* Quickly retrieve tile if it matches last lookup.
+ */
+static INLINE const struct llvmpipe_cached_tex_tile *
+lp_get_cached_tex_tile(struct llvmpipe_tex_tile_cache *tc,
+ union tex_tile_address addr )
+{
+ if (tc->last_tile->addr.value == addr.value)
+ return tc->last_tile;
+
+ return lp_find_cached_tex_tile( tc, addr );
+}
+
+
+#endif /* LP_TEX_CACHE_H */
+
diff --git a/src/gallium/drivers/llvmpipe/lp_tex_sample.h b/src/gallium/drivers/llvmpipe/lp_tex_sample.h
new file mode 100644
index 0000000000..9ad1bde956
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_tex_sample.h
@@ -0,0 +1,101 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef LP_TEX_SAMPLE_H
+#define LP_TEX_SAMPLE_H
+
+
+#include <llvm-c/Core.h>
+
+#include "tgsi/tgsi_exec.h"
+
+
+struct llvmpipe_tex_tile_cache;
+struct lp_sampler_static_state;
+
+
+/**
+ * Subclass of tgsi_sampler
+ */
+struct lp_shader_sampler
+{
+ struct tgsi_sampler base; /**< base class */
+
+ unsigned processor;
+
+ /* For lp_get_samples_2d_linear_POT:
+ */
+ unsigned xpot;
+ unsigned ypot;
+ unsigned level;
+
+ const struct pipe_texture *texture;
+ const struct pipe_sampler_state *sampler;
+
+ struct llvmpipe_tex_tile_cache *cache;
+};
+
+
+
+static INLINE struct lp_shader_sampler *
+lp_shader_sampler(const struct tgsi_sampler *sampler)
+{
+ return (struct lp_shader_sampler *) sampler;
+}
+
+
+
+extern void
+lp_get_samples(struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE]);
+
+
+/**
+ * Texture sampling code generator that just calls lp_get_samples C function
+ * for the actual sampling computation.
+ *
+ * @param context_ptr LLVM value with the pointer to the struct lp_jit_context.
+ */
+struct lp_build_sampler_soa *
+lp_c_sampler_soa_create(LLVMValueRef context_ptr);
+
+
+/**
+ * Pure-LLVM texture sampling code generator.
+ *
+ * @param context_ptr LLVM value with the pointer to the struct lp_jit_context.
+ */
+struct lp_build_sampler_soa *
+lp_llvm_sampler_soa_create(const struct lp_sampler_static_state *key,
+ LLVMValueRef context_ptr);
+
+
+#endif /* LP_TEX_SAMPLE_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_tex_sample_c.c b/src/gallium/drivers/llvmpipe/lp_tex_sample_c.c
new file mode 100644
index 0000000000..9a876f404d
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_tex_sample_c.c
@@ -0,0 +1,1713 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ * Copyright 2008 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Texture sampling
+ *
+ * Authors:
+ * Brian Paul
+ */
+
+#include "lp_context.h"
+#include "lp_quad.h"
+#include "lp_surface.h"
+#include "lp_texture.h"
+#include "lp_tex_sample.h"
+#include "lp_tex_cache.h"
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_shader_tokens.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+
+
+/*
+ * Note, the FRAC macro has to work perfectly. Otherwise you'll sometimes
+ * see 1-pixel bands of improperly weighted linear-filtered textures.
+ * The tests/texwrap.c demo is a good test.
+ * Also note, FRAC(x) doesn't truly return the fractional part of x for x < 0.
+ * Instead, if x < 0 then FRAC(x) = 1 - true_frac(x).
+ */
+#define FRAC(f) ((f) - util_ifloor(f))
+
+
+/**
+ * Linear interpolation helper (lerp).
+ */
+static INLINE float
+lerp(float a, float v0, float v1)
+{
+ return v0 + a * (v1 - v0);
+}
+
+
+/**
+ * Do 2D/bilinear interpolation of float values.
+ * v00, v10, v01 and v11 are typically four texture samples in a square/box.
+ * a and b are the horizontal and vertical interpolants.
+ * It's important that this function is inlined when compiled with
+ * optimization! If we find that's not true on some systems, convert
+ * to a macro.
+ */
+static INLINE float
+lerp_2d(float a, float b,
+ float v00, float v10, float v01, float v11)
+{
+ const float temp0 = lerp(a, v00, v10);
+ const float temp1 = lerp(a, v01, v11);
+ return lerp(b, temp0, temp1);
+}
+
+
+/**
+ * As above, but 3D interpolation of 8 values.
+ */
+static INLINE float
+lerp_3d(float a, float b, float c,
+ float v000, float v100, float v010, float v110,
+ float v001, float v101, float v011, float v111)
+{
+ const float temp0 = lerp_2d(a, b, v000, v100, v010, v110);
+ const float temp1 = lerp_2d(a, b, v001, v101, v011, v111);
+ return lerp(c, temp0, temp1);
+}
+
+
+
+/**
+ * If A is a signed integer, A % B doesn't give the right value for A < 0
+ * (in terms of texture repeat). Just casting to unsigned fixes that.
+ */
+#define REMAINDER(A, B) ((unsigned) (A) % (unsigned) (B))
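+
+/*
+ * Example: with a power-of-two size of 8, REMAINDER(-1, 8) yields 7, which
+ * is what GL_REPEAT wrapping expects, whereas the plain signed expression
+ * -1 % 8 would give -1 on most compilers.
+ */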
+
+
+/**
+ * Apply texture coord wrapping mode and return integer texture indexes
+ * for a vector of four texcoords (S or T or P).
+ * \param wrapMode PIPE_TEX_WRAP_x
+ * \param s the incoming texcoords
+ * \param size the texture image size
+ * \param icoord returns the integer texcoords
+ * \return integer texture index
+ */
+static INLINE void
+nearest_texcoord_4(unsigned wrapMode, const float s[4], unsigned size,
+ int icoord[4])
+{
+ uint ch;
+ switch (wrapMode) {
+ case PIPE_TEX_WRAP_REPEAT:
+ /* s limited to [0,1) */
+ /* i limited to [0,size-1] */
+ for (ch = 0; ch < 4; ch++) {
+ int i = util_ifloor(s[ch] * size);
+ icoord[ch] = REMAINDER(i, size);
+ }
+ return;
+ case PIPE_TEX_WRAP_CLAMP:
+ /* s limited to [0,1] */
+ /* i limited to [0,size-1] */
+ for (ch = 0; ch < 4; ch++) {
+ if (s[ch] <= 0.0F)
+ icoord[ch] = 0;
+ else if (s[ch] >= 1.0F)
+ icoord[ch] = size - 1;
+ else
+ icoord[ch] = util_ifloor(s[ch] * size);
+ }
+ return;
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ {
+ /* s limited to [min,max] */
+ /* i limited to [0, size-1] */
+ const float min = 1.0F / (2.0F * size);
+ const float max = 1.0F - min;
+ for (ch = 0; ch < 4; ch++) {
+ if (s[ch] < min)
+ icoord[ch] = 0;
+ else if (s[ch] > max)
+ icoord[ch] = size - 1;
+ else
+ icoord[ch] = util_ifloor(s[ch] * size);
+ }
+ }
+ return;
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ {
+ /* s limited to [min,max] */
+ /* i limited to [-1, size] */
+ const float min = -1.0F / (2.0F * size);
+ const float max = 1.0F - min;
+ for (ch = 0; ch < 4; ch++) {
+ if (s[ch] <= min)
+ icoord[ch] = -1;
+ else if (s[ch] >= max)
+ icoord[ch] = size;
+ else
+ icoord[ch] = util_ifloor(s[ch] * size);
+ }
+ }
+ return;
+ case PIPE_TEX_WRAP_MIRROR_REPEAT:
+ {
+ const float min = 1.0F / (2.0F * size);
+ const float max = 1.0F - min;
+ for (ch = 0; ch < 4; ch++) {
+ const int flr = util_ifloor(s[ch]);
+ float u;
+ if (flr & 1)
+ u = 1.0F - (s[ch] - (float) flr);
+ else
+ u = s[ch] - (float) flr;
+ if (u < min)
+ icoord[ch] = 0;
+ else if (u > max)
+ icoord[ch] = size - 1;
+ else
+ icoord[ch] = util_ifloor(u * size);
+ }
+ }
+ return;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP:
+ for (ch = 0; ch < 4; ch++) {
+ /* s limited to [0,1] */
+ /* i limited to [0,size-1] */
+ const float u = fabsf(s[ch]);
+ if (u <= 0.0F)
+ icoord[ch] = 0;
+ else if (u >= 1.0F)
+ icoord[ch] = size - 1;
+ else
+ icoord[ch] = util_ifloor(u * size);
+ }
+ return;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
+ {
+ /* s limited to [min,max] */
+ /* i limited to [0, size-1] */
+ const float min = 1.0F / (2.0F * size);
+ const float max = 1.0F - min;
+ for (ch = 0; ch < 4; ch++) {
+ const float u = fabsf(s[ch]);
+ if (u < min)
+ icoord[ch] = 0;
+ else if (u > max)
+ icoord[ch] = size - 1;
+ else
+ icoord[ch] = util_ifloor(u * size);
+ }
+ }
+ return;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
+ {
+ /* s limited to [min,max] */
+ /* i limited to [0, size-1] */
+ const float min = -1.0F / (2.0F * size);
+ const float max = 1.0F - min;
+ for (ch = 0; ch < 4; ch++) {
+ const float u = fabsf(s[ch]);
+ if (u < min)
+ icoord[ch] = -1;
+ else if (u > max)
+ icoord[ch] = size;
+ else
+ icoord[ch] = util_ifloor(u * size);
+ }
+ }
+ return;
+ default:
+ assert(0);
+ }
+}
+
+
+/**
+ * Used to compute texel locations for linear sampling for four texcoords.
+ * \param wrapMode PIPE_TEX_WRAP_x
+ * \param s the texcoords
+ * \param size the texture image size
+ * \param icoord0 returns first texture indexes
+ * \param icoord1 returns second texture indexes (usually icoord0 + 1)
+ * \param w returns blend factor/weight between texture indexes
+ * \param icoord returns the computed integer texture coords
+ */
+static INLINE void
+linear_texcoord_4(unsigned wrapMode, const float s[4], unsigned size,
+ int icoord0[4], int icoord1[4], float w[4])
+{
+ uint ch;
+
+ switch (wrapMode) {
+ case PIPE_TEX_WRAP_REPEAT:
+ for (ch = 0; ch < 4; ch++) {
+ float u = s[ch] * size - 0.5F;
+ icoord0[ch] = REMAINDER(util_ifloor(u), size);
+ icoord1[ch] = REMAINDER(icoord0[ch] + 1, size);
+ w[ch] = FRAC(u);
+ }
+ break;
+ case PIPE_TEX_WRAP_CLAMP:
+ for (ch = 0; ch < 4; ch++) {
+ float u = CLAMP(s[ch], 0.0F, 1.0F);
+ u = u * size - 0.5f;
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ w[ch] = FRAC(u);
+ }
+ break;
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ for (ch = 0; ch < 4; ch++) {
+ float u = CLAMP(s[ch], 0.0F, 1.0F);
+ u = u * size - 0.5f;
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ if (icoord0[ch] < 0)
+ icoord0[ch] = 0;
+ if (icoord1[ch] >= (int) size)
+ icoord1[ch] = size - 1;
+ w[ch] = FRAC(u);
+ }
+ break;
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ {
+ const float min = -1.0F / (2.0F * size);
+ const float max = 1.0F - min;
+ for (ch = 0; ch < 4; ch++) {
+ float u = CLAMP(s[ch], min, max);
+ u = u * size - 0.5f;
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ w[ch] = FRAC(u);
+ }
+ }
+ break;
+ case PIPE_TEX_WRAP_MIRROR_REPEAT:
+ for (ch = 0; ch < 4; ch++) {
+ const int flr = util_ifloor(s[ch]);
+ float u;
+ if (flr & 1)
+ u = 1.0F - (s[ch] - (float) flr);
+ else
+ u = s[ch] - (float) flr;
+ u = u * size - 0.5F;
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ if (icoord0[ch] < 0)
+ icoord0[ch] = 0;
+ if (icoord1[ch] >= (int) size)
+ icoord1[ch] = size - 1;
+ w[ch] = FRAC(u);
+ }
+ break;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP:
+ for (ch = 0; ch < 4; ch++) {
+ float u = fabsf(s[ch]);
+ if (u >= 1.0F)
+ u = (float) size;
+ else
+ u *= size;
+ u -= 0.5F;
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ w[ch] = FRAC(u);
+ }
+ break;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
+ for (ch = 0; ch < 4; ch++) {
+ float u = fabsf(s[ch]);
+ if (u >= 1.0F)
+ u = (float) size;
+ else
+ u *= size;
+ u -= 0.5F;
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ if (icoord0[ch] < 0)
+ icoord0[ch] = 0;
+ if (icoord1[ch] >= (int) size)
+ icoord1[ch] = size - 1;
+ w[ch] = FRAC(u);
+ }
+ break;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
+ {
+ const float min = -1.0F / (2.0F * size);
+ const float max = 1.0F - min;
+ for (ch = 0; ch < 4; ch++) {
+ float u = fabsf(s[ch]);
+ if (u <= min)
+ u = min * size;
+ else if (u >= max)
+ u = max * size;
+ else
+ u *= size;
+ u -= 0.5F;
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ w[ch] = FRAC(u);
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
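+
+/*
+ * Worked example for the REPEAT case above: s = 0.5 with size = 8 gives
+ * u = 0.5*8 - 0.5 = 3.5, so icoord0 = 3, icoord1 = 4 and w = 0.5, i.e. an
+ * even blend of the two texels straddling the sample point.
+ */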
+
+
+/**
+ * For RECT textures / unnormalized texcoords
+ * Only a subset of wrap modes supported.
+ */
+static INLINE void
+nearest_texcoord_unnorm_4(unsigned wrapMode, const float s[4], unsigned size,
+ int icoord[4])
+{
+ uint ch;
+ switch (wrapMode) {
+ case PIPE_TEX_WRAP_CLAMP:
+ for (ch = 0; ch < 4; ch++) {
+ int i = util_ifloor(s[ch]);
+ icoord[ch]= CLAMP(i, 0, (int) size-1);
+ }
+ return;
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ /* fall-through */
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ for (ch = 0; ch < 4; ch++) {
+ icoord[ch]= util_ifloor( CLAMP(s[ch], 0.5F, (float) size - 0.5F) );
+ }
+ return;
+ default:
+ assert(0);
+ }
+}
+
+
+/**
+ * For RECT textures / unnormalized texcoords.
+ * Only a subset of wrap modes supported.
+ */
+static INLINE void
+linear_texcoord_unnorm_4(unsigned wrapMode, const float s[4], unsigned size,
+ int icoord0[4], int icoord1[4], float w[4])
+{
+ uint ch;
+ switch (wrapMode) {
+ case PIPE_TEX_WRAP_CLAMP:
+ for (ch = 0; ch < 4; ch++) {
+ /* Not exactly what the spec says, but it matches NVIDIA output */
+ float u = CLAMP(s[ch] - 0.5F, 0.0f, (float) size - 1.0f);
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ w[ch] = FRAC(u);
+ }
+ return;
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ /* fall-through */
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ for (ch = 0; ch < 4; ch++) {
+ float u = CLAMP(s[ch], 0.5F, (float) size - 0.5F);
+ u -= 0.5F;
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ if (icoord1[ch] > (int) size - 1)
+ icoord1[ch] = size - 1;
+ w[ch] = FRAC(u);
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+
+static unsigned
+choose_cube_face(float rx, float ry, float rz, float *newS, float *newT)
+{
+ /*
+ major axis
+ direction target sc tc ma
+ ---------- ------------------------------- --- --- ---
+ +rx TEXTURE_CUBE_MAP_POSITIVE_X_EXT -rz -ry rx
+ -rx TEXTURE_CUBE_MAP_NEGATIVE_X_EXT +rz -ry rx
+ +ry TEXTURE_CUBE_MAP_POSITIVE_Y_EXT +rx +rz ry
+ -ry TEXTURE_CUBE_MAP_NEGATIVE_Y_EXT +rx -rz ry
+ +rz TEXTURE_CUBE_MAP_POSITIVE_Z_EXT +rx -ry rz
+ -rz TEXTURE_CUBE_MAP_NEGATIVE_Z_EXT -rx -ry rz
+ */
+ const float arx = fabsf(rx), ary = fabsf(ry), arz = fabsf(rz);
+ unsigned face;
+ float sc, tc, ma;
+
+ if (arx > ary && arx > arz) {
+ if (rx >= 0.0F) {
+ face = PIPE_TEX_FACE_POS_X;
+ sc = -rz;
+ tc = -ry;
+ ma = arx;
+ }
+ else {
+ face = PIPE_TEX_FACE_NEG_X;
+ sc = rz;
+ tc = -ry;
+ ma = arx;
+ }
+ }
+ else if (ary > arx && ary > arz) {
+ if (ry >= 0.0F) {
+ face = PIPE_TEX_FACE_POS_Y;
+ sc = rx;
+ tc = rz;
+ ma = ary;
+ }
+ else {
+ face = PIPE_TEX_FACE_NEG_Y;
+ sc = rx;
+ tc = -rz;
+ ma = ary;
+ }
+ }
+ else {
+ if (rz > 0.0F) {
+ face = PIPE_TEX_FACE_POS_Z;
+ sc = rx;
+ tc = -ry;
+ ma = arz;
+ }
+ else {
+ face = PIPE_TEX_FACE_NEG_Z;
+ sc = -rx;
+ tc = -ry;
+ ma = arz;
+ }
+ }
+
+ *newS = ( sc / ma + 1.0F ) * 0.5F;
+ *newT = ( tc / ma + 1.0F ) * 0.5F;
+
+ return face;
+}
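+
+/*
+ * Example: a direction vector of (0, 0, 1) makes rz the major axis, so the
+ * function returns PIPE_TEX_FACE_POS_Z with *newS = *newT = 0.5, the center
+ * of that face.
+ */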
+
+
+/**
+ * Examine the quad's texture coordinates to compute the partial
+ * derivatives w.r.t X and Y, then compute lambda (level of detail).
+ *
+ * This is only done for fragment shaders, not vertex shaders.
+ */
+static float
+compute_lambda(struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias)
+{
+ const struct lp_shader_sampler *samp = lp_shader_sampler(tgsi_sampler);
+ const struct pipe_texture *texture = samp->texture;
+ const struct pipe_sampler_state *sampler = samp->sampler;
+ float rho, lambda;
+
+ if (samp->processor == TGSI_PROCESSOR_VERTEX)
+ return lodbias;
+
+ assert(sampler->normalized_coords);
+
+ assert(s);
+ {
+ float dsdx = s[QUAD_BOTTOM_RIGHT] - s[QUAD_BOTTOM_LEFT];
+ float dsdy = s[QUAD_TOP_LEFT] - s[QUAD_BOTTOM_LEFT];
+ dsdx = fabsf(dsdx);
+ dsdy = fabsf(dsdy);
+ rho = MAX2(dsdx, dsdy) * texture->width[0];
+ }
+ if (t) {
+ float dtdx = t[QUAD_BOTTOM_RIGHT] - t[QUAD_BOTTOM_LEFT];
+ float dtdy = t[QUAD_TOP_LEFT] - t[QUAD_BOTTOM_LEFT];
+ float max;
+ dtdx = fabsf(dtdx);
+ dtdy = fabsf(dtdy);
+ max = MAX2(dtdx, dtdy) * texture->height[0];
+ rho = MAX2(rho, max);
+ }
+ if (p) {
+ float dpdx = p[QUAD_BOTTOM_RIGHT] - p[QUAD_BOTTOM_LEFT];
+ float dpdy = p[QUAD_TOP_LEFT] - p[QUAD_BOTTOM_LEFT];
+ float max;
+ dpdx = fabsf(dpdx);
+ dpdy = fabsf(dpdy);
+ max = MAX2(dpdx, dpdy) * texture->depth[0];
+ rho = MAX2(rho, max);
+ }
+
+ lambda = util_fast_log2(rho);
+ lambda += lodbias + sampler->lod_bias;
+ lambda = CLAMP(lambda, sampler->min_lod, sampler->max_lod);
+
+ return lambda;
+}
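+
+/*
+ * Roughly speaking, rho estimates how many texels the quad steps across per
+ * pixel: if the texture is minified by about 4x in its fastest-varying
+ * direction, rho is about 4 and lambda is about log2(4) = 2 before the LOD
+ * bias and min/max clamps are applied.
+ */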
+
+
+/**
+ * Do several things here:
+ * 1. Compute lambda from the texcoords, if needed
+ * 2. Determine if we're minifying or magnifying
+ * 3. If minifying, choose mipmap levels
+ * 4. Return image filter to use within mipmap images
+ * \param level0 Returns first mipmap level to sample from
+ * \param level1 Returns second mipmap level to sample from
+ * \param levelBlend Returns blend factor between levels, in [0,1]
+ * \param imgFilter Returns either the min or mag filter, depending on lambda
+ */
+static void
+choose_mipmap_levels(struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ unsigned *level0, unsigned *level1, float *levelBlend,
+ unsigned *imgFilter)
+{
+ const struct lp_shader_sampler *samp = lp_shader_sampler(tgsi_sampler);
+ const struct pipe_texture *texture = samp->texture;
+ const struct pipe_sampler_state *sampler = samp->sampler;
+
+ if (sampler->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) {
+ /* no mipmap selection needed */
+ *level0 = *level1 = CLAMP((int) sampler->min_lod,
+ 0, (int) texture->last_level);
+
+ if (sampler->min_img_filter != sampler->mag_img_filter) {
+ /* non-mipmapped texture, but still need to determine if doing
+ * minification or magnification.
+ */
+ float lambda = compute_lambda(tgsi_sampler, s, t, p, lodbias);
+ if (lambda <= 0.0) {
+ *imgFilter = sampler->mag_img_filter;
+ }
+ else {
+ *imgFilter = sampler->min_img_filter;
+ }
+ }
+ else {
+ *imgFilter = sampler->mag_img_filter;
+ }
+ }
+ else {
+ float lambda = compute_lambda(tgsi_sampler, s, t, p, lodbias);
+
+ if (lambda <= 0.0) { /* XXX threshold depends on the filter */
+ /* magnifying */
+ *imgFilter = sampler->mag_img_filter;
+ *level0 = *level1 = 0;
+ }
+ else {
+ /* minifying */
+ *imgFilter = sampler->min_img_filter;
+
+ /* choose mipmap level(s) and compute the blend factor between them */
+ if (sampler->min_mip_filter == PIPE_TEX_MIPFILTER_NEAREST) {
+ /* Nearest mipmap level */
+ const int lvl = (int) (lambda + 0.5);
+ *level0 =
+ *level1 = CLAMP(lvl, 0, (int) texture->last_level);
+ }
+ else {
+ /* Linear interpolation between mipmap levels */
+ const int lvl = (int) lambda;
+ *level0 = CLAMP(lvl, 0, (int) texture->last_level);
+ *level1 = CLAMP(lvl + 1, 0, (int) texture->last_level);
+ *levelBlend = FRAC(lambda); /* blending weight between levels */
+ }
+ }
+ }
+}
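+
+/*
+ * Example: with PIPE_TEX_MIPFILTER_LINEAR and lambda = 1.3 the code above
+ * picks level0 = 1, level1 = 2 and levelBlend = 0.3, subject to the
+ * last_level clamps.
+ */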
+
+
+/**
+ * Get the 2x2 quad of texels at (x,y), (x+1,y), (x,y+1), (x+1,y+1),
+ * using the texture tile cache.
+ *
+ * \param face the cube face in 0..5
+ * \param level the mipmap level
+ * \param x the x coord of the upper-left texel within the 2D image
+ * \param y the y coord of the upper-left texel within the 2D image
+ * \param out returns pointers to the four texels
+ *
+ * All four texels come from a single cache tile, so this fast path is only
+ * used when the 2x2 footprint does not cross a tile boundary.
+ *
+ * XXX maybe move this into lp_tex_cache.c and merge with the
+ * lp_get_cached_tex_tile() function.
+ */
+static void
+get_texel_quad_2d(const struct tgsi_sampler *tgsi_sampler,
+ unsigned face, unsigned level, int x, int y,
+ const uint8_t *out[4])
+{
+ const struct lp_shader_sampler *samp = lp_shader_sampler(tgsi_sampler);
+
+ const struct llvmpipe_cached_tex_tile *tile
+ = lp_get_cached_tex_tile(samp->cache,
+ tex_tile_address(x, y, 0, face, level));
+
+ y %= TEX_TILE_SIZE;
+ x %= TEX_TILE_SIZE;
+
+ out[0] = &tile->color[y ][x ][0];
+ out[1] = &tile->color[y ][x+1][0];
+ out[2] = &tile->color[y+1][x ][0];
+ out[3] = &tile->color[y+1][x+1][0];
+}
+
+static INLINE const uint8_t *
+get_texel_2d_ptr(const struct tgsi_sampler *tgsi_sampler,
+ unsigned face, unsigned level, int x, int y)
+{
+ const struct lp_shader_sampler *samp = lp_shader_sampler(tgsi_sampler);
+
+ const struct llvmpipe_cached_tex_tile *tile
+ = lp_get_cached_tex_tile(samp->cache,
+ tex_tile_address(x, y, 0, face, level));
+
+ y %= TEX_TILE_SIZE;
+ x %= TEX_TILE_SIZE;
+
+ return &tile->color[y][x][0];
+}
+
+
+static void
+get_texel_quad_2d_mt(const struct tgsi_sampler *tgsi_sampler,
+ unsigned face, unsigned level,
+ int x0, int y0,
+ int x1, int y1,
+ const uint8_t *out[4])
+{
+ unsigned i;
+
+ for (i = 0; i < 4; i++) {
+ unsigned tx = (i & 1) ? x1 : x0;
+ unsigned ty = (i >> 1) ? y1 : y0;
+
+ out[i] = get_texel_2d_ptr( tgsi_sampler, face, level, tx, ty );
+ }
+}
+
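+/**
+ * Get a single texel via the texture tile cache and convert it to floats.
+ * Coordinates outside the image return the sampler's border color.
+ *
+ * \param face the cube face in 0..5
+ * \param level the mipmap level
+ * \param x the x coord of texel within 2D image
+ * \param y the y coord of texel within 2D image
+ * \param z which slice of a 3D texture
+ * \param rgba the quad to put the texel/color into
+ * \param j which element of the rgba quad to write to
+ */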
+static void
+get_texel(const struct tgsi_sampler *tgsi_sampler,
+ unsigned face, unsigned level, int x, int y, int z,
+ float rgba[NUM_CHANNELS][QUAD_SIZE], unsigned j)
+{
+ const struct lp_shader_sampler *samp = lp_shader_sampler(tgsi_sampler);
+ const struct pipe_texture *texture = samp->texture;
+ const struct pipe_sampler_state *sampler = samp->sampler;
+
+ if (x < 0 || x >= (int) texture->width[level] ||
+ y < 0 || y >= (int) texture->height[level] ||
+ z < 0 || z >= (int) texture->depth[level]) {
+ rgba[0][j] = sampler->border_color[0];
+ rgba[1][j] = sampler->border_color[1];
+ rgba[2][j] = sampler->border_color[2];
+ rgba[3][j] = sampler->border_color[3];
+ }
+ else {
+ const unsigned tx = x % TEX_TILE_SIZE;
+ const unsigned ty = y % TEX_TILE_SIZE;
+ const struct llvmpipe_cached_tex_tile *tile;
+
+ tile = lp_get_cached_tex_tile(samp->cache,
+ tex_tile_address(x, y, z, face, level));
+
+ rgba[0][j] = ubyte_to_float(tile->color[ty][tx][0]);
+ rgba[1][j] = ubyte_to_float(tile->color[ty][tx][1]);
+ rgba[2][j] = ubyte_to_float(tile->color[ty][tx][2]);
+ rgba[3][j] = ubyte_to_float(tile->color[ty][tx][3]);
+ if (0)
+ {
+ debug_printf("Get texel %f %f %f %f from %s\n",
+ rgba[0][j], rgba[1][j], rgba[2][j], rgba[3][j],
+ pf_name(texture->format));
+ }
+ }
+}
+
+
+/**
+ * Compare texcoord 'p' (aka R) against texture value 'rgba[0]'
+ * When we sampled the depth texture, the depth value was put into all
+ * RGBA channels. We look at the red channel here.
+ * \param rgba quad of (depth) texel values
+ * \param p texture 'P' components for four pixels in quad
+ * \param j which pixel in the quad to test [0..3]
+ */
+static INLINE void
+shadow_compare(const struct pipe_sampler_state *sampler,
+ float rgba[NUM_CHANNELS][QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ uint j)
+{
+ int k;
+ switch (sampler->compare_func) {
+ case PIPE_FUNC_LESS:
+ k = p[j] < rgba[0][j];
+ break;
+ case PIPE_FUNC_LEQUAL:
+ k = p[j] <= rgba[0][j];
+ break;
+ case PIPE_FUNC_GREATER:
+ k = p[j] > rgba[0][j];
+ break;
+ case PIPE_FUNC_GEQUAL:
+ k = p[j] >= rgba[0][j];
+ break;
+ case PIPE_FUNC_EQUAL:
+ k = p[j] == rgba[0][j];
+ break;
+ case PIPE_FUNC_NOTEQUAL:
+ k = p[j] != rgba[0][j];
+ break;
+ case PIPE_FUNC_ALWAYS:
+ k = 1;
+ break;
+ case PIPE_FUNC_NEVER:
+ k = 0;
+ break;
+ default:
+ k = 0;
+ assert(0);
+ break;
+ }
+
+ /* XXX returning result for default GL_DEPTH_TEXTURE_MODE = GL_LUMINANCE */
+ rgba[0][j] = rgba[1][j] = rgba[2][j] = (float) k;
+ rgba[3][j] = 1.0F;
+}
+
+
+/**
+ * As above, but do four z/texture comparisons.
+ */
+static INLINE void
+shadow_compare4(const struct pipe_sampler_state *sampler,
+ float rgba[NUM_CHANNELS][QUAD_SIZE],
+ const float p[QUAD_SIZE])
+{
+ int j, k0, k1, k2, k3;
+ float val;
+
+ /* compare four texcoords vs. four texture samples */
+ switch (sampler->compare_func) {
+ case PIPE_FUNC_LESS:
+ k0 = p[0] < rgba[0][0];
+ k1 = p[1] < rgba[0][1];
+ k2 = p[2] < rgba[0][2];
+ k3 = p[3] < rgba[0][3];
+ break;
+ case PIPE_FUNC_LEQUAL:
+ k0 = p[0] <= rgba[0][0];
+ k1 = p[1] <= rgba[0][1];
+ k2 = p[2] <= rgba[0][2];
+ k3 = p[3] <= rgba[0][3];
+ break;
+ case PIPE_FUNC_GREATER:
+ k0 = p[0] > rgba[0][0];
+ k1 = p[1] > rgba[0][1];
+ k2 = p[2] > rgba[0][2];
+ k3 = p[3] > rgba[0][3];
+ break;
+ case PIPE_FUNC_GEQUAL:
+ k0 = p[0] >= rgba[0][0];
+ k1 = p[1] >= rgba[0][1];
+ k2 = p[2] >= rgba[0][2];
+ k3 = p[3] >= rgba[0][3];
+ break;
+ case PIPE_FUNC_EQUAL:
+ k0 = p[0] == rgba[0][0];
+ k1 = p[1] == rgba[0][1];
+ k2 = p[2] == rgba[0][2];
+ k3 = p[3] == rgba[0][3];
+ break;
+ case PIPE_FUNC_NOTEQUAL:
+ k0 = p[0] != rgba[0][0];
+ k1 = p[1] != rgba[0][1];
+ k2 = p[2] != rgba[0][2];
+ k3 = p[3] != rgba[0][3];
+ break;
+ case PIPE_FUNC_ALWAYS:
+ k0 = k1 = k2 = k3 = 1;
+ break;
+ case PIPE_FUNC_NEVER:
+ k0 = k1 = k2 = k3 = 0;
+ break;
+ default:
+ k0 = k1 = k2 = k3 = 0;
+ assert(0);
+ break;
+ }
+
+ /* convert four pass/fail values to an intensity in [0,1] */
+ val = 0.25F * (k0 + k1 + k2 + k3);
+
+ /* XXX returning result for default GL_DEPTH_TEXTURE_MODE = GL_LUMINANCE */
+ for (j = 0; j < 4; j++) {
+ rgba[0][j] = rgba[1][j] = rgba[2][j] = val;
+ rgba[3][j] = 1.0F;
+ }
+}
+
+
+static void
+lp_get_samples_2d_linear_repeat_POT(struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ const struct lp_shader_sampler *samp = lp_shader_sampler(tgsi_sampler);
+ unsigned j;
+ unsigned level = samp->level;
+ unsigned xpot = 1 << (samp->xpot - level);
+ unsigned ypot = 1 << (samp->ypot - level);
+ unsigned xmax = (xpot - 1) & (TEX_TILE_SIZE - 1); /* MIN2(TEX_TILE_SIZE, xpot) - 1; */
+ unsigned ymax = (ypot - 1) & (TEX_TILE_SIZE - 1); /* MIN2(TEX_TILE_SIZE, ypot) - 1; */
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int c;
+
+ float u = s[j] * xpot - 0.5F;
+ float v = t[j] * ypot - 0.5F;
+
+ int uflr = util_ifloor(u);
+ int vflr = util_ifloor(v);
+
+ float xw = u - (float)uflr;
+ float yw = v - (float)vflr;
+
+ int x0 = uflr & (xpot - 1);
+ int y0 = vflr & (ypot - 1);
+
+ const uint8_t *tx[4];
+
+
+      /* Can we fetch all four texels from a single cached tile?
+       */
+ if (x0 < xmax && y0 < ymax)
+ {
+ get_texel_quad_2d(tgsi_sampler, 0, level, x0, y0, tx);
+ }
+ else
+ {
+ unsigned x1 = (x0 + 1) & (xpot - 1);
+ unsigned y1 = (y0 + 1) & (ypot - 1);
+ get_texel_quad_2d_mt(tgsi_sampler, 0, level,
+ x0, y0, x1, y1, tx);
+ }
+
+
+ /* interpolate R, G, B, A */
+ for (c = 0; c < 4; c++) {
+ rgba[c][j] = lerp_2d(xw, yw,
+ ubyte_to_float(tx[0][c]), ubyte_to_float(tx[1][c]),
+ ubyte_to_float(tx[2][c]), ubyte_to_float(tx[3][c]));
+ }
+ }
+}
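+
+/*
+ * Worked example for the bilinear POT path above (numbers are illustrative):
+ * for a 256x256 level (xpot = ypot = 256) and s = t = 0.25, we get
+ * u = v = 0.25 * 256 - 0.5 = 63.5, so uflr = vflr = 63, the weights are
+ * xw = yw = 0.5, and x0 = y0 = 63.  The four texels (63,63), (64,63),
+ * (63,64) and (64,64) are then blended with equal weights by lerp_2d().
+ */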
+
+
+static void
+lp_get_samples_2d_nearest_repeat_POT(struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ const struct lp_shader_sampler *samp = lp_shader_sampler(tgsi_sampler);
+ unsigned j;
+ unsigned level = samp->level;
+ unsigned xpot = 1 << (samp->xpot - level);
+ unsigned ypot = 1 << (samp->ypot - level);
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int c;
+
+ float u = s[j] * xpot;
+ float v = t[j] * ypot;
+
+ int uflr = util_ifloor(u);
+ int vflr = util_ifloor(v);
+
+ int x0 = uflr & (xpot - 1);
+ int y0 = vflr & (ypot - 1);
+
+ const uint8_t *out = get_texel_2d_ptr(tgsi_sampler, 0, level, x0, y0);
+
+ for (c = 0; c < 4; c++) {
+ rgba[c][j] = ubyte_to_float(out[c]);
+ }
+ }
+}
+
+
+static void
+lp_get_samples_2d_nearest_clamp_POT(struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ const struct lp_shader_sampler *samp = lp_shader_sampler(tgsi_sampler);
+ unsigned j;
+ unsigned level = samp->level;
+ unsigned xpot = 1 << (samp->xpot - level);
+ unsigned ypot = 1 << (samp->ypot - level);
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int c;
+
+ float u = s[j] * xpot;
+ float v = t[j] * ypot;
+
+ int x0, y0;
+ const uint8_t *out;
+
+ x0 = util_ifloor(u);
+ if (x0 < 0)
+ x0 = 0;
+ else if (x0 > xpot - 1)
+ x0 = xpot - 1;
+
+ y0 = util_ifloor(v);
+ if (y0 < 0)
+ y0 = 0;
+ else if (y0 > ypot - 1)
+ y0 = ypot - 1;
+
+ out = get_texel_2d_ptr(tgsi_sampler, 0, level, x0, y0);
+
+ for (c = 0; c < 4; c++) {
+ rgba[c][j] = ubyte_to_float(out[c]);
+ }
+ }
+}
+
+
+static void
+lp_get_samples_2d_linear_mip_linear_repeat_POT(struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ struct lp_shader_sampler *samp = lp_shader_sampler(tgsi_sampler);
+ const struct pipe_texture *texture = samp->texture;
+ int level0;
+ float lambda;
+
+ lambda = compute_lambda(tgsi_sampler, s, t, p, lodbias);
+ level0 = (int)lambda;
+
+ if (lambda < 0.0) {
+ samp->level = 0;
+ lp_get_samples_2d_linear_repeat_POT( tgsi_sampler,
+ s, t, p, 0, rgba );
+ }
+ else if (level0 >= texture->last_level) {
+ samp->level = texture->last_level;
+ lp_get_samples_2d_linear_repeat_POT( tgsi_sampler,
+ s, t, p, 0, rgba );
+ }
+ else {
+ float levelBlend = lambda - level0;
+ float rgba0[4][4];
+ float rgba1[4][4];
+ int c,j;
+
+ samp->level = level0;
+ lp_get_samples_2d_linear_repeat_POT( tgsi_sampler,
+ s, t, p, 0, rgba0 );
+
+ samp->level = level0+1;
+ lp_get_samples_2d_linear_repeat_POT( tgsi_sampler,
+ s, t, p, 0, rgba1 );
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ for (c = 0; c < 4; c++) {
+ rgba[c][j] = lerp(levelBlend, rgba0[c][j], rgba1[c][j]);
+ }
+ }
+ }
+}
+
+/**
+ * Common code for sampling 1D/2D/cube textures.
+ * Could probably be extended to cover 3D as well...
+ */
+static void
+lp_get_samples_2d_common(struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE],
+ const unsigned faces[4])
+{
+ const struct lp_shader_sampler *samp = lp_shader_sampler(tgsi_sampler);
+ const struct pipe_texture *texture = samp->texture;
+ const struct pipe_sampler_state *sampler = samp->sampler;
+ unsigned level0, level1, j, imgFilter;
+ int width, height;
+ float levelBlend;
+
+ choose_mipmap_levels(tgsi_sampler, s, t, p,
+ lodbias,
+ &level0, &level1, &levelBlend, &imgFilter);
+
+ assert(sampler->normalized_coords);
+
+ width = texture->width[level0];
+ height = texture->height[level0];
+
+ assert(width > 0);
+
+ switch (imgFilter) {
+ case PIPE_TEX_FILTER_NEAREST:
+ {
+ int x[4], y[4];
+ nearest_texcoord_4(sampler->wrap_s, s, width, x);
+ nearest_texcoord_4(sampler->wrap_t, t, height, y);
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ get_texel(tgsi_sampler, faces[j], level0, x[j], y[j], 0, rgba, j);
+ if (sampler->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) {
+ shadow_compare(sampler, rgba, p, j);
+ }
+
+ if (level0 != level1) {
+ /* get texels from second mipmap level and blend */
+ float rgba2[4][4];
+ unsigned c;
+ x[j] /= 2;
+ y[j] /= 2;
+ get_texel(tgsi_sampler, faces[j], level1, x[j], y[j], 0,
+ rgba2, j);
+ if (sampler->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE){
+ shadow_compare(sampler, rgba2, p, j);
+ }
+
+ for (c = 0; c < NUM_CHANNELS; c++) {
+ rgba[c][j] = lerp(levelBlend, rgba[c][j], rgba2[c][j]);
+ }
+ }
+ }
+ }
+ break;
+ case PIPE_TEX_FILTER_LINEAR:
+ case PIPE_TEX_FILTER_ANISO:
+ {
+ int x0[4], y0[4], x1[4], y1[4];
+ float xw[4], yw[4]; /* weights */
+
+ linear_texcoord_4(sampler->wrap_s, s, width, x0, x1, xw);
+ linear_texcoord_4(sampler->wrap_t, t, height, y0, y1, yw);
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ float tx[4][4]; /* texels */
+ int c;
+ get_texel(tgsi_sampler, faces[j], level0, x0[j], y0[j], 0, tx, 0);
+ get_texel(tgsi_sampler, faces[j], level0, x1[j], y0[j], 0, tx, 1);
+ get_texel(tgsi_sampler, faces[j], level0, x0[j], y1[j], 0, tx, 2);
+ get_texel(tgsi_sampler, faces[j], level0, x1[j], y1[j], 0, tx, 3);
+ if (sampler->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) {
+ shadow_compare4(sampler, tx, p);
+ }
+
+ /* interpolate R, G, B, A */
+ for (c = 0; c < 4; c++) {
+ rgba[c][j] = lerp_2d(xw[j], yw[j],
+ tx[c][0], tx[c][1],
+ tx[c][2], tx[c][3]);
+ }
+
+ if (level0 != level1) {
+ /* get texels from second mipmap level and blend */
+ float rgba2[4][4];
+
+ /* XXX: This is incorrect -- will often end up with (x0
+ * == x1 && y0 == y1), meaning that we fetch the same
+ * texel four times and linearly interpolate between
+ * identical values. The correct approach would be to
+ * call linear_texcoord again for the second level.
+ */
+ x0[j] /= 2;
+ y0[j] /= 2;
+ x1[j] /= 2;
+ y1[j] /= 2;
+ get_texel(tgsi_sampler, faces[j], level1, x0[j], y0[j], 0, tx, 0);
+ get_texel(tgsi_sampler, faces[j], level1, x1[j], y0[j], 0, tx, 1);
+ get_texel(tgsi_sampler, faces[j], level1, x0[j], y1[j], 0, tx, 2);
+ get_texel(tgsi_sampler, faces[j], level1, x1[j], y1[j], 0, tx, 3);
+ if (sampler->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE){
+ shadow_compare4(sampler, tx, p);
+ }
+
+ /* interpolate R, G, B, A */
+ for (c = 0; c < 4; c++) {
+ rgba2[c][j] = lerp_2d(xw[j], yw[j],
+ tx[c][0], tx[c][1], tx[c][2], tx[c][3]);
+ }
+
+ for (c = 0; c < NUM_CHANNELS; c++) {
+ rgba[c][j] = lerp(levelBlend, rgba[c][j], rgba2[c][j]);
+ }
+ }
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+
+static INLINE void
+lp_get_samples_1d(struct tgsi_sampler *sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ static const unsigned faces[4] = {0, 0, 0, 0};
+ static const float tzero[4] = {0, 0, 0, 0};
+ lp_get_samples_2d_common(sampler, s, tzero, NULL,
+ lodbias, rgba, faces);
+}
+
+
+static INLINE void
+lp_get_samples_2d(struct tgsi_sampler *sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ static const unsigned faces[4] = {0, 0, 0, 0};
+ lp_get_samples_2d_common(sampler, s, t, p,
+ lodbias, rgba, faces);
+}
+
+
+static INLINE void
+lp_get_samples_3d(struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ const struct lp_shader_sampler *samp = lp_shader_sampler(tgsi_sampler);
+ const struct pipe_texture *texture = samp->texture;
+ const struct pipe_sampler_state *sampler = samp->sampler;
+ /* get/map pipe_surfaces corresponding to 3D tex slices */
+ unsigned level0, level1, j, imgFilter;
+ int width, height, depth;
+ float levelBlend;
+ const uint face = 0;
+
+ choose_mipmap_levels(tgsi_sampler, s, t, p,
+ lodbias,
+ &level0, &level1, &levelBlend, &imgFilter);
+
+ assert(sampler->normalized_coords);
+
+ width = texture->width[level0];
+ height = texture->height[level0];
+ depth = texture->depth[level0];
+
+ assert(width > 0);
+ assert(height > 0);
+ assert(depth > 0);
+
+ switch (imgFilter) {
+ case PIPE_TEX_FILTER_NEAREST:
+ {
+ int x[4], y[4], z[4];
+ nearest_texcoord_4(sampler->wrap_s, s, width, x);
+ nearest_texcoord_4(sampler->wrap_t, t, height, y);
+ nearest_texcoord_4(sampler->wrap_r, p, depth, z);
+ for (j = 0; j < QUAD_SIZE; j++) {
+ get_texel(tgsi_sampler, face, level0, x[j], y[j], z[j], rgba, j);
+ if (level0 != level1) {
+ /* get texels from second mipmap level and blend */
+ float rgba2[4][4];
+ unsigned c;
+ x[j] /= 2;
+ y[j] /= 2;
+ z[j] /= 2;
+ get_texel(tgsi_sampler, face, level1, x[j], y[j], z[j], rgba2, j);
+ for (c = 0; c < NUM_CHANNELS; c++) {
+ rgba[c][j] = lerp(levelBlend, rgba2[c][j], rgba[c][j]);
+ }
+ }
+ }
+ }
+ break;
+ case PIPE_TEX_FILTER_LINEAR:
+ case PIPE_TEX_FILTER_ANISO:
+ {
+ int x0[4], x1[4], y0[4], y1[4], z0[4], z1[4];
+ float xw[4], yw[4], zw[4]; /* interpolation weights */
+ linear_texcoord_4(sampler->wrap_s, s, width, x0, x1, xw);
+ linear_texcoord_4(sampler->wrap_t, t, height, y0, y1, yw);
+ linear_texcoord_4(sampler->wrap_r, p, depth, z0, z1, zw);
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int c;
+ float tx0[4][4], tx1[4][4];
+ get_texel(tgsi_sampler, face, level0, x0[j], y0[j], z0[j], tx0, 0);
+ get_texel(tgsi_sampler, face, level0, x1[j], y0[j], z0[j], tx0, 1);
+ get_texel(tgsi_sampler, face, level0, x0[j], y1[j], z0[j], tx0, 2);
+ get_texel(tgsi_sampler, face, level0, x1[j], y1[j], z0[j], tx0, 3);
+ get_texel(tgsi_sampler, face, level0, x0[j], y0[j], z1[j], tx1, 0);
+ get_texel(tgsi_sampler, face, level0, x1[j], y0[j], z1[j], tx1, 1);
+ get_texel(tgsi_sampler, face, level0, x0[j], y1[j], z1[j], tx1, 2);
+ get_texel(tgsi_sampler, face, level0, x1[j], y1[j], z1[j], tx1, 3);
+
+ /* interpolate R, G, B, A */
+ for (c = 0; c < 4; c++) {
+ rgba[c][j] = lerp_3d(xw[j], yw[j], zw[j],
+ tx0[c][0], tx0[c][1],
+ tx0[c][2], tx0[c][3],
+ tx1[c][0], tx1[c][1],
+ tx1[c][2], tx1[c][3]);
+ }
+
+ if (level0 != level1) {
+ /* get texels from second mipmap level and blend */
+ float rgba2[4][4];
+ x0[j] /= 2;
+ y0[j] /= 2;
+ z0[j] /= 2;
+ x1[j] /= 2;
+ y1[j] /= 2;
+ z1[j] /= 2;
+ get_texel(tgsi_sampler, face, level1, x0[j], y0[j], z0[j], tx0, 0);
+ get_texel(tgsi_sampler, face, level1, x1[j], y0[j], z0[j], tx0, 1);
+ get_texel(tgsi_sampler, face, level1, x0[j], y1[j], z0[j], tx0, 2);
+ get_texel(tgsi_sampler, face, level1, x1[j], y1[j], z0[j], tx0, 3);
+ get_texel(tgsi_sampler, face, level1, x0[j], y0[j], z1[j], tx1, 0);
+ get_texel(tgsi_sampler, face, level1, x1[j], y0[j], z1[j], tx1, 1);
+ get_texel(tgsi_sampler, face, level1, x0[j], y1[j], z1[j], tx1, 2);
+ get_texel(tgsi_sampler, face, level1, x1[j], y1[j], z1[j], tx1, 3);
+
+ /* interpolate R, G, B, A */
+ for (c = 0; c < 4; c++) {
+ rgba2[c][j] = lerp_3d(xw[j], yw[j], zw[j],
+ tx0[c][0], tx0[c][1],
+ tx0[c][2], tx0[c][3],
+ tx1[c][0], tx1[c][1],
+ tx1[c][2], tx1[c][3]);
+ }
+
+ /* blend mipmap levels */
+ for (c = 0; c < NUM_CHANNELS; c++) {
+ rgba[c][j] = lerp(levelBlend, rgba[c][j], rgba2[c][j]);
+ }
+ }
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+
+static void
+lp_get_samples_cube(struct tgsi_sampler *sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ unsigned faces[QUAD_SIZE], j;
+ float ssss[4], tttt[4];
+ for (j = 0; j < QUAD_SIZE; j++) {
+ faces[j] = choose_cube_face(s[j], t[j], p[j], ssss + j, tttt + j);
+ }
+ lp_get_samples_2d_common(sampler, ssss, tttt, NULL,
+ lodbias, rgba, faces);
+}
+
+
+static void
+lp_get_samples_rect(struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ const struct lp_shader_sampler *samp = lp_shader_sampler(tgsi_sampler);
+ const struct pipe_texture *texture = samp->texture;
+ const struct pipe_sampler_state *sampler = samp->sampler;
+ const uint face = 0;
+ unsigned level0, level1, j, imgFilter;
+ int width, height;
+ float levelBlend;
+
+ choose_mipmap_levels(tgsi_sampler, s, t, p,
+ lodbias,
+ &level0, &level1, &levelBlend, &imgFilter);
+
+ /* texture RECTS cannot be mipmapped */
+ assert(level0 == level1);
+
+ width = texture->width[level0];
+ height = texture->height[level0];
+
+ assert(width > 0);
+
+ switch (imgFilter) {
+ case PIPE_TEX_FILTER_NEAREST:
+ {
+ int x[4], y[4];
+ nearest_texcoord_unnorm_4(sampler->wrap_s, s, width, x);
+ nearest_texcoord_unnorm_4(sampler->wrap_t, t, height, y);
+ for (j = 0; j < QUAD_SIZE; j++) {
+ get_texel(tgsi_sampler, face, level0, x[j], y[j], 0, rgba, j);
+ if (sampler->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) {
+ shadow_compare(sampler, rgba, p, j);
+ }
+ }
+ }
+ break;
+ case PIPE_TEX_FILTER_LINEAR:
+ case PIPE_TEX_FILTER_ANISO:
+ {
+ int x0[4], y0[4], x1[4], y1[4];
+ float xw[4], yw[4]; /* weights */
+ linear_texcoord_unnorm_4(sampler->wrap_s, s, width, x0, x1, xw);
+ linear_texcoord_unnorm_4(sampler->wrap_t, t, height, y0, y1, yw);
+ for (j = 0; j < QUAD_SIZE; j++) {
+ float tx[4][4]; /* texels */
+ int c;
+ get_texel(tgsi_sampler, face, level0, x0[j], y0[j], 0, tx, 0);
+ get_texel(tgsi_sampler, face, level0, x1[j], y0[j], 0, tx, 1);
+ get_texel(tgsi_sampler, face, level0, x0[j], y1[j], 0, tx, 2);
+ get_texel(tgsi_sampler, face, level0, x1[j], y1[j], 0, tx, 3);
+ if (sampler->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) {
+ shadow_compare4(sampler, tx, p);
+ }
+ for (c = 0; c < 4; c++) {
+ rgba[c][j] = lerp_2d(xw[j], yw[j],
+ tx[c][0], tx[c][1], tx[c][2], tx[c][3]);
+ }
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+
+/**
+ * Fallback handler for error conditions: just returns constant white.
+ */
+static INLINE void
+lp_get_samples_null(struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ int i,j;
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 4; j++)
+ rgba[i][j] = 1.0;
+}
+
+/**
+ * Called via tgsi_sampler::get_samples() when using a sampler for the
+ * first time. Determine the actual sampler function, link it in and
+ * call it.
+ */
+void
+lp_get_samples(struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ struct lp_shader_sampler *samp = lp_shader_sampler(tgsi_sampler);
+ const struct pipe_texture *texture = samp->texture;
+ const struct pipe_sampler_state *sampler = samp->sampler;
+
+ /* Default to the 'undefined' case:
+ */
+ tgsi_sampler->get_samples = lp_get_samples_null;
+
+ if (!texture) {
+ assert(0); /* is this legal?? */
+ goto out;
+ }
+
+ if (!sampler->normalized_coords) {
+ assert (texture->target == PIPE_TEXTURE_2D);
+ tgsi_sampler->get_samples = lp_get_samples_rect;
+ goto out;
+ }
+
+ switch (texture->target) {
+ case PIPE_TEXTURE_1D:
+ tgsi_sampler->get_samples = lp_get_samples_1d;
+ break;
+ case PIPE_TEXTURE_2D:
+ tgsi_sampler->get_samples = lp_get_samples_2d;
+ break;
+ case PIPE_TEXTURE_3D:
+ tgsi_sampler->get_samples = lp_get_samples_3d;
+ break;
+ case PIPE_TEXTURE_CUBE:
+ tgsi_sampler->get_samples = lp_get_samples_cube;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ /* Do this elsewhere:
+ */
+ samp->xpot = util_unsigned_logbase2( samp->texture->width[0] );
+ samp->ypot = util_unsigned_logbase2( samp->texture->height[0] );
+
+ /* Try to hook in a faster sampler. Ultimately we'll have to
+ * code-generate these. Luckily most of this looks like it is
+ * orthogonal state within the sampler.
+ */
+ if (texture->target == PIPE_TEXTURE_2D &&
+ sampler->min_img_filter == sampler->mag_img_filter &&
+ sampler->wrap_s == sampler->wrap_t &&
+ sampler->compare_mode == FALSE &&
+ sampler->normalized_coords)
+ {
+ if (sampler->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) {
+ samp->level = CLAMP((int) sampler->min_lod,
+ 0, (int) texture->last_level);
+
+ if (sampler->wrap_s == PIPE_TEX_WRAP_REPEAT) {
+ switch (sampler->min_img_filter) {
+ case PIPE_TEX_FILTER_NEAREST:
+ tgsi_sampler->get_samples = lp_get_samples_2d_nearest_repeat_POT;
+ break;
+ case PIPE_TEX_FILTER_LINEAR:
+ tgsi_sampler->get_samples = lp_get_samples_2d_linear_repeat_POT;
+ break;
+ default:
+ break;
+ }
+ }
+ else if (sampler->wrap_s == PIPE_TEX_WRAP_CLAMP) {
+ switch (sampler->min_img_filter) {
+ case PIPE_TEX_FILTER_NEAREST:
+ tgsi_sampler->get_samples = lp_get_samples_2d_nearest_clamp_POT;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ else if (sampler->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
+ if (sampler->wrap_s == PIPE_TEX_WRAP_REPEAT) {
+ switch (sampler->min_img_filter) {
+ case PIPE_TEX_FILTER_LINEAR:
+ tgsi_sampler->get_samples = lp_get_samples_2d_linear_mip_linear_repeat_POT;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+ else if (0) {
+ _debug_printf("target %d/%d min_mip %d/%d min_img %d/%d wrap %d/%d compare %d/%d norm %d/%d\n",
+ texture->target, PIPE_TEXTURE_2D,
+ sampler->min_mip_filter, PIPE_TEX_MIPFILTER_NONE,
+ sampler->min_img_filter, sampler->mag_img_filter,
+ sampler->wrap_s, sampler->wrap_t,
+ sampler->compare_mode, FALSE,
+ sampler->normalized_coords, TRUE);
+ }
+
+out:
+ tgsi_sampler->get_samples( tgsi_sampler, s, t, p, lodbias, rgba );
+}
+
+
+void PIPE_CDECL
+lp_fetch_texel_soa( struct tgsi_sampler **samplers,
+ uint32_t unit,
+ float *store )
+{
+ struct tgsi_sampler *sampler = samplers[unit];
+
+#if 0
+ uint j;
+
+ debug_printf("%s sampler: %p (%p) store: %p\n",
+ __FUNCTION__,
+ sampler, *sampler,
+ store );
+
+ debug_printf("lodbias %f\n", store[12]);
+
+ for (j = 0; j < 4; j++)
+ debug_printf("sample %d texcoord %f %f\n",
+ j,
+ store[0+j],
+ store[4+j]);
+#endif
+
+ {
+ float rgba[NUM_CHANNELS][QUAD_SIZE];
+ sampler->get_samples(sampler,
+ &store[0],
+ &store[4],
+ &store[8],
+ 0.0f, /*store[12], lodbias */
+ rgba);
+ memcpy(store, rgba, sizeof rgba);
+ }
+
+#if 0
+ for (j = 0; j < 4; j++)
+ debug_printf("sample %d result %f %f %f %f\n",
+ j,
+ store[0+j],
+ store[4+j],
+ store[8+j],
+ store[12+j]);
+#endif
+}
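+
+/*
+ * Minimal usage sketch for lp_fetch_texel_soa() -- illustrative only;
+ * assumes QUAD_SIZE == NUM_CHANNELS == 4, and the caller name below is
+ * hypothetical:
+ */
+#if 0
+static void
+example_fetch_texel(struct tgsi_sampler **samplers)
+{
+   float store[16];
+
+   /* four s coords, four t coords, four p coords for the 2x2 quad */
+   store[0] = store[1] = store[2]  = store[3]  = 0.5f;   /* s */
+   store[4] = store[5] = store[6]  = store[7]  = 0.5f;   /* t */
+   store[8] = store[9] = store[10] = store[11] = 0.0f;   /* p */
+   store[12] = 0.0f;                                     /* lodbias (currently ignored) */
+
+   lp_fetch_texel_soa(samplers, 0 /* sampler unit */, store);
+
+   /* store[] now holds the sampled R, G, B and A values, four pixels each */
+}
+#endif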
+
+
+#include "lp_bld_type.h"
+#include "lp_bld_intr.h"
+#include "lp_bld_tgsi.h"
+
+
+struct lp_c_sampler_soa
+{
+ struct lp_build_sampler_soa base;
+
+ LLVMValueRef context_ptr;
+
+ LLVMValueRef samplers_ptr;
+
+ /** Coords/texels store */
+ LLVMValueRef store_ptr;
+};
+
+
+static void
+lp_c_sampler_soa_destroy(struct lp_build_sampler_soa *sampler)
+{
+ FREE(sampler);
+}
+
+
+static void
+lp_c_sampler_soa_emit_fetch_texel(struct lp_build_sampler_soa *_sampler,
+ LLVMBuilderRef builder,
+ union lp_type type,
+ unsigned unit,
+ unsigned num_coords,
+ const LLVMValueRef *coords,
+ LLVMValueRef lodbias,
+ LLVMValueRef *texel)
+{
+ struct lp_c_sampler_soa *sampler = (struct lp_c_sampler_soa *)_sampler;
+ LLVMTypeRef vec_type = LLVMTypeOf(coords[0]);
+ LLVMValueRef args[3];
+ unsigned i;
+
+ if(!sampler->samplers_ptr)
+ sampler->samplers_ptr = lp_jit_context_samplers(builder, sampler->context_ptr);
+
+ if(!sampler->store_ptr)
+ sampler->store_ptr = LLVMBuildArrayAlloca(builder,
+ vec_type,
+ LLVMConstInt(LLVMInt32Type(), 4, 0),
+ "texel_store");
+
+ for (i = 0; i < num_coords; i++) {
+ LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0);
+ LLVMValueRef coord_ptr = LLVMBuildGEP(builder, sampler->store_ptr, &index, 1, "");
+ LLVMBuildStore(builder, coords[i], coord_ptr);
+ }
+
+ args[0] = sampler->samplers_ptr;
+ args[1] = LLVMConstInt(LLVMInt32Type(), unit, 0);
+ args[2] = sampler->store_ptr;
+
+ lp_build_intrinsic(builder, "fetch_texel", LLVMVoidType(), args, 3);
+
+ for (i = 0; i < NUM_CHANNELS; ++i) {
+ LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0);
+ LLVMValueRef texel_ptr = LLVMBuildGEP(builder, sampler->store_ptr, &index, 1, "");
+ texel[i] = LLVMBuildLoad(builder, texel_ptr, "");
+ }
+}
+
+
+struct lp_build_sampler_soa *
+lp_c_sampler_soa_create(LLVMValueRef context_ptr)
+{
+ struct lp_c_sampler_soa *sampler;
+
+ sampler = CALLOC_STRUCT(lp_c_sampler_soa);
+ if(!sampler)
+ return NULL;
+
+ sampler->base.destroy = lp_c_sampler_soa_destroy;
+ sampler->base.emit_fetch_texel = lp_c_sampler_soa_emit_fetch_texel;
+ sampler->context_ptr = context_ptr;
+
+ return &sampler->base;
+}
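+
+/*
+ * Sketch of driving the sampler purely through the lp_build_sampler_soa
+ * vtable filled in above (illustrative; the function name and arguments
+ * here are hypothetical):
+ */
+#if 0
+static void
+example_emit_tex_fetch(LLVMBuilderRef builder,
+                       LLVMValueRef context_ptr,
+                       union lp_type type,
+                       const LLVMValueRef coords[3],
+                       LLVMValueRef lodbias)
+{
+   struct lp_build_sampler_soa *sampler = lp_c_sampler_soa_create(context_ptr);
+   LLVMValueRef texel[NUM_CHANNELS];
+
+   if (!sampler)
+      return;
+
+   /* emit the code that stores the coords and calls the fetch_texel intrinsic */
+   sampler->emit_fetch_texel(sampler, builder, type,
+                             0 /* unit */, 3 /* num_coords */,
+                             coords, lodbias, texel);
+
+   sampler->destroy(sampler);
+}
+#endif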
+
diff --git a/src/gallium/drivers/llvmpipe/lp_tex_sample_llvm.c b/src/gallium/drivers/llvmpipe/lp_tex_sample_llvm.c
new file mode 100644
index 0000000000..7d31705d01
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_tex_sample_llvm.c
@@ -0,0 +1,196 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Texture sampling code generation
+ *
+ * This file is nothing more than ugly glue between three largely independent
+ * entities:
+ * - TGSI -> LLVM translation (i.e., lp_build_tgsi_soa)
+ * - texture sampling code generation (i.e., lp_build_sample_soa)
+ * - LLVM pipe driver
+ *
+ * All interesting code is in the functions mentioned above. There is really
+ * nothing to see here.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#include "pipe/p_defines.h"
+#include "pipe/p_shader_tokens.h"
+#include "lp_bld_debug.h"
+#include "lp_bld_type.h"
+#include "lp_bld_intr.h"
+#include "lp_bld_sample.h"
+#include "lp_bld_tgsi.h"
+#include "lp_state.h"
+#include "lp_tex_sample.h"
+
+
+/**
+ * This is the bridge between the sampler state stored in lp_jit_context /
+ * lp_jit_texture and the sampler code generator: it supplies the texture
+ * layout information the code generator needs, expressed in terms of the
+ * state held in lp_jit_context and lp_jit_texture at runtime.
+ */
+struct llvmpipe_sampler_dynamic_state
+{
+ struct lp_sampler_dynamic_state base;
+
+ const struct lp_sampler_static_state *static_state;
+
+ LLVMValueRef context_ptr;
+};
+
+
+/**
+ * This is the bridge between our sampler and the TGSI translator.
+ */
+struct lp_llvm_sampler_soa
+{
+ struct lp_build_sampler_soa base;
+
+ struct llvmpipe_sampler_dynamic_state dynamic_state;
+};
+
+
+/**
+ * Fetch the specified member of the lp_jit_texture structure.
+ *
+ * @sa http://llvm.org/docs/GetElementPtr.html
+ */
+static LLVMValueRef
+lp_llvm_texture_member(struct lp_sampler_dynamic_state *base,
+ LLVMBuilderRef builder,
+ unsigned unit,
+ unsigned member_index,
+ const char *member_name)
+{
+ struct llvmpipe_sampler_dynamic_state *state = (struct llvmpipe_sampler_dynamic_state *)base;
+ LLVMValueRef indices[4];
+ LLVMValueRef ptr;
+ LLVMValueRef res;
+
+ assert(unit < PIPE_MAX_SAMPLERS);
+
+ /* context[0] */
+ indices[0] = LLVMConstInt(LLVMInt32Type(), 0, 0);
+ /* context[0].textures */
+ indices[1] = LLVMConstInt(LLVMInt32Type(), LP_JIT_CONTEXT_TEXTURES_INDEX, 0);
+ /* context[0].textures[unit] */
+ indices[2] = LLVMConstInt(LLVMInt32Type(), unit, 0);
+ /* context[0].textures[unit].member */
+ indices[3] = LLVMConstInt(LLVMInt32Type(), member_index, 0);
+
+ ptr = LLVMBuildGEP(builder, state->context_ptr, indices, Elements(indices), "");
+
+ res = LLVMBuildLoad(builder, ptr, "");
+
+ lp_build_name(res, "context.texture%u.%s", unit, member_name);
+
+ return res;
+}
+
+
+/**
+ * Helper macro to instantiate the functions that generate the code to fetch
+ * the members of lp_jit_texture, fulfilling the sampler code generator's
+ * requests.
+ *
+ * This complexity is the price we pay to keep the texture sampler code
+ * generator a reusable module with no dependencies on llvmpipe internals.
+ */
+#define LP_LLVM_TEXTURE_MEMBER(_name, _index) \
+ static LLVMValueRef \
+ lp_llvm_texture_##_name( struct lp_sampler_dynamic_state *base, \
+ LLVMBuilderRef builder, \
+ unsigned unit) \
+ { \
+ return lp_llvm_texture_member(base, builder, unit, _index, #_name ); \
+ }
+
+
+LP_LLVM_TEXTURE_MEMBER(width, LP_JIT_TEXTURE_WIDTH)
+LP_LLVM_TEXTURE_MEMBER(height, LP_JIT_TEXTURE_HEIGHT)
+LP_LLVM_TEXTURE_MEMBER(stride, LP_JIT_TEXTURE_STRIDE)
+LP_LLVM_TEXTURE_MEMBER(data_ptr, LP_JIT_TEXTURE_DATA)
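+
+/*
+ * For reference, the first instantiation above,
+ * LP_LLVM_TEXTURE_MEMBER(width, LP_JIT_TEXTURE_WIDTH), expands to:
+ *
+ *    static LLVMValueRef
+ *    lp_llvm_texture_width(struct lp_sampler_dynamic_state *base,
+ *                          LLVMBuilderRef builder,
+ *                          unsigned unit)
+ *    {
+ *       return lp_llvm_texture_member(base, builder, unit,
+ *                                     LP_JIT_TEXTURE_WIDTH, "width");
+ *    }
+ */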
+
+
+static void
+lp_llvm_sampler_soa_destroy(struct lp_build_sampler_soa *sampler)
+{
+ FREE(sampler);
+}
+
+
+static void
+lp_llvm_sampler_soa_emit_fetch_texel(struct lp_build_sampler_soa *base,
+ LLVMBuilderRef builder,
+ union lp_type type,
+ unsigned unit,
+ unsigned num_coords,
+ const LLVMValueRef *coords,
+ LLVMValueRef lodbias,
+ LLVMValueRef *texel)
+{
+ struct lp_llvm_sampler_soa *sampler = (struct lp_llvm_sampler_soa *)base;
+
+ assert(unit < PIPE_MAX_SAMPLERS);
+
+ lp_build_sample_soa(builder,
+ &sampler->dynamic_state.static_state[unit],
+ &sampler->dynamic_state.base,
+ type,
+ unit,
+ num_coords,
+ coords,
+ lodbias,
+ texel);
+}
+
+
+struct lp_build_sampler_soa *
+lp_llvm_sampler_soa_create(const struct lp_sampler_static_state *static_state,
+ LLVMValueRef context_ptr)
+{
+ struct lp_llvm_sampler_soa *sampler;
+
+ sampler = CALLOC_STRUCT(lp_llvm_sampler_soa);
+ if(!sampler)
+ return NULL;
+
+ sampler->base.destroy = lp_llvm_sampler_soa_destroy;
+ sampler->base.emit_fetch_texel = lp_llvm_sampler_soa_emit_fetch_texel;
+ sampler->dynamic_state.base.width = lp_llvm_texture_width;
+ sampler->dynamic_state.base.height = lp_llvm_texture_height;
+ sampler->dynamic_state.base.stride = lp_llvm_texture_stride;
+ sampler->dynamic_state.base.data_ptr = lp_llvm_texture_data_ptr;
+ sampler->dynamic_state.static_state = static_state;
+ sampler->dynamic_state.context_ptr = context_ptr;
+
+ return &sampler->base;
+}
+
diff --git a/src/gallium/drivers/llvmpipe/lp_texture.c b/src/gallium/drivers/llvmpipe/lp_texture.c
new file mode 100644
index 0000000000..724d437833
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_texture.c
@@ -0,0 +1,429 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Michel Dänzer <michel@tungstengraphics.com>
+ */
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+#include "lp_context.h"
+#include "lp_state.h"
+#include "lp_texture.h"
+#include "lp_tex_cache.h"
+#include "lp_screen.h"
+#include "lp_winsys.h"
+
+
+/* Simple, maximally packed layout.
+ */
+
+
+/* Conventional allocation path for non-display textures:
+ */
+static boolean
+llvmpipe_texture_layout(struct llvmpipe_screen *screen,
+ struct llvmpipe_texture * lpt)
+{
+ struct pipe_texture *pt = &lpt->base;
+ unsigned level;
+ unsigned width = pt->width[0];
+ unsigned height = pt->height[0];
+ unsigned depth = pt->depth[0];
+
+ unsigned buffer_size = 0;
+
+ pf_get_block(lpt->base.format, &lpt->base.block);
+
+ for (level = 0; level <= pt->last_level; level++) {
+ pt->width[level] = width;
+ pt->height[level] = height;
+ pt->depth[level] = depth;
+ pt->nblocksx[level] = pf_get_nblocksx(&pt->block, width);
+ pt->nblocksy[level] = pf_get_nblocksy(&pt->block, height);
+ lpt->stride[level] = align(pt->nblocksx[level]*pt->block.size, 16);
+
+ lpt->level_offset[level] = buffer_size;
+
+ buffer_size += (pt->nblocksy[level] *
+ ((pt->target == PIPE_TEXTURE_CUBE) ? 6 : depth) *
+ lpt->stride[level]);
+
+ width = minify(width);
+ height = minify(height);
+ depth = minify(depth);
+ }
+
+ lpt->data = align_malloc(buffer_size, 16);
+
+ return lpt->data != NULL;
+}
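+
+/*
+ * Worked example of the layout computed above (illustrative; assumes an
+ * uncompressed 4-byte/pixel format such as A8R8G8B8, so block.size == 4 and
+ * nblocksx/nblocksy equal the pixel dimensions): a 64x64 2D texture with
+ * last_level == 2 gets
+ *
+ *    level 0: 64x64, stride = align(64*4, 16) = 256, offset =     0, size = 64*256 = 16384
+ *    level 1: 32x32, stride = align(32*4, 16) = 128, offset = 16384, size = 32*128 =  4096
+ *    level 2: 16x16, stride = align(16*4, 16) =  64, offset = 20480, size = 16*64  =  1024
+ *
+ * for a single 21504-byte allocation.
+ */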
+
+static boolean
+llvmpipe_displaytarget_layout(struct llvmpipe_screen *screen,
+ struct llvmpipe_texture * lpt)
+{
+ struct llvmpipe_winsys *winsys = screen->winsys;
+
+ pf_get_block(lpt->base.format, &lpt->base.block);
+ lpt->base.nblocksx[0] = pf_get_nblocksx(&lpt->base.block, lpt->base.width[0]);
+ lpt->base.nblocksy[0] = pf_get_nblocksy(&lpt->base.block, lpt->base.height[0]);
+
+ lpt->dt = winsys->displaytarget_create(winsys,
+ lpt->base.format,
+ lpt->base.width[0],
+ lpt->base.height[0],
+ 16,
+ &lpt->stride[0] );
+
+ return lpt->dt != NULL;
+}
+
+
+
+static struct pipe_texture *
+llvmpipe_texture_create(struct pipe_screen *_screen,
+ const struct pipe_texture *templat)
+{
+ struct llvmpipe_screen *screen = llvmpipe_screen(_screen);
+ struct llvmpipe_texture *lpt = CALLOC_STRUCT(llvmpipe_texture);
+ if (!lpt)
+ return NULL;
+
+ lpt->base = *templat;
+ pipe_reference_init(&lpt->base.reference, 1);
+ lpt->base.screen = &screen->base;
+
+ /* XXX: The xlib state tracker is brain-dead and will request
+ * PIPE_FORMAT_Z16_UNORM no matter how much we tell it we don't support it.
+ */
+ if(lpt->base.format == PIPE_FORMAT_Z16_UNORM)
+ lpt->base.format = PIPE_FORMAT_Z32_UNORM;
+
+ if (lpt->base.tex_usage & (PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
+ PIPE_TEXTURE_USAGE_PRIMARY)) {
+ if (!llvmpipe_displaytarget_layout(screen, lpt))
+ goto fail;
+ }
+ else {
+ if (!llvmpipe_texture_layout(screen, lpt))
+ goto fail;
+ }
+
+ return &lpt->base;
+
+ fail:
+ FREE(lpt);
+ return NULL;
+}
+
+
+static struct pipe_texture *
+llvmpipe_texture_blanket(struct pipe_screen * screen,
+ const struct pipe_texture *base,
+ const unsigned *stride,
+ struct pipe_buffer *buffer)
+{
+ /* FIXME */
+#if 0
+ struct llvmpipe_texture *lpt;
+ assert(screen);
+
+ /* Only supports one type */
+ if (base->target != PIPE_TEXTURE_2D ||
+ base->last_level != 0 ||
+ base->depth[0] != 1) {
+ return NULL;
+ }
+
+ lpt = CALLOC_STRUCT(llvmpipe_texture);
+ if (!lpt)
+ return NULL;
+
+ lpt->base = *base;
+ pipe_reference_init(&lpt->base.reference, 1);
+ lpt->base.screen = screen;
+ lpt->base.nblocksx[0] = pf_get_nblocksx(&lpt->base.block, lpt->base.width[0]);
+ lpt->base.nblocksy[0] = pf_get_nblocksy(&lpt->base.block, lpt->base.height[0]);
+ lpt->stride[0] = stride[0];
+
+ pipe_buffer_reference(&lpt->buffer, buffer);
+
+ return &lpt->base;
+#else
+ return NULL;
+#endif
+}
+
+
+static void
+llvmpipe_texture_destroy(struct pipe_texture *pt)
+{
+ struct llvmpipe_screen *screen = llvmpipe_screen(pt->screen);
+ struct llvmpipe_texture *lpt = llvmpipe_texture(pt);
+
+ if(lpt->dt) {
+ struct llvmpipe_winsys *winsys = screen->winsys;
+ winsys->displaytarget_destroy(winsys, lpt->dt);
+ }
+ else
+ align_free(lpt->data);
+
+ FREE(lpt);
+}
+
+
+static struct pipe_surface *
+llvmpipe_get_tex_surface(struct pipe_screen *screen,
+ struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ unsigned usage)
+{
+ struct llvmpipe_texture *lpt = llvmpipe_texture(pt);
+ struct pipe_surface *ps;
+
+ assert(level <= pt->last_level);
+
+ ps = CALLOC_STRUCT(pipe_surface);
+ if (ps) {
+ pipe_reference_init(&ps->reference, 1);
+ pipe_texture_reference(&ps->texture, pt);
+ ps->format = pt->format;
+ ps->width = pt->width[level];
+ ps->height = pt->height[level];
+ ps->offset = lpt->level_offset[level];
+ ps->usage = usage;
+
+ /* Because we are llvmpipe, anything that the state tracker
+ * thought was going to be done with the GPU will actually get
+ * done with the CPU. Let's adjust the flags to take that into
+ * account.
+ */
+ if (ps->usage & PIPE_BUFFER_USAGE_GPU_WRITE) {
+ /* GPU_WRITE means "render" and that can involve reads (blending) */
+ ps->usage |= PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_CPU_READ;
+ }
+
+ if (ps->usage & PIPE_BUFFER_USAGE_GPU_READ)
+ ps->usage |= PIPE_BUFFER_USAGE_CPU_READ;
+
+ if (ps->usage & (PIPE_BUFFER_USAGE_CPU_WRITE |
+ PIPE_BUFFER_USAGE_GPU_WRITE)) {
+ /* Mark the surface as dirty. The tile cache will look for this. */
+ lpt->timestamp++;
+ llvmpipe_screen(screen)->timestamp++;
+ }
+
+ ps->face = face;
+ ps->level = level;
+ ps->zslice = zslice;
+
+ if (pt->target == PIPE_TEXTURE_CUBE) {
+ ps->offset += face * pt->nblocksy[level] * lpt->stride[level];
+ }
+ else if (pt->target == PIPE_TEXTURE_3D) {
+ ps->offset += zslice * pt->nblocksy[level] * lpt->stride[level];
+ }
+ else {
+ assert(face == 0);
+ assert(zslice == 0);
+ }
+ }
+ return ps;
+}
+
+
+static void
+llvmpipe_tex_surface_destroy(struct pipe_surface *surf)
+{
+ /* Effectively do the texture_update work here - if texture images
+ * needed post-processing to put them into hardware layout, this is
+ * where it would happen. For llvmpipe, nothing to do.
+ */
+ assert(surf->texture);
+ pipe_texture_reference(&surf->texture, NULL);
+ FREE(surf);
+}
+
+
+static struct pipe_transfer *
+llvmpipe_get_tex_transfer(struct pipe_screen *screen,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level, unsigned zslice,
+ enum pipe_transfer_usage usage,
+ unsigned x, unsigned y, unsigned w, unsigned h)
+{
+ struct llvmpipe_texture *lptex = llvmpipe_texture(texture);
+ struct llvmpipe_transfer *lpt;
+
+ assert(texture);
+ assert(level <= texture->last_level);
+
+ lpt = CALLOC_STRUCT(llvmpipe_transfer);
+ if (lpt) {
+ struct pipe_transfer *pt = &lpt->base;
+ pipe_texture_reference(&pt->texture, texture);
+ pt->format = texture->format;
+ pt->block = texture->block;
+ pt->x = x;
+ pt->y = y;
+ pt->width = w;
+ pt->height = h;
+ pt->nblocksx = texture->nblocksx[level];
+ pt->nblocksy = texture->nblocksy[level];
+ pt->stride = lptex->stride[level];
+ pt->usage = usage;
+ pt->face = face;
+ pt->level = level;
+ pt->zslice = zslice;
+
+ lpt->offset = lptex->level_offset[level];
+
+ if (texture->target == PIPE_TEXTURE_CUBE) {
+ lpt->offset += face * pt->nblocksy * pt->stride;
+ }
+ else if (texture->target == PIPE_TEXTURE_3D) {
+ lpt->offset += zslice * pt->nblocksy * pt->stride;
+ }
+ else {
+ assert(face == 0);
+ assert(zslice == 0);
+ }
+ return pt;
+ }
+ return NULL;
+}
+
+
+static void
+llvmpipe_tex_transfer_destroy(struct pipe_transfer *transfer)
+{
+ /* Effectively do the texture_update work here - if texture images
+ * needed post-processing to put them into hardware layout, this is
+ * where it would happen. For llvmpipe, nothing to do.
+ */
+ assert (transfer->texture);
+ pipe_texture_reference(&transfer->texture, NULL);
+ FREE(transfer);
+}
+
+
+static void *
+llvmpipe_transfer_map( struct pipe_screen *_screen,
+ struct pipe_transfer *transfer )
+{
+ struct llvmpipe_screen *screen = llvmpipe_screen(_screen);
+ ubyte *map, *xfer_map;
+ struct llvmpipe_texture *lpt;
+
+ assert(transfer->texture);
+ lpt = llvmpipe_texture(transfer->texture);
+
+ if(lpt->dt) {
+ struct llvmpipe_winsys *winsys = screen->winsys;
+ unsigned flags = 0;
+
+ if (transfer->usage != PIPE_TRANSFER_READ) {
+ flags |= PIPE_BUFFER_USAGE_CPU_WRITE;
+ }
+
+ if (transfer->usage != PIPE_TRANSFER_WRITE) {
+ flags |= PIPE_BUFFER_USAGE_CPU_READ;
+ }
+
+ map = winsys->displaytarget_map(winsys, lpt->dt, flags);
+ if (map == NULL)
+ return NULL;
+ }
+ else
+ map = lpt->data;
+
+   /* May want to do different things here depending on the read/write
+    * nature of the map:
+ */
+ if (transfer->texture && transfer->usage != PIPE_TRANSFER_READ)
+ {
+ /* Do something to notify sharing contexts of a texture change.
+ * In llvmpipe, that would mean flushing the texture cache.
+ */
+ screen->timestamp++;
+ }
+
+ xfer_map = map + llvmpipe_transfer(transfer)->offset +
+ transfer->y / transfer->block.height * transfer->stride +
+ transfer->x / transfer->block.width * transfer->block.size;
+ /*printf("map = %p xfer map = %p\n", map, xfer_map);*/
+ return xfer_map;
+}
+
+
+static void
+llvmpipe_transfer_unmap(struct pipe_screen *_screen,
+ struct pipe_transfer *transfer)
+{
+ struct llvmpipe_screen *screen = llvmpipe_screen(_screen);
+ struct llvmpipe_texture *lpt;
+
+ assert(transfer->texture);
+ lpt = llvmpipe_texture(transfer->texture);
+
+ if(lpt->dt) {
+ struct llvmpipe_winsys *winsys = screen->winsys;
+ winsys->displaytarget_unmap(winsys, lpt->dt);
+ }
+}
+
+
+void
+llvmpipe_init_texture_funcs(struct llvmpipe_context *lp)
+{
+}
+
+
+void
+llvmpipe_init_screen_texture_funcs(struct pipe_screen *screen)
+{
+ screen->texture_create = llvmpipe_texture_create;
+ screen->texture_blanket = llvmpipe_texture_blanket;
+ screen->texture_destroy = llvmpipe_texture_destroy;
+
+ screen->get_tex_surface = llvmpipe_get_tex_surface;
+ screen->tex_surface_destroy = llvmpipe_tex_surface_destroy;
+
+ screen->get_tex_transfer = llvmpipe_get_tex_transfer;
+ screen->tex_transfer_destroy = llvmpipe_tex_transfer_destroy;
+ screen->transfer_map = llvmpipe_transfer_map;
+ screen->transfer_unmap = llvmpipe_transfer_unmap;
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_texture.h b/src/gallium/drivers/llvmpipe/lp_texture.h
new file mode 100644
index 0000000000..00a20763e4
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_texture.h
@@ -0,0 +1,90 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef LP_TEXTURE_H
+#define LP_TEXTURE_H
+
+
+#include "pipe/p_state.h"
+
+
+struct pipe_context;
+struct pipe_screen;
+struct llvmpipe_context;
+struct llvmpipe_displaytarget;
+
+struct llvmpipe_texture
+{
+ struct pipe_texture base;
+
+ unsigned long level_offset[PIPE_MAX_TEXTURE_LEVELS];
+ unsigned stride[PIPE_MAX_TEXTURE_LEVELS];
+
+ /**
+ * Display target, for textures with the PIPE_TEXTURE_USAGE_DISPLAY_TARGET
+ * usage.
+ */
+ struct llvmpipe_displaytarget *dt;
+
+ /**
+ * Malloc'ed data for regular textures, or a mapping to dt above.
+ */
+ void *data;
+
+ unsigned timestamp;
+};
+
+struct llvmpipe_transfer
+{
+ struct pipe_transfer base;
+
+ unsigned long offset;
+};
+
+
+/** cast wrappers */
+static INLINE struct llvmpipe_texture *
+llvmpipe_texture(struct pipe_texture *pt)
+{
+ return (struct llvmpipe_texture *) pt;
+}
+
+static INLINE struct llvmpipe_transfer *
+llvmpipe_transfer(struct pipe_transfer *pt)
+{
+ return (struct llvmpipe_transfer *) pt;
+}
+
+
+extern void
+llvmpipe_init_texture_funcs( struct llvmpipe_context *llvmpipe );
+
+extern void
+llvmpipe_init_screen_texture_funcs(struct pipe_screen *screen);
+
+
+#endif /* LP_TEXTURE_H */
diff --git a/src/gallium/drivers/llvmpipe/lp_tile_cache.c b/src/gallium/drivers/llvmpipe/lp_tile_cache.c
new file mode 100644
index 0000000000..143afec3d3
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_tile_cache.c
@@ -0,0 +1,299 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Texture tile caching.
+ *
+ * Author:
+ * Brian Paul
+ */
+
+#include "pipe/p_inlines.h"
+#include "util/u_memory.h"
+#include "util/u_math.h"
+#include "util/u_tile.h"
+#include "util/u_rect.h"
+#include "lp_context.h"
+#include "lp_surface.h"
+#include "lp_texture.h"
+#include "lp_tile_soa.h"
+#include "lp_tile_cache.h"
+
+
+struct llvmpipe_tile_cache *
+lp_create_tile_cache( struct pipe_screen *screen )
+{
+ struct llvmpipe_tile_cache *tc;
+
+ tc = CALLOC_STRUCT( llvmpipe_tile_cache );
+ if(!tc)
+ return NULL;
+
+ tc->screen = screen;
+
+ return tc;
+}
+
+
+void
+lp_destroy_tile_cache(struct llvmpipe_tile_cache *tc)
+{
+ struct pipe_screen *screen;
+ unsigned x, y;
+
+ for (y = 0; y < MAX_HEIGHT; y += TILE_SIZE) {
+ for (x = 0; x < MAX_WIDTH; x += TILE_SIZE) {
+ struct llvmpipe_cached_tile *tile = &tc->entries[y/TILE_SIZE][x/TILE_SIZE];
+
+ if(tile->color)
+ align_free(tile->color);
+ }
+ }
+
+ if (tc->transfer) {
+ screen = tc->transfer->texture->screen;
+ screen->tex_transfer_destroy(tc->transfer);
+ }
+
+ FREE( tc );
+}
+
+
+/**
+ * Specify the surface to cache.
+ */
+void
+lp_tile_cache_set_surface(struct llvmpipe_tile_cache *tc,
+ struct pipe_surface *ps)
+{
+ if (tc->transfer) {
+ struct pipe_screen *screen = tc->transfer->texture->screen;
+
+ if (ps == tc->surface)
+ return;
+
+ if (tc->transfer_map) {
+ screen->transfer_unmap(screen, tc->transfer);
+ tc->transfer_map = NULL;
+ }
+
+ screen->tex_transfer_destroy(tc->transfer);
+ tc->transfer = NULL;
+ }
+
+ tc->surface = ps;
+
+ if (ps) {
+ struct pipe_screen *screen = ps->texture->screen;
+ unsigned x, y;
+
+ tc->transfer = screen->get_tex_transfer(screen, ps->texture, ps->face,
+ ps->level, ps->zslice,
+ PIPE_TRANSFER_READ_WRITE,
+ 0, 0, ps->width, ps->height);
+
+ for (y = 0; y < ps->height; y += TILE_SIZE) {
+ for (x = 0; x < ps->width; x += TILE_SIZE) {
+ struct llvmpipe_cached_tile *tile = &tc->entries[y/TILE_SIZE][x/TILE_SIZE];
+
+ tile->status = LP_TILE_STATUS_UNDEFINED;
+
+ if(!tile->color)
+ tile->color = align_malloc( TILE_SIZE*TILE_SIZE*NUM_CHANNELS, 16 );
+ }
+ }
+ }
+}
+
+
+/**
+ * Return the surface being cached.
+ */
+struct pipe_surface *
+lp_tile_cache_get_surface(struct llvmpipe_tile_cache *tc)
+{
+ return tc->surface;
+}
+
+
+void
+lp_tile_cache_map_transfers(struct llvmpipe_tile_cache *tc)
+{
+ if (tc->transfer && !tc->transfer_map)
+ tc->transfer_map = tc->screen->transfer_map(tc->screen, tc->transfer);
+}
+
+
+void
+lp_tile_cache_unmap_transfers(struct llvmpipe_tile_cache *tc)
+{
+ if (tc->transfer_map) {
+ tc->screen->transfer_unmap(tc->screen, tc->transfer);
+ tc->transfer_map = NULL;
+ }
+}
+
+
+/**
+ * Set a tile to a solid color.
+ */
+static void
+clear_tile(struct llvmpipe_cached_tile *tile,
+ uint8_t clear_color[4])
+{
+ if (clear_color[0] == clear_color[1] &&
+ clear_color[1] == clear_color[2] &&
+ clear_color[2] == clear_color[3]) {
+ memset(tile->color, clear_color[0], TILE_SIZE * TILE_SIZE * 4);
+ }
+ else {
+ uint x, y, chan;
+ for (y = 0; y < TILE_SIZE; y++)
+ for (x = 0; x < TILE_SIZE; x++)
+ for (chan = 0; chan < 4; ++chan)
+ TILE_PIXEL(tile->color, x, y, chan) = clear_color[chan];
+ }
+}
+
+
+/**
+ * Flush the tile cache: write all dirty tiles back to the transfer.
+ * Any tiles flagged as cleared will actually be cleared at this point.
+ */
+void
+lp_flush_tile_cache(struct llvmpipe_tile_cache *tc)
+{
+ struct pipe_transfer *pt = tc->transfer;
+ unsigned x, y;
+
+ if(!pt)
+ return;
+
+   /* write back defined tiles and perform pending clears, per tile status */
+ for (y = 0; y < pt->height; y += TILE_SIZE) {
+ for (x = 0; x < pt->width; x += TILE_SIZE) {
+ struct llvmpipe_cached_tile *tile = &tc->entries[y/TILE_SIZE][x/TILE_SIZE];
+
+ switch(tile->status) {
+ case LP_TILE_STATUS_UNDEFINED:
+ break;
+
+ case LP_TILE_STATUS_CLEAR: {
+ /**
+ * Actually clear the tiles which were flagged as being in a clear state.
+ */
+
+ struct pipe_screen *screen = pt->texture->screen;
+ unsigned tw = TILE_SIZE;
+ unsigned th = TILE_SIZE;
+ void *dst;
+
+ if (pipe_clip_tile(x, y, &tw, &th, pt))
+ continue;
+
+ dst = screen->transfer_map(screen, pt);
+ assert(dst);
+ if(!dst)
+ continue;
+
+ util_fill_rect(dst, &pt->block, pt->stride,
+ x, y, tw, th,
+ tc->clear_val);
+
+ screen->transfer_unmap(screen, pt);
+ break;
+ }
+
+ case LP_TILE_STATUS_DEFINED:
+ lp_put_tile_rgba_soa(pt, x, y, tile->color);
+ break;
+ }
+ }
+ }
+}
+
+
+/**
+ * Get a tile from the cache.
+ * \param x, y position of tile, in pixels
+ */
+void *
+lp_get_cached_tile(struct llvmpipe_tile_cache *tc,
+ unsigned x, unsigned y )
+{
+ struct llvmpipe_cached_tile *tile = &tc->entries[y/TILE_SIZE][x/TILE_SIZE];
+ struct pipe_transfer *pt = tc->transfer;
+
+ switch(tile->status) {
+ case LP_TILE_STATUS_CLEAR:
+ /* don't get tile from framebuffer, just clear it */
+ clear_tile(tile, tc->clear_color);
+ tile->status = LP_TILE_STATUS_DEFINED;
+ break;
+
+ case LP_TILE_STATUS_UNDEFINED:
+ /* get new tile data from transfer */
+ lp_get_tile_rgba_soa(pt, x, y, tile->color);
+ tile->status = LP_TILE_STATUS_DEFINED;
+ break;
+
+ case LP_TILE_STATUS_DEFINED:
+ /* nothing to do */
+ break;
+ }
+
+ return tile->color;
+}
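+
+/*
+ * Typical lifecycle of the tile cache, using only the functions in this
+ * file (an illustrative sketch; 'screen', 'ps', 'x' and 'y' stand for a
+ * caller's screen, color surface and tile position):
+ *
+ *    struct llvmpipe_tile_cache *tc = lp_create_tile_cache(screen);
+ *
+ *    lp_tile_cache_set_surface(tc, ps);
+ *    lp_tile_cache_map_transfers(tc);
+ *
+ *    uint8_t *tile = lp_get_cached_tile(tc, x, y);
+ *    ... read/write TILE_SIZE x TILE_SIZE pixels of SOA data in 'tile' ...
+ *
+ *    lp_flush_tile_cache(tc);            (write tiles back, do pending clears)
+ *    lp_tile_cache_unmap_transfers(tc);
+ *    lp_destroy_tile_cache(tc);
+ */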
+
+
+/**
+ * When a whole surface is being cleared to a single value we can avoid
+ * fetching tiles in lp_get_cached_tile() above.
+ * Save the color and set a 'clear' flag for each tile of the surface.
+ */
+void
+lp_tile_cache_clear(struct llvmpipe_tile_cache *tc, const float *rgba,
+ uint clearValue)
+{
+ struct pipe_transfer *pt = tc->transfer;
+ const unsigned w = pt->width;
+ const unsigned h = pt->height;
+ unsigned x, y, chan;
+
+ for(chan = 0; chan < 4; ++chan)
+ tc->clear_color[chan] = float_to_ubyte(rgba[chan]);
+
+ tc->clear_val = clearValue;
+
+   /* flag every tile as being in the clear state */
+ for (y = 0; y < h; y += TILE_SIZE) {
+ for (x = 0; x < w; x += TILE_SIZE) {
+ struct llvmpipe_cached_tile *tile = &tc->entries[y/TILE_SIZE][x/TILE_SIZE];
+ tile->status = LP_TILE_STATUS_CLEAR;
+ }
+ }
+}
diff --git a/src/gallium/drivers/llvmpipe/lp_tile_cache.h b/src/gallium/drivers/llvmpipe/lp_tile_cache.h
new file mode 100644
index 0000000000..6d8ba5ece7
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_tile_cache.h
@@ -0,0 +1,106 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef LP_TILE_CACHE_H
+#define LP_TILE_CACHE_H
+
+
+#include "pipe/p_compiler.h"
+#include "lp_tile_soa.h"
+
+
+enum llvmpipe_tile_status
+{
+ LP_TILE_STATUS_UNDEFINED = 0,
+ LP_TILE_STATUS_CLEAR = 1,
+ LP_TILE_STATUS_DEFINED = 2
+};
+
+
+struct llvmpipe_cached_tile
+{
+ enum llvmpipe_tile_status status;
+
+ /** color in SOA format */
+ uint8_t *color;
+};
+
+
+/** XXX move these */
+#define MAX_WIDTH 2048
+#define MAX_HEIGHT 2048
+
+
+struct llvmpipe_tile_cache
+{
+ struct pipe_screen *screen;
+ struct pipe_surface *surface; /**< the surface we're caching */
+ struct pipe_transfer *transfer;
+ void *transfer_map;
+
+ struct llvmpipe_cached_tile entries[MAX_WIDTH/TILE_SIZE][MAX_HEIGHT/TILE_SIZE];
+
+ uint8_t clear_color[4]; /**< for color bufs */
+ uint clear_val; /**< for z+stencil, or packed color clear value */
+
+ struct llvmpipe_cached_tile *last_tile; /**< most recently retrieved tile */
+};
+
+
+extern struct llvmpipe_tile_cache *
+lp_create_tile_cache( struct pipe_screen *screen );
+
+extern void
+lp_destroy_tile_cache(struct llvmpipe_tile_cache *tc);
+
+extern void
+lp_tile_cache_set_surface(struct llvmpipe_tile_cache *tc,
+ struct pipe_surface *lps);
+
+extern struct pipe_surface *
+lp_tile_cache_get_surface(struct llvmpipe_tile_cache *tc);
+
+extern void
+lp_tile_cache_map_transfers(struct llvmpipe_tile_cache *tc);
+
+extern void
+lp_tile_cache_unmap_transfers(struct llvmpipe_tile_cache *tc);
+
+extern void
+lp_flush_tile_cache(struct llvmpipe_tile_cache *tc);
+
+extern void
+lp_tile_cache_clear(struct llvmpipe_tile_cache *tc, const float *rgba,
+ uint clearValue);
+
+extern void *
+lp_get_cached_tile(struct llvmpipe_tile_cache *tc,
+ unsigned x, unsigned y );
+
+
+#endif /* LP_TILE_CACHE_H */
+
diff --git a/src/gallium/drivers/llvmpipe/lp_tile_soa.c b/src/gallium/drivers/llvmpipe/lp_tile_soa.c
new file mode 100644
index 0000000000..4e4ccb31cc
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_tile_soa.c
@@ -0,0 +1,931 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * RGBA/float tile get/put functions.
+ * Usable both by drivers and state trackers.
+ */
+
+
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "util/u_rect.h"
+#include "util/u_tile.h"
+#include "lp_tile_cache.h"
+#include "lp_tile_soa.h"
+
+
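+/*
+ * Position of each pixel of a 2x8 tile vector within its interleaved
+ * storage: the 16 pixels are laid out as four 2x2 sub-blocks, so (y, x)
+ * in the vector maps to element tile_offset[y][x] (see TILE_PIXEL in
+ * lp_tile_soa.h).
+ */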
+const unsigned char
+tile_offset[TILE_VECTOR_HEIGHT][TILE_VECTOR_WIDTH] = {
+ { 0, 1, 4, 5, 8, 9, 12, 13},
+ { 2, 3, 6, 7, 10, 11, 14, 15}
+};
+
+
+
+/*** PIPE_FORMAT_A8R8G8B8_UNORM ***/
+
+static void
+a8r8g8b8_get_tile_rgba(const unsigned *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ const unsigned pixel = *src++;
+ TILE_PIXEL(p, j, i, 0) = (pixel >> 16) & 0xff;
+ TILE_PIXEL(p, j, i, 1) = (pixel >> 8) & 0xff;
+ TILE_PIXEL(p, j, i, 2) = (pixel >> 0) & 0xff;
+ TILE_PIXEL(p, j, i, 3) = (pixel >> 24) & 0xff;
+ }
+ }
+}
+
+
+static void
+a8r8g8b8_put_tile_rgba(unsigned *dst,
+ unsigned w, unsigned h,
+ const uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ unsigned r, g, b, a;
+ r = TILE_PIXEL(p, j, i, 0);
+ g = TILE_PIXEL(p, j, i, 1);
+ b = TILE_PIXEL(p, j, i, 2);
+ a = TILE_PIXEL(p, j, i, 3);
+ *dst++ = (a << 24) | (r << 16) | (g << 8) | b;
+ }
+ }
+}
+
+
+/*** PIPE_FORMAT_X8R8G8B8_UNORM ***/
+
+static void
+x8r8g8b8_get_tile_rgba(const unsigned *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ const unsigned pixel = *src++;
+ TILE_PIXEL(p, j, i, 0) = (pixel >> 16) & 0xff;
+ TILE_PIXEL(p, j, i, 1) = (pixel >> 8) & 0xff;
+ TILE_PIXEL(p, j, i, 2) = (pixel >> 0) & 0xff;
+ TILE_PIXEL(p, j, i, 3) = 0xff;
+ }
+ }
+}
+
+
+static void
+x8r8g8b8_put_tile_rgba(unsigned *dst,
+ unsigned w, unsigned h,
+ const uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ unsigned r, g, b;
+ r = TILE_PIXEL(p, j, i, 0);
+ g = TILE_PIXEL(p, j, i, 1);
+ b = TILE_PIXEL(p, j, i, 2);
+ *dst++ = (0xff << 24) | (r << 16) | (g << 8) | b;
+ }
+ }
+}
+
+
+/*** PIPE_FORMAT_B8G8R8A8_UNORM ***/
+
+static void
+b8g8r8a8_get_tile_rgba(const unsigned *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ const unsigned pixel = *src++;
+ TILE_PIXEL(p, j, i, 0) = (pixel >> 8) & 0xff;
+ TILE_PIXEL(p, j, i, 1) = (pixel >> 16) & 0xff;
+ TILE_PIXEL(p, j, i, 2) = (pixel >> 24) & 0xff;
+ TILE_PIXEL(p, j, i, 3) = (pixel >> 0) & 0xff;
+ }
+ }
+}
+
+
+static void
+b8g8r8a8_put_tile_rgba(unsigned *dst,
+ unsigned w, unsigned h,
+ const uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ unsigned r, g, b, a;
+ r = TILE_PIXEL(p, j, i, 0);
+ g = TILE_PIXEL(p, j, i, 1);
+ b = TILE_PIXEL(p, j, i, 2);
+ a = TILE_PIXEL(p, j, i, 3);
+ *dst++ = (b << 24) | (g << 16) | (r << 8) | a;
+ }
+ }
+}
+
+
+/*** PIPE_FORMAT_A1R5G5B5_UNORM ***/
+
+static void
+a1r5g5b5_get_tile_rgba(const ushort *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ const ushort pixel = *src++;
+ TILE_PIXEL(p, j, i, 0) = ((pixel >> 10) & 0x1f) * 255 / 31;
+ TILE_PIXEL(p, j, i, 1) = ((pixel >> 5) & 0x1f) * 255 / 31;
+ TILE_PIXEL(p, j, i, 2) = ((pixel ) & 0x1f) * 255 / 31;
+ TILE_PIXEL(p, j, i, 3) = ((pixel >> 15) ) * 255;
+ }
+ }
+}
+
+
+static void
+a1r5g5b5_put_tile_rgba(ushort *dst,
+ unsigned w, unsigned h,
+ const uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ unsigned r, g, b, a;
+ r = TILE_PIXEL(p, j, i, 0);
+ g = TILE_PIXEL(p, j, i, 1);
+ b = TILE_PIXEL(p, j, i, 2);
+ a = TILE_PIXEL(p, j, i, 3);
+ r = r >> 3; /* 5 bits */
+ g = g >> 3; /* 5 bits */
+ b = b >> 3; /* 5 bits */
+ a = a >> 7; /* 1 bit */
+ *dst++ = (a << 15) | (r << 10) | (g << 5) | b;
+ }
+ }
+}
+
+
+/*** PIPE_FORMAT_A4R4G4B4_UNORM ***/
+
+static void
+a4r4g4b4_get_tile_rgba(const ushort *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ const ushort pixel = *src++;
+ TILE_PIXEL(p, j, i, 0) = ((pixel >> 8) & 0xf) * 255 / 15;
+ TILE_PIXEL(p, j, i, 1) = ((pixel >> 4) & 0xf) * 255 / 15;
+ TILE_PIXEL(p, j, i, 2) = ((pixel ) & 0xf) * 255 / 15;
+ TILE_PIXEL(p, j, i, 3) = ((pixel >> 12) ) * 255 / 15;
+ }
+ }
+}
+
+
+static void
+a4r4g4b4_put_tile_rgba(ushort *dst,
+ unsigned w, unsigned h,
+ const uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ unsigned r, g, b, a;
+ r = TILE_PIXEL(p, j, i, 0);
+ g = TILE_PIXEL(p, j, i, 1);
+ b = TILE_PIXEL(p, j, i, 2);
+ a = TILE_PIXEL(p, j, i, 3);
+ r >>= 4;
+ g >>= 4;
+ b >>= 4;
+ a >>= 4;
+ *dst++ = (a << 12) | (r << 8) | (g << 4) | b;
+ }
+ }
+}
+
+
+/*** PIPE_FORMAT_R5G6B5_UNORM ***/
+
+static void
+r5g6b5_get_tile_rgba(const ushort *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ const ushort pixel = *src++;
+ TILE_PIXEL(p, j, i, 0) = ((pixel >> 11) & 0x1f) * 255 / 31;
+ TILE_PIXEL(p, j, i, 1) = ((pixel >> 5) & 0x3f) * 255 / 63;
+ TILE_PIXEL(p, j, i, 2) = ((pixel ) & 0x1f) * 255 / 31;
+ TILE_PIXEL(p, j, i, 3) = 255;
+ }
+ }
+}
+
+
+static void
+r5g6b5_put_tile_rgba(ushort *dst,
+ unsigned w, unsigned h,
+ const uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ uint r = (uint) TILE_PIXEL(p, j, i, 0) * 31 / 255;
+ uint g = (uint) TILE_PIXEL(p, j, i, 1) * 63 / 255;
+ uint b = (uint) TILE_PIXEL(p, j, i, 2) * 31 / 255;
+ *dst++ = (r << 11) | (g << 5) | (b);
+ }
+ }
+}
+
+
+
+/*** PIPE_FORMAT_Z16_UNORM ***/
+
+/**
+ * Return each Z value as four floats in [0,1].
+ */
+static void
+z16_get_tile_rgba(const ushort *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ const float scale = 1.0f / 65535.0f;
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ TILE_PIXEL(p, j, i, 0) =
+ TILE_PIXEL(p, j, i, 1) =
+ TILE_PIXEL(p, j, i, 2) =
+ TILE_PIXEL(p, j, i, 3) = *src++ * scale;
+ }
+ }
+}
+
+
+
+
+/*** PIPE_FORMAT_L8_UNORM ***/
+
+static void
+l8_get_tile_rgba(const ubyte *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++, src++) {
+ TILE_PIXEL(p, j, i, 0) =
+ TILE_PIXEL(p, j, i, 1) =
+ TILE_PIXEL(p, j, i, 2) = *src;
+ TILE_PIXEL(p, j, i, 3) = 255;
+ }
+ }
+}
+
+
+static void
+l8_put_tile_rgba(ubyte *dst,
+ unsigned w, unsigned h,
+ const uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ unsigned r;
+ r = TILE_PIXEL(p, j, i, 0);
+ *dst++ = (ubyte) r;
+ }
+ }
+}
+
+
+
+/*** PIPE_FORMAT_A8_UNORM ***/
+
+static void
+a8_get_tile_rgba(const ubyte *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++, src++) {
+ TILE_PIXEL(p, j, i, 0) =
+ TILE_PIXEL(p, j, i, 1) =
+ TILE_PIXEL(p, j, i, 2) = 0;
+ TILE_PIXEL(p, j, i, 3) = *src;
+ }
+ }
+}
+
+
+static void
+a8_put_tile_rgba(ubyte *dst,
+ unsigned w, unsigned h,
+ const uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ unsigned a;
+ a = TILE_PIXEL(p, j, i, 3);
+ *dst++ = (ubyte) a;
+ }
+ }
+}
+
+
+
+/*** PIPE_FORMAT_R16_SNORM ***/
+
+static void
+r16_get_tile_rgba(const short *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++, src++) {
+ TILE_PIXEL(p, j, i, 0) = MAX2(src[0] >> 7, 0);
+ TILE_PIXEL(p, j, i, 1) =
+ TILE_PIXEL(p, j, i, 2) = 0;
+ TILE_PIXEL(p, j, i, 3) = 255;
+ }
+ }
+}
+
+
+static void
+r16_put_tile_rgba(short *dst,
+ unsigned w, unsigned h,
+ const uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++, dst++) {
+ dst[0] = TILE_PIXEL(p, j, i, 0) << 7;
+ }
+ }
+}
+
+
+/*** PIPE_FORMAT_R16G16B16A16_SNORM ***/
+
+static void
+r16g16b16a16_get_tile_rgba(const short *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++, src += 4) {
+ TILE_PIXEL(p, j, i, 0) = src[0] >> 8;
+ TILE_PIXEL(p, j, i, 1) = src[1] >> 8;
+ TILE_PIXEL(p, j, i, 2) = src[2] >> 8;
+ TILE_PIXEL(p, j, i, 3) = src[3] >> 8;
+ }
+ }
+}
+
+
+static void
+r16g16b16a16_put_tile_rgba(short *dst,
+ unsigned w, unsigned h,
+ const uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++, dst += 4) {
+ dst[0] = TILE_PIXEL(p, j, i, 0) << 8;
+ dst[1] = TILE_PIXEL(p, j, i, 1) << 8;
+ dst[2] = TILE_PIXEL(p, j, i, 2) << 8;
+ dst[3] = TILE_PIXEL(p, j, i, 3) << 8;
+ }
+ }
+}
+
+
+
+/*** PIPE_FORMAT_I8_UNORM ***/
+
+static void
+i8_get_tile_rgba(const ubyte *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++, src++) {
+ TILE_PIXEL(p, j, i, 0) =
+ TILE_PIXEL(p, j, i, 1) =
+ TILE_PIXEL(p, j, i, 2) =
+ TILE_PIXEL(p, j, i, 3) = *src;
+ }
+ }
+}
+
+
+static void
+i8_put_tile_rgba(ubyte *dst,
+ unsigned w, unsigned h,
+ const uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ unsigned r;
+ r = TILE_PIXEL(p, j, i, 0);
+ *dst++ = (ubyte) r;
+ }
+ }
+}
+
+
+/*** PIPE_FORMAT_A8L8_UNORM ***/
+
+static void
+a8l8_get_tile_rgba(const ushort *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ ushort ra = *src++;
+ TILE_PIXEL(p, j, i, 0) =
+ TILE_PIXEL(p, j, i, 1) =
+ TILE_PIXEL(p, j, i, 2) = ra & 0xff;
+ TILE_PIXEL(p, j, i, 3) = ra >> 8;
+ }
+ }
+}
+
+
+static void
+a8l8_put_tile_rgba(ushort *dst,
+ unsigned w, unsigned h,
+ const uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ unsigned r, a;
+ r = TILE_PIXEL(p, j, i, 0);
+ a = TILE_PIXEL(p, j, i, 3);
+ *dst++ = (a << 8) | r;
+ }
+ }
+}
+
+
+
+
+/*** PIPE_FORMAT_Z32_UNORM ***/
+
+/**
+ * Return each Z value as four floats in [0,1].
+ */
+static void
+z32_get_tile_rgba(const unsigned *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ const double scale = 1.0 / (double) 0xffffffff;
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ TILE_PIXEL(p, j, i, 0) =
+ TILE_PIXEL(p, j, i, 1) =
+ TILE_PIXEL(p, j, i, 2) =
+ TILE_PIXEL(p, j, i, 3) = (float) (*src++ * scale);
+ }
+ }
+}
+
+
+/*** PIPE_FORMAT_S8Z24_UNORM ***/
+
+/**
+ * Return the Z component as four floats in [0,1]. Stencil part ignored.
+ */
+static void
+s8z24_get_tile_rgba(const unsigned *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ const double scale = 1.0 / ((1 << 24) - 1);
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ TILE_PIXEL(p, j, i, 0) =
+ TILE_PIXEL(p, j, i, 1) =
+ TILE_PIXEL(p, j, i, 2) =
+ TILE_PIXEL(p, j, i, 3) = (float) (scale * (*src++ & 0xffffff));
+ }
+ }
+}
+
+
+/*** PIPE_FORMAT_Z24S8_UNORM ***/
+
+/**
+ * Return the Z component as four floats in [0,1]. Stencil part ignored.
+ */
+static void
+z24s8_get_tile_rgba(const unsigned *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ const double scale = 1.0 / ((1 << 24) - 1);
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ TILE_PIXEL(p, j, i, 0) =
+ TILE_PIXEL(p, j, i, 1) =
+ TILE_PIXEL(p, j, i, 2) =
+ TILE_PIXEL(p, j, i, 3) = (float) (scale * (*src++ >> 8));
+ }
+ }
+}
+
+
+/*** PIPE_FORMAT_Z32_FLOAT ***/
+
+/**
+ * Return each Z value as four floats in [0,1].
+ */
+static void
+z32f_get_tile_rgba(const float *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ TILE_PIXEL(p, j, i, 0) =
+ TILE_PIXEL(p, j, i, 1) =
+ TILE_PIXEL(p, j, i, 2) =
+ TILE_PIXEL(p, j, i, 3) = *src++;
+ }
+ }
+}
+
+
+/*** PIPE_FORMAT_YCBCR / PIPE_FORMAT_YCBCR_REV ***/
+
+/**
+ * Convert YCbCr (or YCrCb) to RGBA.
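+ * The constants below are the usual ITU-R BT.601 video-range YCbCr to RGB
+ * coefficients.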
+ */
+static void
+ycbcr_get_tile_rgba(const ushort *src,
+ unsigned w, unsigned h,
+ uint8_t *p,
+ boolean rev)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ /* do two texels at a time */
+ for (j = 0; j < (w & ~1); j += 2, src += 2) {
+ const ushort t0 = src[0];
+ const ushort t1 = src[1];
+ const ubyte y0 = (t0 >> 8) & 0xff; /* luminance */
+ const ubyte y1 = (t1 >> 8) & 0xff; /* luminance */
+ ubyte cb, cr;
+ float r, g, b;
+
+ if (rev) {
+ cb = t1 & 0xff; /* chroma U */
+ cr = t0 & 0xff; /* chroma V */
+ }
+ else {
+ cb = t0 & 0xff; /* chroma U */
+ cr = t1 & 0xff; /* chroma V */
+ }
+
+ /* even pixel: y0,cr,cb */
+ r = 1.164f * (y0-16) + 1.596f * (cr-128);
+ g = 1.164f * (y0-16) - 0.813f * (cr-128) - 0.391f * (cb-128);
+ b = 1.164f * (y0-16) + 2.018f * (cb-128);
+ TILE_PIXEL(p, j, i, 0) = r;
+ TILE_PIXEL(p, j, i, 1) = g;
+ TILE_PIXEL(p, j, i, 2) = b;
+ TILE_PIXEL(p, j, i, 3) = 255;
+
+ /* odd pixel: use y1,cr,cb */
+ r = 1.164f * (y1-16) + 1.596f * (cr-128);
+ g = 1.164f * (y1-16) - 0.813f * (cr-128) - 0.391f * (cb-128);
+ b = 1.164f * (y1-16) + 2.018f * (cb-128);
+ TILE_PIXEL(p, j + 1, i, 0) = r;
+ TILE_PIXEL(p, j + 1, i, 1) = g;
+ TILE_PIXEL(p, j + 1, i, 2) = b;
+ TILE_PIXEL(p, j + 1, i, 3) = 255;
+ }
+ /* do the last texel */
+ if (w & 1) {
+ const ushort t0 = src[0];
+ const ushort t1 = src[1];
+ const ubyte y0 = (t0 >> 8) & 0xff; /* luminance */
+ ubyte cb, cr;
+ float r, g, b;
+
+ if (rev) {
+ cb = t1 & 0xff; /* chroma U */
+ cr = t0 & 0xff; /* chroma V */
+ }
+ else {
+ cb = t0 & 0xff; /* chroma U */
+ cr = t1 & 0xff; /* chroma V */
+ }
+
+ /* even pixel: y0,cr,cb */
+ r = 1.164f * (y0-16) + 1.596f * (cr-128);
+ g = 1.164f * (y0-16) - 0.813f * (cr-128) - 0.391f * (cb-128);
+ b = 1.164f * (y0-16) + 2.018f * (cb-128);
+ TILE_PIXEL(p, j, i, 0) = r;
+ TILE_PIXEL(p, j, i, 1) = g;
+ TILE_PIXEL(p, j, i, 2) = b;
+ TILE_PIXEL(p, j, i, 3) = 255;
+ }
+ }
+}
+
+
+static void
+fake_get_tile_rgba(const ushort *src,
+ unsigned w, unsigned h,
+ uint8_t *p)
+{
+ unsigned i, j;
+
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ TILE_PIXEL(p, j, i, 0) =
+ TILE_PIXEL(p, j, i, 1) =
+ TILE_PIXEL(p, j, i, 2) =
+ TILE_PIXEL(p, j, i, 3) = (i ^ j) & 1 ? 255 : 0;
+ }
+ }
+}
+
+
+static void
+lp_tile_raw_to_rgba_soa(enum pipe_format format,
+ void *src,
+ uint w, uint h,
+ uint8_t *p)
+{
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ a8r8g8b8_get_tile_rgba((unsigned *) src, w, h, p);
+ break;
+ case PIPE_FORMAT_X8R8G8B8_UNORM:
+ x8r8g8b8_get_tile_rgba((unsigned *) src, w, h, p);
+ break;
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ b8g8r8a8_get_tile_rgba((unsigned *) src, w, h, p);
+ break;
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ a1r5g5b5_get_tile_rgba((ushort *) src, w, h, p);
+ break;
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ a4r4g4b4_get_tile_rgba((ushort *) src, w, h, p);
+ break;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ r5g6b5_get_tile_rgba((ushort *) src, w, h, p);
+ break;
+ case PIPE_FORMAT_L8_UNORM:
+ l8_get_tile_rgba((ubyte *) src, w, h, p);
+ break;
+ case PIPE_FORMAT_A8_UNORM:
+ a8_get_tile_rgba((ubyte *) src, w, h, p);
+ break;
+ case PIPE_FORMAT_I8_UNORM:
+ i8_get_tile_rgba((ubyte *) src, w, h, p);
+ break;
+ case PIPE_FORMAT_A8L8_UNORM:
+ a8l8_get_tile_rgba((ushort *) src, w, h, p);
+ break;
+ case PIPE_FORMAT_R16_SNORM:
+ r16_get_tile_rgba((short *) src, w, h, p);
+ break;
+ case PIPE_FORMAT_R16G16B16A16_SNORM:
+ r16g16b16a16_get_tile_rgba((short *) src, w, h, p);
+ break;
+ case PIPE_FORMAT_Z16_UNORM:
+ z16_get_tile_rgba((ushort *) src, w, h, p);
+ break;
+ case PIPE_FORMAT_Z32_UNORM:
+ z32_get_tile_rgba((unsigned *) src, w, h, p);
+ break;
+ case PIPE_FORMAT_S8Z24_UNORM:
+ case PIPE_FORMAT_X8Z24_UNORM:
+ s8z24_get_tile_rgba((unsigned *) src, w, h, p);
+ break;
+ case PIPE_FORMAT_Z24S8_UNORM:
+ case PIPE_FORMAT_Z24X8_UNORM:
+ z24s8_get_tile_rgba((unsigned *) src, w, h, p);
+ break;
+ case PIPE_FORMAT_Z32_FLOAT:
+ z32f_get_tile_rgba((float *) src, w, h, p);
+ break;
+ case PIPE_FORMAT_YCBCR:
+ ycbcr_get_tile_rgba((ushort *) src, w, h, p, FALSE);
+ break;
+ case PIPE_FORMAT_YCBCR_REV:
+ ycbcr_get_tile_rgba((ushort *) src, w, h, p, TRUE);
+ break;
+ default:
+ debug_printf("%s: unsupported format %s\n", __FUNCTION__, pf_name(format));
+ fake_get_tile_rgba(src, w, h, p);
+ }
+}
+
+
+void
+lp_get_tile_rgba_soa(struct pipe_transfer *pt,
+ uint x, uint y,
+ uint8_t *p)
+{
+ uint w = TILE_SIZE, h = TILE_SIZE;
+ void *packed;
+
+ if (pipe_clip_tile(x, y, &w, &h, pt))
+ return;
+
+ packed = MALLOC(pf_get_nblocks(&pt->block, w, h) * pt->block.size);
+
+ if (!packed)
+ return;
+
+ if(pt->format == PIPE_FORMAT_YCBCR || pt->format == PIPE_FORMAT_YCBCR_REV)
+ assert((x & 1) == 0);
+
+ pipe_get_tile_raw(pt, x, y, w, h, packed, 0);
+
+ lp_tile_raw_to_rgba_soa(pt->format, packed, w, h, p);
+
+ FREE(packed);
+}
+
+
+void
+lp_put_tile_rgba_soa(struct pipe_transfer *pt,
+ uint x, uint y,
+ const uint8_t *p)
+{
+ uint w = TILE_SIZE, h = TILE_SIZE;
+ void *packed;
+
+ if (pipe_clip_tile(x, y, &w, &h, pt))
+ return;
+
+ packed = MALLOC(pf_get_nblocks(&pt->block, w, h) * pt->block.size);
+
+ if (!packed)
+ return;
+
+ switch (pt->format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ a8r8g8b8_put_tile_rgba((unsigned *) packed, w, h, p);
+ break;
+ case PIPE_FORMAT_X8R8G8B8_UNORM:
+ x8r8g8b8_put_tile_rgba((unsigned *) packed, w, h, p);
+ break;
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ b8g8r8a8_put_tile_rgba((unsigned *) packed, w, h, p);
+ break;
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ a1r5g5b5_put_tile_rgba((ushort *) packed, w, h, p);
+ break;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ r5g6b5_put_tile_rgba((ushort *) packed, w, h, p);
+ break;
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ assert(0);
+ break;
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ a4r4g4b4_put_tile_rgba((ushort *) packed, w, h, p);
+ break;
+ case PIPE_FORMAT_L8_UNORM:
+ l8_put_tile_rgba((ubyte *) packed, w, h, p);
+ break;
+ case PIPE_FORMAT_A8_UNORM:
+ a8_put_tile_rgba((ubyte *) packed, w, h, p);
+ break;
+ case PIPE_FORMAT_I8_UNORM:
+ i8_put_tile_rgba((ubyte *) packed, w, h, p);
+ break;
+ case PIPE_FORMAT_A8L8_UNORM:
+ a8l8_put_tile_rgba((ushort *) packed, w, h, p);
+ break;
+ case PIPE_FORMAT_R16_SNORM:
+ r16_put_tile_rgba((short *) packed, w, h, p);
+ break;
+ case PIPE_FORMAT_R16G16B16A16_SNORM:
+ r16g16b16a16_put_tile_rgba((short *) packed, w, h, p);
+ break;
+ case PIPE_FORMAT_Z16_UNORM:
+ /*z16_put_tile_rgba((ushort *) packed, w, h, p);*/
+ break;
+ case PIPE_FORMAT_Z32_UNORM:
+ /*z32_put_tile_rgba((unsigned *) packed, w, h, p);*/
+ break;
+ case PIPE_FORMAT_S8Z24_UNORM:
+ case PIPE_FORMAT_X8Z24_UNORM:
+ /*s8z24_put_tile_rgba((unsigned *) packed, w, h, p);*/
+ break;
+ case PIPE_FORMAT_Z24S8_UNORM:
+ case PIPE_FORMAT_Z24X8_UNORM:
+ /*z24s8_put_tile_rgba((unsigned *) packed, w, h, p);*/
+ break;
+ default:
+ debug_printf("%s: unsupported format %s\n", __FUNCTION__, pf_name(pt->format));
+ }
+
+ pipe_put_tile_raw(pt, x, y, w, h, packed, 0);
+
+ FREE(packed);
+}
+
+
diff --git a/src/gallium/drivers/llvmpipe/lp_tile_soa.h b/src/gallium/drivers/llvmpipe/lp_tile_soa.h
new file mode 100644
index 0000000000..3d8c703b73
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_tile_soa.h
@@ -0,0 +1,81 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef LP_TILE_SOA_H
+#define LP_TILE_SOA_H
+
+#include "pipe/p_compiler.h"
+#include "tgsi/tgsi_exec.h" // for NUM_CHANNELS
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct pipe_transfer;
+
+
+/**
+ * Cache tile size (width and height). This needs to be a power of two.
+ */
+#define TILE_SIZE 64
+
+
+#define TILE_VECTOR_HEIGHT 2
+#define TILE_VECTOR_WIDTH 8
+
+extern const unsigned char
+tile_offset[TILE_VECTOR_HEIGHT][TILE_VECTOR_WIDTH];
+
+#define TILE_C_STRIDE (TILE_VECTOR_HEIGHT*TILE_VECTOR_WIDTH)
+#define TILE_X_STRIDE (NUM_CHANNELS*TILE_C_STRIDE)
+#define TILE_Y_STRIDE (TILE_VECTOR_HEIGHT*TILE_SIZE*NUM_CHANNELS)
+
+#define TILE_PIXEL(_p, _x, _y, _c) \
+ ((_p)[((_y)/TILE_VECTOR_HEIGHT)*TILE_Y_STRIDE + \
+ ((_x)/TILE_VECTOR_WIDTH)*TILE_X_STRIDE + \
+ (_c)*TILE_C_STRIDE + \
+ tile_offset[(_y) % TILE_VECTOR_HEIGHT][(_x) % TILE_VECTOR_WIDTH]])
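+
+/*
+ * Illustrative example (not part of the original interface): the red value
+ * of the pixel at (x=3, y=1) within a tile is
+ *
+ *    uint8_t red = TILE_PIXEL(p, 3, 1, 0);
+ *
+ * which resolves to p[tile_offset[1][3]] == p[7], since x/TILE_VECTOR_WIDTH
+ * and y/TILE_VECTOR_HEIGHT are both 0 and the channel index is 0.
+ */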
+
+
+void
+lp_get_tile_rgba_soa(struct pipe_transfer *pt,
+ uint x, uint y,
+ uint8_t *p);
+
+void
+lp_put_tile_rgba_soa(struct pipe_transfer *pt,
+ uint x, uint y,
+ const uint8_t *p);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/gallium/drivers/llvmpipe/lp_winsys.h b/src/gallium/drivers/llvmpipe/lp_winsys.h
new file mode 100644
index 0000000000..595481c2cb
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/lp_winsys.h
@@ -0,0 +1,128 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * llvmpipe public interface.
+ */
+
+
+#ifndef LP_WINSYS_H
+#define LP_WINSYS_H
+
+
+#include "pipe/p_compiler.h" // for boolean
+#include "pipe/p_format.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct pipe_screen;
+struct pipe_context;
+
+
+/**
+ * Opaque pointer.
+ */
+struct llvmpipe_displaytarget;
+
+
+/**
+ * This is the interface that llvmpipe expects any window system
+ * hosting it to implement.
+ *
+ * llvmpipe is for the most part a self-sufficient driver. The only thing it
+ * does not know is how to display a surface.
+ */
+struct llvmpipe_winsys
+{
+ void
+ (*destroy)( struct llvmpipe_winsys *ws );
+
+ boolean
+ (*is_displaytarget_format_supported)( struct llvmpipe_winsys *ws,
+ enum pipe_format format );
+
+ /**
+ * Allocate storage for a render target.
+ *
+ * Surfaces that are meant to be blitted to the front screen (i.e., display
+ * targets) often must be allocated with special characteristics, taken from
+ * special memory pools, or obtained directly from the windowing system.
+ *
+ * This callback is invoked by the pipe_screen when creating a texture marked
+ * with the PIPE_TEXTURE_USAGE_DISPLAY_TARGET flag to get the underlying
+ * storage.
+ */
+ struct llvmpipe_displaytarget *
+ (*displaytarget_create)( struct llvmpipe_winsys *ws,
+ enum pipe_format format,
+ unsigned width, unsigned height,
+ unsigned alignment,
+ unsigned *stride );
+
+ void *
+ (*displaytarget_map)( struct llvmpipe_winsys *ws,
+ struct llvmpipe_displaytarget *dt,
+ unsigned flags );
+
+ void
+ (*displaytarget_unmap)( struct llvmpipe_winsys *ws,
+ struct llvmpipe_displaytarget *dt );
+
+ /**
+ * @sa pipe_screen:flush_frontbuffer.
+ *
+ * This call will likely become asynchronous eventually.
+ */
+ void
+ (*displaytarget_display)( struct llvmpipe_winsys *ws,
+ struct llvmpipe_displaytarget *dt,
+ void *context_private );
+
+ void
+ (*displaytarget_destroy)( struct llvmpipe_winsys *ws,
+ struct llvmpipe_displaytarget *dt );
+};
+
+
+struct pipe_context *
+llvmpipe_create( struct pipe_screen * );
+
+
+struct pipe_screen *
+llvmpipe_create_screen( struct llvmpipe_winsys * );
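+
+/*
+ * Note (illustrative, not normative): a window system embedding llvmpipe
+ * fills in a struct llvmpipe_winsys with its callbacks, passes it to
+ * llvmpipe_create_screen(), and then creates contexts for that screen with
+ * llvmpipe_create().
+ */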
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* LP_WINSYS_H */
diff --git a/src/gallium/drivers/llvmpipe/sp2lp.sh b/src/gallium/drivers/llvmpipe/sp2lp.sh
new file mode 100755
index 0000000000..c45a81ce3c
--- /dev/null
+++ b/src/gallium/drivers/llvmpipe/sp2lp.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+#
+# Port changes from softpipe to llvmpipe. Invoke as
+#
+# sp2lp.sh <commit>
+#
+# Note that this will only affect llvmpipe -- you still need to actually
+# cherry-pick/merge the softpipe changes themselves if they affect directories
+# outside src/gallium/drivers/softpipe
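+#
+# For example: sp2lp.sh HEAD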
+
+git format-patch \
+ --keep-subject \
+ --relative=src/gallium/drivers/softpipe \
+ --src-prefix=a/src/gallium/drivers/llvmpipe/ \
+ --dst-prefix=b/src/gallium/drivers/llvmpipe/ \
+ --stdout "$1^1..$1" \
+| sed \
+ -e 's/\<softpipe\>/llvmpipe/g' \
+ -e 's/\<sp\>/lp/g' \
+ -e 's/\<softpipe_/llvmpipe_/g' \
+ -e 's/\<sp_/lp_/g' \
+ -e 's/\<SP_/LP_/g' \
+ -e 's/\<SOFTPIPE_/LLVMPIPE_/g' \
+ -e 's/\<spt\>/lpt/g' \
+ -e 's/\<sps\>/lps/g' \
+ -e 's/\<spfs\>/lpfs/g' \
+ -e 's/\<sptex\>/lptex/g' \
+ -e 's/\<setup_\(point\|line\|tri\)\>/llvmpipe_\0/g' \
+ -e 's/\<llvmpipe_cached_tile\>/llvmpipe_cached_tex_tile/g' \
+ -e 's/_get_cached_tile_tex\>/_get_cached_tex_tile/g' \
+ -e 's/\<TILE_SIZE\>/TEX_TILE_SIZE/g' \
+ -e 's/\<tile_address\>/tex_tile_address/g' \
+ -e 's/\<tile->data\.color\>/tile->color/g' \
+| patch -p1
diff --git a/src/gallium/drivers/nouveau/Makefile b/src/gallium/drivers/nouveau/Makefile
new file mode 100644
index 0000000000..dbe8a6e7bf
--- /dev/null
+++ b/src/gallium/drivers/nouveau/Makefile
@@ -0,0 +1,8 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = nouveau
+
+C_SOURCES = nouveau_screen.c
+
+include ../../Makefile.template
diff --git a/src/gallium/drivers/nouveau/nouveau_gldefs.h b/src/gallium/drivers/nouveau/nouveau_gldefs.h
new file mode 100644
index 0000000000..ff97aaa9af
--- /dev/null
+++ b/src/gallium/drivers/nouveau/nouveau_gldefs.h
@@ -0,0 +1,196 @@
+#ifndef __NOUVEAU_GLDEFS_H__
+#define __NOUVEAU_GLDEFS_H__
+
+static INLINE unsigned
+nvgl_blend_func(unsigned factor)
+{
+ switch (factor) {
+ case PIPE_BLENDFACTOR_ZERO:
+ return 0x0000;
+ case PIPE_BLENDFACTOR_ONE:
+ return 0x0001;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ return 0x0300;
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ return 0x0301;
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ return 0x0302;
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ return 0x0303;
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ return 0x0304;
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ return 0x0305;
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ return 0x0306;
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ return 0x0307;
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ return 0x0308;
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ return 0x8001;
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ return 0x8002;
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ return 0x8003;
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ return 0x8004;
+ default:
+ return 0x0000;
+ }
+}
+
+static INLINE unsigned
+nvgl_blend_eqn(unsigned func)
+{
+ switch (func) {
+ case PIPE_BLEND_ADD:
+ return 0x8006;
+ case PIPE_BLEND_MIN:
+ return 0x8007;
+ case PIPE_BLEND_MAX:
+ return 0x8008;
+ case PIPE_BLEND_SUBTRACT:
+ return 0x800a;
+ case PIPE_BLEND_REVERSE_SUBTRACT:
+ return 0x800b;
+ default:
+ return 0x8006;
+ }
+}
+
+static INLINE unsigned
+nvgl_logicop_func(unsigned func)
+{
+ switch (func) {
+ case PIPE_LOGICOP_CLEAR:
+ return 0x1500;
+ case PIPE_LOGICOP_NOR:
+ return 0x1508;
+ case PIPE_LOGICOP_AND_INVERTED:
+ return 0x1504;
+ case PIPE_LOGICOP_COPY_INVERTED:
+ return 0x150c;
+ case PIPE_LOGICOP_AND_REVERSE:
+ return 0x1502;
+ case PIPE_LOGICOP_INVERT:
+ return 0x150a;
+ case PIPE_LOGICOP_XOR:
+ return 0x1506;
+ case PIPE_LOGICOP_NAND:
+ return 0x150e;
+ case PIPE_LOGICOP_AND:
+ return 0x1501;
+ case PIPE_LOGICOP_EQUIV:
+ return 0x1509;
+ case PIPE_LOGICOP_NOOP:
+ return 0x1505;
+ case PIPE_LOGICOP_OR_INVERTED:
+ return 0x150d;
+ case PIPE_LOGICOP_COPY:
+ return 0x1503;
+ case PIPE_LOGICOP_OR_REVERSE:
+ return 0x150b;
+ case PIPE_LOGICOP_OR:
+ return 0x1507;
+ case PIPE_LOGICOP_SET:
+ return 0x150f;
+ default:
+ return 0x1505;
+ }
+}
+
+static INLINE unsigned
+nvgl_comparison_op(unsigned op)
+{
+ switch (op) {
+ case PIPE_FUNC_NEVER:
+ return 0x0200;
+ case PIPE_FUNC_LESS:
+ return 0x0201;
+ case PIPE_FUNC_EQUAL:
+ return 0x0202;
+ case PIPE_FUNC_LEQUAL:
+ return 0x0203;
+ case PIPE_FUNC_GREATER:
+ return 0x0204;
+ case PIPE_FUNC_NOTEQUAL:
+ return 0x0205;
+ case PIPE_FUNC_GEQUAL:
+ return 0x0206;
+ case PIPE_FUNC_ALWAYS:
+ return 0x0207;
+ default:
+ return 0x0207;
+ }
+}
+
+static INLINE unsigned
+nvgl_polygon_mode(unsigned mode)
+{
+ switch (mode) {
+ case PIPE_POLYGON_MODE_POINT:
+ return 0x1b00;
+ case PIPE_POLYGON_MODE_LINE:
+ return 0x1b01;
+ case PIPE_POLYGON_MODE_FILL:
+ return 0x1b02;
+ default:
+ return 0x1b02;
+ }
+}
+
+static INLINE unsigned
+nvgl_stencil_op(unsigned op)
+{
+ switch (op) {
+ case PIPE_STENCIL_OP_ZERO:
+ return 0x0000;
+ case PIPE_STENCIL_OP_INVERT:
+ return 0x150a;
+ case PIPE_STENCIL_OP_KEEP:
+ return 0x1e00;
+ case PIPE_STENCIL_OP_REPLACE:
+ return 0x1e01;
+ case PIPE_STENCIL_OP_INCR:
+ return 0x1e02;
+ case PIPE_STENCIL_OP_DECR:
+ return 0x1e03;
+ case PIPE_STENCIL_OP_INCR_WRAP:
+ return 0x8507;
+ case PIPE_STENCIL_OP_DECR_WRAP:
+ return 0x8508;
+ default:
+ return 0x1e00;
+ }
+}
+
+static INLINE unsigned
+nvgl_primitive(unsigned prim) {
+ switch (prim) {
+ case PIPE_PRIM_POINTS:
+ return 0x0001;
+ case PIPE_PRIM_LINES:
+ return 0x0002;
+ case PIPE_PRIM_LINE_LOOP:
+ return 0x0003;
+ case PIPE_PRIM_LINE_STRIP:
+ return 0x0004;
+ case PIPE_PRIM_TRIANGLES:
+ return 0x0005;
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ return 0x0006;
+ case PIPE_PRIM_TRIANGLE_FAN:
+ return 0x0007;
+ case PIPE_PRIM_QUADS:
+ return 0x0008;
+ case PIPE_PRIM_QUAD_STRIP:
+ return 0x0009;
+ case PIPE_PRIM_POLYGON:
+ return 0x000a;
+ default:
+ return 0;
+ }
+}
+
+#endif
diff --git a/src/gallium/drivers/nouveau/nouveau_push.h b/src/gallium/drivers/nouveau/nouveau_push.h
new file mode 100644
index 0000000000..9c235080a5
--- /dev/null
+++ b/src/gallium/drivers/nouveau/nouveau_push.h
@@ -0,0 +1,93 @@
+#ifndef __NOUVEAU_PUSH_H__
+#define __NOUVEAU_PUSH_H__
+
+#include "nouveau/nouveau_winsys.h"
+
+#ifndef NOUVEAU_PUSH_CONTEXT
+#error undefined push context
+#endif
+
+#define OUT_RING(data) do { \
+ NOUVEAU_PUSH_CONTEXT(pc); \
+ (*pc->base.channel->pushbuf->cur++) = (data); \
+} while(0)
+
+#define OUT_RINGp(src,size) do { \
+ NOUVEAU_PUSH_CONTEXT(pc); \
+ memcpy(pc->base.channel->pushbuf->cur, (src), (size) * 4); \
+ pc->base.channel->pushbuf->cur += (size); \
+} while(0)
+
+#define OUT_RINGf(data) do { \
+ union { float v; uint32_t u; } c; \
+ c.v = (data); \
+ OUT_RING(c.u); \
+} while(0)
+
+#define BEGIN_RING(obj,mthd,size) do { \
+ NOUVEAU_PUSH_CONTEXT(pc); \
+ struct nouveau_channel *chan = pc->base.channel; \
+ if (chan->pushbuf->remaining < ((size) + 1)) \
+ nouveau_pushbuf_flush(chan, ((size) + 1)); \
+ OUT_RING((pc->obj->subc << 13) | ((size) << 18) | (mthd)); \
+ chan->pushbuf->remaining -= ((size) + 1); \
+} while(0)
+
+#define BEGIN_RING_NI(obj,mthd,size) do { \
+ BEGIN_RING(obj, (mthd) | 0x40000000, (size)); \
+} while(0)
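+
+/*
+ * Illustrative usage (cf. nv04_init_hwctx() in nv04_context.c, added later
+ * in this patch); "obj" must name a grobj member of the push context:
+ *
+ *    BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_NOP, 1);
+ *    OUT_RING  (0);
+ */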
+
+static inline void
+DO_FIRE_RING(struct nouveau_channel *chan, struct pipe_fence_handle **fence)
+{
+ nouveau_pushbuf_flush(chan, 0);
+ if (fence)
+ *fence = NULL;
+}
+
+#define FIRE_RING(fence) do { \
+ NOUVEAU_PUSH_CONTEXT(pc); \
+ DO_FIRE_RING(pc->base.channel, fence); \
+} while(0)
+
+#define OUT_RELOC(bo,data,flags,vor,tor) do { \
+ NOUVEAU_PUSH_CONTEXT(pc); \
+ struct nouveau_channel *chan = pc->base.channel; \
+ nouveau_pushbuf_emit_reloc(chan, chan->pushbuf->cur++, nouveau_bo(bo), \
+ (data), 0, (flags), (vor), (tor)); \
+} while(0)
+
+/* Raw data + flags depending on FB/TT buffer */
+#define OUT_RELOCd(bo,data,flags,vor,tor) do { \
+ OUT_RELOC((bo), (data), (flags) | NOUVEAU_BO_OR, (vor), (tor)); \
+} while(0)
+
+/* FB/TT object handle */
+#define OUT_RELOCo(bo,flags) do { \
+ OUT_RELOC((bo), 0, (flags) | NOUVEAU_BO_OR, \
+ pc->base.channel->vram->handle, \
+ pc->base.channel->gart->handle); \
+} while(0)
+
+/* Low 32-bits of offset */
+#define OUT_RELOCl(bo,delta,flags) do { \
+ OUT_RELOC((bo), (delta), (flags) | NOUVEAU_BO_LOW, 0, 0); \
+} while(0)
+
+/* High 32-bits of offset */
+#define OUT_RELOCh(bo,delta,flags) do { \
+ OUT_RELOC((bo), (delta), (flags) | NOUVEAU_BO_HIGH, 0, 0); \
+} while(0)
+
+/* A reloc which'll recombine into a NV_DMA_METHOD packet header */
+#define OUT_RELOCm(bo, flags, obj, mthd, size) do { \
+ NOUVEAU_PUSH_CONTEXT(pc); \
+ struct nouveau_channel *chan = pc->base.channel; \
+ if (chan->pushbuf->remaining < ((size) + 1)) \
+ nouveau_pushbuf_flush(chan, ((size) + 1)); \
+ OUT_RELOCd((bo), (pc->obj->subc << 13) | ((size) << 18) | (mthd), \
+ (flags), 0, 0); \
+ chan->pushbuf->remaining -= ((size) + 1); \
+} while(0)
+
+#endif
diff --git a/src/gallium/drivers/nouveau/nouveau_screen.c b/src/gallium/drivers/nouveau/nouveau_screen.c
new file mode 100644
index 0000000000..e4cf91c005
--- /dev/null
+++ b/src/gallium/drivers/nouveau/nouveau_screen.c
@@ -0,0 +1,243 @@
+#include <pipe/p_defines.h>
+#include <pipe/p_screen.h>
+#include <pipe/p_state.h>
+
+#include <util/u_memory.h>
+
+#include <errno.h>
+
+#include "nouveau/nouveau_bo.h"
+#include "nouveau_winsys.h"
+#include "nouveau_screen.h"
+
+static const char *
+nouveau_screen_get_name(struct pipe_screen *pscreen)
+{
+ struct nouveau_device *dev = nouveau_screen(pscreen)->device;
+ static char buffer[128];
+
+ snprintf(buffer, sizeof(buffer), "NV%02X", dev->chipset);
+ return buffer;
+}
+
+static const char *
+nouveau_screen_get_vendor(struct pipe_screen *pscreen)
+{
+ return "nouveau";
+}
+
+static struct pipe_buffer *
+nouveau_screen_bo_skel(struct pipe_screen *pscreen, struct nouveau_bo *bo,
+ unsigned alignment, unsigned usage, unsigned size)
+{
+ struct pipe_buffer *pb;
+
+ pb = CALLOC(1, sizeof(struct pipe_buffer)+sizeof(struct nouveau_bo *));
+ if (!pb) {
+ nouveau_bo_ref(NULL, &bo);
+ return NULL;
+ }
+
+ pipe_reference_init(&pb->reference, 1);
+ pb->screen = pscreen;
+ pb->alignment = alignment;
+ pb->usage = usage;
+ pb->size = size;
+ *(struct nouveau_bo **)(pb + 1) = bo;
+ return pb;
+}
+
+static struct pipe_buffer *
+nouveau_screen_bo_new(struct pipe_screen *pscreen, unsigned alignment,
+ unsigned usage, unsigned size)
+{
+ struct nouveau_device *dev = nouveau_screen(pscreen)->device;
+ struct nouveau_bo *bo = NULL;
+ uint32_t flags = NOUVEAU_BO_MAP;
+ int ret;
+
+ if (usage & NOUVEAU_BUFFER_USAGE_TRANSFER)
+ flags |= NOUVEAU_BO_GART;
+ else
+ if (usage & PIPE_BUFFER_USAGE_VERTEX) {
+ if (pscreen->get_param(pscreen, NOUVEAU_CAP_HW_VTXBUF))
+ flags |= NOUVEAU_BO_GART;
+ } else
+ if (usage & PIPE_BUFFER_USAGE_INDEX) {
+ if (pscreen->get_param(pscreen, NOUVEAU_CAP_HW_IDXBUF))
+ flags |= NOUVEAU_BO_GART;
+ }
+
+ if (usage & PIPE_BUFFER_USAGE_PIXEL) {
+ if (usage & NOUVEAU_BUFFER_USAGE_TEXTURE)
+ flags |= NOUVEAU_BO_GART;
+ if (!(usage & PIPE_BUFFER_USAGE_CPU_READ_WRITE))
+ flags |= NOUVEAU_BO_VRAM;
+
+ if (dev->chipset == 0x50 || dev->chipset >= 0x80) {
+ flags |= NOUVEAU_BO_TILED;
+ if (usage & NOUVEAU_BUFFER_USAGE_ZETA)
+ flags |= NOUVEAU_BO_ZTILE;
+ }
+ }
+
+ ret = nouveau_bo_new(dev, flags, alignment, size, &bo);
+ if (ret)
+ return NULL;
+
+ return nouveau_screen_bo_skel(pscreen, bo, alignment, usage, size);
+}
+
+static struct pipe_buffer *
+nouveau_screen_bo_user(struct pipe_screen *pscreen, void *ptr, unsigned bytes)
+{
+ struct nouveau_device *dev = nouveau_screen(pscreen)->device;
+ struct nouveau_bo *bo = NULL;
+ int ret;
+
+ ret = nouveau_bo_user(dev, ptr, bytes, &bo);
+ if (ret)
+ return NULL;
+
+ return nouveau_screen_bo_skel(pscreen, bo, 0, 0, bytes);
+}
+
+static inline uint32_t
+nouveau_screen_map_flags(unsigned pipe)
+{
+ uint32_t flags = 0;
+
+ if (pipe & PIPE_BUFFER_USAGE_CPU_READ)
+ flags |= NOUVEAU_BO_RD;
+ if (pipe & PIPE_BUFFER_USAGE_CPU_WRITE)
+ flags |= NOUVEAU_BO_WR;
+ if (pipe & PIPE_BUFFER_USAGE_DISCARD)
+ flags |= NOUVEAU_BO_INVAL;
+ if (pipe & PIPE_BUFFER_USAGE_DONTBLOCK)
+ flags |= NOUVEAU_BO_NOWAIT;
+ else
+ if (pipe & 0 /*PIPE_BUFFER_USAGE_UNSYNCHRONIZED*/)
+ flags |= NOUVEAU_BO_NOSYNC;
+
+ return flags;
+}
+
+static void *
+nouveau_screen_bo_map(struct pipe_screen *pscreen, struct pipe_buffer *pb,
+ unsigned usage)
+{
+ struct nouveau_bo *bo = nouveau_bo(pb);
+ int ret;
+
+ ret = nouveau_bo_map(bo, nouveau_screen_map_flags(usage));
+ if (ret) {
+ debug_printf("map failed: %d\n", ret);
+ return NULL;
+ }
+
+ return bo->map;
+}
+
+static void *
+nouveau_screen_bo_map_range(struct pipe_screen *pscreen, struct pipe_buffer *pb,
+ unsigned offset, unsigned length, unsigned usage)
+{
+ struct nouveau_bo *bo = nouveau_bo(pb);
+ uint32_t flags = nouveau_screen_map_flags(usage);
+ int ret;
+
+ ret = nouveau_bo_map_range(bo, offset, length, flags);
+ if (ret) {
+ if (!(flags & NOUVEAU_BO_NOWAIT) || ret != -EBUSY)
+ debug_printf("map_range failed: %d\n", ret);
+ return NULL;
+ }
+
+ return (char *)bo->map - offset; /* why gallium? why? */
+}
+
+static void
+nouveau_screen_bo_map_flush(struct pipe_screen *pscreen, struct pipe_buffer *pb,
+ unsigned offset, unsigned length)
+{
+ struct nouveau_bo *bo = nouveau_bo(pb);
+
+ nouveau_bo_map_flush(bo, offset, length);
+}
+
+static void
+nouveau_screen_bo_unmap(struct pipe_screen *pscreen, struct pipe_buffer *pb)
+{
+ struct nouveau_bo *bo = nouveau_bo(pb);
+
+ nouveau_bo_unmap(bo);
+}
+
+static void
+nouveau_screen_bo_del(struct pipe_buffer *pb)
+{
+ struct nouveau_bo *bo = nouveau_bo(pb);
+
+ nouveau_bo_ref(NULL, &bo);
+ FREE(pb);
+}
+
+static void
+nouveau_screen_fence_ref(struct pipe_screen *pscreen,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *pfence)
+{
+ *ptr = pfence;
+}
+
+static int
+nouveau_screen_fence_signalled(struct pipe_screen *screen,
+ struct pipe_fence_handle *pfence,
+ unsigned flags)
+{
+ return 0;
+}
+
+static int
+nouveau_screen_fence_finish(struct pipe_screen *screen,
+ struct pipe_fence_handle *pfence,
+ unsigned flags)
+{
+ return 0;
+}
+
+int
+nouveau_screen_init(struct nouveau_screen *screen, struct nouveau_device *dev)
+{
+ struct pipe_screen *pscreen = &screen->base;
+ int ret;
+
+ ret = nouveau_channel_alloc(dev, 0xbeef0201, 0xbeef0202,
+ &screen->channel);
+ if (ret)
+ return ret;
+ screen->device = dev;
+
+ pscreen->get_name = nouveau_screen_get_name;
+ pscreen->get_vendor = nouveau_screen_get_vendor;
+
+ pscreen->buffer_create = nouveau_screen_bo_new;
+ pscreen->user_buffer_create = nouveau_screen_bo_user;
+ pscreen->buffer_map = nouveau_screen_bo_map;
+ pscreen->buffer_map_range = nouveau_screen_bo_map_range;
+ pscreen->buffer_flush_mapped_range = nouveau_screen_bo_map_flush;
+ pscreen->buffer_unmap = nouveau_screen_bo_unmap;
+ pscreen->buffer_destroy = nouveau_screen_bo_del;
+
+ pscreen->fence_reference = nouveau_screen_fence_ref;
+ pscreen->fence_signalled = nouveau_screen_fence_signalled;
+ pscreen->fence_finish = nouveau_screen_fence_finish;
+
+ return 0;
+}
+
+void
+nouveau_screen_fini(struct nouveau_screen *screen)
+{
+}
+
diff --git a/src/gallium/drivers/nouveau/nouveau_screen.h b/src/gallium/drivers/nouveau/nouveau_screen.h
new file mode 100644
index 0000000000..ebfc67ad1c
--- /dev/null
+++ b/src/gallium/drivers/nouveau/nouveau_screen.h
@@ -0,0 +1,36 @@
+#ifndef __NOUVEAU_SCREEN_H__
+#define __NOUVEAU_SCREEN_H__
+
+struct nouveau_screen {
+ struct pipe_screen base;
+ struct nouveau_device *device;
+ struct nouveau_channel *channel;
+};
+
+static inline struct nouveau_screen *
+nouveau_screen(struct pipe_screen *pscreen)
+{
+ return (struct nouveau_screen *)pscreen;
+}
+
+static inline struct nouveau_bo *
+nouveau_bo(struct pipe_buffer *pb)
+{
+ return pb ? *(struct nouveau_bo **)(pb + 1) : NULL;
+}
+
+int nouveau_screen_init(struct nouveau_screen *, struct nouveau_device *);
+void nouveau_screen_fini(struct nouveau_screen *);
+
+struct nouveau_miptree {
+ struct pipe_texture base;
+ struct nouveau_bo *bo;
+};
+
+static inline struct nouveau_miptree *
+nouveau_miptree(struct pipe_texture *pt)
+{
+ return (struct nouveau_miptree *)pt;
+}
+
+#endif
diff --git a/src/gallium/drivers/nouveau/nouveau_stateobj.h b/src/gallium/drivers/nouveau/nouveau_stateobj.h
new file mode 100644
index 0000000000..b595405357
--- /dev/null
+++ b/src/gallium/drivers/nouveau/nouveau_stateobj.h
@@ -0,0 +1,159 @@
+#ifndef __NOUVEAU_STATEOBJ_H__
+#define __NOUVEAU_STATEOBJ_H__
+
+#include "util/u_debug.h"
+
+struct nouveau_stateobj_reloc {
+ struct nouveau_bo *bo;
+
+ unsigned offset;
+ unsigned packet;
+
+ unsigned data;
+ unsigned flags;
+ unsigned vor;
+ unsigned tor;
+};
+
+struct nouveau_stateobj {
+ struct pipe_reference reference;
+
+ unsigned *push;
+ struct nouveau_stateobj_reloc *reloc;
+
+ unsigned *cur;
+ unsigned cur_packet;
+ unsigned cur_reloc;
+};
+
+static INLINE struct nouveau_stateobj *
+so_new(unsigned push, unsigned reloc)
+{
+ struct nouveau_stateobj *so;
+
+ so = MALLOC(sizeof(struct nouveau_stateobj));
+ pipe_reference_init(&so->reference, 1);
+ so->push = MALLOC(sizeof(unsigned) * push);
+ so->reloc = MALLOC(sizeof(struct nouveau_stateobj_reloc) * reloc);
+
+ so->cur = so->push;
+ so->cur_reloc = so->cur_packet = 0;
+
+ return so;
+}
+
+static INLINE void
+so_ref(struct nouveau_stateobj *ref, struct nouveau_stateobj **pso)
+{
+ struct nouveau_stateobj *so = *pso;
+ int i;
+
+ if (pipe_reference((struct pipe_reference**)pso, &ref->reference)) {
+ free(so->push);
+ for (i = 0; i < so->cur_reloc; i++)
+ nouveau_bo_ref(NULL, &so->reloc[i].bo);
+ free(so->reloc);
+ free(so);
+ }
+}
+
+static INLINE void
+so_data(struct nouveau_stateobj *so, unsigned data)
+{
+ (*so->cur++) = (data);
+ so->cur_packet += 4;
+}
+
+static INLINE void
+so_datap(struct nouveau_stateobj *so, unsigned *data, unsigned size)
+{
+ so->cur_packet += (4 * size);
+ while (size--)
+ (*so->cur++) = (*data++);
+}
+
+static INLINE void
+so_method(struct nouveau_stateobj *so, struct nouveau_grobj *gr,
+ unsigned mthd, unsigned size)
+{
+ so->cur_packet = (gr->subc << 13) | (1 << 18) | (mthd - 4);
+ so_data(so, (gr->subc << 13) | (size << 18) | mthd);
+}
+
+static INLINE void
+so_reloc(struct nouveau_stateobj *so, struct nouveau_bo *bo,
+ unsigned data, unsigned flags, unsigned vor, unsigned tor)
+{
+ struct nouveau_stateobj_reloc *r = &so->reloc[so->cur_reloc++];
+
+ r->bo = NULL;
+ nouveau_bo_ref(bo, &r->bo);
+ r->offset = so->cur - so->push;
+ r->packet = so->cur_packet;
+ r->data = data;
+ r->flags = flags;
+ r->vor = vor;
+ r->tor = tor;
+ so_data(so, data);
+}
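+
+/*
+ * Illustrative build/emit pattern (grobj, SOME_METHOD and value are
+ * placeholders, not real identifiers):
+ *
+ *    struct nouveau_stateobj *so = so_new(2, 0);
+ *    so_method(so, grobj, SOME_METHOD, 1);
+ *    so_data  (so, value);
+ *    so_emit  (chan, so);
+ *    so_ref   (NULL, &so);
+ */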
+
+static INLINE void
+so_dump(struct nouveau_stateobj *so)
+{
+ unsigned i, nr = so->cur - so->push;
+
+ for (i = 0; i < nr; i++)
+ debug_printf("+0x%04x: 0x%08x\n", i, so->push[i]);
+}
+
+static INLINE void
+so_emit(struct nouveau_channel *chan, struct nouveau_stateobj *so)
+{
+ struct nouveau_pushbuf *pb = chan->pushbuf;
+ unsigned nr, i;
+
+ nr = so->cur - so->push;
+ if (pb->remaining < nr)
+ nouveau_pushbuf_flush(chan, nr);
+ pb->remaining -= nr;
+
+ memcpy(pb->cur, so->push, nr * 4);
+ for (i = 0; i < so->cur_reloc; i++) {
+ struct nouveau_stateobj_reloc *r = &so->reloc[i];
+
+ nouveau_pushbuf_emit_reloc(chan, pb->cur + r->offset,
+ r->bo, r->data, 0, r->flags,
+ r->vor, r->tor);
+ }
+ pb->cur += nr;
+}
+
+static INLINE void
+so_emit_reloc_markers(struct nouveau_channel *chan, struct nouveau_stateobj *so)
+{
+ struct nouveau_pushbuf *pb = chan->pushbuf;
+ unsigned i;
+
+ if (!so)
+ return;
+
+ i = so->cur_reloc << 1;
+ if (pb->remaining < i)
+ nouveau_pushbuf_flush(chan, i);
+ pb->remaining -= i;
+
+ for (i = 0; i < so->cur_reloc; i++) {
+ struct nouveau_stateobj_reloc *r = &so->reloc[i];
+
+ nouveau_pushbuf_emit_reloc(chan, pb->cur++, r->bo, r->packet, 0,
+ (r->flags & (NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_GART |
+ NOUVEAU_BO_RDWR)) |
+ NOUVEAU_BO_DUMMY, 0, 0);
+ nouveau_pushbuf_emit_reloc(chan, pb->cur++, r->bo, r->data, 0,
+ r->flags | NOUVEAU_BO_DUMMY,
+ r->vor, r->tor);
+ }
+}
+
+#endif
diff --git a/src/gallium/drivers/nouveau/nouveau_util.h b/src/gallium/drivers/nouveau/nouveau_util.h
new file mode 100644
index 0000000000..a10114beab
--- /dev/null
+++ b/src/gallium/drivers/nouveau/nouveau_util.h
@@ -0,0 +1,91 @@
+#ifndef __NOUVEAU_UTIL_H__
+#define __NOUVEAU_UTIL_H__
+
+/* Determine how many vertices can be pushed into the command stream.
+ * Where the remaining space isn't large enough to represent all vertices,
+ * split the buffer at primitive boundaries.
+ *
+ * Returns a count of vertices that can be rendered, and an index to
+ * restart drawing at after a flush.
+ */
+static INLINE unsigned
+nouveau_vbuf_split(unsigned remaining, unsigned overhead, unsigned vpp,
+ unsigned mode, unsigned start, unsigned count,
+ unsigned *restart)
+{
+ int max, adj = 0;
+
+ max = remaining - overhead;
+ if (max < 0)
+ return 0;
+
+ max *= vpp;
+ if (max >= count)
+ return count;
+
+ switch (mode) {
+ case PIPE_PRIM_POINTS:
+ break;
+ case PIPE_PRIM_LINES:
+ max = max & ~1;
+ break;
+ case PIPE_PRIM_TRIANGLES:
+ max = max - (max % 3);
+ break;
+ case PIPE_PRIM_QUADS:
+ max = max & ~3;
+ break;
+ case PIPE_PRIM_LINE_LOOP:
+ case PIPE_PRIM_LINE_STRIP:
+ if (max < 2)
+ max = 0;
+ adj = 1;
+ break;
+ case PIPE_PRIM_POLYGON:
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ case PIPE_PRIM_TRIANGLE_FAN:
+ if (max < 3)
+ max = 0;
+ adj = 2;
+ break;
+ case PIPE_PRIM_QUAD_STRIP:
+ if (max < 4)
+ max = 0;
+ adj = 3;
+ break;
+ default:
+ assert(0);
+ }
+
+ *restart = start + max - adj;
+ return max;
+}
+
+/* Integer base-2 logarithm, rounded towards zero. */
+static INLINE unsigned log2i(unsigned i)
+{
+ unsigned r = 0;
+
+ if (i & 0xffff0000) {
+ i >>= 16;
+ r += 16;
+ }
+ if (i & 0x0000ff00) {
+ i >>= 8;
+ r += 8;
+ }
+ if (i & 0x000000f0) {
+ i >>= 4;
+ r += 4;
+ }
+ if (i & 0x0000000c) {
+ i >>= 2;
+ r += 2;
+ }
+ if (i & 0x00000002) {
+ r += 1;
+ }
+ return r;
+}
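+
+/* For example (illustrative): log2i(1) == 0, log2i(1000) == 9. */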
+
+#endif
diff --git a/src/gallium/drivers/nouveau/nouveau_winsys.h b/src/gallium/drivers/nouveau/nouveau_winsys.h
new file mode 100644
index 0000000000..42c77e5e77
--- /dev/null
+++ b/src/gallium/drivers/nouveau/nouveau_winsys.h
@@ -0,0 +1,62 @@
+#ifndef NOUVEAU_WINSYS_H
+#define NOUVEAU_WINSYS_H
+
+#include <stdint.h>
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_defines.h"
+
+#include "nouveau/nouveau_bo.h"
+#include "nouveau/nouveau_channel.h"
+#include "nouveau/nouveau_class.h"
+#include "nouveau/nouveau_device.h"
+#include "nouveau/nouveau_grobj.h"
+#include "nouveau/nouveau_notifier.h"
+#include "nouveau/nouveau_resource.h"
+#include "nouveau/nouveau_pushbuf.h"
+
+#define NOUVEAU_CAP_HW_VTXBUF (0xbeef0000)
+#define NOUVEAU_CAP_HW_IDXBUF (0xbeef0001)
+
+#define NOUVEAU_TEXTURE_USAGE_LINEAR (1 << 16)
+
+#define NOUVEAU_BUFFER_USAGE_TEXTURE (1 << 16)
+#define NOUVEAU_BUFFER_USAGE_ZETA (1 << 17)
+#define NOUVEAU_BUFFER_USAGE_TRANSFER (1 << 18)
+
+extern struct pipe_screen *
+nv04_screen_create(struct pipe_winsys *ws, struct nouveau_device *);
+
+extern struct pipe_context *
+nv04_create(struct pipe_screen *, unsigned pctx_id);
+
+extern struct pipe_screen *
+nv10_screen_create(struct pipe_winsys *ws, struct nouveau_device *);
+
+extern struct pipe_context *
+nv10_create(struct pipe_screen *, unsigned pctx_id);
+
+extern struct pipe_screen *
+nv20_screen_create(struct pipe_winsys *ws, struct nouveau_device *);
+
+extern struct pipe_context *
+nv20_create(struct pipe_screen *, unsigned pctx_id);
+
+extern struct pipe_screen *
+nv30_screen_create(struct pipe_winsys *ws, struct nouveau_device *);
+
+extern struct pipe_context *
+nv30_create(struct pipe_screen *, unsigned pctx_id);
+
+extern struct pipe_screen *
+nv40_screen_create(struct pipe_winsys *ws, struct nouveau_device *);
+
+extern struct pipe_context *
+nv40_create(struct pipe_screen *, unsigned pctx_id);
+
+extern struct pipe_screen *
+nv50_screen_create(struct pipe_winsys *ws, struct nouveau_device *);
+
+extern struct pipe_context *
+nv50_create(struct pipe_screen *, unsigned pctx_id);
+
+#endif
diff --git a/src/gallium/drivers/nv04/Makefile b/src/gallium/drivers/nv04/Makefile
new file mode 100644
index 0000000000..7c14bacb1d
--- /dev/null
+++ b/src/gallium/drivers/nv04/Makefile
@@ -0,0 +1,21 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = nv04
+
+C_SOURCES = \
+ nv04_surface_2d.c \
+ nv04_clear.c \
+ nv04_context.c \
+ nv04_fragprog.c \
+ nv04_fragtex.c \
+ nv04_miptree.c \
+ nv04_prim_vbuf.c \
+ nv04_screen.c \
+ nv04_state.c \
+ nv04_state_emit.c \
+ nv04_surface.c \
+ nv04_transfer.c \
+ nv04_vbo.c
+
+include ../../Makefile.template
diff --git a/src/gallium/drivers/nv04/nv04_clear.c b/src/gallium/drivers/nv04/nv04_clear.c
new file mode 100644
index 0000000000..01cacd36fe
--- /dev/null
+++ b/src/gallium/drivers/nv04/nv04_clear.c
@@ -0,0 +1,12 @@
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+
+#include "nv04_context.h"
+
+void
+nv04_clear(struct pipe_context *pipe, struct pipe_surface *ps,
+ unsigned clearValue)
+{
+ pipe->surface_fill(pipe, ps, 0, 0, ps->width, ps->height, clearValue);
+}
diff --git a/src/gallium/drivers/nv04/nv04_context.c b/src/gallium/drivers/nv04/nv04_context.c
new file mode 100644
index 0000000000..17166c9f51
--- /dev/null
+++ b/src/gallium/drivers/nv04/nv04_context.c
@@ -0,0 +1,134 @@
+#include "draw/draw_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/internal/p_winsys_screen.h"
+
+#include "nv04_context.h"
+#include "nv04_screen.h"
+
+static void
+nv04_flush(struct pipe_context *pipe, unsigned flags,
+ struct pipe_fence_handle **fence)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+
+ draw_flush(nv04->draw);
+
+ FIRE_RING(fence);
+}
+
+static void
+nv04_destroy(struct pipe_context *pipe)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+
+ if (nv04->draw)
+ draw_destroy(nv04->draw);
+
+ FREE(nv04);
+}
+
+static void
+nv04_set_edgeflags(struct pipe_context *pipe, const unsigned *bitfield)
+{
+}
+
+static boolean
+nv04_init_hwctx(struct nv04_context *nv04)
+{
+ // requires a valid handle
+// BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_NOTIFY, 1);
+// OUT_RING(0);
+ BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_NOP, 1);
+ OUT_RING(0);
+
+ BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_CONTROL, 1);
+ OUT_RING(0x40182800);
+// OUT_RING(1<<20/*no cull*/);
+ BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_BLEND, 1);
+// OUT_RING(0x24|(1<<6)|(1<<8));
+ OUT_RING(0x120001a4);
+ BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_FORMAT, 1);
+ OUT_RING(0x332213a1);
+ BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_FILTER, 1);
+ OUT_RING(0x11001010);
+ BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_COLORKEY, 1);
+ OUT_RING(0x0);
+// BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_OFFSET, 1);
+// OUT_RING(SCREEN_OFFSET);
+ BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_FOGCOLOR, 1);
+ OUT_RING(0xff000000);
+
+
+
+ FIRE_RING (NULL);
+ return TRUE;
+}
+
+static unsigned int
+nv04_is_texture_referenced( struct pipe_context *pipe,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+static unsigned int
+nv04_is_buffer_referenced( struct pipe_context *pipe,
+ struct pipe_buffer *buf)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+
+struct pipe_context *
+nv04_create(struct pipe_screen *pscreen, unsigned pctx_id)
+{
+ struct nv04_screen *screen = nv04_screen(pscreen);
+ struct pipe_winsys *ws = pscreen->winsys;
+ struct nv04_context *nv04;
+ struct nouveau_winsys *nvws = screen->nvws;
+
+ nv04 = CALLOC(1, sizeof(struct nv04_context));
+ if (!nv04)
+ return NULL;
+ nv04->screen = screen;
+ nv04->pctx_id = pctx_id;
+
+ nv04->nvws = nvws;
+
+ nv04->pipe.winsys = ws;
+ nv04->pipe.screen = pscreen;
+ nv04->pipe.destroy = nv04_destroy;
+ nv04->pipe.set_edgeflags = nv04_set_edgeflags;
+ nv04->pipe.draw_arrays = nv04_draw_arrays;
+ nv04->pipe.draw_elements = nv04_draw_elements;
+ nv04->pipe.clear = nv04_clear;
+ nv04->pipe.flush = nv04_flush;
+
+ nv04->pipe.is_texture_referenced = nv04_is_texture_referenced;
+ nv04->pipe.is_buffer_referenced = nv04_is_buffer_referenced;
+
+ nv04_init_surface_functions(nv04);
+ nv04_init_state_functions(nv04);
+
+ nv04->draw = draw_create();
+ assert(nv04->draw);
+ draw_wide_point_threshold(nv04->draw, 0.0);
+ draw_wide_line_threshold(nv04->draw, 0.0);
+ draw_enable_line_stipple(nv04->draw, FALSE);
+ draw_enable_point_sprites(nv04->draw, FALSE);
+ draw_set_rasterize_stage(nv04->draw, nv04_draw_vbuf_stage(nv04));
+
+ nv04_init_hwctx(nv04);
+
+ return &nv04->pipe;
+}
+
diff --git a/src/gallium/drivers/nv04/nv04_context.h b/src/gallium/drivers/nv04/nv04_context.h
new file mode 100644
index 0000000000..2842b2c90d
--- /dev/null
+++ b/src/gallium/drivers/nv04/nv04_context.h
@@ -0,0 +1,151 @@
+#ifndef __NV04_CONTEXT_H__
+#define __NV04_CONTEXT_H__
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_compiler.h"
+
+#include "util/u_memory.h"
+#include "util/u_math.h"
+
+#include "draw/draw_vertex.h"
+
+#include "nouveau/nouveau_winsys.h"
+#include "nouveau/nouveau_gldefs.h"
+
+#define NOUVEAU_PUSH_CONTEXT(ctx) \
+ struct nv04_screen *ctx = nv04->screen
+#include "nouveau/nouveau_push.h"
+
+#include "nv04_state.h"
+
+#define NOUVEAU_ERR(fmt, args...) \
+ fprintf(stderr, "%s:%d - "fmt, __func__, __LINE__, ##args);
+#define NOUVEAU_MSG(fmt, args...) \
+ fprintf(stderr, "nouveau: "fmt, ##args);
+
+#include "nv04_screen.h"
+
+#define NV04_NEW_VERTPROG (1 << 1)
+#define NV04_NEW_FRAGPROG (1 << 2)
+#define NV04_NEW_BLEND (1 << 3)
+#define NV04_NEW_RAST (1 << 4)
+#define NV04_NEW_CONTROL (1 << 5)
+#define NV04_NEW_VIEWPORT (1 << 6)
+#define NV04_NEW_SAMPLER (1 << 7)
+#define NV04_NEW_FRAMEBUFFER (1 << 8)
+#define NV04_NEW_VTXARRAYS (1 << 9)
+
+struct nv04_context {
+ struct pipe_context pipe;
+
+ struct nouveau_winsys *nvws;
+ struct nv04_screen *screen;
+ unsigned pctx_id;
+
+ struct draw_context *draw;
+
+ int chipset;
+ struct nouveau_notifier *sync;
+
+ uint32_t dirty;
+
+ struct nv04_blend_state *blend;
+ struct nv04_sampler_state *sampler[PIPE_MAX_SAMPLERS];
+ struct nv04_fragtex_state fragtex;
+ struct nv04_rasterizer_state *rast;
+ struct nv04_depth_stencil_alpha_state *dsa;
+
+ struct nv04_miptree *tex_miptree[PIPE_MAX_SAMPLERS];
+ unsigned dirty_samplers;
+ unsigned fp_samplers;
+ unsigned vp_samplers;
+
+ uint32_t rt_enable;
+ struct pipe_framebuffer_state *framebuffer;
+ struct pipe_surface *rt;
+ struct pipe_surface *zeta;
+
+ struct {
+ struct pipe_buffer *buffer;
+ uint32_t format;
+ } tex[16];
+
+ unsigned vb_enable;
+ struct {
+ struct pipe_buffer *buffer;
+ unsigned delta;
+ } vb[16];
+
+ float *constbuf[PIPE_SHADER_TYPES][32][4];
+ unsigned constbuf_nr[PIPE_SHADER_TYPES];
+
+ struct vertex_info vertex_info;
+ struct {
+
+ struct nouveau_resource *exec_heap;
+ struct nouveau_resource *data_heap;
+
+ struct nv04_vertex_program *active;
+
+ struct nv04_vertex_program *current;
+ struct pipe_buffer *constant_buf;
+ } vertprog;
+
+ struct {
+ struct nv04_fragment_program *active;
+
+ struct nv04_fragment_program *current;
+ struct pipe_buffer *constant_buf;
+ } fragprog;
+
+ struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
+ struct pipe_vertex_element vtxelt[PIPE_MAX_ATTRIBS];
+
+ struct pipe_viewport_state viewport;
+};
+
+static INLINE struct nv04_context *
+nv04_context(struct pipe_context *pipe)
+{
+ return (struct nv04_context *)pipe;
+}
+
+extern void nv04_init_state_functions(struct nv04_context *nv04);
+extern void nv04_init_surface_functions(struct nv04_context *nv04);
+extern void nv04_screen_init_miptree_functions(struct pipe_screen *screen);
+
+/* nv04_clear.c */
+extern void nv04_clear(struct pipe_context *pipe, struct pipe_surface *ps,
+ unsigned clearValue);
+
+/* nv04_draw.c */
+extern struct draw_stage *nv04_draw_render_stage(struct nv04_context *nv04);
+
+/* nv04_fragprog.c */
+extern void nv04_fragprog_bind(struct nv04_context *,
+ struct nv04_fragment_program *);
+extern void nv04_fragprog_destroy(struct nv04_context *,
+ struct nv04_fragment_program *);
+
+/* nv04_fragtex.c */
+extern void nv04_fragtex_bind(struct nv04_context *);
+
+/* nv04_prim_vbuf.c */
+struct draw_stage *nv04_draw_vbuf_stage( struct nv04_context *nv04 );
+
+/* nv04_state.c and friends */
+extern void nv04_emit_hw_state(struct nv04_context *nv04);
+extern void nv04_state_tex_update(struct nv04_context *nv04);
+
+/* nv04_vbo.c */
+extern boolean nv04_draw_arrays(struct pipe_context *, unsigned mode,
+ unsigned start, unsigned count);
+extern boolean nv04_draw_elements( struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned prim, unsigned start, unsigned count);
+
+
+#endif
diff --git a/src/gallium/drivers/nv04/nv04_fragprog.c b/src/gallium/drivers/nv04/nv04_fragprog.c
new file mode 100644
index 0000000000..8a2af41fe0
--- /dev/null
+++ b/src/gallium/drivers/nv04/nv04_fragprog.c
@@ -0,0 +1,21 @@
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_util.h"
+
+#include "nv04_context.h"
+
+void
+nv04_fragprog_bind(struct nv04_context *nv04, struct nv04_fragment_program *fp)
+{
+}
+
+void
+nv04_fragprog_destroy(struct nv04_context *nv04,
+ struct nv04_fragment_program *fp)
+{
+}
+
diff --git a/src/gallium/drivers/nv04/nv04_fragtex.c b/src/gallium/drivers/nv04/nv04_fragtex.c
new file mode 100644
index 0000000000..21f990fd53
--- /dev/null
+++ b/src/gallium/drivers/nv04/nv04_fragtex.c
@@ -0,0 +1,73 @@
+#include "nv04_context.h"
+#include "nouveau/nouveau_util.h"
+
+#define _(m,tf) \
+{ \
+ PIPE_FORMAT_##m, \
+ NV04_DX5_TEXTURED_TRIANGLE_FORMAT_COLOR_##tf, \
+}
+
+struct nv04_texture_format {
+ uint pipe;
+ int format;
+};
+
+static struct nv04_texture_format
+nv04_texture_formats[] = {
+ _(A8R8G8B8_UNORM, A8R8G8B8),
+ _(X8R8G8B8_UNORM, X8R8G8B8),
+ _(A1R5G5B5_UNORM, A1R5G5B5),
+ _(A4R4G4B4_UNORM, A4R4G4B4),
+ _(L8_UNORM, Y8 ),
+ _(A8_UNORM, Y8 ),
+};
+
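+/* Translate a gallium pipe format into the DX5_TEXTURED_TRIANGLE colour
+ * format; returns 0 (and logs an error) for formats missing from the
+ * table above.
+ */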
+static uint32_t
+nv04_fragtex_format(uint pipe_format)
+{
+ struct nv04_texture_format *tf = nv04_texture_formats;
+ int i;
+
+ for (i=0; i< sizeof(nv04_texture_formats)/sizeof(nv04_texture_formats[0]); i++) {
+ if (tf->pipe == pipe_format)
+ return tf->format;
+ tf++;
+ }
+
+ NOUVEAU_ERR("unknown texture format %s\n", pf_name(pipe_format));
+ return 0;
+}
+
+
+static void
+nv04_fragtex_build(struct nv04_context *nv04, int unit)
+{
+ struct nv04_miptree *nv04mt = nv04->tex_miptree[unit];
+ struct pipe_texture *pt = &nv04mt->base;
+
+ switch (pt->target) {
+ case PIPE_TEXTURE_2D:
+ break;
+ default:
+ NOUVEAU_ERR("Unknown target %d\n", pt->target);
+ return;
+ }
+
+ nv04->fragtex.format = NV04_DX5_TEXTURED_TRIANGLE_FORMAT_ORIGIN_ZOH_CORNER
+ | NV04_DX5_TEXTURED_TRIANGLE_FORMAT_ORIGIN_FOH_CORNER
+ | nv04_fragtex_format(pt->format)
+ | ( (pt->last_level + 1) << NV04_DX5_TEXTURED_TRIANGLE_FORMAT_MIPMAP_LEVELS_SHIFT )
+ | ( log2i(pt->width[0]) << NV04_DX5_TEXTURED_TRIANGLE_FORMAT_BASE_SIZE_U_SHIFT )
+ | ( log2i(pt->height[0]) << NV04_DX5_TEXTURED_TRIANGLE_FORMAT_BASE_SIZE_V_SHIFT )
+ | NV04_DX5_TEXTURED_TRIANGLE_FORMAT_ADDRESSU_CLAMP_TO_EDGE
+ | NV04_DX5_TEXTURED_TRIANGLE_FORMAT_ADDRESSV_CLAMP_TO_EDGE
+ ;
+}
+
+
+void
+nv04_fragtex_bind(struct nv04_context *nv04)
+{
+ nv04_fragtex_build(nv04, 0);
+}
+
diff --git a/src/gallium/drivers/nv04/nv04_miptree.c b/src/gallium/drivers/nv04/nv04_miptree.c
new file mode 100644
index 0000000000..93f752faec
--- /dev/null
+++ b/src/gallium/drivers/nv04/nv04_miptree.c
@@ -0,0 +1,153 @@
+#include "pipe/p_state.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+
+#include "nv04_context.h"
+#include "nv04_screen.h"
+
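+/* Fill in per-level dimensions, pitches (aligned to 64 bytes) and image
+ * offsets, and compute the total buffer size needed for the miptree.
+ */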
+static void
+nv04_miptree_layout(struct nv04_miptree *nv04mt)
+{
+ struct pipe_texture *pt = &nv04mt->base;
+ uint width = pt->width[0], height = pt->height[0];
+ uint offset = 0;
+ int nr_faces, l;
+
+ nr_faces = 1;
+
+ for (l = 0; l <= pt->last_level; l++) {
+ pt->width[l] = width;
+ pt->height[l] = height;
+
+ pt->nblocksx[l] = pf_get_nblocksx(&pt->block, width);
+ pt->nblocksy[l] = pf_get_nblocksy(&pt->block, height);
+
+		nv04mt->level[l].pitch = pt->width[0] * pt->block.size;
+ nv04mt->level[l].pitch = (nv04mt->level[l].pitch + 63) & ~63;
+
+ width = MAX2(1, width >> 1);
+ height = MAX2(1, height >> 1);
+ }
+
+	for (l = 0; l <= pt->last_level; l++) {
+		nv04mt->level[l].image_offset =
+			CALLOC(nr_faces, sizeof(unsigned));
+		nv04mt->level[l].image_offset[0] = offset;
+		offset += nv04mt->level[l].pitch * pt->height[l];
+	}
+
+ nv04mt->total_size = offset;
+}
+
+static struct pipe_texture *
+nv04_miptree_create(struct pipe_screen *pscreen, const struct pipe_texture *pt)
+{
+ struct nv04_miptree *mt;
+
+ mt = MALLOC(sizeof(struct nv04_miptree));
+ if (!mt)
+ return NULL;
+ mt->base = *pt;
+ pipe_reference_init(&mt->base.reference, 1);
+ mt->base.screen = pscreen;
+
+ //mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+
+ nv04_miptree_layout(mt);
+
+ mt->buffer = pscreen->buffer_create(pscreen, 256, PIPE_BUFFER_USAGE_PIXEL |
+ NOUVEAU_BUFFER_USAGE_TEXTURE,
+ mt->total_size);
+ if (!mt->buffer) {
+ printf("failed %d byte alloc\n",mt->total_size);
+ FREE(mt);
+ return NULL;
+ }
+
+ return &mt->base;
+}
+
+static struct pipe_texture *
+nv04_miptree_blanket(struct pipe_screen *pscreen, const struct pipe_texture *pt,
+ const unsigned *stride, struct pipe_buffer *pb)
+{
+ struct nv04_miptree *mt;
+
+ /* Only supports 2D, non-mipmapped textures for the moment */
+ if (pt->target != PIPE_TEXTURE_2D || pt->last_level != 0 ||
+ pt->depth[0] != 1)
+ return NULL;
+
+ mt = CALLOC_STRUCT(nv04_miptree);
+ if (!mt)
+ return NULL;
+
+ mt->base = *pt;
+ pipe_reference_init(&mt->base.reference, 1);
+ mt->base.screen = pscreen;
+ mt->level[0].pitch = stride[0];
+ mt->level[0].image_offset = CALLOC(1, sizeof(unsigned));
+
+ pipe_buffer_reference(&mt->buffer, pb);
+ return &mt->base;
+}
+
+static void
+nv04_miptree_destroy(struct pipe_texture *pt)
+{
+ struct nv04_miptree *mt = (struct nv04_miptree *)pt;
+ int l;
+
+ pipe_buffer_reference(&mt->buffer, NULL);
+ for (l = 0; l <= pt->last_level; l++) {
+ if (mt->level[l].image_offset)
+ FREE(mt->level[l].image_offset);
+ }
+
+ FREE(mt);
+}
+
+static struct pipe_surface *
+nv04_miptree_surface_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ unsigned flags)
+{
+ struct nv04_miptree *nv04mt = (struct nv04_miptree *)pt;
+ struct nv04_surface *ns;
+
+ ns = CALLOC_STRUCT(nv04_surface);
+ if (!ns)
+ return NULL;
+ pipe_texture_reference(&ns->base.texture, pt);
+ ns->base.format = pt->format;
+ ns->base.width = pt->width[level];
+ ns->base.height = pt->height[level];
+ ns->base.usage = flags;
+ pipe_reference_init(&ns->base.reference, 1);
+ ns->base.face = face;
+ ns->base.level = level;
+ ns->base.zslice = zslice;
+ ns->pitch = nv04mt->level[level].pitch;
+
+	ns->base.offset = nv04mt->level[level].image_offset[0];
+
+ return &ns->base;
+}
+
+static void
+nv04_miptree_surface_del(struct pipe_surface *ps)
+{
+ pipe_texture_reference(&ps->texture, NULL);
+ FREE(ps);
+}
+
+void
+nv04_screen_init_miptree_functions(struct pipe_screen *pscreen)
+{
+ pscreen->texture_create = nv04_miptree_create;
+ pscreen->texture_blanket = nv04_miptree_blanket;
+ pscreen->texture_destroy = nv04_miptree_destroy;
+ pscreen->get_tex_surface = nv04_miptree_surface_new;
+ pscreen->tex_surface_destroy = nv04_miptree_surface_del;
+}
+
diff --git a/src/gallium/drivers/nv04/nv04_prim_vbuf.c b/src/gallium/drivers/nv04/nv04_prim_vbuf.c
new file mode 100644
index 0000000000..f6458232ae
--- /dev/null
+++ b/src/gallium/drivers/nv04/nv04_prim_vbuf.c
@@ -0,0 +1,321 @@
+
+#include "util/u_debug.h"
+#include "pipe/p_inlines.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_compiler.h"
+
+#include "draw/draw_vbuf.h"
+
+#include "nv04_context.h"
+#include "nv04_state.h"
+
+#define VERTEX_SIZE 40
+#define VERTEX_BUFFER_SIZE (4096*VERTEX_SIZE) // 4096 vertices of 40 bytes each
+
+/**
+ * Primitive renderer for nv04.
+ */
+struct nv04_vbuf_render {
+ struct vbuf_render base;
+
+ struct nv04_context *nv04;
+
+ /** Vertex buffer */
+ unsigned char* buffer;
+
+ /** Vertex size in bytes */
+ unsigned vertex_size;
+
+ /** Current primitive */
+ unsigned prim;
+};
+
+
+/**
+ * Basically a cast wrapper.
+ */
+static INLINE struct nv04_vbuf_render *
+nv04_vbuf_render( struct vbuf_render *render )
+{
+ assert(render);
+ return (struct nv04_vbuf_render *)render;
+}
+
+
+static const struct vertex_info *
+nv04_vbuf_render_get_vertex_info( struct vbuf_render *render )
+{
+ struct nv04_vbuf_render *nv04_render = nv04_vbuf_render(render);
+ struct nv04_context *nv04 = nv04_render->nv04;
+ return &nv04->vertex_info;
+}
+
+
+static boolean
+nv04_vbuf_render_allocate_vertices( struct vbuf_render *render,
+ ushort vertex_size,
+ ushort nr_vertices )
+{
+ struct nv04_vbuf_render *nv04_render = nv04_vbuf_render(render);
+
+ nv04_render->buffer = (unsigned char*) MALLOC(VERTEX_BUFFER_SIZE);
+	assert(nv04_render->buffer);
+
+ return nv04_render->buffer ? TRUE : FALSE;
+}
+
+static void *
+nv04_vbuf_render_map_vertices( struct vbuf_render *render )
+{
+ struct nv04_vbuf_render *nv04_render = nv04_vbuf_render(render);
+ return nv04_render->buffer;
+}
+
+static void
+nv04_vbuf_render_unmap_vertices( struct vbuf_render *render,
+ ushort min_index,
+ ushort max_index )
+{
+}
+
+static boolean
+nv04_vbuf_render_set_primitive( struct vbuf_render *render,
+ unsigned prim )
+{
+ struct nv04_vbuf_render *nv04_render = nv04_vbuf_render(render);
+
+ if (prim <= PIPE_PRIM_LINE_STRIP)
+ return FALSE;
+
+ nv04_render->prim = prim;
+ return TRUE;
+}
+
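+/* Emit six TL vertices into slots 0xA-0xF; the 49th word lands on
+ * TLVERTEX_DRAWPRIMITIVE(0) and packs the two triangles' vertex slots
+ * (0xFEDCBA).
+ */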
+static INLINE void
+nv04_2triangles(struct nv04_context *nv04, unsigned char *buffer,
+		ushort v0, ushort v1, ushort v2,
+		ushort v3, ushort v4, ushort v5)
+{
+ BEGIN_RING(fahrenheit,NV04_DX5_TEXTURED_TRIANGLE_TLVERTEX_SX(0xA),49);
+ OUT_RINGp(buffer + VERTEX_SIZE * v0,8);
+ OUT_RINGp(buffer + VERTEX_SIZE * v1,8);
+ OUT_RINGp(buffer + VERTEX_SIZE * v2,8);
+ OUT_RINGp(buffer + VERTEX_SIZE * v3,8);
+ OUT_RINGp(buffer + VERTEX_SIZE * v4,8);
+ OUT_RINGp(buffer + VERTEX_SIZE * v5,8);
+ OUT_RING(0xFEDCBA);
+}
+
+static INLINE void
+nv04_1triangle(struct nv04_context *nv04, unsigned char *buffer,
+	       ushort v0, ushort v1, ushort v2)
+{
+ BEGIN_RING(fahrenheit,NV04_DX5_TEXTURED_TRIANGLE_TLVERTEX_SX(0xD),25);
+ OUT_RINGp(buffer + VERTEX_SIZE * v0,8);
+ OUT_RINGp(buffer + VERTEX_SIZE * v1,8);
+ OUT_RINGp(buffer + VERTEX_SIZE * v2,8);
+ OUT_RING(0xFED);
+}
+
+static INLINE void
+nv04_1quad(struct nv04_context *nv04, unsigned char *buffer,
+	   ushort v0, ushort v1, ushort v2, ushort v3)
+{
+ BEGIN_RING(fahrenheit,NV04_DX5_TEXTURED_TRIANGLE_TLVERTEX_SX(0xC),33);
+ OUT_RINGp(buffer + VERTEX_SIZE * v0,8);
+ OUT_RINGp(buffer + VERTEX_SIZE * v1,8);
+ OUT_RINGp(buffer + VERTEX_SIZE * v2,8);
+ OUT_RINGp(buffer + VERTEX_SIZE * v3,8);
+ OUT_RING(0xFECEDC);
+}
+
+static void
+nv04_vbuf_render_triangles_elts(struct nv04_vbuf_render *render,
+				const ushort *indices, uint nr_indices)
+{
+ unsigned char* buffer = render->buffer;
+ struct nv04_context* nv04 = render->nv04;
+ int i;
+
+	for (i = 0; i + 5 < nr_indices; i += 6)
+ nv04_2triangles(nv04,
+ buffer,
+ indices[i+0],
+ indices[i+1],
+ indices[i+2],
+ indices[i+3],
+ indices[i+4],
+ indices[i+5]
+ );
+ if (i != nr_indices)
+ {
+ nv04_1triangle(nv04,
+ buffer,
+ indices[i+0],
+ indices[i+1],
+ indices[i+2]
+ );
+ i+=3;
+ }
+ if (i != nr_indices)
+ NOUVEAU_ERR("Houston, we have lost some vertices\n");
+}
+
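+/* Strips are emitted in batches of up to 16 vertices (14 triangles);
+ * striptbl packs three 4-bit vertex slots per triangle, two triangles
+ * per DRAWPRIMITIVE word.
+ */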
+static void
+nv04_vbuf_render_tri_strip_elts(struct nv04_vbuf_render *render,
+				const ushort *indices, uint nr_indices)
+{
+ const uint32_t striptbl[]={0x321210,0x543432,0x765654,0x987876,0xBA9A98,0xDCBCBA,0xFEDEDC};
+ unsigned char* buffer = render->buffer;
+ struct nv04_context* nv04 = render->nv04;
+ int i,j;
+
+ for(i = 0; i<nr_indices; i+=14)
+ {
+ int numvert = MIN2(16, nr_indices - i);
+ int numtri = numvert - 2;
+ if (numvert<3)
+ break;
+
+ BEGIN_RING( fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_TLVERTEX_SX(0x0), numvert*8 );
+ for(j = 0; j<numvert; j++)
+ OUT_RINGp( buffer + VERTEX_SIZE * indices [i+j], 8 );
+
+ BEGIN_RING_NI( fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_TLVERTEX_DRAWPRIMITIVE(0), (numtri+1)/2 );
+ for(j = 0; j<numtri/2; j++ )
+ OUT_RING(striptbl[j]);
+ if (numtri%2)
+ OUT_RING(striptbl[numtri/2]&0xFFF);
+ }
+}
+
+static void
+nv04_vbuf_render_tri_fan_elts(struct nv04_vbuf_render *render,
+			      const ushort *indices, uint nr_indices)
+{
+ const uint32_t fantbl[]={0x320210,0x540430,0x760650,0x980870,0xBA0A90,0xDC0CB0,0xFE0ED0};
+ unsigned char* buffer = render->buffer;
+ struct nv04_context* nv04 = render->nv04;
+ int i,j;
+
+ BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_TLVERTEX_SX(0x0), 8);
+ OUT_RINGp(buffer + VERTEX_SIZE * indices[0], 8);
+
+ for(i = 1; i<nr_indices; i+=14)
+ {
+ int numvert=MIN2(15, nr_indices - i);
+ int numtri=numvert-2;
+ if (numvert < 3)
+ break;
+
+ BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_TLVERTEX_SX(0x1), numvert*8);
+
+ for(j=0;j<numvert;j++)
+ OUT_RINGp( buffer + VERTEX_SIZE * indices[ i+j ], 8 );
+
+ BEGIN_RING_NI(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_TLVERTEX_DRAWPRIMITIVE(0), (numtri+1)/2);
+ for(j = 0; j<numtri/2; j++)
+ OUT_RING(fantbl[j]);
+ if (numtri%2)
+ OUT_RING(fantbl[numtri/2]&0xFFF);
+ }
+}
+
+static void
+nv04_vbuf_render_quads_elts(struct nv04_vbuf_render *render,
+			    const ushort *indices, uint nr_indices)
+{
+ unsigned char* buffer = render->buffer;
+ struct nv04_context* nv04 = render->nv04;
+ int i;
+
+ for(i = 0; i < nr_indices; i += 4)
+ nv04_1quad(nv04,
+ buffer,
+ indices[i+0],
+ indices[i+1],
+ indices[i+2],
+ indices[i+3]
+ );
+}
+
+
+static void
+nv04_vbuf_render_draw( struct vbuf_render *render,
+ const ushort *indices,
+ uint nr_indices)
+{
+ struct nv04_vbuf_render *nv04_render = nv04_vbuf_render(render);
+
+ // emit the indices
+ switch( nv04_render->prim )
+ {
+ case PIPE_PRIM_TRIANGLES:
+ nv04_vbuf_render_triangles_elts(nv04_render, indices, nr_indices);
+ break;
+ case PIPE_PRIM_QUAD_STRIP:
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ nv04_vbuf_render_tri_strip_elts(nv04_render, indices, nr_indices);
+ break;
+ case PIPE_PRIM_TRIANGLE_FAN:
+ case PIPE_PRIM_POLYGON:
+ nv04_vbuf_render_tri_fan_elts(nv04_render, indices, nr_indices);
+ break;
+ case PIPE_PRIM_QUADS:
+ nv04_vbuf_render_quads_elts(nv04_render, indices, nr_indices);
+ break;
+ default:
+ NOUVEAU_ERR("You have to implement primitive %d, young padawan\n", nv04_render->prim);
+ break;
+ }
+}
+
+
+static void
+nv04_vbuf_render_release_vertices( struct vbuf_render *render )
+{
+ struct nv04_vbuf_render *nv04_render = nv04_vbuf_render(render);
+
+ free(nv04_render->buffer);
+ nv04_render->buffer = NULL;
+}
+
+
+static void
+nv04_vbuf_render_destroy( struct vbuf_render *render )
+{
+ struct nv04_vbuf_render *nv04_render = nv04_vbuf_render(render);
+ FREE(nv04_render);
+}
+
+
+/**
+ * Create a new primitive render.
+ */
+static struct vbuf_render *
+nv04_vbuf_render_create( struct nv04_context *nv04 )
+{
+ struct nv04_vbuf_render *nv04_render = CALLOC_STRUCT(nv04_vbuf_render);
+
+ nv04_render->nv04 = nv04;
+
+ nv04_render->base.max_vertex_buffer_bytes = VERTEX_BUFFER_SIZE;
+ nv04_render->base.max_indices = 65536;
+ nv04_render->base.get_vertex_info = nv04_vbuf_render_get_vertex_info;
+ nv04_render->base.allocate_vertices = nv04_vbuf_render_allocate_vertices;
+ nv04_render->base.map_vertices = nv04_vbuf_render_map_vertices;
+ nv04_render->base.unmap_vertices = nv04_vbuf_render_unmap_vertices;
+ nv04_render->base.set_primitive = nv04_vbuf_render_set_primitive;
+ nv04_render->base.draw = nv04_vbuf_render_draw;
+ nv04_render->base.release_vertices = nv04_vbuf_render_release_vertices;
+ nv04_render->base.destroy = nv04_vbuf_render_destroy;
+
+ return &nv04_render->base;
+}
+
+
+/**
+ * Create a new primitive vbuf/render stage.
+ */
+struct draw_stage *nv04_draw_vbuf_stage( struct nv04_context *nv04 )
+{
+ struct vbuf_render *render;
+ struct draw_stage *stage;
+
+ render = nv04_vbuf_render_create(nv04);
+ if(!render)
+ return NULL;
+
+ stage = draw_vbuf_stage( nv04->draw, render );
+ if(!stage) {
+ render->destroy(render);
+ return NULL;
+ }
+
+ return stage;
+}
diff --git a/src/gallium/drivers/nv04/nv04_screen.c b/src/gallium/drivers/nv04/nv04_screen.c
new file mode 100644
index 0000000000..ff2febb668
--- /dev/null
+++ b/src/gallium/drivers/nv04/nv04_screen.c
@@ -0,0 +1,212 @@
+#include "pipe/p_screen.h"
+#include "pipe/p_inlines.h"
+
+#include "nv04_context.h"
+#include "nv04_screen.h"
+
+static int
+nv04_screen_get_param(struct pipe_screen *screen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_TEXTURE_IMAGE_UNITS:
+ return 1;
+ case PIPE_CAP_NPOT_TEXTURES:
+ return 0;
+ case PIPE_CAP_TWO_SIDED_STENCIL:
+ return 0;
+ case PIPE_CAP_GLSL:
+ return 0;
+ case PIPE_CAP_S3TC:
+ return 0;
+ case PIPE_CAP_ANISOTROPIC_FILTER:
+ return 0;
+ case PIPE_CAP_POINT_SPRITE:
+ return 0;
+ case PIPE_CAP_MAX_RENDER_TARGETS:
+ return 1;
+ case PIPE_CAP_OCCLUSION_QUERY:
+ return 0;
+ case PIPE_CAP_TEXTURE_SHADOW_MAP:
+ return 0;
+ case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
+ return 10;
+ case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
+ return 0;
+ case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
+ return 0;
+ case PIPE_CAP_MAX_VERTEX_TEXTURE_UNITS:
+ return 0;
+ case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
+ return 0;
+ case PIPE_CAP_TEXTURE_MIRROR_REPEAT:
+ return 1;
+ case PIPE_CAP_TGSI_CONT_SUPPORTED:
+ return 0;
+ case PIPE_CAP_BLEND_EQUATION_SEPARATE:
+ return 0;
+ case NOUVEAU_CAP_HW_VTXBUF:
+ case NOUVEAU_CAP_HW_IDXBUF:
+ return 0;
+ default:
+ NOUVEAU_ERR("Unknown PIPE_CAP %d\n", param);
+ return 0;
+ }
+}
+
+static float
+nv04_screen_get_paramf(struct pipe_screen *screen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_LINE_WIDTH:
+ case PIPE_CAP_MAX_LINE_WIDTH_AA:
+ return 0.0;
+ case PIPE_CAP_MAX_POINT_WIDTH:
+ case PIPE_CAP_MAX_POINT_WIDTH_AA:
+ return 0.0;
+ case PIPE_CAP_MAX_TEXTURE_ANISOTROPY:
+ return 0.0;
+ case PIPE_CAP_MAX_TEXTURE_LOD_BIAS:
+ return 0.0;
+ default:
+ NOUVEAU_ERR("Unknown PIPE_CAP %d\n", param);
+ return 0.0;
+ }
+}
+
+static boolean
+nv04_screen_is_format_supported(struct pipe_screen *screen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned tex_usage, unsigned geom_flags)
+{
+ if (tex_usage & PIPE_TEXTURE_USAGE_RENDER_TARGET) {
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ return TRUE;
+ default:
+ break;
+ }
+ } else
+ if (tex_usage & PIPE_TEXTURE_USAGE_DEPTH_STENCIL) {
+ switch (format) {
+ case PIPE_FORMAT_Z16_UNORM:
+ return TRUE;
+ default:
+ break;
+ }
+ } else {
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_X8R8G8B8_UNORM:
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ case PIPE_FORMAT_L8_UNORM:
+ case PIPE_FORMAT_A8_UNORM:
+ return TRUE;
+ default:
+ break;
+ }
+ }
+
+ return FALSE;
+}
+
+static void
+nv04_screen_destroy(struct pipe_screen *pscreen)
+{
+ struct nv04_screen *screen = nv04_screen(pscreen);
+
+ nouveau_notifier_free(&screen->sync);
+ nouveau_grobj_free(&screen->fahrenheit);
+ nv04_surface_2d_takedown(&screen->eng2d);
+
+ FREE(pscreen);
+}
+
+static struct pipe_buffer *
+nv04_surface_buffer(struct pipe_surface *surf)
+{
+ struct nv04_miptree *mt = (struct nv04_miptree *)surf->texture;
+
+ return mt->buffer;
+}
+
+struct pipe_screen *
+nv04_screen_create(struct pipe_winsys *ws, struct nouveau_device *dev)
+{
+ struct nv04_screen *screen = CALLOC_STRUCT(nv04_screen);
+ struct nouveau_channel *chan;
+ struct pipe_screen *pscreen;
+ unsigned fahrenheit_class = 0, sub3d_class = 0;
+ int ret;
+
+ if (!screen)
+ return NULL;
+ pscreen = &screen->base.base;
+
+ ret = nouveau_screen_init(&screen->base, dev);
+ if (ret) {
+ nv04_screen_destroy(pscreen);
+ return NULL;
+ }
+ chan = screen->base.channel;
+
+ pscreen->winsys = ws;
+ pscreen->destroy = nv04_screen_destroy;
+ pscreen->get_param = nv04_screen_get_param;
+ pscreen->get_paramf = nv04_screen_get_paramf;
+ pscreen->is_format_supported = nv04_screen_is_format_supported;
+
+ nv04_screen_init_miptree_functions(pscreen);
+ nv04_screen_init_transfer_functions(pscreen);
+
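+	/* Pick the 3D object classes for this chipset: NV10+ use their own
+	 * DX5 triangle and surfaces-3D classes, NV20 and later are not
+	 * handled by this driver.
+	 */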
+ if (dev->chipset >= 0x20) {
+ fahrenheit_class = 0;
+ sub3d_class = 0;
+ } else if (dev->chipset >= 0x10) {
+ fahrenheit_class = NV10_DX5_TEXTURED_TRIANGLE;
+ sub3d_class = NV10_CONTEXT_SURFACES_3D;
+ } else {
+ fahrenheit_class=NV04_DX5_TEXTURED_TRIANGLE;
+ sub3d_class = NV04_CONTEXT_SURFACES_3D;
+ }
+
+ if (!fahrenheit_class) {
+ NOUVEAU_ERR("Unknown nv04 chipset: nv%02x\n", dev->chipset);
+ return NULL;
+ }
+
+ /* 3D object */
+ ret = nouveau_grobj_alloc(chan, 0xbeef0001, fahrenheit_class,
+ &screen->fahrenheit);
+ if (ret) {
+ NOUVEAU_ERR("Error creating 3D object: %d\n", ret);
+ return NULL;
+ }
+ BIND_RING(chan, screen->fahrenheit, 7);
+
+ /* 3D surface object */
+ ret = nouveau_grobj_alloc(chan, 0xbeef0002, sub3d_class,
+ &screen->context_surfaces_3d);
+ if (ret) {
+ NOUVEAU_ERR("Error creating 3D surface object: %d\n", ret);
+ return NULL;
+ }
+ BIND_RING(chan, screen->context_surfaces_3d, 6);
+
+ /* 2D engine setup */
+ screen->eng2d = nv04_surface_2d_init(&screen->base);
+ screen->eng2d->buf = nv04_surface_buffer;
+
+ /* Notifier for sync purposes */
+ ret = nouveau_notifier_alloc(chan, 0xbeef0301, 1, &screen->sync);
+ if (ret) {
+ NOUVEAU_ERR("Error creating notifier object: %d\n", ret);
+ nv04_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ return pscreen;
+}
+
diff --git a/src/gallium/drivers/nv04/nv04_screen.h b/src/gallium/drivers/nv04/nv04_screen.h
new file mode 100644
index 0000000000..11466b9442
--- /dev/null
+++ b/src/gallium/drivers/nv04/nv04_screen.h
@@ -0,0 +1,30 @@
+#ifndef __NV04_SCREEN_H__
+#define __NV04_SCREEN_H__
+
+#include "nouveau/nouveau_screen.h"
+#include "nv04_surface_2d.h"
+
+struct nv04_screen {
+ struct nouveau_screen base;
+
+ struct nouveau_winsys *nvws;
+ unsigned chipset;
+
+ /* HW graphics objects */
+ struct nv04_surface_2d *eng2d;
+ struct nouveau_grobj *fahrenheit;
+ struct nouveau_grobj *context_surfaces_3d;
+ struct nouveau_notifier *sync;
+
+};
+
+static INLINE struct nv04_screen *
+nv04_screen(struct pipe_screen *screen)
+{
+ return (struct nv04_screen *)screen;
+}
+
+void
+nv04_screen_init_transfer_functions(struct pipe_screen *pscreen);
+
+#endif
diff --git a/src/gallium/drivers/nv04/nv04_state.c b/src/gallium/drivers/nv04/nv04_state.c
new file mode 100644
index 0000000000..d356ebd8b3
--- /dev/null
+++ b/src/gallium/drivers/nv04/nv04_state.c
@@ -0,0 +1,459 @@
+#include "draw/draw_context.h"
+#include "pipe/p_state.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_shader_tokens.h"
+#include "pipe/p_inlines.h"
+
+#include "tgsi/tgsi_parse.h"
+
+#include "nv04_context.h"
+#include "nv04_state.h"
+
+static void *
+nv04_blend_state_create(struct pipe_context *pipe,
+ const struct pipe_blend_state *cso)
+{
+ struct nv04_blend_state *cb;
+
+ cb = MALLOC(sizeof(struct nv04_blend_state));
+
+ cb->b_enable = cso->blend_enable ? 1 : 0;
+ cb->b_src = ((nvgl_blend_func(cso->alpha_src_factor)<<16) |
+ (nvgl_blend_func(cso->rgb_src_factor)));
+ cb->b_dst = ((nvgl_blend_func(cso->alpha_dst_factor)<<16) |
+ (nvgl_blend_func(cso->rgb_dst_factor)));
+
+
+ return (void *)cb;
+}
+
+static void
+nv04_blend_state_bind(struct pipe_context *pipe, void *blend)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+
+ nv04->blend = (struct nv04_blend_state*)blend;
+
+ nv04->dirty |= NV04_NEW_BLEND;
+}
+
+static void
+nv04_blend_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ free(hwcso);
+}
+
+
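+/* Translate a gallium wrap mode into the hardware ADDRESS field value,
+ * shifted back down so the caller can place it in either the U or V slot.
+ */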
+static INLINE unsigned
+wrap_mode(unsigned wrap) {
+ unsigned ret;
+
+ switch (wrap) {
+ case PIPE_TEX_WRAP_REPEAT:
+ ret = NV04_DX5_TEXTURED_TRIANGLE_FORMAT_ADDRESSU_REPEAT;
+ break;
+ case PIPE_TEX_WRAP_MIRROR_REPEAT:
+ ret = NV04_DX5_TEXTURED_TRIANGLE_FORMAT_ADDRESSU_MIRRORED_REPEAT;
+ break;
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ ret = NV04_DX5_TEXTURED_TRIANGLE_FORMAT_ADDRESSU_CLAMP_TO_EDGE;
+ break;
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ ret = NV04_DX5_TEXTURED_TRIANGLE_FORMAT_ADDRESSU_CLAMP_TO_BORDER;
+ break;
+ case PIPE_TEX_WRAP_CLAMP:
+ ret = NV04_DX5_TEXTURED_TRIANGLE_FORMAT_ADDRESSU_CLAMP;
+ break;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
+ case PIPE_TEX_WRAP_MIRROR_CLAMP:
+ default:
+ NOUVEAU_ERR("unknown wrap mode: %d\n", wrap);
+ ret = NV04_DX5_TEXTURED_TRIANGLE_FORMAT_ADDRESSU_CLAMP;
+ }
+ return ret >> NV04_DX5_TEXTURED_TRIANGLE_FORMAT_ADDRESSU_SHIFT;
+}
+
+static void *
+nv04_sampler_state_create(struct pipe_context *pipe,
+ const struct pipe_sampler_state *cso)
+{
+
+ struct nv04_sampler_state *ss;
+ uint32_t filter = 0;
+
+ ss = MALLOC(sizeof(struct nv04_sampler_state));
+
+ ss->format = ((wrap_mode(cso->wrap_s) << NV04_DX5_TEXTURED_TRIANGLE_FORMAT_ADDRESSU_SHIFT) |
+ (wrap_mode(cso->wrap_t) << NV04_DX5_TEXTURED_TRIANGLE_FORMAT_ADDRESSV_SHIFT));
+
+ if (cso->max_anisotropy > 1.0) {
+		filter |= NV04_DX5_TEXTURED_TRIANGLE_FILTER_ANISOTROPIC_MINIFY_ENABLE |
+			  NV04_DX5_TEXTURED_TRIANGLE_FILTER_ANISOTROPIC_MAGNIFY_ENABLE;
+ }
+
+ switch (cso->mag_img_filter) {
+ case PIPE_TEX_FILTER_LINEAR:
+ filter |= NV04_DX5_TEXTURED_TRIANGLE_FILTER_MAGNIFY_LINEAR;
+ break;
+ case PIPE_TEX_FILTER_NEAREST:
+ default:
+ filter |= NV04_DX5_TEXTURED_TRIANGLE_FILTER_MAGNIFY_NEAREST;
+ break;
+ }
+
+ switch (cso->min_img_filter) {
+ case PIPE_TEX_FILTER_LINEAR:
+ switch (cso->min_mip_filter) {
+ case PIPE_TEX_MIPFILTER_NEAREST:
+ filter |= NV04_DX5_TEXTURED_TRIANGLE_FILTER_MINIFY_LINEAR_MIPMAP_NEAREST;
+ break;
+ case PIPE_TEX_MIPFILTER_LINEAR:
+ filter |= NV04_DX5_TEXTURED_TRIANGLE_FILTER_MINIFY_LINEAR_MIPMAP_LINEAR;
+ break;
+ case PIPE_TEX_MIPFILTER_NONE:
+ default:
+ filter |= NV04_DX5_TEXTURED_TRIANGLE_FILTER_MINIFY_LINEAR;
+ break;
+ }
+ break;
+ case PIPE_TEX_FILTER_NEAREST:
+ default:
+ switch (cso->min_mip_filter) {
+ case PIPE_TEX_MIPFILTER_NEAREST:
+ filter |= NV04_DX5_TEXTURED_TRIANGLE_FILTER_MINIFY_NEAREST_MIPMAP_NEAREST;
+ break;
+ case PIPE_TEX_MIPFILTER_LINEAR:
+ filter |= NV04_DX5_TEXTURED_TRIANGLE_FILTER_MINIFY_NEAREST_MIPMAP_LINEAR;
+ break;
+ case PIPE_TEX_MIPFILTER_NONE:
+ default:
+ filter |= NV04_DX5_TEXTURED_TRIANGLE_FILTER_MINIFY_NEAREST;
+ break;
+ }
+ break;
+ }
+
+ ss->filter = filter;
+
+ return (void *)ss;
+}
+
+static void
+nv04_sampler_state_bind(struct pipe_context *pipe, unsigned nr, void **sampler)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+ unsigned unit;
+
+ for (unit = 0; unit < nr; unit++) {
+ nv04->sampler[unit] = sampler[unit];
+ nv04->dirty_samplers |= (1 << unit);
+ }
+}
+
+static void
+nv04_sampler_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ free(hwcso);
+}
+
+static void
+nv04_set_sampler_texture(struct pipe_context *pipe, unsigned nr,
+ struct pipe_texture **miptree)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+ unsigned unit;
+
+ for (unit = 0; unit < nr; unit++) {
+ nv04->tex_miptree[unit] = (struct nv04_miptree *)miptree[unit];
+ nv04->dirty_samplers |= (1 << unit);
+ }
+}
+
+static void *
+nv04_rasterizer_state_create(struct pipe_context *pipe,
+ const struct pipe_rasterizer_state *cso)
+{
+ struct nv04_rasterizer_state *rs;
+
+ /*XXX: ignored:
+ * scissor
+ * points/lines (no hw support, emulated with tris in gallium)
+ */
+ rs = MALLOC(sizeof(struct nv04_rasterizer_state));
+
+ rs->blend = cso->flatshade ? NV04_DX5_TEXTURED_TRIANGLE_BLEND_SHADE_MODE_FLAT : NV04_DX5_TEXTURED_TRIANGLE_BLEND_SHADE_MODE_GOURAUD;
+
+ return (void *)rs;
+}
+
+static void
+nv04_rasterizer_state_bind(struct pipe_context *pipe, void *rast)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+
+ nv04->rast = (struct nv04_rasterizer_state*)rast;
+
+ draw_set_rasterizer_state(nv04->draw, (nv04->rast ? nv04->rast->templ : NULL));
+
+ nv04->dirty |= NV04_NEW_RAST | NV04_NEW_BLEND;
+}
+
+static void
+nv04_rasterizer_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ free(hwcso);
+}
+
+static INLINE uint32_t nv04_compare_func(uint32_t f)
+{
+ switch ( f ) {
+ case PIPE_FUNC_NEVER: return 1;
+ case PIPE_FUNC_LESS: return 2;
+ case PIPE_FUNC_EQUAL: return 3;
+ case PIPE_FUNC_LEQUAL: return 4;
+ case PIPE_FUNC_GREATER: return 5;
+ case PIPE_FUNC_NOTEQUAL: return 6;
+ case PIPE_FUNC_GEQUAL: return 7;
+ case PIPE_FUNC_ALWAYS: return 8;
+ }
+	NOUVEAU_MSG("Unknown comparison function 0x%x\n", f);
+ return 0;
+}
+
+static void *
+nv04_depth_stencil_alpha_state_create(struct pipe_context *pipe,
+ const struct pipe_depth_stencil_alpha_state *cso)
+{
+ struct nv04_depth_stencil_alpha_state *hw;
+
+ hw = MALLOC(sizeof(struct nv04_depth_stencil_alpha_state));
+
+ hw->control = float_to_ubyte(cso->alpha.ref_value);
+ hw->control |= ( nv04_compare_func(cso->alpha.func) << NV04_DX5_TEXTURED_TRIANGLE_CONTROL_ALPHA_FUNC_SHIFT );
+ hw->control |= cso->alpha.enabled ? NV04_DX5_TEXTURED_TRIANGLE_CONTROL_ALPHA_TEST_ENABLE : 0;
+ hw->control |= NV04_DX5_TEXTURED_TRIANGLE_CONTROL_ORIGIN;
+ hw->control |= cso->depth.enabled ? (1 << NV04_DX5_TEXTURED_TRIANGLE_CONTROL_Z_ENABLE_SHIFT) : 0;
+ hw->control |= ( nv04_compare_func(cso->depth.func)<< NV04_DX5_TEXTURED_TRIANGLE_CONTROL_Z_FUNC_SHIFT );
+ hw->control |= 1 << NV04_DX5_TEXTURED_TRIANGLE_CONTROL_CULL_MODE_SHIFT; // no culling, handled by the draw module
+ hw->control |= NV04_DX5_TEXTURED_TRIANGLE_CONTROL_DITHER_ENABLE;
+ hw->control |= NV04_DX5_TEXTURED_TRIANGLE_CONTROL_Z_PERSPECTIVE_ENABLE;
+ hw->control |= cso->depth.writemask ? (1 << NV04_DX5_TEXTURED_TRIANGLE_CONTROL_Z_WRITE_ENABLE_SHIFT) : 0;
+ hw->control |= 1 << NV04_DX5_TEXTURED_TRIANGLE_CONTROL_Z_FORMAT_SHIFT; // integer zbuffer format
+
+ return (void *)hw;
+}
+
+static void
+nv04_depth_stencil_alpha_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+
+ nv04->dsa = hwcso;
+ nv04->dirty |= NV04_NEW_CONTROL;
+}
+
+static void
+nv04_depth_stencil_alpha_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ free(hwcso);
+}
+
+static void *
+nv04_vp_state_create(struct pipe_context *pipe,
+ const struct pipe_shader_state *templ)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+
+ return draw_create_vertex_shader(nv04->draw, templ);
+}
+
+static void
+nv04_vp_state_bind(struct pipe_context *pipe, void *shader)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+
+ draw_bind_vertex_shader(nv04->draw, (struct draw_vertex_shader *) shader);
+
+ nv04->dirty |= NV04_NEW_VERTPROG;
+}
+
+static void
+nv04_vp_state_delete(struct pipe_context *pipe, void *shader)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+
+ draw_delete_vertex_shader(nv04->draw, (struct draw_vertex_shader *) shader);
+}
+
+static void *
+nv04_fp_state_create(struct pipe_context *pipe,
+ const struct pipe_shader_state *cso)
+{
+ struct nv04_fragment_program *fp;
+
+ fp = CALLOC(1, sizeof(struct nv04_fragment_program));
+ fp->pipe.tokens = tgsi_dup_tokens(cso->tokens);
+
+ return (void *)fp;
+}
+
+static void
+nv04_fp_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+ struct nv04_fragment_program *fp = hwcso;
+
+ nv04->fragprog.current = fp;
+ nv04->dirty |= NV04_NEW_FRAGPROG;
+}
+
+static void
+nv04_fp_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+ struct nv04_fragment_program *fp = hwcso;
+
+ nv04_fragprog_destroy(nv04, fp);
+ free((void*)fp->pipe.tokens);
+ free(fp);
+}
+
+static void
+nv04_set_blend_color(struct pipe_context *pipe,
+ const struct pipe_blend_color *bcol)
+{
+}
+
+static void
+nv04_set_clip_state(struct pipe_context *pipe,
+ const struct pipe_clip_state *clip)
+{
+}
+
+static void
+nv04_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
+ const struct pipe_constant_buffer *buf )
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+ struct pipe_screen *pscreen = pipe->screen;
+
+ assert(shader < PIPE_SHADER_TYPES);
+ assert(index == 0);
+
+ if (buf) {
+ void *mapped;
+ if (buf->buffer && buf->buffer->size &&
+ (mapped = pipe_buffer_map(pscreen, buf->buffer, PIPE_BUFFER_USAGE_CPU_READ)))
+ {
+ memcpy(nv04->constbuf[shader], mapped, buf->buffer->size);
+ nv04->constbuf_nr[shader] =
+ buf->buffer->size / (4 * sizeof(float));
+ pipe_buffer_unmap(pscreen, buf->buffer);
+ }
+ }
+}
+
+static void
+nv04_set_framebuffer_state(struct pipe_context *pipe,
+ const struct pipe_framebuffer_state *fb)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+
+ nv04->framebuffer = (struct pipe_framebuffer_state*)fb;
+
+ nv04->dirty |= NV04_NEW_FRAMEBUFFER;
+}
+static void
+nv04_set_polygon_stipple(struct pipe_context *pipe,
+ const struct pipe_poly_stipple *stipple)
+{
+	NOUVEAU_ERR("polygon stipple not implemented\n");
+}
+
+static void
+nv04_set_scissor_state(struct pipe_context *pipe,
+ const struct pipe_scissor_state *s)
+{
+/* struct nv04_context *nv04 = nv04_context(pipe);
+
+ // XXX
+ BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_SCISSOR_HORIZ, 2);
+ OUT_RING (((s->maxx - s->minx) << 16) | s->minx);
+ OUT_RING (((s->maxy - s->miny) << 16) | s->miny);*/
+}
+
+static void
+nv04_set_viewport_state(struct pipe_context *pipe,
+ const struct pipe_viewport_state *viewport)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+
+ nv04->viewport = *viewport;
+
+ draw_set_viewport_state(nv04->draw, &nv04->viewport);
+}
+
+static void
+nv04_set_vertex_buffers(struct pipe_context *pipe, unsigned count,
+ const struct pipe_vertex_buffer *buffers)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+
+ memcpy(nv04->vtxbuf, buffers, count * sizeof(buffers[0]));
+ nv04->dirty |= NV04_NEW_VTXARRAYS;
+
+ draw_set_vertex_buffers(nv04->draw, count, buffers);
+}
+
+static void
+nv04_set_vertex_elements(struct pipe_context *pipe, unsigned count,
+ const struct pipe_vertex_element *elements)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+
+ memcpy(nv04->vtxelt, elements, sizeof(*elements) * count);
+ nv04->dirty |= NV04_NEW_VTXARRAYS;
+
+ draw_set_vertex_elements(nv04->draw, count, elements);
+}
+
+void
+nv04_init_state_functions(struct nv04_context *nv04)
+{
+ nv04->pipe.create_blend_state = nv04_blend_state_create;
+ nv04->pipe.bind_blend_state = nv04_blend_state_bind;
+ nv04->pipe.delete_blend_state = nv04_blend_state_delete;
+
+ nv04->pipe.create_sampler_state = nv04_sampler_state_create;
+ nv04->pipe.bind_sampler_states = nv04_sampler_state_bind;
+ nv04->pipe.delete_sampler_state = nv04_sampler_state_delete;
+ nv04->pipe.set_sampler_textures = nv04_set_sampler_texture;
+
+ nv04->pipe.create_rasterizer_state = nv04_rasterizer_state_create;
+ nv04->pipe.bind_rasterizer_state = nv04_rasterizer_state_bind;
+ nv04->pipe.delete_rasterizer_state = nv04_rasterizer_state_delete;
+
+ nv04->pipe.create_depth_stencil_alpha_state = nv04_depth_stencil_alpha_state_create;
+ nv04->pipe.bind_depth_stencil_alpha_state = nv04_depth_stencil_alpha_state_bind;
+ nv04->pipe.delete_depth_stencil_alpha_state = nv04_depth_stencil_alpha_state_delete;
+
+ nv04->pipe.create_vs_state = nv04_vp_state_create;
+ nv04->pipe.bind_vs_state = nv04_vp_state_bind;
+ nv04->pipe.delete_vs_state = nv04_vp_state_delete;
+
+ nv04->pipe.create_fs_state = nv04_fp_state_create;
+ nv04->pipe.bind_fs_state = nv04_fp_state_bind;
+ nv04->pipe.delete_fs_state = nv04_fp_state_delete;
+
+ nv04->pipe.set_blend_color = nv04_set_blend_color;
+ nv04->pipe.set_clip_state = nv04_set_clip_state;
+ nv04->pipe.set_constant_buffer = nv04_set_constant_buffer;
+ nv04->pipe.set_framebuffer_state = nv04_set_framebuffer_state;
+ nv04->pipe.set_polygon_stipple = nv04_set_polygon_stipple;
+ nv04->pipe.set_scissor_state = nv04_set_scissor_state;
+ nv04->pipe.set_viewport_state = nv04_set_viewport_state;
+
+ nv04->pipe.set_vertex_buffers = nv04_set_vertex_buffers;
+ nv04->pipe.set_vertex_elements = nv04_set_vertex_elements;
+}
+
diff --git a/src/gallium/drivers/nv04/nv04_state.h b/src/gallium/drivers/nv04/nv04_state.h
new file mode 100644
index 0000000000..399f750dbe
--- /dev/null
+++ b/src/gallium/drivers/nv04/nv04_state.h
@@ -0,0 +1,71 @@
+#ifndef __NV04_STATE_H__
+#define __NV04_STATE_H__
+
+#include "pipe/p_state.h"
+#include "tgsi/tgsi_scan.h"
+
+struct nv04_blend_state {
+ uint32_t b_enable;
+ uint32_t b_src;
+ uint32_t b_dst;
+};
+
+struct nv04_fragtex_state {
+ uint32_t format;
+};
+
+struct nv04_sampler_state {
+ uint32_t filter;
+ uint32_t format;
+};
+
+struct nv04_depth_stencil_alpha_state {
+ uint32_t control;
+};
+
+struct nv04_rasterizer_state {
+ uint32_t blend;
+
+ const struct pipe_rasterizer_state *templ;
+};
+
+struct nv04_miptree {
+ struct pipe_texture base;
+
+ struct pipe_buffer *buffer;
+ uint total_size;
+
+ struct {
+ uint pitch;
+ uint *image_offset;
+ } level[PIPE_MAX_TEXTURE_LEVELS];
+};
+
+struct nv04_fragment_program_data {
+ unsigned offset;
+ unsigned index;
+};
+
+struct nv04_fragment_program {
+ struct pipe_shader_state pipe;
+ struct tgsi_shader_info info;
+
+ boolean translated;
+ boolean on_hw;
+ unsigned samplers;
+
+ uint32_t *insn;
+ int insn_len;
+
+ struct nv04_fragment_program_data *consts;
+ unsigned nr_consts;
+
+ struct pipe_buffer *buffer;
+
+ uint32_t fp_control;
+ uint32_t fp_reg_control;
+};
+
+
+
+#endif
diff --git a/src/gallium/drivers/nv04/nv04_state_emit.c b/src/gallium/drivers/nv04/nv04_state_emit.c
new file mode 100644
index 0000000000..eb2c1c57c6
--- /dev/null
+++ b/src/gallium/drivers/nv04/nv04_state_emit.c
@@ -0,0 +1,226 @@
+#include "nv04_context.h"
+#include "nv04_state.h"
+
+static void nv04_vertex_layout(struct pipe_context* pipe)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+ struct nv04_fragment_program *fp = nv04->fragprog.current;
+ uint32_t src = 0;
+ int i;
+ struct vertex_info vinfo;
+
+ memset(&vinfo, 0, sizeof(vinfo));
+
+ for (i = 0; i < fp->info.num_inputs; i++) {
+ int isn = fp->info.input_semantic_name[i];
+ int isi = fp->info.input_semantic_index[i];
+ switch (isn) {
+ case TGSI_SEMANTIC_POSITION:
+ draw_emit_vertex_attr(&vinfo, EMIT_4F, INTERP_LINEAR, src++);
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ draw_emit_vertex_attr(&vinfo, EMIT_4F, INTERP_LINEAR, src++);
+ break;
+ default:
+ case TGSI_SEMANTIC_GENERIC:
+ draw_emit_vertex_attr(&vinfo, EMIT_4F, INTERP_PERSPECTIVE, src++);
+ break;
+ case TGSI_SEMANTIC_FOG:
+ draw_emit_vertex_attr(&vinfo, EMIT_4F, INTERP_PERSPECTIVE, src++);
+ break;
+ }
+ }
+
+ printf("%d vertex input\n",fp->info.num_inputs);
+	draw_compute_vertex_size(&vinfo);
+	nv04->vertex_info = vinfo;
+}
+
+static uint32_t nv04_blend_func(uint32_t f)
+{
+ switch ( f ) {
+ case PIPE_BLENDFACTOR_ZERO: return 0x1;
+ case PIPE_BLENDFACTOR_ONE: return 0x2;
+ case PIPE_BLENDFACTOR_SRC_COLOR: return 0x3;
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR: return 0x4;
+ case PIPE_BLENDFACTOR_SRC_ALPHA: return 0x5;
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA: return 0x6;
+ case PIPE_BLENDFACTOR_DST_ALPHA: return 0x7;
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA: return 0x8;
+ case PIPE_BLENDFACTOR_DST_COLOR: return 0x9;
+ case PIPE_BLENDFACTOR_INV_DST_COLOR: return 0xA;
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE: return 0xB;
+ }
+ NOUVEAU_MSG("Unable to find the blend function 0x%x\n",f);
+ return 0;
+}
+
+static void nv04_emit_control(struct nv04_context* nv04)
+{
+ uint32_t control = nv04->dsa->control;
+
+ BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_CONTROL, 1);
+ OUT_RING(control);
+}
+
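+/* Build the BLEND word: the texture environment (MODULATE_ALPHA), shade
+ * mode and perspective-correction bits are hard-coded here; only the
+ * blend enable and the src/dst factors come from the bound blend state.
+ */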
+static void nv04_emit_blend(struct nv04_context* nv04)
+{
+ uint32_t blend;
+
+ blend=0x4; // texture MODULATE_ALPHA
+ blend|=0x20; // alpha is MSB
+ blend|=(2<<6); // flat shading
+ blend|=(1<<8); // persp correct
+ blend|=(0<<16); // no fog
+ blend|=(nv04->blend->b_enable<<20);
+ blend|=(nv04_blend_func(nv04->blend->b_src)<<24);
+ blend|=(nv04_blend_func(nv04->blend->b_dst)<<28);
+
+ BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_BLEND, 1);
+ OUT_RING(blend);
+}
+
+static void nv04_emit_sampler(struct nv04_context *nv04, int unit)
+{
+ struct nv04_miptree *nv04mt = nv04->tex_miptree[unit];
+ struct pipe_texture *pt = &nv04mt->base;
+
+ BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_OFFSET, 3);
+ OUT_RELOCl(nv04mt->buffer, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD);
+	OUT_RELOCd(nv04mt->buffer,
+		   (nv04->fragtex.format | nv04->sampler[unit]->format),
+		   NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_OR |
+		   NOUVEAU_BO_RD, 1 /*VRAM*/, 2 /*TT*/);
+ OUT_RING(nv04->sampler[unit]->filter);
+}
+
+static void nv04_state_emit_framebuffer(struct nv04_context* nv04)
+{
+ struct pipe_framebuffer_state* fb = nv04->framebuffer;
+	struct nv04_surface *rt, *zeta = NULL;
+ uint32_t rt_format, w, h;
+ int colour_format = 0, zeta_format = 0;
+ struct nv04_miptree *nv04mt = 0;
+
+ w = fb->cbufs[0]->width;
+ h = fb->cbufs[0]->height;
+ colour_format = fb->cbufs[0]->format;
+ rt = (struct nv04_surface *)fb->cbufs[0];
+
+ if (fb->zsbuf) {
+ if (colour_format) {
+ assert(w == fb->zsbuf->width);
+ assert(h == fb->zsbuf->height);
+ } else {
+ w = fb->zsbuf->width;
+ h = fb->zsbuf->height;
+ }
+
+ zeta_format = fb->zsbuf->format;
+ zeta = (struct nv04_surface *)fb->zsbuf;
+ }
+
+ switch (colour_format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case 0:
+ rt_format = 0x108;
+ break;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ rt_format = 0x103;
+ break;
+ default:
+ assert(0);
+ }
+
+ BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_FORMAT, 1);
+ OUT_RING(rt_format);
+
+ nv04mt = (struct nv04_miptree *)rt->base.texture;
+ /* FIXME pitches have to be aligned ! */
+ BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_PITCH, 2);
+	OUT_RING(rt->pitch | (zeta ? zeta->pitch << 16 : 0));
+ OUT_RELOCl(nv04mt->buffer, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ if (fb->zsbuf) {
+ nv04mt = (struct nv04_miptree *)zeta->base.texture;
+ BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_OFFSET_ZETA, 1);
+ OUT_RELOCl(nv04mt->buffer, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ }
+}
+
+void
+nv04_emit_hw_state(struct nv04_context *nv04)
+{
+ int i;
+
+ if (nv04->dirty & NV04_NEW_VERTPROG) {
+ //nv04_vertprog_bind(nv04, nv04->vertprog.current);
+ nv04->dirty &= ~NV04_NEW_VERTPROG;
+ }
+
+ if (nv04->dirty & NV04_NEW_FRAGPROG) {
+ nv04_fragprog_bind(nv04, nv04->fragprog.current);
+ nv04->dirty &= ~NV04_NEW_FRAGPROG;
+ nv04->dirty_samplers |= (1<<10);
+ nv04->dirty_samplers = 0;
+ }
+
+ if (nv04->dirty & NV04_NEW_CONTROL) {
+ nv04->dirty &= ~NV04_NEW_CONTROL;
+
+ BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_CONTROL, 1);
+ OUT_RING(nv04->dsa->control);
+ }
+
+ if (nv04->dirty & NV04_NEW_BLEND) {
+ nv04->dirty &= ~NV04_NEW_BLEND;
+
+ nv04_emit_blend(nv04);
+ }
+
+ if (nv04->dirty & NV04_NEW_VTXARRAYS) {
+ nv04->dirty &= ~NV04_NEW_VTXARRAYS;
+		nv04_vertex_layout(&nv04->pipe);
+ }
+
+ if (nv04->dirty & NV04_NEW_SAMPLER) {
+ nv04->dirty &= ~NV04_NEW_SAMPLER;
+
+ nv04_emit_sampler(nv04, 0);
+ }
+
+ if (nv04->dirty & NV04_NEW_VIEWPORT) {
+ nv04->dirty &= ~NV04_NEW_VIEWPORT;
+// nv04_state_emit_viewport(nv04);
+ }
+
+ if (nv04->dirty & NV04_NEW_FRAMEBUFFER) {
+ nv04->dirty &= ~NV04_NEW_FRAMEBUFFER;
+ nv04_state_emit_framebuffer(nv04);
+ }
+
+ /* Emit relocs for every referenced buffer.
+ * This is to ensure the bufmgr has an accurate idea of how
+ * the buffer is used. This isn't very efficient, but we don't
+ * seem to take a significant performance hit. Will be improved
+ * at some point. Vertex arrays are emitted by nv04_vbo.c
+ */
+
+ /* Render target */
+ unsigned rt_pitch = ((struct nv04_surface *)nv04->rt)->pitch;
+ unsigned zeta_pitch = ((struct nv04_surface *)nv04->zeta)->pitch;
+
+ BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_PITCH, 2);
+ OUT_RING(rt_pitch|(zeta_pitch<<16));
+ OUT_RELOCl(nv04->rt, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ if (nv04->zeta) {
+ BEGIN_RING(context_surfaces_3d, NV04_CONTEXT_SURFACES_3D_OFFSET_ZETA, 1);
+ OUT_RELOCl(nv04->zeta, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ }
+
+ /* Texture images */
+ for (i = 0; i < 1; i++) {
+ if (!(nv04->fp_samplers & (1 << i)))
+ continue;
+ struct nv04_miptree *nv04mt = nv04->tex_miptree[i];
+ BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_OFFSET, 2);
+ OUT_RELOCl(nv04mt->buffer, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD);
+		OUT_RELOCd(nv04mt->buffer,
+			   (nv04->fragtex.format | nv04->sampler[i]->format),
+			   NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_OR |
+			   NOUVEAU_BO_RD, 1 /*VRAM*/, 2 /*TT*/);
+ }
+}
+
diff --git a/src/gallium/drivers/nv04/nv04_surface.c b/src/gallium/drivers/nv04/nv04_surface.c
new file mode 100644
index 0000000000..0387ff4e78
--- /dev/null
+++ b/src/gallium/drivers/nv04/nv04_surface.c
@@ -0,0 +1,63 @@
+
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "nv04_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_inlines.h"
+#include "util/u_tile.h"
+
+static void
+nv04_surface_copy(struct pipe_context *pipe,
+ struct pipe_surface *dest, unsigned destx, unsigned desty,
+ struct pipe_surface *src, unsigned srcx, unsigned srcy,
+ unsigned width, unsigned height)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+ struct nv04_surface_2d *eng2d = nv04->screen->eng2d;
+
+ eng2d->copy(eng2d, dest, destx, desty, src, srcx, srcy, width, height);
+}
+
+static void
+nv04_surface_fill(struct pipe_context *pipe, struct pipe_surface *dest,
+ unsigned destx, unsigned desty, unsigned width,
+ unsigned height, unsigned value)
+{
+ struct nv04_context *nv04 = nv04_context(pipe);
+ struct nv04_surface_2d *eng2d = nv04->screen->eng2d;
+
+ eng2d->fill(eng2d, dest, destx, desty, width, height, value);
+}
+
+void
+nv04_init_surface_functions(struct nv04_context *nv04)
+{
+ nv04->pipe.surface_copy = nv04_surface_copy;
+ nv04->pipe.surface_fill = nv04_surface_fill;
+}
diff --git a/src/gallium/drivers/nv04/nv04_surface_2d.c b/src/gallium/drivers/nv04/nv04_surface_2d.c
new file mode 100644
index 0000000000..f88e138c79
--- /dev/null
+++ b/src/gallium/drivers/nv04/nv04_surface_2d.c
@@ -0,0 +1,558 @@
+#include "pipe/p_context.h"
+#include "pipe/p_format.h"
+#include "util/u_memory.h"
+
+#include "nouveau/nouveau_winsys.h"
+#include "nouveau/nouveau_util.h"
+#include "nouveau/nouveau_screen.h"
+#include "nv04_surface_2d.h"
+
+static INLINE int
+nv04_surface_format(enum pipe_format format)
+{
+ switch (format) {
+ case PIPE_FORMAT_A8_UNORM:
+ return NV04_CONTEXT_SURFACES_2D_FORMAT_Y8;
+ case PIPE_FORMAT_R16_SNORM:
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ case PIPE_FORMAT_Z16_UNORM:
+ return NV04_CONTEXT_SURFACES_2D_FORMAT_R5G6B5;
+ case PIPE_FORMAT_X8R8G8B8_UNORM:
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ return NV04_CONTEXT_SURFACES_2D_FORMAT_A8R8G8B8;
+ case PIPE_FORMAT_Z24S8_UNORM:
+ case PIPE_FORMAT_Z24X8_UNORM:
+ return NV04_CONTEXT_SURFACES_2D_FORMAT_Y32;
+ default:
+ return -1;
+ }
+}
+
+static INLINE int
+nv04_rect_format(enum pipe_format format)
+{
+ switch (format) {
+ case PIPE_FORMAT_A8_UNORM:
+ return NV04_GDI_RECTANGLE_TEXT_COLOR_FORMAT_A8R8G8B8;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ case PIPE_FORMAT_Z16_UNORM:
+ return NV04_GDI_RECTANGLE_TEXT_COLOR_FORMAT_A16R5G6B5;
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_Z24S8_UNORM:
+ case PIPE_FORMAT_Z24X8_UNORM:
+ return NV04_GDI_RECTANGLE_TEXT_COLOR_FORMAT_A8R8G8B8;
+ default:
+ return -1;
+ }
+}
+
+static INLINE int
+nv04_scaled_image_format(enum pipe_format format)
+{
+ switch (format) {
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ return NV04_SCALED_IMAGE_FROM_MEMORY_COLOR_FORMAT_A1R5G5B5;
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ return NV04_SCALED_IMAGE_FROM_MEMORY_COLOR_FORMAT_A8R8G8B8;
+ case PIPE_FORMAT_X8R8G8B8_UNORM:
+ return NV04_SCALED_IMAGE_FROM_MEMORY_COLOR_FORMAT_X8R8G8B8;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ case PIPE_FORMAT_R16_SNORM:
+ return NV04_SCALED_IMAGE_FROM_MEMORY_COLOR_FORMAT_R5G6B5;
+ default:
+ return -1;
+ }
+}
+
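+/* Interleave the x and y coordinate bits (Morton order) to get the texel
+ * index inside a swizzled surface.
+ */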
+static INLINE unsigned
+nv04_swizzle_bits(unsigned x, unsigned y)
+{
+ unsigned u = (x & 0x001) << 0 |
+ (x & 0x002) << 1 |
+ (x & 0x004) << 2 |
+ (x & 0x008) << 3 |
+ (x & 0x010) << 4 |
+ (x & 0x020) << 5 |
+ (x & 0x040) << 6 |
+ (x & 0x080) << 7 |
+ (x & 0x100) << 8 |
+ (x & 0x200) << 9 |
+ (x & 0x400) << 10 |
+ (x & 0x800) << 11;
+
+ unsigned v = (y & 0x001) << 1 |
+ (y & 0x002) << 2 |
+ (y & 0x004) << 3 |
+ (y & 0x008) << 4 |
+ (y & 0x010) << 5 |
+ (y & 0x020) << 6 |
+ (y & 0x040) << 7 |
+ (y & 0x080) << 8 |
+ (y & 0x100) << 9 |
+ (y & 0x200) << 10 |
+ (y & 0x400) << 11 |
+ (y & 0x800) << 12;
+ return v | u;
+}
+
+static int
+nv04_surface_copy_swizzle(struct nv04_surface_2d *ctx,
+ struct pipe_surface *dst, int dx, int dy,
+ struct pipe_surface *src, int sx, int sy,
+ int w, int h)
+{
+ struct nouveau_channel *chan = ctx->swzsurf->channel;
+ struct nouveau_grobj *swzsurf = ctx->swzsurf;
+ struct nouveau_grobj *sifm = ctx->sifm;
+ struct nouveau_bo *src_bo = nouveau_bo(ctx->buf(src));
+ struct nouveau_bo *dst_bo = nouveau_bo(ctx->buf(dst));
+ const unsigned src_pitch = ((struct nv04_surface *)src)->pitch;
+ const unsigned max_w = 1024;
+ const unsigned max_h = 1024;
+ const unsigned sub_w = w > max_w ? max_w : w;
+ const unsigned sub_h = h > max_h ? max_h : h;
+ unsigned cx;
+ unsigned cy;
+
+#if 0
+ /* That's the way she likes it */
+ assert(src_pitch == ((struct nv04_surface *)dst)->pitch);
+#endif
+
+ BEGIN_RING(chan, swzsurf, NV04_SWIZZLED_SURFACE_DMA_IMAGE, 1);
+ OUT_RELOCo(chan, dst_bo,
+ NOUVEAU_BO_GART | NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+
+ BEGIN_RING(chan, swzsurf, NV04_SWIZZLED_SURFACE_FORMAT, 1);
+ OUT_RING (chan, nv04_surface_format(dst->format) |
+ log2i(w) << NV04_SWIZZLED_SURFACE_FORMAT_BASE_SIZE_U_SHIFT |
+ log2i(h) << NV04_SWIZZLED_SURFACE_FORMAT_BASE_SIZE_V_SHIFT);
+
+ BEGIN_RING(chan, sifm, NV04_SCALED_IMAGE_FROM_MEMORY_DMA_IMAGE, 1);
+ OUT_RELOCo(chan, src_bo,
+ NOUVEAU_BO_GART | NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
+ BEGIN_RING(chan, sifm, NV04_SCALED_IMAGE_FROM_MEMORY_SURFACE, 1);
+ OUT_RING (chan, swzsurf->handle);
+
+ for (cy = 0; cy < h; cy += sub_h) {
+ for (cx = 0; cx < w; cx += sub_w) {
+ BEGIN_RING(chan, swzsurf, NV04_SWIZZLED_SURFACE_OFFSET, 1);
+ OUT_RELOCl(chan, dst_bo, dst->offset + nv04_swizzle_bits(cx+dx, cy+dy) *
+ dst->texture->block.size, NOUVEAU_BO_GART |
+ NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+
+ BEGIN_RING(chan, sifm, NV04_SCALED_IMAGE_FROM_MEMORY_COLOR_CONVERSION, 9);
+ OUT_RING (chan, NV04_SCALED_IMAGE_FROM_MEMORY_COLOR_CONVERSION_TRUNCATE);
+ OUT_RING (chan, nv04_scaled_image_format(src->format));
+ OUT_RING (chan, NV04_SCALED_IMAGE_FROM_MEMORY_OPERATION_SRCCOPY);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, sub_h << 16 | sub_w);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, sub_h << 16 | sub_w);
+ OUT_RING (chan, 1 << 20);
+ OUT_RING (chan, 1 << 20);
+
+ BEGIN_RING(chan, sifm, NV04_SCALED_IMAGE_FROM_MEMORY_SIZE, 4);
+ OUT_RING (chan, sub_h << 16 | sub_w);
+ OUT_RING (chan, src_pitch |
+ NV04_SCALED_IMAGE_FROM_MEMORY_FORMAT_ORIGIN_CENTER |
+ NV04_SCALED_IMAGE_FROM_MEMORY_FORMAT_FILTER_POINT_SAMPLE);
+ OUT_RELOCl(chan, src_bo, src->offset + (cy+sy) * src_pitch +
+ (cx+sx) * src->texture->block.size, NOUVEAU_BO_GART |
+ NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
+ OUT_RING (chan, 0);
+ }
+ }
+
+ return 0;
+}
+
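+/* Linear copy via NV_MEMORY_TO_MEMORY_FORMAT; the engine moves at most
+ * 2047 lines per request, so taller copies are split into chunks.
+ */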
+static int
+nv04_surface_copy_m2mf(struct nv04_surface_2d *ctx,
+ struct pipe_surface *dst, int dx, int dy,
+ struct pipe_surface *src, int sx, int sy, int w, int h)
+{
+ struct nouveau_channel *chan = ctx->m2mf->channel;
+ struct nouveau_grobj *m2mf = ctx->m2mf;
+ struct nouveau_bo *src_bo = nouveau_bo(ctx->buf(src));
+ struct nouveau_bo *dst_bo = nouveau_bo(ctx->buf(dst));
+ unsigned src_pitch = ((struct nv04_surface *)src)->pitch;
+ unsigned dst_pitch = ((struct nv04_surface *)dst)->pitch;
+ unsigned dst_offset = dst->offset + dy * dst_pitch +
+ dx * dst->texture->block.size;
+ unsigned src_offset = src->offset + sy * src_pitch +
+ sx * src->texture->block.size;
+
+ WAIT_RING (chan, 3 + ((h / 2047) + 1) * 9);
+ BEGIN_RING(chan, m2mf, NV04_MEMORY_TO_MEMORY_FORMAT_DMA_BUFFER_IN, 2);
+ OUT_RELOCo(chan, src_bo,
+ NOUVEAU_BO_GART | NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
+ OUT_RELOCo(chan, dst_bo,
+ NOUVEAU_BO_GART | NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+
+ while (h) {
+ int count = (h > 2047) ? 2047 : h;
+
+ BEGIN_RING(chan, m2mf, NV04_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
+ OUT_RELOCl(chan, src_bo, src_offset,
+ NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD);
+ OUT_RELOCl(chan, dst_bo, dst_offset,
+ NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_WR);
+ OUT_RING (chan, src_pitch);
+ OUT_RING (chan, dst_pitch);
+ OUT_RING (chan, w * src->texture->block.size);
+ OUT_RING (chan, count);
+ OUT_RING (chan, 0x0101);
+ OUT_RING (chan, 0);
+
+ h -= count;
+ src_offset += src_pitch * count;
+ dst_offset += dst_pitch * count;
+ }
+
+ return 0;
+}
+
+static int
+nv04_surface_copy_m2mf_swizzle(struct nv04_surface_2d *ctx,
+ struct pipe_surface *dst, int dx, int dy,
+ struct pipe_surface *src, int sx, int sy)
+{
+ struct nouveau_channel *chan = ctx->m2mf->channel;
+ struct nouveau_grobj *m2mf = ctx->m2mf;
+ struct nouveau_bo *src_bo = nouveau_bo(ctx->buf(src));
+ struct nouveau_bo *dst_bo = nouveau_bo(ctx->buf(dst));
+ unsigned src_pitch = ((struct nv04_surface *)src)->pitch;
+ unsigned dst_pitch = ((struct nv04_surface *)dst)->pitch;
+ unsigned dst_offset = dst->offset + nv04_swizzle_bits(dx, dy) *
+ dst->texture->block.size;
+ unsigned src_offset = src->offset + sy * src_pitch +
+ sx * src->texture->block.size;
+
+ BEGIN_RING(chan, m2mf, NV04_MEMORY_TO_MEMORY_FORMAT_DMA_BUFFER_IN, 2);
+ OUT_RELOCo(chan, src_bo,
+ NOUVEAU_BO_GART | NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
+ OUT_RELOCo(chan, dst_bo,
+ NOUVEAU_BO_GART | NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+
+ BEGIN_RING(chan, m2mf, NV04_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
+ OUT_RELOCl(chan, src_bo, src_offset,
+ NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD);
+ OUT_RELOCl(chan, dst_bo, dst_offset,
+ NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_WR);
+ OUT_RING (chan, src_pitch);
+ OUT_RING (chan, dst_pitch);
+ OUT_RING (chan, 1 * src->texture->block.size);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0x0101);
+ OUT_RING (chan, 0);
+
+ return 0;
+}
+
+static int
+nv04_surface_copy_blit(struct nv04_surface_2d *ctx, struct pipe_surface *dst,
+ int dx, int dy, struct pipe_surface *src, int sx, int sy,
+ int w, int h)
+{
+ struct nouveau_channel *chan = ctx->surf2d->channel;
+ struct nouveau_grobj *surf2d = ctx->surf2d;
+ struct nouveau_grobj *blit = ctx->blit;
+ struct nouveau_bo *src_bo = nouveau_bo(ctx->buf(src));
+ struct nouveau_bo *dst_bo = nouveau_bo(ctx->buf(dst));
+ unsigned src_pitch = ((struct nv04_surface *)src)->pitch;
+ unsigned dst_pitch = ((struct nv04_surface *)dst)->pitch;
+ int format;
+
+ format = nv04_surface_format(dst->format);
+ if (format < 0)
+ return 1;
+
+ WAIT_RING (chan, 12);
+ BEGIN_RING(chan, surf2d, NV04_CONTEXT_SURFACES_2D_DMA_IMAGE_SOURCE, 2);
+ OUT_RELOCo(chan, src_bo, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
+ OUT_RELOCo(chan, dst_bo, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ BEGIN_RING(chan, surf2d, NV04_CONTEXT_SURFACES_2D_FORMAT, 4);
+ OUT_RING (chan, format);
+ OUT_RING (chan, (dst_pitch << 16) | src_pitch);
+ OUT_RELOCl(chan, src_bo, src->offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD);
+ OUT_RELOCl(chan, dst_bo, dst->offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+
+ BEGIN_RING(chan, blit, 0x0300, 3);
+ OUT_RING (chan, (sy << 16) | sx);
+ OUT_RING (chan, (dy << 16) | dx);
+ OUT_RING (chan, ( h << 16) | w);
+
+ return 0;
+}
+
+static void
+nv04_surface_copy(struct nv04_surface_2d *ctx, struct pipe_surface *dst,
+ int dx, int dy, struct pipe_surface *src, int sx, int sy,
+ int w, int h)
+{
+ unsigned src_pitch = ((struct nv04_surface *)src)->pitch;
+ unsigned dst_pitch = ((struct nv04_surface *)dst)->pitch;
+ int src_linear = src->texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR;
+ int dst_linear = dst->texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR;
+
+ assert(src->format == dst->format);
+
+ /* Setup transfer to swizzle the texture to vram if needed */
+ if (src_linear && !dst_linear) {
+ int x,y;
+
+ if ((w>1) && (h>1)) {
+ int potWidth = 1<<log2i(w);
+ int potHeight = 1<<log2i(h);
+ int remainWidth = w-potWidth;
+ int remainHeight = h-potHeight;
+ int squareDim = (potWidth>potHeight ? potHeight : potWidth);
+
+ /* top left is always POT, but we can only swizzle squares */
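+			/* e.g. a 96x64 copy becomes one 64x64 swizzled
+			 * square plus a recursive 32x64 copy of the right-hand
+			 * strip, which in turn becomes two 32x32 squares. */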
+ for (y=0; y<potHeight; y+=squareDim) {
+ for (x=0; x<potWidth; x+= squareDim) {
+ nv04_surface_copy_swizzle(ctx, dst, dx+x, dy+y,
+ src, sx+x, sy+y,
+ squareDim, squareDim);
+ }
+ }
+
+ /* top right */
+ if (remainWidth>0) {
+ nv04_surface_copy(ctx, dst, dx+potWidth, dy,
+ src, sx+potWidth, sy,
+ remainWidth, potHeight);
+ }
+
+ /* bottom left */
+ if (remainHeight>0) {
+ nv04_surface_copy(ctx, dst, dx, dy+potHeight,
+ src, sx, sy+potHeight,
+ potWidth, remainHeight);
+ }
+
+ /* bottom right */
+ if ((remainWidth>0) && (remainHeight>0)) {
+ nv04_surface_copy(ctx, dst, dx+potWidth, dy+potHeight,
+ src, sx+potWidth, sy+potHeight,
+ remainWidth, remainHeight);
+ }
+ } else if (w==1) {
+ /* We have a column to copy to a swizzled texture */
+ for (y=0; y<h; y++) {
+ nv04_surface_copy_m2mf_swizzle(ctx, dst, dx, dy+y,
+ src, sx, sy+y);
+ }
+ } else if (h==1) {
+ /* We have a row to copy to a swizzled texture */
+ for (x=0; x<w; x++) {
+ nv04_surface_copy_m2mf_swizzle(ctx, dst, dx+x, dy,
+ src, sx+x, sy);
+ }
+ }
+
+ return;
+ }
+
+	/* NV_CONTEXT_SURFACES_2D has buffer alignment restrictions; fall back
+ * to NV_MEMORY_TO_MEMORY_FORMAT in this case.
+ */
+ if ((src->offset & 63) || (dst->offset & 63) ||
+ (src_pitch & 63) || (dst_pitch & 63)) {
+ nv04_surface_copy_m2mf(ctx, dst, dx, dy, src, sx, sy, w, h);
+ return;
+ }
+
+ nv04_surface_copy_blit(ctx, dst, dx, dy, src, sx, sy, w, h);
+}
+
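+/* Fill a rectangle with a constant value using NV_GDI_RECTANGLE_TEXT on top
+ * of the shared NV_CONTEXT_SURFACES_2D state. */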
+static void
+nv04_surface_fill(struct nv04_surface_2d *ctx, struct pipe_surface *dst,
+ int dx, int dy, int w, int h, unsigned value)
+{
+ struct nouveau_channel *chan = ctx->surf2d->channel;
+ struct nouveau_grobj *surf2d = ctx->surf2d;
+ struct nouveau_grobj *rect = ctx->rect;
+ struct nouveau_bo *dst_bo = nouveau_bo(ctx->buf(dst));
+ unsigned dst_pitch = ((struct nv04_surface *)dst)->pitch;
+ int cs2d_format, gdirect_format;
+
+ cs2d_format = nv04_surface_format(dst->format);
+ assert(cs2d_format >= 0);
+
+ gdirect_format = nv04_rect_format(dst->format);
+ assert(gdirect_format >= 0);
+
+ WAIT_RING (chan, 16);
+ BEGIN_RING(chan, surf2d, NV04_CONTEXT_SURFACES_2D_DMA_IMAGE_SOURCE, 2);
+ OUT_RELOCo(chan, dst_bo, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ OUT_RELOCo(chan, dst_bo, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ BEGIN_RING(chan, surf2d, NV04_CONTEXT_SURFACES_2D_FORMAT, 4);
+ OUT_RING (chan, cs2d_format);
+ OUT_RING (chan, (dst_pitch << 16) | dst_pitch);
+ OUT_RELOCl(chan, dst_bo, dst->offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ OUT_RELOCl(chan, dst_bo, dst->offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+
+ BEGIN_RING(chan, rect, NV04_GDI_RECTANGLE_TEXT_COLOR_FORMAT, 1);
+ OUT_RING (chan, gdirect_format);
+ BEGIN_RING(chan, rect, NV04_GDI_RECTANGLE_TEXT_COLOR1_A, 1);
+ OUT_RING (chan, value);
+ BEGIN_RING(chan, rect,
+ NV04_GDI_RECTANGLE_TEXT_UNCLIPPED_RECTANGLE_POINT(0), 2);
+ OUT_RING (chan, (dx << 16) | dy);
+ OUT_RING (chan, ( w << 16) | h);
+}
+
+void
+nv04_surface_2d_takedown(struct nv04_surface_2d **pctx)
+{
+ struct nv04_surface_2d *ctx;
+
+ if (!pctx || !*pctx)
+ return;
+ ctx = *pctx;
+ *pctx = NULL;
+
+ nouveau_notifier_free(&ctx->ntfy);
+ nouveau_grobj_free(&ctx->m2mf);
+ nouveau_grobj_free(&ctx->surf2d);
+ nouveau_grobj_free(&ctx->swzsurf);
+ nouveau_grobj_free(&ctx->rect);
+ nouveau_grobj_free(&ctx->blit);
+ nouveau_grobj_free(&ctx->sifm);
+
+ FREE(ctx);
+}
+
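+/* Allocate the notifier and graphics objects (M2MF, surfaces-2d, blit, GDI
+ * rect, swizzled surface, SIFM) used by the helpers above. Any allocation
+ * failure tears the partially built context back down and returns NULL. */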
+struct nv04_surface_2d *
+nv04_surface_2d_init(struct nouveau_screen *screen)
+{
+ struct nv04_surface_2d *ctx = CALLOC_STRUCT(nv04_surface_2d);
+ struct nouveau_channel *chan = screen->channel;
+ unsigned handle = 0x88000000, class;
+ int ret;
+
+ if (!ctx)
+ return NULL;
+
+ ret = nouveau_notifier_alloc(chan, handle++, 1, &ctx->ntfy);
+ if (ret) {
+ nv04_surface_2d_takedown(&ctx);
+ return NULL;
+ }
+
+ ret = nouveau_grobj_alloc(chan, handle++, 0x0039, &ctx->m2mf);
+ if (ret) {
+ nv04_surface_2d_takedown(&ctx);
+ return NULL;
+ }
+
+ BEGIN_RING(chan, ctx->m2mf, NV04_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
+ OUT_RING (chan, ctx->ntfy->handle);
+
+ if (chan->device->chipset < 0x10)
+ class = NV04_CONTEXT_SURFACES_2D;
+ else
+ class = NV10_CONTEXT_SURFACES_2D;
+
+ ret = nouveau_grobj_alloc(chan, handle++, class, &ctx->surf2d);
+ if (ret) {
+ nv04_surface_2d_takedown(&ctx);
+ return NULL;
+ }
+
+ BEGIN_RING(chan, ctx->surf2d,
+ NV04_CONTEXT_SURFACES_2D_DMA_IMAGE_SOURCE, 2);
+ OUT_RING (chan, chan->vram->handle);
+ OUT_RING (chan, chan->vram->handle);
+
+ if (chan->device->chipset < 0x10)
+ class = NV04_IMAGE_BLIT;
+ else
+ class = NV12_IMAGE_BLIT;
+
+ ret = nouveau_grobj_alloc(chan, handle++, class, &ctx->blit);
+ if (ret) {
+ nv04_surface_2d_takedown(&ctx);
+ return NULL;
+ }
+
+ BEGIN_RING(chan, ctx->blit, NV04_IMAGE_BLIT_DMA_NOTIFY, 1);
+ OUT_RING (chan, ctx->ntfy->handle);
+ BEGIN_RING(chan, ctx->blit, NV04_IMAGE_BLIT_SURFACE, 1);
+ OUT_RING (chan, ctx->surf2d->handle);
+ BEGIN_RING(chan, ctx->blit, NV04_IMAGE_BLIT_OPERATION, 1);
+ OUT_RING (chan, NV04_IMAGE_BLIT_OPERATION_SRCCOPY);
+
+ ret = nouveau_grobj_alloc(chan, handle++, NV04_GDI_RECTANGLE_TEXT,
+ &ctx->rect);
+ if (ret) {
+ nv04_surface_2d_takedown(&ctx);
+ return NULL;
+ }
+
+ BEGIN_RING(chan, ctx->rect, NV04_GDI_RECTANGLE_TEXT_DMA_NOTIFY, 1);
+ OUT_RING (chan, ctx->ntfy->handle);
+ BEGIN_RING(chan, ctx->rect, NV04_GDI_RECTANGLE_TEXT_SURFACE, 1);
+ OUT_RING (chan, ctx->surf2d->handle);
+ BEGIN_RING(chan, ctx->rect, NV04_GDI_RECTANGLE_TEXT_OPERATION, 1);
+ OUT_RING (chan, NV04_GDI_RECTANGLE_TEXT_OPERATION_SRCCOPY);
+ BEGIN_RING(chan, ctx->rect,
+ NV04_GDI_RECTANGLE_TEXT_MONOCHROME_FORMAT, 1);
+ OUT_RING (chan, NV04_GDI_RECTANGLE_TEXT_MONOCHROME_FORMAT_LE);
+
+ switch (chan->device->chipset & 0xf0) {
+ case 0x00:
+ case 0x10:
+ class = NV04_SWIZZLED_SURFACE;
+ break;
+ case 0x20:
+ class = NV20_SWIZZLED_SURFACE;
+ break;
+ case 0x30:
+ class = NV30_SWIZZLED_SURFACE;
+ break;
+ case 0x40:
+ case 0x60:
+ class = NV40_SWIZZLED_SURFACE;
+ break;
+ default:
+ /* Famous last words: this really can't happen.. */
+ assert(0);
+ break;
+ }
+
+ ret = nouveau_grobj_alloc(chan, handle++, class, &ctx->swzsurf);
+ if (ret) {
+ nv04_surface_2d_takedown(&ctx);
+ return NULL;
+ }
+
+ switch (chan->device->chipset & 0xf0) {
+ case 0x10:
+ case 0x20:
+ class = NV10_SCALED_IMAGE_FROM_MEMORY;
+ break;
+ case 0x30:
+ class = NV30_SCALED_IMAGE_FROM_MEMORY;
+ break;
+ case 0x40:
+ case 0x60:
+ class = NV40_SCALED_IMAGE_FROM_MEMORY;
+ break;
+ default:
+ class = NV04_SCALED_IMAGE_FROM_MEMORY;
+ break;
+ }
+
+ ret = nouveau_grobj_alloc(chan, handle++, class, &ctx->sifm);
+ if (ret) {
+ nv04_surface_2d_takedown(&ctx);
+ return NULL;
+ }
+
+ ctx->copy = nv04_surface_copy;
+ ctx->fill = nv04_surface_fill;
+ return ctx;
+}
diff --git a/src/gallium/drivers/nv04/nv04_surface_2d.h b/src/gallium/drivers/nv04/nv04_surface_2d.h
new file mode 100644
index 0000000000..02b3f56ba8
--- /dev/null
+++ b/src/gallium/drivers/nv04/nv04_surface_2d.h
@@ -0,0 +1,33 @@
+#ifndef __NV04_SURFACE_2D_H__
+#define __NV04_SURFACE_2D_H__
+
+struct nv04_surface {
+ struct pipe_surface base;
+ unsigned pitch;
+};
+
+struct nv04_surface_2d {
+ struct nouveau_notifier *ntfy;
+ struct nouveau_grobj *surf2d;
+ struct nouveau_grobj *swzsurf;
+ struct nouveau_grobj *m2mf;
+ struct nouveau_grobj *rect;
+ struct nouveau_grobj *blit;
+ struct nouveau_grobj *sifm;
+
+ struct pipe_buffer *(*buf)(struct pipe_surface *);
+
+ void (*copy)(struct nv04_surface_2d *, struct pipe_surface *dst,
+ int dx, int dy, struct pipe_surface *src, int sx, int sy,
+ int w, int h);
+ void (*fill)(struct nv04_surface_2d *, struct pipe_surface *dst,
+ int dx, int dy, int w, int h, unsigned value);
+};
+
+struct nv04_surface_2d *
+nv04_surface_2d_init(struct nouveau_screen *screen);
+
+void
+nv04_surface_2d_takedown(struct nv04_surface_2d **);
+
+#endif
diff --git a/src/gallium/drivers/nv04/nv04_transfer.c b/src/gallium/drivers/nv04/nv04_transfer.c
new file mode 100644
index 0000000000..854b855d64
--- /dev/null
+++ b/src/gallium/drivers/nv04/nv04_transfer.c
@@ -0,0 +1,195 @@
+#include <pipe/p_state.h>
+#include <pipe/p_defines.h>
+#include <pipe/p_inlines.h>
+#include <util/u_memory.h>
+#include <nouveau/nouveau_winsys.h>
+#include "nv04_context.h"
+#include "nv04_screen.h"
+#include "nv04_state.h"
+
+struct nv04_transfer {
+ struct pipe_transfer base;
+ struct pipe_surface *surface;
+ bool direct;
+};
+
+static unsigned nv04_usage_tx_to_buf(unsigned tx_usage)
+{
+ switch (tx_usage) {
+ case PIPE_TRANSFER_READ:
+ return PIPE_BUFFER_USAGE_CPU_READ;
+ case PIPE_TRANSFER_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_WRITE;
+ case PIPE_TRANSFER_READ_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+ default:
+ assert(0);
+ }
+
+ return -1;
+}
+
+static void
+nv04_compatible_transfer_tex(struct pipe_texture *pt, unsigned level,
+ struct pipe_texture *template)
+{
+ memset(template, 0, sizeof(struct pipe_texture));
+ template->target = pt->target;
+ template->format = pt->format;
+ template->width[0] = pt->width[level];
+ template->height[0] = pt->height[level];
+ template->depth[0] = 1;
+ template->block = pt->block;
+ template->nblocksx[0] = pt->nblocksx[level];
+	template->nblocksy[0] = pt->nblocksy[level];
+ template->last_level = 0;
+ template->nr_samples = pt->nr_samples;
+
+ template->tex_usage = PIPE_TEXTURE_USAGE_DYNAMIC |
+ NOUVEAU_TEXTURE_USAGE_LINEAR;
+}
+
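+/* Create a transfer. Dynamic linear textures are mapped directly; everything
+ * else goes through a linear staging texture, filled from the real texture
+ * here for reads and copied back in nv04_transfer_del() for writes. */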
+static struct pipe_transfer *
+nv04_transfer_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ enum pipe_transfer_usage usage,
+ unsigned x, unsigned y, unsigned w, unsigned h)
+{
+ struct nv04_miptree *mt = (struct nv04_miptree *)pt;
+ struct nv04_transfer *tx;
+ struct pipe_texture tx_tex_template, *tx_tex;
+
+ tx = CALLOC_STRUCT(nv04_transfer);
+ if (!tx)
+ return NULL;
+
+ pipe_texture_reference(&tx->base.texture, pt);
+ tx->base.format = pt->format;
+ tx->base.x = x;
+ tx->base.y = y;
+ tx->base.width = w;
+ tx->base.height = h;
+ tx->base.block = pt->block;
+ tx->base.nblocksx = pt->nblocksx[level];
+ tx->base.nblocksy = pt->nblocksy[level];
+ tx->base.stride = mt->level[level].pitch;
+ tx->base.usage = usage;
+ tx->base.face = face;
+ tx->base.level = level;
+ tx->base.zslice = zslice;
+
+ /* Direct access to texture */
+ if ((pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC ||
+ debug_get_bool_option("NOUVEAU_NO_TRANSFER", TRUE/*XXX:FALSE*/)) &&
+ pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)
+ {
+ tx->direct = true;
+ tx->surface = pscreen->get_tex_surface(pscreen, pt,
+ 0, 0, 0,
+ nv04_usage_tx_to_buf(usage));
+ return &tx->base;
+ }
+
+ tx->direct = false;
+
+ nv04_compatible_transfer_tex(pt, level, &tx_tex_template);
+
+ tx_tex = pscreen->texture_create(pscreen, &tx_tex_template);
+ if (!tx_tex)
+ {
+ FREE(tx);
+ return NULL;
+ }
+
+ tx->surface = pscreen->get_tex_surface(pscreen, tx_tex,
+ face, level, zslice,
+ nv04_usage_tx_to_buf(usage));
+
+ pipe_texture_reference(&tx_tex, NULL);
+
+ if (!tx->surface)
+ {
+ pipe_surface_reference(&tx->surface, NULL);
+ FREE(tx);
+ return NULL;
+ }
+
+ if (usage != PIPE_TRANSFER_WRITE) {
+ struct nv04_screen *nvscreen = nv04_screen(pscreen);
+ struct pipe_surface *src;
+
+ src = pscreen->get_tex_surface(pscreen, pt,
+ face, level, zslice,
+ PIPE_BUFFER_USAGE_GPU_READ);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ /* TODO: Check if SIFM can un-swizzle */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ tx->surface, 0, 0,
+ src, 0, 0,
+ src->width, src->height);
+
+ pipe_surface_reference(&src, NULL);
+ }
+
+ return &tx->base;
+}
+
+static void
+nv04_transfer_del(struct pipe_transfer *ptx)
+{
+ struct nv04_transfer *tx = (struct nv04_transfer *)ptx;
+
+ if (!tx->direct && ptx->usage != PIPE_TRANSFER_READ) {
+ struct pipe_screen *pscreen = ptx->texture->screen;
+ struct nv04_screen *nvscreen = nv04_screen(pscreen);
+ struct pipe_surface *dst;
+
+ dst = pscreen->get_tex_surface(pscreen, ptx->texture,
+ ptx->face, ptx->level, ptx->zslice,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ dst, 0, 0,
+ tx->surface, 0, 0,
+ dst->width, dst->height);
+
+ pipe_surface_reference(&dst, NULL);
+ }
+
+ pipe_surface_reference(&tx->surface, NULL);
+ pipe_texture_reference(&ptx->texture, NULL);
+ FREE(ptx);
+}
+
+static void *
+nv04_transfer_map(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv04_transfer *tx = (struct nv04_transfer *)ptx;
+ struct nv04_surface *ns = (struct nv04_surface *)tx->surface;
+ struct nv04_miptree *mt = (struct nv04_miptree *)tx->surface->texture;
+ void *map = pipe_buffer_map(pscreen, mt->buffer,
+ nv04_usage_tx_to_buf(ptx->usage));
+
+ return map + ns->base.offset +
+ ptx->y * ns->pitch + ptx->x * ptx->block.size;
+}
+
+static void
+nv04_transfer_unmap(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv04_transfer *tx = (struct nv04_transfer *)ptx;
+ struct nv04_miptree *mt = (struct nv04_miptree *)tx->surface->texture;
+
+ pipe_buffer_unmap(pscreen, mt->buffer);
+}
+
+void
+nv04_screen_init_transfer_functions(struct pipe_screen *pscreen)
+{
+ pscreen->get_tex_transfer = nv04_transfer_new;
+ pscreen->tex_transfer_destroy = nv04_transfer_del;
+ pscreen->transfer_map = nv04_transfer_map;
+ pscreen->transfer_unmap = nv04_transfer_unmap;
+}
diff --git a/src/gallium/drivers/nv04/nv04_vbo.c b/src/gallium/drivers/nv04/nv04_vbo.c
new file mode 100644
index 0000000000..e3167814f2
--- /dev/null
+++ b/src/gallium/drivers/nv04/nv04_vbo.c
@@ -0,0 +1,80 @@
+#include "draw/draw_context.h"
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+
+#include "nv04_context.h"
+#include "nv04_state.h"
+
+#include "nouveau/nouveau_channel.h"
+#include "nouveau/nouveau_pushbuf.h"
+
+boolean nv04_draw_elements( struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned prim, unsigned start, unsigned count)
+{
+ struct pipe_screen *pscreen = pipe->screen;
+ struct nv04_context *nv04 = nv04_context( pipe );
+ struct draw_context *draw = nv04->draw;
+ unsigned i;
+
+ nv04_emit_hw_state(nv04);
+
+ /*
+ * Map vertex buffers
+ */
+ for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
+ if (nv04->vtxbuf[i].buffer) {
+ void *buf
+ = pipe_buffer_map(pscreen,
+ nv04->vtxbuf[i].buffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ draw_set_mapped_vertex_buffer(draw, i, buf);
+ }
+ }
+ /* Map index buffer, if present */
+ if (indexBuffer) {
+ void *mapped_indexes
+ = pipe_buffer_map(pscreen, indexBuffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ draw_set_mapped_element_buffer(draw, indexSize, mapped_indexes);
+ }
+ else {
+ /* no index/element buffer */
+ draw_set_mapped_element_buffer(draw, 0, NULL);
+ }
+
+ draw_set_mapped_constant_buffer(draw,
+ nv04->constbuf[PIPE_SHADER_VERTEX],
+ nv04->constbuf_nr[PIPE_SHADER_VERTEX]);
+
+ /* draw! */
+ draw_arrays(nv04->draw, prim, start, count);
+
+ /*
+ * unmap vertex/index buffers
+ */
+ for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
+ if (nv04->vtxbuf[i].buffer) {
+ pipe_buffer_unmap(pscreen, nv04->vtxbuf[i].buffer);
+ draw_set_mapped_vertex_buffer(draw, i, NULL);
+ }
+ }
+ if (indexBuffer) {
+ pipe_buffer_unmap(pscreen, indexBuffer);
+ draw_set_mapped_element_buffer(draw, 0, NULL);
+ }
+
+ return TRUE;
+}
+
+boolean nv04_draw_arrays( struct pipe_context *pipe,
+ unsigned prim, unsigned start, unsigned count)
+{
+	printf("hello in draw arrays\n");
+ return nv04_draw_elements(pipe, NULL, 0, prim, start, count);
+}
+
+
+
diff --git a/src/gallium/drivers/nv10/Makefile b/src/gallium/drivers/nv10/Makefile
new file mode 100644
index 0000000000..62677f5194
--- /dev/null
+++ b/src/gallium/drivers/nv10/Makefile
@@ -0,0 +1,20 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = nv10
+
+C_SOURCES = \
+ nv10_clear.c \
+ nv10_context.c \
+ nv10_fragprog.c \
+ nv10_fragtex.c \
+ nv10_miptree.c \
+ nv10_prim_vbuf.c \
+ nv10_screen.c \
+ nv10_state.c \
+ nv10_state_emit.c \
+ nv10_surface.c \
+ nv10_transfer.c \
+ nv10_vbo.c
+
+include ../../Makefile.template
diff --git a/src/gallium/drivers/nv10/nv10_clear.c b/src/gallium/drivers/nv10/nv10_clear.c
new file mode 100644
index 0000000000..a39a2b5f52
--- /dev/null
+++ b/src/gallium/drivers/nv10/nv10_clear.c
@@ -0,0 +1,14 @@
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "util/u_clear.h"
+
+#include "nv10_context.h"
+
+void
+nv10_clear(struct pipe_context *pipe, unsigned buffers,
+ const float *rgba, double depth, unsigned stencil)
+{
+ util_clear(pipe, nv10_context(pipe)->framebuffer, buffers, rgba, depth,
+ stencil);
+}
diff --git a/src/gallium/drivers/nv10/nv10_context.c b/src/gallium/drivers/nv10/nv10_context.c
new file mode 100644
index 0000000000..a127b134ec
--- /dev/null
+++ b/src/gallium/drivers/nv10/nv10_context.c
@@ -0,0 +1,322 @@
+#include "draw/draw_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/internal/p_winsys_screen.h"
+
+#include "nv10_context.h"
+#include "nv10_screen.h"
+
+static void
+nv10_flush(struct pipe_context *pipe, unsigned flags,
+ struct pipe_fence_handle **fence)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+
+ draw_flush(nv10->draw);
+
+ FIRE_RING(fence);
+}
+
+static void
+nv10_destroy(struct pipe_context *pipe)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+
+ if (nv10->draw)
+ draw_destroy(nv10->draw);
+
+ FREE(nv10);
+}
+
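+/* Emit the static Celsius state: DMA objects, viewport clip rectangles,
+ * default render state, an identity projection matrix and the initial
+ * viewport/depth range. */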
+static void nv10_init_hwctx(struct nv10_context *nv10)
+{
+ struct nv10_screen *screen = nv10->screen;
+ struct nouveau_channel *chan = screen->base.channel;
+ int i;
+ float projectionmatrix[16];
+
+ BEGIN_RING(celsius, NV10TCL_DMA_NOTIFY, 1);
+ OUT_RING (screen->sync->handle);
+ BEGIN_RING(celsius, NV10TCL_DMA_IN_MEMORY0, 2);
+ OUT_RING (chan->vram->handle);
+ OUT_RING (chan->gart->handle);
+ BEGIN_RING(celsius, NV10TCL_DMA_IN_MEMORY2, 2);
+ OUT_RING (chan->vram->handle);
+ OUT_RING (chan->vram->handle);
+
+ BEGIN_RING(celsius, NV10TCL_NOP, 1);
+ OUT_RING (0);
+
+ BEGIN_RING(celsius, NV10TCL_RT_HORIZ, 2);
+ OUT_RING (0);
+ OUT_RING (0);
+
+ BEGIN_RING(celsius, NV10TCL_VIEWPORT_CLIP_HORIZ(0), 1);
+ OUT_RING ((0x7ff<<16)|0x800);
+ BEGIN_RING(celsius, NV10TCL_VIEWPORT_CLIP_VERT(0), 1);
+ OUT_RING ((0x7ff<<16)|0x800);
+
+ for (i=1;i<8;i++) {
+ BEGIN_RING(celsius, NV10TCL_VIEWPORT_CLIP_HORIZ(i), 1);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_VIEWPORT_CLIP_VERT(i), 1);
+ OUT_RING (0);
+ }
+
+ BEGIN_RING(celsius, 0x290, 1);
+ OUT_RING ((0x10<<16)|1);
+ BEGIN_RING(celsius, 0x3f4, 1);
+ OUT_RING (0);
+
+ BEGIN_RING(celsius, NV10TCL_NOP, 1);
+ OUT_RING (0);
+
+ if (nv10->screen->celsius->grclass != NV10TCL) {
+ /* For nv11, nv17 */
+ BEGIN_RING(celsius, 0x120, 3);
+ OUT_RING (0);
+ OUT_RING (1);
+ OUT_RING (2);
+
+ BEGIN_RING(celsius, NV10TCL_NOP, 1);
+ OUT_RING (0);
+ }
+
+ BEGIN_RING(celsius, NV10TCL_NOP, 1);
+ OUT_RING (0);
+
+ /* Set state */
+ BEGIN_RING(celsius, NV10TCL_FOG_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_ALPHA_FUNC_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_ALPHA_FUNC_FUNC, 2);
+ OUT_RING (0x207);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_TX_ENABLE(0), 2);
+ OUT_RING (0);
+ OUT_RING (0);
+
+ BEGIN_RING(celsius, NV10TCL_RC_IN_ALPHA(0), 12);
+ OUT_RING (0x30141010);
+ OUT_RING (0);
+ OUT_RING (0x20040000);
+ OUT_RING (0);
+ OUT_RING (0);
+ OUT_RING (0);
+ OUT_RING (0x00000c00);
+ OUT_RING (0);
+ OUT_RING (0x00000c00);
+ OUT_RING (0x18000000);
+ OUT_RING (0x300e0300);
+ OUT_RING (0x0c091c80);
+
+ BEGIN_RING(celsius, NV10TCL_BLEND_FUNC_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_DITHER_ENABLE, 2);
+ OUT_RING (1);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_LINE_SMOOTH_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_VERTEX_WEIGHT_ENABLE, 2);
+ OUT_RING (0);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_BLEND_FUNC_SRC, 4);
+ OUT_RING (1);
+ OUT_RING (0);
+ OUT_RING (0);
+ OUT_RING (0x8006);
+ BEGIN_RING(celsius, NV10TCL_STENCIL_MASK, 8);
+ OUT_RING (0xff);
+ OUT_RING (0x207);
+ OUT_RING (0);
+ OUT_RING (0xff);
+ OUT_RING (0x1e00);
+ OUT_RING (0x1e00);
+ OUT_RING (0x1e00);
+ OUT_RING (0x1d01);
+ BEGIN_RING(celsius, NV10TCL_NORMALIZE_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_FOG_ENABLE, 2);
+ OUT_RING (0);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_LIGHT_MODEL, 1);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_COLOR_CONTROL, 1);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_ENABLED_LIGHTS, 1);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_POLYGON_OFFSET_POINT_ENABLE, 3);
+ OUT_RING (0);
+ OUT_RING (0);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_DEPTH_FUNC, 1);
+ OUT_RING (0x201);
+ BEGIN_RING(celsius, NV10TCL_DEPTH_WRITE_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_DEPTH_TEST_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_POLYGON_OFFSET_FACTOR, 2);
+ OUT_RING (0);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_POINT_SIZE, 1);
+ OUT_RING (8);
+ BEGIN_RING(celsius, NV10TCL_POINT_PARAMETERS_ENABLE, 2);
+ OUT_RING (0);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_LINE_WIDTH, 1);
+ OUT_RING (8);
+ BEGIN_RING(celsius, NV10TCL_LINE_SMOOTH_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_POLYGON_MODE_FRONT, 2);
+ OUT_RING (0x1b02);
+ OUT_RING (0x1b02);
+ BEGIN_RING(celsius, NV10TCL_CULL_FACE, 2);
+ OUT_RING (0x405);
+ OUT_RING (0x901);
+ BEGIN_RING(celsius, NV10TCL_POLYGON_SMOOTH_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_CULL_FACE_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_TX_GEN_S(0), 8);
+ for (i=0;i<8;i++) {
+ OUT_RING (0);
+ }
+ BEGIN_RING(celsius, NV10TCL_FOG_EQUATION_CONSTANT, 3);
+	OUT_RING  (0x3fc00000); /* 1.50 */
+ OUT_RING (0xbdb8aa0a); /* -0.09 */
+ OUT_RING (0); /* 0.00 */
+
+ BEGIN_RING(celsius, NV10TCL_NOP, 1);
+ OUT_RING (0);
+
+ BEGIN_RING(celsius, NV10TCL_FOG_MODE, 2);
+ OUT_RING (0x802);
+ OUT_RING (2);
+	/* for some reason VIEW_MATRIX_ENABLE needs to be 6 instead of 4 when
+ * using texturing, except when using the texture matrix
+ */
+ BEGIN_RING(celsius, NV10TCL_VIEW_MATRIX_ENABLE, 1);
+ OUT_RING (6);
+ BEGIN_RING(celsius, NV10TCL_COLOR_MASK, 1);
+ OUT_RING (0x01010101);
+
+ /* Set vertex component */
+ BEGIN_RING(celsius, NV10TCL_VERTEX_COL_4F_R, 4);
+ OUT_RINGf (1.0);
+ OUT_RINGf (1.0);
+ OUT_RINGf (1.0);
+ OUT_RINGf (1.0);
+ BEGIN_RING(celsius, NV10TCL_VERTEX_COL2_3F_R, 3);
+ OUT_RING (0);
+ OUT_RING (0);
+ OUT_RING (0);
+ BEGIN_RING(celsius, NV10TCL_VERTEX_NOR_3F_X, 3);
+ OUT_RING (0);
+ OUT_RING (0);
+ OUT_RINGf (1.0);
+ BEGIN_RING(celsius, NV10TCL_VERTEX_TX0_4F_S, 4);
+ OUT_RINGf (0.0);
+ OUT_RINGf (0.0);
+ OUT_RINGf (0.0);
+ OUT_RINGf (1.0);
+ BEGIN_RING(celsius, NV10TCL_VERTEX_TX1_4F_S, 4);
+ OUT_RINGf (0.0);
+ OUT_RINGf (0.0);
+ OUT_RINGf (0.0);
+ OUT_RINGf (1.0);
+ BEGIN_RING(celsius, NV10TCL_VERTEX_FOG_1F, 1);
+ OUT_RINGf (0.0);
+ BEGIN_RING(celsius, NV10TCL_EDGEFLAG_ENABLE, 1);
+ OUT_RING (1);
+
+ memset(projectionmatrix, 0, sizeof(projectionmatrix));
+ BEGIN_RING(celsius, NV10TCL_PROJECTION_MATRIX(0), 16);
+ projectionmatrix[0*4+0] = 1.0;
+ projectionmatrix[1*4+1] = 1.0;
+ projectionmatrix[2*4+2] = 1.0;
+ projectionmatrix[3*4+3] = 1.0;
+ for (i=0;i<16;i++) {
+ OUT_RINGf (projectionmatrix[i]);
+ }
+
+ BEGIN_RING(celsius, NV10TCL_DEPTH_RANGE_NEAR, 2);
+	OUT_RINGf (0.0);
+ OUT_RINGf (16777216.0);
+
+ BEGIN_RING(celsius, NV10TCL_VIEWPORT_SCALE_X, 4);
+ OUT_RINGf (-2048.0);
+ OUT_RINGf (-2048.0);
+ OUT_RINGf (16777215.0 * 0.5);
+ OUT_RING (0);
+
+ FIRE_RING (NULL);
+}
+
+static void
+nv10_set_edgeflags(struct pipe_context *pipe, const unsigned *bitfield)
+{
+}
+
+static unsigned int
+nv10_is_texture_referenced( struct pipe_context *pipe,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+static unsigned int
+nv10_is_buffer_referenced( struct pipe_context *pipe,
+ struct pipe_buffer *buf)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+struct pipe_context *
+nv10_create(struct pipe_screen *pscreen, unsigned pctx_id)
+{
+ struct nv10_screen *screen = nv10_screen(pscreen);
+ struct pipe_winsys *ws = pscreen->winsys;
+ struct nv10_context *nv10;
+ struct nouveau_winsys *nvws = screen->nvws;
+
+ nv10 = CALLOC(1, sizeof(struct nv10_context));
+ if (!nv10)
+ return NULL;
+ nv10->screen = screen;
+ nv10->pctx_id = pctx_id;
+
+ nv10->nvws = nvws;
+
+ nv10->pipe.winsys = ws;
+ nv10->pipe.screen = pscreen;
+ nv10->pipe.destroy = nv10_destroy;
+ nv10->pipe.set_edgeflags = nv10_set_edgeflags;
+ nv10->pipe.draw_arrays = nv10_draw_arrays;
+ nv10->pipe.draw_elements = nv10_draw_elements;
+ nv10->pipe.clear = nv10_clear;
+ nv10->pipe.flush = nv10_flush;
+
+ nv10->pipe.is_texture_referenced = nv10_is_texture_referenced;
+ nv10->pipe.is_buffer_referenced = nv10_is_buffer_referenced;
+
+ nv10_init_surface_functions(nv10);
+ nv10_init_state_functions(nv10);
+
+ nv10->draw = draw_create();
+ assert(nv10->draw);
+ draw_set_rasterize_stage(nv10->draw, nv10_draw_vbuf_stage(nv10));
+
+ nv10_init_hwctx(nv10);
+
+ return &nv10->pipe;
+}
+
diff --git a/src/gallium/drivers/nv10/nv10_context.h b/src/gallium/drivers/nv10/nv10_context.h
new file mode 100644
index 0000000000..f1e003c953
--- /dev/null
+++ b/src/gallium/drivers/nv10/nv10_context.h
@@ -0,0 +1,154 @@
+#ifndef __NV10_CONTEXT_H__
+#define __NV10_CONTEXT_H__
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_compiler.h"
+
+#include "util/u_memory.h"
+#include "util/u_math.h"
+
+#include "draw/draw_vertex.h"
+
+#include "nouveau/nouveau_winsys.h"
+#include "nouveau/nouveau_gldefs.h"
+
+#define NOUVEAU_PUSH_CONTEXT(ctx) \
+ struct nv10_screen *ctx = nv10->screen
+#include "nouveau/nouveau_push.h"
+
+#include "nv10_state.h"
+
+#define NOUVEAU_ERR(fmt, args...) \
+ fprintf(stderr, "%s:%d - "fmt, __func__, __LINE__, ##args);
+#define NOUVEAU_MSG(fmt, args...) \
+ fprintf(stderr, "nouveau: "fmt, ##args);
+
+#define NV10_NEW_VERTPROG (1 << 0)
+#define NV10_NEW_FRAGPROG (1 << 1)
+#define NV10_NEW_VTXARRAYS (1 << 2)
+#define NV10_NEW_BLEND (1 << 3)
+#define NV10_NEW_BLENDCOL (1 << 4)
+#define NV10_NEW_RAST (1 << 5)
+#define NV10_NEW_DSA (1 << 6)
+#define NV10_NEW_VIEWPORT (1 << 7)
+#define NV10_NEW_SCISSOR (1 << 8)
+#define NV10_NEW_FRAMEBUFFER (1 << 9)
+
+#include "nv10_screen.h"
+
+struct nv10_context {
+ struct pipe_context pipe;
+
+ struct nouveau_winsys *nvws;
+ struct nv10_screen *screen;
+ unsigned pctx_id;
+
+ struct draw_context *draw;
+
+ uint32_t dirty;
+
+ struct nv10_sampler_state *tex_sampler[PIPE_MAX_SAMPLERS];
+ struct nv10_miptree *tex_miptree[PIPE_MAX_SAMPLERS];
+ unsigned dirty_samplers;
+ unsigned fp_samplers;
+ unsigned vp_samplers;
+
+ uint32_t rt_enable;
+ struct pipe_buffer *rt[4];
+ struct pipe_buffer *zeta;
+ uint32_t lma_offset;
+
+ struct nv10_blend_state *blend;
+ struct pipe_blend_color *blend_color;
+ struct nv10_rasterizer_state *rast;
+ struct nv10_depth_stencil_alpha_state *dsa;
+ struct pipe_viewport_state *viewport;
+ struct pipe_scissor_state *scissor;
+ struct pipe_framebuffer_state *framebuffer;
+
+ //struct pipe_buffer *constbuf[PIPE_SHADER_TYPES];
+ float *constbuf[PIPE_SHADER_TYPES][32][4];
+ unsigned constbuf_nr[PIPE_SHADER_TYPES];
+
+ struct vertex_info vertex_info;
+
+ struct {
+ struct pipe_buffer *buffer;
+ uint32_t format;
+ } tex[2];
+
+ unsigned vb_enable;
+ struct {
+ struct pipe_buffer *buffer;
+ unsigned delta;
+ } vb[16];
+
+/* struct {
+
+ struct nouveau_resource *exec_heap;
+ struct nouveau_resource *data_heap;
+
+ struct nv10_vertex_program *active;
+
+ struct nv10_vertex_program *current;
+ } vertprog;
+*/
+ struct {
+ struct nv10_fragment_program *active;
+
+ struct nv10_fragment_program *current;
+ struct pipe_buffer *constant_buf;
+ } fragprog;
+
+ struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
+ struct pipe_vertex_element vtxelt[PIPE_MAX_ATTRIBS];
+};
+
+static INLINE struct nv10_context *
+nv10_context(struct pipe_context *pipe)
+{
+ return (struct nv10_context *)pipe;
+}
+
+extern void nv10_init_state_functions(struct nv10_context *nv10);
+extern void nv10_init_surface_functions(struct nv10_context *nv10);
+
+extern void nv10_screen_init_miptree_functions(struct pipe_screen *pscreen);
+
+/* nv10_clear.c */
+extern void nv10_clear(struct pipe_context *pipe, unsigned buffers,
+ const float *rgba, double depth, unsigned stencil);
+
+
+/* nv10_draw.c */
+extern struct draw_stage *nv10_draw_render_stage(struct nv10_context *nv10);
+
+/* nv10_fragprog.c */
+extern void nv10_fragprog_bind(struct nv10_context *,
+ struct nv10_fragment_program *);
+extern void nv10_fragprog_destroy(struct nv10_context *,
+ struct nv10_fragment_program *);
+
+/* nv10_fragtex.c */
+extern void nv10_fragtex_bind(struct nv10_context *);
+
+/* nv10_prim_vbuf.c */
+struct draw_stage *nv10_draw_vbuf_stage( struct nv10_context *nv10 );
+extern void nv10_vtxbuf_bind(struct nv10_context* nv10);
+
+/* nv10_state.c and friends */
+extern void nv10_emit_hw_state(struct nv10_context *nv10);
+extern void nv10_state_tex_update(struct nv10_context *nv10);
+
+/* nv10_vbo.c */
+extern boolean nv10_draw_arrays(struct pipe_context *, unsigned mode,
+ unsigned start, unsigned count);
+extern boolean nv10_draw_elements( struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned prim, unsigned start, unsigned count);
+
+
+#endif
diff --git a/src/gallium/drivers/nv10/nv10_fragprog.c b/src/gallium/drivers/nv10/nv10_fragprog.c
new file mode 100644
index 0000000000..698db5a16a
--- /dev/null
+++ b/src/gallium/drivers/nv10/nv10_fragprog.c
@@ -0,0 +1,21 @@
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_util.h"
+
+#include "nv10_context.h"
+
+void
+nv10_fragprog_bind(struct nv10_context *nv10, struct nv10_fragment_program *fp)
+{
+}
+
+void
+nv10_fragprog_destroy(struct nv10_context *nv10,
+ struct nv10_fragment_program *fp)
+{
+}
+
diff --git a/src/gallium/drivers/nv10/nv10_fragtex.c b/src/gallium/drivers/nv10/nv10_fragtex.c
new file mode 100644
index 0000000000..27f2f87584
--- /dev/null
+++ b/src/gallium/drivers/nv10/nv10_fragtex.c
@@ -0,0 +1,124 @@
+#include "nv10_context.h"
+#include "nouveau/nouveau_util.h"
+
+#define _(m,tf) \
+{ \
+ TRUE, \
+ PIPE_FORMAT_##m, \
+ NV10TCL_TX_FORMAT_FORMAT_##tf, \
+}
+
+struct nv10_texture_format {
+ boolean defined;
+ uint pipe;
+ int format;
+};
+
+static struct nv10_texture_format
+nv10_texture_formats[] = {
+ _(A8R8G8B8_UNORM, A8R8G8B8),
+ _(A1R5G5B5_UNORM, A1R5G5B5),
+ _(A4R4G4B4_UNORM, A4R4G4B4),
+ _(L8_UNORM , L8 ),
+ _(A8_UNORM , A8 ),
+ _(A8L8_UNORM , A8L8 ),
+// _(RGB_DXT1 , DXT1, ),
+// _(RGBA_DXT1 , DXT1, ),
+// _(RGBA_DXT3 , DXT3, ),
+// _(RGBA_DXT5 , DXT5, ),
+ {},
+};
+
+static struct nv10_texture_format *
+nv10_fragtex_format(uint pipe_format)
+{
+ struct nv10_texture_format *tf = nv10_texture_formats;
+
+ while (tf->defined) {
+ if (tf->pipe == pipe_format)
+ return tf;
+ tf++;
+ }
+
+ return NULL;
+}
+
+
+static void
+nv10_fragtex_build(struct nv10_context *nv10, int unit)
+{
+#if 0
+ struct nv10_sampler_state *ps = nv10->tex_sampler[unit];
+ struct nv10_miptree *nv10mt = nv10->tex_miptree[unit];
+ struct pipe_texture *pt = &nv10mt->base;
+ struct nv10_texture_format *tf;
+ uint32_t txf, txs, txp;
+
+ tf = nv10_fragtex_format(pt->format);
+ if (!tf || !tf->defined) {
+ NOUVEAU_ERR("Unsupported texture format: 0x%x\n", pt->format);
+ return;
+ }
+
+ txf = tf->format << 8;
+ txf |= (pt->last_level + 1) << 16;
+ txf |= log2i(pt->width[0]) << 20;
+ txf |= log2i(pt->height[0]) << 24;
+ txf |= log2i(pt->depth[0]) << 28;
+ txf |= 8;
+
+ switch (pt->target) {
+ case PIPE_TEXTURE_CUBE:
+ txf |= NV10TCL_TX_FORMAT_CUBE_MAP;
+ /* fall-through */
+ case PIPE_TEXTURE_2D:
+ txf |= (2<<4);
+ break;
+ case PIPE_TEXTURE_1D:
+ txf |= (1<<4);
+ break;
+ default:
+ NOUVEAU_ERR("Unknown target %d\n", pt->target);
+ return;
+ }
+
+ BEGIN_RING(celsius, NV10TCL_TX_OFFSET(unit), 8);
+ OUT_RELOCl(nv10mt->buffer, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD);
+ OUT_RELOCd(nv10mt->buffer,txf,NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_OR | NOUVEAU_BO_RD, 1/*VRAM*/,2/*TT*/);
+ OUT_RING (ps->wrap);
+ OUT_RING (0x40000000); /* enable */
+ OUT_RING (txs);
+ OUT_RING (ps->filt | 0x2000 /* magic */);
+ OUT_RING ((pt->width[0] << 16) | pt->height[0]);
+ OUT_RING (ps->bcol);
+#endif
+}
+
+void
+nv10_fragtex_bind(struct nv10_context *nv10)
+{
+#if 0
+ struct nv10_fragment_program *fp = nv10->fragprog.active;
+ unsigned samplers, unit;
+
+ samplers = nv10->fp_samplers & ~fp->samplers;
+ while (samplers) {
+ unit = ffs(samplers) - 1;
+ samplers &= ~(1 << unit);
+
+ BEGIN_RING(celsius, NV10TCL_TX_ENABLE(unit), 1);
+ OUT_RING (0);
+ }
+
+ samplers = nv10->dirty_samplers & fp->samplers;
+ while (samplers) {
+ unit = ffs(samplers) - 1;
+ samplers &= ~(1 << unit);
+
+ nv10_fragtex_build(nv10, unit);
+ }
+
+ nv10->fp_samplers = fp->samplers;
+#endif
+}
+
diff --git a/src/gallium/drivers/nv10/nv10_miptree.c b/src/gallium/drivers/nv10/nv10_miptree.c
new file mode 100644
index 0000000000..34e3c2ebd7
--- /dev/null
+++ b/src/gallium/drivers/nv10/nv10_miptree.c
@@ -0,0 +1,167 @@
+#include "pipe/p_state.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+
+#include "nv10_context.h"
+#include "nv10_screen.h"
+
+static void
+nv10_miptree_layout(struct nv10_miptree *nv10mt)
+{
+ struct pipe_texture *pt = &nv10mt->base;
+ boolean swizzled = FALSE;
+ uint width = pt->width[0], height = pt->height[0];
+ uint offset = 0;
+ int nr_faces, l, f;
+
+ if (pt->target == PIPE_TEXTURE_CUBE) {
+ nr_faces = 6;
+ } else {
+ nr_faces = 1;
+ }
+
+ for (l = 0; l <= pt->last_level; l++) {
+ pt->width[l] = width;
+ pt->height[l] = height;
+ pt->nblocksx[l] = pf_get_nblocksx(&pt->block, width);
+ pt->nblocksy[l] = pf_get_nblocksy(&pt->block, height);
+
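+		/* Swizzled levels get a per-level pitch, linear levels all
+		 * share the level 0 pitch; either way it is rounded up to 64
+		 * bytes, presumably for the 2D engine's alignment rules.
+		 * e.g. a 100-texel-wide A8R8G8B8 level has 400-byte rows
+		 * stored with a 448-byte pitch. */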
+ if (swizzled)
+ nv10mt->level[l].pitch = pt->nblocksx[l] * pt->block.size;
+ else
+ nv10mt->level[l].pitch = pt->nblocksx[0] * pt->block.size;
+ nv10mt->level[l].pitch = (nv10mt->level[l].pitch + 63) & ~63;
+
+ nv10mt->level[l].image_offset =
+ CALLOC(nr_faces, sizeof(unsigned));
+
+ width = MAX2(1, width >> 1);
+ height = MAX2(1, height >> 1);
+
+ }
+
+ for (f = 0; f < nr_faces; f++) {
+ for (l = 0; l <= pt->last_level; l++) {
+ nv10mt->level[l].image_offset[f] = offset;
+ offset += nv10mt->level[l].pitch * pt->height[l];
+ }
+ }
+
+ nv10mt->total_size = offset;
+}
+
+static struct pipe_texture *
+nv10_miptree_blanket(struct pipe_screen *pscreen, const struct pipe_texture *pt,
+ const unsigned *stride, struct pipe_buffer *pb)
+{
+ struct nv10_miptree *mt;
+
+ /* Only supports 2D, non-mipmapped textures for the moment */
+ if (pt->target != PIPE_TEXTURE_2D || pt->last_level != 0 ||
+ pt->depth[0] != 1)
+ return NULL;
+
+ mt = CALLOC_STRUCT(nv10_miptree);
+ if (!mt)
+ return NULL;
+
+ mt->base = *pt;
+ pipe_reference_init(&mt->base.reference, 1);
+ mt->base.screen = pscreen;
+ mt->level[0].pitch = stride[0];
+ mt->level[0].image_offset = CALLOC(1, sizeof(unsigned));
+
+ pipe_buffer_reference(&mt->buffer, pb);
+ return &mt->base;
+}
+
+static struct pipe_texture *
+nv10_miptree_create(struct pipe_screen *screen, const struct pipe_texture *pt)
+{
+ struct nv10_miptree *mt;
+
+ mt = MALLOC(sizeof(struct nv10_miptree));
+ if (!mt)
+ return NULL;
+ mt->base = *pt;
+ pipe_reference_init(&mt->base.reference, 1);
+ mt->base.screen = screen;
+
+ nv10_miptree_layout(mt);
+
+ mt->buffer = screen->buffer_create(screen, 256, PIPE_BUFFER_USAGE_PIXEL,
+ mt->total_size);
+ if (!mt->buffer) {
+ FREE(mt);
+ return NULL;
+ }
+
+ return &mt->base;
+}
+
+static void
+nv10_miptree_destroy(struct pipe_texture *pt)
+{
+ struct nv10_miptree *nv10mt = (struct nv10_miptree *)pt;
+ int l;
+
+ pipe_buffer_reference(&nv10mt->buffer, NULL);
+ for (l = 0; l <= pt->last_level; l++) {
+ if (nv10mt->level[l].image_offset)
+ FREE(nv10mt->level[l].image_offset);
+ }
+ FREE(nv10mt);
+}
+
+static void
+nv10_miptree_update(struct pipe_context *pipe, struct pipe_texture *mt,
+ uint face, uint levels)
+{
+}
+
+
+static struct pipe_surface *
+nv10_miptree_surface_get(struct pipe_screen *screen, struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ unsigned flags)
+{
+ struct nv10_miptree *nv10mt = (struct nv10_miptree *)pt;
+ struct nv04_surface *ns;
+
+ ns = CALLOC_STRUCT(nv04_surface);
+ if (!ns)
+ return NULL;
+ pipe_texture_reference(&ns->base.texture, pt);
+ ns->base.format = pt->format;
+ ns->base.width = pt->width[level];
+ ns->base.height = pt->height[level];
+ ns->base.usage = flags;
+ pipe_reference_init(&ns->base.reference, 1);
+ ns->base.face = face;
+ ns->base.level = level;
+ ns->base.zslice = zslice;
+ ns->pitch = nv10mt->level[level].pitch;
+
+ if (pt->target == PIPE_TEXTURE_CUBE) {
+ ns->base.offset = nv10mt->level[level].image_offset[face];
+ } else {
+ ns->base.offset = nv10mt->level[level].image_offset[0];
+ }
+
+ return &ns->base;
+}
+
+static void
+nv10_miptree_surface_destroy(struct pipe_surface *surface)
+{
+	pipe_texture_reference(&surface->texture, NULL);
+	FREE(surface);
+}
+
+void nv10_screen_init_miptree_functions(struct pipe_screen *pscreen)
+{
+ pscreen->texture_create = nv10_miptree_create;
+ pscreen->texture_blanket = nv10_miptree_blanket;
+ pscreen->texture_destroy = nv10_miptree_destroy;
+ pscreen->get_tex_surface = nv10_miptree_surface_get;
+ pscreen->tex_surface_destroy = nv10_miptree_surface_destroy;
+}
+
diff --git a/src/gallium/drivers/nv10/nv10_prim_vbuf.c b/src/gallium/drivers/nv10/nv10_prim_vbuf.c
new file mode 100644
index 0000000000..1806d5f8cc
--- /dev/null
+++ b/src/gallium/drivers/nv10/nv10_prim_vbuf.c
@@ -0,0 +1,261 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Build post-transformation, post-clipping vertex buffers and element
+ * lists by hooking into the end of the primitive pipeline and
+ * manipulating the vertex_id field in the vertex headers.
+ *
+ * XXX: work in progress
+ *
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
+ * \author Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "util/u_debug.h"
+#include "pipe/p_inlines.h"
+
+#include "nv10_context.h"
+#include "nv10_state.h"
+
+#include "draw/draw_vbuf.h"
+
+/**
+ * Primitive renderer for nv10.
+ */
+struct nv10_vbuf_render {
+ struct vbuf_render base;
+
+ struct nv10_context *nv10;
+
+ /** Vertex buffer */
+ struct pipe_buffer* buffer;
+
+ /** Vertex size in bytes */
+ unsigned vertex_size;
+
+ /** Hardware primitive */
+ unsigned hwprim;
+};
+
+
+void nv10_vtxbuf_bind( struct nv10_context* nv10 )
+{
+ int i;
+ for(i = 0; i < 8; i++) {
+ BEGIN_RING(celsius, NV10TCL_VERTEX_ARRAY_ATTRIB_OFFSET(i), 1);
+ OUT_RING(0/*nv10->vtxbuf*/);
+ BEGIN_RING(celsius, NV10TCL_VERTEX_ARRAY_ATTRIB_FORMAT(i) ,1);
+ OUT_RING(0/*XXX*/);
+ }
+}
+
+/**
+ * Basically a cast wrapper.
+ */
+static INLINE struct nv10_vbuf_render *
+nv10_vbuf_render( struct vbuf_render *render )
+{
+ assert(render);
+ return (struct nv10_vbuf_render *)render;
+}
+
+
+static const struct vertex_info *
+nv10_vbuf_render_get_vertex_info( struct vbuf_render *render )
+{
+ struct nv10_vbuf_render *nv10_render = nv10_vbuf_render(render);
+ struct nv10_context *nv10 = nv10_render->nv10;
+
+ nv10_emit_hw_state(nv10);
+
+ return &nv10->vertex_info;
+}
+
+static boolean
+nv10_vbuf_render_allocate_vertices( struct vbuf_render *render,
+ ushort vertex_size,
+ ushort nr_vertices )
+{
+ struct nv10_vbuf_render *nv10_render = nv10_vbuf_render(render);
+ struct nv10_context *nv10 = nv10_render->nv10;
+ struct pipe_screen *screen = nv10->pipe.screen;
+ size_t size = (size_t)vertex_size * (size_t)nr_vertices;
+
+ assert(!nv10_render->buffer);
+ nv10_render->buffer = screen->buffer_create(screen, 64, PIPE_BUFFER_USAGE_VERTEX, size);
+
+ nv10->dirty |= NV10_NEW_VTXARRAYS;
+
+	if (!nv10_render->buffer)
+		return FALSE;
+	return TRUE;
+}
+
+static void *
+nv10_vbuf_render_map_vertices( struct vbuf_render *render )
+{
+ struct nv10_vbuf_render *nv10_render = nv10_vbuf_render(render);
+ struct nv10_context *nv10 = nv10_render->nv10;
+ struct pipe_screen *pscreen = nv10->pipe.screen;
+
+ return pipe_buffer_map(pscreen, nv10_render->buffer,
+ PIPE_BUFFER_USAGE_CPU_WRITE);
+}
+
+static void
+nv10_vbuf_render_unmap_vertices( struct vbuf_render *render,
+ ushort min_index,
+ ushort max_index )
+{
+ struct nv10_vbuf_render *nv10_render = nv10_vbuf_render(render);
+ struct nv10_context *nv10 = nv10_render->nv10;
+ struct pipe_screen *pscreen = nv10->pipe.screen;
+
+	assert(nv10_render->buffer);
+ pipe_buffer_unmap(pscreen, nv10_render->buffer);
+}
+
+static boolean
+nv10_vbuf_render_set_primitive( struct vbuf_render *render,
+ unsigned prim )
+{
+ struct nv10_vbuf_render *nv10_render = nv10_vbuf_render(render);
+ unsigned hwp = nvgl_primitive(prim);
+ if (hwp == 0)
+ return FALSE;
+
+ nv10_render->hwprim = hwp;
+ return TRUE;
+}
+
+
+static void
+nv10_vbuf_render_draw( struct vbuf_render *render,
+ const ushort *indices,
+ uint nr_indices)
+{
+ struct nv10_vbuf_render *nv10_render = nv10_vbuf_render(render);
+ struct nv10_context *nv10 = nv10_render->nv10;
+ int push, i;
+
+ nv10_emit_hw_state(nv10);
+
+ BEGIN_RING(celsius, NV10TCL_VERTEX_ARRAY_OFFSET_POS, 1);
+ OUT_RELOCl(nv10_render->buffer, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD);
+
+ BEGIN_RING(celsius, NV10TCL_VERTEX_BUFFER_BEGIN_END, 1);
+ OUT_RING(nv10_render->hwprim);
+
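+	/* VB_ELEMENT_U16 takes two indices packed per 32-bit word, so an odd
+	 * leading index goes through VB_ELEMENT_U32 and the rest are emitted
+	 * in pairs below. */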
+ if (nr_indices & 1) {
+ BEGIN_RING(celsius, NV10TCL_VB_ELEMENT_U32, 1);
+ OUT_RING (indices[0]);
+ indices++; nr_indices--;
+ }
+
+ while (nr_indices) {
+ // XXX too big/small ? check the size
+ push = MIN2(nr_indices, 1200 * 2);
+
+ BEGIN_RING_NI(celsius, NV10TCL_VB_ELEMENT_U16, push >> 1);
+ for (i = 0; i < push; i+=2)
+ OUT_RING((indices[i+1] << 16) | indices[i]);
+
+ nr_indices -= push;
+ indices += push;
+ }
+
+ BEGIN_RING(celsius, NV10TCL_VERTEX_BUFFER_BEGIN_END, 1);
+ OUT_RING (0);
+}
+
+
+static void
+nv10_vbuf_render_release_vertices( struct vbuf_render *render )
+{
+ struct nv10_vbuf_render *nv10_render = nv10_vbuf_render(render);
+
+ assert(nv10_render->buffer);
+ pipe_buffer_reference(&nv10_render->buffer, NULL);
+}
+
+
+static void
+nv10_vbuf_render_destroy( struct vbuf_render *render )
+{
+ struct nv10_vbuf_render *nv10_render = nv10_vbuf_render(render);
+ FREE(nv10_render);
+}
+
+
+/**
+ * Create a new primitive render.
+ */
+static struct vbuf_render *
+nv10_vbuf_render_create( struct nv10_context *nv10 )
+{
+ struct nv10_vbuf_render *nv10_render = CALLOC_STRUCT(nv10_vbuf_render);
+
+ nv10_render->nv10 = nv10;
+
+ nv10_render->base.max_vertex_buffer_bytes = 16*1024;
+ nv10_render->base.max_indices = 1024;
+ nv10_render->base.get_vertex_info = nv10_vbuf_render_get_vertex_info;
+ nv10_render->base.allocate_vertices = nv10_vbuf_render_allocate_vertices;
+ nv10_render->base.map_vertices = nv10_vbuf_render_map_vertices;
+ nv10_render->base.unmap_vertices = nv10_vbuf_render_unmap_vertices;
+ nv10_render->base.set_primitive = nv10_vbuf_render_set_primitive;
+ nv10_render->base.draw = nv10_vbuf_render_draw;
+ nv10_render->base.release_vertices = nv10_vbuf_render_release_vertices;
+ nv10_render->base.destroy = nv10_vbuf_render_destroy;
+
+ return &nv10_render->base;
+}
+
+
+/**
+ * Create a new primitive vbuf/render stage.
+ */
+struct draw_stage *nv10_draw_vbuf_stage( struct nv10_context *nv10 )
+{
+ struct vbuf_render *render;
+ struct draw_stage *stage;
+
+ render = nv10_vbuf_render_create(nv10);
+ if(!render)
+ return NULL;
+
+ stage = draw_vbuf_stage( nv10->draw, render );
+ if(!stage) {
+ render->destroy(render);
+ return NULL;
+ }
+
+ return stage;
+}
diff --git a/src/gallium/drivers/nv10/nv10_screen.c b/src/gallium/drivers/nv10/nv10_screen.c
new file mode 100644
index 0000000000..4469b22d91
--- /dev/null
+++ b/src/gallium/drivers/nv10/nv10_screen.c
@@ -0,0 +1,198 @@
+#include "pipe/p_screen.h"
+
+#include "nv10_context.h"
+#include "nv10_screen.h"
+
+static int
+nv10_screen_get_param(struct pipe_screen *screen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_TEXTURE_IMAGE_UNITS:
+ return 2;
+ case PIPE_CAP_NPOT_TEXTURES:
+ return 0;
+ case PIPE_CAP_TWO_SIDED_STENCIL:
+ return 0;
+ case PIPE_CAP_GLSL:
+ return 0;
+ case PIPE_CAP_S3TC:
+ return 0;
+ case PIPE_CAP_ANISOTROPIC_FILTER:
+ return 1;
+ case PIPE_CAP_POINT_SPRITE:
+ return 0;
+ case PIPE_CAP_MAX_RENDER_TARGETS:
+ return 1;
+ case PIPE_CAP_OCCLUSION_QUERY:
+ return 0;
+ case PIPE_CAP_TEXTURE_SHADOW_MAP:
+ return 0;
+ case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
+ return 12;
+ case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
+ return 0;
+ case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
+ return 12;
+ case PIPE_CAP_MAX_VERTEX_TEXTURE_UNITS:
+ return 0;
+ case PIPE_CAP_TGSI_CONT_SUPPORTED:
+ return 0;
+ case PIPE_CAP_BLEND_EQUATION_SEPARATE:
+ return 0;
+ case NOUVEAU_CAP_HW_VTXBUF:
+ case NOUVEAU_CAP_HW_IDXBUF:
+ return 0;
+ default:
+ NOUVEAU_ERR("Unknown PIPE_CAP %d\n", param);
+ return 0;
+ }
+}
+
+static float
+nv10_screen_get_paramf(struct pipe_screen *screen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_LINE_WIDTH:
+ case PIPE_CAP_MAX_LINE_WIDTH_AA:
+ return 10.0;
+ case PIPE_CAP_MAX_POINT_WIDTH:
+ case PIPE_CAP_MAX_POINT_WIDTH_AA:
+ return 64.0;
+ case PIPE_CAP_MAX_TEXTURE_ANISOTROPY:
+ return 2.0;
+ case PIPE_CAP_MAX_TEXTURE_LOD_BIAS:
+ return 4.0;
+ default:
+ NOUVEAU_ERR("Unknown PIPE_CAP %d\n", param);
+ return 0.0;
+ }
+}
+
+static boolean
+nv10_screen_is_format_supported(struct pipe_screen *screen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned tex_usage, unsigned geom_flags)
+{
+ if (tex_usage & PIPE_TEXTURE_USAGE_RENDER_TARGET) {
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ return TRUE;
+ default:
+ break;
+ }
+ } else
+ if (tex_usage & PIPE_TEXTURE_USAGE_DEPTH_STENCIL) {
+ switch (format) {
+ case PIPE_FORMAT_Z24S8_UNORM:
+ case PIPE_FORMAT_Z24X8_UNORM:
+ case PIPE_FORMAT_Z16_UNORM:
+ return TRUE;
+ default:
+ break;
+ }
+ } else {
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ case PIPE_FORMAT_L8_UNORM:
+ case PIPE_FORMAT_A8_UNORM:
+ case PIPE_FORMAT_I8_UNORM:
+ return TRUE;
+ default:
+ break;
+ }
+ }
+
+ return FALSE;
+}
+
+static void
+nv10_screen_destroy(struct pipe_screen *pscreen)
+{
+ struct nv10_screen *screen = nv10_screen(pscreen);
+
+ nouveau_notifier_free(&screen->sync);
+ nouveau_grobj_free(&screen->celsius);
+
+ FREE(pscreen);
+}
+
+static struct pipe_buffer *
+nv10_surface_buffer(struct pipe_surface *surf)
+{
+ struct nv10_miptree *mt = (struct nv10_miptree *)surf->texture;
+
+ return mt->buffer;
+}
+
+struct pipe_screen *
+nv10_screen_create(struct pipe_winsys *ws, struct nouveau_device *dev)
+{
+ struct nv10_screen *screen = CALLOC_STRUCT(nv10_screen);
+ struct nouveau_channel *chan;
+ struct pipe_screen *pscreen;
+ unsigned celsius_class;
+ int ret;
+
+ if (!screen)
+ return NULL;
+ pscreen = &screen->base.base;
+
+ ret = nouveau_screen_init(&screen->base, dev);
+ if (ret) {
+ nv10_screen_destroy(pscreen);
+ return NULL;
+ }
+ chan = screen->base.channel;
+
+ pscreen->winsys = ws;
+ pscreen->destroy = nv10_screen_destroy;
+ pscreen->get_param = nv10_screen_get_param;
+ pscreen->get_paramf = nv10_screen_get_paramf;
+ pscreen->is_format_supported = nv10_screen_is_format_supported;
+
+ nv10_screen_init_miptree_functions(pscreen);
+ nv10_screen_init_transfer_functions(pscreen);
+
+ /* 3D object */
+ if (dev->chipset >= 0x20)
+ celsius_class = NV11TCL;
+ else if (dev->chipset >= 0x17)
+ celsius_class = NV17TCL;
+ else if (dev->chipset >= 0x11)
+ celsius_class = NV11TCL;
+ else
+ celsius_class = NV10TCL;
+
+ if (!celsius_class) {
+ NOUVEAU_ERR("Unknown nv1x chipset: nv%02x\n", dev->chipset);
+ return NULL;
+ }
+
+ ret = nouveau_grobj_alloc(chan, 0xbeef0001, celsius_class,
+ &screen->celsius);
+ if (ret) {
+ NOUVEAU_ERR("Error creating 3D object: %d\n", ret);
+		nv10_screen_destroy(pscreen);
+		return NULL;
+	}
+ BIND_RING(chan, screen->celsius, 7);
+
+ /* 2D engine setup */
+ screen->eng2d = nv04_surface_2d_init(&screen->base);
+ screen->eng2d->buf = nv10_surface_buffer;
+
+ /* Notifier for sync purposes */
+ ret = nouveau_notifier_alloc(chan, 0xbeef0301, 1, &screen->sync);
+ if (ret) {
+ NOUVEAU_ERR("Error creating notifier object: %d\n", ret);
+ nv10_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ return pscreen;
+}
+
diff --git a/src/gallium/drivers/nv10/nv10_screen.h b/src/gallium/drivers/nv10/nv10_screen.h
new file mode 100644
index 0000000000..86b6d8def5
--- /dev/null
+++ b/src/gallium/drivers/nv10/nv10_screen.h
@@ -0,0 +1,28 @@
+#ifndef __NV10_SCREEN_H__
+#define __NV10_SCREEN_H__
+
+#include "nouveau/nouveau_screen.h"
+#include "nv04/nv04_surface_2d.h"
+
+struct nv10_screen {
+ struct nouveau_screen base;
+
+ struct nouveau_winsys *nvws;
+
+ /* HW graphics objects */
+ struct nv04_surface_2d *eng2d;
+ struct nouveau_grobj *celsius;
+ struct nouveau_notifier *sync;
+};
+
+static INLINE struct nv10_screen *
+nv10_screen(struct pipe_screen *screen)
+{
+ return (struct nv10_screen *)screen;
+}
+
+
+void
+nv10_screen_init_transfer_functions(struct pipe_screen *pscreen);
+
+#endif
diff --git a/src/gallium/drivers/nv10/nv10_state.c b/src/gallium/drivers/nv10/nv10_state.c
new file mode 100644
index 0000000000..9b38219b99
--- /dev/null
+++ b/src/gallium/drivers/nv10/nv10_state.c
@@ -0,0 +1,590 @@
+#include "draw/draw_context.h"
+#include "pipe/p_state.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_shader_tokens.h"
+#include "pipe/p_inlines.h"
+
+#include "tgsi/tgsi_parse.h"
+
+#include "nv10_context.h"
+#include "nv10_state.h"
+
+static void *
+nv10_blend_state_create(struct pipe_context *pipe,
+ const struct pipe_blend_state *cso)
+{
+ struct nv10_blend_state *cb;
+
+ cb = MALLOC(sizeof(struct nv10_blend_state));
+
+ cb->b_enable = cso->blend_enable ? 1 : 0;
+ cb->b_srcfunc = ((nvgl_blend_func(cso->alpha_src_factor)<<16) |
+ (nvgl_blend_func(cso->rgb_src_factor)));
+ cb->b_dstfunc = ((nvgl_blend_func(cso->alpha_dst_factor)<<16) |
+ (nvgl_blend_func(cso->rgb_dst_factor)));
+
+ cb->c_mask = (((cso->colormask & PIPE_MASK_A) ? (0x01<<24) : 0) |
+ ((cso->colormask & PIPE_MASK_R) ? (0x01<<16) : 0) |
+ ((cso->colormask & PIPE_MASK_G) ? (0x01<< 8) : 0) |
+ ((cso->colormask & PIPE_MASK_B) ? (0x01<< 0) : 0));
+
+ cb->d_enable = cso->dither ? 1 : 0;
+
+ return (void *)cb;
+}
+
+static void
+nv10_blend_state_bind(struct pipe_context *pipe, void *blend)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+
+ nv10->blend = (struct nv10_blend_state*)blend;
+
+ nv10->dirty |= NV10_NEW_BLEND;
+}
+
+static void
+nv10_blend_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ FREE(hwcso);
+}
+
+
+static INLINE unsigned
+wrap_mode(unsigned wrap) {
+ unsigned ret;
+
+ switch (wrap) {
+ case PIPE_TEX_WRAP_REPEAT:
+ ret = NV10TCL_TX_FORMAT_WRAP_S_REPEAT;
+ break;
+ case PIPE_TEX_WRAP_MIRROR_REPEAT:
+ ret = NV10TCL_TX_FORMAT_WRAP_S_MIRRORED_REPEAT;
+ break;
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ ret = NV10TCL_TX_FORMAT_WRAP_S_CLAMP_TO_EDGE;
+ break;
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ ret = NV10TCL_TX_FORMAT_WRAP_S_CLAMP_TO_BORDER;
+ break;
+ case PIPE_TEX_WRAP_CLAMP:
+ ret = NV10TCL_TX_FORMAT_WRAP_S_CLAMP;
+ break;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
+ case PIPE_TEX_WRAP_MIRROR_CLAMP:
+ default:
+ NOUVEAU_ERR("unknown wrap mode: %d\n", wrap);
+ ret = NV10TCL_TX_FORMAT_WRAP_S_REPEAT;
+ break;
+ }
+
+ return ret >> NV10TCL_TX_FORMAT_WRAP_S_SHIFT;
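+	/* Strip the S-field shift again so the caller can reposition the
+	 * value into either the S or T slot of TX_FORMAT. */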
+}
+
+static void *
+nv10_sampler_state_create(struct pipe_context *pipe,
+ const struct pipe_sampler_state *cso)
+{
+ struct nv10_sampler_state *ps;
+ uint32_t filter = 0;
+
+ ps = MALLOC(sizeof(struct nv10_sampler_state));
+
+ ps->wrap = ((wrap_mode(cso->wrap_s) << NV10TCL_TX_FORMAT_WRAP_S_SHIFT) |
+ (wrap_mode(cso->wrap_t) << NV10TCL_TX_FORMAT_WRAP_T_SHIFT));
+
+ ps->en = 0;
+ if (cso->max_anisotropy > 1.0) {
+ /* no idea, binary driver sets it, works without it.. meh.. */
+ ps->wrap |= (1 << 5);
+
+/* if (cso->max_anisotropy >= 16.0) {
+ ps->en |= NV10TCL_TX_ENABLE_ANISO_16X;
+ } else
+ if (cso->max_anisotropy >= 12.0) {
+ ps->en |= NV10TCL_TX_ENABLE_ANISO_12X;
+ } else
+ if (cso->max_anisotropy >= 10.0) {
+ ps->en |= NV10TCL_TX_ENABLE_ANISO_10X;
+ } else
+ if (cso->max_anisotropy >= 8.0) {
+ ps->en |= NV10TCL_TX_ENABLE_ANISO_8X;
+ } else
+ if (cso->max_anisotropy >= 6.0) {
+ ps->en |= NV10TCL_TX_ENABLE_ANISO_6X;
+ } else
+ if (cso->max_anisotropy >= 4.0) {
+ ps->en |= NV10TCL_TX_ENABLE_ANISO_4X;
+ } else {
+ ps->en |= NV10TCL_TX_ENABLE_ANISO_2X;
+ }*/
+ }
+
+ switch (cso->mag_img_filter) {
+ case PIPE_TEX_FILTER_LINEAR:
+ filter |= NV10TCL_TX_FILTER_MAGNIFY_LINEAR;
+ break;
+ case PIPE_TEX_FILTER_NEAREST:
+ default:
+ filter |= NV10TCL_TX_FILTER_MAGNIFY_NEAREST;
+ break;
+ }
+
+ switch (cso->min_img_filter) {
+ case PIPE_TEX_FILTER_LINEAR:
+ switch (cso->min_mip_filter) {
+ case PIPE_TEX_MIPFILTER_NEAREST:
+ filter |= NV10TCL_TX_FILTER_MINIFY_LINEAR_MIPMAP_NEAREST;
+ break;
+ case PIPE_TEX_MIPFILTER_LINEAR:
+ filter |= NV10TCL_TX_FILTER_MINIFY_LINEAR_MIPMAP_LINEAR;
+ break;
+ case PIPE_TEX_MIPFILTER_NONE:
+ default:
+ filter |= NV10TCL_TX_FILTER_MINIFY_LINEAR;
+ break;
+ }
+ break;
+ case PIPE_TEX_FILTER_NEAREST:
+ default:
+ switch (cso->min_mip_filter) {
+ case PIPE_TEX_MIPFILTER_NEAREST:
+ filter |= NV10TCL_TX_FILTER_MINIFY_NEAREST_MIPMAP_NEAREST;
+ break;
+ case PIPE_TEX_MIPFILTER_LINEAR:
+ filter |= NV10TCL_TX_FILTER_MINIFY_NEAREST_MIPMAP_LINEAR;
+ break;
+ case PIPE_TEX_MIPFILTER_NONE:
+ default:
+ filter |= NV10TCL_TX_FILTER_MINIFY_NEAREST;
+ break;
+ }
+ break;
+ }
+
+ ps->filt = filter;
+
+/* if (cso->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) {
+ switch (cso->compare_func) {
+ case PIPE_FUNC_NEVER:
+ ps->wrap |= NV10TCL_TX_WRAP_RCOMP_NEVER;
+ break;
+ case PIPE_FUNC_GREATER:
+ ps->wrap |= NV10TCL_TX_WRAP_RCOMP_GREATER;
+ break;
+ case PIPE_FUNC_EQUAL:
+ ps->wrap |= NV10TCL_TX_WRAP_RCOMP_EQUAL;
+ break;
+ case PIPE_FUNC_GEQUAL:
+ ps->wrap |= NV10TCL_TX_WRAP_RCOMP_GEQUAL;
+ break;
+ case PIPE_FUNC_LESS:
+ ps->wrap |= NV10TCL_TX_WRAP_RCOMP_LESS;
+ break;
+ case PIPE_FUNC_NOTEQUAL:
+ ps->wrap |= NV10TCL_TX_WRAP_RCOMP_NOTEQUAL;
+ break;
+ case PIPE_FUNC_LEQUAL:
+ ps->wrap |= NV10TCL_TX_WRAP_RCOMP_LEQUAL;
+ break;
+ case PIPE_FUNC_ALWAYS:
+ ps->wrap |= NV10TCL_TX_WRAP_RCOMP_ALWAYS;
+ break;
+ default:
+ break;
+ }
+ }*/
+
+ ps->bcol = ((float_to_ubyte(cso->border_color[3]) << 24) |
+ (float_to_ubyte(cso->border_color[0]) << 16) |
+ (float_to_ubyte(cso->border_color[1]) << 8) |
+ (float_to_ubyte(cso->border_color[2]) << 0));
+
+ return (void *)ps;
+}
+
+static void
+nv10_sampler_state_bind(struct pipe_context *pipe, unsigned nr, void **sampler)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+ unsigned unit;
+
+ for (unit = 0; unit < nr; unit++) {
+ nv10->tex_sampler[unit] = sampler[unit];
+ nv10->dirty_samplers |= (1 << unit);
+ }
+}
+
+static void
+nv10_sampler_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ FREE(hwcso);
+}
+
+static void
+nv10_set_sampler_texture(struct pipe_context *pipe, unsigned nr,
+ struct pipe_texture **miptree)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+ unsigned unit;
+
+ for (unit = 0; unit < nr; unit++) {
+ nv10->tex_miptree[unit] = (struct nv10_miptree *)miptree[unit];
+ nv10->dirty_samplers |= (1 << unit);
+ }
+}
+
+static void *
+nv10_rasterizer_state_create(struct pipe_context *pipe,
+ const struct pipe_rasterizer_state *cso)
+{
+ struct nv10_rasterizer_state *rs;
+ int i;
+
+ /*XXX: ignored:
+ * light_twoside
+ * offset_cw/ccw -nohw
+ * scissor
+ * point_smooth -nohw
+ * multisample
+ * offset_units / offset_scale
+ */
+ rs = MALLOC(sizeof(struct nv10_rasterizer_state));
+
+ rs->templ = cso;
+
+ rs->shade_model = cso->flatshade ? 0x1d00 : 0x1d01;
+
+ rs->line_width = (unsigned char)(cso->line_width * 8.0) & 0xff;
+ rs->line_smooth_en = cso->line_smooth ? 1 : 0;
+
+ rs->point_size = *(uint32_t*)&cso->point_size;
+
+ rs->poly_smooth_en = cso->poly_smooth ? 1 : 0;
+
+ if (cso->front_winding == PIPE_WINDING_CCW) {
+ rs->front_face = NV10TCL_FRONT_FACE_CCW;
+ rs->poly_mode_front = nvgl_polygon_mode(cso->fill_ccw);
+ rs->poly_mode_back = nvgl_polygon_mode(cso->fill_cw);
+ } else {
+ rs->front_face = NV10TCL_FRONT_FACE_CW;
+ rs->poly_mode_front = nvgl_polygon_mode(cso->fill_cw);
+ rs->poly_mode_back = nvgl_polygon_mode(cso->fill_ccw);
+ }
+
+ switch (cso->cull_mode) {
+ case PIPE_WINDING_CCW:
+ rs->cull_face_en = 1;
+ if (cso->front_winding == PIPE_WINDING_CCW)
+ rs->cull_face = NV10TCL_CULL_FACE_FRONT;
+ else
+ rs->cull_face = NV10TCL_CULL_FACE_BACK;
+ break;
+ case PIPE_WINDING_CW:
+ rs->cull_face_en = 1;
+ if (cso->front_winding == PIPE_WINDING_CW)
+ rs->cull_face = NV10TCL_CULL_FACE_FRONT;
+ else
+ rs->cull_face = NV10TCL_CULL_FACE_BACK;
+ break;
+ case PIPE_WINDING_BOTH:
+ rs->cull_face_en = 1;
+ rs->cull_face = NV10TCL_CULL_FACE_FRONT_AND_BACK;
+ break;
+ case PIPE_WINDING_NONE:
+ default:
+ rs->cull_face_en = 0;
+ rs->cull_face = 0;
+ break;
+ }
+
+ if (cso->point_sprite) {
+ rs->point_sprite = (1 << 0);
+ for (i = 0; i < 8; i++) {
+ if (cso->sprite_coord_mode[i] != PIPE_SPRITE_COORD_NONE)
+ rs->point_sprite |= (1 << (8 + i));
+ }
+ } else {
+ rs->point_sprite = 0;
+ }
+
+ return (void *)rs;
+}
+
+static void
+nv10_rasterizer_state_bind(struct pipe_context *pipe, void *rast)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+
+ nv10->rast = (struct nv10_rasterizer_state*)rast;
+
+ draw_set_rasterizer_state(nv10->draw, (nv10->rast ? nv10->rast->templ : NULL));
+
+ nv10->dirty |= NV10_NEW_RAST;
+}
+
+static void
+nv10_rasterizer_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ FREE(hwcso);
+}
+
+static void *
+nv10_depth_stencil_alpha_state_create(struct pipe_context *pipe,
+ const struct pipe_depth_stencil_alpha_state *cso)
+{
+ struct nv10_depth_stencil_alpha_state *hw;
+
+ hw = MALLOC(sizeof(struct nv10_depth_stencil_alpha_state));
+
+ hw->depth.func = nvgl_comparison_op(cso->depth.func);
+ hw->depth.write_enable = cso->depth.writemask ? 1 : 0;
+ hw->depth.test_enable = cso->depth.enabled ? 1 : 0;
+
+ hw->stencil.enable = cso->stencil[0].enabled ? 1 : 0;
+ hw->stencil.wmask = cso->stencil[0].writemask;
+ hw->stencil.func = nvgl_comparison_op(cso->stencil[0].func);
+ hw->stencil.ref = cso->stencil[0].ref_value;
+ hw->stencil.vmask = cso->stencil[0].valuemask;
+ hw->stencil.fail = nvgl_stencil_op(cso->stencil[0].fail_op);
+ hw->stencil.zfail = nvgl_stencil_op(cso->stencil[0].zfail_op);
+ hw->stencil.zpass = nvgl_stencil_op(cso->stencil[0].zpass_op);
+
+ hw->alpha.enabled = cso->alpha.enabled ? 1 : 0;
+ hw->alpha.func = nvgl_comparison_op(cso->alpha.func);
+ hw->alpha.ref = float_to_ubyte(cso->alpha.ref_value);
+
+ return (void *)hw;
+}
+
+static void
+nv10_depth_stencil_alpha_state_bind(struct pipe_context *pipe, void *dsa)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+
+ nv10->dsa = (struct nv10_depth_stencil_alpha_state*)dsa;
+
+ nv10->dirty |= NV10_NEW_DSA;
+}
+
+static void
+nv10_depth_stencil_alpha_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ FREE(hwcso);
+}
+
+static void *
+nv10_vp_state_create(struct pipe_context *pipe,
+ const struct pipe_shader_state *templ)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+
+ return draw_create_vertex_shader(nv10->draw, templ);
+}
+
+static void
+nv10_vp_state_bind(struct pipe_context *pipe, void *shader)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+
+ draw_bind_vertex_shader(nv10->draw, (struct draw_vertex_shader *) shader);
+
+ nv10->dirty |= NV10_NEW_VERTPROG;
+}
+
+static void
+nv10_vp_state_delete(struct pipe_context *pipe, void *shader)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+
+ draw_delete_vertex_shader(nv10->draw, (struct draw_vertex_shader *) shader);
+}
+
+static void *
+nv10_fp_state_create(struct pipe_context *pipe,
+ const struct pipe_shader_state *cso)
+{
+ struct nv10_fragment_program *fp;
+
+ fp = CALLOC(1, sizeof(struct nv10_fragment_program));
+ fp->pipe.tokens = tgsi_dup_tokens(cso->tokens);
+
+ tgsi_scan_shader(cso->tokens, &fp->info);
+
+ return (void *)fp;
+}
+
+static void
+nv10_fp_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+ struct nv10_fragment_program *fp = hwcso;
+
+ nv10->fragprog.current = fp;
+ nv10->dirty |= NV10_NEW_FRAGPROG;
+}
+
+static void
+nv10_fp_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+ struct nv10_fragment_program *fp = hwcso;
+
+ nv10_fragprog_destroy(nv10, fp);
+ FREE((void*)fp->pipe.tokens);
+ FREE(fp);
+}
+
+static void
+nv10_set_blend_color(struct pipe_context *pipe,
+ const struct pipe_blend_color *bcol)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+
+ nv10->blend_color = (struct pipe_blend_color*)bcol;
+
+ nv10->dirty |= NV10_NEW_BLENDCOL;
+}
+
+static void
+nv10_set_clip_state(struct pipe_context *pipe,
+ const struct pipe_clip_state *clip)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+
+ draw_set_clip_state(nv10->draw, clip);
+}
+
+static void
+nv10_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
+ const struct pipe_constant_buffer *buf )
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+ struct pipe_screen *pscreen = pipe->screen;
+
+ assert(shader < PIPE_SHADER_TYPES);
+ assert(index == 0);
+
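+ /* Copy the whole constant buffer into the per-shader shadow copy;
+ * the count is kept in vec4 units. */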
+ if (buf) {
+ void *mapped;
+ if (buf->buffer && buf->buffer->size &&
+ (mapped = pipe_buffer_map(pscreen, buf->buffer, PIPE_BUFFER_USAGE_CPU_READ)))
+ {
+ memcpy(nv10->constbuf[shader], mapped, buf->buffer->size);
+ nv10->constbuf_nr[shader] =
+ buf->buffer->size / (4 * sizeof(float));
+ pipe_buffer_unmap(pscreen, buf->buffer);
+ }
+ }
+}
+
+static void
+nv10_set_framebuffer_state(struct pipe_context *pipe,
+ const struct pipe_framebuffer_state *fb)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+
+ nv10->framebuffer = (struct pipe_framebuffer_state*)fb;
+
+ nv10->dirty |= NV10_NEW_FRAMEBUFFER;
+}
+
+static void
+nv10_set_polygon_stipple(struct pipe_context *pipe,
+ const struct pipe_poly_stipple *stipple)
+{
+ NOUVEAU_ERR("line stipple hahaha\n");
+}
+
+static void
+nv10_set_scissor_state(struct pipe_context *pipe,
+ const struct pipe_scissor_state *s)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+
+ nv10->scissor = (struct pipe_scissor_state*)s;
+
+ nv10->dirty |= NV10_NEW_SCISSOR;
+}
+
+static void
+nv10_set_viewport_state(struct pipe_context *pipe,
+ const struct pipe_viewport_state *vpt)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+
+ nv10->viewport = (struct pipe_viewport_state*)vpt;
+
+ draw_set_viewport_state(nv10->draw, nv10->viewport);
+
+ nv10->dirty |= NV10_NEW_VIEWPORT;
+}
+
+static void
+nv10_set_vertex_buffers(struct pipe_context *pipe, unsigned count,
+ const struct pipe_vertex_buffer *vb)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+
+ memcpy(nv10->vtxbuf, vb, sizeof(*vb) * count);
+ nv10->dirty |= NV10_NEW_VTXARRAYS;
+
+ draw_set_vertex_buffers(nv10->draw, count, vb);
+}
+
+static void
+nv10_set_vertex_elements(struct pipe_context *pipe, unsigned count,
+ const struct pipe_vertex_element *ve)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+
+ memcpy(nv10->vtxelt, ve, sizeof(*ve) * count);
+ nv10->dirty |= NV10_NEW_VTXARRAYS;
+
+ draw_set_vertex_elements(nv10->draw, count, ve);
+}
+
+void
+nv10_init_state_functions(struct nv10_context *nv10)
+{
+ nv10->pipe.create_blend_state = nv10_blend_state_create;
+ nv10->pipe.bind_blend_state = nv10_blend_state_bind;
+ nv10->pipe.delete_blend_state = nv10_blend_state_delete;
+
+ nv10->pipe.create_sampler_state = nv10_sampler_state_create;
+ nv10->pipe.bind_sampler_states = nv10_sampler_state_bind;
+ nv10->pipe.delete_sampler_state = nv10_sampler_state_delete;
+ nv10->pipe.set_sampler_textures = nv10_set_sampler_texture;
+
+ nv10->pipe.create_rasterizer_state = nv10_rasterizer_state_create;
+ nv10->pipe.bind_rasterizer_state = nv10_rasterizer_state_bind;
+ nv10->pipe.delete_rasterizer_state = nv10_rasterizer_state_delete;
+
+ nv10->pipe.create_depth_stencil_alpha_state =
+ nv10_depth_stencil_alpha_state_create;
+ nv10->pipe.bind_depth_stencil_alpha_state =
+ nv10_depth_stencil_alpha_state_bind;
+ nv10->pipe.delete_depth_stencil_alpha_state =
+ nv10_depth_stencil_alpha_state_delete;
+
+ nv10->pipe.create_vs_state = nv10_vp_state_create;
+ nv10->pipe.bind_vs_state = nv10_vp_state_bind;
+ nv10->pipe.delete_vs_state = nv10_vp_state_delete;
+
+ nv10->pipe.create_fs_state = nv10_fp_state_create;
+ nv10->pipe.bind_fs_state = nv10_fp_state_bind;
+ nv10->pipe.delete_fs_state = nv10_fp_state_delete;
+
+ nv10->pipe.set_blend_color = nv10_set_blend_color;
+ nv10->pipe.set_clip_state = nv10_set_clip_state;
+ nv10->pipe.set_constant_buffer = nv10_set_constant_buffer;
+ nv10->pipe.set_framebuffer_state = nv10_set_framebuffer_state;
+ nv10->pipe.set_polygon_stipple = nv10_set_polygon_stipple;
+ nv10->pipe.set_scissor_state = nv10_set_scissor_state;
+ nv10->pipe.set_viewport_state = nv10_set_viewport_state;
+
+ nv10->pipe.set_vertex_buffers = nv10_set_vertex_buffers;
+ nv10->pipe.set_vertex_elements = nv10_set_vertex_elements;
+}
+
diff --git a/src/gallium/drivers/nv10/nv10_state.h b/src/gallium/drivers/nv10/nv10_state.h
new file mode 100644
index 0000000000..3a3fd0d4f4
--- /dev/null
+++ b/src/gallium/drivers/nv10/nv10_state.h
@@ -0,0 +1,139 @@
+#ifndef __NV10_STATE_H__
+#define __NV10_STATE_H__
+
+#include "pipe/p_state.h"
+#include "tgsi/tgsi_scan.h"
+
+struct nv10_blend_state {
+ uint32_t b_enable;
+ uint32_t b_srcfunc;
+ uint32_t b_dstfunc;
+
+ uint32_t c_mask;
+
+ uint32_t d_enable;
+};
+
+struct nv10_sampler_state {
+ uint32_t wrap;
+ uint32_t en;
+ uint32_t filt;
+ uint32_t bcol;
+};
+
+struct nv10_rasterizer_state {
+ uint32_t shade_model;
+
+ uint32_t line_width;
+ uint32_t line_smooth_en;
+
+ uint32_t point_size;
+
+ uint32_t poly_smooth_en;
+
+ uint32_t poly_mode_front;
+ uint32_t poly_mode_back;
+
+ uint32_t front_face;
+ uint32_t cull_face;
+ uint32_t cull_face_en;
+
+ uint32_t point_sprite;
+
+ const struct pipe_rasterizer_state *templ;
+};
+
+struct nv10_vertex_program_exec {
+ uint32_t data[4];
+ boolean has_branch_offset;
+ int const_index;
+};
+
+struct nv10_vertex_program_data {
+ int index; /* immediates == -1 */
+ float value[4];
+};
+
+struct nv10_vertex_program {
+ const struct pipe_shader_state *pipe;
+
+ boolean translated;
+ struct nv10_vertex_program_exec *insns;
+ unsigned nr_insns;
+ struct nv10_vertex_program_data *consts;
+ unsigned nr_consts;
+
+ struct nouveau_resource *exec;
+ unsigned exec_start;
+ struct nouveau_resource *data;
+ unsigned data_start;
+ unsigned data_start_min;
+
+ uint32_t ir;
+ uint32_t or;
+};
+
+struct nv10_fragment_program_data {
+ unsigned offset;
+ unsigned index;
+};
+
+struct nv10_fragment_program {
+ struct pipe_shader_state pipe;
+ struct tgsi_shader_info info;
+
+ boolean translated;
+ boolean on_hw;
+ unsigned samplers;
+
+ uint32_t *insn;
+ int insn_len;
+
+ struct nv10_fragment_program_data *consts;
+ unsigned nr_consts;
+
+ struct pipe_buffer *buffer;
+
+ uint32_t fp_control;
+ uint32_t fp_reg_control;
+};
+
+
+struct nv10_depth_stencil_alpha_state {
+ struct {
+ uint32_t func;
+ uint32_t write_enable;
+ uint32_t test_enable;
+ } depth;
+
+ struct {
+ uint32_t enable;
+ uint32_t wmask;
+ uint32_t func;
+ uint32_t ref;
+ uint32_t vmask;
+ uint32_t fail;
+ uint32_t zfail;
+ uint32_t zpass;
+ } stencil;
+
+ struct {
+ uint32_t enabled;
+ uint32_t func;
+ uint32_t ref;
+ } alpha;
+};
+
+struct nv10_miptree {
+ struct pipe_texture base;
+
+ struct pipe_buffer *buffer;
+ uint total_size;
+
+ struct {
+ uint pitch;
+ uint *image_offset;
+ } level[PIPE_MAX_TEXTURE_LEVELS];
+};
+
+#endif
diff --git a/src/gallium/drivers/nv10/nv10_state_emit.c b/src/gallium/drivers/nv10/nv10_state_emit.c
new file mode 100644
index 0000000000..d8691ef9c6
--- /dev/null
+++ b/src/gallium/drivers/nv10/nv10_state_emit.c
@@ -0,0 +1,306 @@
+#include "nv10_context.h"
+#include "nv10_state.h"
+
+static void nv10_state_emit_blend(struct nv10_context* nv10)
+{
+ struct nv10_blend_state *b = nv10->blend;
+
+ BEGIN_RING(celsius, NV10TCL_DITHER_ENABLE, 1);
+ OUT_RING (b->d_enable);
+
+ BEGIN_RING(celsius, NV10TCL_BLEND_FUNC_ENABLE, 3);
+ OUT_RING (b->b_enable);
+ OUT_RING (b->b_srcfunc);
+ OUT_RING (b->b_dstfunc);
+
+ BEGIN_RING(celsius, NV10TCL_COLOR_MASK, 1);
+ OUT_RING (b->c_mask);
+}
+
+static void nv10_state_emit_blend_color(struct nv10_context* nv10)
+{
+ struct pipe_blend_color *c = nv10->blend_color;
+
+ BEGIN_RING(celsius, NV10TCL_BLEND_COLOR, 1);
+ OUT_RING ((float_to_ubyte(c->color[3]) << 24)|
+ (float_to_ubyte(c->color[0]) << 16)|
+ (float_to_ubyte(c->color[1]) << 8) |
+ (float_to_ubyte(c->color[2]) << 0));
+}
+
+static void nv10_state_emit_rast(struct nv10_context* nv10)
+{
+ struct nv10_rasterizer_state *r = nv10->rast;
+
+ BEGIN_RING(celsius, NV10TCL_SHADE_MODEL, 2);
+ OUT_RING (r->shade_model);
+ OUT_RING (r->line_width);
+
+
+ BEGIN_RING(celsius, NV10TCL_POINT_SIZE, 1);
+ OUT_RING (r->point_size);
+
+ BEGIN_RING(celsius, NV10TCL_POLYGON_MODE_FRONT, 2);
+ OUT_RING (r->poly_mode_front);
+ OUT_RING (r->poly_mode_back);
+
+
+ BEGIN_RING(celsius, NV10TCL_CULL_FACE, 2);
+ OUT_RING (r->cull_face);
+ OUT_RING (r->front_face);
+
+ BEGIN_RING(celsius, NV10TCL_LINE_SMOOTH_ENABLE, 2);
+ OUT_RING (r->line_smooth_en);
+ OUT_RING (r->poly_smooth_en);
+
+ BEGIN_RING(celsius, NV10TCL_CULL_FACE_ENABLE, 1);
+ OUT_RING (r->cull_face_en);
+}
+
+static void nv10_state_emit_dsa(struct nv10_context* nv10)
+{
+ struct nv10_depth_stencil_alpha_state *d = nv10->dsa;
+
+ BEGIN_RING(celsius, NV10TCL_DEPTH_FUNC, 1);
+ OUT_RING (d->depth.func);
+
+ BEGIN_RING(celsius, NV10TCL_DEPTH_WRITE_ENABLE, 1);
+ OUT_RING (d->depth.write_enable);
+
+ BEGIN_RING(celsius, NV10TCL_DEPTH_TEST_ENABLE, 1);
+ OUT_RING (d->depth.test_enable);
+
+#if 0
+ BEGIN_RING(celsius, NV10TCL_STENCIL_ENABLE, 1);
+ OUT_RING (d->stencil.enable);
+ BEGIN_RING(celsius, NV10TCL_STENCIL_MASK, 7);
+ OUT_RINGp ((uint32_t *)&(d->stencil.wmask), 7);
+#endif
+
+ BEGIN_RING(celsius, NV10TCL_ALPHA_FUNC_ENABLE, 1);
+ OUT_RING (d->alpha.enabled);
+
+ BEGIN_RING(celsius, NV10TCL_ALPHA_FUNC_FUNC, 1);
+ OUT_RING (d->alpha.func);
+
+ BEGIN_RING(celsius, NV10TCL_ALPHA_FUNC_REF, 1);
+ OUT_RING (d->alpha.ref);
+}
+
+static void nv10_state_emit_viewport(struct nv10_context* nv10)
+{
+}
+
+static void nv10_state_emit_scissor(struct nv10_context* nv10)
+{
+ /* XXX: scissor emission does not work yet, keep it disabled */
+/* struct pipe_scissor_state *s = nv10->scissor;
+ BEGIN_RING(celsius, NV10TCL_SCISSOR_HORIZ, 2);
+ OUT_RING (((s->maxx - s->minx) << 16) | s->minx);
+ OUT_RING (((s->maxy - s->miny) << 16) | s->miny);*/
+}
+
+static void nv10_state_emit_framebuffer(struct nv10_context* nv10)
+{
+ struct pipe_framebuffer_state* fb = nv10->framebuffer;
+ struct nv04_surface *rt, *zeta = NULL;
+ uint32_t rt_format, w, h;
+ int colour_format = 0, zeta_format = 0;
+ struct nv10_miptree *nv10mt = 0;
+
+ w = fb->cbufs[0]->width;
+ h = fb->cbufs[0]->height;
+ colour_format = fb->cbufs[0]->format;
+ rt = (struct nv04_surface *)fb->cbufs[0];
+
+ if (fb->zsbuf) {
+ if (colour_format) {
+ assert(w == fb->zsbuf->width);
+ assert(h == fb->zsbuf->height);
+ } else {
+ w = fb->zsbuf->width;
+ h = fb->zsbuf->height;
+ }
+
+ zeta_format = fb->zsbuf->format;
+ zeta = (struct nv04_surface *)fb->zsbuf;
+ }
+
+ rt_format = NV10TCL_RT_FORMAT_TYPE_LINEAR;
+
+ switch (colour_format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case 0:
+ rt_format |= NV10TCL_RT_FORMAT_COLOR_A8R8G8B8;
+ break;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ rt_format |= NV10TCL_RT_FORMAT_COLOR_R5G6B5;
+ break;
+ default:
+ assert(0);
+ }
+
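+ /* Colour and zeta pitches share one register: colour pitch in the
+ * low 16 bits, zeta pitch in the high 16 bits. */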
+ if (zeta) {
+ BEGIN_RING(celsius, NV10TCL_RT_PITCH, 1);
+ OUT_RING (rt->pitch | (zeta->pitch << 16));
+ } else {
+ BEGIN_RING(celsius, NV10TCL_RT_PITCH, 1);
+ OUT_RING (rt->pitch | (rt->pitch << 16));
+ }
+
+ nv10mt = (struct nv10_miptree *)rt->base.texture;
+ nv10->rt[0] = nv10mt->buffer;
+
+ if (zeta_format)
+ {
+ nv10mt = (struct nv10_miptree *)zeta->base.texture;
+ nv10->zeta = nv10mt->buffer;
+ }
+
+ BEGIN_RING(celsius, NV10TCL_RT_HORIZ, 3);
+ OUT_RING ((w << 16) | 0);
+ OUT_RING ((h << 16) | 0);
+ OUT_RING (rt_format);
+ BEGIN_RING(celsius, NV10TCL_VIEWPORT_CLIP_HORIZ(0), 2);
+ OUT_RING (((w - 1) << 16) | 0 | 0x08000800);
+ OUT_RING (((h - 1) << 16) | 0 | 0x08000800);
+}
+
+static void nv10_vertex_layout(struct nv10_context *nv10)
+{
+ struct nv10_fragment_program *fp = nv10->fragprog.current;
+ uint32_t src = 0;
+ int i;
+ struct vertex_info vinfo;
+
+ memset(&vinfo, 0, sizeof(vinfo));
+
+ for (i = 0; i < fp->info.num_inputs; i++) {
+ switch (fp->info.input_semantic_name[i]) {
+ case TGSI_SEMANTIC_POSITION:
+ draw_emit_vertex_attr(&vinfo, EMIT_4F, INTERP_LINEAR, src++);
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ draw_emit_vertex_attr(&vinfo, EMIT_4F, INTERP_LINEAR, src++);
+ break;
+ default:
+ case TGSI_SEMANTIC_GENERIC:
+ draw_emit_vertex_attr(&vinfo, EMIT_4F, INTERP_PERSPECTIVE, src++);
+ break;
+ case TGSI_SEMANTIC_FOG:
+ draw_emit_vertex_attr(&vinfo, EMIT_4F, INTERP_PERSPECTIVE, src++);
+ break;
+ }
+ }
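+ /* XXX: vinfo is only built on the stack here and is not stored anywhere yet. */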
+ draw_compute_vertex_size(&vinfo);
+}
+
+void
+nv10_emit_hw_state(struct nv10_context *nv10)
+{
+ int i;
+
+ if (nv10->dirty & NV10_NEW_VERTPROG) {
+ //nv10_vertprog_bind(nv10, nv10->vertprog.current);
+ nv10->dirty &= ~NV10_NEW_VERTPROG;
+ }
+
+ if (nv10->dirty & NV10_NEW_FRAGPROG) {
+ nv10_fragprog_bind(nv10, nv10->fragprog.current);
+ /*XXX: clear NV10_NEW_FRAGPROG if no new program uploaded */
+ nv10->dirty_samplers |= (1<<10);
+ nv10->dirty_samplers = 0;
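+ /* XXX: the bit set above is cleared again immediately; one of these
+ * two lines is probably a leftover. */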
+ }
+
+ if (nv10->dirty_samplers || (nv10->dirty & NV10_NEW_FRAGPROG)) {
+ nv10_fragtex_bind(nv10);
+ nv10->dirty &= ~NV10_NEW_FRAGPROG;
+ }
+
+ if (nv10->dirty & NV10_NEW_VTXARRAYS) {
+ nv10->dirty &= ~NV10_NEW_VTXARRAYS;
+ nv10_vertex_layout(nv10);
+ nv10_vtxbuf_bind(nv10);
+ }
+
+ if (nv10->dirty & NV10_NEW_BLEND) {
+ nv10->dirty &= ~NV10_NEW_BLEND;
+ nv10_state_emit_blend(nv10);
+ }
+
+ if (nv10->dirty & NV10_NEW_BLENDCOL) {
+ nv10->dirty &= ~NV10_NEW_BLENDCOL;
+ nv10_state_emit_blend_color(nv10);
+ }
+
+ if (nv10->dirty & NV10_NEW_RAST) {
+ nv10->dirty &= ~NV10_NEW_RAST;
+ nv10_state_emit_rast(nv10);
+ }
+
+ if (nv10->dirty & NV10_NEW_DSA) {
+ nv10->dirty &= ~NV10_NEW_DSA;
+ nv10_state_emit_dsa(nv10);
+ }
+
+ if (nv10->dirty & NV10_NEW_VIEWPORT) {
+ nv10->dirty &= ~NV10_NEW_VIEWPORT;
+ nv10_state_emit_viewport(nv10);
+ }
+
+ if (nv10->dirty & NV10_NEW_SCISSOR) {
+ nv10->dirty &= ~NV10_NEW_SCISSOR;
+ nv10_state_emit_scissor(nv10);
+ }
+
+ if (nv10->dirty & NV10_NEW_FRAMEBUFFER) {
+ nv10->dirty &= ~NV10_NEW_FRAMEBUFFER;
+ nv10_state_emit_framebuffer(nv10);
+ }
+
+ /* Emit relocs for every referenced buffer.
+ * This is to ensure the bufmgr has an accurate idea of how
+ * the buffer is used. This isn't very efficient, but we don't
+ * seem to take a significant performance hit. Will be improved
+ * at some point. Vertex arrays are emitted by nv10_vbo.c
+ */
+
+ /* Render target */
+// XXX figure out who's who for NV10TCL_DMA_* and fill accordingly
+// BEGIN_RING(celsius, NV10TCL_DMA_COLOR0, 1);
+// OUT_RELOCo(nv10->rt[0], NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ BEGIN_RING(celsius, NV10TCL_COLOR_OFFSET, 1);
+ OUT_RELOCl(nv10->rt[0], 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+
+ if (nv10->zeta) {
+// XXX
+// BEGIN_RING(celsius, NV10TCL_DMA_ZETA, 1);
+// OUT_RELOCo(nv10->zeta, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ BEGIN_RING(celsius, NV10TCL_ZETA_OFFSET, 1);
+ OUT_RELOCl(nv10->zeta, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ /* XXX for when we allocate LMA on nv17 */
+/* BEGIN_RING(celsius, NV10TCL_LMA_DEPTH_BUFFER_OFFSET, 1);
+ OUT_RELOCl(nv10->zeta + lma_offset);*/
+ }
+
+ /* Vertex buffer */
+ BEGIN_RING(celsius, NV10TCL_DMA_VTXBUF0, 1);
+ OUT_RELOCo(nv10->rt[0], NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ BEGIN_RING(celsius, NV10TCL_COLOR_OFFSET, 1);
+ OUT_RELOCl(nv10->rt[0], 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+
+ /* Texture images */
+ for (i = 0; i < 2; i++) {
+ if (!(nv10->fp_samplers & (1 << i)))
+ continue;
+ BEGIN_RING(celsius, NV10TCL_TX_OFFSET(i), 1);
+ OUT_RELOCl(nv10->tex[i].buffer, 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_GART | NOUVEAU_BO_RD);
+ BEGIN_RING(celsius, NV10TCL_TX_FORMAT(i), 1);
+ OUT_RELOCd(nv10->tex[i].buffer, nv10->tex[i].format,
+ NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD |
+ NOUVEAU_BO_OR, NV10TCL_TX_FORMAT_DMA0,
+ NV10TCL_TX_FORMAT_DMA1);
+ }
+}
+
diff --git a/src/gallium/drivers/nv10/nv10_surface.c b/src/gallium/drivers/nv10/nv10_surface.c
new file mode 100644
index 0000000000..5b52246a9c
--- /dev/null
+++ b/src/gallium/drivers/nv10/nv10_surface.c
@@ -0,0 +1,63 @@
+
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "nv10_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_inlines.h"
+#include "util/u_tile.h"
+
+static void
+nv10_surface_copy(struct pipe_context *pipe,
+ struct pipe_surface *dest, unsigned destx, unsigned desty,
+ struct pipe_surface *src, unsigned srcx, unsigned srcy,
+ unsigned width, unsigned height)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+ struct nv04_surface_2d *eng2d = nv10->screen->eng2d;
+
+ eng2d->copy(eng2d, dest, destx, desty, src, srcx, srcy, width, height);
+}
+
+static void
+nv10_surface_fill(struct pipe_context *pipe, struct pipe_surface *dest,
+ unsigned destx, unsigned desty, unsigned width,
+ unsigned height, unsigned value)
+{
+ struct nv10_context *nv10 = nv10_context(pipe);
+ struct nv04_surface_2d *eng2d = nv10->screen->eng2d;
+
+ eng2d->fill(eng2d, dest, destx, desty, width, height, value);
+}
+
+void
+nv10_init_surface_functions(struct nv10_context *nv10)
+{
+ nv10->pipe.surface_copy = nv10_surface_copy;
+ nv10->pipe.surface_fill = nv10_surface_fill;
+}
diff --git a/src/gallium/drivers/nv10/nv10_transfer.c b/src/gallium/drivers/nv10/nv10_transfer.c
new file mode 100644
index 0000000000..c06b8d34c7
--- /dev/null
+++ b/src/gallium/drivers/nv10/nv10_transfer.c
@@ -0,0 +1,195 @@
+#include <pipe/p_state.h>
+#include <pipe/p_defines.h>
+#include <pipe/p_inlines.h>
+#include <util/u_memory.h>
+#include <nouveau/nouveau_winsys.h>
+#include "nv10_context.h"
+#include "nv10_screen.h"
+#include "nv10_state.h"
+
+struct nv10_transfer {
+ struct pipe_transfer base;
+ struct pipe_surface *surface;
+ bool direct;
+};
+
+static unsigned nv10_usage_tx_to_buf(unsigned tx_usage)
+{
+ switch (tx_usage) {
+ case PIPE_TRANSFER_READ:
+ return PIPE_BUFFER_USAGE_CPU_READ;
+ case PIPE_TRANSFER_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_WRITE;
+ case PIPE_TRANSFER_READ_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+ default:
+ assert(0);
+ }
+
+ return -1;
+}
+
+static void
+nv10_compatible_transfer_tex(struct pipe_texture *pt, unsigned level,
+ struct pipe_texture *template)
+{
+ memset(template, 0, sizeof(struct pipe_texture));
+ template->target = pt->target;
+ template->format = pt->format;
+ template->width[0] = pt->width[level];
+ template->height[0] = pt->height[level];
+ template->depth[0] = 1;
+ template->block = pt->block;
+ template->nblocksx[0] = pt->nblocksx[level];
+ template->nblocksy[0] = pt->nblocksy[level];
+ template->last_level = 0;
+ template->nr_samples = pt->nr_samples;
+
+ template->tex_usage = PIPE_TEXTURE_USAGE_DYNAMIC |
+ NOUVEAU_TEXTURE_USAGE_LINEAR;
+}
+
+static struct pipe_transfer *
+nv10_transfer_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ enum pipe_transfer_usage usage,
+ unsigned x, unsigned y, unsigned w, unsigned h)
+{
+ struct nv10_miptree *mt = (struct nv10_miptree *)pt;
+ struct nv10_transfer *tx;
+ struct pipe_texture tx_tex_template, *tx_tex;
+
+ tx = CALLOC_STRUCT(nv10_transfer);
+ if (!tx)
+ return NULL;
+
+ pipe_texture_reference(&tx->base.texture, pt);
+ tx->base.format = pt->format;
+ tx->base.x = x;
+ tx->base.y = y;
+ tx->base.width = w;
+ tx->base.height = h;
+ tx->base.block = pt->block;
+ tx->base.nblocksx = pt->nblocksx[level];
+ tx->base.nblocksy = pt->nblocksy[level];
+ tx->base.stride = mt->level[level].pitch;
+ tx->base.usage = usage;
+ tx->base.face = face;
+ tx->base.level = level;
+ tx->base.zslice = zslice;
+
+ /* Direct access to texture */
+ if ((pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC ||
+ debug_get_bool_option("NOUVEAU_NO_TRANSFER", TRUE/*XXX:FALSE*/)) &&
+ pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)
+ {
+ tx->direct = true;
+ tx->surface = pscreen->get_tex_surface(pscreen, pt,
+ 0, 0, 0,
+ nv10_usage_tx_to_buf(usage));
+ return &tx->base;
+ }
+
+ tx->direct = false;
+
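+ /* Indirect path: stage through a temporary linear texture and copy
+ * to/from the real texture with the 2D engine. */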
+ nv10_compatible_transfer_tex(pt, level, &tx_tex_template);
+
+ tx_tex = pscreen->texture_create(pscreen, &tx_tex_template);
+ if (!tx_tex)
+ {
+ FREE(tx);
+ return NULL;
+ }
+
+ tx->surface = pscreen->get_tex_surface(pscreen, tx_tex,
+ face, level, zslice,
+ nv10_usage_tx_to_buf(usage));
+
+ pipe_texture_reference(&tx_tex, NULL);
+
+ if (!tx->surface)
+ {
+ pipe_surface_reference(&tx->surface, NULL);
+ FREE(tx);
+ return NULL;
+ }
+
+ if (usage != PIPE_TRANSFER_WRITE) {
+ struct nv10_screen *nvscreen = nv10_screen(pscreen);
+ struct pipe_surface *src;
+
+ src = pscreen->get_tex_surface(pscreen, pt,
+ face, level, zslice,
+ PIPE_BUFFER_USAGE_GPU_READ);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ /* TODO: Check if SIFM can un-swizzle */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ tx->surface, 0, 0,
+ src, 0, 0,
+ src->width, src->height);
+
+ pipe_surface_reference(&src, NULL);
+ }
+
+ return &tx->base;
+}
+
+static void
+nv10_transfer_del(struct pipe_transfer *ptx)
+{
+ struct nv10_transfer *tx = (struct nv10_transfer *)ptx;
+
+ if (!tx->direct && ptx->usage != PIPE_TRANSFER_READ) {
+ struct pipe_screen *pscreen = ptx->texture->screen;
+ struct nv10_screen *nvscreen = nv10_screen(pscreen);
+ struct pipe_surface *dst;
+
+ dst = pscreen->get_tex_surface(pscreen, ptx->texture,
+ ptx->face, ptx->level, ptx->zslice,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ dst, 0, 0,
+ tx->surface, 0, 0,
+ dst->width, dst->height);
+
+ pipe_surface_reference(&dst, NULL);
+ }
+
+ pipe_surface_reference(&tx->surface, NULL);
+ pipe_texture_reference(&ptx->texture, NULL);
+ FREE(ptx);
+}
+
+static void *
+nv10_transfer_map(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv10_transfer *tx = (struct nv10_transfer *)ptx;
+ struct nv04_surface *ns = (struct nv04_surface *)tx->surface;
+ struct nv10_miptree *mt = (struct nv10_miptree *)tx->surface->texture;
+ void *map = pipe_buffer_map(pscreen, mt->buffer,
+ nv10_usage_tx_to_buf(ptx->usage));
+
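+ /* Offset into the mapping by the surface's byte offset plus the
+ * requested x/y position. */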
+ return (uint8_t *)map + ns->base.offset +
+ ptx->y * ns->pitch + ptx->x * ptx->block.size;
+}
+
+static void
+nv10_transfer_unmap(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv10_transfer *tx = (struct nv10_transfer *)ptx;
+ struct nv10_miptree *mt = (struct nv10_miptree *)tx->surface->texture;
+
+ pipe_buffer_unmap(pscreen, mt->buffer);
+}
+
+void
+nv10_screen_init_transfer_functions(struct pipe_screen *pscreen)
+{
+ pscreen->get_tex_transfer = nv10_transfer_new;
+ pscreen->tex_transfer_destroy = nv10_transfer_del;
+ pscreen->transfer_map = nv10_transfer_map;
+ pscreen->transfer_unmap = nv10_transfer_unmap;
+}
diff --git a/src/gallium/drivers/nv10/nv10_vbo.c b/src/gallium/drivers/nv10/nv10_vbo.c
new file mode 100644
index 0000000000..441a4f75f3
--- /dev/null
+++ b/src/gallium/drivers/nv10/nv10_vbo.c
@@ -0,0 +1,78 @@
+#include "draw/draw_context.h"
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+
+#include "nv10_context.h"
+#include "nv10_state.h"
+
+#include "nouveau/nouveau_channel.h"
+#include "nouveau/nouveau_pushbuf.h"
+
+boolean nv10_draw_elements( struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned prim, unsigned start, unsigned count)
+{
+ struct nv10_context *nv10 = nv10_context( pipe );
+ struct draw_context *draw = nv10->draw;
+ struct pipe_screen *pscreen = pipe->screen;
+ unsigned i;
+
+ nv10_emit_hw_state(nv10);
+
+ /*
+ * Map vertex buffers
+ */
+ for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
+ if (nv10->vtxbuf[i].buffer) {
+ void *buf =
+ pipe_buffer_map(pscreen, nv10->vtxbuf[i].buffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ draw_set_mapped_vertex_buffer(draw, i, buf);
+ }
+ }
+ /* Map index buffer, if present */
+ if (indexBuffer) {
+ void *mapped_indexes
+ = pipe_buffer_map(pscreen, indexBuffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ draw_set_mapped_element_buffer(draw, indexSize, mapped_indexes);
+ }
+ else {
+ /* no index/element buffer */
+ draw_set_mapped_element_buffer(draw, 0, NULL);
+ }
+
+ draw_set_mapped_constant_buffer(draw,
+ nv10->constbuf[PIPE_SHADER_VERTEX],
+ nv10->constbuf_nr[PIPE_SHADER_VERTEX]);
+
+ /* draw! */
+ draw_arrays(nv10->draw, prim, start, count);
+
+ /*
+ * unmap vertex/index buffers
+ */
+ for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
+ if (nv10->vtxbuf[i].buffer) {
+ pipe_buffer_unmap(pscreen, nv10->vtxbuf[i].buffer);
+ draw_set_mapped_vertex_buffer(draw, i, NULL);
+ }
+ }
+ if (indexBuffer) {
+ pipe_buffer_unmap(pscreen, indexBuffer);
+ draw_set_mapped_element_buffer(draw, 0, NULL);
+ }
+
+ return TRUE;
+}
+
+boolean nv10_draw_arrays( struct pipe_context *pipe,
+ unsigned prim, unsigned start, unsigned count)
+{
+ return nv10_draw_elements(pipe, NULL, 0, prim, start, count);
+}
+
+
+
diff --git a/src/gallium/drivers/nv20/Makefile b/src/gallium/drivers/nv20/Makefile
new file mode 100644
index 0000000000..1305f26c59
--- /dev/null
+++ b/src/gallium/drivers/nv20/Makefile
@@ -0,0 +1,21 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = nv20
+
+C_SOURCES = \
+ nv20_clear.c \
+ nv20_context.c \
+ nv20_fragprog.c \
+ nv20_fragtex.c \
+ nv20_miptree.c \
+ nv20_prim_vbuf.c \
+ nv20_screen.c \
+ nv20_state.c \
+ nv20_state_emit.c \
+ nv20_surface.c \
+ nv20_transfer.c \
+ nv20_vbo.c
+# nv20_vertprog.c
+
+include ../../Makefile.template
diff --git a/src/gallium/drivers/nv20/nv20_clear.c b/src/gallium/drivers/nv20/nv20_clear.c
new file mode 100644
index 0000000000..2b4490fa5e
--- /dev/null
+++ b/src/gallium/drivers/nv20/nv20_clear.c
@@ -0,0 +1,14 @@
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "util/u_clear.h"
+
+#include "nv20_context.h"
+
+void
+nv20_clear(struct pipe_context *pipe, unsigned buffers,
+ const float *rgba, double depth, unsigned stencil)
+{
+ util_clear(pipe, nv20_context(pipe)->framebuffer, buffers, rgba, depth,
+ stencil);
+}
diff --git a/src/gallium/drivers/nv20/nv20_context.c b/src/gallium/drivers/nv20/nv20_context.c
new file mode 100644
index 0000000000..b32d0d83ba
--- /dev/null
+++ b/src/gallium/drivers/nv20/nv20_context.c
@@ -0,0 +1,446 @@
+#include "draw/draw_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/internal/p_winsys_screen.h"
+
+#include "nv20_context.h"
+#include "nv20_screen.h"
+
+static void
+nv20_flush(struct pipe_context *pipe, unsigned flags,
+ struct pipe_fence_handle **fence)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+
+ draw_flush(nv20->draw);
+
+ FIRE_RING(fence);
+}
+
+static void
+nv20_destroy(struct pipe_context *pipe)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+
+ if (nv20->draw)
+ draw_destroy(nv20->draw);
+
+ FREE(nv20);
+}
+
+static void nv20_init_hwctx(struct nv20_context *nv20)
+{
+ struct nv20_screen *screen = nv20->screen;
+ struct nouveau_channel *chan = screen->base.channel;
+ int i;
+ float projectionmatrix[16];
+ const boolean is_nv25tcl = (nv20->screen->kelvin->grclass == NV25TCL);
+
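+ /* One-time hardware setup: bind the DMA objects and program an initial
+ * state vector. Comments citing "renouveau" refer to register dumps
+ * captured from the binary driver. */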
+ BEGIN_RING(kelvin, NV20TCL_DMA_NOTIFY, 1);
+ OUT_RING (screen->sync->handle);
+ BEGIN_RING(kelvin, NV20TCL_DMA_TEXTURE0, 2);
+ OUT_RING (chan->vram->handle);
+ OUT_RING (chan->gart->handle); /* TEXTURE1 */
+ BEGIN_RING(kelvin, NV20TCL_DMA_COLOR, 2);
+ OUT_RING (chan->vram->handle);
+ OUT_RING (chan->vram->handle); /* ZETA */
+
+ BEGIN_RING(kelvin, NV20TCL_DMA_QUERY, 1);
+ OUT_RING (0); /* renouveau: beef0351, unique */
+
+ BEGIN_RING(kelvin, NV20TCL_RT_HORIZ, 2);
+ OUT_RING (0);
+ OUT_RING (0);
+
+ BEGIN_RING(kelvin, NV20TCL_VIEWPORT_CLIP_HORIZ(0), 1);
+ OUT_RING ((0xfff << 16) | 0x0);
+ BEGIN_RING(kelvin, NV20TCL_VIEWPORT_CLIP_VERT(0), 1);
+ OUT_RING ((0xfff << 16) | 0x0);
+
+ for (i = 1; i < NV20TCL_VIEWPORT_CLIP_HORIZ__SIZE; i++) {
+ BEGIN_RING(kelvin, NV20TCL_VIEWPORT_CLIP_HORIZ(i), 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_VIEWPORT_CLIP_VERT(i), 1);
+ OUT_RING (0);
+ }
+
+ BEGIN_RING(kelvin, NV20TCL_VIEWPORT_CLIP_MODE, 1);
+ OUT_RING (0);
+
+ BEGIN_RING(kelvin, 0x17e0, 3);
+ OUT_RINGf (0.0);
+ OUT_RINGf (0.0);
+ OUT_RINGf (1.0);
+
+ if (is_nv25tcl) {
+ BEGIN_RING(kelvin, NV20TCL_TX_RCOMP, 1);
+ OUT_RING (NV20TCL_TX_RCOMP_LEQUAL | 0xdb0);
+ } else {
+ BEGIN_RING(kelvin, 0x1e68, 1);
+ OUT_RING (0x4b800000); /* 16777216.000000 */
+ BEGIN_RING(kelvin, NV20TCL_TX_RCOMP, 1);
+ OUT_RING (NV20TCL_TX_RCOMP_LEQUAL);
+ }
+
+ BEGIN_RING(kelvin, 0x290, 1);
+ OUT_RING ((0x10 << 16) | 1);
+ BEGIN_RING(kelvin, 0x9fc, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, 0x1d80, 1);
+ OUT_RING (1);
+ BEGIN_RING(kelvin, 0x9f8, 1);
+ OUT_RING (4);
+ BEGIN_RING(kelvin, 0x17ec, 3);
+ OUT_RINGf (0.0);
+ OUT_RINGf (1.0);
+ OUT_RINGf (0.0);
+
+ if (is_nv25tcl) {
+ BEGIN_RING(kelvin, 0x1d88, 1);
+ OUT_RING (3);
+
+ BEGIN_RING(kelvin, NV25TCL_DMA_IN_MEMORY9, 1);
+ OUT_RING (chan->vram->handle);
+ BEGIN_RING(kelvin, NV25TCL_DMA_IN_MEMORY8, 1);
+ OUT_RING (chan->vram->handle);
+ }
+ BEGIN_RING(kelvin, NV20TCL_DMA_FENCE, 1);
+ OUT_RING (0); /* renouveau: beef1e10 */
+
+ BEGIN_RING(kelvin, 0x1e98, 1);
+ OUT_RING (0);
+#if 0
+ if (is_nv25tcl) {
+ BEGIN_RING(NvSub3D, NV25TCL_DMA_IN_MEMORY4, 2);
+ OUT_RING (NvDmaTT); /* renouveau: beef0202 */
+ OUT_RING (NvDmaFB); /* renouveau: beef0201 */
+
+ BEGIN_RING(NvSub3D, NV20TCL_DMA_TEXTURE1, 1);
+ OUT_RING (NvDmaTT); /* renouveau: beef0202 */
+ }
+#endif
+ BEGIN_RING(kelvin, NV20TCL_NOTIFY, 1);
+ OUT_RING (0);
+
+ BEGIN_RING(kelvin, 0x120, 3);
+ OUT_RING (0);
+ OUT_RING (1);
+ OUT_RING (2);
+
+/* error: ILLEGAL_MTHD, PROTECTION_FAULT
+ BEGIN_RING(kelvin, NV20TCL_VIEWPORT_TRANSLATE_X, 4);
+ OUT_RINGf (0.0);
+ OUT_RINGf (512.0);
+ OUT_RINGf (0.0);
+ OUT_RINGf (0.0);
+*/
+
+ if (is_nv25tcl) {
+ BEGIN_RING(kelvin, 0x022c, 2);
+ OUT_RING (0x280);
+ OUT_RING (0x07d28000);
+ }
+
+/* * illegal method, protection fault
+ BEGIN_RING(NvSub3D, 0x1c2c, 1);
+ OUT_RING (0); */
+
+ if (is_nv25tcl) {
+ BEGIN_RING(kelvin, 0x1da4, 1);
+ OUT_RING (0);
+ }
+
+/* * crashes with illegal method, protection fault
+ BEGIN_RING(NvSub3D, 0x1c18, 1);
+ OUT_RING (0x200); */
+
+ BEGIN_RING(kelvin, NV20TCL_RT_HORIZ, 2);
+ OUT_RING ((0 << 16) | 0);
+ OUT_RING ((0 << 16) | 0);
+
+ /* *** Set state *** */
+
+ BEGIN_RING(kelvin, NV20TCL_ALPHA_FUNC_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_ALPHA_FUNC_FUNC, 2);
+ OUT_RING (NV20TCL_ALPHA_FUNC_FUNC_ALWAYS);
+ OUT_RING (0); /* NV20TCL_ALPHA_FUNC_REF */
+
+ for (i = 0; i < NV20TCL_TX_ENABLE__SIZE; ++i) {
+ BEGIN_RING(kelvin, NV20TCL_TX_ENABLE(i), 1);
+ OUT_RING (0);
+ }
+ BEGIN_RING(kelvin, NV20TCL_TX_SHADER_OP, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_TX_SHADER_CULL_MODE, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_RC_IN_ALPHA(0), 4);
+ OUT_RING (0x30d410d0);
+ OUT_RING (0);
+ OUT_RING (0);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_RC_OUT_RGB(0), 4);
+ OUT_RING (0x00000c00);
+ OUT_RING (0);
+ OUT_RING (0);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_RC_ENABLE, 1);
+ OUT_RING (0x00011101);
+ BEGIN_RING(kelvin, NV20TCL_RC_FINAL0, 2);
+ OUT_RING (0x130e0300);
+ OUT_RING (0x0c091c80);
+ BEGIN_RING(kelvin, NV20TCL_RC_OUT_ALPHA(0), 4);
+ OUT_RING (0x00000c00);
+ OUT_RING (0);
+ OUT_RING (0);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_RC_IN_RGB(0), 4);
+ OUT_RING (0x20c400c0);
+ OUT_RING (0);
+ OUT_RING (0);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_RC_COLOR0, 2);
+ OUT_RING (0);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_RC_CONSTANT_COLOR0(0), 4);
+ OUT_RING (0x035125a0);
+ OUT_RING (0);
+ OUT_RING (0x40002000);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_MULTISAMPLE_CONTROL, 1);
+ OUT_RING (0xffff0000);
+
+ BEGIN_RING(kelvin, NV20TCL_BLEND_FUNC_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_DITHER_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_STENCIL_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_BLEND_FUNC_SRC, 4);
+ OUT_RING (NV20TCL_BLEND_FUNC_SRC_ONE);
+ OUT_RING (NV20TCL_BLEND_FUNC_DST_ZERO);
+ OUT_RING (0); /* NV20TCL_BLEND_COLOR */
+ OUT_RING (NV20TCL_BLEND_EQUATION_FUNC_ADD);
+ BEGIN_RING(kelvin, NV20TCL_STENCIL_MASK, 7);
+ OUT_RING (0xff);
+ OUT_RING (NV20TCL_STENCIL_FUNC_FUNC_ALWAYS);
+ OUT_RING (0); /* NV20TCL_STENCIL_FUNC_REF */
+ OUT_RING (0xff); /* NV20TCL_STENCIL_FUNC_MASK */
+ OUT_RING (NV20TCL_STENCIL_OP_FAIL_KEEP);
+ OUT_RING (NV20TCL_STENCIL_OP_ZFAIL_KEEP);
+ OUT_RING (NV20TCL_STENCIL_OP_ZPASS_KEEP);
+
+ BEGIN_RING(kelvin, NV20TCL_COLOR_LOGIC_OP_ENABLE, 2);
+ OUT_RING (0);
+ OUT_RING (NV20TCL_COLOR_LOGIC_OP_OP_COPY);
+ BEGIN_RING(kelvin, 0x17cc, 1);
+ OUT_RING (0);
+ if (is_nv25tcl) {
+ BEGIN_RING(kelvin, 0x1d84, 1);
+ OUT_RING (1);
+ }
+ BEGIN_RING(kelvin, NV20TCL_LIGHTING_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_LIGHT_CONTROL, 1);
+ OUT_RING (0x00020000);
+ BEGIN_RING(kelvin, NV20TCL_SEPARATE_SPECULAR_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_LIGHT_MODEL_TWO_SIDE_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_ENABLED_LIGHTS, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_NORMALIZE_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_POLYGON_STIPPLE_PATTERN(0),
+ NV20TCL_POLYGON_STIPPLE_PATTERN__SIZE);
+ for (i = 0; i < NV20TCL_POLYGON_STIPPLE_PATTERN__SIZE; ++i) {
+ OUT_RING(0xffffffff);
+ }
+
+ BEGIN_RING(kelvin, NV20TCL_POLYGON_OFFSET_POINT_ENABLE, 3);
+ OUT_RING (0);
+ OUT_RING (0); /* NV20TCL.POLYGON_OFFSET_LINE_ENABLE */
+ OUT_RING (0); /* NV20TCL.POLYGON_OFFSET_FILL_ENABLE */
+ BEGIN_RING(kelvin, NV20TCL_DEPTH_FUNC, 1);
+ OUT_RING (NV20TCL_DEPTH_FUNC_LESS);
+ BEGIN_RING(kelvin, NV20TCL_DEPTH_WRITE_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_DEPTH_TEST_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_POLYGON_OFFSET_FACTOR, 2);
+ OUT_RINGf (0.0);
+ OUT_RINGf (0.0); /* NV20TCL.POLYGON_OFFSET_UNITS */
+ BEGIN_RING(kelvin, NV20TCL_DEPTH_UNK17D8, 1);
+ OUT_RING (1);
+ if (!is_nv25tcl) {
+ BEGIN_RING(kelvin, 0x1d84, 1);
+ OUT_RING (3);
+ }
+ BEGIN_RING(kelvin, NV20TCL_POINT_SIZE, 1);
+ if (!is_nv25tcl) {
+ OUT_RING (8);
+ } else {
+ OUT_RINGf (1.0);
+ }
+ if (!is_nv25tcl) {
+ BEGIN_RING(kelvin, NV20TCL_POINT_PARAMETERS_ENABLE, 2);
+ OUT_RING (0);
+ OUT_RING (0); /* NV20TCL.POINT_SMOOTH_ENABLE */
+ } else {
+ BEGIN_RING(kelvin, NV20TCL_POINT_PARAMETERS_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, 0x0a1c, 1);
+ OUT_RING (0x800);
+ }
+ BEGIN_RING(kelvin, NV20TCL_LINE_WIDTH, 1);
+ OUT_RING (8);
+ BEGIN_RING(kelvin, NV20TCL_LINE_SMOOTH_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_POLYGON_MODE_FRONT, 2);
+ OUT_RING (NV20TCL_POLYGON_MODE_FRONT_FILL);
+ OUT_RING (NV20TCL_POLYGON_MODE_BACK_FILL);
+ BEGIN_RING(kelvin, NV20TCL_CULL_FACE, 2);
+ OUT_RING (NV20TCL_CULL_FACE_BACK);
+ OUT_RING (NV20TCL_FRONT_FACE_CCW);
+ BEGIN_RING(kelvin, NV20TCL_POLYGON_SMOOTH_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_CULL_FACE_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_SHADE_MODEL, 1);
+ OUT_RING (NV20TCL_SHADE_MODEL_SMOOTH);
+ BEGIN_RING(kelvin, NV20TCL_POLYGON_STIPPLE_ENABLE, 1);
+ OUT_RING (0);
+ BEGIN_RING(kelvin, NV20TCL_TX_GEN_S(0), 4 * NV20TCL_TX_GEN_S__SIZE);
+ for (i=0; i < 4 * NV20TCL_TX_GEN_S__SIZE; ++i) {
+ OUT_RING(0);
+ }
+ BEGIN_RING(kelvin, NV20TCL_FOG_EQUATION_CONSTANT, 3);
+ OUT_RINGf (1.5);
+ OUT_RINGf (-0.090168); /* NV20TCL.FOG_EQUATION_LINEAR */
+ OUT_RINGf (0.0); /* NV20TCL.FOG_EQUATION_QUADRATIC */
+ BEGIN_RING(kelvin, NV20TCL_FOG_MODE, 2);
+ OUT_RING (NV20TCL_FOG_MODE_EXP_2);
+ OUT_RING (NV20TCL_FOG_COORD_DIST_COORD_FOG);
+ BEGIN_RING(kelvin, NV20TCL_FOG_ENABLE, 2);
+ OUT_RING (0);
+ OUT_RING (0); /* NV20TCL.FOG_COLOR */
+ BEGIN_RING(kelvin, NV20TCL_ENGINE, 1);
+ OUT_RING (NV20TCL_ENGINE_FIXED);
+
+ for (i = 0; i < NV20TCL_TX_MATRIX_ENABLE__SIZE; ++i) {
+ BEGIN_RING(kelvin, NV20TCL_TX_MATRIX_ENABLE(i), 1);
+ OUT_RING (0);
+ }
+
+ BEGIN_RING(kelvin, NV20TCL_VTX_ATTR_4F_X(1), 4 * 15);
+ OUT_RINGf(1.0); OUT_RINGf(0.0); OUT_RINGf(0.0); OUT_RINGf(1.0);
+ OUT_RINGf(0.0); OUT_RINGf(0.0); OUT_RINGf(1.0); OUT_RINGf(1.0);
+ OUT_RINGf(1.0); OUT_RINGf(1.0); OUT_RINGf(1.0); OUT_RINGf(1.0);
+ for (i = 4; i < 16; ++i) {
+ OUT_RINGf(0.0); OUT_RINGf(0.0); OUT_RINGf(0.0); OUT_RINGf(1.0);
+ }
+
+ BEGIN_RING(kelvin, NV20TCL_EDGEFLAG_ENABLE, 1);
+ OUT_RING (1);
+ BEGIN_RING(kelvin, NV20TCL_COLOR_MASK, 1);
+ OUT_RING (0x00010101);
+ BEGIN_RING(kelvin, NV20TCL_CLEAR_VALUE, 1);
+ OUT_RING (0);
+
+ memset(projectionmatrix, 0, sizeof(projectionmatrix));
+ projectionmatrix[0*4+0] = 1.0;
+ projectionmatrix[1*4+1] = 1.0;
+ projectionmatrix[2*4+2] = 16777215.0;
+ projectionmatrix[3*4+3] = 1.0;
+ BEGIN_RING(kelvin, NV20TCL_PROJECTION_MATRIX(0), 16);
+ for (i = 0; i < 16; i++) {
+ OUT_RINGf (projectionmatrix[i]);
+ }
+
+ BEGIN_RING(kelvin, NV20TCL_DEPTH_RANGE_NEAR, 2);
+ OUT_RINGf (0.0);
+ OUT_RINGf (16777216.0); /* [0, 1] scaled approx to [0, 2^24] */
+
+ BEGIN_RING(kelvin, NV20TCL_VIEWPORT_SCALE0_X, 4);
+ OUT_RINGf (0.0); /* x-offset, w/2 + 1.031250 */
+ OUT_RINGf (0.0); /* y-offset, h/2 + 0.030762 */
+ OUT_RINGf (0.0);
+ OUT_RINGf (16777215.0);
+
+ BEGIN_RING(kelvin, NV20TCL_VIEWPORT_SCALE1_X, 4);
+ OUT_RINGf (0.0); /* no effect?, w/2 */
+ OUT_RINGf (0.0); /* no effect?, h/2 */
+ OUT_RINGf (16777215.0 * 0.5);
+ OUT_RINGf (65535.0);
+
+ FIRE_RING (NULL);
+}
+
+static void
+nv20_set_edgeflags(struct pipe_context *pipe, const unsigned *bitfield)
+{
+}
+
+
+static unsigned int
+nv20_is_texture_referenced( struct pipe_context *pipe,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+static unsigned int
+nv20_is_buffer_referenced( struct pipe_context *pipe,
+ struct pipe_buffer *buf)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+struct pipe_context *
+nv20_create(struct pipe_screen *pscreen, unsigned pctx_id)
+{
+ struct nv20_screen *screen = nv20_screen(pscreen);
+ struct pipe_winsys *ws = pscreen->winsys;
+ struct nv20_context *nv20;
+ struct nouveau_winsys *nvws = screen->nvws;
+
+ nv20 = CALLOC(1, sizeof(struct nv20_context));
+ if (!nv20)
+ return NULL;
+ nv20->screen = screen;
+ nv20->pctx_id = pctx_id;
+
+ nv20->nvws = nvws;
+
+ nv20->pipe.winsys = ws;
+ nv20->pipe.screen = pscreen;
+ nv20->pipe.destroy = nv20_destroy;
+ nv20->pipe.set_edgeflags = nv20_set_edgeflags;
+ nv20->pipe.draw_arrays = nv20_draw_arrays;
+ nv20->pipe.draw_elements = nv20_draw_elements;
+ nv20->pipe.clear = nv20_clear;
+ nv20->pipe.flush = nv20_flush;
+
+ nv20->pipe.is_texture_referenced = nv20_is_texture_referenced;
+ nv20->pipe.is_buffer_referenced = nv20_is_buffer_referenced;
+
+ nv20_init_surface_functions(nv20);
+ nv20_init_state_functions(nv20);
+
+ nv20->draw = draw_create();
+ assert(nv20->draw);
+ draw_set_rasterize_stage(nv20->draw, nv20_draw_vbuf_stage(nv20));
+
+ nv20_init_hwctx(nv20);
+
+ return &nv20->pipe;
+}
+
diff --git a/src/gallium/drivers/nv20/nv20_context.h b/src/gallium/drivers/nv20/nv20_context.h
new file mode 100644
index 0000000000..fc932f1f90
--- /dev/null
+++ b/src/gallium/drivers/nv20/nv20_context.h
@@ -0,0 +1,153 @@
+#ifndef __NV20_CONTEXT_H__
+#define __NV20_CONTEXT_H__
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_compiler.h"
+
+#include "util/u_memory.h"
+#include "util/u_math.h"
+
+#include "draw/draw_vertex.h"
+
+#include "nouveau/nouveau_winsys.h"
+#include "nouveau/nouveau_gldefs.h"
+
+#define NOUVEAU_PUSH_CONTEXT(ctx) \
+ struct nv20_screen *ctx = nv20->screen
+#include "nouveau/nouveau_push.h"
+
+#include "nv20_state.h"
+
+#define NOUVEAU_ERR(fmt, args...) \
+ fprintf(stderr, "%s:%d - "fmt, __func__, __LINE__, ##args);
+#define NOUVEAU_MSG(fmt, args...) \
+ fprintf(stderr, "nouveau: "fmt, ##args);
+
+#define NV20_NEW_VERTPROG (1 << 0)
+#define NV20_NEW_FRAGPROG (1 << 1)
+#define NV20_NEW_VTXARRAYS (1 << 2)
+#define NV20_NEW_BLEND (1 << 3)
+#define NV20_NEW_BLENDCOL (1 << 4)
+#define NV20_NEW_RAST (1 << 5)
+#define NV20_NEW_DSA (1 << 6)
+#define NV20_NEW_VIEWPORT (1 << 7)
+#define NV20_NEW_SCISSOR (1 << 8)
+#define NV20_NEW_FRAMEBUFFER (1 << 9)
+
+#include "nv20_screen.h"
+
+struct nv20_context {
+ struct pipe_context pipe;
+
+ struct nouveau_winsys *nvws;
+ struct nv20_screen *screen;
+ unsigned pctx_id;
+
+ struct draw_context *draw;
+
+ uint32_t dirty;
+
+ struct nv20_sampler_state *tex_sampler[PIPE_MAX_SAMPLERS];
+ struct nv20_miptree *tex_miptree[PIPE_MAX_SAMPLERS];
+ unsigned dirty_samplers;
+ unsigned fp_samplers;
+ unsigned vp_samplers;
+
+ uint32_t rt_enable;
+ struct pipe_buffer *rt[4];
+ struct pipe_buffer *zeta;
+ uint32_t lma_offset;
+
+ struct nv20_blend_state *blend;
+ struct pipe_blend_color *blend_color;
+ struct nv20_rasterizer_state *rast;
+ struct nv20_depth_stencil_alpha_state *dsa;
+ struct pipe_viewport_state *viewport;
+ struct pipe_scissor_state *scissor;
+ struct pipe_framebuffer_state *framebuffer;
+
+ //struct pipe_buffer *constbuf[PIPE_SHADER_TYPES];
+ float constbuf[PIPE_SHADER_TYPES][32][4];
+ unsigned constbuf_nr[PIPE_SHADER_TYPES];
+
+ struct vertex_info vertex_info;
+
+ struct {
+ struct pipe_buffer *buffer;
+ uint32_t format;
+ } tex[2];
+
+ unsigned vb_enable;
+ struct {
+ struct pipe_buffer *buffer;
+ unsigned delta;
+ } vb[16];
+
+/* struct {
+
+ struct nouveau_resource *exec_heap;
+ struct nouveau_resource *data_heap;
+
+ struct nv20_vertex_program *active;
+
+ struct nv20_vertex_program *current;
+ } vertprog;
+*/
+ struct {
+ struct nv20_fragment_program *active;
+
+ struct nv20_fragment_program *current;
+ struct pipe_buffer *constant_buf;
+ } fragprog;
+
+ struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
+ struct pipe_vertex_element vtxelt[PIPE_MAX_ATTRIBS];
+};
+
+static INLINE struct nv20_context *
+nv20_context(struct pipe_context *pipe)
+{
+ return (struct nv20_context *)pipe;
+}
+
+extern void nv20_init_state_functions(struct nv20_context *nv20);
+extern void nv20_init_surface_functions(struct nv20_context *nv20);
+
+extern void nv20_screen_init_miptree_functions(struct pipe_screen *pscreen);
+
+/* nv20_clear.c */
+extern void nv20_clear(struct pipe_context *pipe, unsigned buffers,
+ const float *rgba, double depth, unsigned stencil);
+
+/* nv20_draw.c */
+extern struct draw_stage *nv20_draw_render_stage(struct nv20_context *nv20);
+
+/* nv20_fragprog.c */
+extern void nv20_fragprog_bind(struct nv20_context *,
+ struct nv20_fragment_program *);
+extern void nv20_fragprog_destroy(struct nv20_context *,
+ struct nv20_fragment_program *);
+
+/* nv20_fragtex.c */
+extern void nv20_fragtex_bind(struct nv20_context *);
+
+/* nv20_prim_vbuf.c */
+struct draw_stage *nv20_draw_vbuf_stage( struct nv20_context *nv20 );
+extern void nv20_vtxbuf_bind(struct nv20_context* nv20);
+
+/* nv20_state.c and friends */
+extern void nv20_emit_hw_state(struct nv20_context *nv20);
+extern void nv20_state_tex_update(struct nv20_context *nv20);
+
+/* nv20_vbo.c */
+extern boolean nv20_draw_arrays(struct pipe_context *, unsigned mode,
+ unsigned start, unsigned count);
+extern boolean nv20_draw_elements( struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned prim, unsigned start, unsigned count);
+
+
+#endif
diff --git a/src/gallium/drivers/nv20/nv20_fragprog.c b/src/gallium/drivers/nv20/nv20_fragprog.c
new file mode 100644
index 0000000000..4f496369dd
--- /dev/null
+++ b/src/gallium/drivers/nv20/nv20_fragprog.c
@@ -0,0 +1,21 @@
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_util.h"
+
+#include "nv20_context.h"
+
+void
+nv20_fragprog_bind(struct nv20_context *nv20, struct nv20_fragment_program *fp)
+{
+}
+
+void
+nv20_fragprog_destroy(struct nv20_context *nv20,
+ struct nv20_fragment_program *fp)
+{
+}
+
diff --git a/src/gallium/drivers/nv20/nv20_fragtex.c b/src/gallium/drivers/nv20/nv20_fragtex.c
new file mode 100644
index 0000000000..495a7be912
--- /dev/null
+++ b/src/gallium/drivers/nv20/nv20_fragtex.c
@@ -0,0 +1,124 @@
+#include "nv20_context.h"
+#include "nouveau/nouveau_util.h"
+
+#define _(m,tf) \
+{ \
+ TRUE, \
+ PIPE_FORMAT_##m, \
+ NV20TCL_TX_FORMAT_FORMAT_##tf, \
+}
+
+struct nv20_texture_format {
+ boolean defined;
+ uint pipe;
+ int format;
+};
+
+static struct nv20_texture_format
+nv20_texture_formats[] = {
+ _(A8R8G8B8_UNORM, A8R8G8B8),
+ _(A1R5G5B5_UNORM, A1R5G5B5),
+ _(A4R4G4B4_UNORM, A4R4G4B4),
+ _(L8_UNORM , L8 ),
+ _(A8_UNORM , A8 ),
+ _(A8L8_UNORM , A8L8 ),
+/* _(RGB_DXT1 , DXT1, ), */
+/* _(RGBA_DXT1 , DXT1, ), */
+/* _(RGBA_DXT3 , DXT3, ), */
+/* _(RGBA_DXT5 , DXT5, ), */
+ {},
+};
+
+static struct nv20_texture_format *
+nv20_fragtex_format(uint pipe_format)
+{
+ struct nv20_texture_format *tf = nv20_texture_formats;
+
+ while (tf->defined) {
+ if (tf->pipe == pipe_format)
+ return tf;
+ tf++;
+ }
+
+ return NULL;
+}
+
+
+static void
+nv20_fragtex_build(struct nv20_context *nv20, int unit)
+{
+#if 0
+ struct nv20_sampler_state *ps = nv20->tex_sampler[unit];
+ struct nv20_miptree *nv20mt = nv20->tex_miptree[unit];
+ struct pipe_texture *pt = &nv20mt->base;
+ struct nv20_texture_format *tf;
+ uint32_t txf, txs, txp;
+
+ tf = nv20_fragtex_format(pt->format);
+ if (!tf || !tf->defined) {
+ NOUVEAU_ERR("Unsupported texture format: 0x%x\n", pt->format);
+ return;
+ }
+
+ txf = tf->format << 8;
+ txf |= (pt->last_level + 1) << 16;
+ txf |= log2i(pt->width[0]) << 20;
+ txf |= log2i(pt->height[0]) << 24;
+ txf |= log2i(pt->depth[0]) << 28;
+ txf |= 8;
+
+ switch (pt->target) {
+ case PIPE_TEXTURE_CUBE:
+ txf |= NV10TCL_TX_FORMAT_CUBE_MAP;
+ /* fall-through */
+ case PIPE_TEXTURE_2D:
+ txf |= (2<<4);
+ break;
+ case PIPE_TEXTURE_1D:
+ txf |= (1<<4);
+ break;
+ default:
+ NOUVEAU_ERR("Unknown target %d\n", pt->target);
+ return;
+ }
+
+ BEGIN_RING(kelvin, NV10TCL_TX_OFFSET(unit), 8);
+ OUT_RELOCl(nv20mt->buffer, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD);
+ OUT_RELOCd(nv20mt->buffer,txf,NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_OR | NOUVEAU_BO_RD, 1/*VRAM*/,2/*TT*/);
+ OUT_RING (ps->wrap);
+ OUT_RING (0x40000000); /* enable */
+ OUT_RING (txs);
+ OUT_RING (ps->filt | 0x2000 /* magic */);
+ OUT_RING ((pt->width[0] << 16) | pt->height[0]);
+ OUT_RING (ps->bcol);
+#endif
+}
+
+void
+nv20_fragtex_bind(struct nv20_context *nv20)
+{
+#if 0
+ struct nv20_fragment_program *fp = nv20->fragprog.active;
+ unsigned samplers, unit;
+
+ samplers = nv20->fp_samplers & ~fp->samplers;
+ while (samplers) {
+ unit = ffs(samplers) - 1;
+ samplers &= ~(1 << unit);
+
+ BEGIN_RING(kelvin, NV10TCL_TX_ENABLE(unit), 1);
+ OUT_RING (0);
+ }
+
+ samplers = nv20->dirty_samplers & fp->samplers;
+ while (samplers) {
+ unit = ffs(samplers) - 1;
+ samplers &= ~(1 << unit);
+
+ nv20_fragtex_build(nv20, unit);
+ }
+
+ nv20->fp_samplers = fp->samplers;
+#endif
+}
+
diff --git a/src/gallium/drivers/nv20/nv20_miptree.c b/src/gallium/drivers/nv20/nv20_miptree.c
new file mode 100644
index 0000000000..185fbf53e0
--- /dev/null
+++ b/src/gallium/drivers/nv20/nv20_miptree.c
@@ -0,0 +1,206 @@
+#include "pipe/p_state.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+
+#include "nv20_context.h"
+#include "nv20_screen.h"
+
+static void
+nv20_miptree_layout(struct nv20_miptree *nv20mt)
+{
+ struct pipe_texture *pt = &nv20mt->base;
+ uint width = pt->width[0], height = pt->height[0];
+ uint offset = 0;
+ int nr_faces, l, f;
+ uint wide_pitch = pt->tex_usage & (PIPE_TEXTURE_USAGE_SAMPLER |
+ PIPE_TEXTURE_USAGE_DEPTH_STENCIL |
+ PIPE_TEXTURE_USAGE_RENDER_TARGET |
+ PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
+ PIPE_TEXTURE_USAGE_PRIMARY);
+
+ if (pt->target == PIPE_TEXTURE_CUBE) {
+ nr_faces = 6;
+ } else {
+ nr_faces = 1;
+ }
+
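+ /* First pass computes per-level dimensions and pitches; the second
+ * pass below assigns a byte offset for every face and level. */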
+ for (l = 0; l <= pt->last_level; l++) {
+ pt->width[l] = width;
+ pt->height[l] = height;
+ pt->nblocksx[l] = pf_get_nblocksx(&pt->block, width);
+ pt->nblocksy[l] = pf_get_nblocksy(&pt->block, height);
+
+ if (wide_pitch && (pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR))
+ nv20mt->level[l].pitch = align(pt->width[0] * pt->block.size, 64);
+ else
+ nv20mt->level[l].pitch = pt->width[l] * pt->block.size;
+
+ nv20mt->level[l].image_offset =
+ CALLOC(nr_faces, sizeof(unsigned));
+
+ width = MAX2(1, width >> 1);
+ height = MAX2(1, height >> 1);
+ }
+
+ for (f = 0; f < nr_faces; f++) {
+ for (l = 0; l < pt->last_level; l++) {
+ nv20mt->level[l].image_offset[f] = offset;
+
+ if (!(pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR) &&
+ pt->width[l + 1] > 1 && pt->height[l + 1] > 1)
+ offset += align(nv20mt->level[l].pitch * pt->height[l], 64);
+ else
+ offset += nv20mt->level[l].pitch * pt->height[l];
+ }
+
+ nv20mt->level[l].image_offset[f] = offset;
+ offset += nv20mt->level[l].pitch * pt->height[l];
+ }
+
+ nv20mt->total_size = offset;
+}
+
+static struct pipe_texture *
+nv20_miptree_blanket(struct pipe_screen *pscreen, const struct pipe_texture *pt,
+ const unsigned *stride, struct pipe_buffer *pb)
+{
+ struct nv20_miptree *mt;
+
+ /* Only supports 2D, non-mipmapped textures for the moment */
+ if (pt->target != PIPE_TEXTURE_2D || pt->last_level != 0 ||
+ pt->depth[0] != 1)
+ return NULL;
+
+ mt = CALLOC_STRUCT(nv20_miptree);
+ if (!mt)
+ return NULL;
+
+ mt->base = *pt;
+ pipe_reference_init(&mt->base.reference, 1);
+ mt->base.screen = pscreen;
+ mt->level[0].pitch = stride[0];
+ mt->level[0].image_offset = CALLOC(1, sizeof(unsigned));
+
+ pipe_buffer_reference(&mt->buffer, pb);
+ return &mt->base;
+}
+
+static struct pipe_texture *
+nv20_miptree_create(struct pipe_screen *screen, const struct pipe_texture *pt)
+{
+ struct nv20_miptree *mt;
+ unsigned buf_usage = PIPE_BUFFER_USAGE_PIXEL |
+ NOUVEAU_BUFFER_USAGE_TEXTURE;
+
+ mt = MALLOC(sizeof(struct nv20_miptree));
+ if (!mt)
+ return NULL;
+ mt->base = *pt;
+ pipe_reference_init(&mt->base.reference, 1);
+ mt->base.screen = screen;
+
+ /* Swizzled textures must be POT */
+ if (pt->width[0] & (pt->width[0] - 1) ||
+ pt->height[0] & (pt->height[0] - 1))
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ else
+ if (pt->tex_usage & (PIPE_TEXTURE_USAGE_PRIMARY |
+ PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
+ PIPE_TEXTURE_USAGE_DEPTH_STENCIL))
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ else
+ if (pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC)
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ else {
+ switch (pt->format) {
+ /* TODO: Figure out which formats can be swizzled */
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_X8R8G8B8_UNORM:
+ case PIPE_FORMAT_R16_SNORM:
+ {
+ if (debug_get_bool_option("NOUVEAU_NO_SWIZZLE", FALSE))
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ break;
+ }
+ default:
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ }
+ }
+
+ if (pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC)
+ buf_usage |= PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+
+ nv20_miptree_layout(mt);
+
+ mt->buffer = screen->buffer_create(screen, 256, buf_usage, mt->total_size);
+ if (!mt->buffer) {
+ FREE(mt);
+ return NULL;
+ }
+
+ return &mt->base;
+}
+
+static void
+nv20_miptree_destroy(struct pipe_texture *pt)
+{
+ struct nv20_miptree *nv20mt = (struct nv20_miptree *)pt;
+ int l;
+
+ pipe_buffer_reference(&nv20mt->buffer, NULL);
+ for (l = 0; l <= pt->last_level; l++) {
+ if (nv20mt->level[l].image_offset)
+ FREE(nv20mt->level[l].image_offset);
+	}
+
+	FREE(nv20mt);
+}
+
+static struct pipe_surface *
+nv20_miptree_surface_get(struct pipe_screen *screen, struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ unsigned flags)
+{
+ struct nv20_miptree *nv20mt = (struct nv20_miptree *)pt;
+ struct nv04_surface *ns;
+
+ ns = CALLOC_STRUCT(nv04_surface);
+ if (!ns)
+ return NULL;
+ pipe_texture_reference(&ns->base.texture, pt);
+ ns->base.format = pt->format;
+ ns->base.width = pt->width[level];
+ ns->base.height = pt->height[level];
+ ns->base.usage = flags;
+ pipe_reference_init(&ns->base.reference, 1);
+ ns->base.face = face;
+ ns->base.level = level;
+ ns->base.zslice = zslice;
+ ns->pitch = nv20mt->level[level].pitch;
+
+ if (pt->target == PIPE_TEXTURE_CUBE) {
+ ns->base.offset = nv20mt->level[level].image_offset[face];
+ } else
+ if (pt->target == PIPE_TEXTURE_3D) {
+ ns->base.offset = nv20mt->level[level].image_offset[zslice];
+ } else {
+ ns->base.offset = nv20mt->level[level].image_offset[0];
+ }
+
+ return &ns->base;
+}
+
+static void
+nv20_miptree_surface_destroy(struct pipe_surface *ps)
+{
+ pipe_texture_reference(&ps->texture, NULL);
+ FREE(ps);
+}
+
+void nv20_screen_init_miptree_functions(struct pipe_screen *pscreen)
+{
+ pscreen->texture_create = nv20_miptree_create;
+ pscreen->texture_blanket = nv20_miptree_blanket;
+ pscreen->texture_destroy = nv20_miptree_destroy;
+ pscreen->get_tex_surface = nv20_miptree_surface_get;
+ pscreen->tex_surface_destroy = nv20_miptree_surface_destroy;
+}
+
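To make the level layout in nv20_miptree_layout() above concrete, here is a small standalone sketch (not part of the commit) that mirrors the same arithmetic for a hypothetical swizzled 64x64 A8R8G8B8 texture with three mip levels; the 4-byte block size and the 64-byte per-level padding come from the code above, everything else is illustrative.

#include <stdio.h>

static unsigned align64(unsigned v) { return (v + 63) & ~63u; }

int main(void)
{
	unsigned width = 64, height = 64, offset = 0, l;
	const unsigned last_level = 2;	/* three levels: 64, 32, 16 */

	for (l = 0; l <= last_level; l++) {
		unsigned pitch = width * 4;	/* width[l] * block.size */
		unsigned size = pitch * height;

		printf("level %u: pitch %u offset %u\n", l, pitch, offset);
		/* every level except the last is padded to 64 bytes,
		 * matching the swizzled branch of the layout loop above */
		offset += (l < last_level) ? align64(size) : size;
		width  = width  > 1 ? width  >> 1 : 1;
		height = height > 1 ? height >> 1 : 1;
	}
	printf("total_size %u\n", offset);	/* 16384 + 4096 + 1024 = 21504 */
	return 0;
}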
diff --git a/src/gallium/drivers/nv20/nv20_prim_vbuf.c b/src/gallium/drivers/nv20/nv20_prim_vbuf.c
new file mode 100644
index 0000000000..ddfcdb8057
--- /dev/null
+++ b/src/gallium/drivers/nv20/nv20_prim_vbuf.c
@@ -0,0 +1,428 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * Build post-transformation, post-clipping vertex buffers and element
+ * lists by hooking into the end of the primitive pipeline and
+ * manipulating the vertex_id field in the vertex headers.
+ *
+ * XXX: work in progress
+ *
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
+ * \author Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "util/u_debug.h"
+#include "pipe/p_inlines.h"
+#include "pipe/internal/p_winsys_screen.h"
+
+#include "nv20_context.h"
+#include "nv20_state.h"
+
+#include "draw/draw_vbuf.h"
+
+/**
+ * Primitive renderer for nv20.
+ */
+struct nv20_vbuf_render {
+ struct vbuf_render base;
+
+ struct nv20_context *nv20;
+
+ /** Vertex buffer in VRAM */
+ struct pipe_buffer *pbuffer;
+
+ /** Vertex buffer in normal memory */
+ void *mbuffer;
+
+ /** Vertex size in bytes */
+ /*unsigned vertex_size;*/
+
+ /** Hardware primitive */
+ unsigned hwprim;
+};
+
+/**
+ * Basically a cast wrapper.
+ */
+static INLINE struct nv20_vbuf_render *
+nv20_vbuf_render(struct vbuf_render *render)
+{
+ assert(render);
+ return (struct nv20_vbuf_render *)render;
+}
+
+void nv20_vtxbuf_bind( struct nv20_context* nv20 )
+{
+#if 0
+ int i;
+ for(i = 0; i < NV20TCL_VTXBUF_ADDRESS__SIZE; i++) {
+ BEGIN_RING(kelvin, NV20TCL_VTXBUF_ADDRESS(i), 1);
+ OUT_RING(0/*nv20->vtxbuf*/);
+ BEGIN_RING(kelvin, NV20TCL_VTXFMT(i) ,1);
+ OUT_RING(0/*XXX*/);
+ }
+#endif
+}
+
+static const struct vertex_info *
+nv20_vbuf_render_get_vertex_info( struct vbuf_render *render )
+{
+ struct nv20_vbuf_render *nv20_render = nv20_vbuf_render(render);
+ struct nv20_context *nv20 = nv20_render->nv20;
+
+ nv20_emit_hw_state(nv20);
+
+ return &nv20->vertex_info;
+}
+
+static void *
+nv20__allocate_mbuffer(struct nv20_vbuf_render *nv20_render, size_t size)
+{
+ nv20_render->mbuffer = MALLOC(size);
+ return nv20_render->mbuffer;
+}
+
+static void
+nv20__allocate_pbuffer(struct nv20_vbuf_render *nv20_render, size_t size)
+{
+ struct pipe_screen *screen = nv20_render->nv20->pipe.screen;
+ nv20_render->pbuffer = screen->buffer_create(screen, 64,
+ PIPE_BUFFER_USAGE_VERTEX, size);
+}
+
+static boolean
+nv20_vbuf_render_allocate_vertices( struct vbuf_render *render,
+ ushort vertex_size,
+ ushort nr_vertices )
+{
+ struct nv20_vbuf_render *nv20_render = nv20_vbuf_render(render);
+ size_t size = (size_t)vertex_size * (size_t)nr_vertices;
+ void *buf;
+
+ assert(!nv20_render->pbuffer);
+ assert(!nv20_render->mbuffer);
+
+ /*
+	 * For small numbers of vertices, don't bother with a pipe vertex
+	 * buffer; the data is passed directly via the FIFO.
+	 */
+	/* XXX: Pipe vertex buffers don't work yet, so that path is disabled. */
+ if (0 && size > 16 * 1024) {
+ nv20__allocate_pbuffer(nv20_render, size);
+		/* XXX: ugly - the buffer object itself is returned here only
+		 * as a non-NULL success indicator; the real mapping happens
+		 * later in map_vertices(). */
+ buf = nv20_render->pbuffer;
+ } else {
+ buf = nv20__allocate_mbuffer(nv20_render, size);
+ }
+
+ if (buf)
+ nv20_render->nv20->dirty |= NV20_NEW_VTXARRAYS;
+
+ return buf ? TRUE : FALSE;
+}
+
+static void *
+nv20_vbuf_render_map_vertices( struct vbuf_render *render )
+{
+ struct nv20_vbuf_render *nv20_render = nv20_vbuf_render(render);
+ struct pipe_screen *pscreen = nv20_render->nv20->pipe.screen;
+
+ if (nv20_render->pbuffer) {
+ return pipe_buffer_map(pscreen, nv20_render->pbuffer,
+ PIPE_BUFFER_USAGE_CPU_WRITE);
+ } else if (nv20_render->mbuffer) {
+ return nv20_render->mbuffer;
+ } else
+ assert(0);
+
+	/* silence compiler warnings about a missing return */
+ return NULL;
+}
+
+static void
+nv20_vbuf_render_unmap_vertices( struct vbuf_render *render,
+ ushort min_index,
+ ushort max_index )
+{
+ struct nv20_vbuf_render *nv20_render = nv20_vbuf_render(render);
+ struct pipe_screen *pscreen = nv20_render->nv20->pipe.screen;
+
+ if (nv20_render->pbuffer)
+ pipe_buffer_unmap(pscreen, nv20_render->pbuffer);
+}
+
+static boolean
+nv20_vbuf_render_set_primitive( struct vbuf_render *render,
+ unsigned prim )
+{
+ struct nv20_vbuf_render *nv20_render = nv20_vbuf_render(render);
+ unsigned hwp = nvgl_primitive(prim);
+ if (hwp == 0)
+ return FALSE;
+
+ nv20_render->hwprim = hwp;
+ return TRUE;
+}
+
+static uint32_t
+nv20__vtxhwformat(unsigned stride, unsigned fields, unsigned type)
+{
+ return (stride << NV20TCL_VTXFMT_STRIDE_SHIFT) |
+ (fields << NV20TCL_VTXFMT_SIZE_SHIFT) |
+ (type << NV20TCL_VTXFMT_TYPE_SHIFT);
+}
+
+static unsigned
+nv20__emit_format(struct nv20_context *nv20, enum attrib_emit type, int hwattr)
+{
+ uint32_t hwfmt = 0;
+ unsigned fields;
+
+ switch (type) {
+ case EMIT_OMIT:
+ hwfmt = nv20__vtxhwformat(0, 0, 2);
+ fields = 0;
+ break;
+ case EMIT_1F:
+ hwfmt = nv20__vtxhwformat(4, 1, 2);
+ fields = 1;
+ break;
+ case EMIT_2F:
+ hwfmt = nv20__vtxhwformat(8, 2, 2);
+ fields = 2;
+ break;
+ case EMIT_3F:
+ hwfmt = nv20__vtxhwformat(12, 3, 2);
+ fields = 3;
+ break;
+ case EMIT_4F:
+ hwfmt = nv20__vtxhwformat(16, 4, 2);
+ fields = 4;
+ break;
+ default:
+ NOUVEAU_ERR("unhandled attrib_emit %d\n", type);
+ return 0;
+ }
+
+ BEGIN_RING(kelvin, NV20TCL_VTXFMT(hwattr), 1);
+ OUT_RING(hwfmt);
+ return fields;
+}
+
+static unsigned
+nv20__emit_vertex_array_format(struct nv20_context *nv20)
+{
+ struct vertex_info *vinfo = &nv20->vertex_info;
+ int hwattr = NV20TCL_VTXFMT__SIZE;
+ int attr = 0;
+ unsigned nr_fields = 0;
+
+ while (hwattr-- > 0) {
+ if (vinfo->hwfmt[0] & (1 << hwattr)) {
+ nr_fields += nv20__emit_format(nv20,
+ vinfo->attrib[attr].emit, hwattr);
+ attr++;
+ } else
+ nv20__emit_format(nv20, EMIT_OMIT, hwattr);
+ }
+
+ return nr_fields;
+}
+
+static void
+nv20__draw_mbuffer(struct nv20_vbuf_render *nv20_render,
+ const ushort *indices,
+ uint nr_indices)
+{
+ struct nv20_context *nv20 = nv20_render->nv20;
+ struct vertex_info *vinfo = &nv20->vertex_info;
+ unsigned nr_fields;
+ int max_push;
+ ubyte *data = nv20_render->mbuffer;
+ int vsz = 4 * vinfo->size;
+
+ nr_fields = nv20__emit_vertex_array_format(nv20);
+
+ BEGIN_RING(kelvin, NV20TCL_VERTEX_BEGIN_END, 1);
+ OUT_RING(nv20_render->hwprim);
+
+ max_push = 1200 / nr_fields;
+ while (nr_indices) {
+ int i;
+ int push = MIN2(nr_indices, max_push);
+
+ BEGIN_RING_NI(kelvin, NV20TCL_VERTEX_DATA, push * nr_fields);
+ for (i = 0; i < push; i++) {
+ /* XXX: fixme to handle other than floats? */
+ int f = nr_fields;
+ float *attrv = (float*)&data[indices[i] * vsz];
+ while (f-- > 0)
+ OUT_RINGf(*attrv++);
+ }
+
+ nr_indices -= push;
+ indices += push;
+ }
+
+ BEGIN_RING(kelvin, NV20TCL_VERTEX_BEGIN_END, 1);
+ OUT_RING(NV20TCL_VERTEX_BEGIN_END_STOP);
+}
+
+static void
+nv20__draw_pbuffer(struct nv20_vbuf_render *nv20_render,
+ const ushort *indices,
+ uint nr_indices)
+{
+ struct nv20_context *nv20 = nv20_render->nv20;
+ int push, i;
+
+ NOUVEAU_ERR("nv20__draw_pbuffer: this path is broken.\n");
+
+ BEGIN_RING(kelvin, NV10TCL_VERTEX_ARRAY_OFFSET_POS, 1);
+ OUT_RELOCl(nv20_render->pbuffer, 0,
+ NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD);
+
+ BEGIN_RING(kelvin, NV10TCL_VERTEX_BUFFER_BEGIN_END, 1);
+ OUT_RING(nv20_render->hwprim);
+
+ if (nr_indices & 1) {
+ BEGIN_RING(kelvin, NV10TCL_VB_ELEMENT_U32, 1);
+ OUT_RING (indices[0]);
+ indices++; nr_indices--;
+ }
+
+ while (nr_indices) {
+		/* XXX: the push size limit is a guess - check it against the
+		 * real FIFO limit. */
+ push = MIN2(nr_indices, 1200 * 2);
+
+ BEGIN_RING_NI(kelvin, NV10TCL_VB_ELEMENT_U16, push >> 1);
+ for (i = 0; i < push; i+=2)
+ OUT_RING((indices[i+1] << 16) | indices[i]);
+
+ nr_indices -= push;
+ indices += push;
+ }
+
+ BEGIN_RING(kelvin, NV10TCL_VERTEX_BUFFER_BEGIN_END, 1);
+ OUT_RING (0);
+}
+
+static void
+nv20_vbuf_render_draw( struct vbuf_render *render,
+ const ushort *indices,
+ uint nr_indices)
+{
+ struct nv20_vbuf_render *nv20_render = nv20_vbuf_render(render);
+
+ nv20_emit_hw_state(nv20_render->nv20);
+
+ if (nv20_render->pbuffer)
+ nv20__draw_pbuffer(nv20_render, indices, nr_indices);
+ else if (nv20_render->mbuffer)
+ nv20__draw_mbuffer(nv20_render, indices, nr_indices);
+ else
+ assert(0);
+}
+
+
+static void
+nv20_vbuf_render_release_vertices( struct vbuf_render *render )
+{
+ struct nv20_vbuf_render *nv20_render = nv20_vbuf_render(render);
+ struct nv20_context *nv20 = nv20_render->nv20;
+
+ if (nv20_render->pbuffer) {
+ pipe_buffer_reference(&nv20_render->pbuffer, NULL);
+ } else if (nv20_render->mbuffer) {
+ FREE(nv20_render->mbuffer);
+ nv20_render->mbuffer = NULL;
+ } else
+ assert(0);
+}
+
+
+static void
+nv20_vbuf_render_destroy( struct vbuf_render *render )
+{
+ struct nv20_vbuf_render *nv20_render = nv20_vbuf_render(render);
+
+ assert(!nv20_render->pbuffer);
+ assert(!nv20_render->mbuffer);
+
+ FREE(nv20_render);
+}
+
+
+/**
+ * Create a new primitive render.
+ */
+static struct vbuf_render *
+nv20_vbuf_render_create( struct nv20_context *nv20 )
+{
+	struct nv20_vbuf_render *nv20_render = CALLOC_STRUCT(nv20_vbuf_render);
+
+	if (!nv20_render)
+		return NULL;
+
+	nv20_render->nv20 = nv20;
+
+ nv20_render->base.max_vertex_buffer_bytes = 16*1024;
+ nv20_render->base.max_indices = 1024;
+ nv20_render->base.get_vertex_info = nv20_vbuf_render_get_vertex_info;
+ nv20_render->base.allocate_vertices =
+ nv20_vbuf_render_allocate_vertices;
+ nv20_render->base.map_vertices = nv20_vbuf_render_map_vertices;
+ nv20_render->base.unmap_vertices = nv20_vbuf_render_unmap_vertices;
+ nv20_render->base.set_primitive = nv20_vbuf_render_set_primitive;
+ nv20_render->base.draw = nv20_vbuf_render_draw;
+ nv20_render->base.release_vertices = nv20_vbuf_render_release_vertices;
+ nv20_render->base.destroy = nv20_vbuf_render_destroy;
+
+ return &nv20_render->base;
+}
+
+
+/**
+ * Create a new primitive vbuf/render stage.
+ */
+struct draw_stage *nv20_draw_vbuf_stage( struct nv20_context *nv20 )
+{
+ struct vbuf_render *render;
+ struct draw_stage *stage;
+
+ render = nv20_vbuf_render_create(nv20);
+ if(!render)
+ return NULL;
+
+ stage = draw_vbuf_stage( nv20->draw, render );
+ if(!stage) {
+ render->destroy(render);
+ return NULL;
+ }
+
+ return stage;
+}
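As a rough orientation for how the backend above is driven, the following hedged sketch approximates the call sequence that the draw module's generic vbuf stage performs on the callbacks wired up in nv20_vbuf_render_create(); the exact ordering inside draw_vbuf.c may differ in detail.

#include "draw/draw_vbuf.h"

/* Hedged sketch only: exercises a vbuf_render backend the way the draw
 * module roughly does for one batch of post-transform vertices. */
static void
example_flush_batch(struct vbuf_render *r, unsigned prim,
                    ushort vertex_size, ushort nr_verts,
                    const ushort *indices, uint nr_indices)
{
	void *verts;

	if (!r->allocate_vertices(r, vertex_size, nr_verts))
		return;					/* out of memory */

	verts = r->map_vertices(r);
	/* ... the draw module writes nr_verts vertices into 'verts' ... */
	(void)verts;
	r->unmap_vertices(r, 0, nr_verts - 1);

	if (r->set_primitive(r, prim))
		r->draw(r, indices, nr_indices);	/* emits the FIFO commands */

	r->release_vertices(r);				/* frees pbuffer/mbuffer */
}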
diff --git a/src/gallium/drivers/nv20/nv20_screen.c b/src/gallium/drivers/nv20/nv20_screen.c
new file mode 100644
index 0000000000..e6924ad71e
--- /dev/null
+++ b/src/gallium/drivers/nv20/nv20_screen.c
@@ -0,0 +1,194 @@
+#include "pipe/p_screen.h"
+
+#include "nv20_context.h"
+#include "nv20_screen.h"
+
+static int
+nv20_screen_get_param(struct pipe_screen *screen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_TEXTURE_IMAGE_UNITS:
+ return 2;
+ case PIPE_CAP_NPOT_TEXTURES:
+ return 0;
+ case PIPE_CAP_TWO_SIDED_STENCIL:
+ return 0;
+ case PIPE_CAP_GLSL:
+ return 0;
+ case PIPE_CAP_S3TC:
+ return 0;
+ case PIPE_CAP_ANISOTROPIC_FILTER:
+ return 1;
+ case PIPE_CAP_POINT_SPRITE:
+ return 0;
+ case PIPE_CAP_MAX_RENDER_TARGETS:
+ return 1;
+ case PIPE_CAP_OCCLUSION_QUERY:
+ return 0;
+ case PIPE_CAP_TEXTURE_SHADOW_MAP:
+ return 0;
+ case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
+ return 12;
+ case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
+ return 0;
+ case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
+ return 12;
+ case PIPE_CAP_MAX_VERTEX_TEXTURE_UNITS:
+ return 0;
+ case PIPE_CAP_TGSI_CONT_SUPPORTED:
+ return 0;
+ case PIPE_CAP_BLEND_EQUATION_SEPARATE:
+ return 0;
+ case NOUVEAU_CAP_HW_VTXBUF:
+ case NOUVEAU_CAP_HW_IDXBUF:
+ return 0;
+ default:
+ NOUVEAU_ERR("Unknown PIPE_CAP %d\n", param);
+ return 0;
+ }
+}
+
+static float
+nv20_screen_get_paramf(struct pipe_screen *screen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_LINE_WIDTH:
+ case PIPE_CAP_MAX_LINE_WIDTH_AA:
+ return 10.0;
+ case PIPE_CAP_MAX_POINT_WIDTH:
+ case PIPE_CAP_MAX_POINT_WIDTH_AA:
+ return 64.0;
+ case PIPE_CAP_MAX_TEXTURE_ANISOTROPY:
+ return 2.0;
+ case PIPE_CAP_MAX_TEXTURE_LOD_BIAS:
+ return 4.0;
+ default:
+ NOUVEAU_ERR("Unknown PIPE_CAP %d\n", param);
+ return 0.0;
+ }
+}
+
+static boolean
+nv20_screen_is_format_supported(struct pipe_screen *screen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned tex_usage, unsigned geom_flags)
+{
+ if (tex_usage & PIPE_TEXTURE_USAGE_RENDER_TARGET) {
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ return TRUE;
+ default:
+ break;
+ }
+ } else
+ if (tex_usage & PIPE_TEXTURE_USAGE_DEPTH_STENCIL) {
+ switch (format) {
+ case PIPE_FORMAT_Z24S8_UNORM:
+ case PIPE_FORMAT_Z24X8_UNORM:
+ case PIPE_FORMAT_Z16_UNORM:
+ return TRUE;
+ default:
+ break;
+ }
+ } else {
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ case PIPE_FORMAT_L8_UNORM:
+ case PIPE_FORMAT_A8_UNORM:
+ case PIPE_FORMAT_I8_UNORM:
+ return TRUE;
+ default:
+ break;
+ }
+ }
+
+ return FALSE;
+}
+
+static void
+nv20_screen_destroy(struct pipe_screen *pscreen)
+{
+ struct nv20_screen *screen = nv20_screen(pscreen);
+
+ nouveau_notifier_free(&screen->sync);
+ nouveau_grobj_free(&screen->kelvin);
+
+ FREE(pscreen);
+}
+
+static struct pipe_buffer *
+nv20_surface_buffer(struct pipe_surface *surf)
+{
+ struct nv20_miptree *mt = (struct nv20_miptree *)surf->texture;
+
+ return mt->buffer;
+}
+
+struct pipe_screen *
+nv20_screen_create(struct pipe_winsys *ws, struct nouveau_device *dev)
+{
+ struct nv20_screen *screen = CALLOC_STRUCT(nv20_screen);
+ struct nouveau_channel *chan;
+ struct pipe_screen *pscreen;
+ unsigned kelvin_class = 0;
+ int ret;
+
+ if (!screen)
+ return NULL;
+ pscreen = &screen->base.base;
+
+ ret = nouveau_screen_init(&screen->base, dev);
+ if (ret) {
+ nv20_screen_destroy(pscreen);
+ return NULL;
+ }
+ chan = screen->base.channel;
+
+ pscreen->winsys = ws;
+ pscreen->destroy = nv20_screen_destroy;
+ pscreen->get_param = nv20_screen_get_param;
+ pscreen->get_paramf = nv20_screen_get_paramf;
+ pscreen->is_format_supported = nv20_screen_is_format_supported;
+
+ nv20_screen_init_miptree_functions(pscreen);
+ nv20_screen_init_transfer_functions(pscreen);
+
+ /* 3D object */
+ if (dev->chipset >= 0x25)
+ kelvin_class = NV25TCL;
+ else if (dev->chipset >= 0x20)
+ kelvin_class = NV20TCL;
+
+	if (!kelvin_class || dev->chipset >= 0x30) {
+		NOUVEAU_ERR("Unknown nv2x chipset: nv%02x\n", dev->chipset);
+		nv20_screen_destroy(pscreen);
+		return NULL;
+	}
+
+	ret = nouveau_grobj_alloc(chan, 0xbeef0097, kelvin_class,
+				  &screen->kelvin);
+	if (ret) {
+		NOUVEAU_ERR("Error creating 3D object: %d\n", ret);
+		nv20_screen_destroy(pscreen);
+		return NULL;
+	}
+ BIND_RING(chan, screen->kelvin, 7);
+
+ /* 2D engine setup */
+ screen->eng2d = nv04_surface_2d_init(&screen->base);
+ screen->eng2d->buf = nv20_surface_buffer;
+
+ /* Notifier for sync purposes */
+ ret = nouveau_notifier_alloc(chan, 0xbeef0301, 1, &screen->sync);
+ if (ret) {
+ NOUVEAU_ERR("Error creating notifier object: %d\n", ret);
+ nv20_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ return pscreen;
+}
+
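For context, a minimal and hedged sketch of how a winsys layer might instantiate this screen and sanity-check a couple of the capabilities reported by nv20_screen_get_param(); the winsys/device objects are assumed to be set up elsewhere, and which driver header declares nv20_screen_create() is an assumption here.

#include <assert.h>

#include "pipe/p_screen.h"
#include "pipe/p_defines.h"
#include "nv20_context.h"	/* assumed to declare nv20_screen_create() */

static struct pipe_screen *
example_probe_screen(struct pipe_winsys *ws, struct nouveau_device *dev)
{
	struct pipe_screen *pscreen = nv20_screen_create(ws, dev);

	if (!pscreen)
		return NULL;

	/* nv20 exposes two texture units and no NPOT textures */
	assert(pscreen->get_param(pscreen, PIPE_CAP_MAX_TEXTURE_IMAGE_UNITS) == 2);
	assert(pscreen->get_param(pscreen, PIPE_CAP_NPOT_TEXTURES) == 0);

	return pscreen;
}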
diff --git a/src/gallium/drivers/nv20/nv20_screen.h b/src/gallium/drivers/nv20/nv20_screen.h
new file mode 100644
index 0000000000..fc7bb05033
--- /dev/null
+++ b/src/gallium/drivers/nv20/nv20_screen.h
@@ -0,0 +1,28 @@
+#ifndef __NV20_SCREEN_H__
+#define __NV20_SCREEN_H__
+
+#include "nouveau/nouveau_screen.h"
+#include "nv04/nv04_surface_2d.h"
+
+struct nv20_screen {
+ struct nouveau_screen base;
+
+ struct nouveau_winsys *nvws;
+
+ /* HW graphics objects */
+ struct nv04_surface_2d *eng2d;
+ struct nouveau_grobj *kelvin;
+ struct nouveau_notifier *sync;
+};
+
+static INLINE struct nv20_screen *
+nv20_screen(struct pipe_screen *screen)
+{
+ return (struct nv20_screen *)screen;
+}
+
+
+void
+nv20_screen_init_transfer_functions(struct pipe_screen *pscreen);
+
+#endif
diff --git a/src/gallium/drivers/nv20/nv20_state.c b/src/gallium/drivers/nv20/nv20_state.c
new file mode 100644
index 0000000000..ed4084980f
--- /dev/null
+++ b/src/gallium/drivers/nv20/nv20_state.c
@@ -0,0 +1,583 @@
+#include "draw/draw_context.h"
+#include "pipe/p_state.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_shader_tokens.h"
+#include "pipe/p_inlines.h"
+
+#include "tgsi/tgsi_parse.h"
+
+#include "nv20_context.h"
+#include "nv20_state.h"
+
+static void *
+nv20_blend_state_create(struct pipe_context *pipe,
+ const struct pipe_blend_state *cso)
+{
+ struct nv20_blend_state *cb;
+
+ cb = MALLOC(sizeof(struct nv20_blend_state));
+
+ cb->b_enable = cso->blend_enable ? 1 : 0;
+ cb->b_srcfunc = ((nvgl_blend_func(cso->alpha_src_factor)<<16) |
+ (nvgl_blend_func(cso->rgb_src_factor)));
+ cb->b_dstfunc = ((nvgl_blend_func(cso->alpha_dst_factor)<<16) |
+ (nvgl_blend_func(cso->rgb_dst_factor)));
+
+ cb->c_mask = (((cso->colormask & PIPE_MASK_A) ? (0x01<<24) : 0) |
+ ((cso->colormask & PIPE_MASK_R) ? (0x01<<16) : 0) |
+ ((cso->colormask & PIPE_MASK_G) ? (0x01<< 8) : 0) |
+ ((cso->colormask & PIPE_MASK_B) ? (0x01<< 0) : 0));
+
+ cb->d_enable = cso->dither ? 1 : 0;
+
+ return (void *)cb;
+}
+
+static void
+nv20_blend_state_bind(struct pipe_context *pipe, void *blend)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+
+ nv20->blend = (struct nv20_blend_state*)blend;
+
+ nv20->dirty |= NV20_NEW_BLEND;
+}
+
+static void
+nv20_blend_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ FREE(hwcso);
+}
+
+
+static INLINE unsigned
+wrap_mode(unsigned wrap) {
+ unsigned ret;
+
+ switch (wrap) {
+ case PIPE_TEX_WRAP_REPEAT:
+ ret = NV20TCL_TX_WRAP_S_REPEAT;
+ break;
+ case PIPE_TEX_WRAP_MIRROR_REPEAT:
+ ret = NV20TCL_TX_WRAP_S_MIRRORED_REPEAT;
+ break;
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ ret = NV20TCL_TX_WRAP_S_CLAMP_TO_EDGE;
+ break;
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ ret = NV20TCL_TX_WRAP_S_CLAMP_TO_BORDER;
+ break;
+ case PIPE_TEX_WRAP_CLAMP:
+ ret = NV20TCL_TX_WRAP_S_CLAMP;
+ break;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
+ case PIPE_TEX_WRAP_MIRROR_CLAMP:
+ default:
+ NOUVEAU_ERR("unknown wrap mode: %d\n", wrap);
+ ret = NV20TCL_TX_WRAP_S_REPEAT;
+ break;
+ }
+
+ return (ret >> NV20TCL_TX_WRAP_S_SHIFT);
+}
+
+static void *
+nv20_sampler_state_create(struct pipe_context *pipe,
+ const struct pipe_sampler_state *cso)
+{
+ struct nv20_sampler_state *ps;
+ uint32_t filter = 0;
+
+ ps = MALLOC(sizeof(struct nv20_sampler_state));
+
+ ps->wrap = ((wrap_mode(cso->wrap_s) << NV20TCL_TX_WRAP_S_SHIFT) |
+ (wrap_mode(cso->wrap_t) << NV20TCL_TX_WRAP_T_SHIFT));
+
+ ps->en = 0;
+ if (cso->max_anisotropy > 1.0) {
+		/* Purpose unknown: the binary driver sets this bit, but
+		 * anisotropic filtering also works without it. */
+ ps->wrap |= (1 << 5);
+
+/* if (cso->max_anisotropy >= 8.0) {
+ ps->en |= NV20TCL_TX_ENABLE_ANISO_8X;
+ } else
+ if (cso->max_anisotropy >= 4.0) {
+ ps->en |= NV20TCL_TX_ENABLE_ANISO_4X;
+ } else {
+ ps->en |= NV20TCL_TX_ENABLE_ANISO_2X;
+ }*/
+ }
+
+ switch (cso->mag_img_filter) {
+ case PIPE_TEX_FILTER_LINEAR:
+ filter |= NV20TCL_TX_FILTER_MAGNIFY_LINEAR;
+ break;
+ case PIPE_TEX_FILTER_NEAREST:
+ default:
+ filter |= NV20TCL_TX_FILTER_MAGNIFY_NEAREST;
+ break;
+ }
+
+ switch (cso->min_img_filter) {
+ case PIPE_TEX_FILTER_LINEAR:
+ switch (cso->min_mip_filter) {
+ case PIPE_TEX_MIPFILTER_NEAREST:
+ filter |=
+ NV20TCL_TX_FILTER_MINIFY_LINEAR_MIPMAP_NEAREST;
+ break;
+ case PIPE_TEX_MIPFILTER_LINEAR:
+ filter |= NV20TCL_TX_FILTER_MINIFY_LINEAR_MIPMAP_LINEAR;
+ break;
+ case PIPE_TEX_MIPFILTER_NONE:
+ default:
+ filter |= NV20TCL_TX_FILTER_MINIFY_LINEAR;
+ break;
+ }
+ break;
+ case PIPE_TEX_FILTER_NEAREST:
+ default:
+ switch (cso->min_mip_filter) {
+ case PIPE_TEX_MIPFILTER_NEAREST:
+ filter |=
+ NV20TCL_TX_FILTER_MINIFY_NEAREST_MIPMAP_NEAREST;
+ break;
+ case PIPE_TEX_MIPFILTER_LINEAR:
+ filter |=
+ NV20TCL_TX_FILTER_MINIFY_NEAREST_MIPMAP_LINEAR;
+ break;
+ case PIPE_TEX_MIPFILTER_NONE:
+ default:
+ filter |= NV20TCL_TX_FILTER_MINIFY_NEAREST;
+ break;
+ }
+ break;
+ }
+
+ ps->filt = filter;
+
+/* if (cso->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) {
+ switch (cso->compare_func) {
+ case PIPE_FUNC_NEVER:
+ ps->wrap |= NV10TCL_TX_WRAP_RCOMP_NEVER;
+ break;
+ case PIPE_FUNC_GREATER:
+ ps->wrap |= NV10TCL_TX_WRAP_RCOMP_GREATER;
+ break;
+ case PIPE_FUNC_EQUAL:
+ ps->wrap |= NV10TCL_TX_WRAP_RCOMP_EQUAL;
+ break;
+ case PIPE_FUNC_GEQUAL:
+ ps->wrap |= NV10TCL_TX_WRAP_RCOMP_GEQUAL;
+ break;
+ case PIPE_FUNC_LESS:
+ ps->wrap |= NV10TCL_TX_WRAP_RCOMP_LESS;
+ break;
+ case PIPE_FUNC_NOTEQUAL:
+ ps->wrap |= NV10TCL_TX_WRAP_RCOMP_NOTEQUAL;
+ break;
+ case PIPE_FUNC_LEQUAL:
+ ps->wrap |= NV10TCL_TX_WRAP_RCOMP_LEQUAL;
+ break;
+ case PIPE_FUNC_ALWAYS:
+ ps->wrap |= NV10TCL_TX_WRAP_RCOMP_ALWAYS;
+ break;
+ default:
+ break;
+ }
+ }*/
+
+ ps->bcol = ((float_to_ubyte(cso->border_color[3]) << 24) |
+ (float_to_ubyte(cso->border_color[0]) << 16) |
+ (float_to_ubyte(cso->border_color[1]) << 8) |
+ (float_to_ubyte(cso->border_color[2]) << 0));
+
+ return (void *)ps;
+}
+
+static void
+nv20_sampler_state_bind(struct pipe_context *pipe, unsigned nr, void **sampler)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+ unsigned unit;
+
+ for (unit = 0; unit < nr; unit++) {
+ nv20->tex_sampler[unit] = sampler[unit];
+ nv20->dirty_samplers |= (1 << unit);
+ }
+}
+
+static void
+nv20_sampler_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ FREE(hwcso);
+}
+
+static void
+nv20_set_sampler_texture(struct pipe_context *pipe, unsigned nr,
+ struct pipe_texture **miptree)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+ unsigned unit;
+
+ for (unit = 0; unit < nr; unit++) {
+ nv20->tex_miptree[unit] = (struct nv20_miptree *)miptree[unit];
+ nv20->dirty_samplers |= (1 << unit);
+ }
+}
+
+static void *
+nv20_rasterizer_state_create(struct pipe_context *pipe,
+ const struct pipe_rasterizer_state *cso)
+{
+ struct nv20_rasterizer_state *rs;
+ int i;
+
+ /*XXX: ignored:
+ * light_twoside
+ * offset_cw/ccw -nohw
+ * scissor
+ * point_smooth -nohw
+ * multisample
+ * offset_units / offset_scale
+ */
+ rs = MALLOC(sizeof(struct nv20_rasterizer_state));
+
+ rs->templ = cso;
+
+ rs->shade_model = cso->flatshade ? NV20TCL_SHADE_MODEL_FLAT :
+ NV20TCL_SHADE_MODEL_SMOOTH;
+
+ rs->line_width = (unsigned char)(cso->line_width * 8.0) & 0xff;
+ rs->line_smooth_en = cso->line_smooth ? 1 : 0;
+
+ /* XXX: nv20 and nv25 different! */
+ rs->point_size = *(uint32_t*)&cso->point_size;
+
+ rs->poly_smooth_en = cso->poly_smooth ? 1 : 0;
+
+ if (cso->front_winding == PIPE_WINDING_CCW) {
+ rs->front_face = NV20TCL_FRONT_FACE_CCW;
+ rs->poly_mode_front = nvgl_polygon_mode(cso->fill_ccw);
+ rs->poly_mode_back = nvgl_polygon_mode(cso->fill_cw);
+ } else {
+ rs->front_face = NV20TCL_FRONT_FACE_CW;
+ rs->poly_mode_front = nvgl_polygon_mode(cso->fill_cw);
+ rs->poly_mode_back = nvgl_polygon_mode(cso->fill_ccw);
+ }
+
+ switch (cso->cull_mode) {
+ case PIPE_WINDING_CCW:
+ rs->cull_face_en = 1;
+ if (cso->front_winding == PIPE_WINDING_CCW)
+ rs->cull_face = NV20TCL_CULL_FACE_FRONT;
+ else
+ rs->cull_face = NV20TCL_CULL_FACE_BACK;
+ break;
+ case PIPE_WINDING_CW:
+ rs->cull_face_en = 1;
+ if (cso->front_winding == PIPE_WINDING_CW)
+ rs->cull_face = NV20TCL_CULL_FACE_FRONT;
+ else
+ rs->cull_face = NV20TCL_CULL_FACE_BACK;
+ break;
+ case PIPE_WINDING_BOTH:
+ rs->cull_face_en = 1;
+ rs->cull_face = NV20TCL_CULL_FACE_FRONT_AND_BACK;
+ break;
+ case PIPE_WINDING_NONE:
+ default:
+ rs->cull_face_en = 0;
+ rs->cull_face = 0;
+ break;
+ }
+
+ if (cso->point_sprite) {
+ rs->point_sprite = (1 << 0);
+ for (i = 0; i < 8; i++) {
+ if (cso->sprite_coord_mode[i] != PIPE_SPRITE_COORD_NONE)
+ rs->point_sprite |= (1 << (8 + i));
+ }
+ } else {
+ rs->point_sprite = 0;
+ }
+
+ return (void *)rs;
+}
+
+static void
+nv20_rasterizer_state_bind(struct pipe_context *pipe, void *rast)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+
+ nv20->rast = (struct nv20_rasterizer_state*)rast;
+
+ draw_set_rasterizer_state(nv20->draw, (nv20->rast ? nv20->rast->templ : NULL));
+
+ nv20->dirty |= NV20_NEW_RAST;
+}
+
+static void
+nv20_rasterizer_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ FREE(hwcso);
+}
+
+static void *
+nv20_depth_stencil_alpha_state_create(struct pipe_context *pipe,
+ const struct pipe_depth_stencil_alpha_state *cso)
+{
+ struct nv20_depth_stencil_alpha_state *hw;
+
+ hw = MALLOC(sizeof(struct nv20_depth_stencil_alpha_state));
+
+ hw->depth.func = nvgl_comparison_op(cso->depth.func);
+ hw->depth.write_enable = cso->depth.writemask ? 1 : 0;
+ hw->depth.test_enable = cso->depth.enabled ? 1 : 0;
+
+ hw->stencil.enable = cso->stencil[0].enabled ? 1 : 0;
+ hw->stencil.wmask = cso->stencil[0].writemask;
+ hw->stencil.func = nvgl_comparison_op(cso->stencil[0].func);
+ hw->stencil.ref = cso->stencil[0].ref_value;
+ hw->stencil.vmask = cso->stencil[0].valuemask;
+ hw->stencil.fail = nvgl_stencil_op(cso->stencil[0].fail_op);
+ hw->stencil.zfail = nvgl_stencil_op(cso->stencil[0].zfail_op);
+ hw->stencil.zpass = nvgl_stencil_op(cso->stencil[0].zpass_op);
+
+ hw->alpha.enabled = cso->alpha.enabled ? 1 : 0;
+ hw->alpha.func = nvgl_comparison_op(cso->alpha.func);
+ hw->alpha.ref = float_to_ubyte(cso->alpha.ref_value);
+
+ return (void *)hw;
+}
+
+static void
+nv20_depth_stencil_alpha_state_bind(struct pipe_context *pipe, void *dsa)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+
+ nv20->dsa = (struct nv20_depth_stencil_alpha_state*)dsa;
+
+ nv20->dirty |= NV20_NEW_DSA;
+}
+
+static void
+nv20_depth_stencil_alpha_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ FREE(hwcso);
+}
+
+static void *
+nv20_vp_state_create(struct pipe_context *pipe,
+ const struct pipe_shader_state *templ)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+
+ return draw_create_vertex_shader(nv20->draw, templ);
+}
+
+static void
+nv20_vp_state_bind(struct pipe_context *pipe, void *shader)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+
+ draw_bind_vertex_shader(nv20->draw, (struct draw_vertex_shader *) shader);
+
+ nv20->dirty |= NV20_NEW_VERTPROG;
+}
+
+static void
+nv20_vp_state_delete(struct pipe_context *pipe, void *shader)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+
+ draw_delete_vertex_shader(nv20->draw, (struct draw_vertex_shader *) shader);
+}
+
+static void *
+nv20_fp_state_create(struct pipe_context *pipe,
+ const struct pipe_shader_state *cso)
+{
+ struct nv20_fragment_program *fp;
+
+ fp = CALLOC(1, sizeof(struct nv20_fragment_program));
+ fp->pipe.tokens = tgsi_dup_tokens(cso->tokens);
+
+ tgsi_scan_shader(cso->tokens, &fp->info);
+
+ return (void *)fp;
+}
+
+static void
+nv20_fp_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+ struct nv20_fragment_program *fp = hwcso;
+
+ nv20->fragprog.current = fp;
+ nv20->dirty |= NV20_NEW_FRAGPROG;
+}
+
+static void
+nv20_fp_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+ struct nv20_fragment_program *fp = hwcso;
+
+ nv20_fragprog_destroy(nv20, fp);
+ FREE((void*)fp->pipe.tokens);
+ FREE(fp);
+}
+
+static void
+nv20_set_blend_color(struct pipe_context *pipe,
+ const struct pipe_blend_color *bcol)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+
+ nv20->blend_color = (struct pipe_blend_color*)bcol;
+
+ nv20->dirty |= NV20_NEW_BLENDCOL;
+}
+
+static void
+nv20_set_clip_state(struct pipe_context *pipe,
+ const struct pipe_clip_state *clip)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+
+ draw_set_clip_state(nv20->draw, clip);
+}
+
+static void
+nv20_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
+ const struct pipe_constant_buffer *buf )
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+ struct pipe_screen *pscreen = pipe->screen;
+
+ assert(shader < PIPE_SHADER_TYPES);
+ assert(index == 0);
+
+ if (buf) {
+ void *mapped;
+ if (buf->buffer && buf->buffer->size &&
+ (mapped = pipe_buffer_map(pscreen, buf->buffer, PIPE_BUFFER_USAGE_CPU_READ)))
+ {
+ memcpy(nv20->constbuf[shader], mapped, buf->buffer->size);
+ nv20->constbuf_nr[shader] =
+ buf->buffer->size / (4 * sizeof(float));
+ pipe_buffer_unmap(pscreen, buf->buffer);
+ }
+ }
+}
+
+static void
+nv20_set_framebuffer_state(struct pipe_context *pipe,
+ const struct pipe_framebuffer_state *fb)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+
+ nv20->framebuffer = (struct pipe_framebuffer_state*)fb;
+
+ nv20->dirty |= NV20_NEW_FRAMEBUFFER;
+}
+
+static void
+nv20_set_polygon_stipple(struct pipe_context *pipe,
+ const struct pipe_poly_stipple *stipple)
+{
+	NOUVEAU_ERR("polygon stipple not implemented\n");
+}
+
+static void
+nv20_set_scissor_state(struct pipe_context *pipe,
+ const struct pipe_scissor_state *s)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+
+ nv20->scissor = (struct pipe_scissor_state*)s;
+
+ nv20->dirty |= NV20_NEW_SCISSOR;
+}
+
+static void
+nv20_set_viewport_state(struct pipe_context *pipe,
+ const struct pipe_viewport_state *vpt)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+
+ nv20->viewport = (struct pipe_viewport_state*)vpt;
+
+ draw_set_viewport_state(nv20->draw, nv20->viewport);
+
+ nv20->dirty |= NV20_NEW_VIEWPORT;
+}
+
+static void
+nv20_set_vertex_buffers(struct pipe_context *pipe, unsigned count,
+ const struct pipe_vertex_buffer *vb)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+
+ memcpy(nv20->vtxbuf, vb, sizeof(*vb) * count);
+ nv20->dirty |= NV20_NEW_VTXARRAYS;
+
+ draw_set_vertex_buffers(nv20->draw, count, vb);
+}
+
+static void
+nv20_set_vertex_elements(struct pipe_context *pipe, unsigned count,
+ const struct pipe_vertex_element *ve)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+
+ memcpy(nv20->vtxelt, ve, sizeof(*ve) * count);
+ nv20->dirty |= NV20_NEW_VTXARRAYS;
+
+ draw_set_vertex_elements(nv20->draw, count, ve);
+}
+
+void
+nv20_init_state_functions(struct nv20_context *nv20)
+{
+ nv20->pipe.create_blend_state = nv20_blend_state_create;
+ nv20->pipe.bind_blend_state = nv20_blend_state_bind;
+ nv20->pipe.delete_blend_state = nv20_blend_state_delete;
+
+ nv20->pipe.create_sampler_state = nv20_sampler_state_create;
+ nv20->pipe.bind_sampler_states = nv20_sampler_state_bind;
+ nv20->pipe.delete_sampler_state = nv20_sampler_state_delete;
+ nv20->pipe.set_sampler_textures = nv20_set_sampler_texture;
+
+ nv20->pipe.create_rasterizer_state = nv20_rasterizer_state_create;
+ nv20->pipe.bind_rasterizer_state = nv20_rasterizer_state_bind;
+ nv20->pipe.delete_rasterizer_state = nv20_rasterizer_state_delete;
+
+ nv20->pipe.create_depth_stencil_alpha_state =
+ nv20_depth_stencil_alpha_state_create;
+ nv20->pipe.bind_depth_stencil_alpha_state =
+ nv20_depth_stencil_alpha_state_bind;
+ nv20->pipe.delete_depth_stencil_alpha_state =
+ nv20_depth_stencil_alpha_state_delete;
+
+ nv20->pipe.create_vs_state = nv20_vp_state_create;
+ nv20->pipe.bind_vs_state = nv20_vp_state_bind;
+ nv20->pipe.delete_vs_state = nv20_vp_state_delete;
+
+ nv20->pipe.create_fs_state = nv20_fp_state_create;
+ nv20->pipe.bind_fs_state = nv20_fp_state_bind;
+ nv20->pipe.delete_fs_state = nv20_fp_state_delete;
+
+ nv20->pipe.set_blend_color = nv20_set_blend_color;
+ nv20->pipe.set_clip_state = nv20_set_clip_state;
+ nv20->pipe.set_constant_buffer = nv20_set_constant_buffer;
+ nv20->pipe.set_framebuffer_state = nv20_set_framebuffer_state;
+ nv20->pipe.set_polygon_stipple = nv20_set_polygon_stipple;
+ nv20->pipe.set_scissor_state = nv20_set_scissor_state;
+ nv20->pipe.set_viewport_state = nv20_set_viewport_state;
+
+ nv20->pipe.set_vertex_buffers = nv20_set_vertex_buffers;
+ nv20->pipe.set_vertex_elements = nv20_set_vertex_elements;
+}
+
diff --git a/src/gallium/drivers/nv20/nv20_state.h b/src/gallium/drivers/nv20/nv20_state.h
new file mode 100644
index 0000000000..34f402fdcb
--- /dev/null
+++ b/src/gallium/drivers/nv20/nv20_state.h
@@ -0,0 +1,139 @@
+#ifndef __NV20_STATE_H__
+#define __NV20_STATE_H__
+
+#include "pipe/p_state.h"
+#include "tgsi/tgsi_scan.h"
+
+struct nv20_blend_state {
+ uint32_t b_enable;
+ uint32_t b_srcfunc;
+ uint32_t b_dstfunc;
+
+ uint32_t c_mask;
+
+ uint32_t d_enable;
+};
+
+struct nv20_sampler_state {
+ uint32_t wrap;
+ uint32_t en;
+ uint32_t filt;
+ uint32_t bcol;
+};
+
+struct nv20_rasterizer_state {
+ uint32_t shade_model;
+
+ uint32_t line_width;
+ uint32_t line_smooth_en;
+
+ uint32_t point_size;
+
+ uint32_t poly_smooth_en;
+
+ uint32_t poly_mode_front;
+ uint32_t poly_mode_back;
+
+ uint32_t front_face;
+ uint32_t cull_face;
+ uint32_t cull_face_en;
+
+ uint32_t point_sprite;
+
+ const struct pipe_rasterizer_state *templ;
+};
+
+struct nv20_vertex_program_exec {
+ uint32_t data[4];
+ boolean has_branch_offset;
+ int const_index;
+};
+
+struct nv20_vertex_program_data {
+ int index; /* immediates == -1 */
+ float value[4];
+};
+
+struct nv20_vertex_program {
+ const struct pipe_shader_state *pipe;
+
+ boolean translated;
+ struct nv20_vertex_program_exec *insns;
+ unsigned nr_insns;
+ struct nv20_vertex_program_data *consts;
+ unsigned nr_consts;
+
+ struct nouveau_resource *exec;
+ unsigned exec_start;
+ struct nouveau_resource *data;
+ unsigned data_start;
+ unsigned data_start_min;
+
+ uint32_t ir;
+ uint32_t or;
+};
+
+struct nv20_fragment_program_data {
+ unsigned offset;
+ unsigned index;
+};
+
+struct nv20_fragment_program {
+ struct pipe_shader_state pipe;
+ struct tgsi_shader_info info;
+
+ boolean translated;
+ boolean on_hw;
+ unsigned samplers;
+
+ uint32_t *insn;
+ int insn_len;
+
+ struct nv20_fragment_program_data *consts;
+ unsigned nr_consts;
+
+ struct pipe_buffer *buffer;
+
+ uint32_t fp_control;
+ uint32_t fp_reg_control;
+};
+
+
+struct nv20_depth_stencil_alpha_state {
+ struct {
+ uint32_t func;
+ uint32_t write_enable;
+ uint32_t test_enable;
+ } depth;
+
+ struct {
+ uint32_t enable;
+ uint32_t wmask;
+ uint32_t func;
+ uint32_t ref;
+ uint32_t vmask;
+ uint32_t fail;
+ uint32_t zfail;
+ uint32_t zpass;
+ } stencil;
+
+ struct {
+ uint32_t enabled;
+ uint32_t func;
+ uint32_t ref;
+ } alpha;
+};
+
+struct nv20_miptree {
+ struct pipe_texture base;
+
+ struct pipe_buffer *buffer;
+ uint total_size;
+
+ struct {
+ uint pitch;
+ uint *image_offset;
+ } level[PIPE_MAX_TEXTURE_LEVELS];
+};
+
+#endif
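As a hedged usage sketch (standard Gallium CSO handling, not code from this commit), this is roughly how a state tracker ends up in the nv20_blend_state_create()/_bind() hooks from nv20_state.c; the template values are arbitrary examples.

#include <string.h>

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"

static void *
example_create_and_bind_blend(struct pipe_context *pipe)
{
	struct pipe_blend_state templ;
	void *cso;

	memset(&templ, 0, sizeof(templ));
	templ.blend_enable = 1;
	templ.rgb_src_factor   = PIPE_BLENDFACTOR_SRC_ALPHA;
	templ.rgb_dst_factor   = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
	templ.alpha_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
	templ.alpha_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
	templ.colormask = PIPE_MASK_R | PIPE_MASK_G | PIPE_MASK_B | PIPE_MASK_A;

	cso = pipe->create_blend_state(pipe, &templ);	/* -> nv20_blend_state_create() */
	pipe->bind_blend_state(pipe, cso);		/* -> nv20_blend_state_bind() */
	/* ... draw; call delete_blend_state() once the CSO is no longer bound ... */
	return cso;
}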
diff --git a/src/gallium/drivers/nv20/nv20_state_emit.c b/src/gallium/drivers/nv20/nv20_state_emit.c
new file mode 100644
index 0000000000..4042f46d05
--- /dev/null
+++ b/src/gallium/drivers/nv20/nv20_state_emit.c
@@ -0,0 +1,396 @@
+#include "nv20_context.h"
+#include "nv20_state.h"
+#include "draw/draw_context.h"
+
+static void nv20_state_emit_blend(struct nv20_context* nv20)
+{
+ struct nv20_blend_state *b = nv20->blend;
+
+ BEGIN_RING(kelvin, NV20TCL_DITHER_ENABLE, 1);
+ OUT_RING (b->d_enable);
+
+ BEGIN_RING(kelvin, NV20TCL_BLEND_FUNC_ENABLE, 1);
+ OUT_RING (b->b_enable);
+
+ BEGIN_RING(kelvin, NV20TCL_BLEND_FUNC_SRC, 2);
+ OUT_RING (b->b_srcfunc);
+ OUT_RING (b->b_dstfunc);
+
+ BEGIN_RING(kelvin, NV20TCL_COLOR_MASK, 1);
+ OUT_RING (b->c_mask);
+}
+
+static void nv20_state_emit_blend_color(struct nv20_context* nv20)
+{
+ struct pipe_blend_color *c = nv20->blend_color;
+
+ BEGIN_RING(kelvin, NV20TCL_BLEND_COLOR, 1);
+ OUT_RING ((float_to_ubyte(c->color[3]) << 24)|
+ (float_to_ubyte(c->color[0]) << 16)|
+ (float_to_ubyte(c->color[1]) << 8) |
+ (float_to_ubyte(c->color[2]) << 0));
+}
+
+static void nv20_state_emit_rast(struct nv20_context* nv20)
+{
+ struct nv20_rasterizer_state *r = nv20->rast;
+
+ BEGIN_RING(kelvin, NV20TCL_SHADE_MODEL, 2);
+ OUT_RING (r->shade_model);
+ OUT_RING (r->line_width);
+
+
+ BEGIN_RING(kelvin, NV20TCL_POINT_SIZE, 1);
+ OUT_RING (r->point_size);
+
+ BEGIN_RING(kelvin, NV20TCL_POLYGON_MODE_FRONT, 2);
+ OUT_RING (r->poly_mode_front);
+ OUT_RING (r->poly_mode_back);
+
+
+ BEGIN_RING(kelvin, NV20TCL_CULL_FACE, 2);
+ OUT_RING (r->cull_face);
+ OUT_RING (r->front_face);
+
+ BEGIN_RING(kelvin, NV20TCL_LINE_SMOOTH_ENABLE, 2);
+ OUT_RING (r->line_smooth_en);
+ OUT_RING (r->poly_smooth_en);
+
+ BEGIN_RING(kelvin, NV20TCL_CULL_FACE_ENABLE, 1);
+ OUT_RING (r->cull_face_en);
+}
+
+static void nv20_state_emit_dsa(struct nv20_context* nv20)
+{
+ struct nv20_depth_stencil_alpha_state *d = nv20->dsa;
+
+ BEGIN_RING(kelvin, NV20TCL_DEPTH_FUNC, 1);
+ OUT_RING (d->depth.func);
+
+ BEGIN_RING(kelvin, NV20TCL_DEPTH_WRITE_ENABLE, 1);
+ OUT_RING (d->depth.write_enable);
+
+ BEGIN_RING(kelvin, NV20TCL_DEPTH_TEST_ENABLE, 1);
+ OUT_RING (d->depth.test_enable);
+
+ BEGIN_RING(kelvin, NV20TCL_DEPTH_UNK17D8, 1);
+ OUT_RING (1);
+
+#if 0
+ BEGIN_RING(kelvin, NV20TCL_STENCIL_ENABLE, 1);
+ OUT_RING (d->stencil.enable);
+ BEGIN_RING(kelvin, NV20TCL_STENCIL_MASK, 7);
+ OUT_RINGp ((uint32_t *)&(d->stencil.wmask), 7);
+#endif
+
+ BEGIN_RING(kelvin, NV20TCL_ALPHA_FUNC_ENABLE, 1);
+ OUT_RING (d->alpha.enabled);
+
+ BEGIN_RING(kelvin, NV20TCL_ALPHA_FUNC_FUNC, 1);
+ OUT_RING (d->alpha.func);
+
+ BEGIN_RING(kelvin, NV20TCL_ALPHA_FUNC_REF, 1);
+ OUT_RING (d->alpha.ref);
+}
+
+static void nv20_state_emit_viewport(struct nv20_context* nv20)
+{
+}
+
+static void nv20_state_emit_scissor(struct nv20_context* nv20)
+{
+ /* NV20TCL_SCISSOR_* is probably a software method */
+/* struct pipe_scissor_state *s = nv20->scissor;
+ BEGIN_RING(kelvin, NV20TCL_SCISSOR_HORIZ, 2);
+ OUT_RING (((s->maxx - s->minx) << 16) | s->minx);
+ OUT_RING (((s->maxy - s->miny) << 16) | s->miny);*/
+}
+
+static void nv20_state_emit_framebuffer(struct nv20_context* nv20)
+{
+ struct pipe_framebuffer_state* fb = nv20->framebuffer;
+ struct nv04_surface *rt, *zeta = NULL;
+ uint32_t rt_format, w, h;
+ int colour_format = 0, zeta_format = 0;
+ struct nv20_miptree *nv20mt = 0;
+
+ w = fb->cbufs[0]->width;
+ h = fb->cbufs[0]->height;
+ colour_format = fb->cbufs[0]->format;
+ rt = (struct nv04_surface *)fb->cbufs[0];
+
+ if (fb->zsbuf) {
+ if (colour_format) {
+ assert(w == fb->zsbuf->width);
+ assert(h == fb->zsbuf->height);
+ } else {
+ w = fb->zsbuf->width;
+ h = fb->zsbuf->height;
+ }
+
+ zeta_format = fb->zsbuf->format;
+ zeta = (struct nv04_surface *)fb->zsbuf;
+ }
+
+ rt_format = NV20TCL_RT_FORMAT_TYPE_LINEAR | 0x20;
+
+ switch (colour_format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case 0:
+ rt_format |= NV20TCL_RT_FORMAT_COLOR_A8R8G8B8;
+ break;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ rt_format |= NV20TCL_RT_FORMAT_COLOR_R5G6B5;
+ break;
+ default:
+ assert(0);
+ }
+
+ if (zeta) {
+ BEGIN_RING(kelvin, NV20TCL_RT_PITCH, 1);
+ OUT_RING (rt->pitch | (zeta->pitch << 16));
+ } else {
+ BEGIN_RING(kelvin, NV20TCL_RT_PITCH, 1);
+ OUT_RING (rt->pitch | (rt->pitch << 16));
+ }
+
+ nv20mt = (struct nv20_miptree *)rt->base.texture;
+ nv20->rt[0] = nv20mt->buffer;
+
+ if (zeta_format)
+ {
+ nv20mt = (struct nv20_miptree *)zeta->base.texture;
+ nv20->zeta = nv20mt->buffer;
+ }
+
+ BEGIN_RING(kelvin, NV20TCL_RT_HORIZ, 3);
+ OUT_RING ((w << 16) | 0);
+ OUT_RING ((h << 16) | 0); /*NV20TCL_RT_VERT */
+ OUT_RING (rt_format); /* NV20TCL_RT_FORMAT */
+ BEGIN_RING(kelvin, NV20TCL_VIEWPORT_CLIP_HORIZ(0), 2);
+ OUT_RING (((w - 1) << 16) | 0);
+ OUT_RING (((h - 1) << 16) | 0);
+}
+
+static void nv20_vertex_layout(struct nv20_context *nv20)
+{
+ struct nv20_fragment_program *fp = nv20->fragprog.current;
+ struct draw_context *dc = nv20->draw;
+ int src;
+ int i;
+ struct vertex_info *vinfo = &nv20->vertex_info;
+ const enum interp_mode colorInterp = INTERP_LINEAR;
+ boolean colors[2] = { FALSE };
+ boolean generics[12] = { FALSE };
+ boolean fog = FALSE;
+
+ memset(vinfo, 0, sizeof(*vinfo));
+
+ /*
+ * Assumed NV20 hardware vertex attribute order:
+ * 0 position, 1 ?, 2 ?, 3 col0,
+ * 4 col1?, 5 ?, 6 ?, 7 ?,
+ * 8 ?, 9 tex0, 10 tex1, 11 tex2,
+ * 12 tex3, 13 ?, 14 ?, 15 ?
+	 * unaccounted: weight, normal, fog
+	 * There are 16 attributes in total.
+ * vinfo->hwfmt[0] has a used-bit corresponding to each of these.
+ * relation to TGSI_SEMANTIC_*:
+ * - POSITION: position (always used)
+ * - COLOR: col1, col0
+ * - GENERIC: tex3, tex2, tex1, tex0, normal, weight
+ * - FOG: fog
+ */
+
+ for (i = 0; i < fp->info.num_inputs; i++) {
+ int isn = fp->info.input_semantic_name[i];
+ int isi = fp->info.input_semantic_index[i];
+ switch (isn) {
+ case TGSI_SEMANTIC_POSITION:
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ assert(isi < 2);
+ colors[isi] = TRUE;
+ break;
+ case TGSI_SEMANTIC_GENERIC:
+ assert(isi < 12);
+ generics[isi] = TRUE;
+ break;
+ case TGSI_SEMANTIC_FOG:
+ fog = TRUE;
+ break;
+ default:
+ assert(0 && "unknown input_semantic_name");
+ }
+ }
+
+ /* always do position */ {
+ src = draw_find_vs_output(dc, TGSI_SEMANTIC_POSITION, 0);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_LINEAR, src);
+ vinfo->hwfmt[0] |= (1 << 0);
+ }
+
+ /* two unnamed generics */
+ for (i = 4; i < 6; i++) {
+ if (!generics[i])
+ continue;
+ src = draw_find_vs_output(dc, TGSI_SEMANTIC_GENERIC, i);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_PERSPECTIVE, src);
+ vinfo->hwfmt[0] |= (1 << (i - 3));
+ }
+
+ if (colors[0]) {
+ src = draw_find_vs_output(dc, TGSI_SEMANTIC_COLOR, 0);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, colorInterp, src);
+ vinfo->hwfmt[0] |= (1 << 3);
+ }
+
+ if (colors[1]) {
+ src = draw_find_vs_output(dc, TGSI_SEMANTIC_COLOR, 1);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, colorInterp, src);
+ vinfo->hwfmt[0] |= (1 << 4);
+ }
+
+ /* four unnamed generics */
+ for (i = 6; i < 10; i++) {
+ if (!generics[i])
+ continue;
+ src = draw_find_vs_output(dc, TGSI_SEMANTIC_GENERIC, i);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_PERSPECTIVE, src);
+ vinfo->hwfmt[0] |= (1 << (i - 1));
+ }
+
+ /* tex0, tex1, tex2, tex3 */
+ for (i = 0; i < 4; i++) {
+ if (!generics[i])
+ continue;
+ src = draw_find_vs_output(dc, TGSI_SEMANTIC_GENERIC, i);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_PERSPECTIVE, src);
+ vinfo->hwfmt[0] |= (1 << (i + 9));
+ }
+
+ /* two unnamed generics */
+ for (i = 10; i < 12; i++) {
+ if (!generics[i])
+ continue;
+ src = draw_find_vs_output(dc, TGSI_SEMANTIC_GENERIC, i);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_PERSPECTIVE, src);
+ vinfo->hwfmt[0] |= (1 << (i + 3));
+ }
+
+ if (fog) {
+ src = draw_find_vs_output(dc, TGSI_SEMANTIC_FOG, 0);
+ draw_emit_vertex_attr(vinfo, EMIT_1F, INTERP_PERSPECTIVE, src);
+ vinfo->hwfmt[0] |= (1 << 15);
+ }
+
+ draw_compute_vertex_size(vinfo);
+}
+
+void
+nv20_emit_hw_state(struct nv20_context *nv20)
+{
+ int i;
+
+ if (nv20->dirty & NV20_NEW_VERTPROG) {
+ //nv20_vertprog_bind(nv20, nv20->vertprog.current);
+ nv20->dirty &= ~NV20_NEW_VERTPROG;
+ }
+
+ if (nv20->dirty & NV20_NEW_FRAGPROG) {
+ nv20_fragprog_bind(nv20, nv20->fragprog.current);
+ /*XXX: clear NV20_NEW_FRAGPROG if no new program uploaded */
+ nv20->dirty_samplers |= (1<<10);
+ nv20->dirty_samplers = 0;
+ }
+
+ if (nv20->dirty_samplers || (nv20->dirty & NV20_NEW_FRAGPROG)) {
+ nv20_fragtex_bind(nv20);
+ nv20->dirty &= ~NV20_NEW_FRAGPROG;
+ }
+
+ if (nv20->dirty & NV20_NEW_VTXARRAYS) {
+ nv20->dirty &= ~NV20_NEW_VTXARRAYS;
+ nv20_vertex_layout(nv20);
+ nv20_vtxbuf_bind(nv20);
+ }
+
+ if (nv20->dirty & NV20_NEW_BLEND) {
+ nv20->dirty &= ~NV20_NEW_BLEND;
+ nv20_state_emit_blend(nv20);
+ }
+
+ if (nv20->dirty & NV20_NEW_BLENDCOL) {
+ nv20->dirty &= ~NV20_NEW_BLENDCOL;
+ nv20_state_emit_blend_color(nv20);
+ }
+
+ if (nv20->dirty & NV20_NEW_RAST) {
+ nv20->dirty &= ~NV20_NEW_RAST;
+ nv20_state_emit_rast(nv20);
+ }
+
+ if (nv20->dirty & NV20_NEW_DSA) {
+ nv20->dirty &= ~NV20_NEW_DSA;
+ nv20_state_emit_dsa(nv20);
+ }
+
+ if (nv20->dirty & NV20_NEW_VIEWPORT) {
+ nv20->dirty &= ~NV20_NEW_VIEWPORT;
+ nv20_state_emit_viewport(nv20);
+ }
+
+ if (nv20->dirty & NV20_NEW_SCISSOR) {
+ nv20->dirty &= ~NV20_NEW_SCISSOR;
+ nv20_state_emit_scissor(nv20);
+ }
+
+ if (nv20->dirty & NV20_NEW_FRAMEBUFFER) {
+ nv20->dirty &= ~NV20_NEW_FRAMEBUFFER;
+ nv20_state_emit_framebuffer(nv20);
+ }
+
+ /* Emit relocs for every referenced buffer.
+ * This is to ensure the bufmgr has an accurate idea of how
+ * the buffer is used. This isn't very efficient, but we don't
+ * seem to take a significant performance hit. Will be improved
+ * at some point. Vertex arrays are emitted by nv20_vbo.c
+ */
+
+ /* Render target */
+ BEGIN_RING(kelvin, NV20TCL_DMA_COLOR, 1);
+ OUT_RELOCo(nv20->rt[0], NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ BEGIN_RING(kelvin, NV20TCL_COLOR_OFFSET, 1);
+ OUT_RELOCl(nv20->rt[0], 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+
+ if (nv20->zeta) {
+ BEGIN_RING(kelvin, NV20TCL_DMA_ZETA, 1);
+ OUT_RELOCo(nv20->zeta, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ BEGIN_RING(kelvin, NV20TCL_ZETA_OFFSET, 1);
+ OUT_RELOCl(nv20->zeta, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ /* XXX for when we allocate LMA on nv17 */
+/* BEGIN_RING(kelvin, NV10TCL_LMA_DEPTH_BUFFER_OFFSET, 1);
+ OUT_RELOCl(nv20->zeta + lma_offset);*/
+ }
+
+ /* Vertex buffer */
+ BEGIN_RING(kelvin, NV20TCL_DMA_VTXBUF0, 1);
+ OUT_RELOCo(nv20->rt[0], NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ BEGIN_RING(kelvin, NV20TCL_COLOR_OFFSET, 1);
+ OUT_RELOCl(nv20->rt[0], 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+
+ /* Texture images */
+ for (i = 0; i < 2; i++) {
+ if (!(nv20->fp_samplers & (1 << i)))
+ continue;
+ BEGIN_RING(kelvin, NV20TCL_TX_OFFSET(i), 1);
+ OUT_RELOCl(nv20->tex[i].buffer, 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_GART | NOUVEAU_BO_RD);
+ BEGIN_RING(kelvin, NV20TCL_TX_FORMAT(i), 1);
+ OUT_RELOCd(nv20->tex[i].buffer, nv20->tex[i].format,
+ NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD |
+ NOUVEAU_BO_OR, NV20TCL_TX_FORMAT_DMA0,
+ NV20TCL_TX_FORMAT_DMA1);
+ }
+}
+
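To make the attribute mapping in nv20_vertex_layout() above easier to follow, here is a small worked example (illustrative only): for a fragment program whose inputs are POSITION, COLOR[0] and GENERIC[0] (treated as tex0), the bits set in hwfmt[0] work out as follows.

static const uint32_t example_hwfmt0 =
	(1 << 0) |	/* position   -> hw attribute 0 */
	(1 << 3) |	/* color 0    -> hw attribute 3 */
	(1 << 9);	/* GENERIC[0] -> hw attribute 9 (tex0) */
/* == 0x209; the vertex is three EMIT_4F attributes, 48 bytes per vertex. */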
diff --git a/src/gallium/drivers/nv20/nv20_surface.c b/src/gallium/drivers/nv20/nv20_surface.c
new file mode 100644
index 0000000000..4224bdd6af
--- /dev/null
+++ b/src/gallium/drivers/nv20/nv20_surface.c
@@ -0,0 +1,63 @@
+
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "nv20_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_inlines.h"
+#include "util/u_tile.h"
+
+static void
+nv20_surface_copy(struct pipe_context *pipe,
+ struct pipe_surface *dest, unsigned destx, unsigned desty,
+ struct pipe_surface *src, unsigned srcx, unsigned srcy,
+ unsigned width, unsigned height)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+ struct nv04_surface_2d *eng2d = nv20->screen->eng2d;
+
+ eng2d->copy(eng2d, dest, destx, desty, src, srcx, srcy, width, height);
+}
+
+static void
+nv20_surface_fill(struct pipe_context *pipe, struct pipe_surface *dest,
+ unsigned destx, unsigned desty, unsigned width,
+ unsigned height, unsigned value)
+{
+ struct nv20_context *nv20 = nv20_context(pipe);
+ struct nv04_surface_2d *eng2d = nv20->screen->eng2d;
+
+ eng2d->fill(eng2d, dest, destx, desty, width, height, value);
+}
+
+void
+nv20_init_surface_functions(struct nv20_context *nv20)
+{
+ nv20->pipe.surface_copy = nv20_surface_copy;
+ nv20->pipe.surface_fill = nv20_surface_fill;
+}
diff --git a/src/gallium/drivers/nv20/nv20_transfer.c b/src/gallium/drivers/nv20/nv20_transfer.c
new file mode 100644
index 0000000000..5018995596
--- /dev/null
+++ b/src/gallium/drivers/nv20/nv20_transfer.c
@@ -0,0 +1,195 @@
+#include <pipe/p_state.h>
+#include <pipe/p_defines.h>
+#include <pipe/p_inlines.h>
+#include <util/u_memory.h>
+#include <nouveau/nouveau_winsys.h>
+#include "nv20_context.h"
+#include "nv20_screen.h"
+#include "nv20_state.h"
+
+struct nv20_transfer {
+ struct pipe_transfer base;
+ struct pipe_surface *surface;
+ bool direct;
+};
+
+static unsigned nv20_usage_tx_to_buf(unsigned tx_usage)
+{
+ switch (tx_usage) {
+ case PIPE_TRANSFER_READ:
+ return PIPE_BUFFER_USAGE_CPU_READ;
+ case PIPE_TRANSFER_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_WRITE;
+ case PIPE_TRANSFER_READ_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+ default:
+ assert(0);
+ }
+
+ return -1;
+}
+
+static void
+nv20_compatible_transfer_tex(struct pipe_texture *pt, unsigned level,
+ struct pipe_texture *template)
+{
+ memset(template, 0, sizeof(struct pipe_texture));
+ template->target = pt->target;
+ template->format = pt->format;
+ template->width[0] = pt->width[level];
+ template->height[0] = pt->height[level];
+ template->depth[0] = 1;
+ template->block = pt->block;
+ template->nblocksx[0] = pt->nblocksx[level];
+	template->nblocksy[0] = pt->nblocksy[level];
+ template->last_level = 0;
+ template->nr_samples = pt->nr_samples;
+
+ template->tex_usage = PIPE_TEXTURE_USAGE_DYNAMIC |
+ NOUVEAU_TEXTURE_USAGE_LINEAR;
+}
+
+static struct pipe_transfer *
+nv20_transfer_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ enum pipe_transfer_usage usage,
+ unsigned x, unsigned y, unsigned w, unsigned h)
+{
+ struct nv20_miptree *mt = (struct nv20_miptree *)pt;
+ struct nv20_transfer *tx;
+ struct pipe_texture tx_tex_template, *tx_tex;
+
+ tx = CALLOC_STRUCT(nv20_transfer);
+ if (!tx)
+ return NULL;
+
+ pipe_texture_reference(&tx->base.texture, pt);
+ tx->base.format = pt->format;
+ tx->base.x = x;
+ tx->base.y = y;
+ tx->base.width = w;
+ tx->base.height = h;
+ tx->base.block = pt->block;
+ tx->base.nblocksx = pt->nblocksx[level];
+ tx->base.nblocksy = pt->nblocksy[level];
+ tx->base.stride = mt->level[level].pitch;
+ tx->base.usage = usage;
+ tx->base.face = face;
+ tx->base.level = level;
+ tx->base.zslice = zslice;
+
+ /* Direct access to texture */
+ if ((pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC ||
+ debug_get_bool_option("NOUVEAU_NO_TRANSFER", TRUE/*XXX:FALSE*/)) &&
+ pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)
+ {
+ tx->direct = true;
+ tx->surface = pscreen->get_tex_surface(pscreen, pt,
+ 0, 0, 0,
+ nv20_usage_tx_to_buf(usage));
+ return &tx->base;
+ }
+
+ tx->direct = false;
+
+ nv20_compatible_transfer_tex(pt, level, &tx_tex_template);
+
+ tx_tex = pscreen->texture_create(pscreen, &tx_tex_template);
+ if (!tx_tex)
+ {
+ FREE(tx);
+ return NULL;
+ }
+
+ tx->surface = pscreen->get_tex_surface(pscreen, tx_tex,
+ face, level, zslice,
+ nv20_usage_tx_to_buf(usage));
+
+ pipe_texture_reference(&tx_tex, NULL);
+
+	if (!tx->surface)
+	{
+		pipe_texture_reference(&tx->base.texture, NULL);
+		FREE(tx);
+		return NULL;
+	}
+
+ if (usage != PIPE_TRANSFER_WRITE) {
+ struct nv20_screen *nvscreen = nv20_screen(pscreen);
+ struct pipe_surface *src;
+
+ src = pscreen->get_tex_surface(pscreen, pt,
+ face, level, zslice,
+ PIPE_BUFFER_USAGE_GPU_READ);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ /* TODO: Check if SIFM can un-swizzle */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ tx->surface, 0, 0,
+ src, 0, 0,
+ src->width, src->height);
+
+ pipe_surface_reference(&src, NULL);
+ }
+
+ return &tx->base;
+}
+
+static void
+nv20_transfer_del(struct pipe_transfer *ptx)
+{
+ struct nv20_transfer *tx = (struct nv20_transfer *)ptx;
+
+ if (!tx->direct && ptx->usage != PIPE_TRANSFER_READ) {
+ struct pipe_screen *pscreen = ptx->texture->screen;
+ struct nv20_screen *nvscreen = nv20_screen(pscreen);
+ struct pipe_surface *dst;
+
+ dst = pscreen->get_tex_surface(pscreen, ptx->texture,
+ ptx->face, ptx->level, ptx->zslice,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ dst, 0, 0,
+ tx->surface, 0, 0,
+ dst->width, dst->height);
+
+ pipe_surface_reference(&dst, NULL);
+ }
+
+ pipe_surface_reference(&tx->surface, NULL);
+ pipe_texture_reference(&ptx->texture, NULL);
+ FREE(ptx);
+}
+
+static void *
+nv20_transfer_map(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv20_transfer *tx = (struct nv20_transfer *)ptx;
+ struct nv04_surface *ns = (struct nv04_surface *)tx->surface;
+ struct nv20_miptree *mt = (struct nv20_miptree *)tx->surface->texture;
+ void *map = pipe_buffer_map(pscreen, mt->buffer,
+ nv20_usage_tx_to_buf(ptx->usage));
+
+	return (uint8_t *)map + ns->base.offset +
+	       ptx->y * ns->pitch + ptx->x * ptx->block.size;
+}
+
+static void
+nv20_transfer_unmap(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv20_transfer *tx = (struct nv20_transfer *)ptx;
+ struct nv20_miptree *mt = (struct nv20_miptree *)tx->surface->texture;
+
+ pipe_buffer_unmap(pscreen, mt->buffer);
+}
+
+void
+nv20_screen_init_transfer_functions(struct pipe_screen *pscreen)
+{
+ pscreen->get_tex_transfer = nv20_transfer_new;
+ pscreen->tex_transfer_destroy = nv20_transfer_del;
+ pscreen->transfer_map = nv20_transfer_map;
+ pscreen->transfer_unmap = nv20_transfer_unmap;
+}
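A hedged sketch of how the transfer hooks registered above are typically driven from the state tracker side, uploading data into level 0 of a texture; the texture, data pointer and source stride are placeholders, and error handling is omitted.

#include <string.h>

#include "pipe/p_screen.h"
#include "pipe/p_defines.h"

static void
example_upload_level0(struct pipe_screen *screen, struct pipe_texture *pt,
                      const void *data, unsigned src_stride,
                      unsigned w, unsigned h)
{
	struct pipe_transfer *tx;
	char *map;
	unsigned y;

	tx = screen->get_tex_transfer(screen, pt, 0, 0, 0,	/* face/level/zslice */
	                              PIPE_TRANSFER_WRITE, 0, 0, w, h);
	map = screen->transfer_map(screen, tx);			/* -> nv20_transfer_map() */

	for (y = 0; y < tx->nblocksy; y++)
		memcpy(map + y * tx->stride,
		       (const char *)data + y * src_stride,
		       tx->nblocksx * tx->block.size);

	screen->transfer_unmap(screen, tx);
	screen->tex_transfer_destroy(tx);			/* -> nv20_transfer_del() */
}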
diff --git a/src/gallium/drivers/nv20/nv20_vbo.c b/src/gallium/drivers/nv20/nv20_vbo.c
new file mode 100644
index 0000000000..84d7db6c5e
--- /dev/null
+++ b/src/gallium/drivers/nv20/nv20_vbo.c
@@ -0,0 +1,80 @@
+#include "draw/draw_context.h"
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+
+#include "nv20_context.h"
+#include "nv20_state.h"
+
+#include "nouveau/nouveau_channel.h"
+#include "nouveau/nouveau_pushbuf.h"
+
+boolean nv20_draw_elements( struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned prim, unsigned start, unsigned count)
+{
+ struct pipe_screen *pscreen = pipe->screen;
+ struct nv20_context *nv20 = nv20_context( pipe );
+ struct draw_context *draw = nv20->draw;
+ unsigned i;
+
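+	/* Drawing goes through the software draw module: map the vertex and
+	 * index buffers for CPU access, hand the mappings to draw, and let
+	 * it run the whole pipeline.
+	 */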
+ nv20_emit_hw_state(nv20);
+
+ /*
+ * Map vertex buffers
+ */
+ for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
+ if (nv20->vtxbuf[i].buffer) {
+ void *buf
+ = pipe_buffer_map(pscreen,
+ nv20->vtxbuf[i].buffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ draw_set_mapped_vertex_buffer(draw, i, buf);
+ }
+ }
+ /* Map index buffer, if present */
+ if (indexBuffer) {
+ void *mapped_indexes
+ = pipe_buffer_map(pscreen, indexBuffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ draw_set_mapped_element_buffer(draw, indexSize, mapped_indexes);
+ }
+ else {
+ /* no index/element buffer */
+ draw_set_mapped_element_buffer(draw, 0, NULL);
+ }
+
+ draw_set_mapped_constant_buffer(draw,
+ nv20->constbuf[PIPE_SHADER_VERTEX],
+ nv20->constbuf_nr[PIPE_SHADER_VERTEX]);
+
+ /* draw! */
+ draw_arrays(nv20->draw, prim, start, count);
+
+ /*
+ * unmap vertex/index buffers
+ */
+ for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
+ if (nv20->vtxbuf[i].buffer) {
+ pipe_buffer_unmap(pscreen, nv20->vtxbuf[i].buffer);
+ draw_set_mapped_vertex_buffer(draw, i, NULL);
+ }
+ }
+ if (indexBuffer) {
+ pipe_buffer_unmap(pscreen, indexBuffer);
+ draw_set_mapped_element_buffer(draw, 0, NULL);
+ }
+
+ draw_flush(nv20->draw);
+ return TRUE;
+}
+
+boolean nv20_draw_arrays( struct pipe_context *pipe,
+ unsigned prim, unsigned start, unsigned count)
+{
+ return nv20_draw_elements(pipe, NULL, 0, prim, start, count);
+}
diff --git a/src/gallium/drivers/nv20/nv20_vertprog.c b/src/gallium/drivers/nv20/nv20_vertprog.c
new file mode 100644
index 0000000000..388245ecb0
--- /dev/null
+++ b/src/gallium/drivers/nv20/nv20_vertprog.c
@@ -0,0 +1,838 @@
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_dump.h"
+
+#include "nv20_context.h"
+#include "nv20_state.h"
+
+/* TODO (at least...):
+ * 1. Indexed consts + ARL
+ * 2. Arb. swz/negation
+ * 3. NV_vp11, NV_vp2, NV_vp3 features
+ * - extra arith opcodes
+ * - branching
+ * - texture sampling
+ * - indexed attribs
+ * - indexed results
+ * 4. bugs
+ */
+
+#define SWZ_X 0
+#define SWZ_Y 1
+#define SWZ_Z 2
+#define SWZ_W 3
+#define MASK_X 8
+#define MASK_Y 4
+#define MASK_Z 2
+#define MASK_W 1
+#define MASK_ALL (MASK_X|MASK_Y|MASK_Z|MASK_W)
+#define DEF_SCALE 0
+#define DEF_CTEST 0
+#include "nv20_shader.h"
+
+#define swz(s,x,y,z,w) nv20_sr_swz((s), SWZ_##x, SWZ_##y, SWZ_##z, SWZ_##w)
+#define neg(s) nv20_sr_neg((s))
+#define abs(s) nv20_sr_abs((s))
+
+struct nv20_vpc {
+ struct nv20_vertex_program *vp;
+
+ struct nv20_vertex_program_exec *vpi;
+
+ unsigned output_map[PIPE_MAX_SHADER_OUTPUTS];
+
+ int high_temp;
+ int temp_temp_count;
+
+ struct nv20_sreg *imm;
+ unsigned nr_imm;
+};
+
+static struct nv20_sreg
+temp(struct nv20_vpc *vpc)
+{
+ int idx;
+
+ idx = vpc->temp_temp_count++;
+ idx += vpc->high_temp + 1;
+ return nv20_sr(NV30SR_TEMP, idx);
+}
+
+static struct nv20_sreg
+constant(struct nv20_vpc *vpc, int pipe, float x, float y, float z, float w)
+{
+ struct nv20_vertex_program *vp = vpc->vp;
+ struct nv20_vertex_program_data *vpd;
+ int idx;
+
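+	/* pipe >= 0 maps a slot of the app's constant buffer; reuse any
+	 * hardware constant already bound to that slot.  pipe == -1 is an
+	 * immediate whose value is baked in here.
+	 */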
+ if (pipe >= 0) {
+ for (idx = 0; idx < vp->nr_consts; idx++) {
+ if (vp->consts[idx].index == pipe)
+ return nv20_sr(NV30SR_CONST, idx);
+ }
+ }
+
+ idx = vp->nr_consts++;
+ vp->consts = realloc(vp->consts, sizeof(*vpd) * vp->nr_consts);
+ vpd = &vp->consts[idx];
+
+ vpd->index = pipe;
+ vpd->value[0] = x;
+ vpd->value[1] = y;
+ vpd->value[2] = z;
+ vpd->value[3] = w;
+ return nv20_sr(NV30SR_CONST, idx);
+}
+
+#define arith(cc,s,o,d,m,s0,s1,s2) \
+ nv20_vp_arith((cc), (s), NV30_VP_INST_##o, (d), (m), (s0), (s1), (s2))
+
+static void
+emit_src(struct nv20_vpc *vpc, uint32_t *hw, int pos, struct nv20_sreg src)
+{
+ struct nv20_vertex_program *vp = vpc->vp;
+ uint32_t sr = 0;
+
+ switch (src.type) {
+ case NV30SR_TEMP:
+ sr |= (NV30_VP_SRC_REG_TYPE_TEMP << NV30_VP_SRC_REG_TYPE_SHIFT);
+ sr |= (src.index << NV30_VP_SRC_TEMP_SRC_SHIFT);
+ break;
+ case NV30SR_INPUT:
+ sr |= (NV30_VP_SRC_REG_TYPE_INPUT <<
+ NV30_VP_SRC_REG_TYPE_SHIFT);
+ vp->ir |= (1 << src.index);
+ hw[1] |= (src.index << NV30_VP_INST_INPUT_SRC_SHIFT);
+ break;
+ case NV30SR_CONST:
+ sr |= (NV30_VP_SRC_REG_TYPE_CONST <<
+ NV30_VP_SRC_REG_TYPE_SHIFT);
+ assert(vpc->vpi->const_index == -1 ||
+ vpc->vpi->const_index == src.index);
+ vpc->vpi->const_index = src.index;
+ break;
+ case NV30SR_NONE:
+ sr |= (NV30_VP_SRC_REG_TYPE_INPUT <<
+ NV30_VP_SRC_REG_TYPE_SHIFT);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (src.negate)
+ sr |= NV30_VP_SRC_NEGATE;
+
+ if (src.abs)
+ hw[0] |= (1 << (21 + pos));
+
+ sr |= ((src.swz[0] << NV30_VP_SRC_SWZ_X_SHIFT) |
+ (src.swz[1] << NV30_VP_SRC_SWZ_Y_SHIFT) |
+ (src.swz[2] << NV30_VP_SRC_SWZ_Z_SHIFT) |
+ (src.swz[3] << NV30_VP_SRC_SWZ_W_SHIFT));
+
+
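+	/* Source fields straddle the instruction words: SRC0 and SRC2 are
+	 * split into high/low halves across hw[1]/hw[2] and hw[2]/hw[3],
+	 * while SRC1 fits entirely within hw[2].
+	 */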
+ switch (pos) {
+ case 0:
+ hw[1] |= ((sr & NV30_VP_SRC0_HIGH_MASK) >>
+ NV30_VP_SRC0_HIGH_SHIFT) << NV30_VP_INST_SRC0H_SHIFT;
+ hw[2] |= (sr & NV30_VP_SRC0_LOW_MASK) <<
+ NV30_VP_INST_SRC0L_SHIFT;
+ break;
+ case 1:
+ hw[2] |= sr << NV30_VP_INST_SRC1_SHIFT;
+ break;
+ case 2:
+ hw[2] |= ((sr & NV30_VP_SRC2_HIGH_MASK) >>
+ NV30_VP_SRC2_HIGH_SHIFT) << NV30_VP_INST_SRC2H_SHIFT;
+ hw[3] |= (sr & NV30_VP_SRC2_LOW_MASK) <<
+ NV30_VP_INST_SRC2L_SHIFT;
+ break;
+ default:
+ assert(0);
+ }
+}
+
+static void
+emit_dst(struct nv20_vpc *vpc, uint32_t *hw, int slot, struct nv20_sreg dst)
+{
+ struct nv20_vertex_program *vp = vpc->vp;
+
+ switch (dst.type) {
+ case NV30SR_TEMP:
+ hw[0] |= (dst.index << NV30_VP_INST_DEST_TEMP_ID_SHIFT);
+ break;
+ case NV30SR_OUTPUT:
+ switch (dst.index) {
+ case NV30_VP_INST_DEST_COL0 : vp->or |= (1 << 0); break;
+ case NV30_VP_INST_DEST_COL1 : vp->or |= (1 << 1); break;
+ case NV30_VP_INST_DEST_BFC0 : vp->or |= (1 << 2); break;
+ case NV30_VP_INST_DEST_BFC1 : vp->or |= (1 << 3); break;
+ case NV30_VP_INST_DEST_FOGC : vp->or |= (1 << 4); break;
+ case NV30_VP_INST_DEST_PSZ : vp->or |= (1 << 5); break;
+ case NV30_VP_INST_DEST_TC(0): vp->or |= (1 << 14); break;
+ case NV30_VP_INST_DEST_TC(1): vp->or |= (1 << 15); break;
+ case NV30_VP_INST_DEST_TC(2): vp->or |= (1 << 16); break;
+ case NV30_VP_INST_DEST_TC(3): vp->or |= (1 << 17); break;
+ case NV30_VP_INST_DEST_TC(4): vp->or |= (1 << 18); break;
+ case NV30_VP_INST_DEST_TC(5): vp->or |= (1 << 19); break;
+ case NV30_VP_INST_DEST_TC(6): vp->or |= (1 << 20); break;
+ case NV30_VP_INST_DEST_TC(7): vp->or |= (1 << 21); break;
+ default:
+ break;
+ }
+
+ hw[3] |= (dst.index << NV30_VP_INST_DEST_SHIFT);
+ hw[0] |= NV30_VP_INST_VEC_DEST_TEMP_MASK | (1<<20);
+
+ /*XXX: no way this is entirely correct, someone needs to
+ * figure out what exactly it is.
+ */
+ hw[3] |= 0x800;
+ break;
+ default:
+ assert(0);
+ }
+}
+
+static void
+nv20_vp_arith(struct nv20_vpc *vpc, int slot, int op,
+ struct nv20_sreg dst, int mask,
+ struct nv20_sreg s0, struct nv20_sreg s1,
+ struct nv20_sreg s2)
+{
+ struct nv20_vertex_program *vp = vpc->vp;
+ uint32_t *hw;
+
+ vp->insns = realloc(vp->insns, ++vp->nr_insns * sizeof(*vpc->vpi));
+ vpc->vpi = &vp->insns[vp->nr_insns - 1];
+ memset(vpc->vpi, 0, sizeof(*vpc->vpi));
+ vpc->vpi->const_index = -1;
+
+ hw = vpc->vpi->data;
+
+ hw[0] |= (NV30_VP_INST_COND_TR << NV30_VP_INST_COND_SHIFT);
+ hw[0] |= ((0 << NV30_VP_INST_COND_SWZ_X_SHIFT) |
+ (1 << NV30_VP_INST_COND_SWZ_Y_SHIFT) |
+ (2 << NV30_VP_INST_COND_SWZ_Z_SHIFT) |
+ (3 << NV30_VP_INST_COND_SWZ_W_SHIFT));
+
+ hw[1] |= (op << NV30_VP_INST_VEC_OPCODE_SHIFT);
+// hw[3] |= NV30_VP_INST_SCA_DEST_TEMP_MASK;
+// hw[3] |= (mask << NV30_VP_INST_VEC_WRITEMASK_SHIFT);
+
+ if (dst.type == NV30SR_OUTPUT) {
+ if (slot)
+ hw[3] |= (mask << NV30_VP_INST_SDEST_WRITEMASK_SHIFT);
+ else
+ hw[3] |= (mask << NV30_VP_INST_VDEST_WRITEMASK_SHIFT);
+ } else {
+ if (slot)
+ hw[3] |= (mask << NV30_VP_INST_STEMP_WRITEMASK_SHIFT);
+ else
+ hw[3] |= (mask << NV30_VP_INST_VTEMP_WRITEMASK_SHIFT);
+ }
+
+ emit_dst(vpc, hw, slot, dst);
+ emit_src(vpc, hw, 0, s0);
+ emit_src(vpc, hw, 1, s1);
+ emit_src(vpc, hw, 2, s2);
+}
+
+static INLINE struct nv20_sreg
+tgsi_src(struct nv20_vpc *vpc, const struct tgsi_full_src_register *fsrc) {
+ struct nv20_sreg src;
+
+ switch (fsrc->SrcRegister.File) {
+ case TGSI_FILE_INPUT:
+ src = nv20_sr(NV30SR_INPUT, fsrc->SrcRegister.Index);
+ break;
+ case TGSI_FILE_CONSTANT:
+ src = constant(vpc, fsrc->SrcRegister.Index, 0, 0, 0, 0);
+ break;
+ case TGSI_FILE_IMMEDIATE:
+ src = vpc->imm[fsrc->SrcRegister.Index];
+ break;
+ case TGSI_FILE_TEMPORARY:
+ if (vpc->high_temp < fsrc->SrcRegister.Index)
+ vpc->high_temp = fsrc->SrcRegister.Index;
+ src = nv20_sr(NV30SR_TEMP, fsrc->SrcRegister.Index);
+ break;
+ default:
+ NOUVEAU_ERR("bad src file\n");
+ break;
+ }
+
+ src.abs = fsrc->SrcRegisterExtMod.Absolute;
+ src.negate = fsrc->SrcRegister.Negate;
+ src.swz[0] = fsrc->SrcRegister.SwizzleX;
+ src.swz[1] = fsrc->SrcRegister.SwizzleY;
+ src.swz[2] = fsrc->SrcRegister.SwizzleZ;
+ src.swz[3] = fsrc->SrcRegister.SwizzleW;
+ return src;
+}
+
+static INLINE struct nv20_sreg
+tgsi_dst(struct nv20_vpc *vpc, const struct tgsi_full_dst_register *fdst) {
+ struct nv20_sreg dst;
+
+ switch (fdst->DstRegister.File) {
+ case TGSI_FILE_OUTPUT:
+ dst = nv20_sr(NV30SR_OUTPUT,
+ vpc->output_map[fdst->DstRegister.Index]);
+
+ break;
+ case TGSI_FILE_TEMPORARY:
+ dst = nv20_sr(NV30SR_TEMP, fdst->DstRegister.Index);
+ if (vpc->high_temp < dst.index)
+ vpc->high_temp = dst.index;
+ break;
+ default:
+ NOUVEAU_ERR("bad dst file\n");
+ break;
+ }
+
+ return dst;
+}
+
+static INLINE int
+tgsi_mask(uint tgsi)
+{
+ int mask = 0;
+
+ if (tgsi & TGSI_WRITEMASK_X) mask |= MASK_X;
+ if (tgsi & TGSI_WRITEMASK_Y) mask |= MASK_Y;
+ if (tgsi & TGSI_WRITEMASK_Z) mask |= MASK_Z;
+ if (tgsi & TGSI_WRITEMASK_W) mask |= MASK_W;
+ return mask;
+}
+
+static boolean
+nv20_vertprog_parse_instruction(struct nv20_vpc *vpc,
+ const struct tgsi_full_instruction *finst)
+{
+ struct nv20_sreg src[3], dst, tmp;
+ struct nv20_sreg none = nv20_sr(NV30SR_NONE, 0);
+ int mask;
+ int ai = -1, ci = -1;
+ int i;
+
+ if (finst->Instruction.Opcode == TGSI_OPCODE_END)
+ return TRUE;
+
+ vpc->temp_temp_count = 0;
+ for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
+ const struct tgsi_full_src_register *fsrc;
+
+ fsrc = &finst->FullSrcRegisters[i];
+ if (fsrc->SrcRegister.File == TGSI_FILE_TEMPORARY) {
+ src[i] = tgsi_src(vpc, fsrc);
+ }
+ }
+
+ for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
+ const struct tgsi_full_src_register *fsrc;
+
+ fsrc = &finst->FullSrcRegisters[i];
+ switch (fsrc->SrcRegister.File) {
+ case TGSI_FILE_INPUT:
+ if (ai == -1 || ai == fsrc->SrcRegister.Index) {
+ ai = fsrc->SrcRegister.Index;
+ src[i] = tgsi_src(vpc, fsrc);
+ } else {
+ src[i] = temp(vpc);
+ arith(vpc, 0, OP_MOV, src[i], MASK_ALL,
+ tgsi_src(vpc, fsrc), none, none);
+ }
+ break;
+ /*XXX: index comparison is broken now that consts come from
+ * two different register files.
+ */
+ case TGSI_FILE_CONSTANT:
+ case TGSI_FILE_IMMEDIATE:
+ if (ci == -1 || ci == fsrc->SrcRegister.Index) {
+ ci = fsrc->SrcRegister.Index;
+ src[i] = tgsi_src(vpc, fsrc);
+ } else {
+ src[i] = temp(vpc);
+ arith(vpc, 0, OP_MOV, src[i], MASK_ALL,
+ tgsi_src(vpc, fsrc), none, none);
+ }
+ break;
+ case TGSI_FILE_TEMPORARY:
+ /* handled above */
+ break;
+ default:
+ NOUVEAU_ERR("bad src file\n");
+ return FALSE;
+ }
+ }
+
+ dst = tgsi_dst(vpc, &finst->FullDstRegisters[0]);
+ mask = tgsi_mask(finst->FullDstRegisters[0].DstRegister.WriteMask);
+
+ switch (finst->Instruction.Opcode) {
+ case TGSI_OPCODE_ABS:
+ arith(vpc, 0, OP_MOV, dst, mask, abs(src[0]), none, none);
+ break;
+ case TGSI_OPCODE_ADD:
+ arith(vpc, 0, OP_ADD, dst, mask, src[0], none, src[1]);
+ break;
+ case TGSI_OPCODE_ARL:
+ arith(vpc, 0, OP_ARL, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_DP3:
+ arith(vpc, 0, OP_DP3, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_DP4:
+ arith(vpc, 0, OP_DP4, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_DPH:
+ arith(vpc, 0, OP_DPH, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_DST:
+ arith(vpc, 0, OP_DST, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_EX2:
+ arith(vpc, 1, OP_EX2, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_EXP:
+ arith(vpc, 1, OP_EXP, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_FLR:
+ arith(vpc, 0, OP_FLR, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_FRC:
+ arith(vpc, 0, OP_FRC, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_LG2:
+ arith(vpc, 1, OP_LG2, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_LIT:
+ arith(vpc, 1, OP_LIT, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_LOG:
+ arith(vpc, 1, OP_LOG, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_MAD:
+ arith(vpc, 0, OP_MAD, dst, mask, src[0], src[1], src[2]);
+ break;
+ case TGSI_OPCODE_MAX:
+ arith(vpc, 0, OP_MAX, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_MIN:
+ arith(vpc, 0, OP_MIN, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_MOV:
+ arith(vpc, 0, OP_MOV, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_MUL:
+ arith(vpc, 0, OP_MUL, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_POW:
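+		/* No native POW: expand to EX2(src1.x * LG2(src0.x)). */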
+ tmp = temp(vpc);
+ arith(vpc, 1, OP_LG2, tmp, MASK_X, none, none,
+ swz(src[0], X, X, X, X));
+ arith(vpc, 0, OP_MUL, tmp, MASK_X, swz(tmp, X, X, X, X),
+ swz(src[1], X, X, X, X), none);
+ arith(vpc, 1, OP_EX2, dst, mask, none, none,
+ swz(tmp, X, X, X, X));
+ break;
+ case TGSI_OPCODE_RCP:
+ arith(vpc, 1, OP_RCP, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_RET:
+ break;
+ case TGSI_OPCODE_RSQ:
+ arith(vpc, 1, OP_RSQ, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_SGE:
+ arith(vpc, 0, OP_SGE, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SGT:
+ arith(vpc, 0, OP_SGT, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SLT:
+ arith(vpc, 0, OP_SLT, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SUB:
+ arith(vpc, 0, OP_ADD, dst, mask, src[0], none, neg(src[1]));
+ break;
+ case TGSI_OPCODE_XPD:
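+		/* Cross product as MUL + MAD with rotated swizzles:
+		 * dst.xyz = src0.yzx * src1.zxy - src0.zxy * src1.yzx
+		 */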
+ tmp = temp(vpc);
+ arith(vpc, 0, OP_MUL, tmp, mask,
+ swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none);
+ arith(vpc, 0, OP_MAD, dst, (mask & ~MASK_W),
+ swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y),
+ neg(tmp));
+ break;
+ default:
+ NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static boolean
+nv20_vertprog_parse_decl_output(struct nv20_vpc *vpc,
+ const struct tgsi_full_declaration *fdec)
+{
+ int hw;
+
+ switch (fdec->Semantic.SemanticName) {
+ case TGSI_SEMANTIC_POSITION:
+ hw = NV30_VP_INST_DEST_POS;
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ if (fdec->Semantic.SemanticIndex == 0) {
+ hw = NV30_VP_INST_DEST_COL0;
+ } else
+ if (fdec->Semantic.SemanticIndex == 1) {
+ hw = NV30_VP_INST_DEST_COL1;
+ } else {
+ NOUVEAU_ERR("bad colour semantic index\n");
+ return FALSE;
+ }
+ break;
+ case TGSI_SEMANTIC_BCOLOR:
+ if (fdec->Semantic.SemanticIndex == 0) {
+ hw = NV30_VP_INST_DEST_BFC0;
+ } else
+ if (fdec->Semantic.SemanticIndex == 1) {
+ hw = NV30_VP_INST_DEST_BFC1;
+ } else {
+ NOUVEAU_ERR("bad bcolour semantic index\n");
+ return FALSE;
+ }
+ break;
+ case TGSI_SEMANTIC_FOG:
+ hw = NV30_VP_INST_DEST_FOGC;
+ break;
+ case TGSI_SEMANTIC_PSIZE:
+ hw = NV30_VP_INST_DEST_PSZ;
+ break;
+ case TGSI_SEMANTIC_GENERIC:
+ if (fdec->Semantic.SemanticIndex <= 7) {
+ hw = NV30_VP_INST_DEST_TC(fdec->Semantic.SemanticIndex);
+ } else {
+ NOUVEAU_ERR("bad generic semantic index\n");
+ return FALSE;
+ }
+ break;
+ default:
+ NOUVEAU_ERR("bad output semantic\n");
+ return FALSE;
+ }
+
+ vpc->output_map[fdec->DeclarationRange.First] = hw;
+ return TRUE;
+}
+
+static boolean
+nv20_vertprog_prepare(struct nv20_vpc *vpc)
+{
+ struct tgsi_parse_context p;
+ int nr_imm = 0;
+
+ tgsi_parse_init(&p, vpc->vp->pipe.tokens);
+ while (!tgsi_parse_end_of_tokens(&p)) {
+ const union tgsi_full_token *tok = &p.FullToken;
+
+ tgsi_parse_token(&p);
+ switch(tok->Token.Type) {
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ nr_imm++;
+ break;
+ default:
+ break;
+ }
+ }
+ tgsi_parse_free(&p);
+
+ if (nr_imm) {
+ vpc->imm = CALLOC(nr_imm, sizeof(struct nv20_sreg));
+ assert(vpc->imm);
+ }
+
+ return TRUE;
+}
+
+static void
+nv20_vertprog_translate(struct nv20_context *nv20,
+ struct nv20_vertex_program *vp)
+{
+ struct tgsi_parse_context parse;
+ struct nv20_vpc *vpc = NULL;
+
+	tgsi_dump(vp->pipe.tokens, 0);
+
+ vpc = CALLOC(1, sizeof(struct nv20_vpc));
+ if (!vpc)
+ return;
+ vpc->vp = vp;
+ vpc->high_temp = -1;
+
+ if (!nv20_vertprog_prepare(vpc)) {
+ FREE(vpc);
+ return;
+ }
+
+ tgsi_parse_init(&parse, vp->pipe.tokens);
+
+ while (!tgsi_parse_end_of_tokens(&parse)) {
+ tgsi_parse_token(&parse);
+
+ switch (parse.FullToken.Token.Type) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ {
+ const struct tgsi_full_declaration *fdec;
+ fdec = &parse.FullToken.FullDeclaration;
+ switch (fdec->Declaration.File) {
+ case TGSI_FILE_OUTPUT:
+ if (!nv20_vertprog_parse_decl_output(vpc, fdec))
+ goto out_err;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ {
+ const struct tgsi_full_immediate *imm;
+
+ imm = &parse.FullToken.FullImmediate;
+ assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
+ assert(imm->Immediate.NrTokens == 4 + 1);
+ vpc->imm[vpc->nr_imm++] =
+ constant(vpc, -1,
+ imm->u[0].Float,
+ imm->u[1].Float,
+ imm->u[2].Float,
+ imm->u[3].Float);
+ }
+ break;
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ {
+ const struct tgsi_full_instruction *finst;
+ finst = &parse.FullToken.FullInstruction;
+ if (!nv20_vertprog_parse_instruction(vpc, finst))
+ goto out_err;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ vp->insns[vp->nr_insns - 1].data[3] |= NV30_VP_INST_LAST;
+ vp->translated = TRUE;
+out_err:
+ tgsi_parse_free(&parse);
+ FREE(vpc);
+}
+
+static boolean
+nv20_vertprog_validate(struct nv20_context *nv20)
+{
+ struct pipe_screen *pscreen = nv20->pipe.screen;
+ struct nouveau_winsys *nvws = nv20->nvws;
+ struct nouveau_grobj *rankine = nv20->screen->rankine;
+ struct nv20_vertex_program *vp;
+ struct pipe_buffer *constbuf;
+ boolean upload_code = FALSE, upload_data = FALSE;
+ int i;
+
+ vp = nv20->vertprog;
+ constbuf = nv20->constbuf[PIPE_SHADER_VERTEX];
+
+ /* Translate TGSI shader into hw bytecode */
+ if (!vp->translated) {
+ nv20_vertprog_translate(nv20, vp);
+ if (!vp->translated)
+ return FALSE;
+ }
+
+ /* Allocate hw vtxprog exec slots */
+ if (!vp->exec) {
+ struct nouveau_resource *heap = nv20->screen->vp_exec_heap;
+ struct nouveau_stateobj *so;
+ uint vplen = vp->nr_insns;
+
+ if (nvws->res_alloc(heap, vplen, vp, &vp->exec)) {
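+			/* Exec heap is full: evict other vertex programs to
+			 * make room, then retry the allocation.
+			 */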
+ while (heap->next && heap->size < vplen) {
+ struct nv20_vertex_program *evict;
+
+ evict = heap->next->priv;
+ nvws->res_free(&evict->exec);
+ }
+
+ if (nvws->res_alloc(heap, vplen, vp, &vp->exec))
+ assert(0);
+ }
+
+ so = so_new(2, 0);
+ so_method(so, rankine, NV34TCL_VP_START_FROM_ID, 1);
+ so_data (so, vp->exec->start);
+ so_ref(so, &vp->so);
+
+ upload_code = TRUE;
+ }
+
+ /* Allocate hw vtxprog const slots */
+ if (vp->nr_consts && !vp->data) {
+ struct nouveau_resource *heap = nv20->screen->vp_data_heap;
+
+ if (nvws->res_alloc(heap, vp->nr_consts, vp, &vp->data)) {
+ while (heap->next && heap->size < vp->nr_consts) {
+ struct nv20_vertex_program *evict;
+
+ evict = heap->next->priv;
+ nvws->res_free(&evict->data);
+ }
+
+ if (nvws->res_alloc(heap, vp->nr_consts, vp, &vp->data))
+ assert(0);
+ }
+
+ /*XXX: handle this some day */
+ assert(vp->data->start >= vp->data_start_min);
+
+ upload_data = TRUE;
+ if (vp->data_start != vp->data->start)
+ upload_code = TRUE;
+ }
+
+ /* If exec or data segments moved we need to patch the program to
+ * fixup offsets and register IDs.
+ */
+ if (vp->exec_start != vp->exec->start) {
+ for (i = 0; i < vp->nr_insns; i++) {
+ struct nv20_vertex_program_exec *vpi = &vp->insns[i];
+
+ if (vpi->has_branch_offset) {
+ assert(0);
+ }
+ }
+
+ vp->exec_start = vp->exec->start;
+ }
+
+ if (vp->nr_consts && vp->data_start != vp->data->start) {
+ for (i = 0; i < vp->nr_insns; i++) {
+ struct nv20_vertex_program_exec *vpi = &vp->insns[i];
+
+ if (vpi->const_index >= 0) {
+ vpi->data[1] &= ~NV30_VP_INST_CONST_SRC_MASK;
+ vpi->data[1] |=
+ (vpi->const_index + vp->data->start) <<
+ NV30_VP_INST_CONST_SRC_SHIFT;
+
+ }
+ }
+
+ vp->data_start = vp->data->start;
+ }
+
+ /* Update + Upload constant values */
+ if (vp->nr_consts) {
+ float *map = NULL;
+
+ if (constbuf) {
+ map = pipe_buffer_map(pscreen, constbuf,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ }
+
+ for (i = 0; i < vp->nr_consts; i++) {
+ struct nv20_vertex_program_data *vpd = &vp->consts[i];
+
+ if (vpd->index >= 0) {
+ if (!upload_data &&
+ !memcmp(vpd->value, &map[vpd->index * 4],
+ 4 * sizeof(float)))
+ continue;
+ memcpy(vpd->value, &map[vpd->index * 4],
+ 4 * sizeof(float));
+ }
+
+ BEGIN_RING(rankine, NV34TCL_VP_UPLOAD_CONST_ID, 5);
+ OUT_RING (i + vp->data->start);
+ OUT_RINGp ((uint32_t *)vpd->value, 4);
+ }
+
+ if (constbuf)
+ pipe_buffer_unmap(pscreen, constbuf);
+ }
+
+ /* Upload vtxprog */
+ if (upload_code) {
+#if 0
+ for (i = 0; i < vp->nr_insns; i++) {
+ NOUVEAU_MSG("VP inst %d: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, vp->insns[i].data[0], vp->insns[i].data[1],
+ vp->insns[i].data[2], vp->insns[i].data[3]);
+ }
+#endif
+ BEGIN_RING(rankine, NV34TCL_VP_UPLOAD_FROM_ID, 1);
+ OUT_RING (vp->exec->start);
+ for (i = 0; i < vp->nr_insns; i++) {
+ BEGIN_RING(rankine, NV34TCL_VP_UPLOAD_INST(0), 4);
+ OUT_RINGp (vp->insns[i].data, 4);
+ }
+ }
+
+ if (vp->so != nv20->state.hw[NV30_STATE_VERTPROG]) {
+ so_ref(vp->so, &nv20->state.hw[NV30_STATE_VERTPROG]);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+void
+nv20_vertprog_destroy(struct nv20_context *nv20, struct nv20_vertex_program *vp)
+{
+ struct nouveau_winsys *nvws = nv20->screen->nvws;
+
+ vp->translated = FALSE;
+
+ if (vp->nr_insns) {
+ FREE(vp->insns);
+ vp->insns = NULL;
+ vp->nr_insns = 0;
+ }
+
+ if (vp->nr_consts) {
+ FREE(vp->consts);
+ vp->consts = NULL;
+ vp->nr_consts = 0;
+ }
+
+ nvws->res_free(&vp->exec);
+ vp->exec_start = 0;
+ nvws->res_free(&vp->data);
+ vp->data_start = 0;
+ vp->data_start_min = 0;
+
+ vp->ir = vp->or = 0;
+ so_ref(NULL, &vp->so);
+}
+
+struct nv20_state_entry nv20_state_vertprog = {
+ .validate = nv20_vertprog_validate,
+ .dirty = {
+ .pipe = NV30_NEW_VERTPROG /*| NV30_NEW_UCP*/,
+ .hw = NV30_STATE_VERTPROG,
+ }
+};
diff --git a/src/gallium/drivers/nv30/Makefile b/src/gallium/drivers/nv30/Makefile
new file mode 100644
index 0000000000..364c80d8f3
--- /dev/null
+++ b/src/gallium/drivers/nv30/Makefile
@@ -0,0 +1,29 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = nv30
+
+C_SOURCES = \
+ nv30_clear.c \
+ nv30_context.c \
+ nv30_draw.c \
+ nv30_fragprog.c \
+ nv30_fragtex.c \
+ nv30_miptree.c \
+ nv30_query.c \
+ nv30_screen.c \
+ nv30_state.c \
+ nv30_state_blend.c \
+ nv30_state_emit.c \
+ nv30_state_fb.c \
+ nv30_state_rasterizer.c \
+ nv30_state_scissor.c \
+ nv30_state_stipple.c \
+ nv30_state_viewport.c \
+ nv30_state_zsa.c \
+ nv30_surface.c \
+ nv30_transfer.c \
+ nv30_vbo.c \
+ nv30_vertprog.c
+
+include ../../Makefile.template
diff --git a/src/gallium/drivers/nv30/nv30_clear.c b/src/gallium/drivers/nv30/nv30_clear.c
new file mode 100644
index 0000000000..c4ba926664
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_clear.c
@@ -0,0 +1,14 @@
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "util/u_clear.h"
+
+#include "nv30_context.h"
+
+void
+nv30_clear(struct pipe_context *pipe, unsigned buffers,
+ const float *rgba, double depth, unsigned stencil)
+{
+ util_clear(pipe, &nv30_context(pipe)->framebuffer, buffers, rgba, depth,
+ stencil);
+}
diff --git a/src/gallium/drivers/nv30/nv30_context.c b/src/gallium/drivers/nv30/nv30_context.c
new file mode 100644
index 0000000000..f827bdc78b
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_context.c
@@ -0,0 +1,98 @@
+#include "draw/draw_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/internal/p_winsys_screen.h"
+
+#include "nv30_context.h"
+#include "nv30_screen.h"
+
+static void
+nv30_flush(struct pipe_context *pipe, unsigned flags,
+ struct pipe_fence_handle **fence)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+
+ if (flags & PIPE_FLUSH_TEXTURE_CACHE) {
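+		/* Poke the texture cache flush method (0x1fd8) before the
+		 * ring is fired.
+		 */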
+ BEGIN_RING(rankine, 0x1fd8, 1);
+ OUT_RING (2);
+ BEGIN_RING(rankine, 0x1fd8, 1);
+ OUT_RING (1);
+ }
+
+ FIRE_RING(fence);
+}
+
+static void
+nv30_destroy(struct pipe_context *pipe)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+
+ if (nv30->draw)
+ draw_destroy(nv30->draw);
+ FREE(nv30);
+}
+
+static unsigned int
+nv30_is_texture_referenced( struct pipe_context *pipe,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+static unsigned int
+nv30_is_buffer_referenced( struct pipe_context *pipe,
+ struct pipe_buffer *buf)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+struct pipe_context *
+nv30_create(struct pipe_screen *pscreen, unsigned pctx_id)
+{
+ struct nv30_screen *screen = nv30_screen(pscreen);
+ struct pipe_winsys *ws = pscreen->winsys;
+ struct nv30_context *nv30;
+ struct nouveau_winsys *nvws = screen->nvws;
+
+ nv30 = CALLOC(1, sizeof(struct nv30_context));
+ if (!nv30)
+ return NULL;
+ nv30->screen = screen;
+ nv30->pctx_id = pctx_id;
+
+ nv30->nvws = nvws;
+
+ nv30->pipe.winsys = ws;
+ nv30->pipe.screen = pscreen;
+ nv30->pipe.destroy = nv30_destroy;
+ nv30->pipe.draw_arrays = nv30_draw_arrays;
+ nv30->pipe.draw_elements = nv30_draw_elements;
+ nv30->pipe.clear = nv30_clear;
+ nv30->pipe.flush = nv30_flush;
+
+ nv30->pipe.is_texture_referenced = nv30_is_texture_referenced;
+ nv30->pipe.is_buffer_referenced = nv30_is_buffer_referenced;
+
+ nv30_init_query_functions(nv30);
+ nv30_init_surface_functions(nv30);
+ nv30_init_state_functions(nv30);
+
+ /* Create, configure, and install fallback swtnl path */
+ nv30->draw = draw_create();
+ draw_wide_point_threshold(nv30->draw, 9999999.0);
+ draw_wide_line_threshold(nv30->draw, 9999999.0);
+ draw_enable_line_stipple(nv30->draw, FALSE);
+ draw_enable_point_sprites(nv30->draw, FALSE);
+ draw_set_rasterize_stage(nv30->draw, nv30_draw_render_stage(nv30));
+
+ return &nv30->pipe;
+}
+
diff --git a/src/gallium/drivers/nv30/nv30_context.h b/src/gallium/drivers/nv30/nv30_context.h
new file mode 100644
index 0000000000..4229c0a0e1
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_context.h
@@ -0,0 +1,212 @@
+#ifndef __NV30_CONTEXT_H__
+#define __NV30_CONTEXT_H__
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_compiler.h"
+
+#include "util/u_memory.h"
+#include "util/u_math.h"
+
+#include "draw/draw_vertex.h"
+
+#include "nouveau/nouveau_winsys.h"
+#include "nouveau/nouveau_gldefs.h"
+
+#define NOUVEAU_PUSH_CONTEXT(ctx) \
+ struct nv30_screen *ctx = nv30->screen
+#include "nouveau/nouveau_push.h"
+#include "nouveau/nouveau_stateobj.h"
+
+#include "nv30_state.h"
+
+#define NOUVEAU_ERR(fmt, args...) \
+ fprintf(stderr, "%s:%d - "fmt, __func__, __LINE__, ##args);
+#define NOUVEAU_MSG(fmt, args...) \
+ fprintf(stderr, "nouveau: "fmt, ##args);
+
+enum nv30_state_index {
+ NV30_STATE_FB = 0,
+ NV30_STATE_VIEWPORT = 1,
+ NV30_STATE_BLEND = 2,
+ NV30_STATE_RAST = 3,
+ NV30_STATE_ZSA = 4,
+ NV30_STATE_BCOL = 5,
+ NV30_STATE_CLIP = 6,
+ NV30_STATE_SCISSOR = 7,
+ NV30_STATE_STIPPLE = 8,
+ NV30_STATE_FRAGPROG = 9,
+ NV30_STATE_VERTPROG = 10,
+ NV30_STATE_FRAGTEX0 = 11,
+ NV30_STATE_FRAGTEX1 = 12,
+ NV30_STATE_FRAGTEX2 = 13,
+ NV30_STATE_FRAGTEX3 = 14,
+ NV30_STATE_FRAGTEX4 = 15,
+ NV30_STATE_FRAGTEX5 = 16,
+ NV30_STATE_FRAGTEX6 = 17,
+ NV30_STATE_FRAGTEX7 = 18,
+ NV30_STATE_FRAGTEX8 = 19,
+ NV30_STATE_FRAGTEX9 = 20,
+ NV30_STATE_FRAGTEX10 = 21,
+ NV30_STATE_FRAGTEX11 = 22,
+ NV30_STATE_FRAGTEX12 = 23,
+ NV30_STATE_FRAGTEX13 = 24,
+ NV30_STATE_FRAGTEX14 = 25,
+ NV30_STATE_FRAGTEX15 = 26,
+ NV30_STATE_VERTTEX0 = 27,
+ NV30_STATE_VERTTEX1 = 28,
+ NV30_STATE_VERTTEX2 = 29,
+ NV30_STATE_VERTTEX3 = 30,
+ NV30_STATE_VTXBUF = 31,
+ NV30_STATE_VTXFMT = 32,
+ NV30_STATE_VTXATTR = 33,
+ NV30_STATE_MAX = 34
+};
+
+#include "nv30_screen.h"
+
+#define NV30_NEW_BLEND (1 << 0)
+#define NV30_NEW_RAST (1 << 1)
+#define NV30_NEW_ZSA (1 << 2)
+#define NV30_NEW_SAMPLER (1 << 3)
+#define NV30_NEW_FB (1 << 4)
+#define NV30_NEW_STIPPLE (1 << 5)
+#define NV30_NEW_SCISSOR (1 << 6)
+#define NV30_NEW_VIEWPORT (1 << 7)
+#define NV30_NEW_BCOL (1 << 8)
+#define NV30_NEW_VERTPROG (1 << 9)
+#define NV30_NEW_FRAGPROG (1 << 10)
+#define NV30_NEW_ARRAYS (1 << 11)
+#define NV30_NEW_UCP (1 << 12)
+
+struct nv30_rasterizer_state {
+ struct pipe_rasterizer_state pipe;
+ struct nouveau_stateobj *so;
+};
+
+struct nv30_zsa_state {
+ struct pipe_depth_stencil_alpha_state pipe;
+ struct nouveau_stateobj *so;
+};
+
+struct nv30_blend_state {
+ struct pipe_blend_state pipe;
+ struct nouveau_stateobj *so;
+};
+
+
+struct nv30_state {
+ unsigned scissor_enabled;
+ unsigned stipple_enabled;
+ unsigned viewport_bypass;
+ unsigned fp_samplers;
+
+ uint64_t dirty;
+ struct nouveau_stateobj *hw[NV30_STATE_MAX];
+};
+
+struct nv30_context {
+ struct pipe_context pipe;
+
+ struct nouveau_winsys *nvws;
+ struct nv30_screen *screen;
+ unsigned pctx_id;
+
+ struct draw_context *draw;
+
+ /* HW state derived from pipe states */
+ struct nv30_state state;
+
+ /* Context state */
+ unsigned dirty;
+ struct pipe_scissor_state scissor;
+ unsigned stipple[32];
+ struct nv30_vertex_program *vertprog;
+ struct nv30_fragment_program *fragprog;
+ struct pipe_buffer *constbuf[PIPE_SHADER_TYPES];
+ unsigned constbuf_nr[PIPE_SHADER_TYPES];
+ struct nv30_rasterizer_state *rasterizer;
+ struct nv30_zsa_state *zsa;
+ struct nv30_blend_state *blend;
+ struct pipe_blend_color blend_colour;
+ struct pipe_viewport_state viewport;
+ struct pipe_framebuffer_state framebuffer;
+ struct pipe_buffer *idxbuf;
+ unsigned idxbuf_format;
+ struct nv30_sampler_state *tex_sampler[PIPE_MAX_SAMPLERS];
+ struct nv30_miptree *tex_miptree[PIPE_MAX_SAMPLERS];
+ unsigned nr_samplers;
+ unsigned nr_textures;
+ unsigned dirty_samplers;
+ struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
+ unsigned vtxbuf_nr;
+ struct pipe_vertex_element vtxelt[PIPE_MAX_ATTRIBS];
+ unsigned vtxelt_nr;
+ const unsigned *edgeflags;
+};
+
+static INLINE struct nv30_context *
+nv30_context(struct pipe_context *pipe)
+{
+ return (struct nv30_context *)pipe;
+}
+
+struct nv30_state_entry {
+ boolean (*validate)(struct nv30_context *nv30);
+ struct {
+ unsigned pipe;
+ unsigned hw;
+ } dirty;
+};
+
+extern void nv30_init_state_functions(struct nv30_context *nv30);
+extern void nv30_init_surface_functions(struct nv30_context *nv30);
+extern void nv30_init_query_functions(struct nv30_context *nv30);
+
+extern void nv30_screen_init_miptree_functions(struct pipe_screen *pscreen);
+
+/* nv30_draw.c */
+extern struct draw_stage *nv30_draw_render_stage(struct nv30_context *nv30);
+
+/* nv30_vertprog.c */
+extern void nv30_vertprog_destroy(struct nv30_context *,
+ struct nv30_vertex_program *);
+
+/* nv30_fragprog.c */
+extern void nv30_fragprog_destroy(struct nv30_context *,
+ struct nv30_fragment_program *);
+
+/* nv30_fragtex.c */
+extern void nv30_fragtex_bind(struct nv30_context *);
+
+/* nv30_state.c and friends */
+extern boolean nv30_state_validate(struct nv30_context *nv30);
+extern void nv30_state_emit(struct nv30_context *nv30);
+extern struct nv30_state_entry nv30_state_rasterizer;
+extern struct nv30_state_entry nv30_state_scissor;
+extern struct nv30_state_entry nv30_state_stipple;
+extern struct nv30_state_entry nv30_state_fragprog;
+extern struct nv30_state_entry nv30_state_vertprog;
+extern struct nv30_state_entry nv30_state_blend;
+extern struct nv30_state_entry nv30_state_blend_colour;
+extern struct nv30_state_entry nv30_state_zsa;
+extern struct nv30_state_entry nv30_state_viewport;
+extern struct nv30_state_entry nv30_state_framebuffer;
+extern struct nv30_state_entry nv30_state_fragtex;
+extern struct nv30_state_entry nv30_state_vbo;
+
+/* nv30_vbo.c */
+extern boolean nv30_draw_arrays(struct pipe_context *, unsigned mode,
+ unsigned start, unsigned count);
+extern boolean nv30_draw_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned mode, unsigned start,
+ unsigned count);
+
+/* nv30_clear.c */
+extern void nv30_clear(struct pipe_context *pipe, unsigned buffers,
+ const float *rgba, double depth, unsigned stencil);
+
+#endif
diff --git a/src/gallium/drivers/nv30/nv30_draw.c b/src/gallium/drivers/nv30/nv30_draw.c
new file mode 100644
index 0000000000..74fc138c05
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_draw.c
@@ -0,0 +1,61 @@
+#include "draw/draw_pipe.h"
+
+#include "nv30_context.h"
+
+struct nv30_draw_stage {
+ struct draw_stage draw;
+ struct nv30_context *nv30;
+};
+
+static void
+nv30_draw_point(struct draw_stage *draw, struct prim_header *prim)
+{
+ NOUVEAU_ERR("\n");
+}
+
+static void
+nv30_draw_line(struct draw_stage *draw, struct prim_header *prim)
+{
+ NOUVEAU_ERR("\n");
+}
+
+static void
+nv30_draw_tri(struct draw_stage *draw, struct prim_header *prim)
+{
+ NOUVEAU_ERR("\n");
+}
+
+static void
+nv30_draw_flush(struct draw_stage *draw, unsigned flags)
+{
+}
+
+static void
+nv30_draw_reset_stipple_counter(struct draw_stage *draw)
+{
+ NOUVEAU_ERR("\n");
+}
+
+static void
+nv30_draw_destroy(struct draw_stage *draw)
+{
+ FREE(draw);
+}
+
+struct draw_stage *
+nv30_draw_render_stage(struct nv30_context *nv30)
+{
+ struct nv30_draw_stage *nv30draw = CALLOC_STRUCT(nv30_draw_stage);
+
+ nv30draw->nv30 = nv30;
+ nv30draw->draw.draw = nv30->draw;
+ nv30draw->draw.point = nv30_draw_point;
+ nv30draw->draw.line = nv30_draw_line;
+ nv30draw->draw.tri = nv30_draw_tri;
+ nv30draw->draw.flush = nv30_draw_flush;
+ nv30draw->draw.reset_stipple_counter = nv30_draw_reset_stipple_counter;
+ nv30draw->draw.destroy = nv30_draw_destroy;
+
+ return &nv30draw->draw;
+}
+
diff --git a/src/gallium/drivers/nv30/nv30_fragprog.c b/src/gallium/drivers/nv30/nv30_fragprog.c
new file mode 100644
index 0000000000..a48ba9782b
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_fragprog.c
@@ -0,0 +1,915 @@
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_util.h"
+
+#include "nv30_context.h"
+
+#define SWZ_X 0
+#define SWZ_Y 1
+#define SWZ_Z 2
+#define SWZ_W 3
+#define MASK_X 1
+#define MASK_Y 2
+#define MASK_Z 4
+#define MASK_W 8
+#define MASK_ALL (MASK_X|MASK_Y|MASK_Z|MASK_W)
+#define DEF_SCALE NV30_FP_OP_DST_SCALE_1X
+#define DEF_CTEST NV30_FP_OP_COND_TR
+#include "nv30_shader.h"
+
+#define swz(s,x,y,z,w) nv30_sr_swz((s), SWZ_##x, SWZ_##y, SWZ_##z, SWZ_##w)
+#define neg(s) nv30_sr_neg((s))
+#define abs(s) nv30_sr_abs((s))
+#define scale(s,v) nv30_sr_scale((s), NV30_FP_OP_DST_SCALE_##v)
+
+#define MAX_CONSTS 128
+#define MAX_IMM 32
+struct nv30_fpc {
+ struct nv30_fragment_program *fp;
+
+ uint attrib_map[PIPE_MAX_SHADER_INPUTS];
+
+ int high_temp;
+ int temp_temp_count;
+ int num_regs;
+
+ uint depth_id;
+ uint colour_id;
+
+ unsigned inst_offset;
+
+ struct {
+ int pipe;
+ float vals[4];
+ } consts[MAX_CONSTS];
+ int nr_consts;
+
+ struct nv30_sreg imm[MAX_IMM];
+ unsigned nr_imm;
+};
+
+static INLINE struct nv30_sreg
+temp(struct nv30_fpc *fpc)
+{
+ int idx;
+
+ idx = fpc->temp_temp_count++;
+ idx += fpc->high_temp + 1;
+ return nv30_sr(NV30SR_TEMP, idx);
+}
+
+static INLINE struct nv30_sreg
+constant(struct nv30_fpc *fpc, int pipe, float vals[4])
+{
+ int idx;
+
+ if (fpc->nr_consts == MAX_CONSTS)
+ assert(0);
+ idx = fpc->nr_consts++;
+
+ fpc->consts[idx].pipe = pipe;
+ if (pipe == -1)
+ memcpy(fpc->consts[idx].vals, vals, 4 * sizeof(float));
+ return nv30_sr(NV30SR_CONST, idx);
+}
+
+#define arith(cc,s,o,d,m,s0,s1,s2) \
+ nv30_fp_arith((cc), (s), NV30_FP_OP_OPCODE_##o, \
+ (d), (m), (s0), (s1), (s2))
+#define tex(cc,s,o,u,d,m,s0,s1,s2) \
+ nv30_fp_tex((cc), (s), NV30_FP_OP_OPCODE_##o, (u), \
+ (d), (m), (s0), none, none)
+
+static void
+grow_insns(struct nv30_fpc *fpc, int size)
+{
+ struct nv30_fragment_program *fp = fpc->fp;
+
+ fp->insn_len += size;
+ fp->insn = realloc(fp->insn, sizeof(uint32_t) * fp->insn_len);
+}
+
+static void
+emit_src(struct nv30_fpc *fpc, int pos, struct nv30_sreg src)
+{
+ struct nv30_fragment_program *fp = fpc->fp;
+ uint32_t *hw = &fp->insn[fpc->inst_offset];
+ uint32_t sr = 0;
+
+ switch (src.type) {
+ case NV30SR_INPUT:
+ sr |= (NV30_FP_REG_TYPE_INPUT << NV30_FP_REG_TYPE_SHIFT);
+ hw[0] |= (src.index << NV30_FP_OP_INPUT_SRC_SHIFT);
+ break;
+ case NV30SR_OUTPUT:
+ sr |= NV30_FP_REG_SRC_HALF;
+ /* fall-through */
+ case NV30SR_TEMP:
+ sr |= (NV30_FP_REG_TYPE_TEMP << NV30_FP_REG_TYPE_SHIFT);
+ sr |= (src.index << NV30_FP_REG_SRC_SHIFT);
+ break;
+ case NV30SR_CONST:
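+		/* Constants live inline in the instruction stream, so grow
+		 * the program by four extra words.  Values sourced from the
+		 * constant buffer record their offset for later patching;
+		 * immediates are copied in directly.
+		 */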
+ grow_insns(fpc, 4);
+ hw = &fp->insn[fpc->inst_offset];
+ if (fpc->consts[src.index].pipe >= 0) {
+ struct nv30_fragment_program_data *fpd;
+
+ fp->consts = realloc(fp->consts, ++fp->nr_consts *
+ sizeof(*fpd));
+ fpd = &fp->consts[fp->nr_consts - 1];
+ fpd->offset = fpc->inst_offset + 4;
+ fpd->index = fpc->consts[src.index].pipe;
+ memset(&fp->insn[fpd->offset], 0, sizeof(uint32_t) * 4);
+ } else {
+ memcpy(&fp->insn[fpc->inst_offset + 4],
+ fpc->consts[src.index].vals,
+ sizeof(uint32_t) * 4);
+ }
+
+ sr |= (NV30_FP_REG_TYPE_CONST << NV30_FP_REG_TYPE_SHIFT);
+ break;
+ case NV30SR_NONE:
+ sr |= (NV30_FP_REG_TYPE_INPUT << NV30_FP_REG_TYPE_SHIFT);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (src.negate)
+ sr |= NV30_FP_REG_NEGATE;
+
+ if (src.abs)
+ hw[1] |= (1 << (29 + pos));
+
+ sr |= ((src.swz[0] << NV30_FP_REG_SWZ_X_SHIFT) |
+ (src.swz[1] << NV30_FP_REG_SWZ_Y_SHIFT) |
+ (src.swz[2] << NV30_FP_REG_SWZ_Z_SHIFT) |
+ (src.swz[3] << NV30_FP_REG_SWZ_W_SHIFT));
+
+ hw[pos + 1] |= sr;
+}
+
+static void
+emit_dst(struct nv30_fpc *fpc, struct nv30_sreg dst)
+{
+ struct nv30_fragment_program *fp = fpc->fp;
+ uint32_t *hw = &fp->insn[fpc->inst_offset];
+
+ switch (dst.type) {
+ case NV30SR_TEMP:
+ if (fpc->num_regs < (dst.index + 1))
+ fpc->num_regs = dst.index + 1;
+ break;
+ case NV30SR_OUTPUT:
+ if (dst.index == 1) {
+ fp->fp_control |= 0xe;
+ } else {
+ hw[0] |= NV30_FP_OP_OUT_REG_HALF;
+ }
+ break;
+ case NV30SR_NONE:
+ hw[0] |= (1 << 30);
+ break;
+ default:
+ assert(0);
+ }
+
+ hw[0] |= (dst.index << NV30_FP_OP_OUT_REG_SHIFT);
+}
+
+static void
+nv30_fp_arith(struct nv30_fpc *fpc, int sat, int op,
+ struct nv30_sreg dst, int mask,
+ struct nv30_sreg s0, struct nv30_sreg s1, struct nv30_sreg s2)
+{
+ struct nv30_fragment_program *fp = fpc->fp;
+ uint32_t *hw;
+
+ fpc->inst_offset = fp->insn_len;
+ grow_insns(fpc, 4);
+ hw = &fp->insn[fpc->inst_offset];
+ memset(hw, 0, sizeof(uint32_t) * 4);
+
+ if (op == NV30_FP_OP_OPCODE_KIL)
+ fp->fp_control |= NV34TCL_FP_CONTROL_USES_KIL;
+ hw[0] |= (op << NV30_FP_OP_OPCODE_SHIFT);
+ hw[0] |= (mask << NV30_FP_OP_OUTMASK_SHIFT);
+ hw[2] |= (dst.dst_scale << NV30_FP_OP_DST_SCALE_SHIFT);
+
+ if (sat)
+ hw[0] |= NV30_FP_OP_OUT_SAT;
+
+ if (dst.cc_update)
+ hw[0] |= NV30_FP_OP_COND_WRITE_ENABLE;
+ hw[1] |= (dst.cc_test << NV30_FP_OP_COND_SHIFT);
+ hw[1] |= ((dst.cc_swz[0] << NV30_FP_OP_COND_SWZ_X_SHIFT) |
+ (dst.cc_swz[1] << NV30_FP_OP_COND_SWZ_Y_SHIFT) |
+ (dst.cc_swz[2] << NV30_FP_OP_COND_SWZ_Z_SHIFT) |
+ (dst.cc_swz[3] << NV30_FP_OP_COND_SWZ_W_SHIFT));
+
+ emit_dst(fpc, dst);
+ emit_src(fpc, 0, s0);
+ emit_src(fpc, 1, s1);
+ emit_src(fpc, 2, s2);
+}
+
+static void
+nv30_fp_tex(struct nv30_fpc *fpc, int sat, int op, int unit,
+ struct nv30_sreg dst, int mask,
+ struct nv30_sreg s0, struct nv30_sreg s1, struct nv30_sreg s2)
+{
+ struct nv30_fragment_program *fp = fpc->fp;
+
+ nv30_fp_arith(fpc, sat, op, dst, mask, s0, s1, s2);
+
+ fp->insn[fpc->inst_offset] |= (unit << NV30_FP_OP_TEX_UNIT_SHIFT);
+ fp->samplers |= (1 << unit);
+}
+
+static INLINE struct nv30_sreg
+tgsi_src(struct nv30_fpc *fpc, const struct tgsi_full_src_register *fsrc)
+{
+ struct nv30_sreg src;
+
+ switch (fsrc->SrcRegister.File) {
+ case TGSI_FILE_INPUT:
+ src = nv30_sr(NV30SR_INPUT,
+ fpc->attrib_map[fsrc->SrcRegister.Index]);
+ break;
+ case TGSI_FILE_CONSTANT:
+ src = constant(fpc, fsrc->SrcRegister.Index, NULL);
+ break;
+ case TGSI_FILE_IMMEDIATE:
+ assert(fsrc->SrcRegister.Index < fpc->nr_imm);
+ src = fpc->imm[fsrc->SrcRegister.Index];
+ break;
+ case TGSI_FILE_TEMPORARY:
+ src = nv30_sr(NV30SR_TEMP, fsrc->SrcRegister.Index + 1);
+ if (fpc->high_temp < src.index)
+ fpc->high_temp = src.index;
+ break;
+ /* This is clearly insane, but gallium hands us shaders like this.
+ * Luckily fragprog results are just temp regs..
+ */
+ case TGSI_FILE_OUTPUT:
+ if (fsrc->SrcRegister.Index == fpc->colour_id)
+ return nv30_sr(NV30SR_OUTPUT, 0);
+ else
+ return nv30_sr(NV30SR_OUTPUT, 1);
+ break;
+ default:
+ NOUVEAU_ERR("bad src file\n");
+ break;
+ }
+
+ src.abs = fsrc->SrcRegisterExtMod.Absolute;
+ src.negate = fsrc->SrcRegister.Negate;
+ src.swz[0] = fsrc->SrcRegister.SwizzleX;
+ src.swz[1] = fsrc->SrcRegister.SwizzleY;
+ src.swz[2] = fsrc->SrcRegister.SwizzleZ;
+ src.swz[3] = fsrc->SrcRegister.SwizzleW;
+ return src;
+}
+
+static INLINE struct nv30_sreg
+tgsi_dst(struct nv30_fpc *fpc, const struct tgsi_full_dst_register *fdst) {
+ int idx;
+
+ switch (fdst->DstRegister.File) {
+ case TGSI_FILE_OUTPUT:
+ if (fdst->DstRegister.Index == fpc->colour_id)
+ return nv30_sr(NV30SR_OUTPUT, 0);
+ else
+ return nv30_sr(NV30SR_OUTPUT, 1);
+ break;
+ case TGSI_FILE_TEMPORARY:
+ idx = fdst->DstRegister.Index + 1;
+ if (fpc->high_temp < idx)
+ fpc->high_temp = idx;
+ return nv30_sr(NV30SR_TEMP, idx);
+ case TGSI_FILE_NULL:
+ return nv30_sr(NV30SR_NONE, 0);
+ default:
+ NOUVEAU_ERR("bad dst file %d\n", fdst->DstRegister.File);
+ return nv30_sr(NV30SR_NONE, 0);
+ }
+}
+
+static INLINE int
+tgsi_mask(uint tgsi)
+{
+ int mask = 0;
+
+ if (tgsi & TGSI_WRITEMASK_X) mask |= MASK_X;
+ if (tgsi & TGSI_WRITEMASK_Y) mask |= MASK_Y;
+ if (tgsi & TGSI_WRITEMASK_Z) mask |= MASK_Z;
+ if (tgsi & TGSI_WRITEMASK_W) mask |= MASK_W;
+ return mask;
+}
+
+static boolean
+src_native_swz(struct nv30_fpc *fpc, const struct tgsi_full_src_register *fsrc,
+ struct nv30_sreg *src)
+{
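+	/* Returns TRUE when the extended swizzle is natively expressible.
+	 * Otherwise an equivalent value is built in a temp (SFL writes the
+	 * zero components, STR the one components, and a multiply by a
+	 * negated one handles per-component negates) and FALSE is returned.
+	 */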
+ const struct nv30_sreg none = nv30_sr(NV30SR_NONE, 0);
+ struct nv30_sreg tgsi = tgsi_src(fpc, fsrc);
+ uint mask = 0, zero_mask = 0, one_mask = 0, neg_mask = 0;
+ uint neg[4] = { fsrc->SrcRegisterExtSwz.NegateX,
+ fsrc->SrcRegisterExtSwz.NegateY,
+ fsrc->SrcRegisterExtSwz.NegateZ,
+ fsrc->SrcRegisterExtSwz.NegateW };
+ uint c;
+
+ for (c = 0; c < 4; c++) {
+ switch (tgsi_util_get_full_src_register_extswizzle(fsrc, c)) {
+ case TGSI_EXTSWIZZLE_X:
+ case TGSI_EXTSWIZZLE_Y:
+ case TGSI_EXTSWIZZLE_Z:
+ case TGSI_EXTSWIZZLE_W:
+ mask |= (1 << c);
+ break;
+ case TGSI_EXTSWIZZLE_ZERO:
+ zero_mask |= (1 << c);
+ tgsi.swz[c] = SWZ_X;
+ break;
+ case TGSI_EXTSWIZZLE_ONE:
+ one_mask |= (1 << c);
+ tgsi.swz[c] = SWZ_X;
+ break;
+ default:
+ assert(0);
+ }
+
+ if (!tgsi.negate && neg[c])
+ neg_mask |= (1 << c);
+ }
+
+ if (mask == MASK_ALL && !neg_mask)
+ return TRUE;
+
+ *src = temp(fpc);
+
+ if (mask)
+ arith(fpc, 0, MOV, *src, mask, tgsi, none, none);
+
+ if (zero_mask)
+ arith(fpc, 0, SFL, *src, zero_mask, *src, none, none);
+
+ if (one_mask)
+ arith(fpc, 0, STR, *src, one_mask, *src, none, none);
+
+ if (neg_mask) {
+ struct nv30_sreg one = temp(fpc);
+ arith(fpc, 0, STR, one, neg_mask, one, none, none);
+ arith(fpc, 0, MUL, *src, neg_mask, *src, neg(one), none);
+ }
+
+ return FALSE;
+}
+
+static boolean
+nv30_fragprog_parse_instruction(struct nv30_fpc *fpc,
+ const struct tgsi_full_instruction *finst)
+{
+ const struct nv30_sreg none = nv30_sr(NV30SR_NONE, 0);
+ struct nv30_sreg src[3], dst, tmp;
+ int mask, sat, unit = 0;
+ int ai = -1, ci = -1;
+ int i;
+
+ if (finst->Instruction.Opcode == TGSI_OPCODE_END)
+ return TRUE;
+
+ fpc->temp_temp_count = 0;
+ for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
+ const struct tgsi_full_src_register *fsrc;
+
+ fsrc = &finst->FullSrcRegisters[i];
+ if (fsrc->SrcRegister.File == TGSI_FILE_TEMPORARY) {
+ src[i] = tgsi_src(fpc, fsrc);
+ }
+ }
+
+ for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
+ const struct tgsi_full_src_register *fsrc;
+
+ fsrc = &finst->FullSrcRegisters[i];
+
+ switch (fsrc->SrcRegister.File) {
+ case TGSI_FILE_INPUT:
+ case TGSI_FILE_CONSTANT:
+ case TGSI_FILE_TEMPORARY:
+ if (!src_native_swz(fpc, fsrc, &src[i]))
+ continue;
+ break;
+ default:
+ break;
+ }
+
+ switch (fsrc->SrcRegister.File) {
+ case TGSI_FILE_INPUT:
+ if (ai == -1 || ai == fsrc->SrcRegister.Index) {
+ ai = fsrc->SrcRegister.Index;
+ src[i] = tgsi_src(fpc, fsrc);
+ } else {
+ NOUVEAU_MSG("extra src attr %d\n",
+ fsrc->SrcRegister.Index);
+ src[i] = temp(fpc);
+ arith(fpc, 0, MOV, src[i], MASK_ALL,
+ tgsi_src(fpc, fsrc), none, none);
+ }
+ break;
+ case TGSI_FILE_CONSTANT:
+ case TGSI_FILE_IMMEDIATE:
+ if (ci == -1 || ci == fsrc->SrcRegister.Index) {
+ ci = fsrc->SrcRegister.Index;
+ src[i] = tgsi_src(fpc, fsrc);
+ } else {
+ src[i] = temp(fpc);
+ arith(fpc, 0, MOV, src[i], MASK_ALL,
+ tgsi_src(fpc, fsrc), none, none);
+ }
+ break;
+ case TGSI_FILE_TEMPORARY:
+ /* handled above */
+ break;
+ case TGSI_FILE_SAMPLER:
+ unit = fsrc->SrcRegister.Index;
+ break;
+ case TGSI_FILE_OUTPUT:
+ break;
+ default:
+ NOUVEAU_ERR("bad src file\n");
+ return FALSE;
+ }
+ }
+
+ dst = tgsi_dst(fpc, &finst->FullDstRegisters[0]);
+ mask = tgsi_mask(finst->FullDstRegisters[0].DstRegister.WriteMask);
+ sat = (finst->Instruction.Saturate == TGSI_SAT_ZERO_ONE);
+
+ switch (finst->Instruction.Opcode) {
+ case TGSI_OPCODE_ABS:
+ arith(fpc, sat, MOV, dst, mask, abs(src[0]), none, none);
+ break;
+ case TGSI_OPCODE_ADD:
+ arith(fpc, sat, ADD, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_CMP:
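+		/* dst = (src0 < 0) ? src1 : src2, done with condition codes:
+		 * write src2, load the CC register from src0, then
+		 * conditionally overwrite with src1 where the LT test passes.
+		 */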
+ tmp = temp(fpc);
+ arith(fpc, sat, MOV, dst, mask, src[2], none, none);
+ tmp.cc_update = 1;
+ arith(fpc, 0, MOV, tmp, 0xf, src[0], none, none);
+ dst.cc_test = NV30_VP_INST_COND_LT;
+ arith(fpc, sat, MOV, dst, mask, src[1], none, none);
+ break;
+ case TGSI_OPCODE_COS:
+ arith(fpc, sat, COS, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_DP3:
+ arith(fpc, sat, DP3, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_DP4:
+ arith(fpc, sat, DP4, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_DPH:
+ tmp = temp(fpc);
+ arith(fpc, 0, DP3, tmp, MASK_X, src[0], src[1], none);
+ arith(fpc, sat, ADD, dst, mask, swz(tmp, X, X, X, X),
+ swz(src[1], W, W, W, W), none);
+ break;
+ case TGSI_OPCODE_DST:
+ arith(fpc, sat, DST, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_EX2:
+ arith(fpc, sat, EX2, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_FLR:
+ arith(fpc, sat, FLR, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_FRC:
+ arith(fpc, sat, FRC, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_KILP:
+ arith(fpc, 0, KIL, none, 0, none, none, none);
+ break;
+ case TGSI_OPCODE_KIL:
+ dst = nv30_sr(NV30SR_NONE, 0);
+ dst.cc_update = 1;
+ arith(fpc, 0, MOV, dst, MASK_ALL, src[0], none, none);
+ dst.cc_update = 0; dst.cc_test = NV30_FP_OP_COND_LT;
+ arith(fpc, 0, KIL, dst, 0, none, none, none);
+ break;
+ case TGSI_OPCODE_LG2:
+ arith(fpc, sat, LG2, dst, mask, src[0], none, none);
+ break;
+// case TGSI_OPCODE_LIT:
+ case TGSI_OPCODE_LRP:
+ arith(fpc, sat, LRP, dst, mask, src[0], src[1], src[2]);
+ break;
+ case TGSI_OPCODE_MAD:
+ arith(fpc, sat, MAD, dst, mask, src[0], src[1], src[2]);
+ break;
+ case TGSI_OPCODE_MAX:
+ arith(fpc, sat, MAX, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_MIN:
+ arith(fpc, sat, MIN, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_MOV:
+ arith(fpc, sat, MOV, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_MUL:
+ arith(fpc, sat, MUL, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_NOISE1:
+ case TGSI_OPCODE_NOISE2:
+ case TGSI_OPCODE_NOISE3:
+ case TGSI_OPCODE_NOISE4:
+ arith(fpc, sat, SFL, dst, mask, none, none, none);
+ break;
+ case TGSI_OPCODE_POW:
+ arith(fpc, sat, POW, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_RCP:
+ arith(fpc, sat, RCP, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_RET:
+ assert(0);
+ break;
+ case TGSI_OPCODE_RFL:
+ arith(fpc, 0, RFL, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_RSQ:
+ arith(fpc, sat, RSQ, dst, mask, abs(swz(src[0], X, X, X, X)), none, none);
+ break;
+ case TGSI_OPCODE_SCS:
+ if (mask & MASK_X) {
+ arith(fpc, sat, COS, dst, MASK_X,
+ swz(src[0], X, X, X, X), none, none);
+ }
+ if (mask & MASK_Y) {
+ arith(fpc, sat, SIN, dst, MASK_Y,
+ swz(src[0], X, X, X, X), none, none);
+ }
+ break;
+ case TGSI_OPCODE_SIN:
+ arith(fpc, sat, SIN, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_SGE:
+ arith(fpc, sat, SGE, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SGT:
+ arith(fpc, sat, SGT, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SLT:
+ arith(fpc, sat, SLT, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SUB:
+ arith(fpc, sat, ADD, dst, mask, src[0], neg(src[1]), none);
+ break;
+ case TGSI_OPCODE_TEX:
+ tex(fpc, sat, TEX, unit, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_TXB:
+ tex(fpc, sat, TXB, unit, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_TXP:
+ tex(fpc, sat, TXP, unit, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_XPD:
+ tmp = temp(fpc);
+ arith(fpc, 0, MUL, tmp, mask,
+ swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none);
+ arith(fpc, sat, MAD, dst, (mask & ~MASK_W),
+ swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y),
+ neg(tmp));
+ break;
+ default:
+ NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static boolean
+nv30_fragprog_parse_decl_attrib(struct nv30_fpc *fpc,
+ const struct tgsi_full_declaration *fdec)
+{
+ int hw;
+
+ switch (fdec->Semantic.SemanticName) {
+ case TGSI_SEMANTIC_POSITION:
+ hw = NV30_FP_OP_INPUT_SRC_POSITION;
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ if (fdec->Semantic.SemanticIndex == 0) {
+ hw = NV30_FP_OP_INPUT_SRC_COL0;
+ } else
+ if (fdec->Semantic.SemanticIndex == 1) {
+ hw = NV30_FP_OP_INPUT_SRC_COL1;
+ } else {
+ NOUVEAU_ERR("bad colour semantic index\n");
+ return FALSE;
+ }
+ break;
+ case TGSI_SEMANTIC_FOG:
+ hw = NV30_FP_OP_INPUT_SRC_FOGC;
+ break;
+ case TGSI_SEMANTIC_GENERIC:
+ if (fdec->Semantic.SemanticIndex <= 7) {
+ hw = NV30_FP_OP_INPUT_SRC_TC(fdec->Semantic.
+ SemanticIndex);
+ } else {
+ NOUVEAU_ERR("bad generic semantic index\n");
+ return FALSE;
+ }
+ break;
+ default:
+ NOUVEAU_ERR("bad input semantic\n");
+ return FALSE;
+ }
+
+ fpc->attrib_map[fdec->DeclarationRange.First] = hw;
+ return TRUE;
+}
+
+static boolean
+nv30_fragprog_parse_decl_output(struct nv30_fpc *fpc,
+ const struct tgsi_full_declaration *fdec)
+{
+ switch (fdec->Semantic.SemanticName) {
+ case TGSI_SEMANTIC_POSITION:
+ fpc->depth_id = fdec->DeclarationRange.First;
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ fpc->colour_id = fdec->DeclarationRange.First;
+ break;
+ default:
+ NOUVEAU_ERR("bad output semantic\n");
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static boolean
+nv30_fragprog_prepare(struct nv30_fpc *fpc)
+{
+ struct tgsi_parse_context p;
+ /*int high_temp = -1, i;*/
+
+ tgsi_parse_init(&p, fpc->fp->pipe.tokens);
+ while (!tgsi_parse_end_of_tokens(&p)) {
+ const union tgsi_full_token *tok = &p.FullToken;
+
+ tgsi_parse_token(&p);
+ switch(tok->Token.Type) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ {
+ const struct tgsi_full_declaration *fdec;
+ fdec = &p.FullToken.FullDeclaration;
+ switch (fdec->Declaration.File) {
+ case TGSI_FILE_INPUT:
+ if (!nv30_fragprog_parse_decl_attrib(fpc, fdec))
+ goto out_err;
+ break;
+ case TGSI_FILE_OUTPUT:
+ if (!nv30_fragprog_parse_decl_output(fpc, fdec))
+ goto out_err;
+ break;
+ /*case TGSI_FILE_TEMPORARY:
+ if (fdec->DeclarationRange.Last > high_temp) {
+ high_temp =
+ fdec->DeclarationRange.Last;
+ }
+ break;*/
+ default:
+ break;
+ }
+ }
+ break;
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ {
+ struct tgsi_full_immediate *imm;
+ float vals[4];
+
+ imm = &p.FullToken.FullImmediate;
+ assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
+ assert(fpc->nr_imm < MAX_IMM);
+
+ vals[0] = imm->u[0].Float;
+ vals[1] = imm->u[1].Float;
+ vals[2] = imm->u[2].Float;
+ vals[3] = imm->u[3].Float;
+ fpc->imm[fpc->nr_imm++] = constant(fpc, -1, vals);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ tgsi_parse_free(&p);
+
+ /*if (++high_temp) {
+ fpc->r_temp = CALLOC(high_temp, sizeof(struct nv30_sreg));
+ for (i = 0; i < high_temp; i++)
+ fpc->r_temp[i] = temp(fpc);
+ fpc->r_temps_discard = 0;
+ }*/
+
+ return TRUE;
+
+out_err:
+ /*if (fpc->r_temp)
+ FREE(fpc->r_temp);*/
+ tgsi_parse_free(&p);
+ return FALSE;
+}
+
+static void
+nv30_fragprog_translate(struct nv30_context *nv30,
+ struct nv30_fragment_program *fp)
+{
+ struct tgsi_parse_context parse;
+ struct nv30_fpc *fpc = NULL;
+
+	tgsi_dump(fp->pipe.tokens, 0);
+
+ fpc = CALLOC(1, sizeof(struct nv30_fpc));
+ if (!fpc)
+ return;
+ fpc->fp = fp;
+ fpc->high_temp = -1;
+ fpc->num_regs = 2;
+
+ if (!nv30_fragprog_prepare(fpc)) {
+ FREE(fpc);
+ return;
+ }
+
+ tgsi_parse_init(&parse, fp->pipe.tokens);
+
+ while (!tgsi_parse_end_of_tokens(&parse)) {
+ tgsi_parse_token(&parse);
+
+ switch (parse.FullToken.Token.Type) {
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ {
+ const struct tgsi_full_instruction *finst;
+
+ finst = &parse.FullToken.FullInstruction;
+ if (!nv30_fragprog_parse_instruction(fpc, finst))
+ goto out_err;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ fp->fp_control |= (fpc->num_regs-1)/2;
+ fp->fp_reg_control = (1<<16)|0x4;
+
+ /* Terminate final instruction */
+ fp->insn[fpc->inst_offset] |= 0x00000001;
+
+ /* Append NOP + END instruction, may or may not be necessary. */
+ fpc->inst_offset = fp->insn_len;
+ grow_insns(fpc, 4);
+ fp->insn[fpc->inst_offset + 0] = 0x00000001;
+ fp->insn[fpc->inst_offset + 1] = 0x00000000;
+ fp->insn[fpc->inst_offset + 2] = 0x00000000;
+ fp->insn[fpc->inst_offset + 3] = 0x00000000;
+
+ fp->translated = TRUE;
+ fp->on_hw = FALSE;
+out_err:
+ tgsi_parse_free(&parse);
+ FREE(fpc);
+}
+
+static void
+nv30_fragprog_upload(struct nv30_context *nv30,
+ struct nv30_fragment_program *fp)
+{
+ struct pipe_screen *pscreen = nv30->pipe.screen;
+ const uint32_t le = 1;
+ uint32_t *map;
+ int i;
+
+ map = pipe_buffer_map(pscreen, fp->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
+
+#if 0
+ for (i = 0; i < fp->insn_len; i++) {
+ fflush(stdout); fflush(stderr);
+ NOUVEAU_ERR("%d 0x%08x\n", i, fp->insn[i]);
+ fflush(stdout); fflush(stderr);
+ }
+#endif
+
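+	/* Runtime endianness check: little-endian hosts copy the instruction
+	 * words verbatim, big-endian hosts halfword-swap each one.
+	 */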
+ if ((*(const uint8_t *)&le)) {
+ for (i = 0; i < fp->insn_len; i++) {
+ map[i] = fp->insn[i];
+ }
+ } else {
+ /* Weird swapping for big-endian chips */
+ for (i = 0; i < fp->insn_len; i++) {
+ map[i] = ((fp->insn[i] & 0xffff) << 16) |
+ ((fp->insn[i] >> 16) & 0xffff);
+ }
+ }
+
+ pipe_buffer_unmap(pscreen, fp->buffer);
+}
+
+static boolean
+nv30_fragprog_validate(struct nv30_context *nv30)
+{
+ struct nv30_fragment_program *fp = nv30->fragprog;
+ struct pipe_buffer *constbuf =
+ nv30->constbuf[PIPE_SHADER_FRAGMENT];
+ struct pipe_screen *pscreen = nv30->pipe.screen;
+ struct nouveau_stateobj *so;
+ boolean new_consts = FALSE;
+ int i;
+
+ if (fp->translated)
+ goto update_constants;
+
+ /*nv30->fallback_swrast &= ~NV30_NEW_FRAGPROG;*/
+ nv30_fragprog_translate(nv30, fp);
+ if (!fp->translated) {
+ /*nv30->fallback_swrast |= NV30_NEW_FRAGPROG;*/
+ return FALSE;
+ }
+
+ fp->buffer = pscreen->buffer_create(pscreen, 0x100, 0, fp->insn_len * 4);
+ nv30_fragprog_upload(nv30, fp);
+
+ so = so_new(8, 1);
+ so_method(so, nv30->screen->rankine, NV34TCL_FP_ACTIVE_PROGRAM, 1);
+ so_reloc (so, nouveau_bo(fp->buffer), 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_GART | NOUVEAU_BO_RD | NOUVEAU_BO_LOW |
+ NOUVEAU_BO_OR, NV34TCL_FP_ACTIVE_PROGRAM_DMA0,
+ NV34TCL_FP_ACTIVE_PROGRAM_DMA1);
+ so_method(so, nv30->screen->rankine, NV34TCL_FP_CONTROL, 1);
+ so_data (so, fp->fp_control);
+ so_method(so, nv30->screen->rankine, NV34TCL_FP_REG_CONTROL, 1);
+ so_data (so, fp->fp_reg_control);
+ so_method(so, nv30->screen->rankine, NV34TCL_TX_UNITS_ENABLE, 1);
+ so_data (so, fp->samplers);
+ so_ref(so, &fp->so);
+ so_ref(NULL, &so);
+
+update_constants:
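+ /* Fragprog constants live inline in the instruction stream, so patch
+ * the copies in fp->insn from the bound constant buffer and re-upload
+ * the program only if something actually changed. */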
+ if (fp->nr_consts) {
+ float *map;
+
+ map = pipe_buffer_map(pscreen, constbuf,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ for (i = 0; i < fp->nr_consts; i++) {
+ struct nv30_fragment_program_data *fpd = &fp->consts[i];
+ uint32_t *p = &fp->insn[fpd->offset];
+ uint32_t *cb = (uint32_t *)&map[fpd->index * 4];
+
+ if (!memcmp(p, cb, 4 * sizeof(float)))
+ continue;
+ memcpy(p, cb, 4 * sizeof(float));
+ new_consts = TRUE;
+ }
+ pipe_buffer_unmap(pscreen, constbuf);
+
+ if (new_consts)
+ nv30_fragprog_upload(nv30, fp);
+ }
+
+ if (new_consts || fp->so != nv30->state.hw[NV30_STATE_FRAGPROG]) {
+ so_ref(fp->so, &nv30->state.hw[NV30_STATE_FRAGPROG]);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+void
+nv30_fragprog_destroy(struct nv30_context *nv30,
+ struct nv30_fragment_program *fp)
+{
+ if (fp->insn_len)
+ FREE(fp->insn);
+}
+
+struct nv30_state_entry nv30_state_fragprog = {
+ .validate = nv30_fragprog_validate,
+ .dirty = {
+ .pipe = NV30_NEW_FRAGPROG,
+ .hw = NV30_STATE_FRAGPROG
+ }
+};
diff --git a/src/gallium/drivers/nv30/nv30_fragtex.c b/src/gallium/drivers/nv30/nv30_fragtex.c
new file mode 100644
index 0000000000..822e1d8def
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_fragtex.c
@@ -0,0 +1,166 @@
+#include "nv30_context.h"
+#include "nouveau/nouveau_util.h"
+
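+/* Table-entry helper: maps a Gallium PIPE_FORMAT to the corresponding NV34
+ * texture format and the two swizzle words that route the sampled
+ * components. */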
+#define _(m,tf,ts0x,ts0y,ts0z,ts0w,ts1x,ts1y,ts1z,ts1w) \
+{ \
+ TRUE, \
+ PIPE_FORMAT_##m, \
+ NV34TCL_TX_FORMAT_FORMAT_##tf, \
+ (NV34TCL_TX_SWIZZLE_S0_X_##ts0x | NV34TCL_TX_SWIZZLE_S0_Y_##ts0y | \
+ NV34TCL_TX_SWIZZLE_S0_Z_##ts0z | NV34TCL_TX_SWIZZLE_S0_W_##ts0w | \
+ NV34TCL_TX_SWIZZLE_S1_X_##ts1x | NV34TCL_TX_SWIZZLE_S1_Y_##ts1y | \
+ NV34TCL_TX_SWIZZLE_S1_Z_##ts1z | NV34TCL_TX_SWIZZLE_S1_W_##ts1w) \
+}
+
+struct nv30_texture_format {
+ boolean defined;
+ uint pipe;
+ int format;
+ int swizzle;
+};
+
+static struct nv30_texture_format
+nv30_texture_formats[] = {
+ _(A8R8G8B8_UNORM, A8R8G8B8, S1, S1, S1, S1, X, Y, Z, W),
+ _(A1R5G5B5_UNORM, A1R5G5B5, S1, S1, S1, S1, X, Y, Z, W),
+ _(A4R4G4B4_UNORM, A4R4G4B4, S1, S1, S1, S1, X, Y, Z, W),
+ _(R5G6B5_UNORM , R5G6B5 , S1, S1, S1, ONE, X, Y, Z, W),
+ _(L8_UNORM , L8 , S1, S1, S1, ONE, X, X, X, X),
+ _(A8_UNORM , L8 , ZERO, ZERO, ZERO, S1, X, X, X, X),
+ _(I8_UNORM , L8 , S1, S1, S1, S1, X, X, X, X),
+ _(A8L8_UNORM , A8L8 , S1, S1, S1, S1, X, X, X, Y),
+// _(Z16_UNORM , Z16 , S1, S1, S1, ONE, X, X, X, X),
+// _(Z24S8_UNORM , Z24 , S1, S1, S1, ONE, X, X, X, X),
+ _(DXT1_RGB , DXT1 , S1, S1, S1, ONE, X, Y, Z, W),
+ _(DXT1_RGBA , DXT1 , S1, S1, S1, S1, X, Y, Z, W),
+ _(DXT3_RGBA , DXT3 , S1, S1, S1, S1, X, Y, Z, W),
+ _(DXT5_RGBA , DXT5 , S1, S1, S1, S1, X, Y, Z, W),
+ {},
+};
+
+static struct nv30_texture_format *
+nv30_fragtex_format(uint pipe_format)
+{
+ struct nv30_texture_format *tf = nv30_texture_formats;
+
+ while (tf->defined) {
+ if (tf->pipe == pipe_format)
+ return tf;
+ tf++;
+ }
+
+ NOUVEAU_ERR("unknown texture format %s\n", pf_name(pipe_format));
+ return NULL;
+}
+
+
+static struct nouveau_stateobj *
+nv30_fragtex_build(struct nv30_context *nv30, int unit)
+{
+ struct nv30_sampler_state *ps = nv30->tex_sampler[unit];
+ struct nv30_miptree *nv30mt = nv30->tex_miptree[unit];
+ struct pipe_texture *pt = &nv30mt->base;
+ struct nouveau_bo *bo = nouveau_bo(nv30mt->buffer);
+ struct nv30_texture_format *tf;
+ struct nouveau_stateobj *so;
+ uint32_t txf, txs, txp;
+ unsigned tex_flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD;
+
+ tf = nv30_fragtex_format(pt->format);
+ if (!tf)
+ assert(0);
+
+ txf = tf->format;
+ txf |= ((pt->last_level>0) ? NV34TCL_TX_FORMAT_MIPMAP : 0);
+ txf |= log2i(pt->width[0]) << 20;
+ txf |= log2i(pt->height[0]) << 24;
+ txf |= log2i(pt->depth[0]) << 28;
+ txf |= NV34TCL_TX_FORMAT_NO_BORDER | 0x10000;
+
+ switch (pt->target) {
+ case PIPE_TEXTURE_CUBE:
+ txf |= NV34TCL_TX_FORMAT_CUBIC;
+ /* fall-through */
+ case PIPE_TEXTURE_2D:
+ txf |= NV34TCL_TX_FORMAT_DIMS_2D;
+ break;
+ case PIPE_TEXTURE_3D:
+ txf |= NV34TCL_TX_FORMAT_DIMS_3D;
+ break;
+ case PIPE_TEXTURE_1D:
+ txf |= NV34TCL_TX_FORMAT_DIMS_1D;
+ break;
+ default:
+ NOUVEAU_ERR("Unknown target %d\n", pt->target);
+ return NULL;
+ }
+
+ if (!(pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)) {
+ txp = 0;
+ } else {
+ txp = nv30mt->level[0].pitch;
+ txf |= (1<<13) /*FIXME: NV34TCL_TX_FORMAT_LINEAR ? */;
+ }
+
+ txs = tf->swizzle;
+
+ so = so_new(16, 2);
+ so_method(so, nv30->screen->rankine, NV34TCL_TX_OFFSET(unit), 8);
+ so_reloc (so, bo, 0, tex_flags | NOUVEAU_BO_LOW, 0, 0);
+ so_reloc (so, bo, txf, tex_flags | NOUVEAU_BO_OR,
+ NV34TCL_TX_FORMAT_DMA0, NV34TCL_TX_FORMAT_DMA1);
+ so_data (so, ps->wrap);
+ so_data (so, NV34TCL_TX_ENABLE_ENABLE | ps->en);
+ so_data (so, txs);
+ so_data (so, ps->filt | 0x2000 /*voodoo*/);
+ so_data (so, (pt->width[0] << NV34TCL_TX_NPOT_SIZE_W_SHIFT) |
+ pt->height[0]);
+ so_data (so, ps->bcol);
+
+ return so;
+}
+
+static boolean
+nv30_fragtex_validate(struct nv30_context *nv30)
+{
+ struct nv30_fragment_program *fp = nv30->fragprog;
+ struct nv30_state *state = &nv30->state;
+ struct nouveau_stateobj *so;
+ unsigned samplers, unit;
+
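+ /* Disable any texture units that were enabled for the previous
+ * fragprog but are not referenced by the current one. */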
+ samplers = state->fp_samplers & ~fp->samplers;
+ while (samplers) {
+ unit = ffs(samplers) - 1;
+ samplers &= ~(1 << unit);
+
+ so = so_new(2, 0);
+ so_method(so, nv30->screen->rankine, NV34TCL_TX_ENABLE(unit), 1);
+ so_data (so, 0);
+ so_ref(so, &nv30->state.hw[NV30_STATE_FRAGTEX0 + unit]);
+ so_ref(NULL, &so);
+ state->dirty |= (1ULL << (NV30_STATE_FRAGTEX0 + unit));
+ }
+
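+ /* (Re)build texture state for the dirty samplers that the current
+ * fragprog actually references. */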
+ samplers = nv30->dirty_samplers & fp->samplers;
+ while (samplers) {
+ unit = ffs(samplers) - 1;
+ samplers &= ~(1 << unit);
+
+ so = nv30_fragtex_build(nv30, unit);
+ so_ref(so, &nv30->state.hw[NV30_STATE_FRAGTEX0 + unit]);
+ so_ref(NULL, &so);
+ state->dirty |= (1ULL << (NV30_STATE_FRAGTEX0 + unit));
+ }
+
+ nv30->state.fp_samplers = fp->samplers;
+ return FALSE;
+}
+
+struct nv30_state_entry nv30_state_fragtex = {
+ .validate = nv30_fragtex_validate,
+ .dirty = {
+ .pipe = NV30_NEW_SAMPLER | NV30_NEW_FRAGPROG,
+ .hw = 0
+ }
+};
diff --git a/src/gallium/drivers/nv30/nv30_miptree.c b/src/gallium/drivers/nv30/nv30_miptree.c
new file mode 100644
index 0000000000..7f8054de73
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_miptree.c
@@ -0,0 +1,213 @@
+#include "pipe/p_state.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+
+#include "nv30_context.h"
+
+static void
+nv30_miptree_layout(struct nv30_miptree *nv30mt)
+{
+ struct pipe_texture *pt = &nv30mt->base;
+ uint width = pt->width[0], height = pt->height[0], depth = pt->depth[0];
+ uint offset = 0;
+ int nr_faces, l, f;
+ uint wide_pitch = pt->tex_usage & (PIPE_TEXTURE_USAGE_SAMPLER |
+ PIPE_TEXTURE_USAGE_DEPTH_STENCIL |
+ PIPE_TEXTURE_USAGE_RENDER_TARGET |
+ PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
+ PIPE_TEXTURE_USAGE_PRIMARY);
+
+ if (pt->target == PIPE_TEXTURE_CUBE) {
+ nr_faces = 6;
+ } else
+ if (pt->target == PIPE_TEXTURE_3D) {
+ nr_faces = pt->depth[0];
+ } else {
+ nr_faces = 1;
+ }
+
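+ /* First pass: record per-level dimensions and compute each level's
+ * pitch. Textures marked linear (and used for sampling/rendering)
+ * reuse the 64-byte-aligned pitch of the base level; otherwise the
+ * pitch is just the level's row size. */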
+ for (l = 0; l <= pt->last_level; l++) {
+ pt->width[l] = width;
+ pt->height[l] = height;
+ pt->depth[l] = depth;
+ pt->nblocksx[l] = pf_get_nblocksx(&pt->block, width);
+ pt->nblocksy[l] = pf_get_nblocksy(&pt->block, height);
+
+ if (wide_pitch && (pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR))
+ nv30mt->level[l].pitch = align(pt->width[0] * pt->block.size, 64);
+ else
+ nv30mt->level[l].pitch = pt->width[l] * pt->block.size;
+
+ nv30mt->level[l].image_offset =
+ CALLOC(nr_faces, sizeof(unsigned));
+
+ width = MAX2(1, width >> 1);
+ height = MAX2(1, height >> 1);
+ depth = MAX2(1, depth >> 1);
+ }
+
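+ /* Second pass: lay out each face's mip chain contiguously. For
+ * swizzled (non-linear) textures, every level except the smallest
+ * ones is padded to a 64-byte boundary. */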
+ for (f = 0; f < nr_faces; f++) {
+ for (l = 0; l < pt->last_level; l++) {
+ nv30mt->level[l].image_offset[f] = offset;
+
+ if (!(pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR) &&
+ pt->width[l + 1] > 1 && pt->height[l + 1] > 1)
+ offset += align(nv30mt->level[l].pitch * pt->height[l], 64);
+ else
+ offset += nv30mt->level[l].pitch * pt->height[l];
+ }
+
+ nv30mt->level[l].image_offset[f] = offset;
+ offset += nv30mt->level[l].pitch * pt->height[l];
+ }
+
+ nv30mt->total_size = offset;
+}
+
+static struct pipe_texture *
+nv30_miptree_create(struct pipe_screen *pscreen, const struct pipe_texture *pt)
+{
+ struct nv30_miptree *mt;
+ unsigned buf_usage = PIPE_BUFFER_USAGE_PIXEL |
+ NOUVEAU_BUFFER_USAGE_TEXTURE;
+
+ mt = MALLOC(sizeof(struct nv30_miptree));
+ if (!mt)
+ return NULL;
+ mt->base = *pt;
+ pipe_reference_init(&mt->base.reference, 1);
+ mt->base.screen = pscreen;
+
+ /* Swizzled textures must be POT */
+ if (pt->width[0] & (pt->width[0] - 1) ||
+ pt->height[0] & (pt->height[0] - 1))
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ else
+ if (pt->tex_usage & (PIPE_TEXTURE_USAGE_PRIMARY |
+ PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
+ PIPE_TEXTURE_USAGE_DEPTH_STENCIL))
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ else
+ if (pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC)
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ else {
+ switch (pt->format) {
+ /* TODO: Figure out which formats can be swizzled */
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_X8R8G8B8_UNORM:
+ case PIPE_FORMAT_R16_SNORM:
+ {
+ if (debug_get_bool_option("NOUVEAU_NO_SWIZZLE", FALSE))
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ break;
+ }
+ default:
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ }
+ }
+
+ if (pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC)
+ buf_usage |= PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+
+ nv30_miptree_layout(mt);
+
+ mt->buffer = pscreen->buffer_create(pscreen, 256, buf_usage,
+ mt->total_size);
+ if (!mt->buffer) {
+ FREE(mt);
+ return NULL;
+ }
+
+ return &mt->base;
+}
+
+static struct pipe_texture *
+nv30_miptree_blanket(struct pipe_screen *pscreen, const struct pipe_texture *pt,
+ const unsigned *stride, struct pipe_buffer *pb)
+{
+ struct nv30_miptree *mt;
+
+ /* Only supports 2D, non-mipmapped textures for the moment */
+ if (pt->target != PIPE_TEXTURE_2D || pt->last_level != 0 ||
+ pt->depth[0] != 1)
+ return NULL;
+
+ mt = CALLOC_STRUCT(nv30_miptree);
+ if (!mt)
+ return NULL;
+
+ mt->base = *pt;
+ pipe_reference_init(&mt->base.reference, 1);
+ mt->base.screen = pscreen;
+ mt->level[0].pitch = stride[0];
+ mt->level[0].image_offset = CALLOC(1, sizeof(unsigned));
+
+ pipe_buffer_reference(&mt->buffer, pb);
+ return &mt->base;
+}
+
+static void
+nv30_miptree_destroy(struct pipe_texture *pt)
+{
+ struct nv30_miptree *mt = (struct nv30_miptree *)pt;
+ int l;
+
+ pipe_buffer_reference(&mt->buffer, NULL);
+ for (l = 0; l <= pt->last_level; l++) {
+ if (mt->level[l].image_offset)
+ FREE(mt->level[l].image_offset);
+ }
+
+ FREE(mt);
+}
+
+static struct pipe_surface *
+nv30_miptree_surface_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ unsigned flags)
+{
+ struct nv30_miptree *nv30mt = (struct nv30_miptree *)pt;
+ struct nv04_surface *ns;
+
+ ns = CALLOC_STRUCT(nv04_surface);
+ if (!ns)
+ return NULL;
+ pipe_texture_reference(&ns->base.texture, pt);
+ ns->base.format = pt->format;
+ ns->base.width = pt->width[level];
+ ns->base.height = pt->height[level];
+ ns->base.usage = flags;
+ pipe_reference_init(&ns->base.reference, 1);
+ ns->base.face = face;
+ ns->base.level = level;
+ ns->base.zslice = zslice;
+ ns->pitch = nv30mt->level[level].pitch;
+
+ if (pt->target == PIPE_TEXTURE_CUBE) {
+ ns->base.offset = nv30mt->level[level].image_offset[face];
+ } else
+ if (pt->target == PIPE_TEXTURE_3D) {
+ ns->base.offset = nv30mt->level[level].image_offset[zslice];
+ } else {
+ ns->base.offset = nv30mt->level[level].image_offset[0];
+ }
+
+ return &ns->base;
+}
+
+static void
+nv30_miptree_surface_del(struct pipe_surface *ps)
+{
+ pipe_texture_reference(&ps->texture, NULL);
+ FREE(ps);
+}
+
+void
+nv30_screen_init_miptree_functions(struct pipe_screen *pscreen)
+{
+ pscreen->texture_create = nv30_miptree_create;
+ pscreen->texture_blanket = nv30_miptree_blanket;
+ pscreen->texture_destroy = nv30_miptree_destroy;
+ pscreen->get_tex_surface = nv30_miptree_surface_new;
+ pscreen->tex_surface_destroy = nv30_miptree_surface_del;
+}
diff --git a/src/gallium/drivers/nv30/nv30_query.c b/src/gallium/drivers/nv30/nv30_query.c
new file mode 100644
index 0000000000..1d1c8a484e
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_query.c
@@ -0,0 +1,121 @@
+#include "pipe/p_context.h"
+
+#include "nv30_context.h"
+
+struct nv30_query {
+ struct nouveau_resource *object;
+ unsigned type;
+ boolean ready;
+ uint64_t result;
+};
+
+static INLINE struct nv30_query *
+nv30_query(struct pipe_query *pipe)
+{
+ return (struct nv30_query *)pipe;
+}
+
+static struct pipe_query *
+nv30_query_create(struct pipe_context *pipe, unsigned query_type)
+{
+ struct nv30_query *q;
+
+ q = CALLOC(1, sizeof(struct nv30_query));
+ q->type = query_type;
+
+ return (struct pipe_query *)q;
+}
+
+static void
+nv30_query_destroy(struct pipe_context *pipe, struct pipe_query *pq)
+{
+ struct nv30_query *q = nv30_query(pq);
+
+ if (q->object)
+ nouveau_resource_free(&q->object);
+ FREE(q);
+}
+
+static void
+nv30_query_begin(struct pipe_context *pipe, struct pipe_query *pq)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+ struct nv30_query *q = nv30_query(pq);
+
+ assert(q->type == PIPE_QUERY_OCCLUSION_COUNTER);
+
+ /* This happens when end_query() is called and then another begin_query()
+ * without fetching the result in between. For now we'll wait for
+ * the existing query to notify completion, but it could be better.
+ */
+ if (q->object) {
+ uint64_t tmp;
+ pipe->get_query_result(pipe, pq, 1, &tmp);
+ }
+
+ if (nouveau_resource_alloc(nv30->screen->query_heap, 1, NULL, &q->object))
+ assert(0);
+ nouveau_notifier_reset(nv30->screen->query, q->object->start);
+
+ BEGIN_RING(rankine, NV34TCL_QUERY_RESET, 1);
+ OUT_RING (1);
+ BEGIN_RING(rankine, NV34TCL_QUERY_UNK17CC, 1);
+ OUT_RING (1);
+
+ q->ready = FALSE;
+}
+
+static void
+nv30_query_end(struct pipe_context *pipe, struct pipe_query *pq)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+ struct nv30_query *q = nv30_query(pq);
+
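+ /* Ask the GPU to write the accumulated sample count into this query's
+ * notifier slot (slots appear to be 32 bytes apart, hence the *32)
+ * and kick the pushbuf so the request reaches the hardware. */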
+ BEGIN_RING(rankine, NV34TCL_QUERY_GET, 1);
+ OUT_RING ((0x01 << NV34TCL_QUERY_GET_UNK24_SHIFT) |
+ ((q->object->start * 32) << NV34TCL_QUERY_GET_OFFSET_SHIFT));
+ FIRE_RING(NULL);
+}
+
+static boolean
+nv30_query_result(struct pipe_context *pipe, struct pipe_query *pq,
+ boolean wait, uint64_t *result)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+ struct nv30_query *q = nv30_query(pq);
+
+ assert(q->object && q->type == PIPE_QUERY_OCCLUSION_COUNTER);
+
+ if (!q->ready) {
+ unsigned status;
+
+ status = nouveau_notifier_status(nv30->screen->query,
+ q->object->start);
+ if (status != NV_NOTIFY_STATE_STATUS_COMPLETED) {
+ if (wait == FALSE)
+ return FALSE;
+
+ nouveau_notifier_wait_status(nv30->screen->query,
+ q->object->start,
+ NV_NOTIFY_STATE_STATUS_COMPLETED, 0);
+ }
+
+ q->result = nouveau_notifier_return_val(nv30->screen->query,
+ q->object->start);
+ q->ready = TRUE;
+ nouveau_resource_free(&q->object);
+ }
+
+ *result = q->result;
+ return TRUE;
+}
+
+void
+nv30_init_query_functions(struct nv30_context *nv30)
+{
+ nv30->pipe.create_query = nv30_query_create;
+ nv30->pipe.destroy_query = nv30_query_destroy;
+ nv30->pipe.begin_query = nv30_query_begin;
+ nv30->pipe.end_query = nv30_query_end;
+ nv30->pipe.get_query_result = nv30_query_result;
+}
diff --git a/src/gallium/drivers/nv30/nv30_screen.c b/src/gallium/drivers/nv30/nv30_screen.c
new file mode 100644
index 0000000000..f8285e4455
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_screen.c
@@ -0,0 +1,324 @@
+#include "pipe/p_screen.h"
+#include "pipe/p_state.h"
+
+#include "nouveau/nouveau_screen.h"
+
+#include "nv30_context.h"
+#include "nv30_screen.h"
+
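+/* Bitfields over the low nibble of the chipset ID: each mask selects which
+ * nv3x revisions get which 3D object (rankine) class in
+ * nv30_screen_create(). */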
+#define NV30TCL_CHIPSET_3X_MASK 0x00000003
+#define NV34TCL_CHIPSET_3X_MASK 0x00000010
+#define NV35TCL_CHIPSET_3X_MASK 0x000001e0
+
+static int
+nv30_screen_get_param(struct pipe_screen *pscreen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_TEXTURE_IMAGE_UNITS:
+ return 16;
+ case PIPE_CAP_NPOT_TEXTURES:
+ return 0;
+ case PIPE_CAP_TWO_SIDED_STENCIL:
+ return 1;
+ case PIPE_CAP_GLSL:
+ return 0;
+ case PIPE_CAP_S3TC:
+ return 0;
+ case PIPE_CAP_ANISOTROPIC_FILTER:
+ return 1;
+ case PIPE_CAP_POINT_SPRITE:
+ return 1;
+ case PIPE_CAP_MAX_RENDER_TARGETS:
+ return 2;
+ case PIPE_CAP_OCCLUSION_QUERY:
+ return 1;
+ case PIPE_CAP_TEXTURE_SHADOW_MAP:
+ return 1;
+ case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
+ return 13;
+ case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
+ return 10;
+ case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
+ return 13;
+ case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
+ return 0;
+ case PIPE_CAP_TEXTURE_MIRROR_REPEAT:
+ return 1;
+ case PIPE_CAP_MAX_VERTEX_TEXTURE_UNITS:
+ return 0;
+ case PIPE_CAP_TGSI_CONT_SUPPORTED:
+ return 0;
+ case PIPE_CAP_BLEND_EQUATION_SEPARATE:
+ return 0;
+ case NOUVEAU_CAP_HW_VTXBUF:
+ case NOUVEAU_CAP_HW_IDXBUF:
+ return 1;
+ default:
+ NOUVEAU_ERR("Unknown PIPE_CAP %d\n", param);
+ return 0;
+ }
+}
+
+static float
+nv30_screen_get_paramf(struct pipe_screen *pscreen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_LINE_WIDTH:
+ case PIPE_CAP_MAX_LINE_WIDTH_AA:
+ return 10.0;
+ case PIPE_CAP_MAX_POINT_WIDTH:
+ case PIPE_CAP_MAX_POINT_WIDTH_AA:
+ return 64.0;
+ case PIPE_CAP_MAX_TEXTURE_ANISOTROPY:
+ return 8.0;
+ case PIPE_CAP_MAX_TEXTURE_LOD_BIAS:
+ return 4.0;
+ default:
+ NOUVEAU_ERR("Unknown PIPE_CAP %d\n", param);
+ return 0.0;
+ }
+}
+
+static boolean
+nv30_screen_surface_format_supported(struct pipe_screen *pscreen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned tex_usage, unsigned geom_flags)
+{
+ if (tex_usage & PIPE_TEXTURE_USAGE_RENDER_TARGET) {
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ return TRUE;
+ default:
+ break;
+ }
+ } else
+ if (tex_usage & PIPE_TEXTURE_USAGE_DEPTH_STENCIL) {
+ switch (format) {
+ case PIPE_FORMAT_Z24S8_UNORM:
+ case PIPE_FORMAT_Z24X8_UNORM:
+ case PIPE_FORMAT_Z16_UNORM:
+ return TRUE;
+ default:
+ break;
+ }
+ } else {
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ case PIPE_FORMAT_L8_UNORM:
+ case PIPE_FORMAT_A8_UNORM:
+ case PIPE_FORMAT_I8_UNORM:
+ case PIPE_FORMAT_A8L8_UNORM:
+ case PIPE_FORMAT_Z16_UNORM:
+ case PIPE_FORMAT_Z24S8_UNORM:
+ return TRUE;
+ default:
+ break;
+ }
+ }
+
+ return FALSE;
+}
+
+static struct pipe_buffer *
+nv30_surface_buffer(struct pipe_surface *surf)
+{
+ struct nv30_miptree *mt = (struct nv30_miptree *)surf->texture;
+
+ return mt->buffer;
+}
+
+static void
+nv30_screen_destroy(struct pipe_screen *pscreen)
+{
+ struct nv30_screen *screen = nv30_screen(pscreen);
+
+ nouveau_resource_free(&screen->vp_exec_heap);
+ nouveau_resource_free(&screen->vp_data_heap);
+ nouveau_resource_free(&screen->query_heap);
+ nouveau_notifier_free(&screen->query);
+ nouveau_notifier_free(&screen->sync);
+ nouveau_grobj_free(&screen->rankine);
+
+ FREE(pscreen);
+}
+
+struct pipe_screen *
+nv30_screen_create(struct pipe_winsys *ws, struct nouveau_device *dev)
+{
+ struct nv30_screen *screen = CALLOC_STRUCT(nv30_screen);
+ struct nouveau_channel *chan;
+ struct pipe_screen *pscreen;
+ struct nouveau_stateobj *so;
+ unsigned rankine_class = 0;
+ int ret, i;
+
+ if (!screen)
+ return NULL;
+ pscreen = &screen->base.base;
+
+ ret = nouveau_screen_init(&screen->base, dev);
+ if (ret) {
+ nv30_screen_destroy(pscreen);
+ return NULL;
+ }
+ chan = screen->base.channel;
+
+ pscreen->winsys = ws;
+ pscreen->destroy = nv30_screen_destroy;
+ pscreen->get_param = nv30_screen_get_param;
+ pscreen->get_paramf = nv30_screen_get_paramf;
+ pscreen->is_format_supported = nv30_screen_surface_format_supported;
+
+ nv30_screen_init_miptree_functions(pscreen);
+ nv30_screen_init_transfer_functions(pscreen);
+
+ /* 3D object */
+ switch (dev->chipset & 0xf0) {
+ case 0x30:
+ if (NV30TCL_CHIPSET_3X_MASK & (1 << (dev->chipset & 0x0f)))
+ rankine_class = 0x0397;
+ else
+ if (NV34TCL_CHIPSET_3X_MASK & (1 << (dev->chipset & 0x0f)))
+ rankine_class = 0x0697;
+ else
+ if (NV35TCL_CHIPSET_3X_MASK & (1 << (dev->chipset & 0x0f)))
+ rankine_class = 0x0497;
+ break;
+ default:
+ break;
+ }
+
+ if (!rankine_class) {
+ NOUVEAU_ERR("Unknown nv3x chipset: nv%02x\n", dev->chipset);
+ nv30_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ ret = nouveau_grobj_alloc(chan, 0xbeef3097, rankine_class,
+ &screen->rankine);
+ if (ret) {
+ NOUVEAU_ERR("Error creating 3D object: %d\n", ret);
+ nv30_screen_destroy(pscreen);
+ return NULL;
+ }
+ BIND_RING(chan, screen->rankine, 7);
+
+ /* 2D engine setup */
+ screen->eng2d = nv04_surface_2d_init(&screen->base);
+ screen->eng2d->buf = nv30_surface_buffer;
+
+ /* Notifier for sync purposes */
+ ret = nouveau_notifier_alloc(chan, 0xbeef0301, 1, &screen->sync);
+ if (ret) {
+ NOUVEAU_ERR("Error creating notifier object: %d\n", ret);
+ nv30_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ /* Query objects */
+ ret = nouveau_notifier_alloc(chan, 0xbeef0302, 32, &screen->query);
+ if (ret) {
+ NOUVEAU_ERR("Error initialising query objects: %d\n", ret);
+ nv30_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ ret = nouveau_resource_init(&screen->query_heap, 0, 32);
+ if (ret) {
+ NOUVEAU_ERR("Error initialising query object heap: %d\n", ret);
+ nv30_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ /* Vtxprog resources */
+ if (nouveau_resource_init(&screen->vp_exec_heap, 0, 256) ||
+ nouveau_resource_init(&screen->vp_data_heap, 0, 256)) {
+ nv30_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ /* Static rankine initialisation */
+ so = so_new(128, 0);
+ so_method(so, screen->rankine, NV34TCL_DMA_NOTIFY, 1);
+ so_data (so, screen->sync->handle);
+ so_method(so, screen->rankine, NV34TCL_DMA_TEXTURE0, 2);
+ so_data (so, chan->vram->handle);
+ so_data (so, chan->gart->handle);
+ so_method(so, screen->rankine, NV34TCL_DMA_COLOR1, 1);
+ so_data (so, chan->vram->handle);
+ so_method(so, screen->rankine, NV34TCL_DMA_COLOR0, 2);
+ so_data (so, chan->vram->handle);
+ so_data (so, chan->vram->handle);
+ so_method(so, screen->rankine, NV34TCL_DMA_VTXBUF0, 2);
+ so_data (so, chan->vram->handle);
+ so_data (so, chan->gart->handle);
+/* so_method(so, screen->rankine, NV34TCL_DMA_FENCE, 2);
+ so_data (so, 0);
+ so_data (so, screen->query->handle);*/
+ so_method(so, screen->rankine, NV34TCL_DMA_IN_MEMORY7, 1);
+ so_data (so, chan->vram->handle);
+ so_method(so, screen->rankine, NV34TCL_DMA_IN_MEMORY8, 1);
+ so_data (so, chan->vram->handle);
+
+ for (i = 1; i < 8; i++) {
+ so_method(so, screen->rankine, NV34TCL_VIEWPORT_CLIP_HORIZ(i), 1);
+ so_data (so, 0);
+ so_method(so, screen->rankine, NV34TCL_VIEWPORT_CLIP_VERT(i), 1);
+ so_data (so, 0);
+ }
+
+ so_method(so, screen->rankine, 0x220, 1);
+ so_data (so, 1);
+
+ so_method(so, screen->rankine, 0x03b0, 1);
+ so_data (so, 0x00100000);
+ so_method(so, screen->rankine, 0x1454, 1);
+ so_data (so, 0);
+ so_method(so, screen->rankine, 0x1d80, 1);
+ so_data (so, 3);
+ so_method(so, screen->rankine, 0x1450, 1);
+ so_data (so, 0x00030004);
+
+ /* NEW */
+ so_method(so, screen->rankine, 0x1e98, 1);
+ so_data (so, 0);
+ so_method(so, screen->rankine, 0x17e0, 3);
+ so_data (so, fui(0.0));
+ so_data (so, fui(0.0));
+ so_data (so, fui(1.0));
+ so_method(so, screen->rankine, 0x1f80, 16);
+ for (i = 0; i < 16; i++) {
+ so_data (so, (i == 8) ? 0x0000ffff : 0);
+ }
+
+ so_method(so, screen->rankine, 0x120, 3);
+ so_data (so, 0);
+ so_data (so, 1);
+ so_data (so, 2);
+
+ so_method(so, screen->rankine, 0x1d88, 1);
+ so_data (so, 0x00001200);
+
+ so_method(so, screen->rankine, NV34TCL_RC_ENABLE, 1);
+ so_data (so, 0);
+
+ so_method(so, screen->rankine, NV34TCL_DEPTH_RANGE_NEAR, 2);
+ so_data (so, fui(0.0));
+ so_data (so, fui(1.0));
+
+ so_method(so, screen->rankine, NV34TCL_MULTISAMPLE_CONTROL, 1);
+ so_data (so, 0xffff0000);
+
+ /* enables use of vp rather than fixed-function somehow */
+ so_method(so, screen->rankine, 0x1e94, 1);
+ so_data (so, 0x13);
+
+ so_emit(chan, so);
+ so_ref(NULL, &so);
+ nouveau_pushbuf_flush(chan, 0);
+
+ return pscreen;
+}
diff --git a/src/gallium/drivers/nv30/nv30_screen.h b/src/gallium/drivers/nv30/nv30_screen.h
new file mode 100644
index 0000000000..5fbd998b53
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_screen.h
@@ -0,0 +1,41 @@
+#ifndef __NV30_SCREEN_H__
+#define __NV30_SCREEN_H__
+
+#include "nouveau/nouveau_screen.h"
+
+#include "nv04/nv04_surface_2d.h"
+
+struct nv30_screen {
+ struct nouveau_screen base;
+
+ struct nouveau_winsys *nvws;
+
+ unsigned cur_pctx;
+
+ /* HW graphics objects */
+ struct nv04_surface_2d *eng2d;
+ struct nouveau_grobj *rankine;
+ struct nouveau_notifier *sync;
+
+ /* Query object resources */
+ struct nouveau_notifier *query;
+ struct nouveau_resource *query_heap;
+
+ /* Vtxprog resources */
+ struct nouveau_resource *vp_exec_heap;
+ struct nouveau_resource *vp_data_heap;
+
+ /* Current 3D state of channel */
+ struct nouveau_stateobj *state[NV30_STATE_MAX];
+};
+
+static INLINE struct nv30_screen *
+nv30_screen(struct pipe_screen *screen)
+{
+ return (struct nv30_screen *)screen;
+}
+
+void
+nv30_screen_init_transfer_functions(struct pipe_screen *pscreen);
+
+#endif
diff --git a/src/gallium/drivers/nv30/nv30_shader.h b/src/gallium/drivers/nv30/nv30_shader.h
new file mode 100644
index 0000000000..dd3a36f78f
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_shader.h
@@ -0,0 +1,490 @@
+#ifndef __NV30_SHADER_H__
+#define __NV30_SHADER_H__
+
+/* Vertex programs instruction set
+ *
+ * 128-bit opcodes, split into four 32-bit words for ease of use.
+ *
+ * Non-native instructions
+ * ABS - MOV + NV40_VP_INST0_DEST_ABS
+ * POW - EX2 + MUL + LG2
+ * SUB - ADD, second source negated
+ * SWZ - MOV
+ * XPD -
+ *
+ * Register access
+ * - Only one INPUT can be accessed per-instruction (move extras into TEMPs)
+ * - Only one CONST can be accessed per-instruction (move extras into TEMPs)
+ *
+ * Relative Addressing
+ * According to the value returned for
+ * MAX_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB, there are only two address
+ * registers available. The destination in the
+ * ARL instruction is set to TEMP <n> (The temp isn't actually written).
+ *
+ * When using vanilla ARB_v_p, the proprietary driver will squish both the
+ * available ADDRESS regs into the first hardware reg in the X and Y
+ * components.
+ *
+ * To use an address reg as an index into consts, the CONST_SRC is set to
+ * (const_base + offset) and INDEX_CONST is set.
+ *
+ * To access the second address reg use ADDR_REG_SELECT_1. A particular
+ * component of the address regs is selected with ADDR_SWZ.
+ *
+ * Only one address register can be accessed per instruction.
+ *
+ * Conditional execution (see NV_vertex_program{2,3} for details):
+ * conditional execution of an instruction is enabled by setting
+ * COND_TEST_ENABLE, and selecting the condition which will allow the test to
+ * pass with
+ * COND_{FL,LT,...}. It is possible to swizzle the values in the condition
+ * register, which allows for testing against an individual component.
+ *
+ * Branching:
+ *
+ * The BRA/CAL instructions seem to follow a slightly different opcode
+ * layout. The destination instruction ID (IADDR) overlaps a source field.
+ * Instruction IDs seem to be numbered based on the UPLOAD_FROM_ID FIFO
+ * command, and the ID is incremented automatically on each UPLOAD_INST FIFO
+ * command.
+ *
+ * Conditional branching is achieved by using the condition tests described
+ * above. There don't appear to be dedicated looping instructions, but loops
+ * can be built using a temp reg + conditional branching.
+ *
+ * Subroutines may be uploaded before the main program itself, but the first
+ * executed instruction is determined by the PROGRAM_START_ID FIFO command.
+ *
+ */
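+
+/* Purely illustrative sketch (not code taken from this driver): with dw0/dw1
+ * standing for the first two of the four instruction words, a vector MOV
+ * that only executes when the condition register's X component tests
+ * greater-than might be encoded roughly as
+ *
+ *   dw0 |= NV30_VP_INST_COND_TEST_ENABLE
+ *        | (NV30_VP_INST_COND_GT << NV30_VP_INST_COND_SHIFT)
+ *        | (0 << NV30_VP_INST_COND_SWZ_X_SHIFT);
+ *   dw1 |= NV30_VP_INST_OP_MOV << NV30_VP_INST_VEC_OPCODE_SHIFT;
+ *
+ * with the source and destination selection packed into the remaining
+ * fields/words as described by the masks below. */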
+
+/* DWORD 0 */
+
+#define NV30_VP_INST_ADDR_REG_SELECT_1 (1 << 24)
+#define NV30_VP_INST_SRC2_ABS (1 << 23) /* guess */
+#define NV30_VP_INST_SRC1_ABS (1 << 22) /* guess */
+#define NV30_VP_INST_SRC0_ABS (1 << 21) /* guess */
+#define NV30_VP_INST_VEC_RESULT (1 << 20)
+#define NV30_VP_INST_DEST_TEMP_ID_SHIFT 16
+#define NV30_VP_INST_DEST_TEMP_ID_MASK (0x0F << 16)
+#define NV30_VP_INST_COND_UPDATE_ENABLE (1<<15)
+#define NV30_VP_INST_VEC_DEST_TEMP_MASK (0xF << 16)
+#define NV30_VP_INST_COND_TEST_ENABLE (1<<14)
+#define NV30_VP_INST_COND_SHIFT 11
+#define NV30_VP_INST_COND_MASK (0x07 << 11)
+# define NV30_VP_INST_COND_FL 0 /* guess */
+# define NV30_VP_INST_COND_LT 1
+# define NV30_VP_INST_COND_EQ 2
+# define NV30_VP_INST_COND_LE 3
+# define NV30_VP_INST_COND_GT 4
+# define NV30_VP_INST_COND_NE 5
+# define NV30_VP_INST_COND_GE 6
+# define NV30_VP_INST_COND_TR 7 /* guess */
+#define NV30_VP_INST_COND_SWZ_X_SHIFT 9
+#define NV30_VP_INST_COND_SWZ_X_MASK (0x03 << 9)
+#define NV30_VP_INST_COND_SWZ_Y_SHIFT 7
+#define NV30_VP_INST_COND_SWZ_Y_MASK (0x03 << 7)
+#define NV30_VP_INST_COND_SWZ_Z_SHIFT 5
+#define NV30_VP_INST_COND_SWZ_Z_MASK (0x03 << 5)
+#define NV30_VP_INST_COND_SWZ_W_SHIFT 3
+#define NV30_VP_INST_COND_SWZ_W_MASK (0x03 << 3)
+#define NV30_VP_INST_COND_SWZ_ALL_SHIFT 3
+#define NV30_VP_INST_COND_SWZ_ALL_MASK (0xFF << 3)
+#define NV30_VP_INST_ADDR_SWZ_SHIFT 1
+#define NV30_VP_INST_ADDR_SWZ_MASK (0x03 << 1)
+#define NV30_VP_INST_SCA_OPCODEH_SHIFT 0
+#define NV30_VP_INST_SCA_OPCODEH_MASK (0x01 << 0)
+
+/* DWORD 1 */
+#define NV30_VP_INST_SCA_OPCODEL_SHIFT 28
+#define NV30_VP_INST_SCA_OPCODEL_MASK (0x0F << 28)
+# define NV30_VP_INST_OP_NOP 0x00
+# define NV30_VP_INST_OP_RCP 0x02
+# define NV30_VP_INST_OP_RCC 0x03
+# define NV30_VP_INST_OP_RSQ 0x04
+# define NV30_VP_INST_OP_EXP 0x05
+# define NV30_VP_INST_OP_LOG 0x06
+# define NV30_VP_INST_OP_LIT 0x07
+# define NV30_VP_INST_OP_BRA 0x09
+# define NV30_VP_INST_OP_CAL 0x0B
+# define NV30_VP_INST_OP_RET 0x0C
+# define NV30_VP_INST_OP_LG2 0x0D
+# define NV30_VP_INST_OP_EX2 0x0E
+# define NV30_VP_INST_OP_SIN 0x0F
+# define NV30_VP_INST_OP_COS 0x10
+#define NV30_VP_INST_VEC_OPCODE_SHIFT 23
+#define NV30_VP_INST_VEC_OPCODE_MASK (0x1F << 23)
+# define NV30_VP_INST_OP_NOPV 0x00
+# define NV30_VP_INST_OP_MOV 0x01
+# define NV30_VP_INST_OP_MUL 0x02
+# define NV30_VP_INST_OP_ADD 0x03
+# define NV30_VP_INST_OP_MAD 0x04
+# define NV30_VP_INST_OP_DP3 0x05
+# define NV30_VP_INST_OP_DP4 0x07
+# define NV30_VP_INST_OP_DPH 0x06
+# define NV30_VP_INST_OP_DST 0x08
+# define NV30_VP_INST_OP_MIN 0x09
+# define NV30_VP_INST_OP_MAX 0x0A
+# define NV30_VP_INST_OP_SLT 0x0B
+# define NV30_VP_INST_OP_SGE 0x0C
+# define NV30_VP_INST_OP_ARL 0x0D
+# define NV30_VP_INST_OP_FRC 0x0E
+# define NV30_VP_INST_OP_FLR 0x0F
+# define NV30_VP_INST_OP_SEQ 0x10
+# define NV30_VP_INST_OP_SFL 0x11
+# define NV30_VP_INST_OP_SGT 0x12
+# define NV30_VP_INST_OP_SLE 0x13
+# define NV30_VP_INST_OP_SNE 0x14
+# define NV30_VP_INST_OP_STR 0x15
+# define NV30_VP_INST_OP_SSG 0x16
+# define NV30_VP_INST_OP_ARR 0x17
+# define NV30_VP_INST_OP_ARA 0x18
+#define NV30_VP_INST_CONST_SRC_SHIFT 14
+#define NV30_VP_INST_CONST_SRC_MASK (0xFF << 14)
+#define NV30_VP_INST_INPUT_SRC_SHIFT 9 /*NV20*/
+#define NV30_VP_INST_INPUT_SRC_MASK (0x0F << 9) /*NV20*/
+# define NV30_VP_INST_IN_POS 0 /* These seem to match the bindings specified in */
+# define NV30_VP_INST_IN_WEIGHT 1 /* the ARB_v_p spec (2.14.3.1) */
+# define NV30_VP_INST_IN_NORMAL 2
+# define NV30_VP_INST_IN_COL0 3 /* Should probably confirm them all though */
+# define NV30_VP_INST_IN_COL1 4
+# define NV30_VP_INST_IN_FOGC 5
+# define NV30_VP_INST_IN_TC0 8
+# define NV30_VP_INST_IN_TC(n) (8 + (n))
+#define NV30_VP_INST_SRC0H_SHIFT 0 /*NV20*/
+#define NV30_VP_INST_SRC0H_MASK (0x1FF << 0) /*NV20*/
+
+/* Please note: the IADDR fields overlap other fields because they are used
+ * only for branch instructions. See Branching: label above
+ *
+ * DWORD 2
+ */
+#define NV30_VP_INST_SRC0L_SHIFT 26 /*NV20*/
+#define NV30_VP_INST_SRC0L_MASK (0x3F <<26) /* NV30_VP_SRC0_LOW_MASK << 26 */
+#define NV30_VP_INST_SRC1_SHIFT 11 /*NV20*/
+#define NV30_VP_INST_SRC1_MASK (0x7FFF<<11) /*NV20*/
+#define NV30_VP_INST_SRC2H_SHIFT 0 /*NV20*/
+#define NV30_VP_INST_SRC2H_MASK (0x7FF << 0) /* NV30_VP_SRC2_HIGH_MASK >> 4*/
+#define NV30_VP_INST_IADDR_SHIFT 2
+#define NV30_VP_INST_IADDR_MASK (0xF << 28) /* NV30_VP_SRC2_LOW_MASK << 28 */
+
+/* DWORD 3 */
+#define NV30_VP_INST_SRC2L_SHIFT 28 /*NV20*/
+#define NV30_VP_INST_SRC2L_MASK (0x0F <<28) /*NV20*/
+#define NV30_VP_INST_STEMP_WRITEMASK_SHIFT 24
+#define NV30_VP_INST_STEMP_WRITEMASK_MASK (0x0F << 24)
+#define NV30_VP_INST_VTEMP_WRITEMASK_SHIFT 20
+#define NV30_VP_INST_VTEMP_WRITEMASK_MASK (0x0F << 20)
+#define NV30_VP_INST_SDEST_WRITEMASK_SHIFT 16
+#define NV30_VP_INST_SDEST_WRITEMASK_MASK (0x0F << 16)
+#define NV30_VP_INST_VDEST_WRITEMASK_SHIFT 12 /*NV20*/
+#define NV30_VP_INST_VDEST_WRITEMASK_MASK (0x0F << 12) /*NV20*/
+#define NV30_VP_INST_DEST_SHIFT 2
+#define NV30_VP_INST_DEST_MASK (0x0F << 2)
+# define NV30_VP_INST_DEST_POS 0
+# define NV30_VP_INST_DEST_BFC0 1
+# define NV30_VP_INST_DEST_BFC1 2
+# define NV30_VP_INST_DEST_COL0 3
+# define NV30_VP_INST_DEST_COL1 4
+# define NV30_VP_INST_DEST_FOGC 5
+# define NV30_VP_INST_DEST_PSZ 6
+# define NV30_VP_INST_DEST_TC(n) (8 + (n))
+
+#define NV30_VP_INST_LAST (1 << 0)
+
+/* Useful to split the source selection regs into their pieces */
+#define NV30_VP_SRC0_HIGH_SHIFT 6
+#define NV30_VP_SRC0_HIGH_MASK 0x00007FC0
+#define NV30_VP_SRC0_LOW_MASK 0x0000003F
+#define NV30_VP_SRC2_HIGH_SHIFT 4
+#define NV30_VP_SRC2_HIGH_MASK 0x00007FF0
+#define NV30_VP_SRC2_LOW_MASK 0x0000000F
+
+
+/* Source-register definition - matches NV20 exactly */
+#define NV30_VP_SRC_NEGATE (1<<14)
+#define NV30_VP_SRC_SWZ_X_SHIFT 12
+#define NV30_VP_SRC_REG_SWZ_X_MASK (0x03 <<12)
+#define NV30_VP_SRC_SWZ_Y_SHIFT 10
+#define NV30_VP_SRC_REG_SWZ_Y_MASK (0x03 <<10)
+#define NV30_VP_SRC_SWZ_Z_SHIFT 8
+#define NV30_VP_SRC_REG_SWZ_Z_MASK (0x03 << 8)
+#define NV30_VP_SRC_SWZ_W_SHIFT 6
+#define NV30_VP_SRC_REG_SWZ_W_MASK (0x03 << 6)
+#define NV30_VP_SRC_REG_SWZ_ALL_SHIFT 6
+#define NV30_VP_SRC_REG_SWZ_ALL_MASK (0xFF << 6)
+#define NV30_VP_SRC_TEMP_SRC_SHIFT 2
+#define NV30_VP_SRC_REG_TEMP_ID_MASK (0x0F << 0)
+#define NV30_VP_SRC_REG_TYPE_SHIFT 0
+#define NV30_VP_SRC_REG_TYPE_MASK (0x03 << 0)
+#define NV30_VP_SRC_REG_TYPE_TEMP 1
+#define NV30_VP_SRC_REG_TYPE_INPUT 2
+#define NV30_VP_SRC_REG_TYPE_CONST 3 /* guess */
+
+/*
+ * Each fragment program opcode appears to consist of four 32-bit values.
+ *
+ * 0 - Opcode, output reg/mask, ATTRIB source
+ * 1 - Source 0
+ * 2 - Source 1
+ * 3 - Source 2
+ *
+ * There appears to be no special difference between result regs and temp regs.
+ * result.color == R0.xyzw
+ * result.depth == R1.z
+ * When the fragprog contains instructions that write depth,
+ * NV30_TCL_PRIMITIVE_3D_UNK1D78 is set to 0; otherwise it is set to 1.
+ *
+ * Constants are inserted directly after the instruction that uses them.
+ *
+ * It appears that it's not possible to use two input registers in one
+ * instruction as the input sourcing is done in the instruction dword
+ * and not the source selection dwords. As a result, instructions such as:
+ *
+ * ADD result.color, fragment.color, fragment.texcoord[0];
+ *
+ * must be split into two MOVs and then an ADD (nvidia does this), but
+ * I'm not sure why it's not just one MOV and then sourcing the second input
+ * in the ADD instruction.
+ *
+ * Negation of the full source is done with NV30_FP_REG_NEGATE; arbitrary
+ * negation requires multiplication with a const.
+ *
+ * Arbitrary swizzling is supported, with the exception of
+ * SWIZZLE_ZERO/SWIZZLE_ONE. The temp/result regs appear to be initialised to
+ * (0.0, 0.0, 0.0, 0.0), as SWIZZLE_ZERO is implemented simply by not writing
+ * to the relevant components of the destination.
+ *
+ * Conditional execution
+ * TODO
+ *
+ * Non-native instructions:
+ * LIT
+ * LRP - MAD+MAD
+ * SUB - ADD, negate second source
+ * RSQ - LG2 + EX2
+ * POW - LG2 + MUL + EX2
+ * SCS - COS + SIN
+ * XPD
+ */
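+
+/* Purely illustrative sketch (not lifted from the actual translator): a
+ * full-precision "MOV R0, fragment.color" could be assembled from these
+ * defines roughly as
+ *
+ *   insn[0] = (NV30_FP_OP_OPCODE_MOV << NV30_FP_OP_OPCODE_SHIFT)
+ *           | (NV30_FP_PRECISION_FP32 << NV30_FP_OP_PRECISION_SHIFT)
+ *           | (NV30_FP_OP_INPUT_SRC_COL0 << NV30_FP_OP_INPUT_SRC_SHIFT)
+ *           | NV30_FP_OP_OUTMASK_MASK;
+ *   insn[1] = (NV30_FP_REG_TYPE_INPUT << NV30_FP_REG_TYPE_SHIFT)
+ *           | (NV30_FP_SWIZZLE_X << NV30_FP_REG_SWZ_X_SHIFT)
+ *           | (NV30_FP_SWIZZLE_Y << NV30_FP_REG_SWZ_Y_SHIFT)
+ *           | (NV30_FP_SWIZZLE_Z << NV30_FP_REG_SWZ_Z_SHIFT)
+ *           | (NV30_FP_SWIZZLE_W << NV30_FP_REG_SWZ_W_SHIFT);
+ *
+ * with the remaining source words unused and NV30_FP_OP_PROGRAM_END set on
+ * the final instruction of the program. */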
+
+//== Opcode / Destination selection ==
+#define NV30_FP_OP_PROGRAM_END (1 << 0)
+#define NV30_FP_OP_OUT_REG_SHIFT 1
+#define NV30_FP_OP_OUT_REG_MASK (31 << 1) /* uncertain */
+/* Needs to be set when writing outputs to get the expected result. */
+#define NV30_FP_OP_OUT_REG_HALF (1 << 7)
+#define NV30_FP_OP_COND_WRITE_ENABLE (1 << 8)
+#define NV30_FP_OP_OUTMASK_SHIFT 9
+#define NV30_FP_OP_OUTMASK_MASK (0xF << 9)
+# define NV30_FP_OP_OUT_X (1<<9)
+# define NV30_FP_OP_OUT_Y (1<<10)
+# define NV30_FP_OP_OUT_Z (1<<11)
+# define NV30_FP_OP_OUT_W (1<<12)
+/* Uncertain about these, especially the input_src values; it's possible that
+ * they can be dynamically changed.
+ */
+#define NV30_FP_OP_INPUT_SRC_SHIFT 13
+#define NV30_FP_OP_INPUT_SRC_MASK (15 << 13)
+# define NV30_FP_OP_INPUT_SRC_POSITION 0x0
+# define NV30_FP_OP_INPUT_SRC_COL0 0x1
+# define NV30_FP_OP_INPUT_SRC_COL1 0x2
+# define NV30_FP_OP_INPUT_SRC_FOGC 0x3
+# define NV30_FP_OP_INPUT_SRC_TC0 0x4
+# define NV30_FP_OP_INPUT_SRC_TC(n) (0x4 + (n))
+#define NV30_FP_OP_TEX_UNIT_SHIFT 17
+#define NV30_FP_OP_TEX_UNIT_MASK (0xF << 17) /* guess */
+#define NV30_FP_OP_PRECISION_SHIFT 22
+#define NV30_FP_OP_PRECISION_MASK (3 << 22)
+# define NV30_FP_PRECISION_FP32 0
+# define NV30_FP_PRECISION_FP16 1
+# define NV30_FP_PRECISION_FX12 2
+#define NV30_FP_OP_OPCODE_SHIFT 24
+#define NV30_FP_OP_OPCODE_MASK (0x3F << 24)
+# define NV30_FP_OP_OPCODE_NOP 0x00
+# define NV30_FP_OP_OPCODE_MOV 0x01
+# define NV30_FP_OP_OPCODE_MUL 0x02
+# define NV30_FP_OP_OPCODE_ADD 0x03
+# define NV30_FP_OP_OPCODE_MAD 0x04
+# define NV30_FP_OP_OPCODE_DP3 0x05
+# define NV30_FP_OP_OPCODE_DP4 0x06
+# define NV30_FP_OP_OPCODE_DST 0x07
+# define NV30_FP_OP_OPCODE_MIN 0x08
+# define NV30_FP_OP_OPCODE_MAX 0x09
+# define NV30_FP_OP_OPCODE_SLT 0x0A
+# define NV30_FP_OP_OPCODE_SGE 0x0B
+# define NV30_FP_OP_OPCODE_SLE 0x0C
+# define NV30_FP_OP_OPCODE_SGT 0x0D
+# define NV30_FP_OP_OPCODE_SNE 0x0E
+# define NV30_FP_OP_OPCODE_SEQ 0x0F
+# define NV30_FP_OP_OPCODE_FRC 0x10
+# define NV30_FP_OP_OPCODE_FLR 0x11
+# define NV30_FP_OP_OPCODE_KIL 0x12
+# define NV30_FP_OP_OPCODE_PK4B 0x13
+# define NV30_FP_OP_OPCODE_UP4B 0x14
+# define NV30_FP_OP_OPCODE_DDX 0x15 /* can only write XY */
+# define NV30_FP_OP_OPCODE_DDY 0x16 /* can only write XY */
+# define NV30_FP_OP_OPCODE_TEX 0x17
+# define NV30_FP_OP_OPCODE_TXP 0x18
+# define NV30_FP_OP_OPCODE_TXD 0x19
+# define NV30_FP_OP_OPCODE_RCP 0x1A
+# define NV30_FP_OP_OPCODE_RSQ 0x1B
+# define NV30_FP_OP_OPCODE_EX2 0x1C
+# define NV30_FP_OP_OPCODE_LG2 0x1D
+# define NV30_FP_OP_OPCODE_LIT 0x1E
+# define NV30_FP_OP_OPCODE_LRP 0x1F
+# define NV30_FP_OP_OPCODE_STR 0x20
+# define NV30_FP_OP_OPCODE_SFL 0x21
+# define NV30_FP_OP_OPCODE_COS 0x22
+# define NV30_FP_OP_OPCODE_SIN 0x23
+# define NV30_FP_OP_OPCODE_PK2H 0x24
+# define NV30_FP_OP_OPCODE_UP2H 0x25
+# define NV30_FP_OP_OPCODE_POW 0x26
+# define NV30_FP_OP_OPCODE_PK4UB 0x27
+# define NV30_FP_OP_OPCODE_UP4UB 0x28
+# define NV30_FP_OP_OPCODE_PK2US 0x29
+# define NV30_FP_OP_OPCODE_UP2US 0x2A
+# define NV30_FP_OP_OPCODE_DP2A 0x2E
+# define NV30_FP_OP_OPCODE_TXB 0x31
+# define NV30_FP_OP_OPCODE_RFL 0x36
+# define NV30_FP_OP_OPCODE_DIV 0x3A
+#define NV30_FP_OP_OUT_SAT (1 << 31)
+
+/* high order bits of SRC0 */
+#define NV30_FP_OP_OUT_ABS (1 << 29)
+#define NV30_FP_OP_COND_SWZ_W_SHIFT 27
+#define NV30_FP_OP_COND_SWZ_W_MASK (3 << 27)
+#define NV30_FP_OP_COND_SWZ_Z_SHIFT 25
+#define NV30_FP_OP_COND_SWZ_Z_MASK (3 << 25)
+#define NV30_FP_OP_COND_SWZ_Y_SHIFT 23
+#define NV30_FP_OP_COND_SWZ_Y_MASK (3 << 23)
+#define NV30_FP_OP_COND_SWZ_X_SHIFT 21
+#define NV30_FP_OP_COND_SWZ_X_MASK (3 << 21)
+#define NV30_FP_OP_COND_SWZ_ALL_SHIFT 21
+#define NV30_FP_OP_COND_SWZ_ALL_MASK (0xFF << 21)
+#define NV30_FP_OP_COND_SHIFT 18
+#define NV30_FP_OP_COND_MASK (0x07 << 18)
+# define NV30_FP_OP_COND_FL 0
+# define NV30_FP_OP_COND_LT 1
+# define NV30_FP_OP_COND_EQ 2
+# define NV30_FP_OP_COND_LE 3
+# define NV30_FP_OP_COND_GT 4
+# define NV30_FP_OP_COND_NE 5
+# define NV30_FP_OP_COND_GE 6
+# define NV30_FP_OP_COND_TR 7
+
+/* high order bits of SRC1 */
+#define NV30_FP_OP_DST_SCALE_SHIFT 28
+#define NV30_FP_OP_DST_SCALE_MASK (3 << 28)
+#define NV30_FP_OP_DST_SCALE_1X 0
+#define NV30_FP_OP_DST_SCALE_2X 1
+#define NV30_FP_OP_DST_SCALE_4X 2
+#define NV30_FP_OP_DST_SCALE_8X 3
+#define NV30_FP_OP_DST_SCALE_INV_2X 5
+#define NV30_FP_OP_DST_SCALE_INV_4X 6
+#define NV30_FP_OP_DST_SCALE_INV_8X 7
+
+
+/* high order bits of SRC2 */
+#define NV30_FP_OP_INDEX_INPUT (1 << 30)
+
+//== Register selection ==
+#define NV30_FP_REG_TYPE_SHIFT 0
+#define NV30_FP_REG_TYPE_MASK (3 << 0)
+# define NV30_FP_REG_TYPE_TEMP 0
+# define NV30_FP_REG_TYPE_INPUT 1
+# define NV30_FP_REG_TYPE_CONST 2
+#define NV30_FP_REG_SRC_SHIFT 2 /* uncertain */
+#define NV30_FP_REG_SRC_MASK (31 << 2)
+#define NV30_FP_REG_SRC_HALF (1 << 8)
+#define NV30_FP_REG_SWZ_ALL_SHIFT 9
+#define NV30_FP_REG_SWZ_ALL_MASK (255 << 9)
+#define NV30_FP_REG_SWZ_X_SHIFT 9
+#define NV30_FP_REG_SWZ_X_MASK (3 << 9)
+#define NV30_FP_REG_SWZ_Y_SHIFT 11
+#define NV30_FP_REG_SWZ_Y_MASK (3 << 11)
+#define NV30_FP_REG_SWZ_Z_SHIFT 13
+#define NV30_FP_REG_SWZ_Z_MASK (3 << 13)
+#define NV30_FP_REG_SWZ_W_SHIFT 15
+#define NV30_FP_REG_SWZ_W_MASK (3 << 15)
+# define NV30_FP_SWIZZLE_X 0
+# define NV30_FP_SWIZZLE_Y 1
+# define NV30_FP_SWIZZLE_Z 2
+# define NV30_FP_SWIZZLE_W 3
+#define NV30_FP_REG_NEGATE (1 << 17)
+
+#define NV30SR_NONE 0
+#define NV30SR_OUTPUT 1
+#define NV30SR_INPUT 2
+#define NV30SR_TEMP 3
+#define NV30SR_CONST 4
+
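+/* Driver-internal description of a shader source/destination operand
+ * (register file, index, swizzle, negate/abs, scale, condition-code use),
+ * filled in by the vertex/fragment program translators before the bits are
+ * packed into the hardware encodings above. */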
+struct nv30_sreg {
+ int type;
+ int index;
+
+ int dst_scale;
+
+ int negate;
+ int abs;
+ int swz[4];
+
+ int cc_update;
+ int cc_update_reg;
+ int cc_test;
+ int cc_test_reg;
+ int cc_swz[4];
+};
+
+static INLINE struct nv30_sreg
+nv30_sr(int type, int index)
+{
+ struct nv30_sreg temp = {
+ .type = type,
+ .index = index,
+ .dst_scale = DEF_SCALE,
+ .abs = 0,
+ .negate = 0,
+ .swz = { 0, 1, 2, 3 },
+ .cc_update = 0,
+ .cc_update_reg = 0,
+ .cc_test = DEF_CTEST,
+ .cc_test_reg = 0,
+ .cc_swz = { 0, 1, 2, 3 },
+ };
+ return temp;
+}
+
+static INLINE struct nv30_sreg
+nv30_sr_swz(struct nv30_sreg src, int x, int y, int z, int w)
+{
+ struct nv30_sreg dst = src;
+
+ dst.swz[SWZ_X] = src.swz[x];
+ dst.swz[SWZ_Y] = src.swz[y];
+ dst.swz[SWZ_Z] = src.swz[z];
+ dst.swz[SWZ_W] = src.swz[w];
+ return dst;
+}
+
+static INLINE struct nv30_sreg
+nv30_sr_neg(struct nv30_sreg src)
+{
+ src.negate = !src.negate;
+ return src;
+}
+
+static INLINE struct nv30_sreg
+nv30_sr_abs(struct nv30_sreg src)
+{
+ src.abs = 1;
+ return src;
+}
+
+static INLINE struct nv30_sreg
+nv30_sr_scale(struct nv30_sreg src, int scale)
+{
+ src.dst_scale = scale;
+ return src;
+}
+
+#endif
diff --git a/src/gallium/drivers/nv30/nv30_state.c b/src/gallium/drivers/nv30/nv30_state.c
new file mode 100644
index 0000000000..b91e972c12
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_state.c
@@ -0,0 +1,728 @@
+#include "pipe/p_state.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+
+#include "tgsi/tgsi_parse.h"
+
+#include "nv30_context.h"
+#include "nv30_state.h"
+
+static void *
+nv30_blend_state_create(struct pipe_context *pipe,
+ const struct pipe_blend_state *cso)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+ struct nouveau_grobj *rankine = nv30->screen->rankine;
+ struct nv30_blend_state *bso = CALLOC(1, sizeof(*bso));
+ struct nouveau_stateobj *so = so_new(16, 0);
+
+ if (cso->blend_enable) {
+ so_method(so, rankine, NV34TCL_BLEND_FUNC_ENABLE, 3);
+ so_data (so, 1);
+ so_data (so, (nvgl_blend_func(cso->alpha_src_factor) << 16) |
+ nvgl_blend_func(cso->rgb_src_factor));
+ so_data (so, nvgl_blend_func(cso->alpha_dst_factor) << 16 |
+ nvgl_blend_func(cso->rgb_dst_factor));
+ /* FIXME: Gallium assumes GL_EXT_blend_func_separate.
+ This is not the case for NV30. */
+ so_method(so, rankine, NV34TCL_BLEND_EQUATION, 1);
+ so_data (so, nvgl_blend_eqn(cso->rgb_func));
+ } else {
+ so_method(so, rankine, NV34TCL_BLEND_FUNC_ENABLE, 1);
+ so_data (so, 0);
+ }
+
+ so_method(so, rankine, NV34TCL_COLOR_MASK, 1);
+ so_data (so, (((cso->colormask & PIPE_MASK_A) ? (0x01 << 24) : 0) |
+ ((cso->colormask & PIPE_MASK_R) ? (0x01 << 16) : 0) |
+ ((cso->colormask & PIPE_MASK_G) ? (0x01 << 8) : 0) |
+ ((cso->colormask & PIPE_MASK_B) ? (0x01 << 0) : 0)));
+
+ if (cso->logicop_enable) {
+ so_method(so, rankine, NV34TCL_COLOR_LOGIC_OP_ENABLE, 2);
+ so_data (so, 1);
+ so_data (so, nvgl_logicop_func(cso->logicop_func));
+ } else {
+ so_method(so, rankine, NV34TCL_COLOR_LOGIC_OP_ENABLE, 1);
+ so_data (so, 0);
+ }
+
+ so_method(so, rankine, NV34TCL_DITHER_ENABLE, 1);
+ so_data (so, cso->dither ? 1 : 0);
+
+ so_ref(so, &bso->so);
+ so_ref(NULL, &so);
+ bso->pipe = *cso;
+ return (void *)bso;
+}
+
+static void
+nv30_blend_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+
+ nv30->blend = hwcso;
+ nv30->dirty |= NV30_NEW_BLEND;
+}
+
+static void
+nv30_blend_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv30_blend_state *bso = hwcso;
+
+ so_ref(NULL, &bso->so);
+ FREE(bso);
+}
+
+
+static INLINE unsigned
+wrap_mode(unsigned wrap)
+{
+ unsigned ret;
+
+ switch (wrap) {
+ case PIPE_TEX_WRAP_REPEAT:
+ ret = NV34TCL_TX_WRAP_S_REPEAT;
+ break;
+ case PIPE_TEX_WRAP_MIRROR_REPEAT:
+ ret = NV34TCL_TX_WRAP_S_MIRRORED_REPEAT;
+ break;
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ ret = NV34TCL_TX_WRAP_S_CLAMP_TO_EDGE;
+ break;
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ ret = NV34TCL_TX_WRAP_S_CLAMP_TO_BORDER;
+ break;
+ case PIPE_TEX_WRAP_CLAMP:
+ ret = NV34TCL_TX_WRAP_S_CLAMP;
+ break;
+/* case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
+ ret = NV34TCL_TX_WRAP_S_MIRROR_CLAMP_TO_EDGE;
+ break;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
+ ret = NV34TCL_TX_WRAP_S_MIRROR_CLAMP_TO_BORDER;
+ break;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP:
+ ret = NV34TCL_TX_WRAP_S_MIRROR_CLAMP;
+ break;*/
+ default:
+ NOUVEAU_ERR("unknown wrap mode: %d\n", wrap);
+ ret = NV34TCL_TX_WRAP_S_REPEAT;
+ break;
+ }
+
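+ /* The NV34TCL_TX_WRAP_S_* values are pre-shifted for the S field;
+ * shift back down so the caller can place the value in S, T or R. */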
+ return ret >> NV34TCL_TX_WRAP_S_SHIFT;
+}
+
+static void *
+nv30_sampler_state_create(struct pipe_context *pipe,
+ const struct pipe_sampler_state *cso)
+{
+ struct nv30_sampler_state *ps;
+ uint32_t filter = 0;
+
+ ps = MALLOC(sizeof(struct nv30_sampler_state));
+
+ ps->fmt = 0;
+ /* TODO: Not all RECT formats have this bit set; bits 15-8 of the format
+ are the tx format to use. We should store the normalized-coords flag
+ in the sampler state structure, and set the appropriate format in
+ nvxx_fragtex_build().
+ */
+ /*NV34TCL_TX_FORMAT_RECT*/
+ /*if (!cso->normalized_coords) {
+ ps->fmt |= (1<<14) ;
+ }*/
+
+ ps->wrap = ((wrap_mode(cso->wrap_s) << NV34TCL_TX_WRAP_S_SHIFT) |
+ (wrap_mode(cso->wrap_t) << NV34TCL_TX_WRAP_T_SHIFT) |
+ (wrap_mode(cso->wrap_r) << NV34TCL_TX_WRAP_R_SHIFT));
+
+ ps->en = 0;
+
+ if (cso->max_anisotropy >= 8.0) {
+ ps->en |= NV34TCL_TX_ENABLE_ANISO_8X;
+ } else
+ if (cso->max_anisotropy >= 4.0) {
+ ps->en |= NV34TCL_TX_ENABLE_ANISO_4X;
+ } else
+ if (cso->max_anisotropy >= 2.0) {
+ ps->en |= NV34TCL_TX_ENABLE_ANISO_2X;
+ }
+
+ switch (cso->mag_img_filter) {
+ case PIPE_TEX_FILTER_LINEAR:
+ filter |= NV34TCL_TX_FILTER_MAGNIFY_LINEAR;
+ break;
+ case PIPE_TEX_FILTER_NEAREST:
+ default:
+ filter |= NV34TCL_TX_FILTER_MAGNIFY_NEAREST;
+ break;
+ }
+
+ switch (cso->min_img_filter) {
+ case PIPE_TEX_FILTER_LINEAR:
+ switch (cso->min_mip_filter) {
+ case PIPE_TEX_MIPFILTER_NEAREST:
+ filter |= NV34TCL_TX_FILTER_MINIFY_LINEAR_MIPMAP_NEAREST;
+ break;
+ case PIPE_TEX_MIPFILTER_LINEAR:
+ filter |= NV34TCL_TX_FILTER_MINIFY_LINEAR_MIPMAP_LINEAR;
+ break;
+ case PIPE_TEX_MIPFILTER_NONE:
+ default:
+ filter |= NV34TCL_TX_FILTER_MINIFY_LINEAR;
+ break;
+ }
+ break;
+ case PIPE_TEX_FILTER_NEAREST:
+ default:
+ switch (cso->min_mip_filter) {
+ case PIPE_TEX_MIPFILTER_NEAREST:
+ filter |= NV34TCL_TX_FILTER_MINIFY_NEAREST_MIPMAP_NEAREST;
+ break;
+ case PIPE_TEX_MIPFILTER_LINEAR:
+ filter |= NV34TCL_TX_FILTER_MINIFY_NEAREST_MIPMAP_LINEAR;
+ break;
+ case PIPE_TEX_MIPFILTER_NONE:
+ default:
+ filter |= NV34TCL_TX_FILTER_MINIFY_NEAREST;
+ break;
+ }
+ break;
+ }
+
+ ps->filt = filter;
+
+ {
+ float limit;
+
+ limit = CLAMP(cso->lod_bias, -16.0, 15.0);
+ ps->filt |= (int)(limit * 256.0) & 0x1fff;
+
+ limit = CLAMP(cso->max_lod, 0.0, 15.0);
+ ps->en |= (int)(limit) << 14 /*NV34TCL_TX_ENABLE_MIPMAP_MAX_LOD_SHIFT*/;
+
+ limit = CLAMP(cso->min_lod, 0.0, 15.0);
+ ps->en |= (int)(limit) << 26 /*NV34TCL_TX_ENABLE_MIPMAP_MIN_LOD_SHIFT*/;
+ }
+
+ if (cso->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) {
+ switch (cso->compare_func) {
+ case PIPE_FUNC_NEVER:
+ ps->wrap |= NV34TCL_TX_WRAP_RCOMP_NEVER;
+ break;
+ case PIPE_FUNC_GREATER:
+ ps->wrap |= NV34TCL_TX_WRAP_RCOMP_GREATER;
+ break;
+ case PIPE_FUNC_EQUAL:
+ ps->wrap |= NV34TCL_TX_WRAP_RCOMP_EQUAL;
+ break;
+ case PIPE_FUNC_GEQUAL:
+ ps->wrap |= NV34TCL_TX_WRAP_RCOMP_GEQUAL;
+ break;
+ case PIPE_FUNC_LESS:
+ ps->wrap |= NV34TCL_TX_WRAP_RCOMP_LESS;
+ break;
+ case PIPE_FUNC_NOTEQUAL:
+ ps->wrap |= NV34TCL_TX_WRAP_RCOMP_NOTEQUAL;
+ break;
+ case PIPE_FUNC_LEQUAL:
+ ps->wrap |= NV34TCL_TX_WRAP_RCOMP_LEQUAL;
+ break;
+ case PIPE_FUNC_ALWAYS:
+ ps->wrap |= NV34TCL_TX_WRAP_RCOMP_ALWAYS;
+ break;
+ default:
+ break;
+ }
+ }
+
+ ps->bcol = ((float_to_ubyte(cso->border_color[3]) << 24) |
+ (float_to_ubyte(cso->border_color[0]) << 16) |
+ (float_to_ubyte(cso->border_color[1]) << 8) |
+ (float_to_ubyte(cso->border_color[2]) << 0));
+
+ return (void *)ps;
+}
+
+static void
+nv30_sampler_state_bind(struct pipe_context *pipe, unsigned nr, void **sampler)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+ unsigned unit;
+
+ for (unit = 0; unit < nr; unit++) {
+ nv30->tex_sampler[unit] = sampler[unit];
+ nv30->dirty_samplers |= (1 << unit);
+ }
+
+ for (unit = nr; unit < nv30->nr_samplers; unit++) {
+ nv30->tex_sampler[unit] = NULL;
+ nv30->dirty_samplers |= (1 << unit);
+ }
+
+ nv30->nr_samplers = nr;
+ nv30->dirty |= NV30_NEW_SAMPLER;
+}
+
+static void
+nv30_sampler_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ FREE(hwcso);
+}
+
+static void
+nv30_set_sampler_texture(struct pipe_context *pipe, unsigned nr,
+ struct pipe_texture **miptree)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+ unsigned unit;
+
+ for (unit = 0; unit < nr; unit++) {
+ pipe_texture_reference((struct pipe_texture **)
+ &nv30->tex_miptree[unit], miptree[unit]);
+ nv30->dirty_samplers |= (1 << unit);
+ }
+
+ for (unit = nr; unit < nv30->nr_textures; unit++) {
+ pipe_texture_reference((struct pipe_texture **)
+ &nv30->tex_miptree[unit], NULL);
+ nv30->dirty_samplers |= (1 << unit);
+ }
+
+ nv30->nr_textures = nr;
+ nv30->dirty |= NV30_NEW_SAMPLER;
+}
+
+static void *
+nv30_rasterizer_state_create(struct pipe_context *pipe,
+ const struct pipe_rasterizer_state *cso)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+ struct nv30_rasterizer_state *rsso = CALLOC(1, sizeof(*rsso));
+ struct nouveau_stateobj *so = so_new(32, 0);
+ struct nouveau_grobj *rankine = nv30->screen->rankine;
+
+ /*XXX: ignored:
+ * light_twoside
+ * point_smooth -nohw
+ * multisample
+ */
+
+ so_method(so, rankine, NV34TCL_SHADE_MODEL, 1);
+ so_data (so, cso->flatshade ? NV34TCL_SHADE_MODEL_FLAT :
+ NV34TCL_SHADE_MODEL_SMOOTH);
+
+ so_method(so, rankine, NV34TCL_LINE_WIDTH, 2);
+ so_data (so, (unsigned char)(cso->line_width * 8.0) & 0xff);
+ so_data (so, cso->line_smooth ? 1 : 0);
+ so_method(so, rankine, NV34TCL_LINE_STIPPLE_ENABLE, 2);
+ so_data (so, cso->line_stipple_enable ? 1 : 0);
+ so_data (so, (cso->line_stipple_pattern << 16) |
+ cso->line_stipple_factor);
+
+ so_method(so, rankine, NV34TCL_POINT_SIZE, 1);
+ so_data (so, fui(cso->point_size));
+
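+ /* Hardware state is expressed per front/back face while Gallium gives
+ * fill and cull modes per winding, so translate according to which
+ * winding is considered front-facing. */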
+ so_method(so, rankine, NV34TCL_POLYGON_MODE_FRONT, 6);
+ if (cso->front_winding == PIPE_WINDING_CCW) {
+ so_data(so, nvgl_polygon_mode(cso->fill_ccw));
+ so_data(so, nvgl_polygon_mode(cso->fill_cw));
+ switch (cso->cull_mode) {
+ case PIPE_WINDING_CCW:
+ so_data(so, NV34TCL_CULL_FACE_FRONT);
+ break;
+ case PIPE_WINDING_CW:
+ so_data(so, NV34TCL_CULL_FACE_BACK);
+ break;
+ case PIPE_WINDING_BOTH:
+ so_data(so, NV34TCL_CULL_FACE_FRONT_AND_BACK);
+ break;
+ default:
+ so_data(so, NV34TCL_CULL_FACE_BACK);
+ break;
+ }
+ so_data(so, NV34TCL_FRONT_FACE_CCW);
+ } else {
+ so_data(so, nvgl_polygon_mode(cso->fill_cw));
+ so_data(so, nvgl_polygon_mode(cso->fill_ccw));
+ switch (cso->cull_mode) {
+ case PIPE_WINDING_CCW:
+ so_data(so, NV34TCL_CULL_FACE_BACK);
+ break;
+ case PIPE_WINDING_CW:
+ so_data(so, NV34TCL_CULL_FACE_FRONT);
+ break;
+ case PIPE_WINDING_BOTH:
+ so_data(so, NV34TCL_CULL_FACE_FRONT_AND_BACK);
+ break;
+ default:
+ so_data(so, NV34TCL_CULL_FACE_BACK);
+ break;
+ }
+ so_data(so, NV34TCL_FRONT_FACE_CW);
+ }
+ so_data(so, cso->poly_smooth ? 1 : 0);
+ so_data(so, (cso->cull_mode != PIPE_WINDING_NONE) ? 1 : 0);
+
+ so_method(so, rankine, NV34TCL_POLYGON_STIPPLE_ENABLE, 1);
+ so_data (so, cso->poly_stipple_enable ? 1 : 0);
+
+ so_method(so, rankine, NV34TCL_POLYGON_OFFSET_POINT_ENABLE, 3);
+ if ((cso->offset_cw && cso->fill_cw == PIPE_POLYGON_MODE_POINT) ||
+ (cso->offset_ccw && cso->fill_ccw == PIPE_POLYGON_MODE_POINT))
+ so_data(so, 1);
+ else
+ so_data(so, 0);
+ if ((cso->offset_cw && cso->fill_cw == PIPE_POLYGON_MODE_LINE) ||
+ (cso->offset_ccw && cso->fill_ccw == PIPE_POLYGON_MODE_LINE))
+ so_data(so, 1);
+ else
+ so_data(so, 0);
+ if ((cso->offset_cw && cso->fill_cw == PIPE_POLYGON_MODE_FILL) ||
+ (cso->offset_ccw && cso->fill_ccw == PIPE_POLYGON_MODE_FILL))
+ so_data(so, 1);
+ else
+ so_data(so, 0);
+ if (cso->offset_cw || cso->offset_ccw) {
+ so_method(so, rankine, NV34TCL_POLYGON_OFFSET_FACTOR, 2);
+ so_data (so, fui(cso->offset_scale));
+ so_data (so, fui(cso->offset_units * 2));
+ }
+
+ so_method(so, rankine, NV34TCL_POINT_SPRITE, 1);
+ if (cso->point_sprite) {
+ unsigned psctl = (1 << 0), i;
+
+ for (i = 0; i < 8; i++) {
+ if (cso->sprite_coord_mode[i] != PIPE_SPRITE_COORD_NONE)
+ psctl |= (1 << (8 + i));
+ }
+
+ so_data(so, psctl);
+ } else {
+ so_data(so, 0);
+ }
+
+ so_ref(so, &rsso->so);
+ so_ref(NULL, &so);
+ rsso->pipe = *cso;
+ return (void *)rsso;
+}
+
+static void
+nv30_rasterizer_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+
+ nv30->rasterizer = hwcso;
+ nv30->dirty |= NV30_NEW_RAST;
+ /*nv30->draw_dirty |= NV30_NEW_RAST;*/
+}
+
+static void
+nv30_rasterizer_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv30_rasterizer_state *rsso = hwcso;
+
+ so_ref(NULL, &rsso->so);
+ FREE(rsso);
+}
+
+static void *
+nv30_depth_stencil_alpha_state_create(struct pipe_context *pipe,
+ const struct pipe_depth_stencil_alpha_state *cso)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+ struct nv30_zsa_state *zsaso = CALLOC(1, sizeof(*zsaso));
+ struct nouveau_stateobj *so = so_new(32, 0);
+ struct nouveau_grobj *rankine = nv30->screen->rankine;
+
+ so_method(so, rankine, NV34TCL_DEPTH_FUNC, 3);
+ so_data (so, nvgl_comparison_op(cso->depth.func));
+ so_data (so, cso->depth.writemask ? 1 : 0);
+ so_data (so, cso->depth.enabled ? 1 : 0);
+
+ so_method(so, rankine, NV34TCL_ALPHA_FUNC_ENABLE, 3);
+ so_data (so, cso->alpha.enabled ? 1 : 0);
+ so_data (so, nvgl_comparison_op(cso->alpha.func));
+ so_data (so, float_to_ubyte(cso->alpha.ref_value));
+
+ if (cso->stencil[0].enabled) {
+ so_method(so, rankine, NV34TCL_STENCIL_FRONT_ENABLE, 8);
+ so_data (so, cso->stencil[0].enabled ? 1 : 0);
+ so_data (so, cso->stencil[0].writemask);
+ so_data (so, nvgl_comparison_op(cso->stencil[0].func));
+ so_data (so, cso->stencil[0].ref_value);
+ so_data (so, cso->stencil[0].valuemask);
+ so_data (so, nvgl_stencil_op(cso->stencil[0].fail_op));
+ so_data (so, nvgl_stencil_op(cso->stencil[0].zfail_op));
+ so_data (so, nvgl_stencil_op(cso->stencil[0].zpass_op));
+ } else {
+ so_method(so, rankine, NV34TCL_STENCIL_FRONT_ENABLE, 1);
+ so_data (so, 0);
+ }
+
+ if (cso->stencil[1].enabled) {
+ so_method(so, rankine, NV34TCL_STENCIL_BACK_ENABLE, 8);
+ so_data (so, cso->stencil[1].enabled ? 1 : 0);
+ so_data (so, cso->stencil[1].writemask);
+ so_data (so, nvgl_comparison_op(cso->stencil[1].func));
+ so_data (so, cso->stencil[1].ref_value);
+ so_data (so, cso->stencil[1].valuemask);
+ so_data (so, nvgl_stencil_op(cso->stencil[1].fail_op));
+ so_data (so, nvgl_stencil_op(cso->stencil[1].zfail_op));
+ so_data (so, nvgl_stencil_op(cso->stencil[1].zpass_op));
+ } else {
+ so_method(so, rankine, NV34TCL_STENCIL_BACK_ENABLE, 1);
+ so_data (so, 0);
+ }
+
+ so_ref(so, &zsaso->so);
+ so_ref(NULL, &so);
+ zsaso->pipe = *cso;
+ return (void *)zsaso;
+}
+
+static void
+nv30_depth_stencil_alpha_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+
+ nv30->zsa = hwcso;
+ nv30->dirty |= NV30_NEW_ZSA;
+}
+
+static void
+nv30_depth_stencil_alpha_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv30_zsa_state *zsaso = hwcso;
+
+ so_ref(NULL, &zsaso->so);
+ FREE(zsaso);
+}
+
+static void *
+nv30_vp_state_create(struct pipe_context *pipe,
+ const struct pipe_shader_state *cso)
+{
+ /*struct nv30_context *nv30 = nv30_context(pipe);*/
+ struct nv30_vertex_program *vp;
+
+ vp = CALLOC(1, sizeof(struct nv30_vertex_program));
+ vp->pipe.tokens = tgsi_dup_tokens(cso->tokens);
+ /*vp->draw = draw_create_vertex_shader(nv30->draw, &vp->pipe);*/
+
+ return (void *)vp;
+}
+
+static void
+nv30_vp_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+
+ nv30->vertprog = hwcso;
+ nv30->dirty |= NV30_NEW_VERTPROG;
+ /*nv30->draw_dirty |= NV30_NEW_VERTPROG;*/
+}
+
+static void
+nv30_vp_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+ struct nv30_vertex_program *vp = hwcso;
+
+ /*draw_delete_vertex_shader(nv30->draw, vp->draw);*/
+ nv30_vertprog_destroy(nv30, vp);
+ FREE((void*)vp->pipe.tokens);
+ FREE(vp);
+}
+
+static void *
+nv30_fp_state_create(struct pipe_context *pipe,
+ const struct pipe_shader_state *cso)
+{
+ struct nv30_fragment_program *fp;
+
+ fp = CALLOC(1, sizeof(struct nv30_fragment_program));
+ fp->pipe.tokens = tgsi_dup_tokens(cso->tokens);
+
+ tgsi_scan_shader(fp->pipe.tokens, &fp->info);
+
+ return (void *)fp;
+}
+
+static void
+nv30_fp_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+
+ nv30->fragprog = hwcso;
+ nv30->dirty |= NV30_NEW_FRAGPROG;
+}
+
+static void
+nv30_fp_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+ struct nv30_fragment_program *fp = hwcso;
+
+ nv30_fragprog_destroy(nv30, fp);
+ FREE((void*)fp->pipe.tokens);
+ FREE(fp);
+}
+
+static void
+nv30_set_blend_color(struct pipe_context *pipe,
+ const struct pipe_blend_color *bcol)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+
+ nv30->blend_colour = *bcol;
+ nv30->dirty |= NV30_NEW_BCOL;
+}
+
+static void
+nv30_set_clip_state(struct pipe_context *pipe,
+ const struct pipe_clip_state *clip)
+{
+}
+
+static void
+nv30_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
+ const struct pipe_constant_buffer *buf )
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+
+ nv30->constbuf[shader] = buf->buffer;
+ nv30->constbuf_nr[shader] = buf->buffer->size / (4 * sizeof(float));
+
+ if (shader == PIPE_SHADER_VERTEX) {
+ nv30->dirty |= NV30_NEW_VERTPROG;
+ } else
+ if (shader == PIPE_SHADER_FRAGMENT) {
+ nv30->dirty |= NV30_NEW_FRAGPROG;
+ }
+}
+
+static void
+nv30_set_framebuffer_state(struct pipe_context *pipe,
+ const struct pipe_framebuffer_state *fb)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+
+ nv30->framebuffer = *fb;
+ nv30->dirty |= NV30_NEW_FB;
+}
+
+static void
+nv30_set_polygon_stipple(struct pipe_context *pipe,
+ const struct pipe_poly_stipple *stipple)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+
+ memcpy(nv30->stipple, stipple->stipple, 4 * 32);
+ nv30->dirty |= NV30_NEW_STIPPLE;
+}
+
+static void
+nv30_set_scissor_state(struct pipe_context *pipe,
+ const struct pipe_scissor_state *s)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+
+ nv30->scissor = *s;
+ nv30->dirty |= NV30_NEW_SCISSOR;
+}
+
+static void
+nv30_set_viewport_state(struct pipe_context *pipe,
+ const struct pipe_viewport_state *vpt)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+
+ nv30->viewport = *vpt;
+ nv30->dirty |= NV30_NEW_VIEWPORT;
+ /*nv30->draw_dirty |= NV30_NEW_VIEWPORT;*/
+}
+
+static void
+nv30_set_vertex_buffers(struct pipe_context *pipe, unsigned count,
+ const struct pipe_vertex_buffer *vb)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+
+ memcpy(nv30->vtxbuf, vb, sizeof(*vb) * count);
+ nv30->vtxbuf_nr = count;
+
+ nv30->dirty |= NV30_NEW_ARRAYS;
+ /*nv30->draw_dirty |= NV30_NEW_ARRAYS;*/
+}
+
+static void
+nv30_set_vertex_elements(struct pipe_context *pipe, unsigned count,
+ const struct pipe_vertex_element *ve)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+
+ memcpy(nv30->vtxelt, ve, sizeof(*ve) * count);
+ nv30->vtxelt_nr = count;
+
+ nv30->dirty |= NV30_NEW_ARRAYS;
+ /*nv30->draw_dirty |= NV30_NEW_ARRAYS;*/
+}
+
+static void
+nv30_set_edgeflags(struct pipe_context *pipe, const unsigned *bitfield)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+
+ nv30->edgeflags = bitfield;
+ nv30->dirty |= NV30_NEW_ARRAYS;
+ /*nv30->draw_dirty |= NV30_NEW_ARRAYS;*/
+}
+
+void
+nv30_init_state_functions(struct nv30_context *nv30)
+{
+ nv30->pipe.create_blend_state = nv30_blend_state_create;
+ nv30->pipe.bind_blend_state = nv30_blend_state_bind;
+ nv30->pipe.delete_blend_state = nv30_blend_state_delete;
+
+ nv30->pipe.create_sampler_state = nv30_sampler_state_create;
+ nv30->pipe.bind_sampler_states = nv30_sampler_state_bind;
+ nv30->pipe.delete_sampler_state = nv30_sampler_state_delete;
+ nv30->pipe.set_sampler_textures = nv30_set_sampler_texture;
+
+ nv30->pipe.create_rasterizer_state = nv30_rasterizer_state_create;
+ nv30->pipe.bind_rasterizer_state = nv30_rasterizer_state_bind;
+ nv30->pipe.delete_rasterizer_state = nv30_rasterizer_state_delete;
+
+ nv30->pipe.create_depth_stencil_alpha_state =
+ nv30_depth_stencil_alpha_state_create;
+ nv30->pipe.bind_depth_stencil_alpha_state =
+ nv30_depth_stencil_alpha_state_bind;
+ nv30->pipe.delete_depth_stencil_alpha_state =
+ nv30_depth_stencil_alpha_state_delete;
+
+ nv30->pipe.create_vs_state = nv30_vp_state_create;
+ nv30->pipe.bind_vs_state = nv30_vp_state_bind;
+ nv30->pipe.delete_vs_state = nv30_vp_state_delete;
+
+ nv30->pipe.create_fs_state = nv30_fp_state_create;
+ nv30->pipe.bind_fs_state = nv30_fp_state_bind;
+ nv30->pipe.delete_fs_state = nv30_fp_state_delete;
+
+ nv30->pipe.set_blend_color = nv30_set_blend_color;
+ nv30->pipe.set_clip_state = nv30_set_clip_state;
+ nv30->pipe.set_constant_buffer = nv30_set_constant_buffer;
+ nv30->pipe.set_framebuffer_state = nv30_set_framebuffer_state;
+ nv30->pipe.set_polygon_stipple = nv30_set_polygon_stipple;
+ nv30->pipe.set_scissor_state = nv30_set_scissor_state;
+ nv30->pipe.set_viewport_state = nv30_set_viewport_state;
+
+ nv30->pipe.set_edgeflags = nv30_set_edgeflags;
+ nv30->pipe.set_vertex_buffers = nv30_set_vertex_buffers;
+ nv30->pipe.set_vertex_elements = nv30_set_vertex_elements;
+}
+
diff --git a/src/gallium/drivers/nv30/nv30_state.h b/src/gallium/drivers/nv30/nv30_state.h
new file mode 100644
index 0000000000..e6f23bf166
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_state.h
@@ -0,0 +1,85 @@
+#ifndef __NV30_STATE_H__
+#define __NV30_STATE_H__
+
+#include "pipe/p_state.h"
+#include "tgsi/tgsi_scan.h"
+
+struct nv30_sampler_state {
+ uint32_t fmt;
+ uint32_t wrap;
+ uint32_t en;
+ uint32_t filt;
+ uint32_t bcol;
+};
+
+struct nv30_vertex_program_exec {
+ uint32_t data[4];
+ boolean has_branch_offset;
+ int const_index;
+};
+
+struct nv30_vertex_program_data {
+ int index; /* immediates == -1 */
+ float value[4];
+};
+
+struct nv30_vertex_program {
+ struct pipe_shader_state pipe;
+
+ boolean translated;
+
+ struct nv30_vertex_program_exec *insns;
+ unsigned nr_insns;
+ struct nv30_vertex_program_data *consts;
+ unsigned nr_consts;
+
+ struct nouveau_resource *exec;
+ unsigned exec_start;
+ struct nouveau_resource *data;
+ unsigned data_start;
+ unsigned data_start_min;
+
+ uint32_t ir;
+ uint32_t or;
+ struct nouveau_stateobj *so;
+};
+
+struct nv30_fragment_program_data {
+ unsigned offset;
+ unsigned index;
+};
+
+struct nv30_fragment_program {
+ struct pipe_shader_state pipe;
+ struct tgsi_shader_info info;
+
+ boolean translated;
+ boolean on_hw;
+ unsigned samplers;
+
+ uint32_t *insn;
+ int insn_len;
+
+ struct nv30_fragment_program_data *consts;
+ unsigned nr_consts;
+
+ struct pipe_buffer *buffer;
+
+ uint32_t fp_control;
+ uint32_t fp_reg_control;
+ struct nouveau_stateobj *so;
+};
+
+struct nv30_miptree {
+ struct pipe_texture base;
+
+ struct pipe_buffer *buffer;
+ uint total_size;
+
+ struct {
+ uint pitch;
+ uint *image_offset;
+ } level[PIPE_MAX_TEXTURE_LEVELS];
+};
+
+#endif
diff --git a/src/gallium/drivers/nv30/nv30_state_blend.c b/src/gallium/drivers/nv30/nv30_state_blend.c
new file mode 100644
index 0000000000..64cf9ae93a
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_state_blend.c
@@ -0,0 +1,41 @@
+#include "nv30_context.h"
+
+static boolean
+nv30_state_blend_validate(struct nv30_context *nv30)
+{
+ so_ref(nv30->blend->so, &nv30->state.hw[NV30_STATE_BLEND]);
+ return TRUE;
+}
+
+struct nv30_state_entry nv30_state_blend = {
+ .validate = nv30_state_blend_validate,
+ .dirty = {
+ .pipe = NV30_NEW_BLEND,
+ .hw = NV30_STATE_BLEND
+ }
+};
+
+static boolean
+nv30_state_blend_colour_validate(struct nv30_context *nv30)
+{
+ struct nouveau_stateobj *so = so_new(2, 0);
+ struct pipe_blend_color *bcol = &nv30->blend_colour;
+
+ so_method(so, nv30->screen->rankine, NV34TCL_BLEND_COLOR, 1);
+ so_data (so, ((float_to_ubyte(bcol->color[3]) << 24) |
+ (float_to_ubyte(bcol->color[0]) << 16) |
+ (float_to_ubyte(bcol->color[1]) << 8) |
+ (float_to_ubyte(bcol->color[2]) << 0)));
+
+ so_ref(so, &nv30->state.hw[NV30_STATE_BCOL]);
+ so_ref(NULL, &so);
+ return TRUE;
+}
+
+struct nv30_state_entry nv30_state_blend_colour = {
+ .validate = nv30_state_blend_colour_validate,
+ .dirty = {
+ .pipe = NV30_NEW_BCOL,
+ .hw = NV30_STATE_BCOL
+ }
+};
diff --git a/src/gallium/drivers/nv30/nv30_state_emit.c b/src/gallium/drivers/nv30/nv30_state_emit.c
new file mode 100644
index 0000000000..621b8846c8
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_state_emit.c
@@ -0,0 +1,111 @@
+#include "nv30_context.h"
+#include "nv30_state.h"
+
+static struct nv30_state_entry *render_states[] = {
+ &nv30_state_framebuffer,
+ &nv30_state_rasterizer,
+ &nv30_state_scissor,
+ &nv30_state_stipple,
+ &nv30_state_fragprog,
+ &nv30_state_fragtex,
+ &nv30_state_vertprog,
+ &nv30_state_blend,
+ &nv30_state_blend_colour,
+ &nv30_state_zsa,
+ &nv30_state_viewport,
+ &nv30_state_vbo,
+ NULL
+};
+
+static void
+nv30_state_do_validate(struct nv30_context *nv30,
+ struct nv30_state_entry **states)
+{
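+ /* Walk the NULL-terminated list of state entries; each entry whose
+ * pipe-level dirty bits intersect nv30->dirty is revalidated and its
+ * hardware state slot flagged for emission.
+ */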
+ while (*states) {
+ struct nv30_state_entry *e = *states;
+
+ if (nv30->dirty & e->dirty.pipe) {
+ if (e->validate(nv30)) {
+ nv30->state.dirty |= (1ULL << e->dirty.hw);
+ }
+ }
+
+ states++;
+ }
+ nv30->dirty = 0;
+}
+
+void
+nv30_state_emit(struct nv30_context *nv30)
+{
+ struct nouveau_channel *chan = nv30->screen->base.channel;
+ struct nv30_state *state = &nv30->state;
+ struct nv30_screen *screen = nv30->screen;
+ unsigned i, samplers;
+ uint64_t states;
+
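+ /* If another pipe context was emitted last, any cached state object
+ * that differs from what the screen currently has programmed must be
+ * re-sent for this context.
+ */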
+ if (nv30->pctx_id != screen->cur_pctx) {
+ for (i = 0; i < NV30_STATE_MAX; i++) {
+ if (state->hw[i] && screen->state[i] != state->hw[i])
+ state->dirty |= (1ULL << i);
+ }
+
+ screen->cur_pctx = nv30->pctx_id;
+ }
+
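+ /* Emit every state object whose dirty bit is set, and remember it as
+ * the screen's currently programmed object for that slot.
+ */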
+ for (i = 0, states = state->dirty; states; i++) {
+ if (!(states & (1ULL << i)))
+ continue;
+ so_ref (state->hw[i], &nv30->screen->state[i]);
+ if (state->hw[i])
+ so_emit(chan, nv30->screen->state[i]);
+ states &= ~(1ULL << i);
+ }
+
+ state->dirty = 0;
+
+ so_emit_reloc_markers(chan, state->hw[NV30_STATE_FB]);
+ for (i = 0, samplers = state->fp_samplers; i < 16 && samplers; i++) {
+ if (!(samplers & (1 << i)))
+ continue;
+ so_emit_reloc_markers(chan,
+ state->hw[NV30_STATE_FRAGTEX0+i]);
+ samplers &= ~(1ULL << i);
+ }
+ so_emit_reloc_markers(chan, state->hw[NV30_STATE_FRAGPROG]);
+ if (state->hw[NV30_STATE_VTXBUF] /*&& nv30->render_mode == HW*/)
+ so_emit_reloc_markers(chan, state->hw[NV30_STATE_VTXBUF]);
+}
+
+boolean
+nv30_state_validate(struct nv30_context *nv30)
+{
+#if 0
+ boolean was_sw = nv30->fallback_swtnl ? TRUE : FALSE;
+
+ if (nv30->render_mode != HW) {
+ /* Don't even bother trying to go back to hw if none
+ * of the states that caused swtnl previously have changed.
+ */
+ if ((nv30->fallback_swtnl & nv30->dirty)
+ != nv30->fallback_swtnl)
+ return FALSE;
+
+ /* Attempt to go to hwtnl again */
+ nv30->pipe.flush(&nv30->pipe, 0, NULL);
+ nv30->dirty |= (NV30_NEW_VIEWPORT |
+ NV30_NEW_VERTPROG |
+ NV30_NEW_ARRAYS);
+ nv30->render_mode = HW;
+ }
+#endif
+ nv30_state_do_validate(nv30, render_states);
+#if 0
+ if (nv30->fallback_swtnl || nv30->fallback_swrast)
+ return FALSE;
+
+ if (was_sw)
+ NOUVEAU_ERR("swtnl->hw\n");
+#endif
+ return TRUE;
+}
diff --git a/src/gallium/drivers/nv30/nv30_state_fb.c b/src/gallium/drivers/nv30/nv30_state_fb.c
new file mode 100644
index 0000000000..44b6a74715
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_state_fb.c
@@ -0,0 +1,145 @@
+#include "nv30_context.h"
+#include "nouveau/nouveau_util.h"
+
+static boolean
+nv30_state_framebuffer_validate(struct nv30_context *nv30)
+{
+ struct pipe_framebuffer_state *fb = &nv30->framebuffer;
+ struct nouveau_channel *chan = nv30->screen->base.channel;
+ struct nouveau_grobj *rankine = nv30->screen->rankine;
+ struct nv04_surface *rt[2], *zeta = NULL;
+ uint32_t rt_enable, rt_format;
+ int i, colour_format = 0, zeta_format = 0;
+ struct nouveau_stateobj *so = so_new(64, 10);
+ unsigned rt_flags = NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM;
+ unsigned w = fb->width;
+ unsigned h = fb->height;
+ struct nv30_miptree *nv30mt;
+
+ rt_enable = 0;
+ for (i = 0; i < fb->nr_cbufs; i++) {
+ if (colour_format) {
+ assert(colour_format == fb->cbufs[i]->format);
+ } else {
+ colour_format = fb->cbufs[i]->format;
+ }
+
+ rt_enable |= (NV34TCL_RT_ENABLE_COLOR0 << i);
+ rt[i] = (struct nv04_surface *)fb->cbufs[i];
+ }
+
+ if (rt_enable & NV34TCL_RT_ENABLE_COLOR1)
+ rt_enable |= NV34TCL_RT_ENABLE_MRT;
+
+ if (fb->zsbuf) {
+ zeta_format = fb->zsbuf->format;
+ zeta = (struct nv04_surface *)fb->zsbuf;
+ }
+
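+ /* Swizzled render targets must be power-of-two sized and encode their
+ * log2 dimensions in the RT format word; linear targets simply use the
+ * pitches programmed below.
+ */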
+ if (!(rt[0]->base.texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)) {
+ assert(!(fb->width & (fb->width - 1)) && !(fb->height & (fb->height - 1)));
+ for (i = 1; i < fb->nr_cbufs; i++)
+ assert(!(rt[i]->base.texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR));
+
+ /* FIXME: NV34TCL_RT_FORMAT_LOG2_[WIDTH/HEIGHT] */
+ rt_format = NV34TCL_RT_FORMAT_TYPE_SWIZZLED |
+ log2i(fb->width) << 16 /*NV34TCL_RT_FORMAT_LOG2_WIDTH_SHIFT*/ |
+ log2i(fb->height) << 24 /*NV34TCL_RT_FORMAT_LOG2_HEIGHT_SHIFT*/;
+ }
+ else
+ rt_format = NV34TCL_RT_FORMAT_TYPE_LINEAR;
+
+ switch (colour_format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case 0:
+ rt_format |= NV34TCL_RT_FORMAT_COLOR_A8R8G8B8;
+ break;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ rt_format |= NV34TCL_RT_FORMAT_COLOR_R5G6B5;
+ break;
+ default:
+ assert(0);
+ }
+
+ switch (zeta_format) {
+ case PIPE_FORMAT_Z16_UNORM:
+ rt_format |= NV34TCL_RT_FORMAT_ZETA_Z16;
+ break;
+ case PIPE_FORMAT_Z24S8_UNORM:
+ case PIPE_FORMAT_Z24X8_UNORM:
+ case 0:
+ rt_format |= NV34TCL_RT_FORMAT_ZETA_Z24S8;
+ break;
+ default:
+ assert(0);
+ }
+
+ if (rt_enable & NV34TCL_RT_ENABLE_COLOR0) {
+ uint32_t pitch = rt[0]->pitch;
+ if (zeta) {
+ pitch |= (zeta->pitch << 16);
+ } else {
+ pitch |= (pitch << 16);
+ }
+
+ nv30mt = (struct nv30_miptree *)rt[0]->base.texture;
+ so_method(so, rankine, NV34TCL_DMA_COLOR0, 1);
+ so_reloc (so, nouveau_bo(nv30mt->buffer), 0, rt_flags | NOUVEAU_BO_OR,
+ chan->vram->handle, chan->gart->handle);
+ so_method(so, rankine, NV34TCL_COLOR0_PITCH, 2);
+ so_data (so, pitch);
+ so_reloc (so, nouveau_bo(nv30mt->buffer), rt[0]->base.offset,
+ rt_flags | NOUVEAU_BO_LOW, 0, 0);
+ }
+
+ if (rt_enable & NV34TCL_RT_ENABLE_COLOR1) {
+ nv30mt = (struct nv30_miptree *)rt[1]->base.texture;
+ so_method(so, rankine, NV34TCL_DMA_COLOR1, 1);
+ so_reloc (so, nouveau_bo(nv30mt->buffer), 0, rt_flags | NOUVEAU_BO_OR,
+ chan->vram->handle, chan->gart->handle);
+ so_method(so, rankine, NV34TCL_COLOR1_OFFSET, 2);
+ so_reloc (so, nouveau_bo(nv30mt->buffer), rt[1]->base.offset,
+ rt_flags | NOUVEAU_BO_LOW, 0, 0);
+ so_data (so, rt[1]->pitch);
+ }
+
+ if (zeta_format) {
+ nv30mt = (struct nv30_miptree *)zeta->base.texture;
+ so_method(so, rankine, NV34TCL_DMA_ZETA, 1);
+ so_reloc (so, nouveau_bo(nv30mt->buffer), 0, rt_flags | NOUVEAU_BO_OR,
+ chan->vram->handle, chan->gart->handle);
+ so_method(so, rankine, NV34TCL_ZETA_OFFSET, 1);
+ so_reloc (so, nouveau_bo(nv30mt->buffer), zeta->base.offset,
+ rt_flags | NOUVEAU_BO_LOW, 0, 0);
+ /* TODO: allocate LMA depth buffer */
+ }
+
+ so_method(so, rankine, NV34TCL_RT_ENABLE, 1);
+ so_data (so, rt_enable);
+ so_method(so, rankine, NV34TCL_RT_HORIZ, 3);
+ so_data (so, (w << 16) | 0);
+ so_data (so, (h << 16) | 0);
+ so_data (so, rt_format);
+ so_method(so, rankine, NV34TCL_VIEWPORT_HORIZ, 2);
+ so_data (so, (w << 16) | 0);
+ so_data (so, (h << 16) | 0);
+ so_method(so, rankine, NV34TCL_VIEWPORT_CLIP_HORIZ(0), 2);
+ so_data (so, ((w - 1) << 16) | 0);
+ so_data (so, ((h - 1) << 16) | 0);
+ so_method(so, rankine, 0x1d88, 1);
+ so_data (so, (1 << 12) | h);
+ /* Unclear why this is needed; the context should already be zeroed on init */
+ so_method(so, rankine, NV34TCL_VIEWPORT_TX_ORIGIN, 1);
+ so_data (so, 0);
+
+ so_ref(so, &nv30->state.hw[NV30_STATE_FB]);
+ so_ref(NULL, &so);
+ return TRUE;
+}
+
+struct nv30_state_entry nv30_state_framebuffer = {
+ .validate = nv30_state_framebuffer_validate,
+ .dirty = {
+ .pipe = NV30_NEW_FB,
+ .hw = NV30_STATE_FB
+ }
+};
diff --git a/src/gallium/drivers/nv30/nv30_state_rasterizer.c b/src/gallium/drivers/nv30/nv30_state_rasterizer.c
new file mode 100644
index 0000000000..6d1b60e043
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_state_rasterizer.c
@@ -0,0 +1,17 @@
+#include "nv30_context.h"
+
+static boolean
+nv30_state_rasterizer_validate(struct nv30_context *nv30)
+{
+ so_ref(nv30->rasterizer->so,
+ &nv30->state.hw[NV30_STATE_RAST]);
+ return TRUE;
+}
+
+struct nv30_state_entry nv30_state_rasterizer = {
+ .validate = nv30_state_rasterizer_validate,
+ .dirty = {
+ .pipe = NV30_NEW_RAST,
+ .hw = NV30_STATE_RAST
+ }
+};
diff --git a/src/gallium/drivers/nv30/nv30_state_scissor.c b/src/gallium/drivers/nv30/nv30_state_scissor.c
new file mode 100644
index 0000000000..3ac7a8471e
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_state_scissor.c
@@ -0,0 +1,36 @@
+#include "nv30_context.h"
+
+static boolean
+nv30_state_scissor_validate(struct nv30_context *nv30)
+{
+ struct pipe_rasterizer_state *rast = &nv30->rasterizer->pipe;
+ struct pipe_scissor_state *s = &nv30->scissor;
+ struct nouveau_stateobj *so;
+
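+ /* If scissoring stays disabled and a state object already exists,
+ * keep the cached one and skip revalidation.
+ */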
+ if (nv30->state.hw[NV30_STATE_SCISSOR] &&
+ (rast->scissor == 0 && nv30->state.scissor_enabled == 0))
+ return FALSE;
+ nv30->state.scissor_enabled = rast->scissor;
+
+ so = so_new(3, 0);
+ so_method(so, nv30->screen->rankine, NV34TCL_SCISSOR_HORIZ, 2);
+ if (nv30->state.scissor_enabled) {
+ so_data (so, ((s->maxx - s->minx) << 16) | s->minx);
+ so_data (so, ((s->maxy - s->miny) << 16) | s->miny);
+ } else {
+ so_data (so, 4096 << 16);
+ so_data (so, 4096 << 16);
+ }
+
+ so_ref(so, &nv30->state.hw[NV30_STATE_SCISSOR]);
+ so_ref(NULL, &so);
+ return TRUE;
+}
+
+struct nv30_state_entry nv30_state_scissor = {
+ .validate = nv30_state_scissor_validate,
+ .dirty = {
+ .pipe = NV30_NEW_SCISSOR | NV30_NEW_RAST,
+ .hw = NV30_STATE_SCISSOR
+ }
+};
diff --git a/src/gallium/drivers/nv30/nv30_state_stipple.c b/src/gallium/drivers/nv30/nv30_state_stipple.c
new file mode 100644
index 0000000000..d0c791ac08
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_state_stipple.c
@@ -0,0 +1,40 @@
+#include "nv30_context.h"
+
+static boolean
+nv30_state_stipple_validate(struct nv30_context *nv30)
+{
+ struct pipe_rasterizer_state *rast = &nv30->rasterizer->pipe;
+ struct nouveau_grobj *rankine = nv30->screen->rankine;
+ struct nouveau_stateobj *so;
+
+ if (nv30->state.hw[NV30_STATE_STIPPLE] &&
+ (rast->poly_stipple_enable == 0 && nv30->state.stipple_enabled == 0))
+ return FALSE;
+
+ if (rast->poly_stipple_enable) {
+ unsigned i;
+
+ so = so_new(35, 0);
+ so_method(so, rankine, NV34TCL_POLYGON_STIPPLE_ENABLE, 1);
+ so_data (so, 1);
+ so_method(so, rankine, NV34TCL_POLYGON_STIPPLE_PATTERN(0), 32);
+ for (i = 0; i < 32; i++)
+ so_data(so, nv30->stipple[i]);
+ } else {
+ so = so_new(2, 0);
+ so_method(so, rankine, NV34TCL_POLYGON_STIPPLE_ENABLE, 1);
+ so_data (so, 0);
+ }
+
+ so_ref(so, &nv30->state.hw[NV30_STATE_STIPPLE]);
+ so_ref(NULL, &so);
+ return TRUE;
+}
+
+struct nv30_state_entry nv30_state_stipple = {
+ .validate = nv30_state_stipple_validate,
+ .dirty = {
+ .pipe = NV30_NEW_STIPPLE | NV30_NEW_RAST,
+ .hw = NV30_STATE_STIPPLE,
+ }
+};
diff --git a/src/gallium/drivers/nv30/nv30_state_viewport.c b/src/gallium/drivers/nv30/nv30_state_viewport.c
new file mode 100644
index 0000000000..c3eb413dac
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_state_viewport.c
@@ -0,0 +1,72 @@
+#include "nv30_context.h"
+
+static boolean
+nv30_state_viewport_validate(struct nv30_context *nv30)
+{
+ struct pipe_viewport_state *vpt = &nv30->viewport;
+ struct nouveau_stateobj *so;
+ unsigned bypass;
+
+ if (/*nv30->render_mode == HW &&*/
+ !nv30->rasterizer->pipe.bypass_vs_clip_and_viewport)
+ bypass = 0;
+ else
+ bypass = 1;
+
+ if (nv30->state.hw[NV30_STATE_VIEWPORT] &&
+ (bypass || !(nv30->dirty & NV30_NEW_VIEWPORT)) &&
+ nv30->state.viewport_bypass == bypass)
+ return FALSE;
+ nv30->state.viewport_bypass = bypass;
+
+ so = so_new(11, 0);
+ if (!bypass) {
+ so_method(so, nv30->screen->rankine,
+ NV34TCL_VIEWPORT_TRANSLATE_X, 8);
+ so_data (so, fui(vpt->translate[0]));
+ so_data (so, fui(vpt->translate[1]));
+ so_data (so, fui(vpt->translate[2]));
+ so_data (so, fui(vpt->translate[3]));
+ so_data (so, fui(vpt->scale[0]));
+ so_data (so, fui(vpt->scale[1]));
+ so_data (so, fui(vpt->scale[2]));
+ so_data (so, fui(vpt->scale[3]));
+/* so_method(so, nv30->screen->rankine, 0x1d78, 1);
+ so_data (so, 1);
+*/ } else {
+ so_method(so, nv30->screen->rankine,
+ NV34TCL_VIEWPORT_TRANSLATE_X, 8);
+ so_data (so, fui(0.0));
+ so_data (so, fui(0.0));
+ so_data (so, fui(0.0));
+ so_data (so, fui(0.0));
+ so_data (so, fui(1.0));
+ so_data (so, fui(1.0));
+ so_data (so, fui(1.0));
+ so_data (so, fui(0.0));
+ /* Not entirely certain what this is yet. The DDX also uses this
+ * value, as it fixes rendering when pre-transformed vertices are
+ * passed to the GPU. The best guess is that it bypasses some
+ * culling/clipping stage. Worth noting that points and lines are
+ * unaffected by whatever this value fixes; only filled polygons
+ * are affected.
+ */
+/* so_method(so, nv30->screen->rankine, 0x1d78, 1);
+ so_data (so, 0x110);
+*/ }
+ /* TODO/FIXME: never saw value 0x0110 in renouveau dumps, only 0x0001 */
+ so_method(so, nv30->screen->rankine, 0x1d78, 1);
+ so_data (so, 1);
+
+ so_ref(so, &nv30->state.hw[NV30_STATE_VIEWPORT]);
+ so_ref(NULL, &so);
+ return TRUE;
+}
+
+struct nv30_state_entry nv30_state_viewport = {
+ .validate = nv30_state_viewport_validate,
+ .dirty = {
+ .pipe = NV30_NEW_VIEWPORT | NV30_NEW_RAST,
+ .hw = NV30_STATE_VIEWPORT
+ }
+};
diff --git a/src/gallium/drivers/nv30/nv30_state_zsa.c b/src/gallium/drivers/nv30/nv30_state_zsa.c
new file mode 100644
index 0000000000..0940b7269b
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_state_zsa.c
@@ -0,0 +1,17 @@
+#include "nv30_context.h"
+
+static boolean
+nv30_state_zsa_validate(struct nv30_context *nv30)
+{
+ so_ref(nv30->zsa->so,
+ &nv30->state.hw[NV30_STATE_ZSA]);
+ return TRUE;
+}
+
+struct nv30_state_entry nv30_state_zsa = {
+ .validate = nv30_state_zsa_validate,
+ .dirty = {
+ .pipe = NV30_NEW_ZSA,
+ .hw = NV30_STATE_ZSA
+ }
+};
diff --git a/src/gallium/drivers/nv30/nv30_surface.c b/src/gallium/drivers/nv30/nv30_surface.c
new file mode 100644
index 0000000000..5e237e13eb
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_surface.c
@@ -0,0 +1,63 @@
+
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "nv30_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_inlines.h"
+#include "util/u_tile.h"
+
+static void
+nv30_surface_copy(struct pipe_context *pipe,
+ struct pipe_surface *dest, unsigned destx, unsigned desty,
+ struct pipe_surface *src, unsigned srcx, unsigned srcy,
+ unsigned width, unsigned height)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+ struct nv04_surface_2d *eng2d = nv30->screen->eng2d;
+
+ eng2d->copy(eng2d, dest, destx, desty, src, srcx, srcy, width, height);
+}
+
+static void
+nv30_surface_fill(struct pipe_context *pipe, struct pipe_surface *dest,
+ unsigned destx, unsigned desty, unsigned width,
+ unsigned height, unsigned value)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+ struct nv04_surface_2d *eng2d = nv30->screen->eng2d;
+
+ eng2d->fill(eng2d, dest, destx, desty, width, height, value);
+}
+
+void
+nv30_init_surface_functions(struct nv30_context *nv30)
+{
+ nv30->pipe.surface_copy = nv30_surface_copy;
+ nv30->pipe.surface_fill = nv30_surface_fill;
+}
diff --git a/src/gallium/drivers/nv30/nv30_transfer.c b/src/gallium/drivers/nv30/nv30_transfer.c
new file mode 100644
index 0000000000..2367571878
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_transfer.c
@@ -0,0 +1,195 @@
+#include <pipe/p_state.h>
+#include <pipe/p_defines.h>
+#include <pipe/p_inlines.h>
+#include <util/u_memory.h>
+#include <nouveau/nouveau_winsys.h>
+#include "nv30_context.h"
+#include "nv30_screen.h"
+#include "nv30_state.h"
+
+struct nv30_transfer {
+ struct pipe_transfer base;
+ struct pipe_surface *surface;
+ bool direct;
+};
+
+static unsigned nv30_usage_tx_to_buf(unsigned tx_usage)
+{
+ switch (tx_usage) {
+ case PIPE_TRANSFER_READ:
+ return PIPE_BUFFER_USAGE_CPU_READ;
+ case PIPE_TRANSFER_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_WRITE;
+ case PIPE_TRANSFER_READ_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+ default:
+ assert(0);
+ }
+
+ return -1;
+}
+
+static void
+nv30_compatible_transfer_tex(struct pipe_texture *pt, unsigned level,
+ struct pipe_texture *template)
+{
+ memset(template, 0, sizeof(struct pipe_texture));
+ template->target = pt->target;
+ template->format = pt->format;
+ template->width[0] = pt->width[level];
+ template->height[0] = pt->height[level];
+ template->depth[0] = 1;
+ template->block = pt->block;
+ template->nblocksx[0] = pt->nblocksx[level];
+ template->nblocksy[0] = pt->nblocksy[level];
+ template->last_level = 0;
+ template->nr_samples = pt->nr_samples;
+
+ template->tex_usage = PIPE_TEXTURE_USAGE_DYNAMIC |
+ NOUVEAU_TEXTURE_USAGE_LINEAR;
+}
+
+static struct pipe_transfer *
+nv30_transfer_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ enum pipe_transfer_usage usage,
+ unsigned x, unsigned y, unsigned w, unsigned h)
+{
+ struct nv30_miptree *mt = (struct nv30_miptree *)pt;
+ struct nv30_transfer *tx;
+ struct pipe_texture tx_tex_template, *tx_tex;
+
+ tx = CALLOC_STRUCT(nv30_transfer);
+ if (!tx)
+ return NULL;
+
+ pipe_texture_reference(&tx->base.texture, pt);
+ tx->base.format = pt->format;
+ tx->base.x = x;
+ tx->base.y = y;
+ tx->base.width = w;
+ tx->base.height = h;
+ tx->base.block = pt->block;
+ tx->base.nblocksx = pt->nblocksx[level];
+ tx->base.nblocksy = pt->nblocksy[level];
+ tx->base.stride = mt->level[level].pitch;
+ tx->base.usage = usage;
+ tx->base.face = face;
+ tx->base.level = level;
+ tx->base.zslice = zslice;
+
+ /* Direct access to texture */
+ if ((pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC ||
+ debug_get_bool_option("NOUVEAU_NO_TRANSFER", TRUE/*XXX:FALSE*/)) &&
+ pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)
+ {
+ tx->direct = true;
+ tx->surface = pscreen->get_tex_surface(pscreen, pt,
+ face, level, zslice,
+ nv30_usage_tx_to_buf(usage));
+ return &tx->base;
+ }
+
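+ /* Otherwise stage through a temporary linear texture; for transfers
+ * that read, the (possibly swizzled) source is first blitted into the
+ * staging surface with the 2D engine.
+ */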
+ tx->direct = false;
+
+ nv30_compatible_transfer_tex(pt, level, &tx_tex_template);
+
+ tx_tex = pscreen->texture_create(pscreen, &tx_tex_template);
+ if (!tx_tex)
+ {
+ FREE(tx);
+ return NULL;
+ }
+
+ tx->surface = pscreen->get_tex_surface(pscreen, tx_tex,
+ 0, 0, 0,
+ nv30_usage_tx_to_buf(usage));
+
+ pipe_texture_reference(&tx_tex, NULL);
+
+ if (!tx->surface)
+ {
+ pipe_surface_reference(&tx->surface, NULL);
+ FREE(tx);
+ return NULL;
+ }
+
+ if (usage != PIPE_TRANSFER_WRITE) {
+ struct nv30_screen *nvscreen = nv30_screen(pscreen);
+ struct pipe_surface *src;
+
+ src = pscreen->get_tex_surface(pscreen, pt,
+ face, level, zslice,
+ PIPE_BUFFER_USAGE_GPU_READ);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ /* TODO: Check if SIFM can un-swizzle */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ tx->surface, 0, 0,
+ src, 0, 0,
+ src->width, src->height);
+
+ pipe_surface_reference(&src, NULL);
+ }
+
+ return &tx->base;
+}
+
+static void
+nv30_transfer_del(struct pipe_transfer *ptx)
+{
+ struct nv30_transfer *tx = (struct nv30_transfer *)ptx;
+
+ if (!tx->direct && ptx->usage != PIPE_TRANSFER_READ) {
+ struct pipe_screen *pscreen = ptx->texture->screen;
+ struct nv30_screen *nvscreen = nv30_screen(pscreen);
+ struct pipe_surface *dst;
+
+ dst = pscreen->get_tex_surface(pscreen, ptx->texture,
+ ptx->face, ptx->level, ptx->zslice,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ dst, 0, 0,
+ tx->surface, 0, 0,
+ dst->width, dst->height);
+
+ pipe_surface_reference(&dst, NULL);
+ }
+
+ pipe_surface_reference(&tx->surface, NULL);
+ pipe_texture_reference(&ptx->texture, NULL);
+ FREE(ptx);
+}
+
+static void *
+nv30_transfer_map(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv30_transfer *tx = (struct nv30_transfer *)ptx;
+ struct nv04_surface *ns = (struct nv04_surface *)tx->surface;
+ struct nv30_miptree *mt = (struct nv30_miptree *)tx->surface->texture;
+ void *map = pipe_buffer_map(pscreen, mt->buffer,
+ nv30_usage_tx_to_buf(ptx->usage));
+
+ return map + ns->base.offset +
+ ptx->y * ns->pitch + ptx->x * ptx->block.size;
+}
+
+static void
+nv30_transfer_unmap(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv30_transfer *tx = (struct nv30_transfer *)ptx;
+ struct nv30_miptree *mt = (struct nv30_miptree *)tx->surface->texture;
+
+ pipe_buffer_unmap(pscreen, mt->buffer);
+}
+
+void
+nv30_screen_init_transfer_functions(struct pipe_screen *pscreen)
+{
+ pscreen->get_tex_transfer = nv30_transfer_new;
+ pscreen->tex_transfer_destroy = nv30_transfer_del;
+ pscreen->transfer_map = nv30_transfer_map;
+ pscreen->transfer_unmap = nv30_transfer_unmap;
+}
diff --git a/src/gallium/drivers/nv30/nv30_vbo.c b/src/gallium/drivers/nv30/nv30_vbo.c
new file mode 100644
index 0000000000..189656ec81
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_vbo.c
@@ -0,0 +1,561 @@
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+
+#include "nv30_context.h"
+#include "nv30_state.h"
+
+#include "nouveau/nouveau_channel.h"
+#include "nouveau/nouveau_pushbuf.h"
+#include "nouveau/nouveau_util.h"
+
+#define FORCE_SWTNL 0
+
+static INLINE int
+nv30_vbo_format_to_hw(enum pipe_format pipe, unsigned *fmt, unsigned *ncomp)
+{
+ switch (pipe) {
+ case PIPE_FORMAT_R32_FLOAT:
+ case PIPE_FORMAT_R32G32_FLOAT:
+ case PIPE_FORMAT_R32G32B32_FLOAT:
+ case PIPE_FORMAT_R32G32B32A32_FLOAT:
+ *fmt = NV34TCL_VTXFMT_TYPE_FLOAT;
+ break;
+ case PIPE_FORMAT_R8_UNORM:
+ case PIPE_FORMAT_R8G8_UNORM:
+ case PIPE_FORMAT_R8G8B8_UNORM:
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ *fmt = NV34TCL_VTXFMT_TYPE_UBYTE;
+ break;
+ case PIPE_FORMAT_R16_SSCALED:
+ case PIPE_FORMAT_R16G16_SSCALED:
+ case PIPE_FORMAT_R16G16B16_SSCALED:
+ case PIPE_FORMAT_R16G16B16A16_SSCALED:
+ *fmt = NV34TCL_VTXFMT_TYPE_USHORT;
+ break;
+ default:
+ NOUVEAU_ERR("Unknown format %s\n", pf_name(pipe));
+ return 1;
+ }
+
+ switch (pipe) {
+ case PIPE_FORMAT_R8_UNORM:
+ case PIPE_FORMAT_R32_FLOAT:
+ case PIPE_FORMAT_R16_SSCALED:
+ *ncomp = 1;
+ break;
+ case PIPE_FORMAT_R8G8_UNORM:
+ case PIPE_FORMAT_R32G32_FLOAT:
+ case PIPE_FORMAT_R16G16_SSCALED:
+ *ncomp = 2;
+ break;
+ case PIPE_FORMAT_R8G8B8_UNORM:
+ case PIPE_FORMAT_R32G32B32_FLOAT:
+ case PIPE_FORMAT_R16G16B16_SSCALED:
+ *ncomp = 3;
+ break;
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ case PIPE_FORMAT_R32G32B32A32_FLOAT:
+ case PIPE_FORMAT_R16G16B16A16_SSCALED:
+ *ncomp = 4;
+ break;
+ default:
+ NOUVEAU_ERR("Unknown format %s\n", pf_name(pipe));
+ return 1;
+ }
+
+ return 0;
+}
+
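+/* Decide whether the given index buffer can be fed to the hardware
+ * directly. Returns FALSE when the caller should push indices inline
+ * instead (no buffer, no hardware idxbuf support, or 8-bit indices).
+ */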
+static boolean
+nv30_vbo_set_idxbuf(struct nv30_context *nv30, struct pipe_buffer *ib,
+ unsigned ib_size)
+{
+ struct pipe_screen *pscreen = &nv30->screen->base.base;
+ unsigned type;
+
+ if (!ib) {
+ nv30->idxbuf = NULL;
+ nv30->idxbuf_format = 0xdeadbeef;
+ return FALSE;
+ }
+
+ if (!pscreen->get_param(pscreen, NOUVEAU_CAP_HW_IDXBUF) || ib_size == 1)
+ return FALSE;
+
+ switch (ib_size) {
+ case 2:
+ type = NV34TCL_IDXBUF_FORMAT_TYPE_U16;
+ break;
+ case 4:
+ type = NV34TCL_IDXBUF_FORMAT_TYPE_U32;
+ break;
+ default:
+ return FALSE;
+ }
+
+ if (ib != nv30->idxbuf ||
+ type != nv30->idxbuf_format) {
+ nv30->dirty |= NV30_NEW_ARRAYS;
+ nv30->idxbuf = ib;
+ nv30->idxbuf_format = type;
+ }
+
+ return TRUE;
+}
+
+static boolean
+nv30_vbo_static_attrib(struct nv30_context *nv30, struct nouveau_stateobj *so,
+ int attrib, struct pipe_vertex_element *ve,
+ struct pipe_vertex_buffer *vb)
+{
+ struct pipe_screen *pscreen = nv30->pipe.screen;
+ struct nouveau_grobj *rankine = nv30->screen->rankine;
+ unsigned type, ncomp;
+ void *map;
+
+ if (nv30_vbo_format_to_hw(ve->src_format, &type, &ncomp))
+ return FALSE;
+
+ map = pipe_buffer_map(pscreen, vb->buffer, PIPE_BUFFER_USAGE_CPU_READ);
+ map += vb->buffer_offset + ve->src_offset;
+
+ switch (type) {
+ case NV34TCL_VTXFMT_TYPE_FLOAT:
+ {
+ float *v = map;
+
+ switch (ncomp) {
+ case 4:
+ so_method(so, rankine, NV34TCL_VTX_ATTR_4F_X(attrib), 4);
+ so_data (so, fui(v[0]));
+ so_data (so, fui(v[1]));
+ so_data (so, fui(v[2]));
+ so_data (so, fui(v[3]));
+ break;
+ case 3:
+ so_method(so, rankine, NV34TCL_VTX_ATTR_3F_X(attrib), 3);
+ so_data (so, fui(v[0]));
+ so_data (so, fui(v[1]));
+ so_data (so, fui(v[2]));
+ break;
+ case 2:
+ so_method(so, rankine, NV34TCL_VTX_ATTR_2F_X(attrib), 2);
+ so_data (so, fui(v[0]));
+ so_data (so, fui(v[1]));
+ break;
+ case 1:
+ so_method(so, rankine, NV34TCL_VTX_ATTR_1F(attrib), 1);
+ so_data (so, fui(v[0]));
+ break;
+ default:
+ pipe_buffer_unmap(pscreen, vb->buffer);
+ return FALSE;
+ }
+ }
+ break;
+ default:
+ pipe_buffer_unmap(pscreen, vb->buffer);
+ return FALSE;
+ }
+
+ pipe_buffer_unmap(pscreen, vb->buffer);
+ return TRUE;
+}
+
+boolean
+nv30_draw_arrays(struct pipe_context *pipe,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+ struct nouveau_channel *chan = nv30->screen->base.channel;
+ unsigned restart = 0;
+
+ nv30_vbo_set_idxbuf(nv30, NULL, 0);
+ if (FORCE_SWTNL || !nv30_state_validate(nv30)) {
+ /*return nv30_draw_elements_swtnl(pipe, NULL, 0,
+ mode, start, count);*/
+ return FALSE;
+ }
+
+ while (count) {
+ unsigned vc, nr;
+
+ nv30_state_emit(nv30);
+
+ vc = nouveau_vbuf_split(chan->pushbuf->remaining, 6, 256,
+ mode, start, count, &restart);
+ if (!vc) {
+ FIRE_RING(NULL);
+ continue;
+ }
+
+ BEGIN_RING(rankine, NV34TCL_VERTEX_BEGIN_END, 1);
+ OUT_RING (nvgl_primitive(mode));
+
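+ /* vc is the number of vertices emitted this iteration: the low 8 bits
+ * form a partial batch, the upper bits count full 256-vertex batches.
+ * Each VB_VERTEX_BATCH entry is ((count - 1) << 24) | start.
+ */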
+ nr = (vc & 0xff);
+ if (nr) {
+ BEGIN_RING(rankine, NV34TCL_VB_VERTEX_BATCH, 1);
+ OUT_RING (((nr - 1) << 24) | start);
+ start += nr;
+ }
+
+ nr = vc >> 8;
+ while (nr) {
+ unsigned push = nr > 2047 ? 2047 : nr;
+
+ nr -= push;
+
+ BEGIN_RING_NI(rankine, NV34TCL_VB_VERTEX_BATCH, push);
+ while (push--) {
+ OUT_RING(((0x100 - 1) << 24) | start);
+ start += 0x100;
+ }
+ }
+
+ BEGIN_RING(rankine, NV34TCL_VERTEX_BEGIN_END, 1);
+ OUT_RING (0);
+
+ count -= vc;
+ start = restart;
+ }
+
+ pipe->flush(pipe, 0, NULL);
+ return TRUE;
+}
+
+static INLINE void
+nv30_draw_elements_u08(struct nv30_context *nv30, void *ib,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct nouveau_channel *chan = nv30->screen->base.channel;
+
+ while (count) {
+ uint8_t *elts = (uint8_t *)ib + start;
+ unsigned vc, push, restart = 0;
+
+ nv30_state_emit(nv30);
+
+ vc = nouveau_vbuf_split(chan->pushbuf->remaining, 6, 2,
+ mode, start, count, &restart);
+ if (vc == 0) {
+ FIRE_RING(NULL);
+ continue;
+ }
+ count -= vc;
+
+ BEGIN_RING(rankine, NV34TCL_VERTEX_BEGIN_END, 1);
+ OUT_RING (nvgl_primitive(mode));
+
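+ /* U16 indices are packed two per dword below, so an odd first
+ * element is sent through the U32 method.
+ */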
+ if (vc & 1) {
+ BEGIN_RING(rankine, NV34TCL_VB_ELEMENT_U32, 1);
+ OUT_RING (elts[0]);
+ elts++; vc--;
+ }
+
+ while (vc) {
+ unsigned i;
+
+ push = MIN2(vc, 2047 * 2);
+
+ BEGIN_RING_NI(rankine, NV34TCL_VB_ELEMENT_U16, push >> 1);
+ for (i = 0; i < push; i+=2)
+ OUT_RING((elts[i+1] << 16) | elts[i]);
+
+ vc -= push;
+ elts += push;
+ }
+
+ BEGIN_RING(rankine, NV34TCL_VERTEX_BEGIN_END, 1);
+ OUT_RING (0);
+
+ start = restart;
+ }
+}
+
+static INLINE void
+nv30_draw_elements_u16(struct nv30_context *nv30, void *ib,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct nouveau_channel *chan = nv30->screen->base.channel;
+
+ while (count) {
+ uint16_t *elts = (uint16_t *)ib + start;
+ unsigned vc, push, restart = 0;
+
+ nv30_state_emit(nv30);
+
+ vc = nouveau_vbuf_split(chan->pushbuf->remaining, 6, 2,
+ mode, start, count, &restart);
+ if (vc == 0) {
+ FIRE_RING(NULL);
+ continue;
+ }
+ count -= vc;
+
+ BEGIN_RING(rankine, NV34TCL_VERTEX_BEGIN_END, 1);
+ OUT_RING (nvgl_primitive(mode));
+
+ if (vc & 1) {
+ BEGIN_RING(rankine, NV34TCL_VB_ELEMENT_U32, 1);
+ OUT_RING (elts[0]);
+ elts++; vc--;
+ }
+
+ while (vc) {
+ unsigned i;
+
+ push = MIN2(vc, 2047 * 2);
+
+ BEGIN_RING_NI(rankine, NV34TCL_VB_ELEMENT_U16, push >> 1);
+ for (i = 0; i < push; i+=2)
+ OUT_RING((elts[i+1] << 16) | elts[i]);
+
+ vc -= push;
+ elts += push;
+ }
+
+ BEGIN_RING(rankine, NV34TCL_VERTEX_BEGIN_END, 1);
+ OUT_RING (0);
+
+ start = restart;
+ }
+}
+
+static INLINE void
+nv30_draw_elements_u32(struct nv30_context *nv30, void *ib,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct nouveau_channel *chan = nv30->screen->base.channel;
+
+ while (count) {
+ uint32_t *elts = (uint32_t *)ib + start;
+ unsigned vc, push, restart = 0;
+
+ nv30_state_emit(nv30);
+
+ vc = nouveau_vbuf_split(chan->pushbuf->remaining, 5, 1,
+ mode, start, count, &restart);
+ if (vc == 0) {
+ FIRE_RING(NULL);
+ continue;
+ }
+ count -= vc;
+
+ BEGIN_RING(rankine, NV34TCL_VERTEX_BEGIN_END, 1);
+ OUT_RING (nvgl_primitive(mode));
+
+ while (vc) {
+ push = MIN2(vc, 2047);
+
+ BEGIN_RING_NI(rankine, NV34TCL_VB_ELEMENT_U32, push);
+ OUT_RINGp (elts, push);
+
+ vc -= push;
+ elts += push;
+ }
+
+ BEGIN_RING(rankine, NV34TCL_VERTEX_BEGIN_END, 1);
+ OUT_RING (0);
+
+ start = restart;
+ }
+}
+
+static boolean
+nv30_draw_elements_inline(struct pipe_context *pipe,
+ struct pipe_buffer *ib, unsigned ib_size,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+ struct pipe_screen *pscreen = pipe->screen;
+ void *map;
+
+ map = pipe_buffer_map(pscreen, ib, PIPE_BUFFER_USAGE_CPU_READ);
+ if (!map) {
+ NOUVEAU_ERR("failed mapping ib\n");
+ return FALSE;
+ }
+
+ switch (ib_size) {
+ case 1:
+ nv30_draw_elements_u08(nv30, map, mode, start, count);
+ break;
+ case 2:
+ nv30_draw_elements_u16(nv30, map, mode, start, count);
+ break;
+ case 4:
+ nv30_draw_elements_u32(nv30, map, mode, start, count);
+ break;
+ default:
+ NOUVEAU_ERR("invalid idxbuf fmt %d\n", ib_size);
+ break;
+ }
+
+ pipe_buffer_unmap(pscreen, ib);
+ return TRUE;
+}
+
+static boolean
+nv30_draw_elements_vbo(struct pipe_context *pipe,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+ struct nouveau_channel *chan = nv30->screen->base.channel;
+ unsigned restart = 0;
+
+ while (count) {
+ unsigned nr, vc;
+
+ nv30_state_emit(nv30);
+
+ vc = nouveau_vbuf_split(chan->pushbuf->remaining, 6, 256,
+ mode, start, count, &restart);
+ if (!vc) {
+ FIRE_RING(NULL);
+ continue;
+ }
+
+ BEGIN_RING(rankine, NV34TCL_VERTEX_BEGIN_END, 1);
+ OUT_RING (nvgl_primitive(mode));
+
+ nr = (vc & 0xff);
+ if (nr) {
+ BEGIN_RING(rankine, NV34TCL_VB_INDEX_BATCH, 1);
+ OUT_RING (((nr - 1) << 24) | start);
+ start += nr;
+ }
+
+ nr = vc >> 8;
+ while (nr) {
+ unsigned push = nr > 2047 ? 2047 : nr;
+
+ nr -= push;
+
+ BEGIN_RING_NI(rankine, NV34TCL_VB_INDEX_BATCH, push);
+ while (push--) {
+ OUT_RING(((0x100 - 1) << 24) | start);
+ start += 0x100;
+ }
+ }
+
+ BEGIN_RING(rankine, NV34TCL_VERTEX_BEGIN_END, 1);
+ OUT_RING (0);
+
+ count -= vc;
+ start = restart;
+ }
+
+ return TRUE;
+}
+
+boolean
+nv30_draw_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer, unsigned indexSize,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct nv30_context *nv30 = nv30_context(pipe);
+ boolean idxbuf;
+
+ idxbuf = nv30_vbo_set_idxbuf(nv30, indexBuffer, indexSize);
+ if (FORCE_SWTNL || !nv30_state_validate(nv30)) {
+ /*return nv30_draw_elements_swtnl(pipe, NULL, 0,
+ mode, start, count);*/
+ return FALSE;
+ }
+
+ if (idxbuf) {
+ nv30_draw_elements_vbo(pipe, mode, start, count);
+ } else {
+ nv30_draw_elements_inline(pipe, indexBuffer, indexSize,
+ mode, start, count);
+ }
+
+ pipe->flush(pipe, 0, NULL);
+ return TRUE;
+}
+
+static boolean
+nv30_vbo_validate(struct nv30_context *nv30)
+{
+ struct nouveau_stateobj *vtxbuf, *vtxfmt, *sattr = NULL;
+ struct nouveau_grobj *rankine = nv30->screen->rankine;
+ struct pipe_buffer *ib = nv30->idxbuf;
+ unsigned ib_format = nv30->idxbuf_format;
+ unsigned vb_flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD;
+ int hw;
+
+ if (nv30->edgeflags) {
+ /*nv30->fallback_swtnl |= NV30_NEW_ARRAYS;*/
+ return FALSE;
+ }
+
+ vtxbuf = so_new(20, 18);
+ so_method(vtxbuf, rankine, NV34TCL_VTXBUF_ADDRESS(0), nv30->vtxelt_nr);
+ vtxfmt = so_new(17, 0);
+ so_method(vtxfmt, rankine, NV34TCL_VTXFMT(0), nv30->vtxelt_nr);
+
+ for (hw = 0; hw < nv30->vtxelt_nr; hw++) {
+ struct pipe_vertex_element *ve;
+ struct pipe_vertex_buffer *vb;
+ unsigned type, ncomp;
+
+ ve = &nv30->vtxelt[hw];
+ vb = &nv30->vtxbuf[ve->vertex_buffer_index];
+
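+ /* A zero-stride buffer holds a single constant value; emit it as a
+ * static vertex attribute rather than a per-vertex array.
+ */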
+ if (!vb->stride) {
+ if (!sattr)
+ sattr = so_new(16 * 5, 0);
+
+ if (nv30_vbo_static_attrib(nv30, sattr, hw, ve, vb)) {
+ so_data(vtxbuf, 0);
+ so_data(vtxfmt, NV34TCL_VTXFMT_TYPE_FLOAT);
+ continue;
+ }
+ }
+
+ if (nv30_vbo_format_to_hw(ve->src_format, &type, &ncomp)) {
+ /*nv30->fallback_swtnl |= NV30_NEW_ARRAYS;*/
+ so_ref(NULL, &vtxbuf);
+ so_ref(NULL, &vtxfmt);
+ return FALSE;
+ }
+
+ so_reloc(vtxbuf, nouveau_bo(vb->buffer), vb->buffer_offset +
+ ve->src_offset, vb_flags | NOUVEAU_BO_LOW |
+ NOUVEAU_BO_OR, 0, NV34TCL_VTXBUF_ADDRESS_DMA1);
+ so_data (vtxfmt, ((vb->stride << NV34TCL_VTXFMT_STRIDE_SHIFT) |
+ (ncomp << NV34TCL_VTXFMT_SIZE_SHIFT) | type));
+ }
+
+ if (ib) {
+ struct nouveau_bo *bo = nouveau_bo(ib);
+
+ so_method(vtxbuf, rankine, NV34TCL_IDXBUF_ADDRESS, 2);
+ so_reloc (vtxbuf, bo, 0, vb_flags | NOUVEAU_BO_LOW, 0, 0);
+ so_reloc (vtxbuf, bo, ib_format, vb_flags | NOUVEAU_BO_OR,
+ 0, NV34TCL_IDXBUF_FORMAT_DMA1);
+ }
+
+ so_method(vtxbuf, rankine, 0x1710, 1);
+ so_data (vtxbuf, 0);
+
+ so_ref(vtxbuf, &nv30->state.hw[NV30_STATE_VTXBUF]);
+ so_ref(NULL, &vtxbuf);
+ nv30->state.dirty |= (1ULL << NV30_STATE_VTXBUF);
+ so_ref(vtxfmt, &nv30->state.hw[NV30_STATE_VTXFMT]);
+ so_ref(NULL, &vtxfmt);
+ nv30->state.dirty |= (1ULL << NV30_STATE_VTXFMT);
+ so_ref(sattr, &nv30->state.hw[NV30_STATE_VTXATTR]);
+ so_ref(NULL, &sattr);
+ nv30->state.dirty |= (1ULL << NV30_STATE_VTXATTR);
+ return FALSE;
+}
+
+struct nv30_state_entry nv30_state_vbo = {
+ .validate = nv30_vbo_validate,
+ .dirty = {
+ .pipe = NV30_NEW_ARRAYS,
+ .hw = 0,
+ }
+};
diff --git a/src/gallium/drivers/nv30/nv30_vertprog.c b/src/gallium/drivers/nv30/nv30_vertprog.c
new file mode 100644
index 0000000000..14a5c0260d
--- /dev/null
+++ b/src/gallium/drivers/nv30/nv30_vertprog.c
@@ -0,0 +1,837 @@
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_dump.h"
+
+#include "nv30_context.h"
+#include "nv30_state.h"
+
+/* TODO (at least...):
+ * 1. Indexed consts + ARL
+ * 2. Arb. swz/negation
+ * 3. NV_vp11, NV_vp2, NV_vp3 features
+ * - extra arith opcodes
+ * - branching
+ * - texture sampling
+ * - indexed attribs
+ * - indexed results
+ * 4. bugs
+ */
+
+#define SWZ_X 0
+#define SWZ_Y 1
+#define SWZ_Z 2
+#define SWZ_W 3
+#define MASK_X 8
+#define MASK_Y 4
+#define MASK_Z 2
+#define MASK_W 1
+#define MASK_ALL (MASK_X|MASK_Y|MASK_Z|MASK_W)
+#define DEF_SCALE 0
+#define DEF_CTEST 0
+#include "nv30_shader.h"
+
+#define swz(s,x,y,z,w) nv30_sr_swz((s), SWZ_##x, SWZ_##y, SWZ_##z, SWZ_##w)
+#define neg(s) nv30_sr_neg((s))
+#define abs(s) nv30_sr_abs((s))
+
+struct nv30_vpc {
+ struct nv30_vertex_program *vp;
+
+ struct nv30_vertex_program_exec *vpi;
+
+ unsigned output_map[PIPE_MAX_SHADER_OUTPUTS];
+
+ int high_temp;
+ int temp_temp_count;
+
+ struct nv30_sreg *imm;
+ unsigned nr_imm;
+};
+
+static struct nv30_sreg
+temp(struct nv30_vpc *vpc)
+{
+ int idx;
+
+ idx = vpc->temp_temp_count++;
+ idx += vpc->high_temp + 1;
+ return nv30_sr(NV30SR_TEMP, idx);
+}
+
+static struct nv30_sreg
+constant(struct nv30_vpc *vpc, int pipe, float x, float y, float z, float w)
+{
+ struct nv30_vertex_program *vp = vpc->vp;
+ struct nv30_vertex_program_data *vpd;
+ int idx;
+
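+ /* pipe >= 0 names a constant-buffer slot and is reused if already
+ * allocated; pipe == -1 allocates a fresh slot for an immediate.
+ */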
+ if (pipe >= 0) {
+ for (idx = 0; idx < vp->nr_consts; idx++) {
+ if (vp->consts[idx].index == pipe)
+ return nv30_sr(NV30SR_CONST, idx);
+ }
+ }
+
+ idx = vp->nr_consts++;
+ vp->consts = realloc(vp->consts, sizeof(*vpd) * vp->nr_consts);
+ vpd = &vp->consts[idx];
+
+ vpd->index = pipe;
+ vpd->value[0] = x;
+ vpd->value[1] = y;
+ vpd->value[2] = z;
+ vpd->value[3] = w;
+ return nv30_sr(NV30SR_CONST, idx);
+}
+
+#define arith(cc,s,o,d,m,s0,s1,s2) \
+ nv30_vp_arith((cc), (s), NV30_VP_INST_##o, (d), (m), (s0), (s1), (s2))
+
+static void
+emit_src(struct nv30_vpc *vpc, uint32_t *hw, int pos, struct nv30_sreg src)
+{
+ struct nv30_vertex_program *vp = vpc->vp;
+ uint32_t sr = 0;
+
+ switch (src.type) {
+ case NV30SR_TEMP:
+ sr |= (NV30_VP_SRC_REG_TYPE_TEMP << NV30_VP_SRC_REG_TYPE_SHIFT);
+ sr |= (src.index << NV30_VP_SRC_TEMP_SRC_SHIFT);
+ break;
+ case NV30SR_INPUT:
+ sr |= (NV30_VP_SRC_REG_TYPE_INPUT <<
+ NV30_VP_SRC_REG_TYPE_SHIFT);
+ vp->ir |= (1 << src.index);
+ hw[1] |= (src.index << NV30_VP_INST_INPUT_SRC_SHIFT);
+ break;
+ case NV30SR_CONST:
+ sr |= (NV30_VP_SRC_REG_TYPE_CONST <<
+ NV30_VP_SRC_REG_TYPE_SHIFT);
+ assert(vpc->vpi->const_index == -1 ||
+ vpc->vpi->const_index == src.index);
+ vpc->vpi->const_index = src.index;
+ break;
+ case NV30SR_NONE:
+ sr |= (NV30_VP_SRC_REG_TYPE_INPUT <<
+ NV30_VP_SRC_REG_TYPE_SHIFT);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (src.negate)
+ sr |= NV30_VP_SRC_NEGATE;
+
+ if (src.abs)
+ hw[0] |= (1 << (21 + pos));
+
+ sr |= ((src.swz[0] << NV30_VP_SRC_SWZ_X_SHIFT) |
+ (src.swz[1] << NV30_VP_SRC_SWZ_Y_SHIFT) |
+ (src.swz[2] << NV30_VP_SRC_SWZ_Z_SHIFT) |
+ (src.swz[3] << NV30_VP_SRC_SWZ_W_SHIFT));
+
+/*
+ * |VVV|
+ * d�.�b
+ * \u/
+ *
+ */
+
+ switch (pos) {
+ case 0:
+ hw[1] |= ((sr & NV30_VP_SRC0_HIGH_MASK) >>
+ NV30_VP_SRC0_HIGH_SHIFT) << NV30_VP_INST_SRC0H_SHIFT;
+ hw[2] |= (sr & NV30_VP_SRC0_LOW_MASK) <<
+ NV30_VP_INST_SRC0L_SHIFT;
+ break;
+ case 1:
+ hw[2] |= sr << NV30_VP_INST_SRC1_SHIFT;
+ break;
+ case 2:
+ hw[2] |= ((sr & NV30_VP_SRC2_HIGH_MASK) >>
+ NV30_VP_SRC2_HIGH_SHIFT) << NV30_VP_INST_SRC2H_SHIFT;
+ hw[3] |= (sr & NV30_VP_SRC2_LOW_MASK) <<
+ NV30_VP_INST_SRC2L_SHIFT;
+ break;
+ default:
+ assert(0);
+ }
+}
+
+static void
+emit_dst(struct nv30_vpc *vpc, uint32_t *hw, int slot, struct nv30_sreg dst)
+{
+ struct nv30_vertex_program *vp = vpc->vp;
+
+ switch (dst.type) {
+ case NV30SR_TEMP:
+ hw[0] |= (dst.index << NV30_VP_INST_DEST_TEMP_ID_SHIFT);
+ break;
+ case NV30SR_OUTPUT:
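+ /* Record which result registers are written in vp->or, mirroring
+ * the vp->ir input tracking in emit_src().
+ */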
+ switch (dst.index) {
+ case NV30_VP_INST_DEST_COL0 : vp->or |= (1 << 0); break;
+ case NV30_VP_INST_DEST_COL1 : vp->or |= (1 << 1); break;
+ case NV30_VP_INST_DEST_BFC0 : vp->or |= (1 << 2); break;
+ case NV30_VP_INST_DEST_BFC1 : vp->or |= (1 << 3); break;
+ case NV30_VP_INST_DEST_FOGC : vp->or |= (1 << 4); break;
+ case NV30_VP_INST_DEST_PSZ : vp->or |= (1 << 5); break;
+ case NV30_VP_INST_DEST_TC(0): vp->or |= (1 << 14); break;
+ case NV30_VP_INST_DEST_TC(1): vp->or |= (1 << 15); break;
+ case NV30_VP_INST_DEST_TC(2): vp->or |= (1 << 16); break;
+ case NV30_VP_INST_DEST_TC(3): vp->or |= (1 << 17); break;
+ case NV30_VP_INST_DEST_TC(4): vp->or |= (1 << 18); break;
+ case NV30_VP_INST_DEST_TC(5): vp->or |= (1 << 19); break;
+ case NV30_VP_INST_DEST_TC(6): vp->or |= (1 << 20); break;
+ case NV30_VP_INST_DEST_TC(7): vp->or |= (1 << 21); break;
+ default:
+ break;
+ }
+
+ hw[3] |= (dst.index << NV30_VP_INST_DEST_SHIFT);
+ hw[0] |= NV30_VP_INST_VEC_DEST_TEMP_MASK | (1<<20);
+
+ /*XXX: unlikely to be entirely correct; someone needs to
+ * figure out what exactly it is.
+ */
+ hw[3] |= 0x800;
+ break;
+ default:
+ assert(0);
+ }
+}
+
+static void
+nv30_vp_arith(struct nv30_vpc *vpc, int slot, int op,
+ struct nv30_sreg dst, int mask,
+ struct nv30_sreg s0, struct nv30_sreg s1,
+ struct nv30_sreg s2)
+{
+ struct nv30_vertex_program *vp = vpc->vp;
+ uint32_t *hw;
+
+ vp->insns = realloc(vp->insns, ++vp->nr_insns * sizeof(*vpc->vpi));
+ vpc->vpi = &vp->insns[vp->nr_insns - 1];
+ memset(vpc->vpi, 0, sizeof(*vpc->vpi));
+ vpc->vpi->const_index = -1;
+
+ hw = vpc->vpi->data;
+
+ hw[0] |= (NV30_VP_INST_COND_TR << NV30_VP_INST_COND_SHIFT);
+ hw[0] |= ((0 << NV30_VP_INST_COND_SWZ_X_SHIFT) |
+ (1 << NV30_VP_INST_COND_SWZ_Y_SHIFT) |
+ (2 << NV30_VP_INST_COND_SWZ_Z_SHIFT) |
+ (3 << NV30_VP_INST_COND_SWZ_W_SHIFT));
+
+ hw[1] |= (op << NV30_VP_INST_VEC_OPCODE_SHIFT);
+// hw[3] |= NV30_VP_INST_SCA_DEST_TEMP_MASK;
+// hw[3] |= (mask << NV30_VP_INST_VEC_WRITEMASK_SHIFT);
+
+ if (dst.type == NV30SR_OUTPUT) {
+ if (slot)
+ hw[3] |= (mask << NV30_VP_INST_SDEST_WRITEMASK_SHIFT);
+ else
+ hw[3] |= (mask << NV30_VP_INST_VDEST_WRITEMASK_SHIFT);
+ } else {
+ if (slot)
+ hw[3] |= (mask << NV30_VP_INST_STEMP_WRITEMASK_SHIFT);
+ else
+ hw[3] |= (mask << NV30_VP_INST_VTEMP_WRITEMASK_SHIFT);
+ }
+
+ emit_dst(vpc, hw, slot, dst);
+ emit_src(vpc, hw, 0, s0);
+ emit_src(vpc, hw, 1, s1);
+ emit_src(vpc, hw, 2, s2);
+}
+
+static INLINE struct nv30_sreg
+tgsi_src(struct nv30_vpc *vpc, const struct tgsi_full_src_register *fsrc) {
+ struct nv30_sreg src;
+
+ switch (fsrc->SrcRegister.File) {
+ case TGSI_FILE_INPUT:
+ src = nv30_sr(NV30SR_INPUT, fsrc->SrcRegister.Index);
+ break;
+ case TGSI_FILE_CONSTANT:
+ src = constant(vpc, fsrc->SrcRegister.Index, 0, 0, 0, 0);
+ break;
+ case TGSI_FILE_IMMEDIATE:
+ src = vpc->imm[fsrc->SrcRegister.Index];
+ break;
+ case TGSI_FILE_TEMPORARY:
+ if (vpc->high_temp < fsrc->SrcRegister.Index)
+ vpc->high_temp = fsrc->SrcRegister.Index;
+ src = nv30_sr(NV30SR_TEMP, fsrc->SrcRegister.Index);
+ break;
+ default:
+ NOUVEAU_ERR("bad src file\n");
+ break;
+ }
+
+ src.abs = fsrc->SrcRegisterExtMod.Absolute;
+ src.negate = fsrc->SrcRegister.Negate;
+ src.swz[0] = fsrc->SrcRegister.SwizzleX;
+ src.swz[1] = fsrc->SrcRegister.SwizzleY;
+ src.swz[2] = fsrc->SrcRegister.SwizzleZ;
+ src.swz[3] = fsrc->SrcRegister.SwizzleW;
+ return src;
+}
+
+static INLINE struct nv30_sreg
+tgsi_dst(struct nv30_vpc *vpc, const struct tgsi_full_dst_register *fdst) {
+ struct nv30_sreg dst;
+
+ switch (fdst->DstRegister.File) {
+ case TGSI_FILE_OUTPUT:
+ dst = nv30_sr(NV30SR_OUTPUT,
+ vpc->output_map[fdst->DstRegister.Index]);
+
+ break;
+ case TGSI_FILE_TEMPORARY:
+ dst = nv30_sr(NV30SR_TEMP, fdst->DstRegister.Index);
+ if (vpc->high_temp < dst.index)
+ vpc->high_temp = dst.index;
+ break;
+ default:
+ NOUVEAU_ERR("bad dst file\n");
+ break;
+ }
+
+ return dst;
+}
+
+static INLINE int
+tgsi_mask(uint tgsi)
+{
+ int mask = 0;
+
+ if (tgsi & TGSI_WRITEMASK_X) mask |= MASK_X;
+ if (tgsi & TGSI_WRITEMASK_Y) mask |= MASK_Y;
+ if (tgsi & TGSI_WRITEMASK_Z) mask |= MASK_Z;
+ if (tgsi & TGSI_WRITEMASK_W) mask |= MASK_W;
+ return mask;
+}
+
+static boolean
+nv30_vertprog_parse_instruction(struct nv30_vpc *vpc,
+ const struct tgsi_full_instruction *finst)
+{
+ struct nv30_sreg src[3], dst, tmp;
+ struct nv30_sreg none = nv30_sr(NV30SR_NONE, 0);
+ int mask;
+ int ai = -1, ci = -1;
+ int i;
+
+ if (finst->Instruction.Opcode == TGSI_OPCODE_END)
+ return TRUE;
+
+ vpc->temp_temp_count = 0;
+ for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
+ const struct tgsi_full_src_register *fsrc;
+
+ fsrc = &finst->FullSrcRegisters[i];
+ if (fsrc->SrcRegister.File == TGSI_FILE_TEMPORARY) {
+ src[i] = tgsi_src(vpc, fsrc);
+ }
+ }
+
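+ /* Only one distinct input register and one distinct constant or
+ * immediate may be referenced per instruction; any extras are copied
+ * into temporaries first.
+ */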
+ for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
+ const struct tgsi_full_src_register *fsrc;
+
+ fsrc = &finst->FullSrcRegisters[i];
+ switch (fsrc->SrcRegister.File) {
+ case TGSI_FILE_INPUT:
+ if (ai == -1 || ai == fsrc->SrcRegister.Index) {
+ ai = fsrc->SrcRegister.Index;
+ src[i] = tgsi_src(vpc, fsrc);
+ } else {
+ src[i] = temp(vpc);
+ arith(vpc, 0, OP_MOV, src[i], MASK_ALL,
+ tgsi_src(vpc, fsrc), none, none);
+ }
+ break;
+ /*XXX: index comparison is broken now that consts come from
+ * two different register files.
+ */
+ case TGSI_FILE_CONSTANT:
+ case TGSI_FILE_IMMEDIATE:
+ if (ci == -1 || ci == fsrc->SrcRegister.Index) {
+ ci = fsrc->SrcRegister.Index;
+ src[i] = tgsi_src(vpc, fsrc);
+ } else {
+ src[i] = temp(vpc);
+ arith(vpc, 0, OP_MOV, src[i], MASK_ALL,
+ tgsi_src(vpc, fsrc), none, none);
+ }
+ break;
+ case TGSI_FILE_TEMPORARY:
+ /* handled above */
+ break;
+ default:
+ NOUVEAU_ERR("bad src file\n");
+ return FALSE;
+ }
+ }
+
+ dst = tgsi_dst(vpc, &finst->FullDstRegisters[0]);
+ mask = tgsi_mask(finst->FullDstRegisters[0].DstRegister.WriteMask);
+
+ switch (finst->Instruction.Opcode) {
+ case TGSI_OPCODE_ABS:
+ arith(vpc, 0, OP_MOV, dst, mask, abs(src[0]), none, none);
+ break;
+ case TGSI_OPCODE_ADD:
+ arith(vpc, 0, OP_ADD, dst, mask, src[0], none, src[1]);
+ break;
+ case TGSI_OPCODE_ARL:
+ arith(vpc, 0, OP_ARL, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_DP3:
+ arith(vpc, 0, OP_DP3, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_DP4:
+ arith(vpc, 0, OP_DP4, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_DPH:
+ arith(vpc, 0, OP_DPH, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_DST:
+ arith(vpc, 0, OP_DST, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_EX2:
+ arith(vpc, 1, OP_EX2, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_EXP:
+ arith(vpc, 1, OP_EXP, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_FLR:
+ arith(vpc, 0, OP_FLR, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_FRC:
+ arith(vpc, 0, OP_FRC, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_LG2:
+ arith(vpc, 1, OP_LG2, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_LIT:
+ arith(vpc, 1, OP_LIT, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_LOG:
+ arith(vpc, 1, OP_LOG, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_MAD:
+ arith(vpc, 0, OP_MAD, dst, mask, src[0], src[1], src[2]);
+ break;
+ case TGSI_OPCODE_MAX:
+ arith(vpc, 0, OP_MAX, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_MIN:
+ arith(vpc, 0, OP_MIN, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_MOV:
+ arith(vpc, 0, OP_MOV, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_MUL:
+ arith(vpc, 0, OP_MUL, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_POW:
+ tmp = temp(vpc);
+ arith(vpc, 1, OP_LG2, tmp, MASK_X, none, none,
+ swz(src[0], X, X, X, X));
+ arith(vpc, 0, OP_MUL, tmp, MASK_X, swz(tmp, X, X, X, X),
+ swz(src[1], X, X, X, X), none);
+ arith(vpc, 1, OP_EX2, dst, mask, none, none,
+ swz(tmp, X, X, X, X));
+ break;
+ case TGSI_OPCODE_RCP:
+ arith(vpc, 1, OP_RCP, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_RET:
+ break;
+ case TGSI_OPCODE_RSQ:
+ arith(vpc, 1, OP_RSQ, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_SGE:
+ arith(vpc, 0, OP_SGE, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SGT:
+ arith(vpc, 0, OP_SGT, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SLT:
+ arith(vpc, 0, OP_SLT, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SUB:
+ arith(vpc, 0, OP_ADD, dst, mask, src[0], none, neg(src[1]));
+ break;
+ case TGSI_OPCODE_XPD:
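+		/* Cross product: dst.xyz = src0.yzx * src1.zxy - src0.zxy * src1.yzx,
+		 * built from a MUL followed by a MAD with the negated temp.
+		 */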
+ tmp = temp(vpc);
+ arith(vpc, 0, OP_MUL, tmp, mask,
+ swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none);
+ arith(vpc, 0, OP_MAD, dst, (mask & ~MASK_W),
+ swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y),
+ neg(tmp));
+ break;
+ default:
+ NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static boolean
+nv30_vertprog_parse_decl_output(struct nv30_vpc *vpc,
+ const struct tgsi_full_declaration *fdec)
+{
+ int hw;
+
+ switch (fdec->Semantic.SemanticName) {
+ case TGSI_SEMANTIC_POSITION:
+ hw = NV30_VP_INST_DEST_POS;
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ if (fdec->Semantic.SemanticIndex == 0) {
+ hw = NV30_VP_INST_DEST_COL0;
+ } else
+ if (fdec->Semantic.SemanticIndex == 1) {
+ hw = NV30_VP_INST_DEST_COL1;
+ } else {
+ NOUVEAU_ERR("bad colour semantic index\n");
+ return FALSE;
+ }
+ break;
+ case TGSI_SEMANTIC_BCOLOR:
+ if (fdec->Semantic.SemanticIndex == 0) {
+ hw = NV30_VP_INST_DEST_BFC0;
+ } else
+ if (fdec->Semantic.SemanticIndex == 1) {
+ hw = NV30_VP_INST_DEST_BFC1;
+ } else {
+ NOUVEAU_ERR("bad bcolour semantic index\n");
+ return FALSE;
+ }
+ break;
+ case TGSI_SEMANTIC_FOG:
+ hw = NV30_VP_INST_DEST_FOGC;
+ break;
+ case TGSI_SEMANTIC_PSIZE:
+ hw = NV30_VP_INST_DEST_PSZ;
+ break;
+ case TGSI_SEMANTIC_GENERIC:
+ if (fdec->Semantic.SemanticIndex <= 7) {
+ hw = NV30_VP_INST_DEST_TC(fdec->Semantic.SemanticIndex);
+ } else {
+ NOUVEAU_ERR("bad generic semantic index\n");
+ return FALSE;
+ }
+ break;
+ default:
+ NOUVEAU_ERR("bad output semantic\n");
+ return FALSE;
+ }
+
+ vpc->output_map[fdec->DeclarationRange.First] = hw;
+ return TRUE;
+}
+
+static boolean
+nv30_vertprog_prepare(struct nv30_vpc *vpc)
+{
+ struct tgsi_parse_context p;
+ int nr_imm = 0;
+
+ tgsi_parse_init(&p, vpc->vp->pipe.tokens);
+ while (!tgsi_parse_end_of_tokens(&p)) {
+ const union tgsi_full_token *tok = &p.FullToken;
+
+ tgsi_parse_token(&p);
+ switch(tok->Token.Type) {
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ nr_imm++;
+ break;
+ default:
+ break;
+ }
+ }
+ tgsi_parse_free(&p);
+
+ if (nr_imm) {
+ vpc->imm = CALLOC(nr_imm, sizeof(struct nv30_sreg));
+ assert(vpc->imm);
+ }
+
+ return TRUE;
+}
+
+static void
+nv30_vertprog_translate(struct nv30_context *nv30,
+ struct nv30_vertex_program *vp)
+{
+ struct tgsi_parse_context parse;
+ struct nv30_vpc *vpc = NULL;
+
+ tgsi_dump(vp->pipe.tokens,0);
+
+ vpc = CALLOC(1, sizeof(struct nv30_vpc));
+ if (!vpc)
+ return;
+ vpc->vp = vp;
+ vpc->high_temp = -1;
+
+ if (!nv30_vertprog_prepare(vpc)) {
+ FREE(vpc);
+ return;
+ }
+
+ tgsi_parse_init(&parse, vp->pipe.tokens);
+
+ while (!tgsi_parse_end_of_tokens(&parse)) {
+ tgsi_parse_token(&parse);
+
+ switch (parse.FullToken.Token.Type) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ {
+ const struct tgsi_full_declaration *fdec;
+ fdec = &parse.FullToken.FullDeclaration;
+ switch (fdec->Declaration.File) {
+ case TGSI_FILE_OUTPUT:
+ if (!nv30_vertprog_parse_decl_output(vpc, fdec))
+ goto out_err;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ {
+ const struct tgsi_full_immediate *imm;
+
+ imm = &parse.FullToken.FullImmediate;
+ assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
+ assert(imm->Immediate.NrTokens == 4 + 1);
+ vpc->imm[vpc->nr_imm++] =
+ constant(vpc, -1,
+ imm->u[0].Float,
+ imm->u[1].Float,
+ imm->u[2].Float,
+ imm->u[3].Float);
+ }
+ break;
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ {
+ const struct tgsi_full_instruction *finst;
+ finst = &parse.FullToken.FullInstruction;
+ if (!nv30_vertprog_parse_instruction(vpc, finst))
+ goto out_err;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ vp->insns[vp->nr_insns - 1].data[3] |= NV30_VP_INST_LAST;
+ vp->translated = TRUE;
+out_err:
+ tgsi_parse_free(&parse);
+ FREE(vpc);
+}
+
+static boolean
+nv30_vertprog_validate(struct nv30_context *nv30)
+{
+ struct pipe_screen *pscreen = nv30->pipe.screen;
+ struct nouveau_grobj *rankine = nv30->screen->rankine;
+ struct nv30_vertex_program *vp;
+ struct pipe_buffer *constbuf;
+ boolean upload_code = FALSE, upload_data = FALSE;
+ int i;
+
+ vp = nv30->vertprog;
+ constbuf = nv30->constbuf[PIPE_SHADER_VERTEX];
+
+ /* Translate TGSI shader into hw bytecode */
+ if (!vp->translated) {
+ nv30_vertprog_translate(nv30, vp);
+ if (!vp->translated)
+ return FALSE;
+ }
+
+ /* Allocate hw vtxprog exec slots */
+ if (!vp->exec) {
+ struct nouveau_resource *heap = nv30->screen->vp_exec_heap;
+ struct nouveau_stateobj *so;
+ uint vplen = vp->nr_insns;
+
+ if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec)) {
+ while (heap->next && heap->size < vplen) {
+ struct nv30_vertex_program *evict;
+
+ evict = heap->next->priv;
+ nouveau_resource_free(&evict->exec);
+ }
+
+ if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec))
+ assert(0);
+ }
+
+ so = so_new(2, 0);
+ so_method(so, rankine, NV34TCL_VP_START_FROM_ID, 1);
+ so_data (so, vp->exec->start);
+ so_ref(so, &vp->so);
+ so_ref(NULL, &so);
+
+ upload_code = TRUE;
+ }
+
+ /* Allocate hw vtxprog const slots */
+ if (vp->nr_consts && !vp->data) {
+ struct nouveau_resource *heap = nv30->screen->vp_data_heap;
+
+ if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data)) {
+ while (heap->next && heap->size < vp->nr_consts) {
+ struct nv30_vertex_program *evict;
+
+ evict = heap->next->priv;
+ nouveau_resource_free(&evict->data);
+ }
+
+ if (nouveau_resource_alloc(heap, vp->nr_consts, vp,
+ &vp->data))
+ assert(0);
+ }
+
+ /*XXX: handle this some day */
+ assert(vp->data->start >= vp->data_start_min);
+
+ upload_data = TRUE;
+ if (vp->data_start != vp->data->start)
+ upload_code = TRUE;
+ }
+
+	/* If the exec or data segments moved, we need to patch the program to
+	 * fix up offsets and register IDs.
+	 */
+ if (vp->exec_start != vp->exec->start) {
+ for (i = 0; i < vp->nr_insns; i++) {
+ struct nv30_vertex_program_exec *vpi = &vp->insns[i];
+
+ if (vpi->has_branch_offset) {
+ assert(0);
+ }
+ }
+
+ vp->exec_start = vp->exec->start;
+ }
+
+ if (vp->nr_consts && vp->data_start != vp->data->start) {
+ for (i = 0; i < vp->nr_insns; i++) {
+ struct nv30_vertex_program_exec *vpi = &vp->insns[i];
+
+ if (vpi->const_index >= 0) {
+ vpi->data[1] &= ~NV30_VP_INST_CONST_SRC_MASK;
+ vpi->data[1] |=
+ (vpi->const_index + vp->data->start) <<
+ NV30_VP_INST_CONST_SRC_SHIFT;
+
+ }
+ }
+
+ vp->data_start = vp->data->start;
+ }
+
+ /* Update + Upload constant values */
+ if (vp->nr_consts) {
+ float *map = NULL;
+
+ if (constbuf) {
+ map = pipe_buffer_map(pscreen, constbuf,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ }
+
+ for (i = 0; i < vp->nr_consts; i++) {
+ struct nv30_vertex_program_data *vpd = &vp->consts[i];
+
+ if (vpd->index >= 0) {
+ if (!upload_data &&
+ !memcmp(vpd->value, &map[vpd->index * 4],
+ 4 * sizeof(float)))
+ continue;
+ memcpy(vpd->value, &map[vpd->index * 4],
+ 4 * sizeof(float));
+ }
+
+ BEGIN_RING(rankine, NV34TCL_VP_UPLOAD_CONST_ID, 5);
+ OUT_RING (i + vp->data->start);
+ OUT_RINGp ((uint32_t *)vpd->value, 4);
+ }
+
+ if (constbuf)
+ pipe_buffer_unmap(pscreen, constbuf);
+ }
+
+ /* Upload vtxprog */
+ if (upload_code) {
+#if 0
+ for (i = 0; i < vp->nr_insns; i++) {
+ NOUVEAU_MSG("VP inst %d: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, vp->insns[i].data[0], vp->insns[i].data[1],
+ vp->insns[i].data[2], vp->insns[i].data[3]);
+ }
+#endif
+ BEGIN_RING(rankine, NV34TCL_VP_UPLOAD_FROM_ID, 1);
+ OUT_RING (vp->exec->start);
+ for (i = 0; i < vp->nr_insns; i++) {
+ BEGIN_RING(rankine, NV34TCL_VP_UPLOAD_INST(0), 4);
+ OUT_RINGp (vp->insns[i].data, 4);
+ }
+ }
+
+ if (vp->so != nv30->state.hw[NV30_STATE_VERTPROG]) {
+ so_ref(vp->so, &nv30->state.hw[NV30_STATE_VERTPROG]);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+void
+nv30_vertprog_destroy(struct nv30_context *nv30, struct nv30_vertex_program *vp)
+{
+ vp->translated = FALSE;
+
+ if (vp->nr_insns) {
+ FREE(vp->insns);
+ vp->insns = NULL;
+ vp->nr_insns = 0;
+ }
+
+ if (vp->nr_consts) {
+ FREE(vp->consts);
+ vp->consts = NULL;
+ vp->nr_consts = 0;
+ }
+
+ nouveau_resource_free(&vp->exec);
+ vp->exec_start = 0;
+ nouveau_resource_free(&vp->data);
+ vp->data_start = 0;
+ vp->data_start_min = 0;
+
+ vp->ir = vp->or = 0;
+ so_ref(NULL, &vp->so);
+}
+
+struct nv30_state_entry nv30_state_vertprog = {
+ .validate = nv30_vertprog_validate,
+ .dirty = {
+ .pipe = NV30_NEW_VERTPROG /*| NV30_NEW_UCP*/,
+ .hw = NV30_STATE_VERTPROG,
+ }
+};
diff --git a/src/gallium/drivers/nv40/Makefile b/src/gallium/drivers/nv40/Makefile
new file mode 100644
index 0000000000..0ecae2b491
--- /dev/null
+++ b/src/gallium/drivers/nv40/Makefile
@@ -0,0 +1,29 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = nv40
+
+C_SOURCES = \
+ nv40_clear.c \
+ nv40_context.c \
+ nv40_draw.c \
+ nv40_fragprog.c \
+ nv40_fragtex.c \
+ nv40_miptree.c \
+ nv40_query.c \
+ nv40_screen.c \
+ nv40_state.c \
+ nv40_state_blend.c \
+ nv40_state_emit.c \
+ nv40_state_fb.c \
+ nv40_state_rasterizer.c \
+ nv40_state_scissor.c \
+ nv40_state_stipple.c \
+ nv40_state_viewport.c \
+ nv40_state_zsa.c \
+ nv40_surface.c \
+ nv40_transfer.c \
+ nv40_vbo.c \
+ nv40_vertprog.c
+
+include ../../Makefile.template
diff --git a/src/gallium/drivers/nv40/nv40_clear.c b/src/gallium/drivers/nv40/nv40_clear.c
new file mode 100644
index 0000000000..ddf13addf3
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_clear.c
@@ -0,0 +1,14 @@
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "util/u_clear.h"
+
+#include "nv40_context.h"
+
+void
+nv40_clear(struct pipe_context *pipe, unsigned buffers,
+ const float *rgba, double depth, unsigned stencil)
+{
+ util_clear(pipe, &nv40_context(pipe)->framebuffer, buffers, rgba, depth,
+ stencil);
+}
diff --git a/src/gallium/drivers/nv40/nv40_context.c b/src/gallium/drivers/nv40/nv40_context.c
new file mode 100644
index 0000000000..8eba6a43ef
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_context.c
@@ -0,0 +1,98 @@
+#include "draw/draw_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/internal/p_winsys_screen.h"
+
+#include "nv40_context.h"
+#include "nv40_screen.h"
+
+static void
+nv40_flush(struct pipe_context *pipe, unsigned flags,
+ struct pipe_fence_handle **fence)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+
+ if (flags & PIPE_FLUSH_TEXTURE_CACHE) {
+ BEGIN_RING(curie, 0x1fd8, 1);
+ OUT_RING (2);
+ BEGIN_RING(curie, 0x1fd8, 1);
+ OUT_RING (1);
+ }
+
+ FIRE_RING(fence);
+}
+
+static void
+nv40_destroy(struct pipe_context *pipe)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+
+ if (nv40->draw)
+ draw_destroy(nv40->draw);
+ FREE(nv40);
+}
+
+static unsigned int
+nv40_is_texture_referenced( struct pipe_context *pipe,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+static unsigned int
+nv40_is_buffer_referenced( struct pipe_context *pipe,
+ struct pipe_buffer *buf)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+struct pipe_context *
+nv40_create(struct pipe_screen *pscreen, unsigned pctx_id)
+{
+ struct nv40_screen *screen = nv40_screen(pscreen);
+ struct pipe_winsys *ws = pscreen->winsys;
+ struct nv40_context *nv40;
+ struct nouveau_winsys *nvws = screen->nvws;
+
+ nv40 = CALLOC(1, sizeof(struct nv40_context));
+ if (!nv40)
+ return NULL;
+ nv40->screen = screen;
+ nv40->pctx_id = pctx_id;
+
+ nv40->nvws = nvws;
+
+ nv40->pipe.winsys = ws;
+ nv40->pipe.screen = pscreen;
+ nv40->pipe.destroy = nv40_destroy;
+ nv40->pipe.draw_arrays = nv40_draw_arrays;
+ nv40->pipe.draw_elements = nv40_draw_elements;
+ nv40->pipe.clear = nv40_clear;
+ nv40->pipe.flush = nv40_flush;
+
+ nv40->pipe.is_texture_referenced = nv40_is_texture_referenced;
+ nv40->pipe.is_buffer_referenced = nv40_is_buffer_referenced;
+
+ nv40_init_query_functions(nv40);
+ nv40_init_surface_functions(nv40);
+ nv40_init_state_functions(nv40);
+
+ /* Create, configure, and install fallback swtnl path */
+ nv40->draw = draw_create();
+ draw_wide_point_threshold(nv40->draw, 9999999.0);
+ draw_wide_line_threshold(nv40->draw, 9999999.0);
+ draw_enable_line_stipple(nv40->draw, FALSE);
+ draw_enable_point_sprites(nv40->draw, FALSE);
+ draw_set_rasterize_stage(nv40->draw, nv40_draw_render_stage(nv40));
+
+ return &nv40->pipe;
+}
+
diff --git a/src/gallium/drivers/nv40/nv40_context.h b/src/gallium/drivers/nv40/nv40_context.h
new file mode 100644
index 0000000000..97bc83292d
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_context.h
@@ -0,0 +1,233 @@
+#ifndef __NV40_CONTEXT_H__
+#define __NV40_CONTEXT_H__
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_compiler.h"
+
+#include "util/u_memory.h"
+#include "util/u_math.h"
+
+#include "draw/draw_vertex.h"
+
+#include "nouveau/nouveau_winsys.h"
+#include "nouveau/nouveau_gldefs.h"
+
+#define NOUVEAU_PUSH_CONTEXT(ctx) \
+ struct nv40_screen *ctx = nv40->screen
+#include "nouveau/nouveau_push.h"
+#include "nouveau/nouveau_stateobj.h"
+
+#include "nv40_state.h"
+
+#define NOUVEAU_ERR(fmt, args...) \
+ fprintf(stderr, "%s:%d - "fmt, __func__, __LINE__, ##args);
+#define NOUVEAU_MSG(fmt, args...) \
+ fprintf(stderr, "nouveau: "fmt, ##args);
+
+enum nv40_state_index {
+ NV40_STATE_FB = 0,
+ NV40_STATE_VIEWPORT = 1,
+ NV40_STATE_BLEND = 2,
+ NV40_STATE_RAST = 3,
+ NV40_STATE_ZSA = 4,
+ NV40_STATE_BCOL = 5,
+ NV40_STATE_CLIP = 6,
+ NV40_STATE_SCISSOR = 7,
+ NV40_STATE_STIPPLE = 8,
+ NV40_STATE_FRAGPROG = 9,
+ NV40_STATE_VERTPROG = 10,
+ NV40_STATE_FRAGTEX0 = 11,
+ NV40_STATE_FRAGTEX1 = 12,
+ NV40_STATE_FRAGTEX2 = 13,
+ NV40_STATE_FRAGTEX3 = 14,
+ NV40_STATE_FRAGTEX4 = 15,
+ NV40_STATE_FRAGTEX5 = 16,
+ NV40_STATE_FRAGTEX6 = 17,
+ NV40_STATE_FRAGTEX7 = 18,
+ NV40_STATE_FRAGTEX8 = 19,
+ NV40_STATE_FRAGTEX9 = 20,
+ NV40_STATE_FRAGTEX10 = 21,
+ NV40_STATE_FRAGTEX11 = 22,
+ NV40_STATE_FRAGTEX12 = 23,
+ NV40_STATE_FRAGTEX13 = 24,
+ NV40_STATE_FRAGTEX14 = 25,
+ NV40_STATE_FRAGTEX15 = 26,
+ NV40_STATE_VERTTEX0 = 27,
+ NV40_STATE_VERTTEX1 = 28,
+ NV40_STATE_VERTTEX2 = 29,
+ NV40_STATE_VERTTEX3 = 30,
+ NV40_STATE_VTXBUF = 31,
+ NV40_STATE_VTXFMT = 32,
+ NV40_STATE_VTXATTR = 33,
+ NV40_STATE_MAX = 34
+};
+
+#include "nv40_screen.h"
+
+#define NV40_NEW_BLEND (1 << 0)
+#define NV40_NEW_RAST (1 << 1)
+#define NV40_NEW_ZSA (1 << 2)
+#define NV40_NEW_SAMPLER (1 << 3)
+#define NV40_NEW_FB (1 << 4)
+#define NV40_NEW_STIPPLE (1 << 5)
+#define NV40_NEW_SCISSOR (1 << 6)
+#define NV40_NEW_VIEWPORT (1 << 7)
+#define NV40_NEW_BCOL (1 << 8)
+#define NV40_NEW_VERTPROG (1 << 9)
+#define NV40_NEW_FRAGPROG (1 << 10)
+#define NV40_NEW_ARRAYS (1 << 11)
+#define NV40_NEW_UCP (1 << 12)
+
+struct nv40_rasterizer_state {
+ struct pipe_rasterizer_state pipe;
+ struct nouveau_stateobj *so;
+};
+
+struct nv40_zsa_state {
+ struct pipe_depth_stencil_alpha_state pipe;
+ struct nouveau_stateobj *so;
+};
+
+struct nv40_blend_state {
+ struct pipe_blend_state pipe;
+ struct nouveau_stateobj *so;
+};
+
+
+struct nv40_state {
+ unsigned scissor_enabled;
+ unsigned stipple_enabled;
+ unsigned viewport_bypass;
+ unsigned fp_samplers;
+
+ uint64_t dirty;
+ struct nouveau_stateobj *hw[NV40_STATE_MAX];
+};
+
+struct nv40_context {
+ struct pipe_context pipe;
+
+ struct nouveau_winsys *nvws;
+ struct nv40_screen *screen;
+ unsigned pctx_id;
+
+ struct draw_context *draw;
+
+ /* HW state derived from pipe states */
+ struct nv40_state state;
+ struct {
+ struct nv40_vertex_program *vertprog;
+
+ unsigned nr_attribs;
+ unsigned hw[PIPE_MAX_SHADER_INPUTS];
+ unsigned draw[PIPE_MAX_SHADER_INPUTS];
+ unsigned emit[PIPE_MAX_SHADER_INPUTS];
+ } swtnl;
+
+ enum {
+ HW, SWTNL, SWRAST
+ } render_mode;
+ unsigned fallback_swtnl;
+ unsigned fallback_swrast;
+
+ /* Context state */
+ unsigned dirty, draw_dirty;
+ struct pipe_scissor_state scissor;
+ unsigned stipple[32];
+ struct pipe_clip_state clip;
+ struct nv40_vertex_program *vertprog;
+ struct nv40_fragment_program *fragprog;
+ struct pipe_buffer *constbuf[PIPE_SHADER_TYPES];
+ unsigned constbuf_nr[PIPE_SHADER_TYPES];
+ struct nv40_rasterizer_state *rasterizer;
+ struct nv40_zsa_state *zsa;
+ struct nv40_blend_state *blend;
+ struct pipe_blend_color blend_colour;
+ struct pipe_viewport_state viewport;
+ struct pipe_framebuffer_state framebuffer;
+ struct pipe_buffer *idxbuf;
+ unsigned idxbuf_format;
+ struct nv40_sampler_state *tex_sampler[PIPE_MAX_SAMPLERS];
+ struct nv40_miptree *tex_miptree[PIPE_MAX_SAMPLERS];
+ unsigned nr_samplers;
+ unsigned nr_textures;
+ unsigned dirty_samplers;
+ struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
+ unsigned vtxbuf_nr;
+ struct pipe_vertex_element vtxelt[PIPE_MAX_ATTRIBS];
+ unsigned vtxelt_nr;
+ const unsigned *edgeflags;
+};
+
+static INLINE struct nv40_context *
+nv40_context(struct pipe_context *pipe)
+{
+ return (struct nv40_context *)pipe;
+}
+
+struct nv40_state_entry {
+ boolean (*validate)(struct nv40_context *nv40);
+ struct {
+ unsigned pipe;
+ unsigned hw;
+ } dirty;
+};
+
+extern void nv40_init_state_functions(struct nv40_context *nv40);
+extern void nv40_init_surface_functions(struct nv40_context *nv40);
+extern void nv40_init_query_functions(struct nv40_context *nv40);
+
+extern void nv40_screen_init_miptree_functions(struct pipe_screen *pscreen);
+
+/* nv40_draw.c */
+extern struct draw_stage *nv40_draw_render_stage(struct nv40_context *nv40);
+extern boolean nv40_draw_elements_swtnl(struct pipe_context *pipe,
+ struct pipe_buffer *idxbuf,
+ unsigned ib_size, unsigned mode,
+ unsigned start, unsigned count);
+
+/* nv40_vertprog.c */
+extern void nv40_vertprog_destroy(struct nv40_context *,
+ struct nv40_vertex_program *);
+
+/* nv40_fragprog.c */
+extern void nv40_fragprog_destroy(struct nv40_context *,
+ struct nv40_fragment_program *);
+
+/* nv40_fragtex.c */
+extern void nv40_fragtex_bind(struct nv40_context *);
+
+/* nv40_state.c and friends */
+extern boolean nv40_state_validate(struct nv40_context *nv40);
+extern boolean nv40_state_validate_swtnl(struct nv40_context *nv40);
+extern void nv40_state_emit(struct nv40_context *nv40);
+extern struct nv40_state_entry nv40_state_rasterizer;
+extern struct nv40_state_entry nv40_state_scissor;
+extern struct nv40_state_entry nv40_state_stipple;
+extern struct nv40_state_entry nv40_state_fragprog;
+extern struct nv40_state_entry nv40_state_vertprog;
+extern struct nv40_state_entry nv40_state_blend;
+extern struct nv40_state_entry nv40_state_blend_colour;
+extern struct nv40_state_entry nv40_state_zsa;
+extern struct nv40_state_entry nv40_state_viewport;
+extern struct nv40_state_entry nv40_state_framebuffer;
+extern struct nv40_state_entry nv40_state_fragtex;
+extern struct nv40_state_entry nv40_state_vbo;
+extern struct nv40_state_entry nv40_state_vtxfmt;
+
+/* nv40_vbo.c */
+extern boolean nv40_draw_arrays(struct pipe_context *, unsigned mode,
+ unsigned start, unsigned count);
+extern boolean nv40_draw_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned mode, unsigned start,
+ unsigned count);
+
+/* nv40_clear.c */
+extern void nv40_clear(struct pipe_context *pipe, unsigned buffers,
+ const float *rgba, double depth, unsigned stencil);
+
+#endif
diff --git a/src/gallium/drivers/nv40/nv40_draw.c b/src/gallium/drivers/nv40/nv40_draw.c
new file mode 100644
index 0000000000..b2f19ecb69
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_draw.c
@@ -0,0 +1,352 @@
+#include "pipe/p_shader_tokens.h"
+#include "pipe/p_inlines.h"
+
+#include "util/u_pack_color.h"
+
+#include "draw/draw_context.h"
+#include "draw/draw_vertex.h"
+#include "draw/draw_pipe.h"
+
+#include "nv40_context.h"
+#define NV40_SHADER_NO_FUCKEDNESS
+#include "nv40_shader.h"
+
+/* Simple, but crappy, swtnl path; hopefully we won't need to hit this very
+ * often at all.  Uses "quadro style" vertex submission + a fixed vertex
+ * layout to avoid the need to generate a vertex program or vtxfmt.
+ */
+
+struct nv40_render_stage {
+ struct draw_stage stage;
+ struct nv40_context *nv40;
+ unsigned prim;
+};
+
+static INLINE struct nv40_render_stage *
+nv40_render_stage(struct draw_stage *stage)
+{
+ return (struct nv40_render_stage *)stage;
+}
+
+static INLINE void
+nv40_render_vertex(struct nv40_context *nv40, const struct vertex_header *v)
+{
+ unsigned i;
+
+ for (i = 0; i < nv40->swtnl.nr_attribs; i++) {
+ unsigned idx = nv40->swtnl.draw[i];
+ unsigned hw = nv40->swtnl.hw[i];
+
+ switch (nv40->swtnl.emit[i]) {
+ case EMIT_OMIT:
+ break;
+ case EMIT_1F:
+ BEGIN_RING(curie, NV40TCL_VTX_ATTR_1F(hw), 1);
+ OUT_RING (fui(v->data[idx][0]));
+ break;
+ case EMIT_2F:
+ BEGIN_RING(curie, NV40TCL_VTX_ATTR_2F_X(hw), 2);
+ OUT_RING (fui(v->data[idx][0]));
+ OUT_RING (fui(v->data[idx][1]));
+ break;
+ case EMIT_3F:
+ BEGIN_RING(curie, NV40TCL_VTX_ATTR_3F_X(hw), 3);
+ OUT_RING (fui(v->data[idx][0]));
+ OUT_RING (fui(v->data[idx][1]));
+ OUT_RING (fui(v->data[idx][2]));
+ break;
+ case EMIT_4F:
+ BEGIN_RING(curie, NV40TCL_VTX_ATTR_4F_X(hw), 4);
+ OUT_RING (fui(v->data[idx][0]));
+ OUT_RING (fui(v->data[idx][1]));
+ OUT_RING (fui(v->data[idx][2]));
+ OUT_RING (fui(v->data[idx][3]));
+ break;
+ case EMIT_4UB:
+ BEGIN_RING(curie, NV40TCL_VTX_ATTR_4UB(hw), 1);
+ OUT_RING (pack_ub4(float_to_ubyte(v->data[idx][0]),
+ float_to_ubyte(v->data[idx][1]),
+ float_to_ubyte(v->data[idx][2]),
+ float_to_ubyte(v->data[idx][3])));
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ }
+}
+
+static INLINE void
+nv40_render_prim(struct draw_stage *stage, struct prim_header *prim,
+ unsigned mode, unsigned count)
+{
+ struct nv40_render_stage *rs = nv40_render_stage(stage);
+ struct nv40_context *nv40 = rs->nv40;
+ struct nouveau_pushbuf *pb = nv40->screen->base.channel->pushbuf;
+ unsigned i;
+
+ /* Ensure there's room for 4xfloat32 + potentially 3 begin/end */
+ if (pb->remaining < ((count * 20) + 6)) {
+ if (rs->prim != NV40TCL_BEGIN_END_STOP) {
+ NOUVEAU_ERR("AIII, missed flush\n");
+ assert(0);
+ }
+ FIRE_RING(NULL);
+ nv40_state_emit(nv40);
+ }
+
+ /* Switch primitive modes if necessary */
+ if (rs->prim != mode) {
+ if (rs->prim != NV40TCL_BEGIN_END_STOP) {
+ BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
+ OUT_RING (NV40TCL_BEGIN_END_STOP);
+ }
+
+ BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
+ OUT_RING (mode);
+ rs->prim = mode;
+ }
+
+ /* Emit vertex data */
+ for (i = 0; i < count; i++)
+ nv40_render_vertex(nv40, prim->v[i]);
+
+ /* If it's likely we'll need to empty the push buffer soon, finish
+ * off the primitive now.
+ */
+ if (pb->remaining < ((count * 20) + 6)) {
+ BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
+ OUT_RING (NV40TCL_BEGIN_END_STOP);
+ rs->prim = NV40TCL_BEGIN_END_STOP;
+ }
+}
+
+static void
+nv40_render_point(struct draw_stage *draw, struct prim_header *prim)
+{
+ nv40_render_prim(draw, prim, NV40TCL_BEGIN_END_POINTS, 1);
+}
+
+static void
+nv40_render_line(struct draw_stage *draw, struct prim_header *prim)
+{
+ nv40_render_prim(draw, prim, NV40TCL_BEGIN_END_LINES, 2);
+}
+
+static void
+nv40_render_tri(struct draw_stage *draw, struct prim_header *prim)
+{
+ nv40_render_prim(draw, prim, NV40TCL_BEGIN_END_TRIANGLES, 3);
+}
+
+static void
+nv40_render_flush(struct draw_stage *draw, unsigned flags)
+{
+ struct nv40_render_stage *rs = nv40_render_stage(draw);
+ struct nv40_context *nv40 = rs->nv40;
+
+ if (rs->prim != NV40TCL_BEGIN_END_STOP) {
+ BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
+ OUT_RING (NV40TCL_BEGIN_END_STOP);
+ rs->prim = NV40TCL_BEGIN_END_STOP;
+ }
+}
+
+static void
+nv40_render_reset_stipple_counter(struct draw_stage *draw)
+{
+}
+
+static void
+nv40_render_destroy(struct draw_stage *draw)
+{
+ FREE(draw);
+}
+
+static INLINE void
+emit_mov(struct nv40_vertex_program *vp,
+ unsigned dst, unsigned src, unsigned vor, unsigned mask)
+{
+ struct nv40_vertex_program_exec *inst;
+
+ vp->insns = realloc(vp->insns,
+ sizeof(struct nv40_vertex_program_exec) *
+ ++vp->nr_insns);
+ inst = &vp->insns[vp->nr_insns - 1];
+
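+	/* Pre-encoded NV40 vertex-program MOV instruction words (treated here
+	 * as opaque constants); only the source attribute, destination register
+	 * and writemask fields are patched in below.
+	 */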
+ inst->data[0] = 0x401f9c6c;
+ inst->data[1] = 0x0040000d | (src << 8);
+ inst->data[2] = 0x8106c083;
+ inst->data[3] = 0x6041ff80 | (dst << 2) | (mask << 13);
+ inst->const_index = -1;
+ inst->has_branch_offset = FALSE;
+
+ vp->ir |= (1 << src);
+ if (vor != ~0)
+ vp->or |= (1 << vor);
+}
+
+static struct nv40_vertex_program *
+create_drawvp(struct nv40_context *nv40)
+{
+ struct nv40_vertex_program *vp = CALLOC_STRUCT(nv40_vertex_program);
+ unsigned i;
+
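+	/* Fixed pass-through vertex program for the swtnl path: copy each
+	 * incoming attribute straight through to its hardware result slot.
+	 */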
+ emit_mov(vp, NV40_VP_INST_DEST_POS, 0, ~0, 0xf);
+ emit_mov(vp, NV40_VP_INST_DEST_COL0, 3, 0, 0xf);
+ emit_mov(vp, NV40_VP_INST_DEST_COL1, 4, 1, 0xf);
+ emit_mov(vp, NV40_VP_INST_DEST_BFC0, 3, 2, 0xf);
+ emit_mov(vp, NV40_VP_INST_DEST_BFC1, 4, 3, 0xf);
+ emit_mov(vp, NV40_VP_INST_DEST_FOGC, 5, 4, 0x8);
+ for (i = 0; i < 8; i++)
+ emit_mov(vp, NV40_VP_INST_DEST_TC(i), 8 + i, 14 + i, 0xf);
+
+ vp->insns[vp->nr_insns - 1].data[3] |= 1;
+ vp->translated = TRUE;
+ return vp;
+}
+
+struct draw_stage *
+nv40_draw_render_stage(struct nv40_context *nv40)
+{
+ struct nv40_render_stage *render = CALLOC_STRUCT(nv40_render_stage);
+
+ if (!nv40->swtnl.vertprog)
+ nv40->swtnl.vertprog = create_drawvp(nv40);
+
+ render->nv40 = nv40;
+ render->stage.draw = nv40->draw;
+ render->stage.point = nv40_render_point;
+ render->stage.line = nv40_render_line;
+ render->stage.tri = nv40_render_tri;
+ render->stage.flush = nv40_render_flush;
+ render->stage.reset_stipple_counter = nv40_render_reset_stipple_counter;
+ render->stage.destroy = nv40_render_destroy;
+
+ return &render->stage;
+}
+
+boolean
+nv40_draw_elements_swtnl(struct pipe_context *pipe,
+ struct pipe_buffer *idxbuf, unsigned idxbuf_size,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ struct pipe_screen *pscreen = pipe->screen;
+ unsigned i;
+ void *map;
+
+ if (!nv40_state_validate_swtnl(nv40))
+ return FALSE;
+ nv40->state.dirty &= ~(1ULL << NV40_STATE_VTXBUF);
+ nv40_state_emit(nv40);
+
+ for (i = 0; i < nv40->vtxbuf_nr; i++) {
+ map = pipe_buffer_map(pscreen, nv40->vtxbuf[i].buffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ draw_set_mapped_vertex_buffer(nv40->draw, i, map);
+ }
+
+ if (idxbuf) {
+ map = pipe_buffer_map(pscreen, idxbuf,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ draw_set_mapped_element_buffer(nv40->draw, idxbuf_size, map);
+ } else {
+ draw_set_mapped_element_buffer(nv40->draw, 0, NULL);
+ }
+
+ if (nv40->constbuf[PIPE_SHADER_VERTEX]) {
+ const unsigned nr = nv40->constbuf_nr[PIPE_SHADER_VERTEX];
+
+ map = pipe_buffer_map(pscreen,
+ nv40->constbuf[PIPE_SHADER_VERTEX],
+ PIPE_BUFFER_USAGE_CPU_READ);
+ draw_set_mapped_constant_buffer(nv40->draw, map, nr);
+ }
+
+ draw_arrays(nv40->draw, mode, start, count);
+
+ for (i = 0; i < nv40->vtxbuf_nr; i++)
+ pipe_buffer_unmap(pscreen, nv40->vtxbuf[i].buffer);
+
+ if (idxbuf)
+ pipe_buffer_unmap(pscreen, idxbuf);
+
+ if (nv40->constbuf[PIPE_SHADER_VERTEX])
+ pipe_buffer_unmap(pscreen, nv40->constbuf[PIPE_SHADER_VERTEX]);
+
+ draw_flush(nv40->draw);
+ pipe->flush(pipe, 0, NULL);
+
+ return TRUE;
+}
+
+static INLINE void
+emit_attrib(struct nv40_context *nv40, unsigned hw, unsigned emit,
+ unsigned semantic, unsigned index)
+{
+ unsigned draw_out = draw_find_vs_output(nv40->draw, semantic, index);
+ unsigned a = nv40->swtnl.nr_attribs++;
+
+ nv40->swtnl.hw[a] = hw;
+ nv40->swtnl.emit[a] = emit;
+ nv40->swtnl.draw[a] = draw_out;
+}
+
+static boolean
+nv40_state_vtxfmt_validate(struct nv40_context *nv40)
+{
+ struct nv40_fragment_program *fp = nv40->fragprog;
+ unsigned colour = 0, texcoords = 0, fog = 0, i;
+
+ /* Determine needed fragprog inputs */
+ for (i = 0; i < fp->info.num_inputs; i++) {
+ switch (fp->info.input_semantic_name[i]) {
+ case TGSI_SEMANTIC_POSITION:
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ colour |= (1 << fp->info.input_semantic_index[i]);
+ break;
+ case TGSI_SEMANTIC_GENERIC:
+ texcoords |= (1 << fp->info.input_semantic_index[i]);
+ break;
+ case TGSI_SEMANTIC_FOG:
+ fog = 1;
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ nv40->swtnl.nr_attribs = 0;
+
+ /* Map draw vtxprog output to hw attribute IDs */
+ for (i = 0; i < 2; i++) {
+ if (!(colour & (1 << i)))
+ continue;
+ emit_attrib(nv40, 3 + i, EMIT_4UB, TGSI_SEMANTIC_COLOR, i);
+ }
+
+ for (i = 0; i < 8; i++) {
+ if (!(texcoords & (1 << i)))
+ continue;
+ emit_attrib(nv40, 8 + i, EMIT_4F, TGSI_SEMANTIC_GENERIC, i);
+ }
+
+ if (fog) {
+ emit_attrib(nv40, 5, EMIT_1F, TGSI_SEMANTIC_FOG, 0);
+ }
+
+ emit_attrib(nv40, 0, EMIT_3F, TGSI_SEMANTIC_POSITION, 0);
+
+ return FALSE;
+}
+
+struct nv40_state_entry nv40_state_vtxfmt = {
+ .validate = nv40_state_vtxfmt_validate,
+ .dirty = {
+ .pipe = NV40_NEW_ARRAYS | NV40_NEW_FRAGPROG,
+ .hw = 0
+ }
+};
+
diff --git a/src/gallium/drivers/nv40/nv40_fragprog.c b/src/gallium/drivers/nv40/nv40_fragprog.c
new file mode 100644
index 0000000000..32d9ed1a7f
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_fragprog.c
@@ -0,0 +1,995 @@
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_util.h"
+
+#include "nv40_context.h"
+
+#define SWZ_X 0
+#define SWZ_Y 1
+#define SWZ_Z 2
+#define SWZ_W 3
+#define MASK_X 1
+#define MASK_Y 2
+#define MASK_Z 4
+#define MASK_W 8
+#define MASK_ALL (MASK_X|MASK_Y|MASK_Z|MASK_W)
+#define DEF_SCALE NV40_FP_OP_DST_SCALE_1X
+#define DEF_CTEST NV40_FP_OP_COND_TR
+#include "nv40_shader.h"
+
+#define swz(s,x,y,z,w) nv40_sr_swz((s), SWZ_##x, SWZ_##y, SWZ_##z, SWZ_##w)
+#define neg(s) nv40_sr_neg((s))
+#define abs(s) nv40_sr_abs((s))
+#define scale(s,v) nv40_sr_scale((s), NV40_FP_OP_DST_SCALE_##v)
+
+#define MAX_CONSTS 128
+#define MAX_IMM 32
+struct nv40_fpc {
+ struct nv40_fragment_program *fp;
+
+ uint attrib_map[PIPE_MAX_SHADER_INPUTS];
+
+ unsigned r_temps;
+ unsigned r_temps_discard;
+ struct nv40_sreg r_result[PIPE_MAX_SHADER_OUTPUTS];
+ struct nv40_sreg *r_temp;
+
+ int num_regs;
+
+ unsigned inst_offset;
+ unsigned have_const;
+
+ struct {
+ int pipe;
+ float vals[4];
+ } consts[MAX_CONSTS];
+ int nr_consts;
+
+ struct nv40_sreg imm[MAX_IMM];
+ unsigned nr_imm;
+};
+
+static INLINE struct nv40_sreg
+temp(struct nv40_fpc *fpc)
+{
+ int idx = ffs(~fpc->r_temps) - 1;
+
+ if (idx < 0) {
+ NOUVEAU_ERR("out of temps!!\n");
+ assert(0);
+ return nv40_sr(NV40SR_TEMP, 0);
+ }
+
+ fpc->r_temps |= (1 << idx);
+ fpc->r_temps_discard |= (1 << idx);
+ return nv40_sr(NV40SR_TEMP, idx);
+}
+
+static INLINE void
+release_temps(struct nv40_fpc *fpc)
+{
+ fpc->r_temps &= ~fpc->r_temps_discard;
+ fpc->r_temps_discard = 0;
+}
+
+static INLINE struct nv40_sreg
+constant(struct nv40_fpc *fpc, int pipe, float vals[4])
+{
+ int idx;
+
+ if (fpc->nr_consts == MAX_CONSTS)
+ assert(0);
+ idx = fpc->nr_consts++;
+
+ fpc->consts[idx].pipe = pipe;
+ if (pipe == -1)
+ memcpy(fpc->consts[idx].vals, vals, 4 * sizeof(float));
+ return nv40_sr(NV40SR_CONST, idx);
+}
+
+#define arith(cc,s,o,d,m,s0,s1,s2) \
+ nv40_fp_arith((cc), (s), NV40_FP_OP_OPCODE_##o, \
+ (d), (m), (s0), (s1), (s2))
+#define tex(cc,s,o,u,d,m,s0,s1,s2) \
+ nv40_fp_tex((cc), (s), NV40_FP_OP_OPCODE_##o, (u), \
+ (d), (m), (s0), none, none)
+
+static void
+grow_insns(struct nv40_fpc *fpc, int size)
+{
+ struct nv40_fragment_program *fp = fpc->fp;
+
+ fp->insn_len += size;
+ fp->insn = realloc(fp->insn, sizeof(uint32_t) * fp->insn_len);
+}
+
+static void
+emit_src(struct nv40_fpc *fpc, int pos, struct nv40_sreg src)
+{
+ struct nv40_fragment_program *fp = fpc->fp;
+ uint32_t *hw = &fp->insn[fpc->inst_offset];
+ uint32_t sr = 0;
+
+ switch (src.type) {
+ case NV40SR_INPUT:
+ sr |= (NV40_FP_REG_TYPE_INPUT << NV40_FP_REG_TYPE_SHIFT);
+ hw[0] |= (src.index << NV40_FP_OP_INPUT_SRC_SHIFT);
+ break;
+ case NV40SR_OUTPUT:
+ sr |= NV40_FP_REG_SRC_HALF;
+ /* fall-through */
+ case NV40SR_TEMP:
+ sr |= (NV40_FP_REG_TYPE_TEMP << NV40_FP_REG_TYPE_SHIFT);
+ sr |= (src.index << NV40_FP_REG_SRC_SHIFT);
+ break;
+ case NV40SR_CONST:
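+		/* Constants are embedded inline: reserve four extra words after
+		 * the instruction, then either record the constbuf index so the
+		 * values can be patched at validate time, or copy the immediate
+		 * values in directly.
+		 */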
+ if (!fpc->have_const) {
+ grow_insns(fpc, 4);
+ fpc->have_const = 1;
+ }
+
+ hw = &fp->insn[fpc->inst_offset];
+ if (fpc->consts[src.index].pipe >= 0) {
+ struct nv40_fragment_program_data *fpd;
+
+ fp->consts = realloc(fp->consts, ++fp->nr_consts *
+ sizeof(*fpd));
+ fpd = &fp->consts[fp->nr_consts - 1];
+ fpd->offset = fpc->inst_offset + 4;
+ fpd->index = fpc->consts[src.index].pipe;
+ memset(&fp->insn[fpd->offset], 0, sizeof(uint32_t) * 4);
+ } else {
+ memcpy(&fp->insn[fpc->inst_offset + 4],
+ fpc->consts[src.index].vals,
+ sizeof(uint32_t) * 4);
+ }
+
+ sr |= (NV40_FP_REG_TYPE_CONST << NV40_FP_REG_TYPE_SHIFT);
+ break;
+ case NV40SR_NONE:
+ sr |= (NV40_FP_REG_TYPE_INPUT << NV40_FP_REG_TYPE_SHIFT);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (src.negate)
+ sr |= NV40_FP_REG_NEGATE;
+
+ if (src.abs)
+ hw[1] |= (1 << (29 + pos));
+
+ sr |= ((src.swz[0] << NV40_FP_REG_SWZ_X_SHIFT) |
+ (src.swz[1] << NV40_FP_REG_SWZ_Y_SHIFT) |
+ (src.swz[2] << NV40_FP_REG_SWZ_Z_SHIFT) |
+ (src.swz[3] << NV40_FP_REG_SWZ_W_SHIFT));
+
+ hw[pos + 1] |= sr;
+}
+
+static void
+emit_dst(struct nv40_fpc *fpc, struct nv40_sreg dst)
+{
+ struct nv40_fragment_program *fp = fpc->fp;
+ uint32_t *hw = &fp->insn[fpc->inst_offset];
+
+ switch (dst.type) {
+ case NV40SR_TEMP:
+ if (fpc->num_regs < (dst.index + 1))
+ fpc->num_regs = dst.index + 1;
+ break;
+ case NV40SR_OUTPUT:
+ if (dst.index == 1) {
+ fp->fp_control |= 0xe;
+ } else {
+ hw[0] |= NV40_FP_OP_OUT_REG_HALF;
+ }
+ break;
+ case NV40SR_NONE:
+ hw[0] |= (1 << 30);
+ break;
+ default:
+ assert(0);
+ }
+
+ hw[0] |= (dst.index << NV40_FP_OP_OUT_REG_SHIFT);
+}
+
+static void
+nv40_fp_arith(struct nv40_fpc *fpc, int sat, int op,
+ struct nv40_sreg dst, int mask,
+ struct nv40_sreg s0, struct nv40_sreg s1, struct nv40_sreg s2)
+{
+ struct nv40_fragment_program *fp = fpc->fp;
+ uint32_t *hw;
+
+ fpc->inst_offset = fp->insn_len;
+ fpc->have_const = 0;
+ grow_insns(fpc, 4);
+ hw = &fp->insn[fpc->inst_offset];
+ memset(hw, 0, sizeof(uint32_t) * 4);
+
+ if (op == NV40_FP_OP_OPCODE_KIL)
+ fp->fp_control |= NV40TCL_FP_CONTROL_KIL;
+ hw[0] |= (op << NV40_FP_OP_OPCODE_SHIFT);
+ hw[0] |= (mask << NV40_FP_OP_OUTMASK_SHIFT);
+ hw[2] |= (dst.dst_scale << NV40_FP_OP_DST_SCALE_SHIFT);
+
+ if (sat)
+ hw[0] |= NV40_FP_OP_OUT_SAT;
+
+ if (dst.cc_update)
+ hw[0] |= NV40_FP_OP_COND_WRITE_ENABLE;
+ hw[1] |= (dst.cc_test << NV40_FP_OP_COND_SHIFT);
+ hw[1] |= ((dst.cc_swz[0] << NV40_FP_OP_COND_SWZ_X_SHIFT) |
+ (dst.cc_swz[1] << NV40_FP_OP_COND_SWZ_Y_SHIFT) |
+ (dst.cc_swz[2] << NV40_FP_OP_COND_SWZ_Z_SHIFT) |
+ (dst.cc_swz[3] << NV40_FP_OP_COND_SWZ_W_SHIFT));
+
+ emit_dst(fpc, dst);
+ emit_src(fpc, 0, s0);
+ emit_src(fpc, 1, s1);
+ emit_src(fpc, 2, s2);
+}
+
+static void
+nv40_fp_tex(struct nv40_fpc *fpc, int sat, int op, int unit,
+ struct nv40_sreg dst, int mask,
+ struct nv40_sreg s0, struct nv40_sreg s1, struct nv40_sreg s2)
+{
+ struct nv40_fragment_program *fp = fpc->fp;
+
+ nv40_fp_arith(fpc, sat, op, dst, mask, s0, s1, s2);
+
+ fp->insn[fpc->inst_offset] |= (unit << NV40_FP_OP_TEX_UNIT_SHIFT);
+ fp->samplers |= (1 << unit);
+}
+
+static INLINE struct nv40_sreg
+tgsi_src(struct nv40_fpc *fpc, const struct tgsi_full_src_register *fsrc)
+{
+ struct nv40_sreg src;
+
+ switch (fsrc->SrcRegister.File) {
+ case TGSI_FILE_INPUT:
+ src = nv40_sr(NV40SR_INPUT,
+ fpc->attrib_map[fsrc->SrcRegister.Index]);
+ break;
+ case TGSI_FILE_CONSTANT:
+ src = constant(fpc, fsrc->SrcRegister.Index, NULL);
+ break;
+ case TGSI_FILE_IMMEDIATE:
+ assert(fsrc->SrcRegister.Index < fpc->nr_imm);
+ src = fpc->imm[fsrc->SrcRegister.Index];
+ break;
+ case TGSI_FILE_TEMPORARY:
+ src = fpc->r_temp[fsrc->SrcRegister.Index];
+ break;
+ /* NV40 fragprog result regs are just temps, so this is simple */
+ case TGSI_FILE_OUTPUT:
+ src = fpc->r_result[fsrc->SrcRegister.Index];
+ break;
+ default:
+ NOUVEAU_ERR("bad src file\n");
+ break;
+ }
+
+ src.abs = fsrc->SrcRegisterExtMod.Absolute;
+ src.negate = fsrc->SrcRegister.Negate;
+ src.swz[0] = fsrc->SrcRegister.SwizzleX;
+ src.swz[1] = fsrc->SrcRegister.SwizzleY;
+ src.swz[2] = fsrc->SrcRegister.SwizzleZ;
+ src.swz[3] = fsrc->SrcRegister.SwizzleW;
+ return src;
+}
+
+static INLINE struct nv40_sreg
+tgsi_dst(struct nv40_fpc *fpc, const struct tgsi_full_dst_register *fdst) {
+ switch (fdst->DstRegister.File) {
+ case TGSI_FILE_OUTPUT:
+ return fpc->r_result[fdst->DstRegister.Index];
+ case TGSI_FILE_TEMPORARY:
+ return fpc->r_temp[fdst->DstRegister.Index];
+ case TGSI_FILE_NULL:
+ return nv40_sr(NV40SR_NONE, 0);
+ default:
+ NOUVEAU_ERR("bad dst file %d\n", fdst->DstRegister.File);
+ return nv40_sr(NV40SR_NONE, 0);
+ }
+}
+
+static INLINE int
+tgsi_mask(uint tgsi)
+{
+ int mask = 0;
+
+ if (tgsi & TGSI_WRITEMASK_X) mask |= MASK_X;
+ if (tgsi & TGSI_WRITEMASK_Y) mask |= MASK_Y;
+ if (tgsi & TGSI_WRITEMASK_Z) mask |= MASK_Z;
+ if (tgsi & TGSI_WRITEMASK_W) mask |= MASK_W;
+ return mask;
+}
+
+static boolean
+src_native_swz(struct nv40_fpc *fpc, const struct tgsi_full_src_register *fsrc,
+ struct nv40_sreg *src)
+{
+ const struct nv40_sreg none = nv40_sr(NV40SR_NONE, 0);
+ struct nv40_sreg tgsi = tgsi_src(fpc, fsrc);
+ uint mask = 0, zero_mask = 0, one_mask = 0, neg_mask = 0;
+ uint neg[4] = { fsrc->SrcRegisterExtSwz.NegateX,
+ fsrc->SrcRegisterExtSwz.NegateY,
+ fsrc->SrcRegisterExtSwz.NegateZ,
+ fsrc->SrcRegisterExtSwz.NegateW };
+ uint c;
+
+ for (c = 0; c < 4; c++) {
+ switch (tgsi_util_get_full_src_register_extswizzle(fsrc, c)) {
+ case TGSI_EXTSWIZZLE_X:
+ case TGSI_EXTSWIZZLE_Y:
+ case TGSI_EXTSWIZZLE_Z:
+ case TGSI_EXTSWIZZLE_W:
+ mask |= (1 << c);
+ break;
+ case TGSI_EXTSWIZZLE_ZERO:
+ zero_mask |= (1 << c);
+ tgsi.swz[c] = SWZ_X;
+ break;
+ case TGSI_EXTSWIZZLE_ONE:
+ one_mask |= (1 << c);
+ tgsi.swz[c] = SWZ_X;
+ break;
+ default:
+ assert(0);
+ }
+
+ if (!tgsi.negate && neg[c])
+ neg_mask |= (1 << c);
+ }
+
+ if (mask == MASK_ALL && !neg_mask)
+ return TRUE;
+
+ *src = temp(fpc);
+
+ if (mask)
+ arith(fpc, 0, MOV, *src, mask, tgsi, none, none);
+
+ if (zero_mask)
+ arith(fpc, 0, SFL, *src, zero_mask, *src, none, none);
+
+ if (one_mask)
+ arith(fpc, 0, STR, *src, one_mask, *src, none, none);
+
+ if (neg_mask) {
+ struct nv40_sreg one = temp(fpc);
+ arith(fpc, 0, STR, one, neg_mask, one, none, none);
+ arith(fpc, 0, MUL, *src, neg_mask, *src, neg(one), none);
+ }
+
+ return FALSE;
+}
+
+static boolean
+nv40_fragprog_parse_instruction(struct nv40_fpc *fpc,
+ const struct tgsi_full_instruction *finst)
+{
+ const struct nv40_sreg none = nv40_sr(NV40SR_NONE, 0);
+ struct nv40_sreg src[3], dst, tmp;
+ int mask, sat, unit;
+ int ai = -1, ci = -1, ii = -1;
+ int i;
+
+ if (finst->Instruction.Opcode == TGSI_OPCODE_END)
+ return TRUE;
+
+ for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
+ const struct tgsi_full_src_register *fsrc;
+
+ fsrc = &finst->FullSrcRegisters[i];
+ if (fsrc->SrcRegister.File == TGSI_FILE_TEMPORARY) {
+ src[i] = tgsi_src(fpc, fsrc);
+ }
+ }
+
+ for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
+ const struct tgsi_full_src_register *fsrc;
+
+ fsrc = &finst->FullSrcRegisters[i];
+
+ switch (fsrc->SrcRegister.File) {
+ case TGSI_FILE_INPUT:
+ case TGSI_FILE_CONSTANT:
+ case TGSI_FILE_TEMPORARY:
+ if (!src_native_swz(fpc, fsrc, &src[i]))
+ continue;
+ break;
+ default:
+ break;
+ }
+
+ switch (fsrc->SrcRegister.File) {
+ case TGSI_FILE_INPUT:
+ if (ai == -1 || ai == fsrc->SrcRegister.Index) {
+ ai = fsrc->SrcRegister.Index;
+ src[i] = tgsi_src(fpc, fsrc);
+ } else {
+ src[i] = temp(fpc);
+ arith(fpc, 0, MOV, src[i], MASK_ALL,
+ tgsi_src(fpc, fsrc), none, none);
+ }
+ break;
+ case TGSI_FILE_CONSTANT:
+ if ((ci == -1 && ii == -1) ||
+ ci == fsrc->SrcRegister.Index) {
+ ci = fsrc->SrcRegister.Index;
+ src[i] = tgsi_src(fpc, fsrc);
+ } else {
+ src[i] = temp(fpc);
+ arith(fpc, 0, MOV, src[i], MASK_ALL,
+ tgsi_src(fpc, fsrc), none, none);
+ }
+ break;
+ case TGSI_FILE_IMMEDIATE:
+ if ((ci == -1 && ii == -1) ||
+ ii == fsrc->SrcRegister.Index) {
+ ii = fsrc->SrcRegister.Index;
+ src[i] = tgsi_src(fpc, fsrc);
+ } else {
+ src[i] = temp(fpc);
+ arith(fpc, 0, MOV, src[i], MASK_ALL,
+ tgsi_src(fpc, fsrc), none, none);
+ }
+ break;
+ case TGSI_FILE_TEMPORARY:
+ /* handled above */
+ break;
+ case TGSI_FILE_SAMPLER:
+ unit = fsrc->SrcRegister.Index;
+ break;
+ case TGSI_FILE_OUTPUT:
+ break;
+ default:
+ NOUVEAU_ERR("bad src file\n");
+ return FALSE;
+ }
+ }
+
+ dst = tgsi_dst(fpc, &finst->FullDstRegisters[0]);
+ mask = tgsi_mask(finst->FullDstRegisters[0].DstRegister.WriteMask);
+ sat = (finst->Instruction.Saturate == TGSI_SAT_ZERO_ONE);
+
+ switch (finst->Instruction.Opcode) {
+ case TGSI_OPCODE_ABS:
+ arith(fpc, sat, MOV, dst, mask, abs(src[0]), none, none);
+ break;
+ case TGSI_OPCODE_ADD:
+ arith(fpc, sat, ADD, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_CMP:
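+		/* CMP: dst = (src0 < 0) ? src1 : src2.  Write src2 first, set the
+		 * condition codes from src0, then overwrite with src1 where the
+		 * condition is less-than.
+		 */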
+ tmp = temp(fpc);
+ arith(fpc, sat, MOV, dst, mask, src[2], none, none);
+ tmp.cc_update = 1;
+ arith(fpc, 0, MOV, tmp, 0xf, src[0], none, none);
+ dst.cc_test = NV40_VP_INST_COND_LT;
+ arith(fpc, sat, MOV, dst, mask, src[1], none, none);
+ break;
+ case TGSI_OPCODE_COS:
+ arith(fpc, sat, COS, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_DDX:
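+		/* Derivatives of the .z/.w components are computed separately by
+		 * swizzling them into .x/.y first, then merging the results.
+		 */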
+ if (mask & (MASK_Z | MASK_W)) {
+ tmp = temp(fpc);
+ arith(fpc, sat, DDX, tmp, MASK_X | MASK_Y,
+ swz(src[0], Z, W, Z, W), none, none);
+ arith(fpc, 0, MOV, tmp, MASK_Z | MASK_W,
+ swz(tmp, X, Y, X, Y), none, none);
+ arith(fpc, sat, DDX, tmp, MASK_X | MASK_Y, src[0],
+ none, none);
+ arith(fpc, 0, MOV, dst, mask, tmp, none, none);
+ } else {
+ arith(fpc, sat, DDX, dst, mask, src[0], none, none);
+ }
+ break;
+ case TGSI_OPCODE_DDY:
+ if (mask & (MASK_Z | MASK_W)) {
+ tmp = temp(fpc);
+ arith(fpc, sat, DDY, tmp, MASK_X | MASK_Y,
+ swz(src[0], Z, W, Z, W), none, none);
+ arith(fpc, 0, MOV, tmp, MASK_Z | MASK_W,
+ swz(tmp, X, Y, X, Y), none, none);
+ arith(fpc, sat, DDY, tmp, MASK_X | MASK_Y, src[0],
+ none, none);
+ arith(fpc, 0, MOV, dst, mask, tmp, none, none);
+ } else {
+ arith(fpc, sat, DDY, dst, mask, src[0], none, none);
+ }
+ break;
+ case TGSI_OPCODE_DP3:
+ arith(fpc, sat, DP3, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_DP4:
+ arith(fpc, sat, DP4, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_DPH:
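+		/* Homogeneous dot product: dst = dot3(src0, src1) + src1.w */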
+ tmp = temp(fpc);
+ arith(fpc, 0, DP3, tmp, MASK_X, src[0], src[1], none);
+ arith(fpc, sat, ADD, dst, mask, swz(tmp, X, X, X, X),
+ swz(src[1], W, W, W, W), none);
+ break;
+ case TGSI_OPCODE_DST:
+ arith(fpc, sat, DST, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_EX2:
+ arith(fpc, sat, EX2, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_FLR:
+ arith(fpc, sat, FLR, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_FRC:
+ arith(fpc, sat, FRC, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_KILP:
+ arith(fpc, 0, KIL, none, 0, none, none, none);
+ break;
+ case TGSI_OPCODE_KIL:
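+		/* Conditional kill: update the condition codes from src0 via a MOV
+		 * to a null destination, then KIL where any component is < 0.
+		 */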
+ dst = nv40_sr(NV40SR_NONE, 0);
+ dst.cc_update = 1;
+ arith(fpc, 0, MOV, dst, MASK_ALL, src[0], none, none);
+ dst.cc_update = 0; dst.cc_test = NV40_FP_OP_COND_LT;
+ arith(fpc, 0, KIL, dst, 0, none, none, none);
+ break;
+ case TGSI_OPCODE_LG2:
+ arith(fpc, sat, LG2, dst, mask, src[0], none, none);
+ break;
+// case TGSI_OPCODE_LIT:
+ case TGSI_OPCODE_LRP:
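+		/* lrp(a, b, c) = a*b + (1 - a)*c, built from two MADs:
+		 * tmp = -a*c + c, then dst = a*b + tmp.
+		 */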
+ tmp = temp(fpc);
+ arith(fpc, 0, MAD, tmp, mask, neg(src[0]), src[2], src[2]);
+ arith(fpc, sat, MAD, dst, mask, src[0], src[1], tmp);
+ break;
+ case TGSI_OPCODE_MAD:
+ arith(fpc, sat, MAD, dst, mask, src[0], src[1], src[2]);
+ break;
+ case TGSI_OPCODE_MAX:
+ arith(fpc, sat, MAX, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_MIN:
+ arith(fpc, sat, MIN, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_MOV:
+ arith(fpc, sat, MOV, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_MUL:
+ arith(fpc, sat, MUL, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_NOISE1:
+ case TGSI_OPCODE_NOISE2:
+ case TGSI_OPCODE_NOISE3:
+ case TGSI_OPCODE_NOISE4:
+ arith(fpc, sat, SFL, dst, mask, none, none, none);
+ break;
+ case TGSI_OPCODE_POW:
+ tmp = temp(fpc);
+ arith(fpc, 0, LG2, tmp, MASK_X,
+ swz(src[0], X, X, X, X), none, none);
+ arith(fpc, 0, MUL, tmp, MASK_X, swz(tmp, X, X, X, X),
+ swz(src[1], X, X, X, X), none);
+ arith(fpc, sat, EX2, dst, mask,
+ swz(tmp, X, X, X, X), none, none);
+ break;
+ case TGSI_OPCODE_RCP:
+ arith(fpc, sat, RCP, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_RET:
+ assert(0);
+ break;
+ case TGSI_OPCODE_RFL:
+ tmp = temp(fpc);
+ arith(fpc, 0, DP3, tmp, MASK_X, src[0], src[0], none);
+ arith(fpc, 0, DP3, tmp, MASK_Y, src[0], src[1], none);
+ arith(fpc, 0, DIV, scale(tmp, 2X), MASK_Z,
+ swz(tmp, Y, Y, Y, Y), swz(tmp, X, X, X, X), none);
+ arith(fpc, sat, MAD, dst, mask,
+ swz(tmp, Z, Z, Z, Z), src[0], neg(src[1]));
+ break;
+ case TGSI_OPCODE_RSQ:
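+		/* rsq(x) = 2^(-log2(|x|) / 2); the INV_2X destination scale
+		 * provides the halving of the LG2 result, which is then negated
+		 * on the way into EX2.
+		 */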
+ tmp = temp(fpc);
+ arith(fpc, 0, LG2, scale(tmp, INV_2X), MASK_X,
+ abs(swz(src[0], X, X, X, X)), none, none);
+ arith(fpc, sat, EX2, dst, mask,
+ neg(swz(tmp, X, X, X, X)), none, none);
+ break;
+ case TGSI_OPCODE_SCS:
+ if (mask & MASK_X) {
+ arith(fpc, sat, COS, dst, MASK_X,
+ swz(src[0], X, X, X, X), none, none);
+ }
+ if (mask & MASK_Y) {
+ arith(fpc, sat, SIN, dst, MASK_Y,
+ swz(src[0], X, X, X, X), none, none);
+ }
+ break;
+ case TGSI_OPCODE_SEQ:
+ arith(fpc, sat, SEQ, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SFL:
+ arith(fpc, sat, SFL, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SGE:
+ arith(fpc, sat, SGE, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SGT:
+ arith(fpc, sat, SGT, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SIN:
+ arith(fpc, sat, SIN, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_SLE:
+ arith(fpc, sat, SLE, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SLT:
+ arith(fpc, sat, SLT, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SNE:
+ arith(fpc, sat, SNE, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_STR:
+ arith(fpc, sat, STR, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SUB:
+ arith(fpc, sat, ADD, dst, mask, src[0], neg(src[1]), none);
+ break;
+ case TGSI_OPCODE_TEX:
+ tex(fpc, sat, TEX, unit, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_TXB:
+ tex(fpc, sat, TXB, unit, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_TXP:
+ tex(fpc, sat, TXP, unit, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_XPD:
+ tmp = temp(fpc);
+ arith(fpc, 0, MUL, tmp, mask,
+ swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none);
+ arith(fpc, sat, MAD, dst, (mask & ~MASK_W),
+ swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y),
+ neg(tmp));
+ break;
+ default:
+ NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
+ return FALSE;
+ }
+
+ release_temps(fpc);
+ return TRUE;
+}
+
+static boolean
+nv40_fragprog_parse_decl_attrib(struct nv40_fpc *fpc,
+ const struct tgsi_full_declaration *fdec)
+{
+ int hw;
+
+ switch (fdec->Semantic.SemanticName) {
+ case TGSI_SEMANTIC_POSITION:
+ hw = NV40_FP_OP_INPUT_SRC_POSITION;
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ if (fdec->Semantic.SemanticIndex == 0) {
+ hw = NV40_FP_OP_INPUT_SRC_COL0;
+ } else
+ if (fdec->Semantic.SemanticIndex == 1) {
+ hw = NV40_FP_OP_INPUT_SRC_COL1;
+ } else {
+ NOUVEAU_ERR("bad colour semantic index\n");
+ return FALSE;
+ }
+ break;
+ case TGSI_SEMANTIC_FOG:
+ hw = NV40_FP_OP_INPUT_SRC_FOGC;
+ break;
+ case TGSI_SEMANTIC_GENERIC:
+ if (fdec->Semantic.SemanticIndex <= 7) {
+ hw = NV40_FP_OP_INPUT_SRC_TC(fdec->Semantic.
+ SemanticIndex);
+ } else {
+ NOUVEAU_ERR("bad generic semantic index\n");
+ return FALSE;
+ }
+ break;
+ default:
+ NOUVEAU_ERR("bad input semantic\n");
+ return FALSE;
+ }
+
+ fpc->attrib_map[fdec->DeclarationRange.First] = hw;
+ return TRUE;
+}
+
+static boolean
+nv40_fragprog_parse_decl_output(struct nv40_fpc *fpc,
+ const struct tgsi_full_declaration *fdec)
+{
+ unsigned idx = fdec->DeclarationRange.First;
+ unsigned hw;
+
+ switch (fdec->Semantic.SemanticName) {
+ case TGSI_SEMANTIC_POSITION:
+ hw = 1;
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ switch (fdec->Semantic.SemanticIndex) {
+ case 0: hw = 0; break;
+ case 1: hw = 2; break;
+ case 2: hw = 3; break;
+ case 3: hw = 4; break;
+ default:
+ NOUVEAU_ERR("bad rcol index\n");
+ return FALSE;
+ }
+ break;
+ default:
+ NOUVEAU_ERR("bad output semantic\n");
+ return FALSE;
+ }
+
+ fpc->r_result[idx] = nv40_sr(NV40SR_OUTPUT, hw);
+ fpc->r_temps |= (1 << hw);
+ return TRUE;
+}
+
+static boolean
+nv40_fragprog_prepare(struct nv40_fpc *fpc)
+{
+ struct tgsi_parse_context p;
+ int high_temp = -1, i;
+
+ tgsi_parse_init(&p, fpc->fp->pipe.tokens);
+ while (!tgsi_parse_end_of_tokens(&p)) {
+ const union tgsi_full_token *tok = &p.FullToken;
+
+ tgsi_parse_token(&p);
+ switch(tok->Token.Type) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ {
+ const struct tgsi_full_declaration *fdec;
+ fdec = &p.FullToken.FullDeclaration;
+ switch (fdec->Declaration.File) {
+ case TGSI_FILE_INPUT:
+ if (!nv40_fragprog_parse_decl_attrib(fpc, fdec))
+ goto out_err;
+ break;
+ case TGSI_FILE_OUTPUT:
+ if (!nv40_fragprog_parse_decl_output(fpc, fdec))
+ goto out_err;
+ break;
+ case TGSI_FILE_TEMPORARY:
+ if (fdec->DeclarationRange.Last > high_temp) {
+ high_temp =
+ fdec->DeclarationRange.Last;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ {
+ struct tgsi_full_immediate *imm;
+ float vals[4];
+
+ imm = &p.FullToken.FullImmediate;
+ assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
+ assert(fpc->nr_imm < MAX_IMM);
+
+ vals[0] = imm->u[0].Float;
+ vals[1] = imm->u[1].Float;
+ vals[2] = imm->u[2].Float;
+ vals[3] = imm->u[3].Float;
+ fpc->imm[fpc->nr_imm++] = constant(fpc, -1, vals);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ tgsi_parse_free(&p);
+
+ if (++high_temp) {
+ fpc->r_temp = CALLOC(high_temp, sizeof(struct nv40_sreg));
+ for (i = 0; i < high_temp; i++)
+ fpc->r_temp[i] = temp(fpc);
+ fpc->r_temps_discard = 0;
+ }
+
+ return TRUE;
+
+out_err:
+ if (fpc->r_temp)
+ FREE(fpc->r_temp);
+ tgsi_parse_free(&p);
+ return FALSE;
+}
+
+static void
+nv40_fragprog_translate(struct nv40_context *nv40,
+ struct nv40_fragment_program *fp)
+{
+ struct tgsi_parse_context parse;
+ struct nv40_fpc *fpc = NULL;
+
+ fpc = CALLOC(1, sizeof(struct nv40_fpc));
+ if (!fpc)
+ return;
+ fpc->fp = fp;
+ fpc->num_regs = 2;
+
+ if (!nv40_fragprog_prepare(fpc)) {
+ FREE(fpc);
+ return;
+ }
+
+ tgsi_parse_init(&parse, fp->pipe.tokens);
+
+ while (!tgsi_parse_end_of_tokens(&parse)) {
+ tgsi_parse_token(&parse);
+
+ switch (parse.FullToken.Token.Type) {
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ {
+ const struct tgsi_full_instruction *finst;
+
+ finst = &parse.FullToken.FullInstruction;
+ if (!nv40_fragprog_parse_instruction(fpc, finst))
+ goto out_err;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ fp->fp_control |= fpc->num_regs << NV40TCL_FP_CONTROL_TEMP_COUNT_SHIFT;
+
+ /* Terminate final instruction */
+ fp->insn[fpc->inst_offset] |= 0x00000001;
+
+	/* Append a NOP + END instruction; this may or may not be necessary. */
+ fpc->inst_offset = fp->insn_len;
+ grow_insns(fpc, 4);
+ fp->insn[fpc->inst_offset + 0] = 0x00000001;
+ fp->insn[fpc->inst_offset + 1] = 0x00000000;
+ fp->insn[fpc->inst_offset + 2] = 0x00000000;
+ fp->insn[fpc->inst_offset + 3] = 0x00000000;
+
+ fp->translated = TRUE;
+out_err:
+ tgsi_parse_free(&parse);
+ if (fpc->r_temp)
+ FREE(fpc->r_temp);
+ FREE(fpc);
+}
+
+static void
+nv40_fragprog_upload(struct nv40_context *nv40,
+ struct nv40_fragment_program *fp)
+{
+ struct pipe_screen *pscreen = nv40->pipe.screen;
+ const uint32_t le = 1;
+ uint32_t *map;
+ int i;
+
+ map = pipe_buffer_map(pscreen, fp->buffer, PIPE_BUFFER_USAGE_CPU_WRITE);
+
+#if 0
+ for (i = 0; i < fp->insn_len; i++) {
+ fflush(stdout); fflush(stderr);
+ NOUVEAU_ERR("%d 0x%08x\n", i, fp->insn[i]);
+ fflush(stdout); fflush(stderr);
+ }
+#endif
+
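+	/* Runtime endianness check: the low byte of 'le' reads as 1 only on
+	 * little-endian hosts; big-endian hosts take the halfword-swap path
+	 * below.
+	 */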
+ if ((*(const uint8_t *)&le)) {
+ for (i = 0; i < fp->insn_len; i++) {
+ map[i] = fp->insn[i];
+ }
+ } else {
+ /* Weird swapping for big-endian chips */
+ for (i = 0; i < fp->insn_len; i++) {
+ map[i] = ((fp->insn[i] & 0xffff) << 16) |
+ ((fp->insn[i] >> 16) & 0xffff);
+ }
+ }
+
+ pipe_buffer_unmap(pscreen, fp->buffer);
+}
+
+static boolean
+nv40_fragprog_validate(struct nv40_context *nv40)
+{
+ struct nv40_fragment_program *fp = nv40->fragprog;
+ struct pipe_buffer *constbuf =
+ nv40->constbuf[PIPE_SHADER_FRAGMENT];
+ struct pipe_screen *pscreen = nv40->pipe.screen;
+ struct nouveau_stateobj *so;
+ boolean new_consts = FALSE;
+ int i;
+
+ if (fp->translated)
+ goto update_constants;
+
+ nv40->fallback_swrast &= ~NV40_NEW_FRAGPROG;
+ nv40_fragprog_translate(nv40, fp);
+ if (!fp->translated) {
+ nv40->fallback_swrast |= NV40_NEW_FRAGPROG;
+ return FALSE;
+ }
+
+ fp->buffer = pscreen->buffer_create(pscreen, 0x100, 0, fp->insn_len * 4);
+ nv40_fragprog_upload(nv40, fp);
+
+ so = so_new(4, 1);
+ so_method(so, nv40->screen->curie, NV40TCL_FP_ADDRESS, 1);
+ so_reloc (so, nouveau_bo(fp->buffer), 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_GART | NOUVEAU_BO_RD | NOUVEAU_BO_LOW |
+ NOUVEAU_BO_OR, NV40TCL_FP_ADDRESS_DMA0,
+ NV40TCL_FP_ADDRESS_DMA1);
+ so_method(so, nv40->screen->curie, NV40TCL_FP_CONTROL, 1);
+ so_data (so, fp->fp_control);
+ so_ref(so, &fp->so);
+ so_ref(NULL, &so);
+
+update_constants:
+ if (fp->nr_consts) {
+ float *map;
+
+ map = pipe_buffer_map(pscreen, constbuf,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ for (i = 0; i < fp->nr_consts; i++) {
+ struct nv40_fragment_program_data *fpd = &fp->consts[i];
+ uint32_t *p = &fp->insn[fpd->offset];
+ uint32_t *cb = (uint32_t *)&map[fpd->index * 4];
+
+ if (!memcmp(p, cb, 4 * sizeof(float)))
+ continue;
+ memcpy(p, cb, 4 * sizeof(float));
+ new_consts = TRUE;
+ }
+ pipe_buffer_unmap(pscreen, constbuf);
+
+ if (new_consts)
+ nv40_fragprog_upload(nv40, fp);
+ }
+
+ if (new_consts || fp->so != nv40->state.hw[NV40_STATE_FRAGPROG]) {
+ so_ref(fp->so, &nv40->state.hw[NV40_STATE_FRAGPROG]);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+void
+nv40_fragprog_destroy(struct nv40_context *nv40,
+ struct nv40_fragment_program *fp)
+{
+ if (fp->insn_len)
+ FREE(fp->insn);
+}
+
+struct nv40_state_entry nv40_state_fragprog = {
+ .validate = nv40_fragprog_validate,
+ .dirty = {
+ .pipe = NV40_NEW_FRAGPROG,
+ .hw = NV40_STATE_FRAGPROG
+ }
+};
+
diff --git a/src/gallium/drivers/nv40/nv40_fragtex.c b/src/gallium/drivers/nv40/nv40_fragtex.c
new file mode 100644
index 0000000000..f6cdf31dfe
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_fragtex.c
@@ -0,0 +1,170 @@
+#include "nv40_context.h"
+
+#define _(m,tf,ts0x,ts0y,ts0z,ts0w,ts1x,ts1y,ts1z,ts1w,sx,sy,sz,sw) \
+{ \
+ TRUE, \
+ PIPE_FORMAT_##m, \
+ NV40TCL_TEX_FORMAT_FORMAT_##tf, \
+ (NV40TCL_TEX_SWIZZLE_S0_X_##ts0x | NV40TCL_TEX_SWIZZLE_S0_Y_##ts0y | \
+ NV40TCL_TEX_SWIZZLE_S0_Z_##ts0z | NV40TCL_TEX_SWIZZLE_S0_W_##ts0w | \
+ NV40TCL_TEX_SWIZZLE_S1_X_##ts1x | NV40TCL_TEX_SWIZZLE_S1_Y_##ts1y | \
+ NV40TCL_TEX_SWIZZLE_S1_Z_##ts1z | NV40TCL_TEX_SWIZZLE_S1_W_##ts1w), \
+ ((NV40TCL_TEX_FILTER_SIGNED_RED*sx) | (NV40TCL_TEX_FILTER_SIGNED_GREEN*sy) | \
+ (NV40TCL_TEX_FILTER_SIGNED_BLUE*sz) | (NV40TCL_TEX_FILTER_SIGNED_ALPHA*sw)) \
+}
+
+struct nv40_texture_format {
+ boolean defined;
+ uint pipe;
+ int format;
+ int swizzle;
+ int sign;
+};
+
+static struct nv40_texture_format
+nv40_texture_formats[] = {
+ _(A8R8G8B8_UNORM, A8R8G8B8, S1, S1, S1, S1, X, Y, Z, W, 0, 0, 0, 0),
+ _(A1R5G5B5_UNORM, A1R5G5B5, S1, S1, S1, S1, X, Y, Z, W, 0, 0, 0, 0),
+ _(A4R4G4B4_UNORM, A4R4G4B4, S1, S1, S1, S1, X, Y, Z, W, 0, 0, 0, 0),
+ _(R5G6B5_UNORM , R5G6B5 , S1, S1, S1, ONE, X, Y, Z, W, 0, 0, 0, 0),
+ _(L8_UNORM , L8 , S1, S1, S1, ONE, X, X, X, X, 0, 0, 0, 0),
+ _(A8_UNORM , L8 , ZERO, ZERO, ZERO, S1, X, X, X, X, 0, 0, 0, 0),
+ _(R16_SNORM , A16 , ZERO, ZERO, S1, ONE, X, X, X, Y, 1, 1, 1, 1),
+ _(I8_UNORM , L8 , S1, S1, S1, S1, X, X, X, X, 0, 0, 0, 0),
+ _(A8L8_UNORM , A8L8 , S1, S1, S1, S1, X, X, X, Y, 0, 0, 0, 0),
+ _(Z16_UNORM , Z16 , S1, S1, S1, ONE, X, X, X, X, 0, 0, 0, 0),
+ _(Z24S8_UNORM , Z24 , S1, S1, S1, ONE, X, X, X, X, 0, 0, 0, 0),
+ _(DXT1_RGB , DXT1 , S1, S1, S1, ONE, X, Y, Z, W, 0, 0, 0, 0),
+ _(DXT1_RGBA , DXT1 , S1, S1, S1, S1, X, Y, Z, W, 0, 0, 0, 0),
+ _(DXT3_RGBA , DXT3 , S1, S1, S1, S1, X, Y, Z, W, 0, 0, 0, 0),
+ _(DXT5_RGBA , DXT5 , S1, S1, S1, S1, X, Y, Z, W, 0, 0, 0, 0),
+ {},
+};
+
+static struct nv40_texture_format *
+nv40_fragtex_format(uint pipe_format)
+{
+ struct nv40_texture_format *tf = nv40_texture_formats;
+
+ while (tf->defined) {
+ if (tf->pipe == pipe_format)
+ return tf;
+ tf++;
+ }
+
+ NOUVEAU_ERR("unknown texture format %s\n", pf_name(pipe_format));
+ return NULL;
+}
+
+
+static struct nouveau_stateobj *
+nv40_fragtex_build(struct nv40_context *nv40, int unit)
+{
+ struct nv40_sampler_state *ps = nv40->tex_sampler[unit];
+ struct nv40_miptree *nv40mt = nv40->tex_miptree[unit];
+ struct nouveau_bo *bo = nouveau_bo(nv40mt->buffer);
+ struct pipe_texture *pt = &nv40mt->base;
+ struct nv40_texture_format *tf;
+ struct nouveau_stateobj *so;
+ uint32_t txf, txs, txp;
+ unsigned tex_flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD;
+
+ tf = nv40_fragtex_format(pt->format);
+ if (!tf)
+ assert(0);
+
+ txf = ps->fmt;
+ txf |= tf->format | 0x8000;
+ txf |= ((pt->last_level + 1) << NV40TCL_TEX_FORMAT_MIPMAP_COUNT_SHIFT);
+
+ if (1) /* XXX */
+ txf |= NV40TCL_TEX_FORMAT_NO_BORDER;
+
+ switch (pt->target) {
+ case PIPE_TEXTURE_CUBE:
+ txf |= NV40TCL_TEX_FORMAT_CUBIC;
+ /* fall-through */
+ case PIPE_TEXTURE_2D:
+ txf |= NV40TCL_TEX_FORMAT_DIMS_2D;
+ break;
+ case PIPE_TEXTURE_3D:
+ txf |= NV40TCL_TEX_FORMAT_DIMS_3D;
+ break;
+ case PIPE_TEXTURE_1D:
+ txf |= NV40TCL_TEX_FORMAT_DIMS_1D;
+ break;
+ default:
+ NOUVEAU_ERR("Unknown target %d\n", pt->target);
+ return NULL;
+ }
+
+ if (!(pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)) {
+ txp = 0;
+ } else {
+ txp = nv40mt->level[0].pitch;
+ txf |= NV40TCL_TEX_FORMAT_LINEAR;
+ }
+
+ txs = tf->swizzle;
+
+ so = so_new(16, 2);
+ so_method(so, nv40->screen->curie, NV40TCL_TEX_OFFSET(unit), 8);
+ so_reloc (so, bo, 0, tex_flags | NOUVEAU_BO_LOW, 0, 0);
+ so_reloc (so, bo, txf, tex_flags | NOUVEAU_BO_OR,
+ NV40TCL_TEX_FORMAT_DMA0, NV40TCL_TEX_FORMAT_DMA1);
+ so_data (so, ps->wrap);
+ so_data (so, NV40TCL_TEX_ENABLE_ENABLE | ps->en);
+ so_data (so, txs);
+ so_data (so, ps->filt | tf->sign | 0x2000 /*voodoo*/);
+ so_data (so, (pt->width[0] << NV40TCL_TEX_SIZE0_W_SHIFT) |
+ pt->height[0]);
+ so_data (so, ps->bcol);
+ so_method(so, nv40->screen->curie, NV40TCL_TEX_SIZE1(unit), 1);
+ so_data (so, (pt->depth[0] << NV40TCL_TEX_SIZE1_DEPTH_SHIFT) | txp);
+
+ return so;
+}
+
+static boolean
+nv40_fragtex_validate(struct nv40_context *nv40)
+{
+ struct nv40_fragment_program *fp = nv40->fragprog;
+ struct nv40_state *state = &nv40->state;
+ struct nouveau_stateobj *so;
+ unsigned samplers, unit;
+
+ samplers = state->fp_samplers & ~fp->samplers;
+ while (samplers) {
+ unit = ffs(samplers) - 1;
+ samplers &= ~(1 << unit);
+
+ so = so_new(2, 0);
+ so_method(so, nv40->screen->curie, NV40TCL_TEX_ENABLE(unit), 1);
+ so_data (so, 0);
+ so_ref(so, &nv40->state.hw[NV40_STATE_FRAGTEX0 + unit]);
+ so_ref(NULL, &so);
+ state->dirty |= (1ULL << (NV40_STATE_FRAGTEX0 + unit));
+ }
+
+ samplers = nv40->dirty_samplers & fp->samplers;
+ while (samplers) {
+ unit = ffs(samplers) - 1;
+ samplers &= ~(1 << unit);
+
+ so = nv40_fragtex_build(nv40, unit);
+ so_ref(so, &nv40->state.hw[NV40_STATE_FRAGTEX0 + unit]);
+ so_ref(NULL, &so);
+ state->dirty |= (1ULL << (NV40_STATE_FRAGTEX0 + unit));
+ }
+
+ nv40->state.fp_samplers = fp->samplers;
+ return FALSE;
+}
+
+struct nv40_state_entry nv40_state_fragtex = {
+ .validate = nv40_fragtex_validate,
+ .dirty = {
+ .pipe = NV40_NEW_SAMPLER | NV40_NEW_FRAGPROG,
+ .hw = 0
+ }
+};
+
diff --git a/src/gallium/drivers/nv40/nv40_miptree.c b/src/gallium/drivers/nv40/nv40_miptree.c
new file mode 100644
index 0000000000..5a201ccf45
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_miptree.c
@@ -0,0 +1,213 @@
+#include "pipe/p_state.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+
+#include "nv40_context.h"
+
+static void
+nv40_miptree_layout(struct nv40_miptree *mt)
+{
+ struct pipe_texture *pt = &mt->base;
+ uint width = pt->width[0], height = pt->height[0], depth = pt->depth[0];
+ uint offset = 0;
+ int nr_faces, l, f;
+ uint wide_pitch = pt->tex_usage & (PIPE_TEXTURE_USAGE_SAMPLER |
+ PIPE_TEXTURE_USAGE_DEPTH_STENCIL |
+ PIPE_TEXTURE_USAGE_RENDER_TARGET |
+ PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
+ PIPE_TEXTURE_USAGE_PRIMARY);
+
+ if (pt->target == PIPE_TEXTURE_CUBE) {
+ nr_faces = 6;
+ } else
+ if (pt->target == PIPE_TEXTURE_3D) {
+ nr_faces = pt->depth[0];
+ } else {
+ nr_faces = 1;
+ }
+
+ for (l = 0; l <= pt->last_level; l++) {
+ pt->width[l] = width;
+ pt->height[l] = height;
+ pt->depth[l] = depth;
+ pt->nblocksx[l] = pf_get_nblocksx(&pt->block, width);
+ pt->nblocksy[l] = pf_get_nblocksy(&pt->block, height);
+
+ if (wide_pitch && (pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR))
+ mt->level[l].pitch = align(pt->width[0] * pt->block.size, 64);
+ else
+ mt->level[l].pitch = pt->width[l] * pt->block.size;
+
+ mt->level[l].image_offset =
+ CALLOC(nr_faces, sizeof(unsigned));
+
+ width = MAX2(1, width >> 1);
+ height = MAX2(1, height >> 1);
+ depth = MAX2(1, depth >> 1);
+ }
+
+ for (f = 0; f < nr_faces; f++) {
+ for (l = 0; l < pt->last_level; l++) {
+ mt->level[l].image_offset[f] = offset;
+
+ if (!(pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR) &&
+ pt->width[l + 1] > 1 && pt->height[l + 1] > 1)
+ offset += align(mt->level[l].pitch * pt->height[l], 64);
+ else
+ offset += mt->level[l].pitch * pt->height[l];
+ }
+
+ mt->level[l].image_offset[f] = offset;
+ offset += mt->level[l].pitch * pt->height[l];
+ }
+
+ mt->total_size = offset;
+}
+
+static struct pipe_texture *
+nv40_miptree_create(struct pipe_screen *pscreen, const struct pipe_texture *pt)
+{
+ struct nv40_miptree *mt;
+ unsigned buf_usage = PIPE_BUFFER_USAGE_PIXEL |
+ NOUVEAU_BUFFER_USAGE_TEXTURE;
+
+ mt = MALLOC(sizeof(struct nv40_miptree));
+ if (!mt)
+ return NULL;
+ mt->base = *pt;
+ pipe_reference_init(&mt->base.reference, 1);
+ mt->base.screen = pscreen;
+
+ /* Swizzled textures must be POT */
+ if (pt->width[0] & (pt->width[0] - 1) ||
+ pt->height[0] & (pt->height[0] - 1))
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ else
+ if (pt->tex_usage & (PIPE_TEXTURE_USAGE_PRIMARY |
+ PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
+ PIPE_TEXTURE_USAGE_DEPTH_STENCIL))
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ else
+ if (pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC)
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ else {
+ switch (pt->format) {
+ /* TODO: Figure out which formats can be swizzled */
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_X8R8G8B8_UNORM:
+ case PIPE_FORMAT_R16_SNORM:
+ {
+ if (debug_get_bool_option("NOUVEAU_NO_SWIZZLE", FALSE))
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ break;
+ }
+ default:
+ mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
+ }
+ }
+
+ if (pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC)
+ buf_usage |= PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+
+ nv40_miptree_layout(mt);
+
+ mt->buffer = pscreen->buffer_create(pscreen, 256, buf_usage, mt->total_size);
+ if (!mt->buffer) {
+ FREE(mt);
+ return NULL;
+ }
+
+ return &mt->base;
+}
+
+static struct pipe_texture *
+nv40_miptree_blanket(struct pipe_screen *pscreen, const struct pipe_texture *pt,
+ const unsigned *stride, struct pipe_buffer *pb)
+{
+ struct nv40_miptree *mt;
+
+ /* Only supports 2D, non-mipmapped textures for the moment */
+ if (pt->target != PIPE_TEXTURE_2D || pt->last_level != 0 ||
+ pt->depth[0] != 1)
+ return NULL;
+
+ mt = CALLOC_STRUCT(nv40_miptree);
+ if (!mt)
+ return NULL;
+
+ mt->base = *pt;
+ pipe_reference_init(&mt->base.reference, 1);
+ mt->base.screen = pscreen;
+ mt->level[0].pitch = stride[0];
+ mt->level[0].image_offset = CALLOC(1, sizeof(unsigned));
+
+ pipe_buffer_reference(&mt->buffer, pb);
+ return &mt->base;
+}
+
+static void
+nv40_miptree_destroy(struct pipe_texture *pt)
+{
+ struct nv40_miptree *mt = (struct nv40_miptree *)pt;
+ int l;
+
+ pipe_buffer_reference(&mt->buffer, NULL);
+ for (l = 0; l <= pt->last_level; l++) {
+ if (mt->level[l].image_offset)
+ FREE(mt->level[l].image_offset);
+ }
+
+ FREE(mt);
+}
+
+static struct pipe_surface *
+nv40_miptree_surface_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ unsigned flags)
+{
+ struct nv40_miptree *mt = (struct nv40_miptree *)pt;
+ struct nv04_surface *ns;
+
+ ns = CALLOC_STRUCT(nv04_surface);
+ if (!ns)
+ return NULL;
+ pipe_texture_reference(&ns->base.texture, pt);
+ ns->base.format = pt->format;
+ ns->base.width = pt->width[level];
+ ns->base.height = pt->height[level];
+ ns->base.usage = flags;
+ pipe_reference_init(&ns->base.reference, 1);
+ ns->base.face = face;
+ ns->base.level = level;
+ ns->base.zslice = zslice;
+ ns->pitch = mt->level[level].pitch;
+
+ if (pt->target == PIPE_TEXTURE_CUBE) {
+ ns->base.offset = mt->level[level].image_offset[face];
+ } else
+ if (pt->target == PIPE_TEXTURE_3D) {
+ ns->base.offset = mt->level[level].image_offset[zslice];
+ } else {
+ ns->base.offset = mt->level[level].image_offset[0];
+ }
+
+ return &ns->base;
+}
+
+static void
+nv40_miptree_surface_del(struct pipe_surface *ps)
+{
+ pipe_texture_reference(&ps->texture, NULL);
+ FREE(ps);
+}
+
+void
+nv40_screen_init_miptree_functions(struct pipe_screen *pscreen)
+{
+ pscreen->texture_create = nv40_miptree_create;
+ pscreen->texture_blanket = nv40_miptree_blanket;
+ pscreen->texture_destroy = nv40_miptree_destroy;
+ pscreen->get_tex_surface = nv40_miptree_surface_new;
+ pscreen->tex_surface_destroy = nv40_miptree_surface_del;
+}
+
diff --git a/src/gallium/drivers/nv40/nv40_query.c b/src/gallium/drivers/nv40/nv40_query.c
new file mode 100644
index 0000000000..7874aedd42
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_query.c
@@ -0,0 +1,121 @@
+#include "pipe/p_context.h"
+
+#include "nv40_context.h"
+
+struct nv40_query {
+ struct nouveau_resource *object;
+ unsigned type;
+ boolean ready;
+ uint64_t result;
+};
+
+static INLINE struct nv40_query *
+nv40_query(struct pipe_query *pipe)
+{
+ return (struct nv40_query *)pipe;
+}
+
+static struct pipe_query *
+nv40_query_create(struct pipe_context *pipe, unsigned query_type)
+{
+ struct nv40_query *q;
+
+ q = CALLOC(1, sizeof(struct nv40_query));
+ q->type = query_type;
+
+ return (struct pipe_query *)q;
+}
+
+static void
+nv40_query_destroy(struct pipe_context *pipe, struct pipe_query *pq)
+{
+ struct nv40_query *q = nv40_query(pq);
+
+ if (q->object)
+ nouveau_resource_free(&q->object);
+ FREE(q);
+}
+
+static void
+nv40_query_begin(struct pipe_context *pipe, struct pipe_query *pq)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ struct nv40_query *q = nv40_query(pq);
+
+ assert(q->type == PIPE_QUERY_OCCLUSION_COUNTER);
+
+ /* Happens when end_query() is called, then another begin_query()
+ * without querying the result in-between. For now we'll wait for
+ * the existing query to notify completion, but it could be better.
+ */
+ if (q->object) {
+ uint64_t tmp;
+ pipe->get_query_result(pipe, pq, 1, &tmp);
+ }
+
+ if (nouveau_resource_alloc(nv40->screen->query_heap, 1, NULL, &q->object))
+ assert(0);
+ nouveau_notifier_reset(nv40->screen->query, q->object->start);
+
+ BEGIN_RING(curie, NV40TCL_QUERY_RESET, 1);
+ OUT_RING (1);
+ BEGIN_RING(curie, NV40TCL_QUERY_UNK17CC, 1);
+ OUT_RING (1);
+
+ q->ready = FALSE;
+}
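+
+/* Illustrative sketch only, not used by the driver: the caller-side
+ * sequence the comment in nv40_query_begin() refers to. The query is
+ * begun, drawing happens, the query is ended, and get_query_result()
+ * with wait == TRUE blocks on the notifier until the sample count has
+ * been written. The helper name nv40_example_query_usage is hypothetical.
+ */
+static INLINE uint64_t
+nv40_example_query_usage(struct pipe_context *pipe, struct pipe_query *q)
+{
+ uint64_t samples = 0;
+
+ pipe->begin_query(pipe, q);
+ /* ... emit draw calls here ... */
+ pipe->end_query(pipe, q);
+ pipe->get_query_result(pipe, q, TRUE, &samples);
+
+ return samples;
+}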
+
+static void
+nv40_query_end(struct pipe_context *pipe, struct pipe_query *pq)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ struct nv40_query *q = nv40_query(pq);
+
+ BEGIN_RING(curie, NV40TCL_QUERY_GET, 1);
+ OUT_RING ((0x01 << NV40TCL_QUERY_GET_UNK24_SHIFT) |
+ ((q->object->start * 32) << NV40TCL_QUERY_GET_OFFSET_SHIFT));
+ FIRE_RING(NULL);
+}
+
+static boolean
+nv40_query_result(struct pipe_context *pipe, struct pipe_query *pq,
+ boolean wait, uint64_t *result)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ struct nv40_query *q = nv40_query(pq);
+
+ assert(q->object && q->type == PIPE_QUERY_OCCLUSION_COUNTER);
+
+ if (!q->ready) {
+ unsigned status;
+
+ status = nouveau_notifier_status(nv40->screen->query,
+ q->object->start);
+ if (status != NV_NOTIFY_STATE_STATUS_COMPLETED) {
+ if (wait == FALSE)
+ return FALSE;
+ nouveau_notifier_wait_status(nv40->screen->query,
+ q->object->start,
+ NV_NOTIFY_STATE_STATUS_COMPLETED,
+ 0);
+ }
+
+ q->result = nouveau_notifier_return_val(nv40->screen->query,
+ q->object->start);
+ q->ready = TRUE;
+ nouveau_resource_free(&q->object);
+ }
+
+ *result = q->result;
+ return TRUE;
+}
+
+void
+nv40_init_query_functions(struct nv40_context *nv40)
+{
+ nv40->pipe.create_query = nv40_query_create;
+ nv40->pipe.destroy_query = nv40_query_destroy;
+ nv40->pipe.begin_query = nv40_query_begin;
+ nv40->pipe.end_query = nv40_query_end;
+ nv40->pipe.get_query_result = nv40_query_result;
+}
diff --git a/src/gallium/drivers/nv40/nv40_screen.c b/src/gallium/drivers/nv40/nv40_screen.c
new file mode 100644
index 0000000000..5d2a4216c5
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_screen.c
@@ -0,0 +1,303 @@
+#include "pipe/p_screen.h"
+
+#include "nv40_context.h"
+#include "nv40_screen.h"
+
+#define NV4X_GRCLASS4097_CHIPSETS 0x00000baf
+#define NV4X_GRCLASS4497_CHIPSETS 0x00005450
+#define NV6X_GRCLASS4497_CHIPSETS 0x00000088
+
+static int
+nv40_screen_get_param(struct pipe_screen *pscreen, int param)
+{
+ struct nv40_screen *screen = nv40_screen(pscreen);
+
+ switch (param) {
+ case PIPE_CAP_MAX_TEXTURE_IMAGE_UNITS:
+ return 16;
+ case PIPE_CAP_NPOT_TEXTURES:
+ return 1;
+ case PIPE_CAP_TWO_SIDED_STENCIL:
+ return 1;
+ case PIPE_CAP_GLSL:
+ return 0;
+ case PIPE_CAP_S3TC:
+ return 1;
+ case PIPE_CAP_ANISOTROPIC_FILTER:
+ return 1;
+ case PIPE_CAP_POINT_SPRITE:
+ return 1;
+ case PIPE_CAP_MAX_RENDER_TARGETS:
+ return 4;
+ case PIPE_CAP_OCCLUSION_QUERY:
+ return 1;
+ case PIPE_CAP_TEXTURE_SHADOW_MAP:
+ return 1;
+ case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
+ return 13;
+ case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
+ return 10;
+ case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
+ return 13;
+ case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
+ case PIPE_CAP_TEXTURE_MIRROR_REPEAT:
+ return 1;
+ case PIPE_CAP_MAX_VERTEX_TEXTURE_UNITS:
+ return 0; /* We have 4 - but unsupported currently */
+ case PIPE_CAP_TGSI_CONT_SUPPORTED:
+ return 0;
+ case PIPE_CAP_BLEND_EQUATION_SEPARATE:
+ return 1;
+ case NOUVEAU_CAP_HW_VTXBUF:
+ return 1;
+ case NOUVEAU_CAP_HW_IDXBUF:
+ if (screen->curie->grclass == NV40TCL)
+ return 1;
+ return 0;
+ default:
+ NOUVEAU_ERR("Unknown PIPE_CAP %d\n", param);
+ return 0;
+ }
+}
+
+static float
+nv40_screen_get_paramf(struct pipe_screen *pscreen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_LINE_WIDTH:
+ case PIPE_CAP_MAX_LINE_WIDTH_AA:
+ return 10.0;
+ case PIPE_CAP_MAX_POINT_WIDTH:
+ case PIPE_CAP_MAX_POINT_WIDTH_AA:
+ return 64.0;
+ case PIPE_CAP_MAX_TEXTURE_ANISOTROPY:
+ return 16.0;
+ case PIPE_CAP_MAX_TEXTURE_LOD_BIAS:
+ return 16.0;
+ default:
+ NOUVEAU_ERR("Unknown PIPE_CAP %d\n", param);
+ return 0.0;
+ }
+}
+
+static boolean
+nv40_screen_surface_format_supported(struct pipe_screen *pscreen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned tex_usage, unsigned geom_flags)
+{
+ if (tex_usage & PIPE_TEXTURE_USAGE_RENDER_TARGET) {
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ return TRUE;
+ default:
+ break;
+ }
+ } else
+ if (tex_usage & PIPE_TEXTURE_USAGE_DEPTH_STENCIL) {
+ switch (format) {
+ case PIPE_FORMAT_Z24S8_UNORM:
+ case PIPE_FORMAT_Z24X8_UNORM:
+ case PIPE_FORMAT_Z16_UNORM:
+ return TRUE;
+ default:
+ break;
+ }
+ } else {
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ case PIPE_FORMAT_R16_SNORM:
+ case PIPE_FORMAT_L8_UNORM:
+ case PIPE_FORMAT_A8_UNORM:
+ case PIPE_FORMAT_I8_UNORM:
+ case PIPE_FORMAT_A8L8_UNORM:
+ case PIPE_FORMAT_Z16_UNORM:
+ case PIPE_FORMAT_Z24S8_UNORM:
+ case PIPE_FORMAT_DXT1_RGB:
+ case PIPE_FORMAT_DXT1_RGBA:
+ case PIPE_FORMAT_DXT3_RGBA:
+ case PIPE_FORMAT_DXT5_RGBA:
+ return TRUE;
+ default:
+ break;
+ }
+ }
+
+ return FALSE;
+}
+
+static struct pipe_buffer *
+nv40_surface_buffer(struct pipe_surface *surf)
+{
+ struct nv40_miptree *mt = (struct nv40_miptree *)surf->texture;
+
+ return mt->buffer;
+}
+
+static void
+nv40_screen_destroy(struct pipe_screen *pscreen)
+{
+ struct nv40_screen *screen = nv40_screen(pscreen);
+
+ nouveau_resource_free(&screen->vp_exec_heap);
+ nouveau_resource_free(&screen->vp_data_heap);
+ nouveau_resource_free(&screen->query_heap);
+ nouveau_notifier_free(&screen->query);
+ nouveau_notifier_free(&screen->sync);
+ nouveau_grobj_free(&screen->curie);
+
+ nouveau_screen_fini(&screen->base);
+
+ FREE(pscreen);
+}
+
+struct pipe_screen *
+nv40_screen_create(struct pipe_winsys *ws, struct nouveau_device *dev)
+{
+ struct nv40_screen *screen = CALLOC_STRUCT(nv40_screen);
+ struct nouveau_channel *chan;
+ struct pipe_screen *pscreen;
+ struct nouveau_stateobj *so;
+ unsigned curie_class = 0;
+ int ret;
+
+ if (!screen)
+ return NULL;
+ pscreen = &screen->base.base;
+
+ ret = nouveau_screen_init(&screen->base, dev);
+ if (ret) {
+ nv40_screen_destroy(pscreen);
+ return NULL;
+ }
+ chan = screen->base.channel;
+
+ pscreen->winsys = ws;
+ pscreen->destroy = nv40_screen_destroy;
+ pscreen->get_param = nv40_screen_get_param;
+ pscreen->get_paramf = nv40_screen_get_paramf;
+ pscreen->is_format_supported = nv40_screen_surface_format_supported;
+
+ nv40_screen_init_miptree_functions(pscreen);
+ nv40_screen_init_transfer_functions(pscreen);
+
+ /* 3D object */
+ switch (dev->chipset & 0xf0) {
+ case 0x40:
+ if (NV4X_GRCLASS4097_CHIPSETS & (1 << (dev->chipset & 0x0f)))
+ curie_class = NV40TCL;
+ else
+ if (NV4X_GRCLASS4497_CHIPSETS & (1 << (dev->chipset & 0x0f)))
+ curie_class = NV44TCL;
+ break;
+ case 0x60:
+ if (NV6X_GRCLASS4497_CHIPSETS & (1 << (dev->chipset & 0x0f)))
+ curie_class = NV44TCL;
+ break;
+ }
+
+ if (!curie_class) {
+ NOUVEAU_ERR("Unknown nv4x chipset: nv%02x\n", dev->chipset);
+ return NULL;
+ }
+
+ ret = nouveau_grobj_alloc(chan, 0xbeef3097, curie_class, &screen->curie);
+ if (ret) {
+ NOUVEAU_ERR("Error creating 3D object: %d\n", ret);
+ return NULL;
+ }
+ BIND_RING(chan, screen->curie, 7);
+
+ /* 2D engine setup */
+ screen->eng2d = nv04_surface_2d_init(&screen->base);
+ screen->eng2d->buf = nv40_surface_buffer;
+
+ /* Notifier for sync purposes */
+ ret = nouveau_notifier_alloc(chan, 0xbeef0301, 1, &screen->sync);
+ if (ret) {
+ NOUVEAU_ERR("Error creating notifier object: %d\n", ret);
+ nv40_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ /* Query objects */
+ ret = nouveau_notifier_alloc(chan, 0xbeef0302, 32, &screen->query);
+ if (ret) {
+ NOUVEAU_ERR("Error initialising query objects: %d\n", ret);
+ nv40_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ ret = nouveau_resource_init(&screen->query_heap, 0, 32);
+ if (ret) {
+ NOUVEAU_ERR("Error initialising query object heap: %d\n", ret);
+ nv40_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ /* Vtxprog resources */
+ if (nouveau_resource_init(&screen->vp_exec_heap, 0, 512) ||
+ nouveau_resource_init(&screen->vp_data_heap, 0, 256)) {
+ nv40_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ /* Static curie initialisation */
+ so = so_new(128, 0);
+ so_method(so, screen->curie, NV40TCL_DMA_NOTIFY, 1);
+ so_data (so, screen->sync->handle);
+ so_method(so, screen->curie, NV40TCL_DMA_TEXTURE0, 2);
+ so_data (so, chan->vram->handle);
+ so_data (so, chan->gart->handle);
+ so_method(so, screen->curie, NV40TCL_DMA_COLOR1, 1);
+ so_data (so, chan->vram->handle);
+ so_method(so, screen->curie, NV40TCL_DMA_COLOR0, 2);
+ so_data (so, chan->vram->handle);
+ so_data (so, chan->vram->handle);
+ so_method(so, screen->curie, NV40TCL_DMA_VTXBUF0, 2);
+ so_data (so, chan->vram->handle);
+ so_data (so, chan->gart->handle);
+ so_method(so, screen->curie, NV40TCL_DMA_FENCE, 2);
+ so_data (so, 0);
+ so_data (so, screen->query->handle);
+ so_method(so, screen->curie, NV40TCL_DMA_UNK01AC, 2);
+ so_data (so, chan->vram->handle);
+ so_data (so, chan->vram->handle);
+ so_method(so, screen->curie, NV40TCL_DMA_COLOR2, 2);
+ so_data (so, chan->vram->handle);
+ so_data (so, chan->vram->handle);
+
+ so_method(so, screen->curie, 0x1ea4, 3);
+ so_data (so, 0x00000010);
+ so_data (so, 0x01000100);
+ so_data (so, 0xff800006);
+
+ /* vtxprog output routing */
+ so_method(so, screen->curie, 0x1fc4, 1);
+ so_data (so, 0x06144321);
+ so_method(so, screen->curie, 0x1fc8, 2);
+ so_data (so, 0xedcba987);
+ so_data (so, 0x00000021);
+ so_method(so, screen->curie, 0x1fd0, 1);
+ so_data (so, 0x00171615);
+ so_method(so, screen->curie, 0x1fd4, 1);
+ so_data (so, 0x001b1a19);
+
+ so_method(so, screen->curie, 0x1ef8, 1);
+ so_data (so, 0x0020ffff);
+ so_method(so, screen->curie, 0x1d64, 1);
+ so_data (so, 0x00d30000);
+ so_method(so, screen->curie, 0x1e94, 1);
+ so_data (so, 0x00000001);
+
+ so_emit(chan, so);
+ so_ref(NULL, &so);
+ nouveau_pushbuf_flush(chan, 0);
+
+ return pscreen;
+}
+
diff --git a/src/gallium/drivers/nv40/nv40_screen.h b/src/gallium/drivers/nv40/nv40_screen.h
new file mode 100644
index 0000000000..57b4c8fc46
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_screen.h
@@ -0,0 +1,40 @@
+#ifndef __NV40_SCREEN_H__
+#define __NV40_SCREEN_H__
+
+#include "nouveau/nouveau_screen.h"
+#include "nv04/nv04_surface_2d.h"
+
+struct nv40_screen {
+ struct nouveau_screen base;
+
+ struct nouveau_winsys *nvws;
+
+ unsigned cur_pctx;
+
+ /* HW graphics objects */
+ struct nv04_surface_2d *eng2d;
+ struct nouveau_grobj *curie;
+ struct nouveau_notifier *sync;
+
+ /* Query object resources */
+ struct nouveau_notifier *query;
+ struct nouveau_resource *query_heap;
+
+ /* Vtxprog resources */
+ struct nouveau_resource *vp_exec_heap;
+ struct nouveau_resource *vp_data_heap;
+
+ /* Current 3D state of channel */
+ struct nouveau_stateobj *state[NV40_STATE_MAX];
+};
+
+static INLINE struct nv40_screen *
+nv40_screen(struct pipe_screen *screen)
+{
+ return (struct nv40_screen *)screen;
+}
+
+void
+nv40_screen_init_transfer_functions(struct pipe_screen *pscreen);
+
+#endif
diff --git a/src/gallium/drivers/nv40/nv40_shader.h b/src/gallium/drivers/nv40/nv40_shader.h
new file mode 100644
index 0000000000..854dccf548
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_shader.h
@@ -0,0 +1,556 @@
+#ifndef __NV40_SHADER_H__
+#define __NV40_SHADER_H__
+
+/* Vertex programs instruction set
+ *
+ * The NV40 instruction set is very similar to NV30. Most fields are in
+ * a slightly different position in the instruction, however.
+ *
+ * Merged instructions
+ * In some cases it is possible to put two instructions into one opcode
+ * slot. The rules for when this is OK are not entirely clear to me yet.
+ *
+ * There are separate writemasks and dest temp register fields for each
+ * grouping of instructions. There is however only one field with the
+ * ID of a result register. Writing to temp/result regs is selected by
+ * setting VEC_RESULT/SCA_RESULT.
+ *
+ * Temporary registers
+ * The source/dest temp register fields have been extended by 1 bit, to
+ * give a total of 32 temporary registers.
+ *
+ * Relative Addressing
+ * NV40 can use an address register to index into vertex attribute regs.
+ * This is done by putting the offset value into INPUT_SRC and setting
+ * the INDEX_INPUT flag.
+ *
+ * Conditional execution (see NV_vertex_program{2,3} for details)
+ * There is a second condition code register on NV40; its use is enabled
+ * by setting the COND_REG_SELECT_1 flag.
+ *
+ * Texture lookup
+ * TODO
+ */
+
+/* ---- OPCODE BITS 127:96 / data DWORD 0 --- */
+#define NV40_VP_INST_VEC_RESULT (1 << 30)
+/* uncertain.. */
+#define NV40_VP_INST_COND_UPDATE_ENABLE ((1 << 14) | (1 << 29))
+/* use address reg as index into attribs */
+#define NV40_VP_INST_INDEX_INPUT (1 << 27)
+#define NV40_VP_INST_COND_REG_SELECT_1 (1 << 25)
+#define NV40_VP_INST_ADDR_REG_SELECT_1 (1 << 24)
+#define NV40_VP_INST_SRC2_ABS (1 << 23)
+#define NV40_VP_INST_SRC1_ABS (1 << 22)
+#define NV40_VP_INST_SRC0_ABS (1 << 21)
+#define NV40_VP_INST_VEC_DEST_TEMP_SHIFT 15
+#define NV40_VP_INST_VEC_DEST_TEMP_MASK (0x1F << 15)
+#define NV40_VP_INST_COND_TEST_ENABLE (1 << 13)
+#define NV40_VP_INST_COND_SHIFT 10
+#define NV40_VP_INST_COND_MASK (0x7 << 10)
+# define NV40_VP_INST_COND_FL 0
+# define NV40_VP_INST_COND_LT 1
+# define NV40_VP_INST_COND_EQ 2
+# define NV40_VP_INST_COND_LE 3
+# define NV40_VP_INST_COND_GT 4
+# define NV40_VP_INST_COND_NE 5
+# define NV40_VP_INST_COND_GE 6
+# define NV40_VP_INST_COND_TR 7
+#define NV40_VP_INST_COND_SWZ_X_SHIFT 8
+#define NV40_VP_INST_COND_SWZ_X_MASK (3 << 8)
+#define NV40_VP_INST_COND_SWZ_Y_SHIFT 6
+#define NV40_VP_INST_COND_SWZ_Y_MASK (3 << 6)
+#define NV40_VP_INST_COND_SWZ_Z_SHIFT 4
+#define NV40_VP_INST_COND_SWZ_Z_MASK (3 << 4)
+#define NV40_VP_INST_COND_SWZ_W_SHIFT 2
+#define NV40_VP_INST_COND_SWZ_W_MASK (3 << 2)
+#define NV40_VP_INST_COND_SWZ_ALL_SHIFT 2
+#define NV40_VP_INST_COND_SWZ_ALL_MASK (0xFF << 2)
+#define NV40_VP_INST_ADDR_SWZ_SHIFT 0
+#define NV40_VP_INST_ADDR_SWZ_MASK (0x03 << 0)
+#define NV40_VP_INST0_KNOWN ( \
+ NV40_VP_INST_INDEX_INPUT | \
+ NV40_VP_INST_COND_REG_SELECT_1 | \
+ NV40_VP_INST_ADDR_REG_SELECT_1 | \
+ NV40_VP_INST_SRC2_ABS | \
+ NV40_VP_INST_SRC1_ABS | \
+ NV40_VP_INST_SRC0_ABS | \
+ NV40_VP_INST_VEC_DEST_TEMP_MASK | \
+ NV40_VP_INST_COND_TEST_ENABLE | \
+ NV40_VP_INST_COND_MASK | \
+ NV40_VP_INST_COND_SWZ_ALL_MASK | \
+ NV40_VP_INST_ADDR_SWZ_MASK)
+
+/* ---- OPCODE BITS 95:64 / data DWORD 1 --- */
+#define NV40_VP_INST_VEC_OPCODE_SHIFT 22
+#define NV40_VP_INST_VEC_OPCODE_MASK (0x1F << 22)
+# define NV40_VP_INST_OP_NOP 0x00
+# define NV40_VP_INST_OP_MOV 0x01
+# define NV40_VP_INST_OP_MUL 0x02
+# define NV40_VP_INST_OP_ADD 0x03
+# define NV40_VP_INST_OP_MAD 0x04
+# define NV40_VP_INST_OP_DP3 0x05
+# define NV40_VP_INST_OP_DPH 0x06
+# define NV40_VP_INST_OP_DP4 0x07
+# define NV40_VP_INST_OP_DST 0x08
+# define NV40_VP_INST_OP_MIN 0x09
+# define NV40_VP_INST_OP_MAX 0x0A
+# define NV40_VP_INST_OP_SLT 0x0B
+# define NV40_VP_INST_OP_SGE 0x0C
+# define NV40_VP_INST_OP_ARL 0x0D
+# define NV40_VP_INST_OP_FRC 0x0E
+# define NV40_VP_INST_OP_FLR 0x0F
+# define NV40_VP_INST_OP_SEQ 0x10
+# define NV40_VP_INST_OP_SFL 0x11
+# define NV40_VP_INST_OP_SGT 0x12
+# define NV40_VP_INST_OP_SLE 0x13
+# define NV40_VP_INST_OP_SNE 0x14
+# define NV40_VP_INST_OP_STR 0x15
+# define NV40_VP_INST_OP_SSG 0x16
+# define NV40_VP_INST_OP_ARR 0x17
+# define NV40_VP_INST_OP_ARA 0x18
+# define NV40_VP_INST_OP_TXL 0x19
+#define NV40_VP_INST_SCA_OPCODE_SHIFT 27
+#define NV40_VP_INST_SCA_OPCODE_MASK (0x1F << 27)
+# define NV40_VP_INST_OP_NOP 0x00
+# define NV40_VP_INST_OP_MOV 0x01
+# define NV40_VP_INST_OP_RCP 0x02
+# define NV40_VP_INST_OP_RCC 0x03
+# define NV40_VP_INST_OP_RSQ 0x04
+# define NV40_VP_INST_OP_EXP 0x05
+# define NV40_VP_INST_OP_LOG 0x06
+# define NV40_VP_INST_OP_LIT 0x07
+# define NV40_VP_INST_OP_BRA 0x09
+# define NV40_VP_INST_OP_CAL 0x0B
+# define NV40_VP_INST_OP_RET 0x0C
+# define NV40_VP_INST_OP_LG2 0x0D
+# define NV40_VP_INST_OP_EX2 0x0E
+# define NV40_VP_INST_OP_SIN 0x0F
+# define NV40_VP_INST_OP_COS 0x10
+# define NV40_VP_INST_OP_PUSHA 0x13
+# define NV40_VP_INST_OP_POPA 0x14
+#define NV40_VP_INST_CONST_SRC_SHIFT 12
+#define NV40_VP_INST_CONST_SRC_MASK (0xFF << 12)
+#define NV40_VP_INST_INPUT_SRC_SHIFT 8
+#define NV40_VP_INST_INPUT_SRC_MASK (0x0F << 8)
+# define NV40_VP_INST_IN_POS 0
+# define NV40_VP_INST_IN_WEIGHT 1
+# define NV40_VP_INST_IN_NORMAL 2
+# define NV40_VP_INST_IN_COL0 3
+# define NV40_VP_INST_IN_COL1 4
+# define NV40_VP_INST_IN_FOGC 5
+# define NV40_VP_INST_IN_TC0 8
+# define NV40_VP_INST_IN_TC(n) (8+n)
+#define NV40_VP_INST_SRC0H_SHIFT 0
+#define NV40_VP_INST_SRC0H_MASK (0xFF << 0)
+#define NV40_VP_INST1_KNOWN ( \
+ NV40_VP_INST_VEC_OPCODE_MASK | \
+ NV40_VP_INST_SCA_OPCODE_MASK | \
+ NV40_VP_INST_CONST_SRC_MASK | \
+ NV40_VP_INST_INPUT_SRC_MASK | \
+ NV40_VP_INST_SRC0H_MASK \
+ )
+
+/* ---- OPCODE BITS 63:32 / data DWORD 2 --- */
+#define NV40_VP_INST_SRC0L_SHIFT 23
+#define NV40_VP_INST_SRC0L_MASK (0x1FF << 23)
+#define NV40_VP_INST_SRC1_SHIFT 6
+#define NV40_VP_INST_SRC1_MASK (0x1FFFF << 6)
+#define NV40_VP_INST_SRC2H_SHIFT 0
+#define NV40_VP_INST_SRC2H_MASK (0x3F << 0)
+#define NV40_VP_INST_IADDRH_SHIFT 0
+#define NV40_VP_INST_IADDRH_MASK (0x1F << 0)
+
+/* ---- OPCODE BITS 31:0 / data DWORD 3 --- */
+#define NV40_VP_INST_IADDRL_SHIFT 29
+#define NV40_VP_INST_IADDRL_MASK (7 << 29)
+#define NV40_VP_INST_SRC2L_SHIFT 21
+#define NV40_VP_INST_SRC2L_MASK (0x7FF << 21)
+#define NV40_VP_INST_SCA_WRITEMASK_SHIFT 17
+#define NV40_VP_INST_SCA_WRITEMASK_MASK (0xF << 17)
+# define NV40_VP_INST_SCA_WRITEMASK_X (1 << 20)
+# define NV40_VP_INST_SCA_WRITEMASK_Y (1 << 19)
+# define NV40_VP_INST_SCA_WRITEMASK_Z (1 << 18)
+# define NV40_VP_INST_SCA_WRITEMASK_W (1 << 17)
+#define NV40_VP_INST_VEC_WRITEMASK_SHIFT 13
+#define NV40_VP_INST_VEC_WRITEMASK_MASK (0xF << 13)
+# define NV40_VP_INST_VEC_WRITEMASK_X (1 << 16)
+# define NV40_VP_INST_VEC_WRITEMASK_Y (1 << 15)
+# define NV40_VP_INST_VEC_WRITEMASK_Z (1 << 14)
+# define NV40_VP_INST_VEC_WRITEMASK_W (1 << 13)
+#define NV40_VP_INST_SCA_RESULT (1 << 12)
+#define NV40_VP_INST_SCA_DEST_TEMP_SHIFT 7
+#define NV40_VP_INST_SCA_DEST_TEMP_MASK (0x1F << 7)
+#define NV40_VP_INST_DEST_SHIFT 2
+#define NV40_VP_INST_DEST_MASK (31 << 2)
+# define NV40_VP_INST_DEST_POS 0
+# define NV40_VP_INST_DEST_COL0 1
+# define NV40_VP_INST_DEST_COL1 2
+# define NV40_VP_INST_DEST_BFC0 3
+# define NV40_VP_INST_DEST_BFC1 4
+# define NV40_VP_INST_DEST_FOGC 5
+# define NV40_VP_INST_DEST_PSZ 6
+# define NV40_VP_INST_DEST_TC0 7
+# define NV40_VP_INST_DEST_TC(n) (7+n)
+# define NV40_VP_INST_DEST_TEMP 0x1F
+#define NV40_VP_INST_INDEX_CONST (1 << 1)
+#define NV40_VP_INST_LAST (1 << 0)
+#define NV40_VP_INST3_KNOWN ( \
+ NV40_VP_INST_SRC2L_MASK |\
+ NV40_VP_INST_SCA_WRITEMASK_MASK |\
+ NV40_VP_INST_VEC_WRITEMASK_MASK |\
+ NV40_VP_INST_SCA_DEST_TEMP_MASK |\
+ NV40_VP_INST_DEST_MASK |\
+ NV40_VP_INST_INDEX_CONST)
+
+/* Useful to split the source selection regs into their pieces */
+#define NV40_VP_SRC0_HIGH_SHIFT 9
+#define NV40_VP_SRC0_HIGH_MASK 0x0001FE00
+#define NV40_VP_SRC0_LOW_MASK 0x000001FF
+#define NV40_VP_SRC2_HIGH_SHIFT 11
+#define NV40_VP_SRC2_HIGH_MASK 0x0001F800
+#define NV40_VP_SRC2_LOW_MASK 0x000007FF
+
+/* Source selection - these are the bits you fill NV40_VP_INST_SRCn with */
+#define NV40_VP_SRC_NEGATE (1 << 16)
+#define NV40_VP_SRC_SWZ_X_SHIFT 14
+#define NV40_VP_SRC_SWZ_X_MASK (3 << 14)
+#define NV40_VP_SRC_SWZ_Y_SHIFT 12
+#define NV40_VP_SRC_SWZ_Y_MASK (3 << 12)
+#define NV40_VP_SRC_SWZ_Z_SHIFT 10
+#define NV40_VP_SRC_SWZ_Z_MASK (3 << 10)
+#define NV40_VP_SRC_SWZ_W_SHIFT 8
+#define NV40_VP_SRC_SWZ_W_MASK (3 << 8)
+#define NV40_VP_SRC_SWZ_ALL_SHIFT 8
+#define NV40_VP_SRC_SWZ_ALL_MASK (0xFF << 8)
+#define NV40_VP_SRC_TEMP_SRC_SHIFT 2
+#define NV40_VP_SRC_TEMP_SRC_MASK (0x1F << 2)
+#define NV40_VP_SRC_REG_TYPE_SHIFT 0
+#define NV40_VP_SRC_REG_TYPE_MASK (3 << 0)
+# define NV40_VP_SRC_REG_TYPE_UNK0 0
+# define NV40_VP_SRC_REG_TYPE_TEMP 1
+# define NV40_VP_SRC_REG_TYPE_INPUT 2
+# define NV40_VP_SRC_REG_TYPE_CONST 3
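+
+/* Illustrative sketch only, not used by the driver: one way to pack the
+ * source-selection word for a temporary register from the fields above,
+ * before it is split across the SRCnH/SRCnL instruction fields using the
+ * NV40_VP_SRCn_HIGH/LOW masks. The helper name nv40_vp_example_temp_src
+ * is hypothetical, and the 0..3 == x..w swizzle encoding is assumed (it
+ * is not spelled out in this section). Input and constant sources differ:
+ * their register index lives in the INPUT_SRC/CONST_SRC fields of DWORD 1.
+ */
+static INLINE unsigned
+nv40_vp_example_temp_src(int index, int sx, int sy, int sz, int sw, int neg)
+{
+ unsigned src;
+
+ src = (NV40_VP_SRC_REG_TYPE_TEMP << NV40_VP_SRC_REG_TYPE_SHIFT);
+ src |= (index << NV40_VP_SRC_TEMP_SRC_SHIFT) & NV40_VP_SRC_TEMP_SRC_MASK;
+ src |= ((sx << NV40_VP_SRC_SWZ_X_SHIFT) & NV40_VP_SRC_SWZ_X_MASK) |
+ ((sy << NV40_VP_SRC_SWZ_Y_SHIFT) & NV40_VP_SRC_SWZ_Y_MASK) |
+ ((sz << NV40_VP_SRC_SWZ_Z_SHIFT) & NV40_VP_SRC_SWZ_Z_MASK) |
+ ((sw << NV40_VP_SRC_SWZ_W_SHIFT) & NV40_VP_SRC_SWZ_W_MASK);
+ if (neg)
+ src |= NV40_VP_SRC_NEGATE;
+
+ /* e.g. -TEMP[3].yzwx: nv40_vp_example_temp_src(3, 1, 2, 3, 0, 1) */
+ return src;
+}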
+
+
+/*
+ * Each fragment program opcode appears to consist of 4 32-bit values.
+ *
+ * 0 - Opcode, output reg/mask, ATTRIB source
+ * 1 - Source 0
+ * 2 - Source 1
+ * 3 - Source 2
+ *
+ * There appears to be no special difference between result regs and temp regs.
+ * result.color == R0.xyzw
+ * result.depth == R1.z
+ * When the fragprog contains instructions that write depth,
+ * NV30_TCL_PRIMITIVE_3D_UNK1D78 is set to 0; otherwise it is set to 1.
+ *
+ * Constants are inserted directly after the instruction that uses them.
+ *
+ * It appears that it's not possible to use two input registers in one
+ * instruction as the input sourcing is done in the instruction dword
+ * and not in the source selection dwords. As such, instructions such as:
+ *
+ * ADD result.color, fragment.color, fragment.texcoord[0];
+ *
+ * must be split into two MOVs and then an ADD (nvidia does this), but
+ * I'm not sure why it's not just one MOV and then source the second input
+ * in the ADD instruction..
+ *
+ * Negation of the full source is done with NV30_FP_REG_NEGATE; arbitrary
+ * negation requires multiplication with a const.
+ *
+ * Arbitrary swizzling is supported with the exception of SWIZZLE_ZERO and
+ * SWIZZLE_ONE.
+ *
+ * The temp/result regs appear to be initialised to (0.0, 0.0, 0.0, 0.0) as
+ * SWIZZLE_ZERO is implemented simply by not writing to the relevant components
+ * of the destination.
+ *
+ * Looping
+ * Loops appear to be fairly expensive on NV40 at least, the proprietary
+ * driver goes to a lot of effort to avoid using the native looping
+ * instructions. If the total number of *executed* instructions between
+ * REP/ENDREP or LOOP/ENDLOOP is <=500, the driver will unroll the loop.
+ * The maximum loop count is 255.
+ *
+ * Conditional execution
+ * TODO
+ *
+ * Non-native instructions:
+ * LIT
+ * LRP - MAD+MAD
+ * SUB - ADD, negate second source
+ * RSQ - LG2 + EX2
+ * POW - LG2 + MUL + EX2
+ * SCS - COS + SIN
+ * XPD
+ * DP2 - MUL + ADD
+ * NRM
+ */
+
+//== Opcode / Destination selection ==
+#define NV40_FP_OP_PROGRAM_END (1 << 0)
+#define NV40_FP_OP_OUT_REG_SHIFT 1
+#define NV40_FP_OP_OUT_REG_MASK (63 << 1)
+/* Needs to be set when writing outputs to get the expected result.. */
+#define NV40_FP_OP_OUT_REG_HALF (1 << 7)
+#define NV40_FP_OP_COND_WRITE_ENABLE (1 << 8)
+#define NV40_FP_OP_OUTMASK_SHIFT 9
+#define NV40_FP_OP_OUTMASK_MASK (0xF << 9)
+# define NV40_FP_OP_OUT_X (1 << 9)
+# define NV40_FP_OP_OUT_Y (1 <<10)
+# define NV40_FP_OP_OUT_Z (1 <<11)
+# define NV40_FP_OP_OUT_W (1 <<12)
+/* Uncertain about these, especially the input_src values.. it's possible that
+ * they can be dynamically changed.
+ */
+#define NV40_FP_OP_INPUT_SRC_SHIFT 13
+#define NV40_FP_OP_INPUT_SRC_MASK (15 << 13)
+# define NV40_FP_OP_INPUT_SRC_POSITION 0x0
+# define NV40_FP_OP_INPUT_SRC_COL0 0x1
+# define NV40_FP_OP_INPUT_SRC_COL1 0x2
+# define NV40_FP_OP_INPUT_SRC_FOGC 0x3
+# define NV40_FP_OP_INPUT_SRC_TC0 0x4
+# define NV40_FP_OP_INPUT_SRC_TC(n) (0x4 + n)
+# define NV40_FP_OP_INPUT_SRC_FACING 0xE
+#define NV40_FP_OP_TEX_UNIT_SHIFT 17
+#define NV40_FP_OP_TEX_UNIT_MASK (0xF << 17)
+#define NV40_FP_OP_PRECISION_SHIFT 22
+#define NV40_FP_OP_PRECISION_MASK (3 << 22)
+# define NV40_FP_PRECISION_FP32 0
+# define NV40_FP_PRECISION_FP16 1
+# define NV40_FP_PRECISION_FX12 2
+#define NV40_FP_OP_OPCODE_SHIFT 24
+#define NV40_FP_OP_OPCODE_MASK (0x3F << 24)
+# define NV40_FP_OP_OPCODE_NOP 0x00
+# define NV40_FP_OP_OPCODE_MOV 0x01
+# define NV40_FP_OP_OPCODE_MUL 0x02
+# define NV40_FP_OP_OPCODE_ADD 0x03
+# define NV40_FP_OP_OPCODE_MAD 0x04
+# define NV40_FP_OP_OPCODE_DP3 0x05
+# define NV40_FP_OP_OPCODE_DP4 0x06
+# define NV40_FP_OP_OPCODE_DST 0x07
+# define NV40_FP_OP_OPCODE_MIN 0x08
+# define NV40_FP_OP_OPCODE_MAX 0x09
+# define NV40_FP_OP_OPCODE_SLT 0x0A
+# define NV40_FP_OP_OPCODE_SGE 0x0B
+# define NV40_FP_OP_OPCODE_SLE 0x0C
+# define NV40_FP_OP_OPCODE_SGT 0x0D
+# define NV40_FP_OP_OPCODE_SNE 0x0E
+# define NV40_FP_OP_OPCODE_SEQ 0x0F
+# define NV40_FP_OP_OPCODE_FRC 0x10
+# define NV40_FP_OP_OPCODE_FLR 0x11
+# define NV40_FP_OP_OPCODE_KIL 0x12
+# define NV40_FP_OP_OPCODE_PK4B 0x13
+# define NV40_FP_OP_OPCODE_UP4B 0x14
+/* DDX/DDY can only write to XY */
+# define NV40_FP_OP_OPCODE_DDX 0x15
+# define NV40_FP_OP_OPCODE_DDY 0x16
+# define NV40_FP_OP_OPCODE_TEX 0x17
+# define NV40_FP_OP_OPCODE_TXP 0x18
+# define NV40_FP_OP_OPCODE_TXD 0x19
+# define NV40_FP_OP_OPCODE_RCP 0x1A
+# define NV40_FP_OP_OPCODE_EX2 0x1C
+# define NV40_FP_OP_OPCODE_LG2 0x1D
+# define NV40_FP_OP_OPCODE_STR 0x20
+# define NV40_FP_OP_OPCODE_SFL 0x21
+# define NV40_FP_OP_OPCODE_COS 0x22
+# define NV40_FP_OP_OPCODE_SIN 0x23
+# define NV40_FP_OP_OPCODE_PK2H 0x24
+# define NV40_FP_OP_OPCODE_UP2H 0x25
+# define NV40_FP_OP_OPCODE_PK4UB 0x27
+# define NV40_FP_OP_OPCODE_UP4UB 0x28
+# define NV40_FP_OP_OPCODE_PK2US 0x29
+# define NV40_FP_OP_OPCODE_UP2US 0x2A
+# define NV40_FP_OP_OPCODE_DP2A 0x2E
+# define NV40_FP_OP_OPCODE_TXL 0x2F
+# define NV40_FP_OP_OPCODE_TXB 0x31
+# define NV40_FP_OP_OPCODE_DIV 0x3A
+# define NV40_FP_OP_OPCODE_UNK_LIT 0x3C
+/* The use of these instructions appears to be indicated by bit 31 of DWORD 2. */
+# define NV40_FP_OP_BRA_OPCODE_BRK 0x0
+# define NV40_FP_OP_BRA_OPCODE_CAL 0x1
+# define NV40_FP_OP_BRA_OPCODE_IF 0x2
+# define NV40_FP_OP_BRA_OPCODE_LOOP 0x3
+# define NV40_FP_OP_BRA_OPCODE_REP 0x4
+# define NV40_FP_OP_BRA_OPCODE_RET 0x5
+#define NV40_FP_OP_OUT_SAT (1 << 31)
+
+/* high order bits of SRC0 */
+#define NV40_FP_OP_OUT_ABS (1 << 29)
+#define NV40_FP_OP_COND_SWZ_W_SHIFT 27
+#define NV40_FP_OP_COND_SWZ_W_MASK (3 << 27)
+#define NV40_FP_OP_COND_SWZ_Z_SHIFT 25
+#define NV40_FP_OP_COND_SWZ_Z_MASK (3 << 25)
+#define NV40_FP_OP_COND_SWZ_Y_SHIFT 23
+#define NV40_FP_OP_COND_SWZ_Y_MASK (3 << 23)
+#define NV40_FP_OP_COND_SWZ_X_SHIFT 21
+#define NV40_FP_OP_COND_SWZ_X_MASK (3 << 21)
+#define NV40_FP_OP_COND_SWZ_ALL_SHIFT 21
+#define NV40_FP_OP_COND_SWZ_ALL_MASK (0xFF << 21)
+#define NV40_FP_OP_COND_SHIFT 18
+#define NV40_FP_OP_COND_MASK (0x07 << 18)
+# define NV40_FP_OP_COND_FL 0
+# define NV40_FP_OP_COND_LT 1
+# define NV40_FP_OP_COND_EQ 2
+# define NV40_FP_OP_COND_LE 3
+# define NV40_FP_OP_COND_GT 4
+# define NV40_FP_OP_COND_NE 5
+# define NV40_FP_OP_COND_GE 6
+# define NV40_FP_OP_COND_TR 7
+
+/* high order bits of SRC1 */
+#define NV40_FP_OP_OPCODE_IS_BRANCH (1<<31)
+#define NV40_FP_OP_DST_SCALE_SHIFT 28
+#define NV40_FP_OP_DST_SCALE_MASK (3 << 28)
+#define NV40_FP_OP_DST_SCALE_1X 0
+#define NV40_FP_OP_DST_SCALE_2X 1
+#define NV40_FP_OP_DST_SCALE_4X 2
+#define NV40_FP_OP_DST_SCALE_8X 3
+#define NV40_FP_OP_DST_SCALE_INV_2X 5
+#define NV40_FP_OP_DST_SCALE_INV_4X 6
+#define NV40_FP_OP_DST_SCALE_INV_8X 7
+
+/* SRC1 LOOP */
+#define NV40_FP_OP_LOOP_INCR_SHIFT 19
+#define NV40_FP_OP_LOOP_INCR_MASK (0xFF << 19)
+#define NV40_FP_OP_LOOP_INDEX_SHIFT 10
+#define NV40_FP_OP_LOOP_INDEX_MASK (0xFF << 10)
+#define NV40_FP_OP_LOOP_COUNT_SHIFT 2
+#define NV40_FP_OP_LOOP_COUNT_MASK (0xFF << 2)
+
+/* SRC1 IF */
+#define NV40_FP_OP_ELSE_ID_SHIFT 2
+#define NV40_FP_OP_ELSE_ID_MASK (0xFF << 2)
+
+/* SRC1 CAL */
+#define NV40_FP_OP_IADDR_SHIFT 2
+#define NV40_FP_OP_IADDR_MASK (0xFF << 2)
+
+/* SRC1 REP
+ * I have no idea why there are 3 count values here.. but they
+ * have always been filled with the same value in my tests so
+ * far..
+ */
+#define NV40_FP_OP_REP_COUNT1_SHIFT 2
+#define NV40_FP_OP_REP_COUNT1_MASK (0xFF << 2)
+#define NV40_FP_OP_REP_COUNT2_SHIFT 10
+#define NV40_FP_OP_REP_COUNT2_MASK (0xFF << 10)
+#define NV40_FP_OP_REP_COUNT3_SHIFT 19
+#define NV40_FP_OP_REP_COUNT3_MASK (0xFF << 19)
+
+/* SRC2 REP/IF */
+#define NV40_FP_OP_END_ID_SHIFT 2
+#define NV40_FP_OP_END_ID_MASK (0xFF << 2)
+
+// SRC2 high-order
+#define NV40_FP_OP_INDEX_INPUT (1 << 30)
+#define NV40_FP_OP_ADDR_INDEX_SHIFT 19
+#define NV40_FP_OP_ADDR_INDEX_MASK (0xF << 19)
+
+//== Register selection ==
+#define NV40_FP_REG_TYPE_SHIFT 0
+#define NV40_FP_REG_TYPE_MASK (3 << 0)
+# define NV40_FP_REG_TYPE_TEMP 0
+# define NV40_FP_REG_TYPE_INPUT 1
+# define NV40_FP_REG_TYPE_CONST 2
+#define NV40_FP_REG_SRC_SHIFT 2
+#define NV40_FP_REG_SRC_MASK (63 << 2)
+#define NV40_FP_REG_SRC_HALF (1 << 8)
+#define NV40_FP_REG_SWZ_ALL_SHIFT 9
+#define NV40_FP_REG_SWZ_ALL_MASK (255 << 9)
+#define NV40_FP_REG_SWZ_X_SHIFT 9
+#define NV40_FP_REG_SWZ_X_MASK (3 << 9)
+#define NV40_FP_REG_SWZ_Y_SHIFT 11
+#define NV40_FP_REG_SWZ_Y_MASK (3 << 11)
+#define NV40_FP_REG_SWZ_Z_SHIFT 13
+#define NV40_FP_REG_SWZ_Z_MASK (3 << 13)
+#define NV40_FP_REG_SWZ_W_SHIFT 15
+#define NV40_FP_REG_SWZ_W_MASK (3 << 15)
+# define NV40_FP_SWIZZLE_X 0
+# define NV40_FP_SWIZZLE_Y 1
+# define NV40_FP_SWIZZLE_Z 2
+# define NV40_FP_SWIZZLE_W 3
+#define NV40_FP_REG_NEGATE (1 << 17)
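+
+/* Illustrative sketch only, not used by the driver: one plausible packing
+ * of the four instruction DWORDs for "MOV Rdst.xyzw, Rsrc" with both
+ * registers being temporaries. The helper name nv40_fp_example_mov is
+ * hypothetical; the driver's fragment program assembler builds these
+ * words itself. The condition test is set to COND_TR so the write is
+ * unconditional, the OUT_REG_HALF/precision fields stay at their zero
+ * defaults, and the unused source DWORDs are simply left zero here.
+ */
+static INLINE void
+nv40_fp_example_mov(unsigned inst[4], int dst, int src, int last)
+{
+ inst[0] = (NV40_FP_OP_OPCODE_MOV << NV40_FP_OP_OPCODE_SHIFT) |
+ ((dst << NV40_FP_OP_OUT_REG_SHIFT) & NV40_FP_OP_OUT_REG_MASK) |
+ NV40_FP_OP_OUTMASK_MASK |
+ (last ? NV40_FP_OP_PROGRAM_END : 0);
+
+ inst[1] = (NV40_FP_OP_COND_TR << NV40_FP_OP_COND_SHIFT) |
+ (NV40_FP_REG_TYPE_TEMP << NV40_FP_REG_TYPE_SHIFT) |
+ ((src << NV40_FP_REG_SRC_SHIFT) & NV40_FP_REG_SRC_MASK) |
+ (NV40_FP_SWIZZLE_X << NV40_FP_REG_SWZ_X_SHIFT) |
+ (NV40_FP_SWIZZLE_Y << NV40_FP_REG_SWZ_Y_SHIFT) |
+ (NV40_FP_SWIZZLE_Z << NV40_FP_REG_SWZ_Z_SHIFT) |
+ (NV40_FP_SWIZZLE_W << NV40_FP_REG_SWZ_W_SHIFT);
+
+ inst[2] = 0;
+ inst[3] = 0;
+}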
+
+#ifndef NV40_SHADER_NO_FUCKEDNESS
+#define NV40SR_NONE 0
+#define NV40SR_OUTPUT 1
+#define NV40SR_INPUT 2
+#define NV40SR_TEMP 3
+#define NV40SR_CONST 4
+
+struct nv40_sreg {
+ int type;
+ int index;
+
+ int dst_scale;
+
+ int negate;
+ int abs;
+ int swz[4];
+
+ int cc_update;
+ int cc_update_reg;
+ int cc_test;
+ int cc_test_reg;
+ int cc_swz[4];
+};
+
+static INLINE struct nv40_sreg
+nv40_sr(int type, int index)
+{
+ struct nv40_sreg temp = {
+ .type = type,
+ .index = index,
+ .dst_scale = DEF_SCALE,
+ .abs = 0,
+ .negate = 0,
+ .swz = { 0, 1, 2, 3 },
+ .cc_update = 0,
+ .cc_update_reg = 0,
+ .cc_test = DEF_CTEST,
+ .cc_test_reg = 0,
+ .cc_swz = { 0, 1, 2, 3 },
+ };
+ return temp;
+}
+
+static INLINE struct nv40_sreg
+nv40_sr_swz(struct nv40_sreg src, int x, int y, int z, int w)
+{
+ struct nv40_sreg dst = src;
+
+ dst.swz[SWZ_X] = src.swz[x];
+ dst.swz[SWZ_Y] = src.swz[y];
+ dst.swz[SWZ_Z] = src.swz[z];
+ dst.swz[SWZ_W] = src.swz[w];
+ return dst;
+}
+
+static INLINE struct nv40_sreg
+nv40_sr_neg(struct nv40_sreg src)
+{
+ src.negate = !src.negate;
+ return src;
+}
+
+static INLINE struct nv40_sreg
+nv40_sr_abs(struct nv40_sreg src)
+{
+ src.abs = 1;
+ return src;
+}
+
+static INLINE struct nv40_sreg
+nv40_sr_scale(struct nv40_sreg src, int scale)
+{
+ src.dst_scale = scale;
+ return src;
+}
+#endif
+
+#endif
diff --git a/src/gallium/drivers/nv40/nv40_state.c b/src/gallium/drivers/nv40/nv40_state.c
new file mode 100644
index 0000000000..c3ee4d2345
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_state.c
@@ -0,0 +1,743 @@
+#include "pipe/p_state.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+
+#include "draw/draw_context.h"
+
+#include "tgsi/tgsi_parse.h"
+
+#include "nv40_context.h"
+#include "nv40_state.h"
+
+static void *
+nv40_blend_state_create(struct pipe_context *pipe,
+ const struct pipe_blend_state *cso)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ struct nouveau_grobj *curie = nv40->screen->curie;
+ struct nv40_blend_state *bso = CALLOC(1, sizeof(*bso));
+ struct nouveau_stateobj *so = so_new(16, 0);
+
+ if (cso->blend_enable) {
+ so_method(so, curie, NV40TCL_BLEND_ENABLE, 3);
+ so_data (so, 1);
+ so_data (so, (nvgl_blend_func(cso->alpha_src_factor) << 16) |
+ nvgl_blend_func(cso->rgb_src_factor));
+ so_data (so, nvgl_blend_func(cso->alpha_dst_factor) << 16 |
+ nvgl_blend_func(cso->rgb_dst_factor));
+ so_method(so, curie, NV40TCL_BLEND_EQUATION, 1);
+ so_data (so, nvgl_blend_eqn(cso->alpha_func) << 16 |
+ nvgl_blend_eqn(cso->rgb_func));
+ } else {
+ so_method(so, curie, NV40TCL_BLEND_ENABLE, 1);
+ so_data (so, 0);
+ }
+
+ so_method(so, curie, NV40TCL_COLOR_MASK, 1);
+ so_data (so, (((cso->colormask & PIPE_MASK_A) ? (0x01 << 24) : 0) |
+ ((cso->colormask & PIPE_MASK_R) ? (0x01 << 16) : 0) |
+ ((cso->colormask & PIPE_MASK_G) ? (0x01 << 8) : 0) |
+ ((cso->colormask & PIPE_MASK_B) ? (0x01 << 0) : 0)));
+
+ if (cso->logicop_enable) {
+ so_method(so, curie, NV40TCL_COLOR_LOGIC_OP_ENABLE, 2);
+ so_data (so, 1);
+ so_data (so, nvgl_logicop_func(cso->logicop_func));
+ } else {
+ so_method(so, curie, NV40TCL_COLOR_LOGIC_OP_ENABLE, 1);
+ so_data (so, 0);
+ }
+
+ so_method(so, curie, NV40TCL_DITHER_ENABLE, 1);
+ so_data (so, cso->dither ? 1 : 0);
+
+ so_ref(so, &bso->so);
+ so_ref(NULL, &so);
+ bso->pipe = *cso;
+ return (void *)bso;
+}
+
+static void
+nv40_blend_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+
+ nv40->blend = hwcso;
+ nv40->dirty |= NV40_NEW_BLEND;
+}
+
+static void
+nv40_blend_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv40_blend_state *bso = hwcso;
+
+ so_ref(NULL, &bso->so);
+ FREE(bso);
+}
+
+
+static INLINE unsigned
+wrap_mode(unsigned wrap) {
+ unsigned ret;
+
+ switch (wrap) {
+ case PIPE_TEX_WRAP_REPEAT:
+ ret = NV40TCL_TEX_WRAP_S_REPEAT;
+ break;
+ case PIPE_TEX_WRAP_MIRROR_REPEAT:
+ ret = NV40TCL_TEX_WRAP_S_MIRRORED_REPEAT;
+ break;
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ ret = NV40TCL_TEX_WRAP_S_CLAMP_TO_EDGE;
+ break;
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ ret = NV40TCL_TEX_WRAP_S_CLAMP_TO_BORDER;
+ break;
+ case PIPE_TEX_WRAP_CLAMP:
+ ret = NV40TCL_TEX_WRAP_S_CLAMP;
+ break;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
+ ret = NV40TCL_TEX_WRAP_S_MIRROR_CLAMP_TO_EDGE;
+ break;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
+ ret = NV40TCL_TEX_WRAP_S_MIRROR_CLAMP_TO_BORDER;
+ break;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP:
+ ret = NV40TCL_TEX_WRAP_S_MIRROR_CLAMP;
+ break;
+ default:
+ NOUVEAU_ERR("unknown wrap mode: %d\n", wrap);
+ ret = NV40TCL_TEX_WRAP_S_REPEAT;
+ break;
+ }
+
+ return ret >> NV40TCL_TEX_WRAP_S_SHIFT;
+}
+
+static void *
+nv40_sampler_state_create(struct pipe_context *pipe,
+ const struct pipe_sampler_state *cso)
+{
+ struct nv40_sampler_state *ps;
+ uint32_t filter = 0;
+
+ ps = MALLOC(sizeof(struct nv40_sampler_state));
+
+ ps->fmt = 0;
+ if (!cso->normalized_coords)
+ ps->fmt |= NV40TCL_TEX_FORMAT_RECT;
+
+ ps->wrap = ((wrap_mode(cso->wrap_s) << NV40TCL_TEX_WRAP_S_SHIFT) |
+ (wrap_mode(cso->wrap_t) << NV40TCL_TEX_WRAP_T_SHIFT) |
+ (wrap_mode(cso->wrap_r) << NV40TCL_TEX_WRAP_R_SHIFT));
+
+ ps->en = 0;
+ if (cso->max_anisotropy >= 2.0) {
+ /* no idea, binary driver sets it, works without it.. meh.. */
+ ps->wrap |= (1 << 5);
+
+ if (cso->max_anisotropy >= 16.0) {
+ ps->en |= NV40TCL_TEX_ENABLE_ANISO_16X;
+ } else
+ if (cso->max_anisotropy >= 12.0) {
+ ps->en |= NV40TCL_TEX_ENABLE_ANISO_12X;
+ } else
+ if (cso->max_anisotropy >= 10.0) {
+ ps->en |= NV40TCL_TEX_ENABLE_ANISO_10X;
+ } else
+ if (cso->max_anisotropy >= 8.0) {
+ ps->en |= NV40TCL_TEX_ENABLE_ANISO_8X;
+ } else
+ if (cso->max_anisotropy >= 6.0) {
+ ps->en |= NV40TCL_TEX_ENABLE_ANISO_6X;
+ } else
+ if (cso->max_anisotropy >= 4.0) {
+ ps->en |= NV40TCL_TEX_ENABLE_ANISO_4X;
+ } else {
+ ps->en |= NV40TCL_TEX_ENABLE_ANISO_2X;
+ }
+ }
+
+ switch (cso->mag_img_filter) {
+ case PIPE_TEX_FILTER_LINEAR:
+ filter |= NV40TCL_TEX_FILTER_MAG_LINEAR;
+ break;
+ case PIPE_TEX_FILTER_NEAREST:
+ default:
+ filter |= NV40TCL_TEX_FILTER_MAG_NEAREST;
+ break;
+ }
+
+ switch (cso->min_img_filter) {
+ case PIPE_TEX_FILTER_LINEAR:
+ switch (cso->min_mip_filter) {
+ case PIPE_TEX_MIPFILTER_NEAREST:
+ filter |= NV40TCL_TEX_FILTER_MIN_LINEAR_MIPMAP_NEAREST;
+ break;
+ case PIPE_TEX_MIPFILTER_LINEAR:
+ filter |= NV40TCL_TEX_FILTER_MIN_LINEAR_MIPMAP_LINEAR;
+ break;
+ case PIPE_TEX_MIPFILTER_NONE:
+ default:
+ filter |= NV40TCL_TEX_FILTER_MIN_LINEAR;
+ break;
+ }
+ break;
+ case PIPE_TEX_FILTER_NEAREST:
+ default:
+ switch (cso->min_mip_filter) {
+ case PIPE_TEX_MIPFILTER_NEAREST:
+ filter |= NV40TCL_TEX_FILTER_MIN_NEAREST_MIPMAP_NEAREST;
+ break;
+ case PIPE_TEX_MIPFILTER_LINEAR:
+ filter |= NV40TCL_TEX_FILTER_MIN_NEAREST_MIPMAP_LINEAR;
+ break;
+ case PIPE_TEX_MIPFILTER_NONE:
+ default:
+ filter |= NV40TCL_TEX_FILTER_MIN_NEAREST;
+ break;
+ }
+ break;
+ }
+
+ ps->filt = filter;
+
+ {
+ float limit;
+
+ limit = CLAMP(cso->lod_bias, -16.0, 15.0);
+ ps->filt |= (int)(limit * 256.0) & 0x1fff;
+
+ limit = CLAMP(cso->max_lod, 0.0, 15.0);
+ ps->en |= (int)(limit * 256.0) << 7;
+
+ limit = CLAMP(cso->min_lod, 0.0, 15.0);
+ ps->en |= (int)(limit * 256.0) << 19;
+ }
+
+
+ if (cso->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) {
+ switch (cso->compare_func) {
+ case PIPE_FUNC_NEVER:
+ ps->wrap |= NV40TCL_TEX_WRAP_RCOMP_NEVER;
+ break;
+ case PIPE_FUNC_GREATER:
+ ps->wrap |= NV40TCL_TEX_WRAP_RCOMP_GREATER;
+ break;
+ case PIPE_FUNC_EQUAL:
+ ps->wrap |= NV40TCL_TEX_WRAP_RCOMP_EQUAL;
+ break;
+ case PIPE_FUNC_GEQUAL:
+ ps->wrap |= NV40TCL_TEX_WRAP_RCOMP_GEQUAL;
+ break;
+ case PIPE_FUNC_LESS:
+ ps->wrap |= NV40TCL_TEX_WRAP_RCOMP_LESS;
+ break;
+ case PIPE_FUNC_NOTEQUAL:
+ ps->wrap |= NV40TCL_TEX_WRAP_RCOMP_NOTEQUAL;
+ break;
+ case PIPE_FUNC_LEQUAL:
+ ps->wrap |= NV40TCL_TEX_WRAP_RCOMP_LEQUAL;
+ break;
+ case PIPE_FUNC_ALWAYS:
+ ps->wrap |= NV40TCL_TEX_WRAP_RCOMP_ALWAYS;
+ break;
+ default:
+ break;
+ }
+ }
+
+ ps->bcol = ((float_to_ubyte(cso->border_color[3]) << 24) |
+ (float_to_ubyte(cso->border_color[0]) << 16) |
+ (float_to_ubyte(cso->border_color[1]) << 8) |
+ (float_to_ubyte(cso->border_color[2]) << 0));
+
+ return (void *)ps;
+}
+
+static void
+nv40_sampler_state_bind(struct pipe_context *pipe, unsigned nr, void **sampler)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ unsigned unit;
+
+ for (unit = 0; unit < nr; unit++) {
+ nv40->tex_sampler[unit] = sampler[unit];
+ nv40->dirty_samplers |= (1 << unit);
+ }
+
+ for (unit = nr; unit < nv40->nr_samplers; unit++) {
+ nv40->tex_sampler[unit] = NULL;
+ nv40->dirty_samplers |= (1 << unit);
+ }
+
+ nv40->nr_samplers = nr;
+ nv40->dirty |= NV40_NEW_SAMPLER;
+}
+
+static void
+nv40_sampler_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ FREE(hwcso);
+}
+
+static void
+nv40_set_sampler_texture(struct pipe_context *pipe, unsigned nr,
+ struct pipe_texture **miptree)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ unsigned unit;
+
+ for (unit = 0; unit < nr; unit++) {
+ pipe_texture_reference((struct pipe_texture **)
+ &nv40->tex_miptree[unit], miptree[unit]);
+ nv40->dirty_samplers |= (1 << unit);
+ }
+
+ for (unit = nr; unit < nv40->nr_textures; unit++) {
+ pipe_texture_reference((struct pipe_texture **)
+ &nv40->tex_miptree[unit], NULL);
+ nv40->dirty_samplers |= (1 << unit);
+ }
+
+ nv40->nr_textures = nr;
+ nv40->dirty |= NV40_NEW_SAMPLER;
+}
+
+static void *
+nv40_rasterizer_state_create(struct pipe_context *pipe,
+ const struct pipe_rasterizer_state *cso)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ struct nv40_rasterizer_state *rsso = CALLOC(1, sizeof(*rsso));
+ struct nouveau_stateobj *so = so_new(32, 0);
+ struct nouveau_grobj *curie = nv40->screen->curie;
+
+ /*XXX: ignored:
+ * light_twoside
+ * point_smooth -nohw
+ * multisample
+ */
+
+ so_method(so, curie, NV40TCL_SHADE_MODEL, 1);
+ so_data (so, cso->flatshade ? NV40TCL_SHADE_MODEL_FLAT :
+ NV40TCL_SHADE_MODEL_SMOOTH);
+
+ so_method(so, curie, NV40TCL_LINE_WIDTH, 2);
+ so_data (so, (unsigned char)(cso->line_width * 8.0) & 0xff);
+ so_data (so, cso->line_smooth ? 1 : 0);
+ so_method(so, curie, NV40TCL_LINE_STIPPLE_ENABLE, 2);
+ so_data (so, cso->line_stipple_enable ? 1 : 0);
+ so_data (so, (cso->line_stipple_pattern << 16) |
+ cso->line_stipple_factor);
+
+ so_method(so, curie, NV40TCL_POINT_SIZE, 1);
+ so_data (so, fui(cso->point_size));
+
+ so_method(so, curie, NV40TCL_POLYGON_MODE_FRONT, 6);
+ if (cso->front_winding == PIPE_WINDING_CCW) {
+ so_data(so, nvgl_polygon_mode(cso->fill_ccw));
+ so_data(so, nvgl_polygon_mode(cso->fill_cw));
+ switch (cso->cull_mode) {
+ case PIPE_WINDING_CCW:
+ so_data(so, NV40TCL_CULL_FACE_FRONT);
+ break;
+ case PIPE_WINDING_CW:
+ so_data(so, NV40TCL_CULL_FACE_BACK);
+ break;
+ case PIPE_WINDING_BOTH:
+ so_data(so, NV40TCL_CULL_FACE_FRONT_AND_BACK);
+ break;
+ default:
+ so_data(so, NV40TCL_CULL_FACE_BACK);
+ break;
+ }
+ so_data(so, NV40TCL_FRONT_FACE_CCW);
+ } else {
+ so_data(so, nvgl_polygon_mode(cso->fill_cw));
+ so_data(so, nvgl_polygon_mode(cso->fill_ccw));
+ switch (cso->cull_mode) {
+ case PIPE_WINDING_CCW:
+ so_data(so, NV40TCL_CULL_FACE_BACK);
+ break;
+ case PIPE_WINDING_CW:
+ so_data(so, NV40TCL_CULL_FACE_FRONT);
+ break;
+ case PIPE_WINDING_BOTH:
+ so_data(so, NV40TCL_CULL_FACE_FRONT_AND_BACK);
+ break;
+ default:
+ so_data(so, NV40TCL_CULL_FACE_BACK);
+ break;
+ }
+ so_data(so, NV40TCL_FRONT_FACE_CW);
+ }
+ so_data(so, cso->poly_smooth ? 1 : 0);
+ so_data(so, (cso->cull_mode != PIPE_WINDING_NONE) ? 1 : 0);
+
+ so_method(so, curie, NV40TCL_POLYGON_STIPPLE_ENABLE, 1);
+ so_data (so, cso->poly_stipple_enable ? 1 : 0);
+
+ so_method(so, curie, NV40TCL_POLYGON_OFFSET_POINT_ENABLE, 3);
+ if ((cso->offset_cw && cso->fill_cw == PIPE_POLYGON_MODE_POINT) ||
+ (cso->offset_ccw && cso->fill_ccw == PIPE_POLYGON_MODE_POINT))
+ so_data(so, 1);
+ else
+ so_data(so, 0);
+ if ((cso->offset_cw && cso->fill_cw == PIPE_POLYGON_MODE_LINE) ||
+ (cso->offset_ccw && cso->fill_ccw == PIPE_POLYGON_MODE_LINE))
+ so_data(so, 1);
+ else
+ so_data(so, 0);
+ if ((cso->offset_cw && cso->fill_cw == PIPE_POLYGON_MODE_FILL) ||
+ (cso->offset_ccw && cso->fill_ccw == PIPE_POLYGON_MODE_FILL))
+ so_data(so, 1);
+ else
+ so_data(so, 0);
+ if (cso->offset_cw || cso->offset_ccw) {
+ so_method(so, curie, NV40TCL_POLYGON_OFFSET_FACTOR, 2);
+ so_data (so, fui(cso->offset_scale));
+ so_data (so, fui(cso->offset_units * 2));
+ }
+
+ so_method(so, curie, NV40TCL_POINT_SPRITE, 1);
+ if (cso->point_sprite) {
+ unsigned psctl = (1 << 0), i;
+
+ for (i = 0; i < 8; i++) {
+ if (cso->sprite_coord_mode[i] != PIPE_SPRITE_COORD_NONE)
+ psctl |= (1 << (8 + i));
+ }
+
+ so_data(so, psctl);
+ } else {
+ so_data(so, 0);
+ }
+
+ so_ref(so, &rsso->so);
+ so_ref(NULL, &so);
+ rsso->pipe = *cso;
+ return (void *)rsso;
+}
+
+static void
+nv40_rasterizer_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+
+ nv40->rasterizer = hwcso;
+ nv40->dirty |= NV40_NEW_RAST;
+ nv40->draw_dirty |= NV40_NEW_RAST;
+}
+
+static void
+nv40_rasterizer_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv40_rasterizer_state *rsso = hwcso;
+
+ so_ref(NULL, &rsso->so);
+ FREE(rsso);
+}
+
+static void *
+nv40_depth_stencil_alpha_state_create(struct pipe_context *pipe,
+ const struct pipe_depth_stencil_alpha_state *cso)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ struct nv40_zsa_state *zsaso = CALLOC(1, sizeof(*zsaso));
+ struct nouveau_stateobj *so = so_new(32, 0);
+ struct nouveau_grobj *curie = nv40->screen->curie;
+
+ so_method(so, curie, NV40TCL_DEPTH_FUNC, 3);
+ so_data (so, nvgl_comparison_op(cso->depth.func));
+ so_data (so, cso->depth.writemask ? 1 : 0);
+ so_data (so, cso->depth.enabled ? 1 : 0);
+
+ so_method(so, curie, NV40TCL_ALPHA_TEST_ENABLE, 3);
+ so_data (so, cso->alpha.enabled ? 1 : 0);
+ so_data (so, nvgl_comparison_op(cso->alpha.func));
+ so_data (so, float_to_ubyte(cso->alpha.ref_value));
+
+ if (cso->stencil[0].enabled) {
+ so_method(so, curie, NV40TCL_STENCIL_FRONT_ENABLE, 8);
+ so_data (so, cso->stencil[0].enabled ? 1 : 0);
+ so_data (so, cso->stencil[0].writemask);
+ so_data (so, nvgl_comparison_op(cso->stencil[0].func));
+ so_data (so, cso->stencil[0].ref_value);
+ so_data (so, cso->stencil[0].valuemask);
+ so_data (so, nvgl_stencil_op(cso->stencil[0].fail_op));
+ so_data (so, nvgl_stencil_op(cso->stencil[0].zfail_op));
+ so_data (so, nvgl_stencil_op(cso->stencil[0].zpass_op));
+ } else {
+ so_method(so, curie, NV40TCL_STENCIL_FRONT_ENABLE, 1);
+ so_data (so, 0);
+ }
+
+ if (cso->stencil[1].enabled) {
+ so_method(so, curie, NV40TCL_STENCIL_BACK_ENABLE, 8);
+ so_data (so, cso->stencil[1].enabled ? 1 : 0);
+ so_data (so, cso->stencil[1].writemask);
+ so_data (so, nvgl_comparison_op(cso->stencil[1].func));
+ so_data (so, cso->stencil[1].ref_value);
+ so_data (so, cso->stencil[1].valuemask);
+ so_data (so, nvgl_stencil_op(cso->stencil[1].fail_op));
+ so_data (so, nvgl_stencil_op(cso->stencil[1].zfail_op));
+ so_data (so, nvgl_stencil_op(cso->stencil[1].zpass_op));
+ } else {
+ so_method(so, curie, NV40TCL_STENCIL_BACK_ENABLE, 1);
+ so_data (so, 0);
+ }
+
+ so_ref(so, &zsaso->so);
+ so_ref(NULL, &so);
+ zsaso->pipe = *cso;
+ return (void *)zsaso;
+}
+
+static void
+nv40_depth_stencil_alpha_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+
+ nv40->zsa = hwcso;
+ nv40->dirty |= NV40_NEW_ZSA;
+}
+
+static void
+nv40_depth_stencil_alpha_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv40_zsa_state *zsaso = hwcso;
+
+ so_ref(NULL, &zsaso->so);
+ FREE(zsaso);
+}
+
+static void *
+nv40_vp_state_create(struct pipe_context *pipe,
+ const struct pipe_shader_state *cso)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ struct nv40_vertex_program *vp;
+
+ vp = CALLOC(1, sizeof(struct nv40_vertex_program));
+ vp->pipe.tokens = tgsi_dup_tokens(cso->tokens);
+ vp->draw = draw_create_vertex_shader(nv40->draw, &vp->pipe);
+
+ return (void *)vp;
+}
+
+static void
+nv40_vp_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+
+ nv40->vertprog = hwcso;
+ nv40->dirty |= NV40_NEW_VERTPROG;
+ nv40->draw_dirty |= NV40_NEW_VERTPROG;
+}
+
+static void
+nv40_vp_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ struct nv40_vertex_program *vp = hwcso;
+
+ draw_delete_vertex_shader(nv40->draw, vp->draw);
+ nv40_vertprog_destroy(nv40, vp);
+ FREE((void*)vp->pipe.tokens);
+ FREE(vp);
+}
+
+static void *
+nv40_fp_state_create(struct pipe_context *pipe,
+ const struct pipe_shader_state *cso)
+{
+ struct nv40_fragment_program *fp;
+
+ fp = CALLOC(1, sizeof(struct nv40_fragment_program));
+ fp->pipe.tokens = tgsi_dup_tokens(cso->tokens);
+
+ tgsi_scan_shader(fp->pipe.tokens, &fp->info);
+
+ return (void *)fp;
+}
+
+static void
+nv40_fp_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+
+ nv40->fragprog = hwcso;
+ nv40->dirty |= NV40_NEW_FRAGPROG;
+}
+
+static void
+nv40_fp_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ struct nv40_fragment_program *fp = hwcso;
+
+ nv40_fragprog_destroy(nv40, fp);
+ FREE((void*)fp->pipe.tokens);
+ FREE(fp);
+}
+
+static void
+nv40_set_blend_color(struct pipe_context *pipe,
+ const struct pipe_blend_color *bcol)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+
+ nv40->blend_colour = *bcol;
+ nv40->dirty |= NV40_NEW_BCOL;
+}
+
+static void
+nv40_set_clip_state(struct pipe_context *pipe,
+ const struct pipe_clip_state *clip)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+
+ nv40->clip = *clip;
+ nv40->dirty |= NV40_NEW_UCP;
+ nv40->draw_dirty |= NV40_NEW_UCP;
+}
+
+static void
+nv40_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
+ const struct pipe_constant_buffer *buf )
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+
+ nv40->constbuf[shader] = buf->buffer;
+ nv40->constbuf_nr[shader] = buf->buffer->size / (4 * sizeof(float));
+
+ if (shader == PIPE_SHADER_VERTEX) {
+ nv40->dirty |= NV40_NEW_VERTPROG;
+ } else
+ if (shader == PIPE_SHADER_FRAGMENT) {
+ nv40->dirty |= NV40_NEW_FRAGPROG;
+ }
+}
+
+static void
+nv40_set_framebuffer_state(struct pipe_context *pipe,
+ const struct pipe_framebuffer_state *fb)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+
+ nv40->framebuffer = *fb;
+ nv40->dirty |= NV40_NEW_FB;
+}
+
+static void
+nv40_set_polygon_stipple(struct pipe_context *pipe,
+ const struct pipe_poly_stipple *stipple)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+
+ memcpy(nv40->stipple, stipple->stipple, 4 * 32);
+ nv40->dirty |= NV40_NEW_STIPPLE;
+}
+
+static void
+nv40_set_scissor_state(struct pipe_context *pipe,
+ const struct pipe_scissor_state *s)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+
+ nv40->scissor = *s;
+ nv40->dirty |= NV40_NEW_SCISSOR;
+}
+
+static void
+nv40_set_viewport_state(struct pipe_context *pipe,
+ const struct pipe_viewport_state *vpt)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+
+ nv40->viewport = *vpt;
+ nv40->dirty |= NV40_NEW_VIEWPORT;
+ nv40->draw_dirty |= NV40_NEW_VIEWPORT;
+}
+
+static void
+nv40_set_vertex_buffers(struct pipe_context *pipe, unsigned count,
+ const struct pipe_vertex_buffer *vb)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+
+ memcpy(nv40->vtxbuf, vb, sizeof(*vb) * count);
+ nv40->vtxbuf_nr = count;
+
+ nv40->dirty |= NV40_NEW_ARRAYS;
+ nv40->draw_dirty |= NV40_NEW_ARRAYS;
+}
+
+static void
+nv40_set_vertex_elements(struct pipe_context *pipe, unsigned count,
+ const struct pipe_vertex_element *ve)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+
+ memcpy(nv40->vtxelt, ve, sizeof(*ve) * count);
+ nv40->vtxelt_nr = count;
+
+ nv40->dirty |= NV40_NEW_ARRAYS;
+ nv40->draw_dirty |= NV40_NEW_ARRAYS;
+}
+
+static void
+nv40_set_edgeflags(struct pipe_context *pipe, const unsigned *bitfield)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+
+ nv40->edgeflags = bitfield;
+ nv40->dirty |= NV40_NEW_ARRAYS;
+ nv40->draw_dirty |= NV40_NEW_ARRAYS;
+}
+
+void
+nv40_init_state_functions(struct nv40_context *nv40)
+{
+ nv40->pipe.create_blend_state = nv40_blend_state_create;
+ nv40->pipe.bind_blend_state = nv40_blend_state_bind;
+ nv40->pipe.delete_blend_state = nv40_blend_state_delete;
+
+ nv40->pipe.create_sampler_state = nv40_sampler_state_create;
+ nv40->pipe.bind_sampler_states = nv40_sampler_state_bind;
+ nv40->pipe.delete_sampler_state = nv40_sampler_state_delete;
+ nv40->pipe.set_sampler_textures = nv40_set_sampler_texture;
+
+ nv40->pipe.create_rasterizer_state = nv40_rasterizer_state_create;
+ nv40->pipe.bind_rasterizer_state = nv40_rasterizer_state_bind;
+ nv40->pipe.delete_rasterizer_state = nv40_rasterizer_state_delete;
+
+ nv40->pipe.create_depth_stencil_alpha_state =
+ nv40_depth_stencil_alpha_state_create;
+ nv40->pipe.bind_depth_stencil_alpha_state =
+ nv40_depth_stencil_alpha_state_bind;
+ nv40->pipe.delete_depth_stencil_alpha_state =
+ nv40_depth_stencil_alpha_state_delete;
+
+ nv40->pipe.create_vs_state = nv40_vp_state_create;
+ nv40->pipe.bind_vs_state = nv40_vp_state_bind;
+ nv40->pipe.delete_vs_state = nv40_vp_state_delete;
+
+ nv40->pipe.create_fs_state = nv40_fp_state_create;
+ nv40->pipe.bind_fs_state = nv40_fp_state_bind;
+ nv40->pipe.delete_fs_state = nv40_fp_state_delete;
+
+ nv40->pipe.set_blend_color = nv40_set_blend_color;
+ nv40->pipe.set_clip_state = nv40_set_clip_state;
+ nv40->pipe.set_constant_buffer = nv40_set_constant_buffer;
+ nv40->pipe.set_framebuffer_state = nv40_set_framebuffer_state;
+ nv40->pipe.set_polygon_stipple = nv40_set_polygon_stipple;
+ nv40->pipe.set_scissor_state = nv40_set_scissor_state;
+ nv40->pipe.set_viewport_state = nv40_set_viewport_state;
+
+ nv40->pipe.set_edgeflags = nv40_set_edgeflags;
+ nv40->pipe.set_vertex_buffers = nv40_set_vertex_buffers;
+ nv40->pipe.set_vertex_elements = nv40_set_vertex_elements;
+}
+
diff --git a/src/gallium/drivers/nv40/nv40_state.h b/src/gallium/drivers/nv40/nv40_state.h
new file mode 100644
index 0000000000..8a9d8c8fdf
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_state.h
@@ -0,0 +1,88 @@
+#ifndef __NV40_STATE_H__
+#define __NV40_STATE_H__
+
+#include "pipe/p_state.h"
+#include "tgsi/tgsi_scan.h"
+
+struct nv40_sampler_state {
+ uint32_t fmt;
+ uint32_t wrap;
+ uint32_t en;
+ uint32_t filt;
+ uint32_t bcol;
+};
+
+struct nv40_vertex_program_exec {
+ uint32_t data[4];
+ boolean has_branch_offset;
+ int const_index;
+};
+
+struct nv40_vertex_program_data {
+ int index; /* immediates == -1 */
+ float value[4];
+};
+
+struct nv40_vertex_program {
+ struct pipe_shader_state pipe;
+
+ struct draw_vertex_shader *draw;
+
+ boolean translated;
+
+ struct pipe_clip_state ucp;
+
+ struct nv40_vertex_program_exec *insns;
+ unsigned nr_insns;
+ struct nv40_vertex_program_data *consts;
+ unsigned nr_consts;
+
+ struct nouveau_resource *exec;
+ unsigned exec_start;
+ struct nouveau_resource *data;
+ unsigned data_start;
+ unsigned data_start_min;
+
+ uint32_t ir;
+ uint32_t or;
+ uint32_t clip_ctrl;
+ struct nouveau_stateobj *so;
+};
+
+struct nv40_fragment_program_data {
+ unsigned offset;
+ unsigned index;
+};
+
+struct nv40_fragment_program {
+ struct pipe_shader_state pipe;
+ struct tgsi_shader_info info;
+
+ boolean translated;
+ unsigned samplers;
+
+ uint32_t *insn;
+ int insn_len;
+
+ struct nv40_fragment_program_data *consts;
+ unsigned nr_consts;
+
+ struct pipe_buffer *buffer;
+
+ uint32_t fp_control;
+ struct nouveau_stateobj *so;
+};
+
+struct nv40_miptree {
+ struct pipe_texture base;
+
+ struct pipe_buffer *buffer;
+ uint total_size;
+
+ struct {
+ uint pitch;
+ uint *image_offset;
+ } level[PIPE_MAX_TEXTURE_LEVELS];
+};
+
+#endif
diff --git a/src/gallium/drivers/nv40/nv40_state_blend.c b/src/gallium/drivers/nv40/nv40_state_blend.c
new file mode 100644
index 0000000000..8cd05ce66e
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_state_blend.c
@@ -0,0 +1,41 @@
+#include "nv40_context.h"
+
+static boolean
+nv40_state_blend_validate(struct nv40_context *nv40)
+{
+ so_ref(nv40->blend->so, &nv40->state.hw[NV40_STATE_BLEND]);
+ return TRUE;
+}
+
+struct nv40_state_entry nv40_state_blend = {
+ .validate = nv40_state_blend_validate,
+ .dirty = {
+ .pipe = NV40_NEW_BLEND,
+ .hw = NV40_STATE_BLEND
+ }
+};
+
+static boolean
+nv40_state_blend_colour_validate(struct nv40_context *nv40)
+{
+ struct nouveau_stateobj *so = so_new(2, 0);
+ struct pipe_blend_color *bcol = &nv40->blend_colour;
+
+ so_method(so, nv40->screen->curie, NV40TCL_BLEND_COLOR, 1);
+ so_data (so, ((float_to_ubyte(bcol->color[3]) << 24) |
+ (float_to_ubyte(bcol->color[0]) << 16) |
+ (float_to_ubyte(bcol->color[1]) << 8) |
+ (float_to_ubyte(bcol->color[2]) << 0)));
+
+ so_ref(so, &nv40->state.hw[NV40_STATE_BCOL]);
+ so_ref(NULL, &so);
+ return TRUE;
+}
+
+struct nv40_state_entry nv40_state_blend_colour = {
+ .validate = nv40_state_blend_colour_validate,
+ .dirty = {
+ .pipe = NV40_NEW_BCOL,
+ .hw = NV40_STATE_BCOL
+ }
+};
diff --git a/src/gallium/drivers/nv40/nv40_state_emit.c b/src/gallium/drivers/nv40/nv40_state_emit.c
new file mode 100644
index 0000000000..198692965d
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_state_emit.c
@@ -0,0 +1,177 @@
+#include "nv40_context.h"
+#include "nv40_state.h"
+#include "draw/draw_context.h"
+
+static struct nv40_state_entry *render_states[] = {
+ &nv40_state_framebuffer,
+ &nv40_state_rasterizer,
+ &nv40_state_scissor,
+ &nv40_state_stipple,
+ &nv40_state_fragprog,
+ &nv40_state_fragtex,
+ &nv40_state_vertprog,
+ &nv40_state_blend,
+ &nv40_state_blend_colour,
+ &nv40_state_zsa,
+ &nv40_state_viewport,
+ &nv40_state_vbo,
+ NULL
+};
+
+static struct nv40_state_entry *swtnl_states[] = {
+ &nv40_state_framebuffer,
+ &nv40_state_rasterizer,
+ &nv40_state_scissor,
+ &nv40_state_stipple,
+ &nv40_state_fragprog,
+ &nv40_state_fragtex,
+ &nv40_state_vertprog,
+ &nv40_state_blend,
+ &nv40_state_blend_colour,
+ &nv40_state_zsa,
+ &nv40_state_viewport,
+ &nv40_state_vtxfmt,
+ NULL
+};
+
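+/* Walk a NULL-terminated list of state entries; any entry whose pipe-level
+ * dirty bits intersect nv40->dirty is revalidated, and on success the
+ * corresponding hardware state slot is marked for emission.
+ */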
+static void
+nv40_state_do_validate(struct nv40_context *nv40,
+ struct nv40_state_entry **states)
+{
+ while (*states) {
+ struct nv40_state_entry *e = *states;
+
+ if (nv40->dirty & e->dirty.pipe) {
+ if (e->validate(nv40))
+ nv40->state.dirty |= (1ULL << e->dirty.hw);
+ }
+
+ states++;
+ }
+ nv40->dirty = 0;
+}
+
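+/* Emit every hardware state object marked dirty on this context. If another
+ * pipe context was emitted last, any slot whose object differs from the one
+ * currently bound on the screen is re-marked dirty first. Relocation markers
+ * are re-emitted each time for the framebuffer, the active fragment textures,
+ * the fragment program and (in hardware mode) the vertex buffers.
+ */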
+void
+nv40_state_emit(struct nv40_context *nv40)
+{
+ struct nouveau_channel *chan = nv40->screen->base.channel;
+ struct nv40_state *state = &nv40->state;
+ struct nv40_screen *screen = nv40->screen;
+ unsigned i, samplers;
+ uint64_t states;
+
+ if (nv40->pctx_id != screen->cur_pctx) {
+ for (i = 0; i < NV40_STATE_MAX; i++) {
+ if (state->hw[i] && screen->state[i] != state->hw[i])
+ state->dirty |= (1ULL << i);
+ }
+
+ screen->cur_pctx = nv40->pctx_id;
+ }
+
+ for (i = 0, states = state->dirty; states; i++) {
+ if (!(states & (1ULL << i)))
+ continue;
+ so_ref (state->hw[i], &nv40->screen->state[i]);
+ if (state->hw[i])
+ so_emit(chan, nv40->screen->state[i]);
+ states &= ~(1ULL << i);
+ }
+
+ if (state->dirty & ((1ULL << NV40_STATE_FRAGPROG) |
+ (1ULL << NV40_STATE_FRAGTEX0))) {
+ BEGIN_RING(curie, NV40TCL_TEX_CACHE_CTL, 1);
+ OUT_RING (2);
+ BEGIN_RING(curie, NV40TCL_TEX_CACHE_CTL, 1);
+ OUT_RING (1);
+ }
+
+ state->dirty = 0;
+
+ so_emit_reloc_markers(chan, state->hw[NV40_STATE_FB]);
+ for (i = 0, samplers = state->fp_samplers; i < 16 && samplers; i++) {
+ if (!(samplers & (1 << i)))
+ continue;
+ so_emit_reloc_markers(chan,
+ state->hw[NV40_STATE_FRAGTEX0+i]);
+ samplers &= ~(1ULL << i);
+ }
+ so_emit_reloc_markers(chan, state->hw[NV40_STATE_FRAGPROG]);
+ if (state->hw[NV40_STATE_VTXBUF] && nv40->render_mode == HW)
+ so_emit_reloc_markers(chan, state->hw[NV40_STATE_VTXBUF]);
+}
+
+boolean
+nv40_state_validate(struct nv40_context *nv40)
+{
+ boolean was_sw = nv40->fallback_swtnl ? TRUE : FALSE;
+
+ if (nv40->render_mode != HW) {
+ /* Don't even bother trying to go back to hw if none
+ * of the states that caused swtnl previously have changed.
+ */
+ if ((nv40->fallback_swtnl & nv40->dirty)
+ != nv40->fallback_swtnl)
+ return FALSE;
+
+ /* Attempt to go to hwtnl again */
+ nv40->pipe.flush(&nv40->pipe, 0, NULL);
+ nv40->dirty |= (NV40_NEW_VIEWPORT |
+ NV40_NEW_VERTPROG |
+ NV40_NEW_ARRAYS);
+ nv40->render_mode = HW;
+ }
+
+ nv40_state_do_validate(nv40, render_states);
+ if (nv40->fallback_swtnl || nv40->fallback_swrast)
+ return FALSE;
+
+ if (was_sw)
+ NOUVEAU_ERR("swtnl->hw\n");
+
+ return TRUE;
+}
+
+boolean
+nv40_state_validate_swtnl(struct nv40_context *nv40)
+{
+ struct draw_context *draw = nv40->draw;
+
+ /* Setup for swtnl */
+ if (nv40->render_mode == HW) {
+ NOUVEAU_ERR("hw->swtnl 0x%08x\n", nv40->fallback_swtnl);
+ nv40->pipe.flush(&nv40->pipe, 0, NULL);
+ nv40->dirty |= (NV40_NEW_VIEWPORT |
+ NV40_NEW_VERTPROG |
+ NV40_NEW_ARRAYS);
+ nv40->render_mode = SWTNL;
+ }
+
+ if (nv40->draw_dirty & NV40_NEW_VERTPROG)
+ draw_bind_vertex_shader(draw, nv40->vertprog->draw);
+
+ if (nv40->draw_dirty & NV40_NEW_RAST)
+ draw_set_rasterizer_state(draw, &nv40->rasterizer->pipe);
+
+ if (nv40->draw_dirty & NV40_NEW_UCP)
+ draw_set_clip_state(draw, &nv40->clip);
+
+ if (nv40->draw_dirty & NV40_NEW_VIEWPORT)
+ draw_set_viewport_state(draw, &nv40->viewport);
+
+ if (nv40->draw_dirty & NV40_NEW_ARRAYS) {
+ draw_set_edgeflags(draw, nv40->edgeflags);
+ draw_set_vertex_buffers(draw, nv40->vtxbuf_nr, nv40->vtxbuf);
+ draw_set_vertex_elements(draw, nv40->vtxelt_nr, nv40->vtxelt);
+ }
+
+ nv40_state_do_validate(nv40, swtnl_states);
+ if (nv40->fallback_swrast) {
+ NOUVEAU_ERR("swtnl->swrast 0x%08x\n", nv40->fallback_swrast);
+ return FALSE;
+ }
+
+ nv40->draw_dirty = 0;
+ return TRUE;
+}
+
diff --git a/src/gallium/drivers/nv40/nv40_state_fb.c b/src/gallium/drivers/nv40/nv40_state_fb.c
new file mode 100644
index 0000000000..c2f739157a
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_state_fb.c
@@ -0,0 +1,172 @@
+#include "nv40_context.h"
+#include "nouveau/nouveau_util.h"
+
+static struct pipe_buffer *
+nv40_do_surface_buffer(struct pipe_surface *surface)
+{
+ struct nv40_miptree *mt = (struct nv40_miptree *)surface->texture;
+ return mt->buffer;
+}
+
+#define nv40_surface_buffer(ps) nouveau_bo(nv40_do_surface_buffer(ps))
+
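+/* Build the render target state object: derive the colour and zeta formats
+ * from the bound surfaces (all colour buffers must share one format), pick a
+ * swizzled or linear layout, and point the DMA objects, offsets and pitches
+ * of each enabled colour buffer and the zeta buffer at their backing
+ * buffer objects.
+ */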
+static boolean
+nv40_state_framebuffer_validate(struct nv40_context *nv40)
+{
+ struct nouveau_channel *chan = nv40->screen->base.channel;
+ struct nouveau_grobj *curie = nv40->screen->curie;
+ struct pipe_framebuffer_state *fb = &nv40->framebuffer;
+ struct nv04_surface *rt[4], *zeta;
+ uint32_t rt_enable, rt_format;
+ int i, colour_format = 0, zeta_format = 0;
+ struct nouveau_stateobj *so = so_new(64, 10);
+ unsigned rt_flags = NOUVEAU_BO_RDWR | NOUVEAU_BO_VRAM;
+ unsigned w = fb->width;
+ unsigned h = fb->height;
+
+ rt_enable = 0;
+ for (i = 0; i < fb->nr_cbufs; i++) {
+ if (colour_format) {
+ assert(colour_format == fb->cbufs[i]->format);
+ } else {
+ colour_format = fb->cbufs[i]->format;
+ }
+
+ rt_enable |= (NV40TCL_RT_ENABLE_COLOR0 << i);
+ rt[i] = (struct nv04_surface *)fb->cbufs[i];
+ }
+
+ if (rt_enable & (NV40TCL_RT_ENABLE_COLOR1 | NV40TCL_RT_ENABLE_COLOR2 |
+ NV40TCL_RT_ENABLE_COLOR3))
+ rt_enable |= NV40TCL_RT_ENABLE_MRT;
+
+ if (fb->zsbuf) {
+ zeta_format = fb->zsbuf->format;
+ zeta = (struct nv04_surface *)fb->zsbuf;
+ }
+
+ if (!(rt[0]->base.texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)) {
+ assert(!(fb->width & (fb->width - 1)) && !(fb->height & (fb->height - 1)));
+ for (i = 1; i < fb->nr_cbufs; i++)
+ assert(!(rt[i]->base.texture->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR));
+
+ rt_format = NV40TCL_RT_FORMAT_TYPE_SWIZZLED |
+ log2i(fb->width) << NV40TCL_RT_FORMAT_LOG2_WIDTH_SHIFT |
+ log2i(fb->height) << NV40TCL_RT_FORMAT_LOG2_HEIGHT_SHIFT;
+ }
+ else
+ rt_format = NV40TCL_RT_FORMAT_TYPE_LINEAR;
+
+ switch (colour_format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case 0:
+ rt_format |= NV40TCL_RT_FORMAT_COLOR_A8R8G8B8;
+ break;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ rt_format |= NV40TCL_RT_FORMAT_COLOR_R5G6B5;
+ break;
+ default:
+ assert(0);
+ }
+
+ switch (zeta_format) {
+ case PIPE_FORMAT_Z16_UNORM:
+ rt_format |= NV40TCL_RT_FORMAT_ZETA_Z16;
+ break;
+ case PIPE_FORMAT_Z24S8_UNORM:
+ case PIPE_FORMAT_Z24X8_UNORM:
+ case 0:
+ rt_format |= NV40TCL_RT_FORMAT_ZETA_Z24S8;
+ break;
+ default:
+ assert(0);
+ }
+
+ if (rt_enable & NV40TCL_RT_ENABLE_COLOR0) {
+ so_method(so, curie, NV40TCL_DMA_COLOR0, 1);
+ so_reloc (so, nv40_surface_buffer(&rt[0]->base), 0,
+ rt_flags | NOUVEAU_BO_OR,
+ chan->vram->handle, chan->gart->handle);
+ so_method(so, curie, NV40TCL_COLOR0_PITCH, 2);
+ so_data (so, rt[0]->pitch);
+ so_reloc (so, nv40_surface_buffer(&rt[0]->base),
+ rt[0]->base.offset, rt_flags | NOUVEAU_BO_LOW,
+ 0, 0);
+ }
+
+ if (rt_enable & NV40TCL_RT_ENABLE_COLOR1) {
+ so_method(so, curie, NV40TCL_DMA_COLOR1, 1);
+ so_reloc (so, nv40_surface_buffer(&rt[1]->base), 0,
+ rt_flags | NOUVEAU_BO_OR,
+ chan->vram->handle, chan->gart->handle);
+ so_method(so, curie, NV40TCL_COLOR1_OFFSET, 2);
+ so_reloc (so, nv40_surface_buffer(&rt[1]->base),
+ rt[1]->base.offset, rt_flags | NOUVEAU_BO_LOW,
+ 0, 0);
+ so_data (so, rt[1]->pitch);
+ }
+
+ if (rt_enable & NV40TCL_RT_ENABLE_COLOR2) {
+ so_method(so, curie, NV40TCL_DMA_COLOR2, 1);
+ so_reloc (so, nv40_surface_buffer(&rt[2]->base), 0,
+ rt_flags | NOUVEAU_BO_OR,
+ chan->vram->handle, chan->gart->handle);
+ so_method(so, curie, NV40TCL_COLOR2_OFFSET, 1);
+ so_reloc (so, nv40_surface_buffer(&rt[2]->base),
+ rt[2]->base.offset, rt_flags | NOUVEAU_BO_LOW,
+ 0, 0);
+ so_method(so, curie, NV40TCL_COLOR2_PITCH, 1);
+ so_data (so, rt[2]->pitch);
+ }
+
+ if (rt_enable & NV40TCL_RT_ENABLE_COLOR3) {
+ so_method(so, curie, NV40TCL_DMA_COLOR3, 1);
+ so_reloc (so, nv40_surface_buffer(&rt[3]->base), 0,
+ rt_flags | NOUVEAU_BO_OR,
+ chan->vram->handle, chan->gart->handle);
+ so_method(so, curie, NV40TCL_COLOR3_OFFSET, 1);
+ so_reloc (so, nv40_surface_buffer(&rt[3]->base),
+ rt[3]->base.offset, rt_flags | NOUVEAU_BO_LOW,
+ 0, 0);
+ so_method(so, curie, NV40TCL_COLOR3_PITCH, 1);
+ so_data (so, rt[3]->pitch);
+ }
+
+ if (zeta_format) {
+ so_method(so, curie, NV40TCL_DMA_ZETA, 1);
+ so_reloc (so, nv40_surface_buffer(&zeta->base), 0,
+ rt_flags | NOUVEAU_BO_OR,
+ chan->vram->handle, chan->gart->handle);
+ so_method(so, curie, NV40TCL_ZETA_OFFSET, 1);
+ so_reloc (so, nv40_surface_buffer(&zeta->base),
+ zeta->base.offset, rt_flags | NOUVEAU_BO_LOW, 0, 0);
+ so_method(so, curie, NV40TCL_ZETA_PITCH, 1);
+ so_data (so, zeta->pitch);
+ }
+
+ so_method(so, curie, NV40TCL_RT_ENABLE, 1);
+ so_data (so, rt_enable);
+ so_method(so, curie, NV40TCL_RT_HORIZ, 3);
+ so_data (so, (w << 16) | 0);
+ so_data (so, (h << 16) | 0);
+ so_data (so, rt_format);
+ so_method(so, curie, NV40TCL_VIEWPORT_HORIZ, 2);
+ so_data (so, (w << 16) | 0);
+ so_data (so, (h << 16) | 0);
+ so_method(so, curie, NV40TCL_VIEWPORT_CLIP_HORIZ(0), 2);
+ so_data (so, ((w - 1) << 16) | 0);
+ so_data (so, ((h - 1) << 16) | 0);
+ so_method(so, curie, 0x1d88, 1);
+ so_data (so, (1 << 12) | h);
+
+ so_ref(so, &nv40->state.hw[NV40_STATE_FB]);
+ so_ref(NULL, &so);
+ return TRUE;
+}
+
+struct nv40_state_entry nv40_state_framebuffer = {
+ .validate = nv40_state_framebuffer_validate,
+ .dirty = {
+ .pipe = NV40_NEW_FB,
+ .hw = NV40_STATE_FB
+ }
+};
diff --git a/src/gallium/drivers/nv40/nv40_state_rasterizer.c b/src/gallium/drivers/nv40/nv40_state_rasterizer.c
new file mode 100644
index 0000000000..9ecda5990f
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_state_rasterizer.c
@@ -0,0 +1,17 @@
+#include "nv40_context.h"
+
+static boolean
+nv40_state_rasterizer_validate(struct nv40_context *nv40)
+{
+ so_ref(nv40->rasterizer->so,
+ &nv40->state.hw[NV40_STATE_RAST]);
+ return TRUE;
+}
+
+struct nv40_state_entry nv40_state_rasterizer = {
+ .validate = nv40_state_rasterizer_validate,
+ .dirty = {
+ .pipe = NV40_NEW_RAST,
+ .hw = NV40_STATE_RAST
+ }
+};
diff --git a/src/gallium/drivers/nv40/nv40_state_scissor.c b/src/gallium/drivers/nv40/nv40_state_scissor.c
new file mode 100644
index 0000000000..cf58d33906
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_state_scissor.c
@@ -0,0 +1,36 @@
+#include "nv40_context.h"
+
+static boolean
+nv40_state_scissor_validate(struct nv40_context *nv40)
+{
+ struct pipe_rasterizer_state *rast = &nv40->rasterizer->pipe;
+ struct pipe_scissor_state *s = &nv40->scissor;
+ struct nouveau_stateobj *so;
+
+ if (nv40->state.hw[NV40_STATE_SCISSOR] &&
+ (rast->scissor == 0 && nv40->state.scissor_enabled == 0))
+ return FALSE;
+ nv40->state.scissor_enabled = rast->scissor;
+
+ so = so_new(3, 0);
+ so_method(so, nv40->screen->curie, NV40TCL_SCISSOR_HORIZ, 2);
+ if (nv40->state.scissor_enabled) {
+ so_data (so, ((s->maxx - s->minx) << 16) | s->minx);
+ so_data (so, ((s->maxy - s->miny) << 16) | s->miny);
+ } else {
+ so_data (so, 4096 << 16);
+ so_data (so, 4096 << 16);
+ }
+
+ so_ref(so, &nv40->state.hw[NV40_STATE_SCISSOR]);
+ so_ref(NULL, &so);
+ return TRUE;
+}
+
+struct nv40_state_entry nv40_state_scissor = {
+ .validate = nv40_state_scissor_validate,
+ .dirty = {
+ .pipe = NV40_NEW_SCISSOR | NV40_NEW_RAST,
+ .hw = NV40_STATE_SCISSOR
+ }
+};
diff --git a/src/gallium/drivers/nv40/nv40_state_stipple.c b/src/gallium/drivers/nv40/nv40_state_stipple.c
new file mode 100644
index 0000000000..b51024ad9b
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_state_stipple.c
@@ -0,0 +1,39 @@
+#include "nv40_context.h"
+
+static boolean
+nv40_state_stipple_validate(struct nv40_context *nv40)
+{
+ struct pipe_rasterizer_state *rast = &nv40->rasterizer->pipe;
+ struct nouveau_grobj *curie = nv40->screen->curie;
+ struct nouveau_stateobj *so;
+
+ if (nv40->state.hw[NV40_STATE_STIPPLE] &&
+ (rast->poly_stipple_enable == 0 && nv40->state.stipple_enabled == 0))
+ return FALSE;
+ nv40->state.stipple_enabled = rast->poly_stipple_enable;
+
+ if (rast->poly_stipple_enable) {
+ unsigned i;
+
+ so = so_new(35, 0);
+ so_method(so, curie, NV40TCL_POLYGON_STIPPLE_ENABLE, 1);
+ so_data (so, 1);
+ so_method(so, curie, NV40TCL_POLYGON_STIPPLE_PATTERN(0), 32);
+ for (i = 0; i < 32; i++)
+ so_data(so, nv40->stipple[i]);
+ } else {
+ so = so_new(2, 0);
+ so_method(so, curie, NV40TCL_POLYGON_STIPPLE_ENABLE, 1);
+ so_data (so, 0);
+ }
+
+ so_ref(so, &nv40->state.hw[NV40_STATE_STIPPLE]);
+ so_ref(NULL, &so);
+ return TRUE;
+}
+
+struct nv40_state_entry nv40_state_stipple = {
+ .validate = nv40_state_stipple_validate,
+ .dirty = {
+ .pipe = NV40_NEW_STIPPLE | NV40_NEW_RAST,
+ .hw = NV40_STATE_STIPPLE,
+ }
+};
diff --git a/src/gallium/drivers/nv40/nv40_state_viewport.c b/src/gallium/drivers/nv40/nv40_state_viewport.c
new file mode 100644
index 0000000000..665d2d5fca
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_state_viewport.c
@@ -0,0 +1,69 @@
+#include "nv40_context.h"
+
+static boolean
+nv40_state_viewport_validate(struct nv40_context *nv40)
+{
+ struct pipe_viewport_state *vpt = &nv40->viewport;
+ struct nouveau_stateobj *so;
+ unsigned bypass;
+
+ if (nv40->render_mode == HW &&
+ !nv40->rasterizer->pipe.bypass_vs_clip_and_viewport)
+ bypass = 0;
+ else
+ bypass = 1;
+
+ if (nv40->state.hw[NV40_STATE_VIEWPORT] &&
+ (bypass || !(nv40->dirty & NV40_NEW_VIEWPORT)) &&
+ nv40->state.viewport_bypass == bypass)
+ return FALSE;
+ nv40->state.viewport_bypass = bypass;
+
+ so = so_new(11, 0);
+ if (!bypass) {
+ so_method(so, nv40->screen->curie,
+ NV40TCL_VIEWPORT_TRANSLATE_X, 8);
+ so_data (so, fui(vpt->translate[0]));
+ so_data (so, fui(vpt->translate[1]));
+ so_data (so, fui(vpt->translate[2]));
+ so_data (so, fui(vpt->translate[3]));
+ so_data (so, fui(vpt->scale[0]));
+ so_data (so, fui(vpt->scale[1]));
+ so_data (so, fui(vpt->scale[2]));
+ so_data (so, fui(vpt->scale[3]));
+ so_method(so, nv40->screen->curie, 0x1d78, 1);
+ so_data (so, 1);
+ } else {
+ so_method(so, nv40->screen->curie,
+ NV40TCL_VIEWPORT_TRANSLATE_X, 8);
+ so_data (so, fui(0.0));
+ so_data (so, fui(0.0));
+ so_data (so, fui(0.0));
+ so_data (so, fui(0.0));
+ so_data (so, fui(1.0));
+ so_data (so, fui(1.0));
+ so_data (so, fui(1.0));
+ so_data (so, fui(0.0));
+ /* Not entirely certain what this is yet. The DDX uses this
+ * value as well, and it fixes rendering when pre-transformed
+ * vertices are passed to the GPU. Best guess is that it
+ * bypasses some culling/clipping stage. Worth noting that
+ * points/lines are unaffected by whatever this value fixes;
+ * only filled polygons are affected.
+ */
+ so_method(so, nv40->screen->curie, 0x1d78, 1);
+ so_data (so, 0x110);
+ }
+
+ so_ref(so, &nv40->state.hw[NV40_STATE_VIEWPORT]);
+ so_ref(NULL, &so);
+ return TRUE;
+}
+
+struct nv40_state_entry nv40_state_viewport = {
+ .validate = nv40_state_viewport_validate,
+ .dirty = {
+ .pipe = NV40_NEW_VIEWPORT | NV40_NEW_RAST,
+ .hw = NV40_STATE_VIEWPORT
+ }
+};
diff --git a/src/gallium/drivers/nv40/nv40_state_zsa.c b/src/gallium/drivers/nv40/nv40_state_zsa.c
new file mode 100644
index 0000000000..fb760677c8
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_state_zsa.c
@@ -0,0 +1,17 @@
+#include "nv40_context.h"
+
+static boolean
+nv40_state_zsa_validate(struct nv40_context *nv40)
+{
+ so_ref(nv40->zsa->so,
+ &nv40->state.hw[NV40_STATE_ZSA]);
+ return TRUE;
+}
+
+struct nv40_state_entry nv40_state_zsa = {
+ .validate = nv40_state_zsa_validate,
+ .dirty = {
+ .pipe = NV40_NEW_ZSA,
+ .hw = NV40_STATE_ZSA
+ }
+};
diff --git a/src/gallium/drivers/nv40/nv40_surface.c b/src/gallium/drivers/nv40/nv40_surface.c
new file mode 100644
index 0000000000..a596547974
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_surface.c
@@ -0,0 +1,64 @@
+
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+
+#include "util/u_tile.h"
+
+#include "nv40_context.h"
+
+static void
+nv40_surface_copy(struct pipe_context *pipe,
+ struct pipe_surface *dest, unsigned destx, unsigned desty,
+ struct pipe_surface *src, unsigned srcx, unsigned srcy,
+ unsigned width, unsigned height)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ struct nv04_surface_2d *eng2d = nv40->screen->eng2d;
+
+ eng2d->copy(eng2d, dest, destx, desty, src, srcx, srcy, width, height);
+}
+
+static void
+nv40_surface_fill(struct pipe_context *pipe, struct pipe_surface *dest,
+ unsigned destx, unsigned desty, unsigned width,
+ unsigned height, unsigned value)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ struct nv04_surface_2d *eng2d = nv40->screen->eng2d;
+
+ eng2d->fill(eng2d, dest, destx, desty, width, height, value);
+}
+
+void
+nv40_init_surface_functions(struct nv40_context *nv40)
+{
+ nv40->pipe.surface_copy = nv40_surface_copy;
+ nv40->pipe.surface_fill = nv40_surface_fill;
+}
diff --git a/src/gallium/drivers/nv40/nv40_transfer.c b/src/gallium/drivers/nv40/nv40_transfer.c
new file mode 100644
index 0000000000..6d92ac3db9
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_transfer.c
@@ -0,0 +1,195 @@
+#include <pipe/p_state.h>
+#include <pipe/p_defines.h>
+#include <pipe/p_inlines.h>
+#include <util/u_memory.h>
+#include <nouveau/nouveau_winsys.h>
+#include "nv40_context.h"
+#include "nv40_screen.h"
+#include "nv40_state.h"
+
+struct nv40_transfer {
+ struct pipe_transfer base;
+ struct pipe_surface *surface;
+ bool direct;
+};
+
+static unsigned nv40_usage_tx_to_buf(unsigned tx_usage)
+{
+ switch (tx_usage) {
+ case PIPE_TRANSFER_READ:
+ return PIPE_BUFFER_USAGE_CPU_READ;
+ case PIPE_TRANSFER_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_WRITE;
+ case PIPE_TRANSFER_READ_WRITE:
+ return PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+ default:
+ assert(0);
+ }
+
+ return -1;
+}
+
+static void
+nv40_compatible_transfer_tex(struct pipe_texture *pt, unsigned level,
+ struct pipe_texture *template)
+{
+ memset(template, 0, sizeof(struct pipe_texture));
+ template->target = pt->target;
+ template->format = pt->format;
+ template->width[0] = pt->width[level];
+ template->height[0] = pt->height[level];
+ template->depth[0] = 1;
+ template->block = pt->block;
+ template->nblocksx[0] = pt->nblocksx[level];
+ template->nblocksy[0] = pt->nblocksy[level];
+ template->last_level = 0;
+ template->nr_samples = pt->nr_samples;
+
+ template->tex_usage = PIPE_TEXTURE_USAGE_DYNAMIC |
+ NOUVEAU_TEXTURE_USAGE_LINEAR;
+}
+
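+/* Create a transfer for a texture mipmap level. Linear, dynamic textures are
+ * mapped directly through a surface; everything else goes through a linear
+ * temporary texture, populated with the 2D engine for reads and copied back
+ * to the real texture on destruction for writes.
+ */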
+static struct pipe_transfer *
+nv40_transfer_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ enum pipe_transfer_usage usage,
+ unsigned x, unsigned y, unsigned w, unsigned h)
+{
+ struct nv40_miptree *mt = (struct nv40_miptree *)pt;
+ struct nv40_transfer *tx;
+ struct pipe_texture tx_tex_template, *tx_tex;
+
+ tx = CALLOC_STRUCT(nv40_transfer);
+ if (!tx)
+ return NULL;
+
+ pipe_texture_reference(&tx->base.texture, pt);
+ tx->base.format = pt->format;
+ tx->base.x = x;
+ tx->base.y = y;
+ tx->base.width = w;
+ tx->base.height = h;
+ tx->base.block = pt->block;
+ tx->base.nblocksx = pt->nblocksx[level];
+ tx->base.nblocksy = pt->nblocksy[level];
+ tx->base.stride = mt->level[level].pitch;
+ tx->base.usage = usage;
+ tx->base.face = face;
+ tx->base.level = level;
+ tx->base.zslice = zslice;
+
+ /* Direct access to texture */
+ if ((pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC ||
+ debug_get_bool_option("NOUVEAU_NO_TRANSFER", TRUE/*XXX:FALSE*/)) &&
+ pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR)
+ {
+ tx->direct = true;
+ tx->surface = pscreen->get_tex_surface(pscreen, pt,
+ face, level, zslice,
+ nv40_usage_tx_to_buf(usage));
+ return &tx->base;
+ }
+
+ tx->direct = false;
+
+ nv40_compatible_transfer_tex(pt, level, &tx_tex_template);
+
+ tx_tex = pscreen->texture_create(pscreen, &tx_tex_template);
+ if (!tx_tex)
+ {
+ FREE(tx);
+ return NULL;
+ }
+
+ tx->surface = pscreen->get_tex_surface(pscreen, tx_tex,
+ 0, 0, 0,
+ nv40_usage_tx_to_buf(usage));
+
+ pipe_texture_reference(&tx_tex, NULL);
+
+ if (!tx->surface)
+ {
+ pipe_surface_reference(&tx->surface, NULL);
+ FREE(tx);
+ return NULL;
+ }
+
+ if (usage != PIPE_TRANSFER_WRITE) {
+ struct nv40_screen *nvscreen = nv40_screen(pscreen);
+ struct pipe_surface *src;
+
+ src = pscreen->get_tex_surface(pscreen, pt,
+ face, level, zslice,
+ PIPE_BUFFER_USAGE_GPU_READ);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ /* TODO: Check if SIFM can un-swizzle */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ tx->surface, 0, 0,
+ src, 0, 0,
+ src->width, src->height);
+
+ pipe_surface_reference(&src, NULL);
+ }
+
+ return &tx->base;
+}
+
+static void
+nv40_transfer_del(struct pipe_transfer *ptx)
+{
+ struct nv40_transfer *tx = (struct nv40_transfer *)ptx;
+
+ if (!tx->direct && ptx->usage != PIPE_TRANSFER_READ) {
+ struct pipe_screen *pscreen = ptx->texture->screen;
+ struct nv40_screen *nvscreen = nv40_screen(pscreen);
+ struct pipe_surface *dst;
+
+ dst = pscreen->get_tex_surface(pscreen, ptx->texture,
+ ptx->face, ptx->level, ptx->zslice,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /* TODO: Check if SIFM can deal with x,y,w,h when swizzling */
+ nvscreen->eng2d->copy(nvscreen->eng2d,
+ dst, 0, 0,
+ tx->surface, 0, 0,
+ dst->width, dst->height);
+
+ pipe_surface_reference(&dst, NULL);
+ }
+
+ pipe_surface_reference(&tx->surface, NULL);
+ pipe_texture_reference(&ptx->texture, NULL);
+ FREE(ptx);
+}
+
+static void *
+nv40_transfer_map(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv40_transfer *tx = (struct nv40_transfer *)ptx;
+ struct nv04_surface *ns = (struct nv04_surface *)tx->surface;
+ struct nv40_miptree *mt = (struct nv40_miptree *)tx->surface->texture;
+ void *map = pipe_buffer_map(pscreen, mt->buffer,
+ nv40_usage_tx_to_buf(ptx->usage));
+
+ return (uint8_t *)map + ns->base.offset +
+ ptx->y * ns->pitch + ptx->x * ptx->block.size;
+}
+
+static void
+nv40_transfer_unmap(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv40_transfer *tx = (struct nv40_transfer *)ptx;
+ struct nv40_miptree *mt = (struct nv40_miptree *)tx->surface->texture;
+
+ pipe_buffer_unmap(pscreen, mt->buffer);
+}
+
+void
+nv40_screen_init_transfer_functions(struct pipe_screen *pscreen)
+{
+ pscreen->get_tex_transfer = nv40_transfer_new;
+ pscreen->tex_transfer_destroy = nv40_transfer_del;
+ pscreen->transfer_map = nv40_transfer_map;
+ pscreen->transfer_unmap = nv40_transfer_unmap;
+}
diff --git a/src/gallium/drivers/nv40/nv40_vbo.c b/src/gallium/drivers/nv40/nv40_vbo.c
new file mode 100644
index 0000000000..b2753b8e2e
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_vbo.c
@@ -0,0 +1,562 @@
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+
+#include "nv40_context.h"
+#include "nv40_state.h"
+
+#include "nouveau/nouveau_channel.h"
+#include "nouveau/nouveau_pushbuf.h"
+#include "nouveau/nouveau_util.h"
+
+#define FORCE_SWTNL 0
+
+static INLINE int
+nv40_vbo_format_to_hw(enum pipe_format pipe, unsigned *fmt, unsigned *ncomp)
+{
+ switch (pipe) {
+ case PIPE_FORMAT_R32_FLOAT:
+ case PIPE_FORMAT_R32G32_FLOAT:
+ case PIPE_FORMAT_R32G32B32_FLOAT:
+ case PIPE_FORMAT_R32G32B32A32_FLOAT:
+ *fmt = NV40TCL_VTXFMT_TYPE_FLOAT;
+ break;
+ case PIPE_FORMAT_R8_UNORM:
+ case PIPE_FORMAT_R8G8_UNORM:
+ case PIPE_FORMAT_R8G8B8_UNORM:
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ *fmt = NV40TCL_VTXFMT_TYPE_UBYTE;
+ break;
+ case PIPE_FORMAT_R16_SSCALED:
+ case PIPE_FORMAT_R16G16_SSCALED:
+ case PIPE_FORMAT_R16G16B16_SSCALED:
+ case PIPE_FORMAT_R16G16B16A16_SSCALED:
+ *fmt = NV40TCL_VTXFMT_TYPE_USHORT;
+ break;
+ default:
+ NOUVEAU_ERR("Unknown format %s\n", pf_name(pipe));
+ return 1;
+ }
+
+ switch (pipe) {
+ case PIPE_FORMAT_R8_UNORM:
+ case PIPE_FORMAT_R32_FLOAT:
+ case PIPE_FORMAT_R16_SSCALED:
+ *ncomp = 1;
+ break;
+ case PIPE_FORMAT_R8G8_UNORM:
+ case PIPE_FORMAT_R32G32_FLOAT:
+ case PIPE_FORMAT_R16G16_SSCALED:
+ *ncomp = 2;
+ break;
+ case PIPE_FORMAT_R8G8B8_UNORM:
+ case PIPE_FORMAT_R32G32B32_FLOAT:
+ case PIPE_FORMAT_R16G16B16_SSCALED:
+ *ncomp = 3;
+ break;
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ case PIPE_FORMAT_R32G32B32A32_FLOAT:
+ case PIPE_FORMAT_R16G16B16A16_SSCALED:
+ *ncomp = 4;
+ break;
+ default:
+ NOUVEAU_ERR("Unknown format %s\n", pf_name(pipe));
+ return 1;
+ }
+
+ return 0;
+}
+
+static boolean
+nv40_vbo_set_idxbuf(struct nv40_context *nv40, struct pipe_buffer *ib,
+ unsigned ib_size)
+{
+ struct pipe_screen *pscreen = &nv40->screen->base.base;
+ unsigned type;
+
+ if (!ib) {
+ nv40->idxbuf = NULL;
+ nv40->idxbuf_format = 0xdeadbeef;
+ return FALSE;
+ }
+
+ if (!pscreen->get_param(pscreen, NOUVEAU_CAP_HW_IDXBUF) || ib_size == 1)
+ return FALSE;
+
+ switch (ib_size) {
+ case 2:
+ type = NV40TCL_IDXBUF_FORMAT_TYPE_U16;
+ break;
+ case 4:
+ type = NV40TCL_IDXBUF_FORMAT_TYPE_U32;
+ break;
+ default:
+ return FALSE;
+ }
+
+ if (ib != nv40->idxbuf ||
+ type != nv40->idxbuf_format) {
+ nv40->dirty |= NV40_NEW_ARRAYS;
+ nv40->idxbuf = ib;
+ nv40->idxbuf_format = type;
+ }
+
+ return TRUE;
+}
+
+static boolean
+nv40_vbo_static_attrib(struct nv40_context *nv40, struct nouveau_stateobj *so,
+ int attrib, struct pipe_vertex_element *ve,
+ struct pipe_vertex_buffer *vb)
+{
+ struct pipe_screen *pscreen = nv40->pipe.screen;
+ struct nouveau_grobj *curie = nv40->screen->curie;
+ unsigned type, ncomp;
+ void *map;
+
+ if (nv40_vbo_format_to_hw(ve->src_format, &type, &ncomp))
+ return FALSE;
+
+ map = pipe_buffer_map(pscreen, vb->buffer, PIPE_BUFFER_USAGE_CPU_READ);
+ map = (uint8_t *)map + vb->buffer_offset + ve->src_offset;
+
+ switch (type) {
+ case NV40TCL_VTXFMT_TYPE_FLOAT:
+ {
+ float *v = map;
+
+ switch (ncomp) {
+ case 4:
+ so_method(so, curie, NV40TCL_VTX_ATTR_4F_X(attrib), 4);
+ so_data (so, fui(v[0]));
+ so_data (so, fui(v[1]));
+ so_data (so, fui(v[2]));
+ so_data (so, fui(v[3]));
+ break;
+ case 3:
+ so_method(so, curie, NV40TCL_VTX_ATTR_3F_X(attrib), 3);
+ so_data (so, fui(v[0]));
+ so_data (so, fui(v[1]));
+ so_data (so, fui(v[2]));
+ break;
+ case 2:
+ so_method(so, curie, NV40TCL_VTX_ATTR_2F_X(attrib), 2);
+ so_data (so, fui(v[0]));
+ so_data (so, fui(v[1]));
+ break;
+ case 1:
+ so_method(so, curie, NV40TCL_VTX_ATTR_1F(attrib), 1);
+ so_data (so, fui(v[0]));
+ break;
+ default:
+ pipe_buffer_unmap(pscreen, vb->buffer);
+ return FALSE;
+ }
+ }
+ break;
+ default:
+ pipe_buffer_unmap(pscreen, vb->buffer);
+ return FALSE;
+ }
+
+ pipe_buffer_unmap(pscreen, vb->buffer);
+
+ return TRUE;
+}
+
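+/* Hardware drawing paths. Vertices are submitted in batches encoded as
+ * ((count - 1) << 24) | start; nouveau_vbuf_split() limits each run to what
+ * fits in the remaining pushbuf space, and the ring is fired and the loop
+ * retried when no vertices fit at all.
+ */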
+boolean
+nv40_draw_arrays(struct pipe_context *pipe,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ struct nouveau_channel *chan = nv40->screen->base.channel;
+ unsigned restart;
+
+ nv40_vbo_set_idxbuf(nv40, NULL, 0);
+ if (FORCE_SWTNL || !nv40_state_validate(nv40)) {
+ return nv40_draw_elements_swtnl(pipe, NULL, 0,
+ mode, start, count);
+ }
+
+ while (count) {
+ unsigned vc, nr;
+
+ nv40_state_emit(nv40);
+
+ vc = nouveau_vbuf_split(chan->pushbuf->remaining, 6, 256,
+ mode, start, count, &restart);
+ if (!vc) {
+ FIRE_RING(NULL);
+ continue;
+ }
+
+ BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
+ OUT_RING (nvgl_primitive(mode));
+
+ nr = (vc & 0xff);
+ if (nr) {
+ BEGIN_RING(curie, NV40TCL_VB_VERTEX_BATCH, 1);
+ OUT_RING (((nr - 1) << 24) | start);
+ start += nr;
+ }
+
+ nr = vc >> 8;
+ while (nr) {
+ unsigned push = nr > 2047 ? 2047 : nr;
+
+ nr -= push;
+
+ BEGIN_RING_NI(curie, NV40TCL_VB_VERTEX_BATCH, push);
+ while (push--) {
+ OUT_RING(((0x100 - 1) << 24) | start);
+ start += 0x100;
+ }
+ }
+
+ BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
+ OUT_RING (0);
+
+ count -= vc;
+ start = restart;
+ }
+
+ pipe->flush(pipe, 0, NULL);
+ return TRUE;
+}
+
+static INLINE void
+nv40_draw_elements_u08(struct nv40_context *nv40, void *ib,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct nouveau_channel *chan = nv40->screen->base.channel;
+
+ while (count) {
+ uint8_t *elts = (uint8_t *)ib + start;
+ unsigned vc, push, restart;
+
+ nv40_state_emit(nv40);
+
+ vc = nouveau_vbuf_split(chan->pushbuf->remaining, 6, 2,
+ mode, start, count, &restart);
+ if (vc == 0) {
+ FIRE_RING(NULL);
+ continue;
+ }
+ count -= vc;
+
+ BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
+ OUT_RING (nvgl_primitive(mode));
+
+ if (vc & 1) {
+ BEGIN_RING(curie, NV40TCL_VB_ELEMENT_U32, 1);
+ OUT_RING (elts[0]);
+ elts++; vc--;
+ }
+
+ while (vc) {
+ unsigned i;
+
+ push = MIN2(vc, 2047 * 2);
+
+ BEGIN_RING_NI(curie, NV40TCL_VB_ELEMENT_U16, push >> 1);
+ for (i = 0; i < push; i+=2)
+ OUT_RING((elts[i+1] << 16) | elts[i]);
+
+ vc -= push;
+ elts += push;
+ }
+
+ BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
+ OUT_RING (0);
+
+ start = restart;
+ }
+}
+
+static INLINE void
+nv40_draw_elements_u16(struct nv40_context *nv40, void *ib,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct nouveau_channel *chan = nv40->screen->base.channel;
+
+ while (count) {
+ uint16_t *elts = (uint16_t *)ib + start;
+ unsigned vc, push, restart;
+
+ nv40_state_emit(nv40);
+
+ vc = nouveau_vbuf_split(chan->pushbuf->remaining, 6, 2,
+ mode, start, count, &restart);
+ if (vc == 0) {
+ FIRE_RING(NULL);
+ continue;
+ }
+ count -= vc;
+
+ BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
+ OUT_RING (nvgl_primitive(mode));
+
+ if (vc & 1) {
+ BEGIN_RING(curie, NV40TCL_VB_ELEMENT_U32, 1);
+ OUT_RING (elts[0]);
+ elts++; vc--;
+ }
+
+ while (vc) {
+ unsigned i;
+
+ push = MIN2(vc, 2047 * 2);
+
+ BEGIN_RING_NI(curie, NV40TCL_VB_ELEMENT_U16, push >> 1);
+ for (i = 0; i < push; i+=2)
+ OUT_RING((elts[i+1] << 16) | elts[i]);
+
+ vc -= push;
+ elts += push;
+ }
+
+ BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
+ OUT_RING (0);
+
+ start = restart;
+ }
+}
+
+static INLINE void
+nv40_draw_elements_u32(struct nv40_context *nv40, void *ib,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct nouveau_channel *chan = nv40->screen->base.channel;
+
+ while (count) {
+ uint32_t *elts = (uint32_t *)ib + start;
+ unsigned vc, push, restart;
+
+ nv40_state_emit(nv40);
+
+ vc = nouveau_vbuf_split(chan->pushbuf->remaining, 5, 1,
+ mode, start, count, &restart);
+ if (vc == 0) {
+ FIRE_RING(NULL);
+ continue;
+ }
+ count -= vc;
+
+ BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
+ OUT_RING (nvgl_primitive(mode));
+
+ while (vc) {
+ push = MIN2(vc, 2047);
+
+ BEGIN_RING_NI(curie, NV40TCL_VB_ELEMENT_U32, push);
+ OUT_RINGp (elts, push);
+
+ vc -= push;
+ elts += push;
+ }
+
+ BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
+ OUT_RING (0);
+
+ start = restart;
+ }
+}
+
+static boolean
+nv40_draw_elements_inline(struct pipe_context *pipe,
+ struct pipe_buffer *ib, unsigned ib_size,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ struct pipe_screen *pscreen = pipe->screen;
+ void *map;
+
+ map = pipe_buffer_map(pscreen, ib, PIPE_BUFFER_USAGE_CPU_READ);
+ if (!map) {
+ NOUVEAU_ERR("failed mapping ib\n");
+ return FALSE;
+ }
+
+ switch (ib_size) {
+ case 1:
+ nv40_draw_elements_u08(nv40, map, mode, start, count);
+ break;
+ case 2:
+ nv40_draw_elements_u16(nv40, map, mode, start, count);
+ break;
+ case 4:
+ nv40_draw_elements_u32(nv40, map, mode, start, count);
+ break;
+ default:
+ NOUVEAU_ERR("invalid idxbuf fmt %d\n", ib_size);
+ break;
+ }
+
+ pipe_buffer_unmap(pscreen, ib);
+ return TRUE;
+}
+
+static boolean
+nv40_draw_elements_vbo(struct pipe_context *pipe,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ struct nouveau_channel *chan = nv40->screen->base.channel;
+ unsigned restart;
+
+ while (count) {
+ unsigned nr, vc;
+
+ nv40_state_emit(nv40);
+
+ vc = nouveau_vbuf_split(chan->pushbuf->remaining, 6, 256,
+ mode, start, count, &restart);
+ if (!vc) {
+ FIRE_RING(NULL);
+ continue;
+ }
+
+ BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
+ OUT_RING (nvgl_primitive(mode));
+
+ nr = (vc & 0xff);
+ if (nr) {
+ BEGIN_RING(curie, NV40TCL_VB_INDEX_BATCH, 1);
+ OUT_RING (((nr - 1) << 24) | start);
+ start += nr;
+ }
+
+ nr = vc >> 8;
+ while (nr) {
+ unsigned push = nr > 2047 ? 2047 : nr;
+
+ nr -= push;
+
+ BEGIN_RING_NI(curie, NV40TCL_VB_INDEX_BATCH, push);
+ while (push--) {
+ OUT_RING(((0x100 - 1) << 24) | start);
+ start += 0x100;
+ }
+ }
+
+ BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
+ OUT_RING (0);
+
+ count -= vc;
+ start = restart;
+ }
+
+ return TRUE;
+}
+
+boolean
+nv40_draw_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer, unsigned indexSize,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct nv40_context *nv40 = nv40_context(pipe);
+ boolean idxbuf;
+
+ idxbuf = nv40_vbo_set_idxbuf(nv40, indexBuffer, indexSize);
+ if (FORCE_SWTNL || !nv40_state_validate(nv40)) {
+ return nv40_draw_elements_swtnl(pipe, NULL, 0,
+ mode, start, count);
+ }
+
+ if (idxbuf) {
+ nv40_draw_elements_vbo(pipe, mode, start, count);
+ } else {
+ nv40_draw_elements_inline(pipe, indexBuffer, indexSize,
+ mode, start, count);
+ }
+
+ pipe->flush(pipe, 0, NULL);
+ return TRUE;
+}
+
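+/* Build the vertex buffer and vertex format state objects. Zero-stride
+ * elements are emitted as static vertex attributes instead of arrays, and
+ * the index buffer, when present, is bound here as well. Unsupported
+ * configurations (edgeflags, unknown formats) fall back to swtnl.
+ */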
+static boolean
+nv40_vbo_validate(struct nv40_context *nv40)
+{
+ struct nouveau_stateobj *vtxbuf, *vtxfmt, *sattr = NULL;
+ struct nouveau_grobj *curie = nv40->screen->curie;
+ struct pipe_buffer *ib = nv40->idxbuf;
+ unsigned ib_format = nv40->idxbuf_format;
+ unsigned vb_flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD;
+ int hw;
+
+ if (nv40->edgeflags) {
+ nv40->fallback_swtnl |= NV40_NEW_ARRAYS;
+ return FALSE;
+ }
+
+ vtxbuf = so_new(20, 18);
+ so_method(vtxbuf, curie, NV40TCL_VTXBUF_ADDRESS(0), nv40->vtxelt_nr);
+ vtxfmt = so_new(17, 0);
+ so_method(vtxfmt, curie, NV40TCL_VTXFMT(0), nv40->vtxelt_nr);
+
+ for (hw = 0; hw < nv40->vtxelt_nr; hw++) {
+ struct pipe_vertex_element *ve;
+ struct pipe_vertex_buffer *vb;
+ unsigned type, ncomp;
+
+ ve = &nv40->vtxelt[hw];
+ vb = &nv40->vtxbuf[ve->vertex_buffer_index];
+
+ if (!vb->stride) {
+ if (!sattr)
+ sattr = so_new(16 * 5, 0);
+
+ if (nv40_vbo_static_attrib(nv40, sattr, hw, ve, vb)) {
+ so_data(vtxbuf, 0);
+ so_data(vtxfmt, NV40TCL_VTXFMT_TYPE_FLOAT);
+ continue;
+ }
+ }
+
+ if (nv40_vbo_format_to_hw(ve->src_format, &type, &ncomp)) {
+ nv40->fallback_swtnl |= NV40_NEW_ARRAYS;
+ so_ref(NULL, &vtxbuf);
+ so_ref(NULL, &vtxfmt);
+ return FALSE;
+ }
+
+ so_reloc(vtxbuf, nouveau_bo(vb->buffer),
+ vb->buffer_offset + ve->src_offset,
+ vb_flags | NOUVEAU_BO_LOW | NOUVEAU_BO_OR,
+ 0, NV40TCL_VTXBUF_ADDRESS_DMA1);
+ so_data (vtxfmt, ((vb->stride << NV40TCL_VTXFMT_STRIDE_SHIFT) |
+ (ncomp << NV40TCL_VTXFMT_SIZE_SHIFT) | type));
+ }
+
+ if (ib) {
+ struct nouveau_bo *bo = nouveau_bo(ib);
+
+ so_method(vtxbuf, curie, NV40TCL_IDXBUF_ADDRESS, 2);
+ so_reloc (vtxbuf, bo, 0, vb_flags | NOUVEAU_BO_LOW, 0, 0);
+ so_reloc (vtxbuf, bo, ib_format, vb_flags | NOUVEAU_BO_OR,
+ 0, NV40TCL_IDXBUF_FORMAT_DMA1);
+ }
+
+ so_method(vtxbuf, curie, 0x1710, 1);
+ so_data (vtxbuf, 0);
+
+ so_ref(vtxbuf, &nv40->state.hw[NV40_STATE_VTXBUF]);
+ so_ref(NULL, &vtxbuf);
+ nv40->state.dirty |= (1ULL << NV40_STATE_VTXBUF);
+ so_ref(vtxfmt, &nv40->state.hw[NV40_STATE_VTXFMT]);
+ so_ref(NULL, &vtxfmt);
+ nv40->state.dirty |= (1ULL << NV40_STATE_VTXFMT);
+ so_ref(sattr, &nv40->state.hw[NV40_STATE_VTXATTR]);
+ so_ref(NULL, &sattr);
+ nv40->state.dirty |= (1ULL << NV40_STATE_VTXATTR);
+ return FALSE;
+}
+
+struct nv40_state_entry nv40_state_vbo = {
+ .validate = nv40_vbo_validate,
+ .dirty = {
+ .pipe = NV40_NEW_ARRAYS,
+ .hw = 0,
+ }
+};
+
diff --git a/src/gallium/drivers/nv40/nv40_vertprog.c b/src/gallium/drivers/nv40/nv40_vertprog.c
new file mode 100644
index 0000000000..0382dbba8f
--- /dev/null
+++ b/src/gallium/drivers/nv40/nv40_vertprog.c
@@ -0,0 +1,1069 @@
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_util.h"
+
+#include "nv40_context.h"
+#include "nv40_state.h"
+
+/* TODO (at least...):
+ * 1. Indexed consts + ARL
+ * 2. NV_vp11, NV_vp2, NV_vp3 features
+ * - extra arith opcodes
+ * - branching
+ * - texture sampling
+ * - indexed attribs
+ * - indexed results
+ * 3. bugs
+ */
+
+#define SWZ_X 0
+#define SWZ_Y 1
+#define SWZ_Z 2
+#define SWZ_W 3
+#define MASK_X 8
+#define MASK_Y 4
+#define MASK_Z 2
+#define MASK_W 1
+#define MASK_ALL (MASK_X|MASK_Y|MASK_Z|MASK_W)
+#define DEF_SCALE 0
+#define DEF_CTEST 0
+#include "nv40_shader.h"
+
+#define swz(s,x,y,z,w) nv40_sr_swz((s), SWZ_##x, SWZ_##y, SWZ_##z, SWZ_##w)
+#define neg(s) nv40_sr_neg((s))
+#define abs(s) nv40_sr_abs((s))
+
+#define NV40_VP_INST_DEST_CLIP(n) ((~0 - 6) + (n))
+
+struct nv40_vpc {
+ struct nv40_vertex_program *vp;
+
+ struct nv40_vertex_program_exec *vpi;
+
+ unsigned r_temps;
+ unsigned r_temps_discard;
+ struct nv40_sreg r_result[PIPE_MAX_SHADER_OUTPUTS];
+ struct nv40_sreg *r_address;
+ struct nv40_sreg *r_temp;
+
+ struct nv40_sreg *imm;
+ unsigned nr_imm;
+
+ unsigned hpos_idx;
+};
+
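+/* Allocate a hardware temporary from the r_temps bitmask; temporaries taken
+ * here are also flagged in r_temps_discard so release_temps() frees them
+ * after the current instruction.
+ */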
+static struct nv40_sreg
+temp(struct nv40_vpc *vpc)
+{
+ int idx = ffs(~vpc->r_temps) - 1;
+
+ if (idx < 0) {
+ NOUVEAU_ERR("out of temps!!\n");
+ assert(0);
+ return nv40_sr(NV40SR_TEMP, 0);
+ }
+
+ vpc->r_temps |= (1 << idx);
+ vpc->r_temps_discard |= (1 << idx);
+ return nv40_sr(NV40SR_TEMP, idx);
+}
+
+static INLINE void
+release_temps(struct nv40_vpc *vpc)
+{
+ vpc->r_temps &= ~vpc->r_temps_discard;
+ vpc->r_temps_discard = 0;
+}
+
+static struct nv40_sreg
+constant(struct nv40_vpc *vpc, int pipe, float x, float y, float z, float w)
+{
+ struct nv40_vertex_program *vp = vpc->vp;
+ struct nv40_vertex_program_data *vpd;
+ int idx;
+
+ if (pipe >= 0) {
+ for (idx = 0; idx < vp->nr_consts; idx++) {
+ if (vp->consts[idx].index == pipe)
+ return nv40_sr(NV40SR_CONST, idx);
+ }
+ }
+
+ idx = vp->nr_consts++;
+ vp->consts = realloc(vp->consts, sizeof(*vpd) * vp->nr_consts);
+ vpd = &vp->consts[idx];
+
+ vpd->index = pipe;
+ vpd->value[0] = x;
+ vpd->value[1] = y;
+ vpd->value[2] = z;
+ vpd->value[3] = w;
+ return nv40_sr(NV40SR_CONST, idx);
+}
+
+#define arith(cc,s,o,d,m,s0,s1,s2) \
+ nv40_vp_arith((cc), (s), NV40_VP_INST_##o, (d), (m), (s0), (s1), (s2))
+
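+/* Pack a source operand into the 4-dword instruction word. The register
+ * type, index, swizzle and negate flag are assembled into a source field
+ * (absolute value is a bit in hw[0]) that is then split across hw[1..3]
+ * depending on which of the three source slots is being filled.
+ */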
+static void
+emit_src(struct nv40_vpc *vpc, uint32_t *hw, int pos, struct nv40_sreg src)
+{
+ struct nv40_vertex_program *vp = vpc->vp;
+ uint32_t sr = 0;
+
+ switch (src.type) {
+ case NV40SR_TEMP:
+ sr |= (NV40_VP_SRC_REG_TYPE_TEMP << NV40_VP_SRC_REG_TYPE_SHIFT);
+ sr |= (src.index << NV40_VP_SRC_TEMP_SRC_SHIFT);
+ break;
+ case NV40SR_INPUT:
+ sr |= (NV40_VP_SRC_REG_TYPE_INPUT <<
+ NV40_VP_SRC_REG_TYPE_SHIFT);
+ vp->ir |= (1 << src.index);
+ hw[1] |= (src.index << NV40_VP_INST_INPUT_SRC_SHIFT);
+ break;
+ case NV40SR_CONST:
+ sr |= (NV40_VP_SRC_REG_TYPE_CONST <<
+ NV40_VP_SRC_REG_TYPE_SHIFT);
+ assert(vpc->vpi->const_index == -1 ||
+ vpc->vpi->const_index == src.index);
+ vpc->vpi->const_index = src.index;
+ break;
+ case NV40SR_NONE:
+ sr |= (NV40_VP_SRC_REG_TYPE_INPUT <<
+ NV40_VP_SRC_REG_TYPE_SHIFT);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (src.negate)
+ sr |= NV40_VP_SRC_NEGATE;
+
+ if (src.abs)
+ hw[0] |= (1 << (21 + pos));
+
+ sr |= ((src.swz[0] << NV40_VP_SRC_SWZ_X_SHIFT) |
+ (src.swz[1] << NV40_VP_SRC_SWZ_Y_SHIFT) |
+ (src.swz[2] << NV40_VP_SRC_SWZ_Z_SHIFT) |
+ (src.swz[3] << NV40_VP_SRC_SWZ_W_SHIFT));
+
+ switch (pos) {
+ case 0:
+ hw[1] |= ((sr & NV40_VP_SRC0_HIGH_MASK) >>
+ NV40_VP_SRC0_HIGH_SHIFT) << NV40_VP_INST_SRC0H_SHIFT;
+ hw[2] |= (sr & NV40_VP_SRC0_LOW_MASK) <<
+ NV40_VP_INST_SRC0L_SHIFT;
+ break;
+ case 1:
+ hw[2] |= sr << NV40_VP_INST_SRC1_SHIFT;
+ break;
+ case 2:
+ hw[2] |= ((sr & NV40_VP_SRC2_HIGH_MASK) >>
+ NV40_VP_SRC2_HIGH_SHIFT) << NV40_VP_INST_SRC2H_SHIFT;
+ hw[3] |= (sr & NV40_VP_SRC2_LOW_MASK) <<
+ NV40_VP_INST_SRC2L_SHIFT;
+ break;
+ default:
+ assert(0);
+ }
+}
+
+static void
+emit_dst(struct nv40_vpc *vpc, uint32_t *hw, int slot, struct nv40_sreg dst)
+{
+ struct nv40_vertex_program *vp = vpc->vp;
+
+ switch (dst.type) {
+ case NV40SR_TEMP:
+ hw[3] |= NV40_VP_INST_DEST_MASK;
+ if (slot == 0) {
+ hw[0] |= (dst.index <<
+ NV40_VP_INST_VEC_DEST_TEMP_SHIFT);
+ } else {
+ hw[3] |= (dst.index <<
+ NV40_VP_INST_SCA_DEST_TEMP_SHIFT);
+ }
+ break;
+ case NV40SR_OUTPUT:
+ switch (dst.index) {
+ case NV40_VP_INST_DEST_COL0 : vp->or |= (1 << 0); break;
+ case NV40_VP_INST_DEST_COL1 : vp->or |= (1 << 1); break;
+ case NV40_VP_INST_DEST_BFC0 : vp->or |= (1 << 2); break;
+ case NV40_VP_INST_DEST_BFC1 : vp->or |= (1 << 3); break;
+ case NV40_VP_INST_DEST_FOGC : vp->or |= (1 << 4); break;
+ case NV40_VP_INST_DEST_PSZ : vp->or |= (1 << 5); break;
+ case NV40_VP_INST_DEST_TC(0): vp->or |= (1 << 14); break;
+ case NV40_VP_INST_DEST_TC(1): vp->or |= (1 << 15); break;
+ case NV40_VP_INST_DEST_TC(2): vp->or |= (1 << 16); break;
+ case NV40_VP_INST_DEST_TC(3): vp->or |= (1 << 17); break;
+ case NV40_VP_INST_DEST_TC(4): vp->or |= (1 << 18); break;
+ case NV40_VP_INST_DEST_TC(5): vp->or |= (1 << 19); break;
+ case NV40_VP_INST_DEST_TC(6): vp->or |= (1 << 20); break;
+ case NV40_VP_INST_DEST_TC(7): vp->or |= (1 << 21); break;
+ case NV40_VP_INST_DEST_CLIP(0):
+ vp->or |= (1 << 6);
+ vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE0;
+ dst.index = NV40_VP_INST_DEST_FOGC;
+ break;
+ case NV40_VP_INST_DEST_CLIP(1):
+ vp->or |= (1 << 7);
+ vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE1;
+ dst.index = NV40_VP_INST_DEST_FOGC;
+ break;
+ case NV40_VP_INST_DEST_CLIP(2):
+ vp->or |= (1 << 8);
+ vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE2;
+ dst.index = NV40_VP_INST_DEST_FOGC;
+ break;
+ case NV40_VP_INST_DEST_CLIP(3):
+ vp->or |= (1 << 9);
+ vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE3;
+ dst.index = NV40_VP_INST_DEST_PSZ;
+ break;
+ case NV40_VP_INST_DEST_CLIP(4):
+ vp->or |= (1 << 10);
+ vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE4;
+ dst.index = NV40_VP_INST_DEST_PSZ;
+ break;
+ case NV40_VP_INST_DEST_CLIP(5):
+ vp->or |= (1 << 11);
+ vp->clip_ctrl |= NV40TCL_CLIP_PLANE_ENABLE_PLANE5;
+ dst.index = NV40_VP_INST_DEST_PSZ;
+ break;
+ default:
+ break;
+ }
+
+ hw[3] |= (dst.index << NV40_VP_INST_DEST_SHIFT);
+ if (slot == 0) {
+ hw[0] |= NV40_VP_INST_VEC_RESULT;
+ hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK | (1<<20);
+ } else {
+ hw[3] |= NV40_VP_INST_SCA_RESULT;
+ hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
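+/* Append one instruction to the program. Slot 0 targets the vector unit,
+ * slot 1 the scalar unit; the destination temporary of the unused unit is
+ * masked off.
+ */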
+static void
+nv40_vp_arith(struct nv40_vpc *vpc, int slot, int op,
+ struct nv40_sreg dst, int mask,
+ struct nv40_sreg s0, struct nv40_sreg s1,
+ struct nv40_sreg s2)
+{
+ struct nv40_vertex_program *vp = vpc->vp;
+ uint32_t *hw;
+
+ vp->insns = realloc(vp->insns, ++vp->nr_insns * sizeof(*vpc->vpi));
+ vpc->vpi = &vp->insns[vp->nr_insns - 1];
+ memset(vpc->vpi, 0, sizeof(*vpc->vpi));
+ vpc->vpi->const_index = -1;
+
+ hw = vpc->vpi->data;
+
+ hw[0] |= (NV40_VP_INST_COND_TR << NV40_VP_INST_COND_SHIFT);
+ hw[0] |= ((0 << NV40_VP_INST_COND_SWZ_X_SHIFT) |
+ (1 << NV40_VP_INST_COND_SWZ_Y_SHIFT) |
+ (2 << NV40_VP_INST_COND_SWZ_Z_SHIFT) |
+ (3 << NV40_VP_INST_COND_SWZ_W_SHIFT));
+
+ if (slot == 0) {
+ hw[1] |= (op << NV40_VP_INST_VEC_OPCODE_SHIFT);
+ hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
+ hw[3] |= (mask << NV40_VP_INST_VEC_WRITEMASK_SHIFT);
+ } else {
+ hw[1] |= (op << NV40_VP_INST_SCA_OPCODE_SHIFT);
+ hw[0] |= (NV40_VP_INST_VEC_DEST_TEMP_MASK | (1 << 20));
+ hw[3] |= (mask << NV40_VP_INST_SCA_WRITEMASK_SHIFT);
+ }
+
+ emit_dst(vpc, hw, slot, dst);
+ emit_src(vpc, hw, 0, s0);
+ emit_src(vpc, hw, 1, s1);
+ emit_src(vpc, hw, 2, s2);
+}
+
+static INLINE struct nv40_sreg
+tgsi_src(struct nv40_vpc *vpc, const struct tgsi_full_src_register *fsrc) {
+ struct nv40_sreg src;
+
+ switch (fsrc->SrcRegister.File) {
+ case TGSI_FILE_INPUT:
+ src = nv40_sr(NV40SR_INPUT, fsrc->SrcRegister.Index);
+ break;
+ case TGSI_FILE_CONSTANT:
+ src = constant(vpc, fsrc->SrcRegister.Index, 0, 0, 0, 0);
+ break;
+ case TGSI_FILE_IMMEDIATE:
+ src = vpc->imm[fsrc->SrcRegister.Index];
+ break;
+ case TGSI_FILE_TEMPORARY:
+ src = vpc->r_temp[fsrc->SrcRegister.Index];
+ break;
+ default:
+ NOUVEAU_ERR("bad src file\n");
+ break;
+ }
+
+ src.abs = fsrc->SrcRegisterExtMod.Absolute;
+ src.negate = fsrc->SrcRegister.Negate;
+ src.swz[0] = fsrc->SrcRegister.SwizzleX;
+ src.swz[1] = fsrc->SrcRegister.SwizzleY;
+ src.swz[2] = fsrc->SrcRegister.SwizzleZ;
+ src.swz[3] = fsrc->SrcRegister.SwizzleW;
+ return src;
+}
+
+static INLINE struct nv40_sreg
+tgsi_dst(struct nv40_vpc *vpc, const struct tgsi_full_dst_register *fdst) {
+ struct nv40_sreg dst;
+
+ switch (fdst->DstRegister.File) {
+ case TGSI_FILE_OUTPUT:
+ dst = vpc->r_result[fdst->DstRegister.Index];
+ break;
+ case TGSI_FILE_TEMPORARY:
+ dst = vpc->r_temp[fdst->DstRegister.Index];
+ break;
+ case TGSI_FILE_ADDRESS:
+ dst = vpc->r_address[fdst->DstRegister.Index];
+ break;
+ default:
+ NOUVEAU_ERR("bad dst file\n");
+ break;
+ }
+
+ return dst;
+}
+
+static INLINE int
+tgsi_mask(uint tgsi)
+{
+ int mask = 0;
+
+ if (tgsi & TGSI_WRITEMASK_X) mask |= MASK_X;
+ if (tgsi & TGSI_WRITEMASK_Y) mask |= MASK_Y;
+ if (tgsi & TGSI_WRITEMASK_Z) mask |= MASK_Z;
+ if (tgsi & TGSI_WRITEMASK_W) mask |= MASK_W;
+ return mask;
+}
+
+static boolean
+src_native_swz(struct nv40_vpc *vpc, const struct tgsi_full_src_register *fsrc,
+ struct nv40_sreg *src)
+{
+ const struct nv40_sreg none = nv40_sr(NV40SR_NONE, 0);
+ struct nv40_sreg tgsi = tgsi_src(vpc, fsrc);
+ uint mask = 0, zero_mask = 0, one_mask = 0, neg_mask = 0;
+ uint neg[4] = { fsrc->SrcRegisterExtSwz.NegateX,
+ fsrc->SrcRegisterExtSwz.NegateY,
+ fsrc->SrcRegisterExtSwz.NegateZ,
+ fsrc->SrcRegisterExtSwz.NegateW };
+ uint c;
+
+ for (c = 0; c < 4; c++) {
+ switch (tgsi_util_get_full_src_register_extswizzle(fsrc, c)) {
+ case TGSI_EXTSWIZZLE_X:
+ case TGSI_EXTSWIZZLE_Y:
+ case TGSI_EXTSWIZZLE_Z:
+ case TGSI_EXTSWIZZLE_W:
+ mask |= tgsi_mask(1 << c);
+ break;
+ case TGSI_EXTSWIZZLE_ZERO:
+ zero_mask |= tgsi_mask(1 << c);
+ tgsi.swz[c] = SWZ_X;
+ break;
+ case TGSI_EXTSWIZZLE_ONE:
+ one_mask |= tgsi_mask(1 << c);
+ tgsi.swz[c] = SWZ_X;
+ break;
+ default:
+ assert(0);
+ }
+
+ if (!tgsi.negate && neg[c])
+ neg_mask |= tgsi_mask(1 << c);
+ }
+
+ if (mask == MASK_ALL && !neg_mask)
+ return TRUE;
+
+ *src = temp(vpc);
+
+ if (mask)
+ arith(vpc, 0, OP_MOV, *src, mask, tgsi, none, none);
+
+ if (zero_mask)
+ arith(vpc, 0, OP_SFL, *src, zero_mask, *src, none, none);
+
+ if (one_mask)
+ arith(vpc, 0, OP_STR, *src, one_mask, *src, none, none);
+
+ if (neg_mask) {
+ struct nv40_sreg one = temp(vpc);
+ arith(vpc, 0, OP_STR, one, neg_mask, one, none, none);
+ arith(vpc, 0, OP_MUL, *src, neg_mask, *src, neg(one), none);
+ }
+
+ return FALSE;
+}
+
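+/* Translate one TGSI instruction. An instruction may only reference a single
+ * input, and a single constant or immediate, directly; any additional ones
+ * are first copied into temporaries with a MOV.
+ */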
+static boolean
+nv40_vertprog_parse_instruction(struct nv40_vpc *vpc,
+ const struct tgsi_full_instruction *finst)
+{
+ struct nv40_sreg src[3], dst, tmp;
+ struct nv40_sreg none = nv40_sr(NV40SR_NONE, 0);
+ int mask;
+ int ai = -1, ci = -1, ii = -1;
+ int i;
+
+ if (finst->Instruction.Opcode == TGSI_OPCODE_END)
+ return TRUE;
+
+ for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
+ const struct tgsi_full_src_register *fsrc;
+
+ fsrc = &finst->FullSrcRegisters[i];
+ if (fsrc->SrcRegister.File == TGSI_FILE_TEMPORARY) {
+ src[i] = tgsi_src(vpc, fsrc);
+ }
+ }
+
+ for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
+ const struct tgsi_full_src_register *fsrc;
+
+ fsrc = &finst->FullSrcRegisters[i];
+
+ switch (fsrc->SrcRegister.File) {
+ case TGSI_FILE_INPUT:
+ case TGSI_FILE_CONSTANT:
+ case TGSI_FILE_TEMPORARY:
+ if (!src_native_swz(vpc, fsrc, &src[i]))
+ continue;
+ break;
+ default:
+ break;
+ }
+
+ switch (fsrc->SrcRegister.File) {
+ case TGSI_FILE_INPUT:
+ if (ai == -1 || ai == fsrc->SrcRegister.Index) {
+ ai = fsrc->SrcRegister.Index;
+ src[i] = tgsi_src(vpc, fsrc);
+ } else {
+ src[i] = temp(vpc);
+ arith(vpc, 0, OP_MOV, src[i], MASK_ALL,
+ tgsi_src(vpc, fsrc), none, none);
+ }
+ break;
+ case TGSI_FILE_CONSTANT:
+ if ((ci == -1 && ii == -1) ||
+ ci == fsrc->SrcRegister.Index) {
+ ci = fsrc->SrcRegister.Index;
+ src[i] = tgsi_src(vpc, fsrc);
+ } else {
+ src[i] = temp(vpc);
+ arith(vpc, 0, OP_MOV, src[i], MASK_ALL,
+ tgsi_src(vpc, fsrc), none, none);
+ }
+ break;
+ case TGSI_FILE_IMMEDIATE:
+ if ((ci == -1 && ii == -1) ||
+ ii == fsrc->SrcRegister.Index) {
+ ii = fsrc->SrcRegister.Index;
+ src[i] = tgsi_src(vpc, fsrc);
+ } else {
+ src[i] = temp(vpc);
+ arith(vpc, 0, OP_MOV, src[i], MASK_ALL,
+ tgsi_src(vpc, fsrc), none, none);
+ }
+ break;
+ case TGSI_FILE_TEMPORARY:
+ /* handled above */
+ break;
+ default:
+ NOUVEAU_ERR("bad src file\n");
+ return FALSE;
+ }
+ }
+
+ dst = tgsi_dst(vpc, &finst->FullDstRegisters[0]);
+ mask = tgsi_mask(finst->FullDstRegisters[0].DstRegister.WriteMask);
+
+ switch (finst->Instruction.Opcode) {
+ case TGSI_OPCODE_ABS:
+ arith(vpc, 0, OP_MOV, dst, mask, abs(src[0]), none, none);
+ break;
+ case TGSI_OPCODE_ADD:
+ arith(vpc, 0, OP_ADD, dst, mask, src[0], none, src[1]);
+ break;
+ case TGSI_OPCODE_ARL:
+ arith(vpc, 0, OP_ARL, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_DP3:
+ arith(vpc, 0, OP_DP3, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_DP4:
+ arith(vpc, 0, OP_DP4, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_DPH:
+ arith(vpc, 0, OP_DPH, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_DST:
+ arith(vpc, 0, OP_DST, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_EX2:
+ arith(vpc, 1, OP_EX2, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_EXP:
+ arith(vpc, 1, OP_EXP, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_FLR:
+ arith(vpc, 0, OP_FLR, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_FRC:
+ arith(vpc, 0, OP_FRC, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_LG2:
+ arith(vpc, 1, OP_LG2, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_LIT:
+ arith(vpc, 1, OP_LIT, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_LOG:
+ arith(vpc, 1, OP_LOG, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_MAD:
+ arith(vpc, 0, OP_MAD, dst, mask, src[0], src[1], src[2]);
+ break;
+ case TGSI_OPCODE_MAX:
+ arith(vpc, 0, OP_MAX, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_MIN:
+ arith(vpc, 0, OP_MIN, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_MOV:
+ arith(vpc, 0, OP_MOV, dst, mask, src[0], none, none);
+ break;
+ case TGSI_OPCODE_MUL:
+ arith(vpc, 0, OP_MUL, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_POW:
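+		/* No native POW; compute x^y as 2^(y * log2(x)) using
+		 * scalar LG2, MUL and EX2 on the .x component. */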
+ tmp = temp(vpc);
+ arith(vpc, 1, OP_LG2, tmp, MASK_X, none, none,
+ swz(src[0], X, X, X, X));
+ arith(vpc, 0, OP_MUL, tmp, MASK_X, swz(tmp, X, X, X, X),
+ swz(src[1], X, X, X, X), none);
+ arith(vpc, 1, OP_EX2, dst, mask, none, none,
+ swz(tmp, X, X, X, X));
+ break;
+ case TGSI_OPCODE_RCP:
+ arith(vpc, 1, OP_RCP, dst, mask, none, none, src[0]);
+ break;
+ case TGSI_OPCODE_RET:
+ break;
+ case TGSI_OPCODE_RSQ:
+ arith(vpc, 1, OP_RSQ, dst, mask, none, none, abs(src[0]));
+ break;
+ case TGSI_OPCODE_SGE:
+ arith(vpc, 0, OP_SGE, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SLT:
+ arith(vpc, 0, OP_SLT, dst, mask, src[0], src[1], none);
+ break;
+ case TGSI_OPCODE_SUB:
+ arith(vpc, 0, OP_ADD, dst, mask, src[0], none, neg(src[1]));
+ break;
+ case TGSI_OPCODE_XPD:
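+		/* Cross product: tmp = src0.zxy * src1.yzx, then
+		 * dst.xyz = src0.yzx * src1.zxy - tmp via MAD. */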
+ tmp = temp(vpc);
+ arith(vpc, 0, OP_MUL, tmp, mask,
+ swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none);
+ arith(vpc, 0, OP_MAD, dst, (mask & ~MASK_W),
+ swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y),
+ neg(tmp));
+ break;
+ default:
+ NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
+ return FALSE;
+ }
+
+ release_temps(vpc);
+ return TRUE;
+}
+
+static boolean
+nv40_vertprog_parse_decl_output(struct nv40_vpc *vpc,
+ const struct tgsi_full_declaration *fdec)
+{
+ unsigned idx = fdec->DeclarationRange.First;
+ int hw;
+
+ switch (fdec->Semantic.SemanticName) {
+ case TGSI_SEMANTIC_POSITION:
+ hw = NV40_VP_INST_DEST_POS;
+ vpc->hpos_idx = idx;
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ if (fdec->Semantic.SemanticIndex == 0) {
+ hw = NV40_VP_INST_DEST_COL0;
+ } else
+ if (fdec->Semantic.SemanticIndex == 1) {
+ hw = NV40_VP_INST_DEST_COL1;
+ } else {
+ NOUVEAU_ERR("bad colour semantic index\n");
+ return FALSE;
+ }
+ break;
+ case TGSI_SEMANTIC_BCOLOR:
+ if (fdec->Semantic.SemanticIndex == 0) {
+ hw = NV40_VP_INST_DEST_BFC0;
+ } else
+ if (fdec->Semantic.SemanticIndex == 1) {
+ hw = NV40_VP_INST_DEST_BFC1;
+ } else {
+ NOUVEAU_ERR("bad bcolour semantic index\n");
+ return FALSE;
+ }
+ break;
+ case TGSI_SEMANTIC_FOG:
+ hw = NV40_VP_INST_DEST_FOGC;
+ break;
+ case TGSI_SEMANTIC_PSIZE:
+ hw = NV40_VP_INST_DEST_PSZ;
+ break;
+ case TGSI_SEMANTIC_GENERIC:
+ if (fdec->Semantic.SemanticIndex <= 7) {
+ hw = NV40_VP_INST_DEST_TC(fdec->Semantic.SemanticIndex);
+ } else {
+ NOUVEAU_ERR("bad generic semantic index\n");
+ return FALSE;
+ }
+ break;
+ default:
+ NOUVEAU_ERR("bad output semantic\n");
+ return FALSE;
+ }
+
+ vpc->r_result[idx] = nv40_sr(NV40SR_OUTPUT, hw);
+ return TRUE;
+}
+
+static boolean
+nv40_vertprog_prepare(struct nv40_vpc *vpc)
+{
+ struct tgsi_parse_context p;
+ int high_temp = -1, high_addr = -1, nr_imm = 0, i;
+
+ tgsi_parse_init(&p, vpc->vp->pipe.tokens);
+ while (!tgsi_parse_end_of_tokens(&p)) {
+ const union tgsi_full_token *tok = &p.FullToken;
+
+ tgsi_parse_token(&p);
+ switch(tok->Token.Type) {
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ nr_imm++;
+ break;
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ {
+ const struct tgsi_full_declaration *fdec;
+
+ fdec = &p.FullToken.FullDeclaration;
+ switch (fdec->Declaration.File) {
+ case TGSI_FILE_TEMPORARY:
+ if (fdec->DeclarationRange.Last > high_temp) {
+ high_temp =
+ fdec->DeclarationRange.Last;
+ }
+ break;
+#if 0 /* this would be nice.. except gallium doesn't track it */
+ case TGSI_FILE_ADDRESS:
+ if (fdec->DeclarationRange.Last > high_addr) {
+ high_addr =
+ fdec->DeclarationRange.Last;
+ }
+ break;
+#endif
+ case TGSI_FILE_OUTPUT:
+ if (!nv40_vertprog_parse_decl_output(vpc, fdec))
+ return FALSE;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+#if 1 /* yay, parse instructions looking for address regs instead */
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ {
+ const struct tgsi_full_instruction *finst;
+ const struct tgsi_full_dst_register *fdst;
+
+ finst = &p.FullToken.FullInstruction;
+ fdst = &finst->FullDstRegisters[0];
+
+ if (fdst->DstRegister.File == TGSI_FILE_ADDRESS) {
+ if (fdst->DstRegister.Index > high_addr)
+ high_addr = fdst->DstRegister.Index;
+ }
+
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+ tgsi_parse_free(&p);
+
+ if (nr_imm) {
+ vpc->imm = CALLOC(nr_imm, sizeof(struct nv40_sreg));
+ assert(vpc->imm);
+ }
+
+ if (++high_temp) {
+ vpc->r_temp = CALLOC(high_temp, sizeof(struct nv40_sreg));
+ for (i = 0; i < high_temp; i++)
+ vpc->r_temp[i] = temp(vpc);
+ }
+
+ if (++high_addr) {
+ vpc->r_address = CALLOC(high_addr, sizeof(struct nv40_sreg));
+ for (i = 0; i < high_addr; i++)
+ vpc->r_address[i] = temp(vpc);
+ }
+
+ vpc->r_temps_discard = 0;
+ return TRUE;
+}
+
+static void
+nv40_vertprog_translate(struct nv40_context *nv40,
+ struct nv40_vertex_program *vp)
+{
+ struct tgsi_parse_context parse;
+ struct nv40_vpc *vpc = NULL;
+ struct nv40_sreg none = nv40_sr(NV40SR_NONE, 0);
+ int i;
+
+ vpc = CALLOC(1, sizeof(struct nv40_vpc));
+ if (!vpc)
+ return;
+ vpc->vp = vp;
+
+ if (!nv40_vertprog_prepare(vpc)) {
+ FREE(vpc);
+ return;
+ }
+
+ /* Redirect post-transform vertex position to a temp if user clip
+	 * planes are enabled.  We need to append code to the vtxprog
+ * to handle clip planes later.
+ */
+ if (vp->ucp.nr) {
+ vpc->r_result[vpc->hpos_idx] = temp(vpc);
+ vpc->r_temps_discard = 0;
+ }
+
+ tgsi_parse_init(&parse, vp->pipe.tokens);
+
+ while (!tgsi_parse_end_of_tokens(&parse)) {
+ tgsi_parse_token(&parse);
+
+ switch (parse.FullToken.Token.Type) {
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ {
+ const struct tgsi_full_immediate *imm;
+
+ imm = &parse.FullToken.FullImmediate;
+ assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
+ assert(imm->Immediate.NrTokens == 4 + 1);
+ vpc->imm[vpc->nr_imm++] =
+ constant(vpc, -1,
+ imm->u[0].Float,
+ imm->u[1].Float,
+ imm->u[2].Float,
+ imm->u[3].Float);
+ }
+ break;
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ {
+ const struct tgsi_full_instruction *finst;
+ finst = &parse.FullToken.FullInstruction;
+ if (!nv40_vertprog_parse_instruction(vpc, finst))
+ goto out_err;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Write out HPOS if it was redirected to a temp earlier */
+ if (vpc->r_result[vpc->hpos_idx].type != NV40SR_OUTPUT) {
+ struct nv40_sreg hpos = nv40_sr(NV40SR_OUTPUT,
+ NV40_VP_INST_DEST_POS);
+ struct nv40_sreg htmp = vpc->r_result[vpc->hpos_idx];
+
+ arith(vpc, 0, OP_MOV, hpos, MASK_ALL, htmp, none, none);
+ }
+
+ /* Insert code to handle user clip planes */
+ for (i = 0; i < vp->ucp.nr; i++) {
+ struct nv40_sreg cdst = nv40_sr(NV40SR_OUTPUT,
+ NV40_VP_INST_DEST_CLIP(i));
+ struct nv40_sreg ceqn = constant(vpc, -1,
+ nv40->clip.ucp[i][0],
+ nv40->clip.ucp[i][1],
+ nv40->clip.ucp[i][2],
+ nv40->clip.ucp[i][3]);
+ struct nv40_sreg htmp = vpc->r_result[vpc->hpos_idx];
+ unsigned mask;
+
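+		/* Each clip distance is a DP4 of the transformed position
+		 * with the user plane equation, written to the single
+		 * component selected below. */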
+ switch (i) {
+ case 0: case 3: mask = MASK_Y; break;
+ case 1: case 4: mask = MASK_Z; break;
+ case 2: case 5: mask = MASK_W; break;
+ default:
+ NOUVEAU_ERR("invalid clip dist #%d\n", i);
+ goto out_err;
+ }
+
+ arith(vpc, 0, OP_DP4, cdst, mask, htmp, ceqn, none);
+ }
+
+ vp->insns[vp->nr_insns - 1].data[3] |= NV40_VP_INST_LAST;
+ vp->translated = TRUE;
+out_err:
+ tgsi_parse_free(&parse);
+ if (vpc->r_temp)
+ FREE(vpc->r_temp);
+ if (vpc->r_address)
+ FREE(vpc->r_address);
+ if (vpc->imm)
+ FREE(vpc->imm);
+ FREE(vpc);
+}
+
+static boolean
+nv40_vertprog_validate(struct nv40_context *nv40)
+{
+ struct pipe_screen *pscreen = nv40->pipe.screen;
+ struct nouveau_grobj *curie = nv40->screen->curie;
+ struct nv40_vertex_program *vp;
+ struct pipe_buffer *constbuf;
+ boolean upload_code = FALSE, upload_data = FALSE;
+ int i;
+
+ if (nv40->render_mode == HW) {
+ vp = nv40->vertprog;
+ constbuf = nv40->constbuf[PIPE_SHADER_VERTEX];
+
+ if ((nv40->dirty & NV40_NEW_UCP) ||
+ memcmp(&nv40->clip, &vp->ucp, sizeof(vp->ucp))) {
+ nv40_vertprog_destroy(nv40, vp);
+ memcpy(&vp->ucp, &nv40->clip, sizeof(vp->ucp));
+ }
+ } else {
+ vp = nv40->swtnl.vertprog;
+ constbuf = NULL;
+ }
+
+ /* Translate TGSI shader into hw bytecode */
+ if (vp->translated)
+ goto check_gpu_resources;
+
+ nv40->fallback_swtnl &= ~NV40_NEW_VERTPROG;
+ nv40_vertprog_translate(nv40, vp);
+ if (!vp->translated) {
+ nv40->fallback_swtnl |= NV40_NEW_VERTPROG;
+ return FALSE;
+ }
+
+check_gpu_resources:
+ /* Allocate hw vtxprog exec slots */
+ if (!vp->exec) {
+ struct nouveau_resource *heap = nv40->screen->vp_exec_heap;
+ struct nouveau_stateobj *so;
+ uint vplen = vp->nr_insns;
+
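+		/* If the exec heap is full, evict other vertex programs
+		 * until this one fits; evicted programs get re-uploaded on
+		 * their next validate. */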
+ if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec)) {
+ while (heap->next && heap->size < vplen) {
+ struct nv40_vertex_program *evict;
+
+ evict = heap->next->priv;
+ nouveau_resource_free(&evict->exec);
+ }
+
+ if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec))
+ assert(0);
+ }
+
+ so = so_new(7, 0);
+ so_method(so, curie, NV40TCL_VP_START_FROM_ID, 1);
+ so_data (so, vp->exec->start);
+ so_method(so, curie, NV40TCL_VP_ATTRIB_EN, 2);
+ so_data (so, vp->ir);
+ so_data (so, vp->or);
+ so_method(so, curie, NV40TCL_CLIP_PLANE_ENABLE, 1);
+ so_data (so, vp->clip_ctrl);
+ so_ref(so, &vp->so);
+ so_ref(NULL, &so);
+
+ upload_code = TRUE;
+ }
+
+ /* Allocate hw vtxprog const slots */
+ if (vp->nr_consts && !vp->data) {
+ struct nouveau_resource *heap = nv40->screen->vp_data_heap;
+
+ if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data)) {
+ while (heap->next && heap->size < vp->nr_consts) {
+ struct nv40_vertex_program *evict;
+
+ evict = heap->next->priv;
+ nouveau_resource_free(&evict->data);
+ }
+
+ if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data))
+ assert(0);
+ }
+
+ /*XXX: handle this some day */
+ assert(vp->data->start >= vp->data_start_min);
+
+ upload_data = TRUE;
+ if (vp->data_start != vp->data->start)
+ upload_code = TRUE;
+ }
+
+ /* If exec or data segments moved we need to patch the program to
+ * fixup offsets and register IDs.
+ */
+ if (vp->exec_start != vp->exec->start) {
+ for (i = 0; i < vp->nr_insns; i++) {
+ struct nv40_vertex_program_exec *vpi = &vp->insns[i];
+
+ if (vpi->has_branch_offset) {
+ assert(0);
+ }
+ }
+
+ vp->exec_start = vp->exec->start;
+ }
+
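+	/* The constant segment moved: patch each instruction's constant
+	 * source index to the new base offset. */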
+ if (vp->nr_consts && vp->data_start != vp->data->start) {
+ for (i = 0; i < vp->nr_insns; i++) {
+ struct nv40_vertex_program_exec *vpi = &vp->insns[i];
+
+ if (vpi->const_index >= 0) {
+ vpi->data[1] &= ~NV40_VP_INST_CONST_SRC_MASK;
+ vpi->data[1] |=
+ (vpi->const_index + vp->data->start) <<
+ NV40_VP_INST_CONST_SRC_SHIFT;
+
+ }
+ }
+
+ vp->data_start = vp->data->start;
+ }
+
+ /* Update + Upload constant values */
+ if (vp->nr_consts) {
+ float *map = NULL;
+
+ if (constbuf) {
+ map = pipe_buffer_map(pscreen, constbuf,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ }
+
+ for (i = 0; i < vp->nr_consts; i++) {
+ struct nv40_vertex_program_data *vpd = &vp->consts[i];
+
+ if (vpd->index >= 0) {
+ if (!upload_data &&
+ !memcmp(vpd->value, &map[vpd->index * 4],
+ 4 * sizeof(float)))
+ continue;
+ memcpy(vpd->value, &map[vpd->index * 4],
+ 4 * sizeof(float));
+ }
+
+ BEGIN_RING(curie, NV40TCL_VP_UPLOAD_CONST_ID, 5);
+ OUT_RING (i + vp->data->start);
+ OUT_RINGp ((uint32_t *)vpd->value, 4);
+ }
+
+ if (constbuf)
+ pscreen->buffer_unmap(pscreen, constbuf);
+ }
+
+ /* Upload vtxprog */
+ if (upload_code) {
+#if 0
+ for (i = 0; i < vp->nr_insns; i++) {
+ NOUVEAU_MSG("VP %d: 0x%08x\n", i, vp->insns[i].data[0]);
+ NOUVEAU_MSG("VP %d: 0x%08x\n", i, vp->insns[i].data[1]);
+ NOUVEAU_MSG("VP %d: 0x%08x\n", i, vp->insns[i].data[2]);
+ NOUVEAU_MSG("VP %d: 0x%08x\n", i, vp->insns[i].data[3]);
+ }
+#endif
+ BEGIN_RING(curie, NV40TCL_VP_UPLOAD_FROM_ID, 1);
+ OUT_RING (vp->exec->start);
+ for (i = 0; i < vp->nr_insns; i++) {
+ BEGIN_RING(curie, NV40TCL_VP_UPLOAD_INST(0), 4);
+ OUT_RINGp (vp->insns[i].data, 4);
+ }
+ }
+
+ if (vp->so != nv40->state.hw[NV40_STATE_VERTPROG]) {
+ so_ref(vp->so, &nv40->state.hw[NV40_STATE_VERTPROG]);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+void
+nv40_vertprog_destroy(struct nv40_context *nv40, struct nv40_vertex_program *vp)
+{
+ vp->translated = FALSE;
+
+ if (vp->nr_insns) {
+ FREE(vp->insns);
+ vp->insns = NULL;
+ vp->nr_insns = 0;
+ }
+
+ if (vp->nr_consts) {
+ FREE(vp->consts);
+ vp->consts = NULL;
+ vp->nr_consts = 0;
+ }
+
+ nouveau_resource_free(&vp->exec);
+ vp->exec_start = 0;
+ nouveau_resource_free(&vp->data);
+ vp->data_start = 0;
+ vp->data_start_min = 0;
+
+ vp->ir = vp->or = vp->clip_ctrl = 0;
+ so_ref(NULL, &vp->so);
+}
+
+struct nv40_state_entry nv40_state_vertprog = {
+ .validate = nv40_vertprog_validate,
+ .dirty = {
+ .pipe = NV40_NEW_VERTPROG | NV40_NEW_UCP,
+ .hw = NV40_STATE_VERTPROG,
+ }
+};
+
diff --git a/src/gallium/drivers/nv50/Makefile b/src/gallium/drivers/nv50/Makefile
new file mode 100644
index 0000000000..612aea28a3
--- /dev/null
+++ b/src/gallium/drivers/nv50/Makefile
@@ -0,0 +1,21 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = nv50
+
+C_SOURCES = \
+ nv50_clear.c \
+ nv50_context.c \
+ nv50_draw.c \
+ nv50_miptree.c \
+ nv50_query.c \
+ nv50_program.c \
+ nv50_screen.c \
+ nv50_state.c \
+ nv50_state_validate.c \
+ nv50_surface.c \
+ nv50_tex.c \
+ nv50_transfer.c \
+ nv50_vbo.c
+
+include ../../Makefile.template
diff --git a/src/gallium/drivers/nv50/nv50_clear.c b/src/gallium/drivers/nv50/nv50_clear.c
new file mode 100644
index 0000000000..e0b2d2880b
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv50_clear.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2008 Ben Skeggs
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+
+#include "nv50_context.h"
+
+void
+nv50_clear(struct pipe_context *pipe, unsigned buffers,
+ const float *rgba, double depth, unsigned stencil)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+ struct nouveau_channel *chan = nv50->screen->base.channel;
+ struct nouveau_grobj *tesla = nv50->screen->tesla;
+ struct pipe_framebuffer_state *fb = &nv50->framebuffer;
+ unsigned mode = 0, i;
+
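+	/* CLEAR_BUFFERS mode bits: 0x03 appears to cover depth and stencil,
+	 * 0x3c the four colour channels of RT0; further render targets are
+	 * selected with (i << 6) in the loop at the end. */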
+ if (!nv50_state_validate(nv50))
+ return;
+
+ if (buffers & PIPE_CLEAR_COLOR && fb->nr_cbufs) {
+ BEGIN_RING(chan, tesla, NV50TCL_CLEAR_COLOR(0), 4);
+ OUT_RING (chan, fui(rgba[0]));
+ OUT_RING (chan, fui(rgba[1]));
+ OUT_RING (chan, fui(rgba[2]));
+ OUT_RING (chan, fui(rgba[3]));
+ mode |= 0x3c;
+ }
+
+ if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
+ BEGIN_RING(chan, tesla, NV50TCL_CLEAR_DEPTH, 1);
+ OUT_RING (chan, fui(depth));
+ BEGIN_RING(chan, tesla, NV50TCL_CLEAR_STENCIL, 1);
+ OUT_RING (chan, stencil & 0xff);
+
+ mode |= 0x03;
+ }
+
+ BEGIN_RING(chan, tesla, NV50TCL_CLEAR_BUFFERS, 1);
+ OUT_RING (chan, mode);
+
+ for (i = 1; i < fb->nr_cbufs; i++) {
+ BEGIN_RING(chan, tesla, NV50TCL_CLEAR_BUFFERS, 1);
+ OUT_RING (chan, (i << 6) | 0x3c);
+ }
+}
+
diff --git a/src/gallium/drivers/nv50/nv50_context.c b/src/gallium/drivers/nv50/nv50_context.c
new file mode 100644
index 0000000000..6e8f4f9750
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv50_context.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2008 Ben Skeggs
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "draw/draw_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/internal/p_winsys_screen.h"
+
+#include "nv50_context.h"
+#include "nv50_screen.h"
+
+static void
+nv50_flush(struct pipe_context *pipe, unsigned flags,
+ struct pipe_fence_handle **fence)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+ struct nouveau_channel *chan = nv50->screen->base.channel;
+ struct nouveau_grobj *eng2d = nv50->screen->eng2d;
+
+	/* We need this in the DDX for reliable composite; it's not clear what
+	 * we're actually flushing.  We generate all our own flushes with
+	 * flags = 0. */
+ WAIT_RING(chan, 3);
+ BEGIN_RING(chan, eng2d, 0x0110, 1);
+ OUT_RING (chan, 0);
+
+ FIRE_RING(chan);
+}
+
+static void
+nv50_destroy(struct pipe_context *pipe)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+
+ draw_destroy(nv50->draw);
+ FREE(nv50);
+}
+
+
+static void
+nv50_set_edgeflags(struct pipe_context *pipe, const unsigned *bitfield)
+{
+}
+
+static unsigned int
+nv50_is_texture_referenced( struct pipe_context *pipe,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+static unsigned int
+nv50_is_buffer_referenced( struct pipe_context *pipe,
+ struct pipe_buffer *buf)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+struct pipe_context *
+nv50_create(struct pipe_screen *pscreen, unsigned pctx_id)
+{
+ struct pipe_winsys *pipe_winsys = pscreen->winsys;
+ struct nv50_screen *screen = nv50_screen(pscreen);
+ struct nv50_context *nv50;
+
+ nv50 = CALLOC_STRUCT(nv50_context);
+ if (!nv50)
+ return NULL;
+ nv50->screen = screen;
+ nv50->pctx_id = pctx_id;
+
+ nv50->pipe.winsys = pipe_winsys;
+ nv50->pipe.screen = pscreen;
+
+ nv50->pipe.destroy = nv50_destroy;
+
+ nv50->pipe.set_edgeflags = nv50_set_edgeflags;
+ nv50->pipe.draw_arrays = nv50_draw_arrays;
+ nv50->pipe.draw_elements = nv50_draw_elements;
+ nv50->pipe.clear = nv50_clear;
+
+ nv50->pipe.flush = nv50_flush;
+
+ nv50->pipe.is_texture_referenced = nv50_is_texture_referenced;
+ nv50->pipe.is_buffer_referenced = nv50_is_buffer_referenced;
+
+ nv50_init_surface_functions(nv50);
+ nv50_init_state_functions(nv50);
+ nv50_init_query_functions(nv50);
+
+ nv50->draw = draw_create();
+ assert(nv50->draw);
+ draw_set_rasterize_stage(nv50->draw, nv50_draw_render_stage(nv50));
+
+ return &nv50->pipe;
+}
diff --git a/src/gallium/drivers/nv50/nv50_context.h b/src/gallium/drivers/nv50/nv50_context.h
new file mode 100644
index 0000000000..1e9e8e49bf
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv50_context.h
@@ -0,0 +1,201 @@
+#ifndef __NV50_CONTEXT_H__
+#define __NV50_CONTEXT_H__
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_compiler.h"
+
+#include "util/u_memory.h"
+#include "util/u_math.h"
+
+#include "draw/draw_vertex.h"
+
+#include "nouveau/nouveau_winsys.h"
+#include "nouveau/nouveau_gldefs.h"
+#include "nouveau/nouveau_stateobj.h"
+
+#include "nv50_screen.h"
+#include "nv50_program.h"
+
+#define NOUVEAU_ERR(fmt, args...) \
+ fprintf(stderr, "%s:%d - "fmt, __func__, __LINE__, ##args);
+#define NOUVEAU_MSG(fmt, args...) \
+ fprintf(stderr, "nouveau: "fmt, ##args);
+
+/* Constant buffer assignment */
+#define NV50_CB_PMISC 0
+#define NV50_CB_PVP 1
+#define NV50_CB_PFP 2
+#define NV50_CB_PGP 3
+#define NV50_CB_TIC 4
+#define NV50_CB_TSC 5
+#define NV50_CB_PUPLOAD 6
+
+#define NV50_NEW_BLEND (1 << 0)
+#define NV50_NEW_ZSA (1 << 1)
+#define NV50_NEW_BLEND_COLOUR (1 << 2)
+#define NV50_NEW_STIPPLE (1 << 3)
+#define NV50_NEW_SCISSOR (1 << 4)
+#define NV50_NEW_VIEWPORT (1 << 5)
+#define NV50_NEW_RASTERIZER (1 << 6)
+#define NV50_NEW_FRAMEBUFFER (1 << 7)
+#define NV50_NEW_VERTPROG (1 << 8)
+#define NV50_NEW_VERTPROG_CB (1 << 9)
+#define NV50_NEW_FRAGPROG (1 << 10)
+#define NV50_NEW_FRAGPROG_CB (1 << 11)
+#define NV50_NEW_ARRAYS (1 << 12)
+#define NV50_NEW_SAMPLER (1 << 13)
+#define NV50_NEW_TEXTURE (1 << 14)
+
+struct nv50_blend_stateobj {
+ struct pipe_blend_state pipe;
+ struct nouveau_stateobj *so;
+};
+
+struct nv50_zsa_stateobj {
+ struct pipe_depth_stencil_alpha_state pipe;
+ struct nouveau_stateobj *so;
+};
+
+struct nv50_rasterizer_stateobj {
+ struct pipe_rasterizer_state pipe;
+ struct nouveau_stateobj *so;
+};
+
+struct nv50_sampler_stateobj {
+ bool normalized;
+ unsigned tsc[8];
+};
+
+struct nv50_miptree_level {
+ int *image_offset;
+ unsigned pitch;
+ unsigned tile_mode;
+};
+
+struct nv50_miptree {
+ struct nouveau_miptree base;
+
+ struct nv50_miptree_level level[PIPE_MAX_TEXTURE_LEVELS];
+ int image_nr;
+ int total_size;
+};
+
+static INLINE struct nv50_miptree *
+nv50_miptree(struct pipe_texture *pt)
+{
+ return (struct nv50_miptree *)pt;
+}
+
+struct nv50_surface {
+ struct pipe_surface base;
+};
+
+static INLINE struct nv50_surface *
+nv50_surface(struct pipe_surface *pt)
+{
+ return (struct nv50_surface *)pt;
+}
+
+struct nv50_state {
+ unsigned dirty;
+
+ struct nouveau_stateobj *fb;
+ struct nouveau_stateobj *blend;
+ struct nouveau_stateobj *blend_colour;
+ struct nouveau_stateobj *zsa;
+ struct nouveau_stateobj *rast;
+ struct nouveau_stateobj *stipple;
+ struct nouveau_stateobj *scissor;
+ unsigned scissor_enabled;
+ struct nouveau_stateobj *viewport;
+ unsigned viewport_bypass;
+ struct nouveau_stateobj *tsc_upload;
+ struct nouveau_stateobj *tic_upload;
+ unsigned miptree_nr;
+ struct nouveau_stateobj *vertprog;
+ struct nouveau_stateobj *fragprog;
+ struct nouveau_stateobj *vtxfmt;
+ struct nouveau_stateobj *vtxbuf;
+ struct nouveau_stateobj *vtxattr;
+};
+
+struct nv50_context {
+ struct pipe_context pipe;
+
+ struct nv50_screen *screen;
+ unsigned pctx_id;
+
+ struct draw_context *draw;
+
+ struct nv50_state state;
+
+ unsigned dirty;
+ struct nv50_blend_stateobj *blend;
+ struct nv50_zsa_stateobj *zsa;
+ struct nv50_rasterizer_stateobj *rasterizer;
+ struct pipe_blend_color blend_colour;
+ struct pipe_poly_stipple stipple;
+ struct pipe_scissor_state scissor;
+ struct pipe_viewport_state viewport;
+ struct pipe_framebuffer_state framebuffer;
+ struct nv50_program *vertprog;
+ struct nv50_program *fragprog;
+ struct pipe_buffer *constbuf[PIPE_SHADER_TYPES];
+ struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
+ unsigned vtxbuf_nr;
+ struct pipe_vertex_element vtxelt[PIPE_MAX_ATTRIBS];
+ unsigned vtxelt_nr;
+ struct nv50_sampler_stateobj *sampler[PIPE_MAX_SAMPLERS];
+ unsigned sampler_nr;
+ struct nv50_miptree *miptree[PIPE_MAX_SAMPLERS];
+ unsigned miptree_nr;
+};
+
+static INLINE struct nv50_context *
+nv50_context(struct pipe_context *pipe)
+{
+ return (struct nv50_context *)pipe;
+}
+
+extern void nv50_init_surface_functions(struct nv50_context *nv50);
+extern void nv50_init_state_functions(struct nv50_context *nv50);
+extern void nv50_init_query_functions(struct nv50_context *nv50);
+
+extern void nv50_screen_init_miptree_functions(struct pipe_screen *pscreen);
+
+extern int
+nv50_surface_do_copy(struct nv50_screen *screen, struct pipe_surface *dst,
+ int dx, int dy, struct pipe_surface *src, int sx, int sy,
+ int w, int h);
+
+/* nv50_draw.c */
+extern struct draw_stage *nv50_draw_render_stage(struct nv50_context *nv50);
+
+/* nv50_vbo.c */
+extern boolean nv50_draw_arrays(struct pipe_context *, unsigned mode,
+ unsigned start, unsigned count);
+extern boolean nv50_draw_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned mode, unsigned start,
+ unsigned count);
+extern void nv50_vbo_validate(struct nv50_context *nv50);
+
+/* nv50_clear.c */
+extern void nv50_clear(struct pipe_context *pipe, unsigned buffers,
+ const float *rgba, double depth, unsigned stencil);
+
+/* nv50_program.c */
+extern void nv50_vertprog_validate(struct nv50_context *nv50);
+extern void nv50_fragprog_validate(struct nv50_context *nv50);
+extern void nv50_program_destroy(struct nv50_context *nv50, struct nv50_program *p);
+
+/* nv50_state_validate.c */
+extern boolean nv50_state_validate(struct nv50_context *nv50);
+
+/* nv50_tex.c */
+extern void nv50_tex_validate(struct nv50_context *);
+
+#endif
diff --git a/src/gallium/drivers/nv50/nv50_draw.c b/src/gallium/drivers/nv50/nv50_draw.c
new file mode 100644
index 0000000000..2f6f607261
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv50_draw.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2008 Ben Skeggs
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "draw/draw_pipe.h"
+
+#include "nv50_context.h"
+
+struct nv50_render_stage {
+ struct draw_stage stage;
+ struct nv50_context *nv50;
+};
+
+static INLINE struct nv50_render_stage *
+nv50_render_stage(struct draw_stage *stage)
+{
+ return (struct nv50_render_stage *)stage;
+}
+
+static void
+nv50_render_point(struct draw_stage *stage, struct prim_header *prim)
+{
+ NOUVEAU_ERR("\n");
+}
+
+static void
+nv50_render_line(struct draw_stage *stage, struct prim_header *prim)
+{
+ NOUVEAU_ERR("\n");
+}
+
+static void
+nv50_render_tri(struct draw_stage *stage, struct prim_header *prim)
+{
+ NOUVEAU_ERR("\n");
+}
+
+static void
+nv50_render_flush(struct draw_stage *stage, unsigned flags)
+{
+}
+
+static void
+nv50_render_reset_stipple_counter(struct draw_stage *stage)
+{
+ NOUVEAU_ERR("\n");
+}
+
+static void
+nv50_render_destroy(struct draw_stage *stage)
+{
+ FREE(stage);
+}
+
+struct draw_stage *
+nv50_draw_render_stage(struct nv50_context *nv50)
+{
+ struct nv50_render_stage *rs = CALLOC_STRUCT(nv50_render_stage);
+
+ rs->nv50 = nv50;
+ rs->stage.draw = nv50->draw;
+ rs->stage.destroy = nv50_render_destroy;
+ rs->stage.point = nv50_render_point;
+ rs->stage.line = nv50_render_line;
+ rs->stage.tri = nv50_render_tri;
+ rs->stage.flush = nv50_render_flush;
+ rs->stage.reset_stipple_counter = nv50_render_reset_stipple_counter;
+
+ return &rs->stage;
+}
+
diff --git a/src/gallium/drivers/nv50/nv50_miptree.c b/src/gallium/drivers/nv50/nv50_miptree.c
new file mode 100644
index 0000000000..93479a0314
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv50_miptree.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2008 Ben Skeggs
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "pipe/p_state.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+
+#include "nv50_context.h"
+
+static struct pipe_texture *
+nv50_miptree_create(struct pipe_screen *pscreen, const struct pipe_texture *tmp)
+{
+ struct nouveau_device *dev = nouveau_screen(pscreen)->device;
+ struct nv50_miptree *mt = CALLOC_STRUCT(nv50_miptree);
+ struct pipe_texture *pt = &mt->base.base;
+ unsigned width = tmp->width[0], height = tmp->height[0];
+ unsigned depth = tmp->depth[0];
+ uint32_t tile_mode, tile_flags, tile_h;
+ int ret, i, l;
+
+ *pt = *tmp;
+ pipe_reference_init(&pt->reference, 1);
+ pt->screen = pscreen;
+
+ switch (pt->format) {
+ case PIPE_FORMAT_Z32_FLOAT:
+ tile_flags = 0x4800;
+ break;
+ case PIPE_FORMAT_Z24S8_UNORM:
+ tile_flags = 0x1800;
+ break;
+ case PIPE_FORMAT_X8Z24_UNORM:
+ case PIPE_FORMAT_S8Z24_UNORM:
+ tile_flags = 0x2800;
+ break;
+ default:
+ tile_flags = 0x7000;
+ break;
+ }
+
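+
+	/* Pick the vertical tile mode from the base level's height; the tile
+	 * height is 4 << tile_mode scanlines and is reduced per mip level
+	 * once a level fits within half a tile. */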
+ if (pt->height[0] > 32) tile_mode = 4;
+ else if (pt->height[0] > 16) tile_mode = 3;
+ else if (pt->height[0] > 8) tile_mode = 2;
+ else if (pt->height[0] > 4) tile_mode = 1;
+ else tile_mode = 0;
+ tile_h = 1 << (tile_mode + 2);
+
+ switch (pt->target) {
+ case PIPE_TEXTURE_3D:
+ mt->image_nr = pt->depth[0];
+ break;
+ case PIPE_TEXTURE_CUBE:
+ mt->image_nr = 6;
+ break;
+ default:
+ mt->image_nr = 1;
+ break;
+ }
+
+ for (l = 0; l <= pt->last_level; l++) {
+ struct nv50_miptree_level *lvl = &mt->level[l];
+
+ pt->width[l] = width;
+ pt->height[l] = height;
+ pt->depth[l] = depth;
+ pt->nblocksx[l] = pf_get_nblocksx(&pt->block, width);
+ pt->nblocksy[l] = pf_get_nblocksy(&pt->block, height);
+
+ lvl->image_offset = CALLOC(mt->image_nr, sizeof(int));
+ lvl->pitch = align(pt->width[l] * pt->block.size, 64);
+ lvl->tile_mode = tile_mode;
+
+ width = MAX2(1, width >> 1);
+ height = MAX2(1, height >> 1);
+ depth = MAX2(1, depth >> 1);
+
+ if (tile_mode && height <= (tile_h >> 1)) {
+ tile_mode--;
+ tile_h >>= 1;
+ }
+ }
+
+ for (i = 0; i < mt->image_nr; i++) {
+ for (l = 0; l <= pt->last_level; l++) {
+ struct nv50_miptree_level *lvl = &mt->level[l];
+ int size;
+ tile_h = 1 << (lvl->tile_mode + 2);
+
+ size = align(pt->width[l], 8) * pt->block.size;
+ size = align(size, 64);
+ size *= align(pt->height[l], tile_h);
+
+ lvl->image_offset[i] = mt->total_size;
+
+ mt->total_size += size;
+ }
+ }
+
+ ret = nouveau_bo_new_tile(dev, NOUVEAU_BO_VRAM, 256, mt->total_size,
+ mt->level[0].tile_mode, tile_flags,
+ &mt->base.bo);
+ if (ret) {
+ FREE(mt);
+ return NULL;
+ }
+
+ return pt;
+}
+
+static struct pipe_texture *
+nv50_miptree_blanket(struct pipe_screen *pscreen, const struct pipe_texture *pt,
+ const unsigned *stride, struct pipe_buffer *pb)
+{
+ struct nouveau_bo *bo = nouveau_bo(pb);
+ struct nv50_miptree *mt;
+
+ /* Only supports 2D, non-mipmapped textures for the moment */
+ if (pt->target != PIPE_TEXTURE_2D || pt->last_level != 0 ||
+ pt->depth[0] != 1)
+ return NULL;
+
+ mt = CALLOC_STRUCT(nv50_miptree);
+ if (!mt)
+ return NULL;
+
+ mt->base.base = *pt;
+ pipe_reference_init(&mt->base.base.reference, 1);
+ mt->base.base.screen = pscreen;
+ mt->image_nr = 1;
+ mt->level[0].pitch = *stride;
+ mt->level[0].image_offset = CALLOC(1, sizeof(unsigned));
+ mt->level[0].tile_mode = bo->tile_mode;
+
+ nouveau_bo_ref(bo, &mt->base.bo);
+ return &mt->base.base;
+}
+
+static void
+nv50_miptree_destroy(struct pipe_texture *pt)
+{
+ struct nv50_miptree *mt = nv50_miptree(pt);
+
+ nouveau_bo_ref(NULL, &mt->base.bo);
+ FREE(mt);
+}
+
+static struct pipe_surface *
+nv50_miptree_surface_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ unsigned flags)
+{
+ struct nv50_miptree *mt = nv50_miptree(pt);
+ struct nv50_miptree_level *lvl = &mt->level[level];
+ struct pipe_surface *ps;
+ int img;
+
+ if (pt->target == PIPE_TEXTURE_CUBE)
+ img = face;
+ else
+ if (pt->target == PIPE_TEXTURE_3D)
+ img = zslice;
+ else
+ img = 0;
+
+ ps = CALLOC_STRUCT(pipe_surface);
+ if (!ps)
+ return NULL;
+ pipe_texture_reference(&ps->texture, pt);
+ ps->format = pt->format;
+ ps->width = pt->width[level];
+ ps->height = pt->height[level];
+ ps->usage = flags;
+ pipe_reference_init(&ps->reference, 1);
+ ps->face = face;
+ ps->level = level;
+ ps->zslice = zslice;
+ ps->offset = lvl->image_offset[img];
+
+ return ps;
+}
+
+static void
+nv50_miptree_surface_del(struct pipe_surface *ps)
+{
+ struct nv50_surface *s = nv50_surface(ps);
+
+ pipe_texture_reference(&ps->texture, NULL);
+ FREE(s);
+}
+
+void
+nv50_screen_init_miptree_functions(struct pipe_screen *pscreen)
+{
+ pscreen->texture_create = nv50_miptree_create;
+ pscreen->texture_blanket = nv50_miptree_blanket;
+ pscreen->texture_destroy = nv50_miptree_destroy;
+ pscreen->get_tex_surface = nv50_miptree_surface_new;
+ pscreen->tex_surface_destroy = nv50_miptree_surface_del;
+}
+
diff --git a/src/gallium/drivers/nv50/nv50_program.c b/src/gallium/drivers/nv50/nv50_program.c
new file mode 100644
index 0000000000..4a838529de
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv50_program.c
@@ -0,0 +1,2482 @@
+/*
+ * Copyright 2008 Ben Skeggs
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+
+#include "pipe/p_shader_tokens.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_util.h"
+
+#include "nv50_context.h"
+
+#define NV50_SU_MAX_TEMP 64
+//#define NV50_PROGRAM_DUMP
+
+/* ARL - gallium currently chokes on progs/vp/arl.txt
+ *
+ * MSB - Like MAD, but MUL+SUB
+ * 	- Skip it; instead introduce a way to negate args for ops that
+ * 	  support it.
+ *
+ * Look into inlining IMMD for ops other than MOV (make it general?)
+ * 	- Maybe even relax restrictions a bit: can't do P_RESULT + P_IMMD,
+ * 	  but can emit to P_TEMP first and MOV later. NVIDIA does this.
+ *
+ * In ops such as ADD it's possible to construct a bad opcode in the !is_long()
+ * case, if the emit_src() causes the inst to suddenly become long.
+ *
+ * Verify half-insns work where expected, and force-disable them where they
+ * don't work - MUL has it forcibly disabled at the moment, which fixes POW.
+ *
+ * Watch out for dst==src vectors: components that are still needed can be
+ * overwritten, e.g. SUB R0, R0.yzxw, R0
+ *
+ * Things to check with renouveau:
+ * FP attr/result assignment - how?
+ * attrib
+ * - 0x16bc maps vp output onto fp hpos
+ * - 0x16c0 maps vp output onto fp col0
+ * result
+ * - colr always 0-3
+ * - depr always 4
+ * 0x16bc->0x16e8 --> some binding between vp/fp regs
+ * 0x16b8 --> VP output count
+ *
+ * 0x1298 --> "MOV rcol.x, fcol.y" "MOV depr, fcol.y" = 0x00000005
+ * "MOV rcol.x, fcol.y" = 0x00000004
+ * 0x19a8 --> as above but 0x00000100 and 0x00000000
+ * - 0x00100000 used when KIL used
+ * 0x196c --> as above but 0x00000011 and 0x00000000
+ *
+ * 0x1988 --> 0xXXNNNNNN
+ * - XX == FP high something
+ */
+struct nv50_reg {
+ enum {
+ P_TEMP,
+ P_ATTR,
+ P_RESULT,
+ P_CONST,
+ P_IMMD
+ } type;
+ int index;
+
+ int hw;
+ int neg;
+
+ int rhw; /* result hw for FP outputs, or interpolant index */
+ int acc; /* instruction where this reg is last read (first insn == 1) */
+};
+
+struct nv50_pc {
+ struct nv50_program *p;
+
+ /* hw resources */
+ struct nv50_reg *r_temp[NV50_SU_MAX_TEMP];
+
+ /* tgsi resources */
+ struct nv50_reg *temp;
+ int temp_nr;
+ struct nv50_reg *attr;
+ int attr_nr;
+ struct nv50_reg *result;
+ int result_nr;
+ struct nv50_reg *param;
+ int param_nr;
+ struct nv50_reg *immd;
+ float *immd_buf;
+ int immd_nr;
+
+ struct nv50_reg *temp_temp[16];
+ unsigned temp_temp_nr;
+
+ unsigned interp_mode[32];
+ /* perspective interpolation registers */
+ struct nv50_reg *iv_p;
+ struct nv50_reg *iv_c;
+
+ /* current instruction and total number of insns */
+ unsigned insn_cur;
+ unsigned insn_nr;
+
+ boolean allow32;
+};
+
+static void
+alloc_reg(struct nv50_pc *pc, struct nv50_reg *reg)
+{
+ int i = 0;
+
+ if (reg->type == P_RESULT) {
+ if (pc->p->cfg.high_result < (reg->hw + 1))
+ pc->p->cfg.high_result = reg->hw + 1;
+ }
+
+ if (reg->type != P_TEMP)
+ return;
+
+ if (reg->hw >= 0) {
+ /*XXX: do this here too to catch FP temp-as-attr usage..
+ * not clean, but works */
+ if (pc->p->cfg.high_temp < (reg->hw + 1))
+ pc->p->cfg.high_temp = reg->hw + 1;
+ return;
+ }
+
+ if (reg->rhw != -1) {
+ /* try to allocate temporary with index rhw first */
+ if (!(pc->r_temp[reg->rhw])) {
+ pc->r_temp[reg->rhw] = reg;
+ reg->hw = reg->rhw;
+ if (pc->p->cfg.high_temp < (reg->rhw + 1))
+ pc->p->cfg.high_temp = reg->rhw + 1;
+ return;
+ }
+		/* make sure we don't end up with swaps such as $r0
+		 * needing to go into $r1 while $r1 needs to go into $r0
+		 */
+ i = pc->result_nr * 4;
+ }
+
+ for (; i < NV50_SU_MAX_TEMP; i++) {
+ if (!(pc->r_temp[i])) {
+ pc->r_temp[i] = reg;
+ reg->hw = i;
+ if (pc->p->cfg.high_temp < (i + 1))
+ pc->p->cfg.high_temp = i + 1;
+ return;
+ }
+ }
+
+ assert(0);
+}
+
+static struct nv50_reg *
+alloc_temp(struct nv50_pc *pc, struct nv50_reg *dst)
+{
+ struct nv50_reg *r;
+ int i;
+
+ if (dst && dst->type == P_TEMP && dst->hw == -1)
+ return dst;
+
+ for (i = 0; i < NV50_SU_MAX_TEMP; i++) {
+ if (!pc->r_temp[i]) {
+ r = CALLOC_STRUCT(nv50_reg);
+ r->type = P_TEMP;
+ r->index = -1;
+ r->hw = i;
+ r->rhw = -1;
+ pc->r_temp[i] = r;
+ return r;
+ }
+ }
+
+ assert(0);
+ return NULL;
+}
+
+/* Assign the hw of the discarded temporary register src
+ * to the tgsi register dst and free src.
+ */
+static void
+assimilate_temp(struct nv50_pc *pc, struct nv50_reg *dst, struct nv50_reg *src)
+{
+ assert(src->index == -1 && src->hw != -1);
+
+ if (dst->hw != -1)
+ pc->r_temp[dst->hw] = NULL;
+ pc->r_temp[src->hw] = dst;
+ dst->hw = src->hw;
+
+ FREE(src);
+}
+
+/* release the hardware resource held by r */
+static void
+release_hw(struct nv50_pc *pc, struct nv50_reg *r)
+{
+ assert(r->type == P_TEMP);
+ if (r->hw == -1)
+ return;
+
+ assert(pc->r_temp[r->hw] == r);
+ pc->r_temp[r->hw] = NULL;
+
+ r->acc = 0;
+ if (r->index == -1)
+ FREE(r);
+}
+
+static void
+free_temp(struct nv50_pc *pc, struct nv50_reg *r)
+{
+ if (r->index == -1) {
+ unsigned hw = r->hw;
+
+ FREE(pc->r_temp[hw]);
+ pc->r_temp[hw] = NULL;
+ }
+}
+
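+/* Allocate four consecutive temporaries starting at a multiple of four;
+ * callers such as TEX want a contiguous, aligned register quad. */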
+static int
+alloc_temp4(struct nv50_pc *pc, struct nv50_reg *dst[4], int idx)
+{
+ int i;
+
+ if ((idx + 4) >= NV50_SU_MAX_TEMP)
+ return 1;
+
+ if (pc->r_temp[idx] || pc->r_temp[idx + 1] ||
+ pc->r_temp[idx + 2] || pc->r_temp[idx + 3])
+ return alloc_temp4(pc, dst, idx + 4);
+
+ for (i = 0; i < 4; i++) {
+ dst[i] = CALLOC_STRUCT(nv50_reg);
+ dst[i]->type = P_TEMP;
+ dst[i]->index = -1;
+ dst[i]->hw = idx + i;
+ pc->r_temp[idx + i] = dst[i];
+ }
+
+ return 0;
+}
+
+static void
+free_temp4(struct nv50_pc *pc, struct nv50_reg *reg[4])
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ free_temp(pc, reg[i]);
+}
+
+static struct nv50_reg *
+temp_temp(struct nv50_pc *pc)
+{
+ if (pc->temp_temp_nr >= 16)
+ assert(0);
+
+ pc->temp_temp[pc->temp_temp_nr] = alloc_temp(pc, NULL);
+ return pc->temp_temp[pc->temp_temp_nr++];
+}
+
+static void
+kill_temp_temp(struct nv50_pc *pc)
+{
+ int i;
+
+ for (i = 0; i < pc->temp_temp_nr; i++)
+ free_temp(pc, pc->temp_temp[i]);
+ pc->temp_temp_nr = 0;
+}
+
+static int
+ctor_immd(struct nv50_pc *pc, float x, float y, float z, float w)
+{
+ pc->immd_buf = REALLOC(pc->immd_buf, (pc->immd_nr * 4 * sizeof(float)),
+ (pc->immd_nr + 1) * 4 * sizeof(float));
+ pc->immd_buf[(pc->immd_nr * 4) + 0] = x;
+ pc->immd_buf[(pc->immd_nr * 4) + 1] = y;
+ pc->immd_buf[(pc->immd_nr * 4) + 2] = z;
+ pc->immd_buf[(pc->immd_nr * 4) + 3] = w;
+
+ return pc->immd_nr++;
+}
+
+static struct nv50_reg *
+alloc_immd(struct nv50_pc *pc, float f)
+{
+ struct nv50_reg *r = CALLOC_STRUCT(nv50_reg);
+ unsigned hw;
+
+ for (hw = 0; hw < pc->immd_nr * 4; hw++)
+ if (pc->immd_buf[hw] == f)
+ break;
+
+ if (hw == pc->immd_nr * 4)
+ hw = ctor_immd(pc, f, -f, 0.5 * f, 0) * 4;
+
+ r->type = P_IMMD;
+ r->hw = hw;
+ r->index = -1;
+ return r;
+}
+
+static struct nv50_program_exec *
+exec(struct nv50_pc *pc)
+{
+ struct nv50_program_exec *e = CALLOC_STRUCT(nv50_program_exec);
+
+ e->param.index = -1;
+ return e;
+}
+
+static void
+emit(struct nv50_pc *pc, struct nv50_program_exec *e)
+{
+ struct nv50_program *p = pc->p;
+
+ if (p->exec_tail)
+ p->exec_tail->next = e;
+ if (!p->exec_head)
+ p->exec_head = e;
+ p->exec_tail = e;
+ p->exec_size += (e->inst[0] & 1) ? 2 : 1;
+}
+
+static INLINE void set_long(struct nv50_pc *, struct nv50_program_exec *);
+
+static boolean
+is_long(struct nv50_program_exec *e)
+{
+ if (e->inst[0] & 1)
+ return TRUE;
+ return FALSE;
+}
+
+static boolean
+is_immd(struct nv50_program_exec *e)
+{
+ if (is_long(e) && (e->inst[1] & 3) == 3)
+ return TRUE;
+ return FALSE;
+}
+
+static INLINE void
+set_pred(struct nv50_pc *pc, unsigned pred, unsigned idx,
+ struct nv50_program_exec *e)
+{
+ set_long(pc, e);
+ e->inst[1] &= ~((0x1f << 7) | (0x3 << 12));
+ e->inst[1] |= (pred << 7) | (idx << 12);
+}
+
+static INLINE void
+set_pred_wr(struct nv50_pc *pc, unsigned on, unsigned idx,
+ struct nv50_program_exec *e)
+{
+ set_long(pc, e);
+ e->inst[1] &= ~((0x3 << 4) | (1 << 6));
+ e->inst[1] |= (idx << 4) | (on << 6);
+}
+
+static INLINE void
+set_long(struct nv50_pc *pc, struct nv50_program_exec *e)
+{
+ if (is_long(e))
+ return;
+
+ e->inst[0] |= 1;
+ set_pred(pc, 0xf, 0, e);
+ set_pred_wr(pc, 0, 0, e);
+}
+
+static INLINE void
+set_dst(struct nv50_pc *pc, struct nv50_reg *dst, struct nv50_program_exec *e)
+{
+ if (dst->type == P_RESULT) {
+ set_long(pc, e);
+ e->inst[1] |= 0x00000008;
+ }
+
+ alloc_reg(pc, dst);
+ e->inst[0] |= (dst->hw << 2);
+}
+
+static INLINE void
+set_immd(struct nv50_pc *pc, struct nv50_reg *imm, struct nv50_program_exec *e)
+{
+ float f = pc->immd_buf[imm->hw];
+ unsigned val = fui(imm->neg ? -f : f);
+
+ set_long(pc, e);
+ /*XXX: can't be predicated - bits overlap.. catch cases where both
+ * are required and avoid them. */
+ set_pred(pc, 0, 0, e);
+ set_pred_wr(pc, 0, 0, e);
+
+ e->inst[1] |= 0x00000002 | 0x00000001;
+ e->inst[0] |= (val & 0x3f) << 16;
+ e->inst[1] |= (val >> 6) << 2;
+}
+
+
+#define INTERP_LINEAR 0
+#define INTERP_FLAT 1
+#define INTERP_PERSPECTIVE 2
+#define INTERP_CENTROID 4
+
+/* interpolant index has been stored in dst->rhw */
+static void
+emit_interp(struct nv50_pc *pc, struct nv50_reg *dst, struct nv50_reg *iv,
+ unsigned mode)
+{
+	struct nv50_program_exec *e = exec(pc);
+
+	assert(dst->rhw != -1);
+
+ e->inst[0] |= 0x80000000;
+ set_dst(pc, dst, e);
+ e->inst[0] |= (dst->rhw << 16);
+
+ if (mode & INTERP_FLAT) {
+ e->inst[0] |= (1 << 8);
+ } else {
+ if (mode & INTERP_PERSPECTIVE) {
+ e->inst[0] |= (1 << 25);
+ alloc_reg(pc, iv);
+ e->inst[0] |= (iv->hw << 9);
+ }
+
+ if (mode & INTERP_CENTROID)
+ e->inst[0] |= (1 << 24);
+ }
+
+ emit(pc, e);
+}
+
+static void
+set_data(struct nv50_pc *pc, struct nv50_reg *src, unsigned m, unsigned s,
+ struct nv50_program_exec *e)
+{
+ set_long(pc, e);
+
+ e->param.index = src->hw;
+ e->param.shift = s;
+ e->param.mask = m << (s % 32);
+
+ e->inst[1] |= (((src->type == P_IMMD) ? 0 : 1) << 22);
+}
+
+static void
+emit_mov(struct nv50_pc *pc, struct nv50_reg *dst, struct nv50_reg *src)
+{
+ struct nv50_program_exec *e = exec(pc);
+
+ e->inst[0] |= 0x10000000;
+
+ set_dst(pc, dst, e);
+
+ if (pc->allow32 && dst->type != P_RESULT && src->type == P_IMMD) {
+ set_immd(pc, src, e);
+ /*XXX: 32-bit, but steals part of "half" reg space - need to
+ * catch and handle this case if/when we do half-regs
+ */
+ } else
+ if (src->type == P_IMMD || src->type == P_CONST) {
+ set_long(pc, e);
+ set_data(pc, src, 0x7f, 9, e);
+ e->inst[1] |= 0x20000000; /* src0 const? */
+ } else {
+ if (src->type == P_ATTR) {
+ set_long(pc, e);
+ e->inst[1] |= 0x00200000;
+ }
+
+ alloc_reg(pc, src);
+ e->inst[0] |= (src->hw << 9);
+ }
+
+ if (is_long(e) && !is_immd(e)) {
+ e->inst[1] |= 0x04000000; /* 32-bit */
+ e->inst[1] |= 0x0000c000; /* "subsubop" 0x3 */
+ if (!(e->inst[1] & 0x20000000))
+ e->inst[1] |= 0x00030000; /* "subsubop" 0xf */
+ } else
+ e->inst[0] |= 0x00008000;
+
+ emit(pc, e);
+}
+
+static INLINE void
+emit_mov_immdval(struct nv50_pc *pc, struct nv50_reg *dst, float f)
+{
+ struct nv50_reg *imm = alloc_immd(pc, f);
+ emit_mov(pc, dst, imm);
+ FREE(imm);
+}
+
+static boolean
+check_swap_src_0_1(struct nv50_pc *pc,
+ struct nv50_reg **s0, struct nv50_reg **s1)
+{
+ struct nv50_reg *src0 = *s0, *src1 = *s1;
+
+ if (src0->type == P_CONST) {
+ if (src1->type != P_CONST) {
+ *s0 = src1;
+ *s1 = src0;
+ return TRUE;
+ }
+ } else
+ if (src1->type == P_ATTR) {
+ if (src0->type != P_ATTR) {
+ *s0 = src1;
+ *s1 = src0;
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+static void
+set_src_0(struct nv50_pc *pc, struct nv50_reg *src, struct nv50_program_exec *e)
+{
+ if (src->type == P_ATTR) {
+ set_long(pc, e);
+ e->inst[1] |= 0x00200000;
+ } else
+ if (src->type == P_CONST || src->type == P_IMMD) {
+ struct nv50_reg *temp = temp_temp(pc);
+
+ emit_mov(pc, temp, src);
+ src = temp;
+ }
+
+ alloc_reg(pc, src);
+ e->inst[0] |= (src->hw << 9);
+}
+
+static void
+set_src_1(struct nv50_pc *pc, struct nv50_reg *src, struct nv50_program_exec *e)
+{
+ if (src->type == P_ATTR) {
+ struct nv50_reg *temp = temp_temp(pc);
+
+ emit_mov(pc, temp, src);
+ src = temp;
+ } else
+ if (src->type == P_CONST || src->type == P_IMMD) {
+ assert(!(e->inst[0] & 0x00800000));
+ if (e->inst[0] & 0x01000000) {
+ struct nv50_reg *temp = temp_temp(pc);
+
+ emit_mov(pc, temp, src);
+ src = temp;
+ } else {
+ set_data(pc, src, 0x7f, 16, e);
+ e->inst[0] |= 0x00800000;
+ }
+ }
+
+ alloc_reg(pc, src);
+ e->inst[0] |= (src->hw << 16);
+}
+
+static void
+set_src_2(struct nv50_pc *pc, struct nv50_reg *src, struct nv50_program_exec *e)
+{
+ set_long(pc, e);
+
+ if (src->type == P_ATTR) {
+ struct nv50_reg *temp = temp_temp(pc);
+
+ emit_mov(pc, temp, src);
+ src = temp;
+ } else
+ if (src->type == P_CONST || src->type == P_IMMD) {
+ assert(!(e->inst[0] & 0x01000000));
+ if (e->inst[0] & 0x00800000) {
+ struct nv50_reg *temp = temp_temp(pc);
+
+ emit_mov(pc, temp, src);
+ src = temp;
+ } else {
+ set_data(pc, src, 0x7f, 32+14, e);
+ e->inst[0] |= 0x01000000;
+ }
+ }
+
+ alloc_reg(pc, src);
+ e->inst[1] |= (src->hw << 14);
+}
+
+static void
+emit_mul(struct nv50_pc *pc, struct nv50_reg *dst, struct nv50_reg *src0,
+ struct nv50_reg *src1)
+{
+ struct nv50_program_exec *e = exec(pc);
+
+ e->inst[0] |= 0xc0000000;
+
+ if (!pc->allow32)
+ set_long(pc, e);
+
+ check_swap_src_0_1(pc, &src0, &src1);
+ set_dst(pc, dst, e);
+ set_src_0(pc, src0, e);
+ if (src1->type == P_IMMD && !is_long(e)) {
+ if (src0->neg)
+ e->inst[0] |= 0x00008000;
+ set_immd(pc, src1, e);
+ } else {
+ set_src_1(pc, src1, e);
+ if (src0->neg ^ src1->neg) {
+ if (is_long(e))
+ e->inst[1] |= 0x08000000;
+ else
+ e->inst[0] |= 0x00008000;
+ }
+ }
+
+ emit(pc, e);
+}
+
+static void
+emit_add(struct nv50_pc *pc, struct nv50_reg *dst,
+ struct nv50_reg *src0, struct nv50_reg *src1)
+{
+ struct nv50_program_exec *e = exec(pc);
+
+ e->inst[0] |= 0xb0000000;
+
+ check_swap_src_0_1(pc, &src0, &src1);
+
+ if (!pc->allow32 || src0->neg || src1->neg) {
+ set_long(pc, e);
+ e->inst[1] |= (src0->neg << 26) | (src1->neg << 27);
+ }
+
+ set_dst(pc, dst, e);
+ set_src_0(pc, src0, e);
+ if (src1->type == P_CONST || src1->type == P_ATTR || is_long(e))
+ set_src_2(pc, src1, e);
+ else
+ if (src1->type == P_IMMD)
+ set_immd(pc, src1, e);
+ else
+ set_src_1(pc, src1, e);
+
+ emit(pc, e);
+}
+
+static void
+emit_minmax(struct nv50_pc *pc, unsigned sub, struct nv50_reg *dst,
+ struct nv50_reg *src0, struct nv50_reg *src1)
+{
+ struct nv50_program_exec *e = exec(pc);
+
+ set_long(pc, e);
+ e->inst[0] |= 0xb0000000;
+ e->inst[1] |= (sub << 29);
+
+ check_swap_src_0_1(pc, &src0, &src1);
+ set_dst(pc, dst, e);
+ set_src_0(pc, src0, e);
+ set_src_1(pc, src1, e);
+
+ emit(pc, e);
+}
+
+static INLINE void
+emit_sub(struct nv50_pc *pc, struct nv50_reg *dst, struct nv50_reg *src0,
+ struct nv50_reg *src1)
+{
+ src1->neg ^= 1;
+ emit_add(pc, dst, src0, src1);
+ src1->neg ^= 1;
+}
+
+static void
+emit_mad(struct nv50_pc *pc, struct nv50_reg *dst, struct nv50_reg *src0,
+ struct nv50_reg *src1, struct nv50_reg *src2)
+{
+ struct nv50_program_exec *e = exec(pc);
+
+ e->inst[0] |= 0xe0000000;
+
+ check_swap_src_0_1(pc, &src0, &src1);
+ set_dst(pc, dst, e);
+ set_src_0(pc, src0, e);
+ set_src_1(pc, src1, e);
+ set_src_2(pc, src2, e);
+
+ if (src0->neg ^ src1->neg)
+ e->inst[1] |= 0x04000000;
+ if (src2->neg)
+ e->inst[1] |= 0x08000000;
+
+ emit(pc, e);
+}
+
+static INLINE void
+emit_msb(struct nv50_pc *pc, struct nv50_reg *dst, struct nv50_reg *src0,
+ struct nv50_reg *src1, struct nv50_reg *src2)
+{
+ src2->neg ^= 1;
+ emit_mad(pc, dst, src0, src1, src2);
+ src2->neg ^= 1;
+}
+
+static void
+emit_flop(struct nv50_pc *pc, unsigned sub,
+ struct nv50_reg *dst, struct nv50_reg *src)
+{
+ struct nv50_program_exec *e = exec(pc);
+
+ e->inst[0] |= 0x90000000;
+ if (sub) {
+ set_long(pc, e);
+ e->inst[1] |= (sub << 29);
+ }
+
+ set_dst(pc, dst, e);
+ set_src_0(pc, src, e);
+
+ emit(pc, e);
+}
+
+static void
+emit_preex2(struct nv50_pc *pc, struct nv50_reg *dst, struct nv50_reg *src)
+{
+ struct nv50_program_exec *e = exec(pc);
+
+ e->inst[0] |= 0xb0000000;
+
+ set_dst(pc, dst, e);
+ set_src_0(pc, src, e);
+ set_long(pc, e);
+ e->inst[1] |= (6 << 29) | 0x00004000;
+
+ emit(pc, e);
+}
+
+static void
+emit_precossin(struct nv50_pc *pc, struct nv50_reg *dst, struct nv50_reg *src)
+{
+ struct nv50_program_exec *e = exec(pc);
+
+ e->inst[0] |= 0xb0000000;
+
+ set_dst(pc, dst, e);
+ set_src_0(pc, src, e);
+ set_long(pc, e);
+ e->inst[1] |= (6 << 29);
+
+ emit(pc, e);
+}
+
+#define CVTOP_RN 0x01
+#define CVTOP_FLOOR 0x03
+#define CVTOP_CEIL 0x05
+#define CVTOP_TRUNC 0x07
+#define CVTOP_SAT 0x08
+#define CVTOP_ABS 0x10
+
+#define CVT_F32_F32 0xc4
+#define CVT_F32_S32 0x44
+#define CVT_F32_U32 0x64
+#define CVT_S32_F32 0x8c
+#define CVT_S32_S32 0x0c
+#define CVT_F32_F32_ROP 0xcc
+
+static void
+emit_cvt(struct nv50_pc *pc, struct nv50_reg *dst, struct nv50_reg *src,
+ int wp, unsigned cop, unsigned fmt)
+{
+ struct nv50_program_exec *e;
+
+ e = exec(pc);
+ set_long(pc, e);
+
+ e->inst[0] |= 0xa0000000;
+ e->inst[1] |= 0x00004000;
+ e->inst[1] |= (cop << 16);
+ e->inst[1] |= (fmt << 24);
+ set_src_0(pc, src, e);
+
+ if (wp >= 0)
+ set_pred_wr(pc, 1, wp, e);
+
+ if (dst)
+ set_dst(pc, dst, e);
+ else {
+ e->inst[0] |= 0x000001fc;
+ e->inst[1] |= 0x00000008;
+ }
+
+ emit(pc, e);
+}
+
+static void
+emit_set(struct nv50_pc *pc, unsigned c_op, struct nv50_reg *dst,
+ struct nv50_reg *src0, struct nv50_reg *src1)
+{
+ struct nv50_program_exec *e = exec(pc);
+ unsigned inv_cop[8] = { 0, 4, 2, 6, 1, 5, 3, 7 };
+ struct nv50_reg *rdst;
+
+ assert(c_op <= 7);
+ if (check_swap_src_0_1(pc, &src0, &src1))
+ c_op = inv_cop[c_op];
+
+ rdst = dst;
+ if (dst->type != P_TEMP)
+ dst = alloc_temp(pc, NULL);
+
+ /* set.u32 */
+ set_long(pc, e);
+ e->inst[0] |= 0xb0000000;
+ e->inst[1] |= (3 << 29);
+ e->inst[1] |= (c_op << 14);
+	/*XXX: the line below breaks things - is .u32 the default?
+	 * 	decuda will disasm this as .u16 and use .lo/.hi regs, but that
+	 * 	doesn't seem to match what the hw actually does.
+	 inst[1] |= 0x04000000;
+	 */
+ set_dst(pc, dst, e);
+ set_src_0(pc, src0, e);
+ set_src_1(pc, src1, e);
+ emit(pc, e);
+
+ /* cvt.f32.u32 */
+ e = exec(pc);
+ e->inst[0] = 0xa0000001;
+ e->inst[1] = 0x64014780;
+ set_dst(pc, rdst, e);
+ set_src_0(pc, dst, e);
+ emit(pc, e);
+
+ if (dst != rdst)
+ free_temp(pc, dst);
+}
+
+static INLINE void
+emit_flr(struct nv50_pc *pc, struct nv50_reg *dst, struct nv50_reg *src)
+{
+ emit_cvt(pc, dst, src, -1, CVTOP_FLOOR, CVT_F32_F32_ROP);
+}
+
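+/* Compute v^e as 2^(e * log2(v)): log2, multiply, pre-exp, exp2. */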
+static void
+emit_pow(struct nv50_pc *pc, struct nv50_reg *dst,
+ struct nv50_reg *v, struct nv50_reg *e)
+{
+ struct nv50_reg *temp = alloc_temp(pc, NULL);
+
+ emit_flop(pc, 3, temp, v);
+ emit_mul(pc, temp, temp, e);
+ emit_preex2(pc, temp, temp);
+ emit_flop(pc, 6, dst, temp);
+
+ free_temp(pc, temp);
+}
+
+static INLINE void
+emit_abs(struct nv50_pc *pc, struct nv50_reg *dst, struct nv50_reg *src)
+{
+ emit_cvt(pc, dst, src, -1, CVTOP_ABS, CVT_F32_F32);
+}
+
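+/* LIT: dst = (1, max(src.x, 0), src.x > 0 ? max(src.y, 0)^src.w : 0, 1),
+ * with the exponent clamped to (-128, 128) per the ARB spec. */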
+static void
+emit_lit(struct nv50_pc *pc, struct nv50_reg **dst, unsigned mask,
+ struct nv50_reg **src)
+{
+ struct nv50_reg *one = alloc_immd(pc, 1.0);
+ struct nv50_reg *zero = alloc_immd(pc, 0.0);
+ struct nv50_reg *neg128 = alloc_immd(pc, -127.999999);
+ struct nv50_reg *pos128 = alloc_immd(pc, 127.999999);
+ struct nv50_reg *tmp[4];
+ boolean allow32 = pc->allow32;
+
+ pc->allow32 = FALSE;
+
+ if (mask & (3 << 1)) {
+ tmp[0] = alloc_temp(pc, NULL);
+ emit_minmax(pc, 4, tmp[0], src[0], zero);
+ }
+
+ if (mask & (1 << 2)) {
+ set_pred_wr(pc, 1, 0, pc->p->exec_tail);
+
+ tmp[1] = temp_temp(pc);
+ emit_minmax(pc, 4, tmp[1], src[1], zero);
+
+ tmp[3] = temp_temp(pc);
+ emit_minmax(pc, 4, tmp[3], src[3], neg128);
+ emit_minmax(pc, 5, tmp[3], tmp[3], pos128);
+
+ emit_pow(pc, dst[2], tmp[1], tmp[3]);
+ emit_mov(pc, dst[2], zero);
+ set_pred(pc, 3, 0, pc->p->exec_tail);
+ }
+
+ if (mask & (1 << 1))
+ assimilate_temp(pc, dst[1], tmp[0]);
+ else
+ if (mask & (1 << 2))
+ free_temp(pc, tmp[0]);
+
+ pc->allow32 = allow32;
+
+ /* do this last, in case src[i,j] == dst[0,3] */
+ if (mask & (1 << 0))
+ emit_mov(pc, dst[0], one);
+
+ if (mask & (1 << 3))
+ emit_mov(pc, dst[3], one);
+
+ FREE(pos128);
+ FREE(neg128);
+ FREE(zero);
+ FREE(one);
+}
+
+static void
+emit_neg(struct nv50_pc *pc, struct nv50_reg *dst, struct nv50_reg *src)
+{
+ struct nv50_program_exec *e = exec(pc);
+
+ set_long(pc, e);
+ e->inst[0] |= 0xa0000000; /* delta */
+ e->inst[1] |= (7 << 29); /* delta */
+ e->inst[1] |= 0x04000000; /* negate arg0? probably not */
+ e->inst[1] |= (1 << 14); /* src .f32 */
+ set_dst(pc, dst, e);
+ set_src_0(pc, src, e);
+
+ emit(pc, e);
+}
+
+static void
+emit_kil(struct nv50_pc *pc, struct nv50_reg *src)
+{
+ struct nv50_program_exec *e;
+ const int r_pred = 1;
+
+ /* Sets predicate reg ? */
+ e = exec(pc);
+ e->inst[0] = 0xa00001fd;
+ e->inst[1] = 0xc4014788;
+ set_src_0(pc, src, e);
+ set_pred_wr(pc, 1, r_pred, e);
+ if (src->neg)
+ e->inst[1] |= 0x20000000;
+ emit(pc, e);
+
+ /* This is probably KILP */
+ e = exec(pc);
+ e->inst[0] = 0x000001fe;
+ set_long(pc, e);
+ set_pred(pc, 1 /* LT? */, r_pred, e);
+ emit(pc, e);
+}
+
+static void
+emit_tex(struct nv50_pc *pc, struct nv50_reg **dst, unsigned mask,
+ struct nv50_reg **src, unsigned unit, unsigned type, boolean proj)
+{
+ struct nv50_reg *temp, *t[4];
+ struct nv50_program_exec *e;
+
+ unsigned c, mode, dim;
+
+ switch (type) {
+ case TGSI_TEXTURE_1D:
+ dim = 1;
+ break;
+ case TGSI_TEXTURE_UNKNOWN:
+ case TGSI_TEXTURE_2D:
+ case TGSI_TEXTURE_SHADOW1D: /* XXX: x, z */
+ case TGSI_TEXTURE_RECT:
+ dim = 2;
+ break;
+ case TGSI_TEXTURE_3D:
+ case TGSI_TEXTURE_CUBE:
+ case TGSI_TEXTURE_SHADOW2D:
+ case TGSI_TEXTURE_SHADOWRECT: /* XXX */
+ dim = 3;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ /* some cards need t[0]'s hw index to be a multiple of 4 */
+ alloc_temp4(pc, t, 0);
+
+ if (proj) {
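+ /* projective texturing: scale the coordinates by 1/w first (flop 0
+ * is what the RCP case below uses as well) */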
+ if (src[0]->type == P_TEMP && src[0]->rhw != -1) {
+ mode = pc->interp_mode[src[0]->index];
+
+ t[3]->rhw = src[3]->rhw;
+ emit_interp(pc, t[3], NULL, (mode & INTERP_CENTROID));
+ emit_flop(pc, 0, t[3], t[3]);
+
+ for (c = 0; c < dim; c++) {
+ t[c]->rhw = src[c]->rhw;
+ emit_interp(pc, t[c], t[3],
+ (mode | INTERP_PERSPECTIVE));
+ }
+ } else {
+ emit_flop(pc, 0, t[3], src[3]);
+ for (c = 0; c < dim; c++)
+ emit_mul(pc, t[c], src[c], t[3]);
+
+ /* XXX: for some reason the blob sometimes uses MAD:
+ * emit_mad(pc, t[c], src[0][c], t[3], t[3])
+ * pc->p->exec_tail->inst[1] |= 0x080fc000;
+ */
+ }
+ } else {
+ if (type == TGSI_TEXTURE_CUBE) {
+ temp = temp_temp(pc);
+ emit_minmax(pc, 4, temp, src[0], src[1]);
+ emit_minmax(pc, 4, temp, temp, src[2]);
+ emit_flop(pc, 0, temp, temp);
+ for (c = 0; c < 3; c++)
+ emit_mul(pc, t[c], src[c], temp);
+ } else {
+ for (c = 0; c < dim; c++)
+ emit_mov(pc, t[c], src[c]);
+ }
+ }
+
+ e = exec(pc);
+ set_long(pc, e);
+ e->inst[0] |= 0xf0000000;
+ e->inst[1] |= 0x00000004;
+ set_dst(pc, t[0], e);
+ e->inst[0] |= (unit << 9);
+
+ if (dim == 2)
+ e->inst[0] |= 0x00400000;
+ else
+ if (dim == 3)
+ e->inst[0] |= 0x00800000;
+
+ e->inst[0] |= (mask & 0x3) << 25;
+ e->inst[1] |= (mask & 0xc) << 12;
+
+ emit(pc, e);
+
+#if 1
+ if (mask & 1) emit_mov(pc, dst[0], t[0]);
+ if (mask & 2) emit_mov(pc, dst[1], t[1]);
+ if (mask & 4) emit_mov(pc, dst[2], t[2]);
+ if (mask & 8) emit_mov(pc, dst[3], t[3]);
+
+ free_temp4(pc, t);
+#else
+ /* XXX: if e.g. a MUL is issued directly after the TEX, it would still
+ * use the texture coordinates, not the fetched values: latency? */
+
+ for (c = 0; c < 4; c++) {
+ if (mask & (1 << c))
+ assimilate_temp(pc, dst[c], t[c]);
+ else
+ free_temp(pc, t[c]);
+ }
+#endif
+}
+
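+/* Rewrite a half (32-bit) instruction into its long (64-bit) form,
+ * relocating the fields whose positions differ between the two encodings
+ * (the per-opcode masks and shifts below are taken as given).
+ */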
+static void
+convert_to_long(struct nv50_pc *pc, struct nv50_program_exec *e)
+{
+ unsigned q = 0, m = ~0;
+
+ assert(!is_long(e));
+
+ switch (e->inst[0] >> 28) {
+ case 0x1:
+ /* MOV */
+ q = 0x0403c000;
+ m = 0xffff7fff;
+ break;
+ case 0x8:
+ /* INTERP (move centroid, perspective and flat bits) */
+ m = ~0x03000100;
+ q = (e->inst[0] & (3 << 24)) >> (24 - 16);
+ q |= (e->inst[0] & (1 << 8)) << (18 - 8);
+ break;
+ case 0x9:
+ /* RCP */
+ break;
+ case 0xB:
+ /* ADD */
+ m = ~(127 << 16);
+ q = ((e->inst[0] & (~m)) >> 2);
+ break;
+ case 0xC:
+ /* MUL */
+ m = ~0x00008000;
+ q = ((e->inst[0] & (~m)) << 12);
+ break;
+ case 0xE:
+ /* MAD (if src2 == dst) */
+ q = ((e->inst[0] & 0x1fc) << 12);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ set_long(pc, e);
+ pc->p->exec_size++;
+
+ e->inst[0] &= m;
+ e->inst[1] |= q;
+}
+
+static boolean
+negate_supported(const struct tgsi_full_instruction *insn, int i)
+{
+ switch (insn->Instruction.Opcode) {
+ case TGSI_OPCODE_DP3:
+ case TGSI_OPCODE_DP4:
+ case TGSI_OPCODE_MUL:
+ case TGSI_OPCODE_KIL:
+ case TGSI_OPCODE_ADD:
+ case TGSI_OPCODE_SUB:
+ case TGSI_OPCODE_MAD:
+ return TRUE;
+ case TGSI_OPCODE_POW:
+ return (i == 1) ? TRUE : FALSE;
+ default:
+ return FALSE;
+ }
+}
+
+static struct nv50_reg *
+tgsi_dst(struct nv50_pc *pc, int c, const struct tgsi_full_dst_register *dst)
+{
+ switch (dst->DstRegister.File) {
+ case TGSI_FILE_TEMPORARY:
+ return &pc->temp[dst->DstRegister.Index * 4 + c];
+ case TGSI_FILE_OUTPUT:
+ return &pc->result[dst->DstRegister.Index * 4 + c];
+ case TGSI_FILE_NULL:
+ return NULL;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
+static struct nv50_reg *
+tgsi_src(struct nv50_pc *pc, int chan, const struct tgsi_full_src_register *src,
+ boolean neg)
+{
+ struct nv50_reg *r = NULL;
+ struct nv50_reg *temp;
+ unsigned sgn, c;
+
+ sgn = tgsi_util_get_full_src_register_sign_mode(src, chan);
+
+ c = tgsi_util_get_full_src_register_extswizzle(src, chan);
+ switch (c) {
+ case TGSI_EXTSWIZZLE_X:
+ case TGSI_EXTSWIZZLE_Y:
+ case TGSI_EXTSWIZZLE_Z:
+ case TGSI_EXTSWIZZLE_W:
+ switch (src->SrcRegister.File) {
+ case TGSI_FILE_INPUT:
+ r = &pc->attr[src->SrcRegister.Index * 4 + c];
+ break;
+ case TGSI_FILE_TEMPORARY:
+ r = &pc->temp[src->SrcRegister.Index * 4 + c];
+ break;
+ case TGSI_FILE_CONSTANT:
+ r = &pc->param[src->SrcRegister.Index * 4 + c];
+ break;
+ case TGSI_FILE_IMMEDIATE:
+ r = &pc->immd[src->SrcRegister.Index * 4 + c];
+ break;
+ case TGSI_FILE_SAMPLER:
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ break;
+ case TGSI_EXTSWIZZLE_ZERO:
+ r = alloc_immd(pc, 0.0);
+ return r;
+ case TGSI_EXTSWIZZLE_ONE:
+ if (sgn == TGSI_UTIL_SIGN_TOGGLE || sgn == TGSI_UTIL_SIGN_SET)
+ return alloc_immd(pc, -1.0);
+ return alloc_immd(pc, 1.0);
+ default:
+ assert(0);
+ break;
+ }
+
+ switch (sgn) {
+ case TGSI_UTIL_SIGN_KEEP:
+ break;
+ case TGSI_UTIL_SIGN_CLEAR:
+ temp = temp_temp(pc);
+ emit_abs(pc, temp, r);
+ r = temp;
+ break;
+ case TGSI_UTIL_SIGN_TOGGLE:
+ if (neg)
+ r->neg = 1;
+ else {
+ temp = temp_temp(pc);
+ emit_neg(pc, temp, r);
+ r = temp;
+ }
+ break;
+ case TGSI_UTIL_SIGN_SET:
+ temp = temp_temp(pc);
+ emit_abs(pc, temp, r);
+ if (neg)
+ temp->neg = 1;
+ else
+ emit_neg(pc, temp, temp);
+ r = temp;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ return r;
+}
+
+/* Returns TRUE if the instruction writes its destination channels directly
+ * as it goes and thus may overwrite sources before all channels read them. */
+static boolean
+direct2dest_op(const struct tgsi_full_instruction *insn)
+{
+ if (insn->Instruction.Saturate)
+ return FALSE;
+
+ switch (insn->Instruction.Opcode) {
+ case TGSI_OPCODE_COS:
+ case TGSI_OPCODE_DP3:
+ case TGSI_OPCODE_DP4:
+ case TGSI_OPCODE_DPH:
+ case TGSI_OPCODE_KIL:
+ case TGSI_OPCODE_LIT:
+ case TGSI_OPCODE_POW:
+ case TGSI_OPCODE_RCP:
+ case TGSI_OPCODE_RSQ:
+ case TGSI_OPCODE_SCS:
+ case TGSI_OPCODE_SIN:
+ case TGSI_OPCODE_TEX:
+ case TGSI_OPCODE_TXP:
+ return FALSE;
+ default:
+ return TRUE;
+ }
+}
+
+static boolean
+nv50_program_tx_insn(struct nv50_pc *pc, const union tgsi_full_token *tok)
+{
+ const struct tgsi_full_instruction *inst = &tok->FullInstruction;
+ struct nv50_reg *rdst[4], *dst[4], *src[3][4], *temp;
+ unsigned mask, sat, unit;
+ boolean assimilate = FALSE;
+ int i, c;
+
+ mask = inst->FullDstRegisters[0].DstRegister.WriteMask;
+ sat = inst->Instruction.Saturate == TGSI_SAT_ZERO_ONE;
+
+ for (c = 0; c < 4; c++) {
+ if (mask & (1 << c))
+ dst[c] = tgsi_dst(pc, c, &inst->FullDstRegisters[0]);
+ else
+ dst[c] = NULL;
+ rdst[c] = NULL;
+ src[0][c] = NULL;
+ src[1][c] = NULL;
+ src[2][c] = NULL;
+ }
+
+ for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
+ const struct tgsi_full_src_register *fs = &inst->FullSrcRegisters[i];
+
+ if (fs->SrcRegister.File == TGSI_FILE_SAMPLER)
+ unit = fs->SrcRegister.Index;
+
+ for (c = 0; c < 4; c++)
+ src[i][c] = tgsi_src(pc, c, fs,
+ negate_supported(inst, i));
+ }
+
+ if (sat) {
+ for (c = 0; c < 4; c++) {
+ rdst[c] = dst[c];
+ dst[c] = temp_temp(pc);
+ }
+ } else
+ if (direct2dest_op(inst)) {
+ for (c = 0; c < 4; c++) {
+ if (!dst[c] || dst[c]->type != P_TEMP)
+ continue;
+
+ for (i = c + 1; i < 4; i++) {
+ if (dst[c] == src[0][i] ||
+ dst[c] == src[1][i] ||
+ dst[c] == src[2][i])
+ break;
+ }
+ if (i == 4)
+ continue;
+
+ assimilate = TRUE;
+ rdst[c] = dst[c];
+ dst[c] = alloc_temp(pc, NULL);
+ }
+ }
+
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_ABS:
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_abs(pc, dst[c], src[0][c]);
+ }
+ break;
+ case TGSI_OPCODE_ADD:
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_add(pc, dst[c], src[0][c], src[1][c]);
+ }
+ break;
+ case TGSI_OPCODE_COS:
+ temp = temp_temp(pc);
+ emit_precossin(pc, temp, src[0][0]);
+ emit_flop(pc, 5, temp, temp);
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_mov(pc, dst[c], temp);
+ }
+ break;
+ case TGSI_OPCODE_DP3:
+ temp = temp_temp(pc);
+ emit_mul(pc, temp, src[0][0], src[1][0]);
+ emit_mad(pc, temp, src[0][1], src[1][1], temp);
+ emit_mad(pc, temp, src[0][2], src[1][2], temp);
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_mov(pc, dst[c], temp);
+ }
+ break;
+ case TGSI_OPCODE_DP4:
+ temp = temp_temp(pc);
+ emit_mul(pc, temp, src[0][0], src[1][0]);
+ emit_mad(pc, temp, src[0][1], src[1][1], temp);
+ emit_mad(pc, temp, src[0][2], src[1][2], temp);
+ emit_mad(pc, temp, src[0][3], src[1][3], temp);
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_mov(pc, dst[c], temp);
+ }
+ break;
+ case TGSI_OPCODE_DPH:
+ temp = temp_temp(pc);
+ emit_mul(pc, temp, src[0][0], src[1][0]);
+ emit_mad(pc, temp, src[0][1], src[1][1], temp);
+ emit_mad(pc, temp, src[0][2], src[1][2], temp);
+ emit_add(pc, temp, src[1][3], temp);
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_mov(pc, dst[c], temp);
+ }
+ break;
+ case TGSI_OPCODE_DST:
+ {
+ struct nv50_reg *one = alloc_immd(pc, 1.0);
+ if (mask & (1 << 0))
+ emit_mov(pc, dst[0], one);
+ if (mask & (1 << 1))
+ emit_mul(pc, dst[1], src[0][1], src[1][1]);
+ if (mask & (1 << 2))
+ emit_mov(pc, dst[2], src[0][2]);
+ if (mask & (1 << 3))
+ emit_mov(pc, dst[3], src[1][3]);
+ FREE(one);
+ }
+ break;
+ case TGSI_OPCODE_EX2:
+ temp = temp_temp(pc);
+ emit_preex2(pc, temp, src[0][0]);
+ emit_flop(pc, 6, temp, temp);
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_mov(pc, dst[c], temp);
+ }
+ break;
+ case TGSI_OPCODE_FLR:
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_flr(pc, dst[c], src[0][c]);
+ }
+ break;
+ case TGSI_OPCODE_FRC:
+ temp = temp_temp(pc);
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_flr(pc, temp, src[0][c]);
+ emit_sub(pc, dst[c], src[0][c], temp);
+ }
+ break;
+ case TGSI_OPCODE_KIL:
+ emit_kil(pc, src[0][0]);
+ emit_kil(pc, src[0][1]);
+ emit_kil(pc, src[0][2]);
+ emit_kil(pc, src[0][3]);
+ pc->p->cfg.fp.regs[2] |= 0x00100000;
+ break;
+ case TGSI_OPCODE_LIT:
+ emit_lit(pc, &dst[0], mask, &src[0][0]);
+ break;
+ case TGSI_OPCODE_LG2:
+ temp = temp_temp(pc);
+ emit_flop(pc, 3, temp, src[0][0]);
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_mov(pc, dst[c], temp);
+ }
+ break;
+ case TGSI_OPCODE_LRP:
+ temp = temp_temp(pc);
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_sub(pc, temp, src[1][c], src[2][c]);
+ emit_mad(pc, dst[c], temp, src[0][c], src[2][c]);
+ }
+ break;
+ case TGSI_OPCODE_MAD:
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_mad(pc, dst[c], src[0][c], src[1][c], src[2][c]);
+ }
+ break;
+ case TGSI_OPCODE_MAX:
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_minmax(pc, 4, dst[c], src[0][c], src[1][c]);
+ }
+ break;
+ case TGSI_OPCODE_MIN:
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_minmax(pc, 5, dst[c], src[0][c], src[1][c]);
+ }
+ break;
+ case TGSI_OPCODE_MOV:
+ case TGSI_OPCODE_SWZ:
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_mov(pc, dst[c], src[0][c]);
+ }
+ break;
+ case TGSI_OPCODE_MUL:
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_mul(pc, dst[c], src[0][c], src[1][c]);
+ }
+ break;
+ case TGSI_OPCODE_POW:
+ temp = temp_temp(pc);
+ emit_pow(pc, temp, src[0][0], src[1][0]);
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_mov(pc, dst[c], temp);
+ }
+ break;
+ case TGSI_OPCODE_RCP:
+ for (c = 3; c >= 0; c--) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_flop(pc, 0, dst[c], src[0][0]);
+ }
+ break;
+ case TGSI_OPCODE_RSQ:
+ for (c = 3; c >= 0; c--) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_flop(pc, 2, dst[c], src[0][0]);
+ }
+ break;
+ case TGSI_OPCODE_SCS:
+ temp = temp_temp(pc);
+ emit_precossin(pc, temp, src[0][0]);
+ if (mask & (1 << 0))
+ emit_flop(pc, 5, dst[0], temp);
+ if (mask & (1 << 1))
+ emit_flop(pc, 4, dst[1], temp);
+ if (mask & (1 << 2))
+ emit_mov_immdval(pc, dst[2], 0.0);
+ if (mask & (1 << 3))
+ emit_mov_immdval(pc, dst[3], 1.0);
+ break;
+ case TGSI_OPCODE_SGE:
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_set(pc, 6, dst[c], src[0][c], src[1][c]);
+ }
+ break;
+ case TGSI_OPCODE_SIN:
+ temp = temp_temp(pc);
+ emit_precossin(pc, temp, src[0][0]);
+ emit_flop(pc, 4, temp, temp);
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_mov(pc, dst[c], temp);
+ }
+ break;
+ case TGSI_OPCODE_SLT:
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_set(pc, 1, dst[c], src[0][c], src[1][c]);
+ }
+ break;
+ case TGSI_OPCODE_SUB:
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_sub(pc, dst[c], src[0][c], src[1][c]);
+ }
+ break;
+ case TGSI_OPCODE_TEX:
+ emit_tex(pc, dst, mask, src[0], unit,
+ inst->InstructionExtTexture.Texture, FALSE);
+ break;
+ case TGSI_OPCODE_TXP:
+ emit_tex(pc, dst, mask, src[0], unit,
+ inst->InstructionExtTexture.Texture, TRUE);
+ break;
+ case TGSI_OPCODE_XPD:
+ temp = temp_temp(pc);
+ if (mask & (1 << 0)) {
+ emit_mul(pc, temp, src[0][2], src[1][1]);
+ emit_msb(pc, dst[0], src[0][1], src[1][2], temp);
+ }
+ if (mask & (1 << 1)) {
+ emit_mul(pc, temp, src[0][0], src[1][2]);
+ emit_msb(pc, dst[1], src[0][2], src[1][0], temp);
+ }
+ if (mask & (1 << 2)) {
+ emit_mul(pc, temp, src[0][1], src[1][0]);
+ emit_msb(pc, dst[2], src[0][0], src[1][1], temp);
+ }
+ if (mask & (1 << 3))
+ emit_mov_immdval(pc, dst[3], 1.0);
+ break;
+ case TGSI_OPCODE_END:
+ break;
+ default:
+ NOUVEAU_ERR("invalid opcode %d\n", inst->Instruction.Opcode);
+ return FALSE;
+ }
+
+ if (sat) {
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ emit_cvt(pc, rdst[c], dst[c], -1, CVTOP_SAT,
+ CVT_F32_F32);
+ }
+ } else if (assimilate) {
+ for (c = 0; c < 4; c++)
+ if (rdst[c])
+ assimilate_temp(pc, rdst[c], dst[c]);
+ }
+
+ for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
+ for (c = 0; c < 4; c++) {
+ if (!src[i][c])
+ continue;
+ if (src[i][c]->index == -1 && src[i][c]->type == P_IMMD)
+ FREE(src[i][c]);
+ else
+ if (src[i][c]->acc == pc->insn_cur)
+ release_hw(pc, src[i][c]);
+ }
+ }
+
+ kill_temp_temp(pc);
+ return TRUE;
+}
+
+/* Adjust a bitmask that indicates which components of a source are used;
+ * tx_prep uses this so that only the interpolants that are actually needed
+ * get loaded (e.g. DP3 only reads .xyz, so the mask becomes 0x7).
+ */
+static void
+insn_adjust_mask(const struct tgsi_full_instruction *insn, unsigned *mask)
+{
+ const struct tgsi_instruction_ext_texture *tex;
+
+ switch (insn->Instruction.Opcode) {
+ case TGSI_OPCODE_DP3:
+ *mask = 0x7;
+ break;
+ case TGSI_OPCODE_DP4:
+ case TGSI_OPCODE_DPH:
+ *mask = 0xF;
+ break;
+ case TGSI_OPCODE_LIT:
+ *mask = 0xB;
+ break;
+ case TGSI_OPCODE_RCP:
+ case TGSI_OPCODE_RSQ:
+ *mask = 0x1;
+ break;
+ case TGSI_OPCODE_TEX:
+ case TGSI_OPCODE_TXP:
+ assert(insn->Instruction.Extended);
+ tex = &insn->InstructionExtTexture;
+
+ *mask = 0x7;
+ if (tex->Texture == TGSI_TEXTURE_1D)
+ *mask = 0x1;
+ else
+ if (tex->Texture == TGSI_TEXTURE_2D)
+ *mask = 0x3;
+
+ if (insn->Instruction.Opcode == TGSI_OPCODE_TXP)
+ *mask |= 0x8;
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+prep_inspect_insn(struct nv50_pc *pc, const union tgsi_full_token *tok,
+ unsigned *r_usage[2])
+{
+ const struct tgsi_full_instruction *insn;
+ const struct tgsi_full_src_register *src;
+ const struct tgsi_dst_register *dst;
+
+ unsigned i, c, k, n, mask, *acc_p;
+
+ insn = &tok->FullInstruction;
+ dst = &insn->FullDstRegisters[0].DstRegister;
+ mask = dst->WriteMask;
+
+ if (!r_usage[0])
+ r_usage[0] = CALLOC(pc->temp_nr * 4, sizeof(unsigned));
+ if (!r_usage[1])
+ r_usage[1] = CALLOC(pc->attr_nr * 4, sizeof(unsigned));
+
+ if (dst->File == TGSI_FILE_TEMPORARY) {
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+ r_usage[0][dst->Index * 4 + c] = pc->insn_nr;
+ }
+ }
+
+ for (i = 0; i < insn->Instruction.NumSrcRegs; i++) {
+ src = &insn->FullSrcRegisters[i];
+
+ switch (src->SrcRegister.File) {
+ case TGSI_FILE_TEMPORARY:
+ acc_p = r_usage[0];
+ break;
+ case TGSI_FILE_INPUT:
+ acc_p = r_usage[1];
+ break;
+ default:
+ continue;
+ }
+
+ insn_adjust_mask(insn, &mask);
+
+ for (c = 0; c < 4; c++) {
+ if (!(mask & (1 << c)))
+ continue;
+
+ k = tgsi_util_get_full_src_register_extswizzle(src, c);
+ switch (k) {
+ case TGSI_EXTSWIZZLE_X:
+ case TGSI_EXTSWIZZLE_Y:
+ case TGSI_EXTSWIZZLE_Z:
+ case TGSI_EXTSWIZZLE_W:
+ n = src->SrcRegister.Index * 4 + k;
+ acc_p[n] = pc->insn_nr;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+}
+
+static unsigned
+load_fp_attrib(struct nv50_pc *pc, int i, unsigned *acc, int *mid,
+ int *aid, int *p_oid)
+{
+ struct nv50_reg *iv;
+ int oid, c, n;
+ unsigned mask = 0;
+
+ iv = (pc->interp_mode[i] & INTERP_CENTROID) ? pc->iv_c : pc->iv_p;
+
+ for (c = 0, n = i * 4; c < 4; c++, n++) {
+ oid = (*p_oid)++;
+ pc->attr[n].type = P_TEMP;
+ pc->attr[n].index = i;
+
+ if (pc->attr[n].acc == acc[n])
+ continue;
+ mask |= (1 << c);
+
+ pc->attr[n].acc = acc[n];
+ pc->attr[n].rhw = pc->attr[n].hw = -1;
+ alloc_reg(pc, &pc->attr[n]);
+
+ pc->attr[n].rhw = (*aid)++;
+ emit_interp(pc, &pc->attr[n], iv, pc->interp_mode[i]);
+
+ pc->p->cfg.fp.map[(*mid) / 4] |= oid << (8 * ((*mid) % 4));
+ (*mid)++;
+ pc->p->cfg.fp.regs[1] += 0x00010001;
+ }
+
+ return mask;
+}
+
+static boolean
+nv50_program_tx_prep(struct nv50_pc *pc)
+{
+ struct tgsi_parse_context p;
+ boolean ret = FALSE;
+ unsigned i, c;
+ unsigned fcol, bcol, fcrd, depr;
+
+ /* count (centroid) perspective interpolations */
+ unsigned centroid_loads = 0;
+ unsigned perspect_loads = 0;
+
+ /* track register access for temps and attrs */
+ unsigned *r_usage[2];
+ r_usage[0] = NULL;
+ r_usage[1] = NULL;
+
+ depr = fcol = bcol = fcrd = 0xffff;
+
+ if (pc->p->type == PIPE_SHADER_FRAGMENT) {
+ pc->p->cfg.fp.regs[0] = 0x01000404;
+ pc->p->cfg.fp.regs[1] = 0x00000400;
+ }
+
+ tgsi_parse_init(&p, pc->p->pipe.tokens);
+ while (!tgsi_parse_end_of_tokens(&p)) {
+ const union tgsi_full_token *tok = &p.FullToken;
+
+ tgsi_parse_token(&p);
+ switch (tok->Token.Type) {
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ {
+ const struct tgsi_full_immediate *imm =
+ &p.FullToken.FullImmediate;
+
+ ctor_immd(pc, imm->u[0].Float,
+ imm->u[1].Float,
+ imm->u[2].Float,
+ imm->u[3].Float);
+ }
+ break;
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ {
+ const struct tgsi_full_declaration *d;
+ unsigned last, first, mode;
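+/* Emit the comparison as an integer SET into a temp, then CVT that result
+ * to float in the real destination; the intent is apparently to end up
+ * with the 0.0 / 1.0 values that TGSI SLT/SGE expect.
+ */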
+
+ d = &p.FullToken.FullDeclaration;
+ first = d->DeclarationRange.First;
+ last = d->DeclarationRange.Last;
+
+ switch (d->Declaration.File) {
+ case TGSI_FILE_TEMPORARY:
+ if (pc->temp_nr < (last + 1))
+ pc->temp_nr = last + 1;
+ break;
+ case TGSI_FILE_OUTPUT:
+ if (pc->result_nr < (last + 1))
+ pc->result_nr = last + 1;
+
+ if (!d->Declaration.Semantic)
+ break;
+
+ switch (d->Semantic.SemanticName) {
+ case TGSI_SEMANTIC_POSITION:
+ depr = first;
+ pc->p->cfg.fp.regs[2] |= 0x00000100;
+ pc->p->cfg.fp.regs[3] |= 0x00000011;
+ break;
+ default:
+ break;
+ }
+
+ break;
+ case TGSI_FILE_INPUT:
+ {
+ if (pc->attr_nr < (last + 1))
+ pc->attr_nr = last + 1;
+
+ if (pc->p->type != PIPE_SHADER_FRAGMENT)
+ break;
+
+ switch (d->Declaration.Interpolate) {
+ case TGSI_INTERPOLATE_CONSTANT:
+ mode = INTERP_FLAT;
+ break;
+ case TGSI_INTERPOLATE_PERSPECTIVE:
+ mode = INTERP_PERSPECTIVE;
+ break;
+ default:
+ mode = INTERP_LINEAR;
+ break;
+ }
+
+ if (d->Declaration.Semantic) {
+ switch (d->Semantic.SemanticName) {
+ case TGSI_SEMANTIC_POSITION:
+ fcrd = first;
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ fcol = first;
+ mode = INTERP_PERSPECTIVE;
+ break;
+ case TGSI_SEMANTIC_BCOLOR:
+ bcol = first;
+ mode = INTERP_PERSPECTIVE;
+ break;
+ }
+ }
+
+ if (d->Declaration.Centroid) {
+ mode |= INTERP_CENTROID;
+ if (mode & INTERP_PERSPECTIVE)
+ centroid_loads++;
+ } else
+ if (mode & INTERP_PERSPECTIVE)
+ perspect_loads++;
+
+ assert(last < 32);
+ for (i = first; i <= last; i++)
+ pc->interp_mode[i] = mode;
+ }
+ break;
+ case TGSI_FILE_CONSTANT:
+ if (pc->param_nr < (last + 1))
+ pc->param_nr = last + 1;
+ break;
+ case TGSI_FILE_SAMPLER:
+ break;
+ default:
+ NOUVEAU_ERR("bad decl file %d\n",
+ d->Declaration.File);
+ goto out_err;
+ }
+ }
+ break;
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ pc->insn_nr++;
+ prep_inspect_insn(pc, tok, r_usage);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (pc->temp_nr) {
+ pc->temp = CALLOC(pc->temp_nr * 4, sizeof(struct nv50_reg));
+ if (!pc->temp)
+ goto out_err;
+
+ for (i = 0; i < pc->temp_nr; i++) {
+ for (c = 0; c < 4; c++) {
+ pc->temp[i*4+c].type = P_TEMP;
+ pc->temp[i*4+c].hw = -1;
+ pc->temp[i*4+c].rhw = -1;
+ pc->temp[i*4+c].index = i;
+ pc->temp[i*4+c].acc = r_usage[0][i*4+c];
+ }
+ }
+ }
+
+ if (pc->attr_nr) {
+ int oid = 4, mid = 4, aid = 0;
+ /* oid = VP output id
+ * aid = FP attribute/interpolant id
+ * mid = VP output mapping field ID
+ */
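+ /* fp.map packs one 8-bit VP output id per loaded interpolant,
+ * four ids per word (see load_fp_attrib) */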
+
+ pc->attr = CALLOC(pc->attr_nr * 4, sizeof(struct nv50_reg));
+ if (!pc->attr)
+ goto out_err;
+
+ if (pc->p->type == PIPE_SHADER_FRAGMENT) {
+ /* position should be loaded first */
+ if (fcrd != 0xffff) {
+ unsigned mask;
+ mid = 0;
+ mask = load_fp_attrib(pc, fcrd, r_usage[1],
+ &mid, &aid, &oid);
+ oid = 0;
+ pc->p->cfg.fp.regs[1] |= (mask << 24);
+ pc->p->cfg.fp.map[0] = 0x04040404 * fcrd;
+ }
+ pc->p->cfg.fp.map[0] += 0x03020100;
+
+ /* should do MAD fcrd.xy, fcrd, SOME_CONST, fcrd */
+
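+ /* load 1/w once (plus a centroid-interpolated variant if needed)
+ * for use by all perspective-corrected interpolants */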
+ if (perspect_loads) {
+ pc->iv_p = alloc_temp(pc, NULL);
+
+ if (!(pc->p->cfg.fp.regs[1] & 0x08000000)) {
+ pc->p->cfg.fp.regs[1] |= 0x08000000;
+ pc->iv_p->rhw = aid++;
+ emit_interp(pc, pc->iv_p, NULL,
+ INTERP_LINEAR);
+ emit_flop(pc, 0, pc->iv_p, pc->iv_p);
+ } else {
+ pc->iv_p->rhw = aid - 1;
+ emit_flop(pc, 0, pc->iv_p,
+ &pc->attr[fcrd * 4 + 3]);
+ }
+ }
+
+ if (centroid_loads) {
+ pc->iv_c = alloc_temp(pc, NULL);
+ pc->iv_c->rhw = pc->iv_p ? aid - 1 : aid++;
+ emit_interp(pc, pc->iv_c, NULL,
+ INTERP_CENTROID);
+ emit_flop(pc, 0, pc->iv_c, pc->iv_c);
+ pc->p->cfg.fp.regs[1] |= 0x08000000;
+ }
+
+ for (c = 0; c < 4; c++) {
+ /* I don't know what these values do, but
+ * let's set them like the blob does:
+ */
+ if (fcol != 0xffff && r_usage[1][fcol * 4 + c])
+ pc->p->cfg.fp.regs[0] += 0x00010000;
+ if (bcol != 0xffff && r_usage[1][bcol * 4 + c])
+ pc->p->cfg.fp.regs[0] += 0x00010000;
+ }
+
+ for (i = 0; i < pc->attr_nr; i++)
+ load_fp_attrib(pc, i, r_usage[1],
+ &mid, &aid, &oid);
+
+ if (pc->iv_p)
+ free_temp(pc, pc->iv_p);
+ if (pc->iv_c)
+ free_temp(pc, pc->iv_c);
+
+ pc->p->cfg.fp.high_map = (mid / 4);
+ pc->p->cfg.fp.high_map += ((mid % 4) ? 1 : 0);
+ } else {
+ /* vertex program */
+ for (i = 0; i < pc->attr_nr * 4; i++) {
+ pc->p->cfg.vp.attr[aid / 32] |=
+ (1 << (aid % 32));
+ pc->attr[i].type = P_ATTR;
+ pc->attr[i].hw = aid++;
+ pc->attr[i].index = i / 4;
+ }
+ }
+ }
+
+ if (pc->result_nr) {
+ int rid = 0;
+
+ pc->result = CALLOC(pc->result_nr * 4, sizeof(struct nv50_reg));
+ if (!pc->result)
+ goto out_err;
+
+ for (i = 0; i < pc->result_nr; i++) {
+ for (c = 0; c < 4; c++) {
+ if (pc->p->type == PIPE_SHADER_FRAGMENT) {
+ pc->result[i*4+c].type = P_TEMP;
+ pc->result[i*4+c].hw = -1;
+ pc->result[i*4+c].rhw = (i == depr) ?
+ -1 : rid++;
+ } else {
+ pc->result[i*4+c].type = P_RESULT;
+ pc->result[i*4+c].hw = rid++;
+ }
+ pc->result[i*4+c].index = i;
+ }
+
+ if (pc->p->type == PIPE_SHADER_FRAGMENT &&
+ depr != 0xffff) {
+ pc->result[depr * 4 + 2].rhw =
+ (pc->result_nr - 1) * 4;
+ }
+ }
+ }
+
+ if (pc->param_nr) {
+ int rid = 0;
+
+ pc->param = CALLOC(pc->param_nr * 4, sizeof(struct nv50_reg));
+ if (!pc->param)
+ goto out_err;
+
+ for (i = 0; i < pc->param_nr; i++) {
+ for (c = 0; c < 4; c++) {
+ pc->param[i*4+c].type = P_CONST;
+ pc->param[i*4+c].hw = rid++;
+ pc->param[i*4+c].index = i;
+ }
+ }
+ }
+
+ if (pc->immd_nr) {
+ int rid = 0;
+
+ pc->immd = CALLOC(pc->immd_nr * 4, sizeof(struct nv50_reg));
+ if (!pc->immd)
+ goto out_err;
+
+ for (i = 0; i < pc->immd_nr; i++) {
+ for (c = 0; c < 4; c++) {
+ pc->immd[i*4+c].type = P_IMMD;
+ pc->immd[i*4+c].hw = rid++;
+ pc->immd[i*4+c].index = i;
+ }
+ }
+ }
+
+ ret = TRUE;
+out_err:
+ if (r_usage[0])
+ FREE(r_usage[0]);
+ if (r_usage[1])
+ FREE(r_usage[1]);
+
+ tgsi_parse_free(&p);
+ return ret;
+}
+
+static void
+free_nv50_pc(struct nv50_pc *pc)
+{
+ if (pc->immd)
+ FREE(pc->immd);
+ if (pc->param)
+ FREE(pc->param);
+ if (pc->result)
+ FREE(pc->result);
+ if (pc->attr)
+ FREE(pc->attr);
+ if (pc->temp)
+ FREE(pc->temp);
+
+ FREE(pc);
+}
+
+static boolean
+nv50_program_tx(struct nv50_program *p)
+{
+ struct tgsi_parse_context parse;
+ struct nv50_pc *pc;
+ unsigned k;
+ boolean ret;
+
+ pc = CALLOC_STRUCT(nv50_pc);
+ if (!pc)
+ return FALSE;
+ pc->p = p;
+ pc->p->cfg.high_temp = 4;
+
+ ret = nv50_program_tx_prep(pc);
+ if (ret == FALSE)
+ goto out_cleanup;
+
+ tgsi_parse_init(&parse, pc->p->pipe.tokens);
+ while (!tgsi_parse_end_of_tokens(&parse)) {
+ const union tgsi_full_token *tok = &parse.FullToken;
+
+ /* don't allow half insn/immd on first and last instruction */
+ pc->allow32 = TRUE;
+ if (pc->insn_cur == 0 || pc->insn_cur + 2 == pc->insn_nr)
+ pc->allow32 = FALSE;
+
+ tgsi_parse_token(&parse);
+
+ switch (tok->Token.Type) {
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ ++pc->insn_cur;
+ ret = nv50_program_tx_insn(pc, tok);
+ if (ret == FALSE)
+ goto out_err;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (p->type == PIPE_SHADER_FRAGMENT) {
+ struct nv50_reg out;
+
+ out.type = P_TEMP;
+ for (k = 0; k < pc->result_nr * 4; k++) {
+ if (pc->result[k].rhw == -1)
+ continue;
+ if (pc->result[k].hw != pc->result[k].rhw) {
+ out.hw = pc->result[k].rhw;
+ emit_mov(pc, &out, &pc->result[k]);
+ }
+ if (pc->p->cfg.high_result < (pc->result[k].rhw + 1))
+ pc->p->cfg.high_result = pc->result[k].rhw + 1;
+ }
+ }
+
+ /* Look for runs with an odd number of consecutive half (32-bit)
+ * instructions and convert the last one of the run into a long
+ * instruction; halves apparently have to come in pairs. */
+ struct nv50_program_exec *e, *e_prev;
+
+ for (k = 0, e = pc->p->exec_head, e_prev = NULL; e; e = e->next) {
+ if (!is_long(e))
+ k++;
+
+ if (!e->next || is_long(e->next)) {
+ if (k & 1)
+ convert_to_long(pc, e);
+ k = 0;
+ }
+
+ if (e->next)
+ e_prev = e;
+ }
+
+ if (!is_long(pc->p->exec_tail)) {
+ /* this may occur if moving FP results */
+ assert(e_prev && !is_long(e_prev));
+ convert_to_long(pc, e_prev);
+ convert_to_long(pc, pc->p->exec_tail);
+ }
+
+ assert(is_long(pc->p->exec_tail) && !is_immd(pc->p->exec_head));
+ pc->p->exec_tail->inst[1] |= 0x00000001;
+
+ p->param_nr = pc->param_nr * 4;
+ p->immd_nr = pc->immd_nr * 4;
+ p->immd = pc->immd_buf;
+
+out_err:
+ tgsi_parse_free(&parse);
+
+out_cleanup:
+ free_nv50_pc(pc);
+ return ret;
+}
+
+static void
+nv50_program_validate(struct nv50_context *nv50, struct nv50_program *p)
+{
+ if (nv50_program_tx(p) == FALSE)
+ assert(0);
+ p->translated = TRUE;
+}
+
+static void
+nv50_program_upload_data(struct nv50_context *nv50, float *map,
+ unsigned start, unsigned count, unsigned cbuf)
+{
+ struct nouveau_channel *chan = nv50->screen->base.channel;
+ struct nouveau_grobj *tesla = nv50->screen->tesla;
+
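+ /* upload in pieces; a single CB_DATA method apparently cannot
+ * carry more than 2047 values */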
+ while (count) {
+ unsigned nr = count > 2047 ? 2047 : count;
+
+ BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 1);
+ OUT_RING (chan, (cbuf << 0) | (start << 8));
+ BEGIN_RING(chan, tesla, NV50TCL_CB_DATA(0) | 0x40000000, nr);
+ OUT_RINGp (chan, map, nr);
+
+ map += nr;
+ start += nr;
+ count -= nr;
+ }
+}
+
+static void
+nv50_program_validate_data(struct nv50_context *nv50, struct nv50_program *p)
+{
+ struct pipe_screen *pscreen = nv50->pipe.screen;
+
+ if (!p->data[0] && p->immd_nr) {
+ struct nouveau_resource *heap = nv50->screen->immd_heap[0];
+
+ if (nouveau_resource_alloc(heap, p->immd_nr, p, &p->data[0])) {
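+ /* heap is full: evict other programs' immediate data until
+ * this program's immediates fit, then retry the allocation */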
+ while (heap->next && heap->size < p->immd_nr) {
+ struct nv50_program *evict = heap->next->priv;
+ nouveau_resource_free(&evict->data[0]);
+ }
+
+ if (nouveau_resource_alloc(heap, p->immd_nr, p,
+ &p->data[0]))
+ assert(0);
+ }
+
+ /* immediates are uploaded here once; they only need to be uploaded
+ * again if their heap allocation gets evicted and re-allocated */
+ nv50_program_upload_data(nv50, p->immd, p->data[0]->start,
+ p->immd_nr, NV50_CB_PMISC);
+ }
+
+ if (!p->data[1] && p->param_nr) {
+ struct nouveau_resource *heap =
+ nv50->screen->parm_heap[p->type];
+
+ if (nouveau_resource_alloc(heap, p->param_nr, p, &p->data[1])) {
+ while (heap->next && heap->size < p->param_nr) {
+ struct nv50_program *evict = heap->next->priv;
+ nouveau_resource_free(&evict->data[1]);
+ }
+
+ if (nouveau_resource_alloc(heap, p->param_nr, p,
+ &p->data[1]))
+ assert(0);
+ }
+ }
+
+ if (p->param_nr) {
+ unsigned cbuf = NV50_CB_PVP;
+ float *map = pipe_buffer_map(pscreen, nv50->constbuf[p->type],
+ PIPE_BUFFER_USAGE_CPU_READ);
+ if (p->type == PIPE_SHADER_FRAGMENT)
+ cbuf = NV50_CB_PFP;
+ nv50_program_upload_data(nv50, map, p->data[1]->start,
+ p->param_nr, cbuf);
+ pipe_buffer_unmap(pscreen, nv50->constbuf[p->type]);
+ }
+}
+
+static void
+nv50_program_validate_code(struct nv50_context *nv50, struct nv50_program *p)
+{
+ struct nouveau_channel *chan = nv50->screen->base.channel;
+ struct nouveau_grobj *tesla = nv50->screen->tesla;
+ struct nv50_program_exec *e;
+ struct nouveau_stateobj *so;
+ const unsigned flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_WR;
+ unsigned start, count, *up, *ptr;
+ boolean upload = FALSE;
+
+ if (!p->bo) {
+ nouveau_bo_new(chan->device, NOUVEAU_BO_VRAM, 0x100,
+ p->exec_size * 4, &p->bo);
+ upload = TRUE;
+ }
+
+ if ((p->data[0] && p->data[0]->start != p->data_start[0]) ||
+ (p->data[1] && p->data[1]->start != p->data_start[1])) {
+ for (e = p->exec_head; e; e = e->next) {
+ unsigned ei, ci, bs;
+
+ if (e->param.index < 0)
+ continue;
+ bs = (e->inst[1] >> 22) & 0x07;
+ assert(bs < 2);
+ ei = e->param.shift >> 5;
+ ci = e->param.index + p->data[bs]->start;
+
+ e->inst[ei] &= ~e->param.mask;
+ e->inst[ei] |= (ci << e->param.shift);
+ }
+
+ if (p->data[0])
+ p->data_start[0] = p->data[0]->start;
+ if (p->data[1])
+ p->data_start[1] = p->data[1]->start;
+
+ upload = TRUE;
+ }
+
+ if (!upload)
+ return;
+
+#ifdef NV50_PROGRAM_DUMP
+ NOUVEAU_ERR("-------\n");
+ for (e = p->exec_head; e; e = e->next) {
+ NOUVEAU_ERR("0x%08x\n", e->inst[0]);
+ if (is_long(e))
+ NOUVEAU_ERR("0x%08x\n", e->inst[1]);
+ }
+#endif
+
+ up = ptr = MALLOC(p->exec_size * 4);
+ for (e = p->exec_head; e; e = e->next) {
+ *(ptr++) = e->inst[0];
+ if (is_long(e))
+ *(ptr++) = e->inst[1];
+ }
+
+ so = so_new(4,2);
+ so_method(so, nv50->screen->tesla, NV50TCL_CB_DEF_ADDRESS_HIGH, 3);
+ so_reloc (so, p->bo, 0, flags | NOUVEAU_BO_HIGH, 0, 0);
+ so_reloc (so, p->bo, 0, flags | NOUVEAU_BO_LOW, 0, 0);
+ so_data (so, (NV50_CB_PUPLOAD << 16) | 0x0800); /* (p->exec_size * 4) */
+
+ start = 0; count = p->exec_size;
+ while (count) {
+ struct nouveau_channel *chan = nv50->screen->base.channel;
+ unsigned nr;
+
+ so_emit(chan, so);
+
+ nr = MIN2(count, 2047);
+ nr = MIN2(chan->pushbuf->remaining, nr);
+ if (chan->pushbuf->remaining < (nr + 3)) {
+ FIRE_RING(chan);
+ continue;
+ }
+
+ BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 1);
+ OUT_RING (chan, (start << 8) | NV50_CB_PUPLOAD);
+ BEGIN_RING(chan, tesla, NV50TCL_CB_DATA(0) | 0x40000000, nr);
+ OUT_RINGp (chan, up + start, nr);
+
+ start += nr;
+ count -= nr;
+ }
+
+ FREE(up);
+ so_ref(NULL, &so);
+}
+
+void
+nv50_vertprog_validate(struct nv50_context *nv50)
+{
+ struct nouveau_grobj *tesla = nv50->screen->tesla;
+ struct nv50_program *p = nv50->vertprog;
+ struct nouveau_stateobj *so;
+
+ if (!p->translated) {
+ nv50_program_validate(nv50, p);
+ if (!p->translated)
+ assert(0);
+ }
+
+ nv50_program_validate_data(nv50, p);
+ nv50_program_validate_code(nv50, p);
+
+ so = so_new(13, 2);
+ so_method(so, tesla, NV50TCL_VP_ADDRESS_HIGH, 2);
+ so_reloc (so, p->bo, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD |
+ NOUVEAU_BO_HIGH, 0, 0);
+ so_reloc (so, p->bo, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD |
+ NOUVEAU_BO_LOW, 0, 0);
+ so_method(so, tesla, NV50TCL_VP_ATTR_EN_0, 2);
+ so_data (so, p->cfg.vp.attr[0]);
+ so_data (so, p->cfg.vp.attr[1]);
+ so_method(so, tesla, NV50TCL_VP_REG_ALLOC_RESULT, 1);
+ so_data (so, p->cfg.high_result);
+ so_method(so, tesla, NV50TCL_VP_RESULT_MAP_SIZE, 2);
+ so_data (so, p->cfg.high_result); /* 8 */
+ so_data (so, p->cfg.high_temp);
+ so_method(so, tesla, NV50TCL_VP_START_ID, 1);
+ so_data (so, 0); /* program start offset */
+ so_ref(so, &nv50->state.vertprog);
+ so_ref(NULL, &so);
+}
+
+void
+nv50_fragprog_validate(struct nv50_context *nv50)
+{
+ struct nouveau_grobj *tesla = nv50->screen->tesla;
+ struct nv50_program *p = nv50->fragprog;
+ struct nouveau_stateobj *so;
+ unsigned i;
+
+ if (!p->translated) {
+ nv50_program_validate(nv50, p);
+ if (!p->translated)
+ assert(0);
+ }
+
+ nv50_program_validate_data(nv50, p);
+ nv50_program_validate_code(nv50, p);
+
+ so = so_new(64, 2);
+ so_method(so, tesla, NV50TCL_FP_ADDRESS_HIGH, 2);
+ so_reloc (so, p->bo, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD |
+ NOUVEAU_BO_HIGH, 0, 0);
+ so_reloc (so, p->bo, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD |
+ NOUVEAU_BO_LOW, 0, 0);
+ so_method(so, tesla, NV50TCL_MAP_SEMANTIC_0, 4);
+ so_data (so, p->cfg.fp.regs[0]); /* 0x01000404 / 0x00040404 */
+ so_data (so, 0x00000004);
+ so_data (so, 0x00000000);
+ so_data (so, 0x00000000);
+ so_method(so, tesla, NV50TCL_VP_RESULT_MAP(0), p->cfg.fp.high_map);
+ for (i = 0; i < p->cfg.fp.high_map; i++)
+ so_data(so, p->cfg.fp.map[i]);
+ so_method(so, tesla, NV50TCL_FP_INTERPOLANT_CTRL, 2);
+ so_data (so, p->cfg.fp.regs[1]); /* 0x08040404 / 0x0f000401 */
+ so_data (so, p->cfg.high_temp);
+ so_method(so, tesla, NV50TCL_FP_RESULT_COUNT, 1);
+ so_data (so, p->cfg.high_result);
+ so_method(so, tesla, NV50TCL_FP_CTRL_UNK19A8, 1);
+ so_data (so, p->cfg.fp.regs[2]);
+ so_method(so, tesla, NV50TCL_FP_CTRL_UNK196C, 1);
+ so_data (so, p->cfg.fp.regs[3]);
+ so_method(so, tesla, NV50TCL_FP_START_ID, 1);
+ so_data (so, 0); /* program start offset */
+ so_ref(so, &nv50->state.fragprog);
+ so_ref(NULL, &so);
+}
+
+void
+nv50_program_destroy(struct nv50_context *nv50, struct nv50_program *p)
+{
+ while (p->exec_head) {
+ struct nv50_program_exec *e = p->exec_head;
+
+ p->exec_head = e->next;
+ FREE(e);
+ }
+ p->exec_tail = NULL;
+ p->exec_size = 0;
+
+ nouveau_bo_ref(NULL, &p->bo);
+
+ nouveau_resource_free(&p->data[0]);
+ nouveau_resource_free(&p->data[1]);
+
+ p->translated = 0;
+}
diff --git a/src/gallium/drivers/nv50/nv50_program.h b/src/gallium/drivers/nv50/nv50_program.h
new file mode 100644
index 0000000000..096e0476aa
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv50_program.h
@@ -0,0 +1,50 @@
+#ifndef __NV50_PROGRAM_H__
+#define __NV50_PROGRAM_H__
+
+#include "pipe/p_state.h"
+#include "tgsi/tgsi_scan.h"
+
+struct nv50_program_exec {
+ struct nv50_program_exec *next;
+
+ unsigned inst[2];
+ struct {
+ int index;
+ unsigned mask;
+ unsigned shift;
+ } param;
+};
+
+struct nv50_program {
+ struct pipe_shader_state pipe;
+ struct tgsi_shader_info info;
+ boolean translated;
+
+ unsigned type;
+ struct nv50_program_exec *exec_head;
+ struct nv50_program_exec *exec_tail;
+ unsigned exec_size;
+ struct nouveau_resource *data[2];
+ unsigned data_start[2];
+
+ struct nouveau_bo *bo;
+
+ float *immd;
+ unsigned immd_nr;
+ unsigned param_nr;
+
+ struct {
+ unsigned high_temp;
+ unsigned high_result;
+ struct {
+ unsigned attr[2];
+ } vp;
+ struct {
+ unsigned regs[4];
+ unsigned map[5];
+ unsigned high_map;
+ } fp;
+ } cfg;
+};
+
+#endif
diff --git a/src/gallium/drivers/nv50/nv50_query.c b/src/gallium/drivers/nv50/nv50_query.c
new file mode 100644
index 0000000000..5305c93d59
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv50_query.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2008 Ben Skeggs
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "pipe/p_context.h"
+#include "pipe/p_inlines.h"
+
+#include "nv50_context.h"
+
+struct nv50_query {
+ struct nouveau_bo *bo;
+ unsigned type;
+ boolean ready;
+ uint64_t result;
+};
+
+static INLINE struct nv50_query *
+nv50_query(struct pipe_query *pipe)
+{
+ return (struct nv50_query *)pipe;
+}
+
+static struct pipe_query *
+nv50_query_create(struct pipe_context *pipe, unsigned type)
+{
+ struct nouveau_device *dev = nouveau_screen(pipe->screen)->device;
+ struct nv50_query *q = CALLOC_STRUCT(nv50_query);
+ int ret;
+
+ assert(type == PIPE_QUERY_OCCLUSION_COUNTER);
+ q->type = type;
+
+ ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM | NOUVEAU_BO_MAP, 256,
+ 16, &q->bo);
+ if (ret) {
+ FREE(q);
+ return NULL;
+ }
+
+ return (struct pipe_query *)q;
+}
+
+static void
+nv50_query_destroy(struct pipe_context *pipe, struct pipe_query *pq)
+{
+ struct nv50_query *q = nv50_query(pq);
+
+ if (q) {
+ nouveau_bo_ref(NULL, &q->bo);
+ FREE(q);
+ }
+}
+
+static void
+nv50_query_begin(struct pipe_context *pipe, struct pipe_query *pq)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+ struct nouveau_channel *chan = nv50->screen->base.channel;
+ struct nouveau_grobj *tesla = nv50->screen->tesla;
+ struct nv50_query *q = nv50_query(pq);
+
+ BEGIN_RING(chan, tesla, 0x1530, 1);
+ OUT_RING (chan, 1);
+ BEGIN_RING(chan, tesla, 0x1514, 1);
+ OUT_RING (chan, 1);
+
+ q->ready = FALSE;
+}
+
+static void
+nv50_query_end(struct pipe_context *pipe, struct pipe_query *pq)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+ struct nouveau_channel *chan = nv50->screen->base.channel;
+ struct nouveau_grobj *tesla = nv50->screen->tesla;
+ struct nv50_query *q = nv50_query(pq);
+
+ WAIT_RING (chan, 5);
+ BEGIN_RING(chan, tesla, NV50TCL_QUERY_ADDRESS_HIGH, 4);
+ OUT_RELOCh(chan, q->bo, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ OUT_RELOCl(chan, q->bo, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_WR);
+ OUT_RING (chan, 0x00000000);
+ OUT_RING (chan, 0x0100f002);
+ FIRE_RING (chan);
+}
+
+static boolean
+nv50_query_result(struct pipe_context *pipe, struct pipe_query *pq,
+ boolean wait, uint64_t *result)
+{
+ struct nv50_query *q = nv50_query(pq);
+ int ret;
+
+ if (!q->ready) {
+ ret = nouveau_bo_map(q->bo, NOUVEAU_BO_RD |
+ (wait ? 0 : NOUVEAU_BO_NOWAIT));
+ if (ret)
+ return FALSE;
+ q->result = ((uint32_t *)q->bo->map)[1];
+ q->ready = TRUE;
+ nouveau_bo_unmap(q->bo);
+ }
+
+ *result = q->result;
+ return q->ready;
+}
+
+void
+nv50_init_query_functions(struct nv50_context *nv50)
+{
+ nv50->pipe.create_query = nv50_query_create;
+ nv50->pipe.destroy_query = nv50_query_destroy;
+ nv50->pipe.begin_query = nv50_query_begin;
+ nv50->pipe.end_query = nv50_query_end;
+ nv50->pipe.get_query_result = nv50_query_result;
+}
diff --git a/src/gallium/drivers/nv50/nv50_screen.c b/src/gallium/drivers/nv50/nv50_screen.c
new file mode 100644
index 0000000000..c7f80a2203
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv50_screen.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright 2008 Ben Skeggs
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "pipe/p_screen.h"
+
+#include "nv50_context.h"
+#include "nv50_screen.h"
+
+#include "nouveau/nouveau_stateobj.h"
+
+static boolean
+nv50_screen_is_format_supported(struct pipe_screen *pscreen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned tex_usage, unsigned geom_flags)
+{
+ if (tex_usage & PIPE_TEXTURE_USAGE_RENDER_TARGET) {
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ return TRUE;
+ default:
+ break;
+ }
+ } else
+ if (tex_usage & PIPE_TEXTURE_USAGE_DEPTH_STENCIL) {
+ switch (format) {
+ case PIPE_FORMAT_Z32_FLOAT:
+ case PIPE_FORMAT_Z24S8_UNORM:
+ case PIPE_FORMAT_X8Z24_UNORM:
+ case PIPE_FORMAT_S8Z24_UNORM:
+ return TRUE;
+ default:
+ break;
+ }
+ } else {
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ case PIPE_FORMAT_L8_UNORM:
+ case PIPE_FORMAT_A8_UNORM:
+ case PIPE_FORMAT_I8_UNORM:
+ case PIPE_FORMAT_A8L8_UNORM:
+ case PIPE_FORMAT_DXT1_RGB:
+ case PIPE_FORMAT_DXT1_RGBA:
+ case PIPE_FORMAT_DXT3_RGBA:
+ case PIPE_FORMAT_DXT5_RGBA:
+ return TRUE;
+ default:
+ break;
+ }
+ }
+
+ return FALSE;
+}
+
+static int
+nv50_screen_get_param(struct pipe_screen *pscreen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_TEXTURE_IMAGE_UNITS:
+ return 32;
+ case PIPE_CAP_NPOT_TEXTURES:
+ return 1;
+ case PIPE_CAP_TWO_SIDED_STENCIL:
+ return 1;
+ case PIPE_CAP_GLSL:
+ return 0;
+ case PIPE_CAP_S3TC:
+ return 1;
+ case PIPE_CAP_ANISOTROPIC_FILTER:
+ return 1;
+ case PIPE_CAP_POINT_SPRITE:
+ return 0;
+ case PIPE_CAP_MAX_RENDER_TARGETS:
+ return 8;
+ case PIPE_CAP_OCCLUSION_QUERY:
+ return 1;
+ case PIPE_CAP_TEXTURE_SHADOW_MAP:
+ return 1;
+ case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
+ return 13;
+ case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
+ return 10;
+ case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
+ return 13;
+ case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
+ case PIPE_CAP_TEXTURE_MIRROR_REPEAT:
+ return 1;
+ case PIPE_CAP_MAX_VERTEX_TEXTURE_UNITS:
+ return 0;
+ case PIPE_CAP_TGSI_CONT_SUPPORTED:
+ return 0;
+ case PIPE_CAP_BLEND_EQUATION_SEPARATE:
+ return 1;
+ case NOUVEAU_CAP_HW_VTXBUF:
+ return 1;
+ case NOUVEAU_CAP_HW_IDXBUF:
+ return 0;
+ default:
+ NOUVEAU_ERR("Unknown PIPE_CAP %d\n", param);
+ return 0;
+ }
+}
+
+static float
+nv50_screen_get_paramf(struct pipe_screen *pscreen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_LINE_WIDTH:
+ case PIPE_CAP_MAX_LINE_WIDTH_AA:
+ return 10.0;
+ case PIPE_CAP_MAX_POINT_WIDTH:
+ case PIPE_CAP_MAX_POINT_WIDTH_AA:
+ return 64.0;
+ case PIPE_CAP_MAX_TEXTURE_ANISOTROPY:
+ return 16.0;
+ case PIPE_CAP_MAX_TEXTURE_LOD_BIAS:
+ return 4.0;
+ default:
+ NOUVEAU_ERR("Unknown PIPE_CAP %d\n", param);
+ return 0.0;
+ }
+}
+
+static void
+nv50_screen_destroy(struct pipe_screen *pscreen)
+{
+ struct nv50_screen *screen = nv50_screen(pscreen);
+
+ nouveau_notifier_free(&screen->sync);
+ nouveau_grobj_free(&screen->tesla);
+ nouveau_grobj_free(&screen->eng2d);
+ nouveau_grobj_free(&screen->m2mf);
+ nouveau_screen_fini(&screen->base);
+ FREE(screen);
+}
+
+struct pipe_screen *
+nv50_screen_create(struct pipe_winsys *ws, struct nouveau_device *dev)
+{
+ struct nv50_screen *screen = CALLOC_STRUCT(nv50_screen);
+ struct nouveau_channel *chan;
+ struct pipe_screen *pscreen;
+ struct nouveau_stateobj *so;
+ unsigned chipset = dev->chipset;
+ unsigned tesla_class = 0;
+ int ret, i;
+
+ if (!screen)
+ return NULL;
+ pscreen = &screen->base.base;
+
+ ret = nouveau_screen_init(&screen->base, dev);
+ if (ret) {
+ nv50_screen_destroy(pscreen);
+ return NULL;
+ }
+ chan = screen->base.channel;
+
+ pscreen->winsys = ws;
+ pscreen->destroy = nv50_screen_destroy;
+ pscreen->get_param = nv50_screen_get_param;
+ pscreen->get_paramf = nv50_screen_get_paramf;
+ pscreen->is_format_supported = nv50_screen_is_format_supported;
+
+ nv50_screen_init_miptree_functions(pscreen);
+ nv50_transfer_init_screen_functions(pscreen);
+
+ /* DMA engine object */
+ ret = nouveau_grobj_alloc(chan, 0xbeef5039,
+ NV50_MEMORY_TO_MEMORY_FORMAT, &screen->m2mf);
+ if (ret) {
+ NOUVEAU_ERR("Error creating M2MF object: %d\n", ret);
+ nv50_screen_destroy(pscreen);
+ return NULL;
+ }
+ BIND_RING(chan, screen->m2mf, 1);
+
+ /* 2D object */
+ ret = nouveau_grobj_alloc(chan, 0xbeef502d, NV50_2D, &screen->eng2d);
+ if (ret) {
+ NOUVEAU_ERR("Error creating 2D object: %d\n", ret);
+ nv50_screen_destroy(pscreen);
+ return NULL;
+ }
+ BIND_RING(chan, screen->eng2d, 2);
+
+ /* 3D object */
+ switch (chipset & 0xf0) {
+ case 0x50:
+ tesla_class = NV50TCL;
+ break;
+ case 0x80:
+ case 0x90:
+ /* this stupid name should be corrected. */
+ tesla_class = NV54TCL;
+ break;
+ case 0xa0:
+ tesla_class = NVA0TCL;
+ break;
+ default:
+ NOUVEAU_ERR("Not a known NV50 chipset: NV%02x\n", chipset);
+ nv50_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ if (tesla_class == 0) {
+ NOUVEAU_ERR("Unknown G8x chipset: NV%02x\n", chipset);
+ nv50_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ ret = nouveau_grobj_alloc(chan, 0xbeef5097, tesla_class,
+ &screen->tesla);
+ if (ret) {
+ NOUVEAU_ERR("Error creating 3D object: %d\n", ret);
+ nv50_screen_destroy(pscreen);
+ return NULL;
+ }
+ BIND_RING(chan, screen->tesla, 3);
+
+ /* Sync notifier */
+ ret = nouveau_notifier_alloc(chan, 0xbeef0301, 1, &screen->sync);
+ if (ret) {
+ NOUVEAU_ERR("Error creating notifier object: %d\n", ret);
+ nv50_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ /* Static M2MF init */
+ so = so_new(32, 0);
+ so_method(so, screen->m2mf, NV04_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
+ so_data (so, screen->sync->handle);
+ so_data (so, chan->vram->handle);
+ so_data (so, chan->vram->handle);
+ so_emit(chan, so);
+ so_ref (NULL, &so);
+
+ /* Static 2D init */
+ so = so_new(64, 0);
+ so_method(so, screen->eng2d, NV50_2D_DMA_NOTIFY, 4);
+ so_data (so, screen->sync->handle);
+ so_data (so, chan->vram->handle);
+ so_data (so, chan->vram->handle);
+ so_data (so, chan->vram->handle);
+ so_method(so, screen->eng2d, NV50_2D_OPERATION, 1);
+ so_data (so, NV50_2D_OPERATION_SRCCOPY);
+ so_method(so, screen->eng2d, 0x0290, 1);
+ so_data (so, 0);
+ so_method(so, screen->eng2d, 0x0888, 1);
+ so_data (so, 1);
+ so_emit(chan, so);
+ so_ref(NULL, &so);
+
+ /* Static tesla init */
+ so = so_new(256, 20);
+
+ so_method(so, screen->tesla, 0x1558, 1);
+ so_data (so, 1);
+ so_method(so, screen->tesla, NV50TCL_DMA_NOTIFY, 1);
+ so_data (so, screen->sync->handle);
+ so_method(so, screen->tesla, NV50TCL_DMA_UNK0(0),
+ NV50TCL_DMA_UNK0__SIZE);
+ for (i = 0; i < NV50TCL_DMA_UNK0__SIZE; i++)
+ so_data(so, chan->vram->handle);
+ so_method(so, screen->tesla, NV50TCL_DMA_UNK1(0),
+ NV50TCL_DMA_UNK1__SIZE);
+ for (i = 0; i < NV50TCL_DMA_UNK1__SIZE; i++)
+ so_data(so, chan->vram->handle);
+ so_method(so, screen->tesla, 0x121c, 1);
+ so_data (so, 1);
+
+ so_method(so, screen->tesla, 0x13bc, 1);
+ so_data (so, 0x54);
+ /* origin is top left (set to 1 for bottom left) */
+ so_method(so, screen->tesla, 0x13ac, 1);
+ so_data (so, 0);
+ so_method(so, screen->tesla, NV50TCL_VP_REG_ALLOC_RESULT, 1);
+ so_data (so, 8);
+
+ /* constant buffers for immediates and VP/FP parameters */
+ ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 0, 128*4*4,
+ &screen->constbuf_misc[0]);
+ if (ret) {
+ nv50_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ for (i = 0; i < 2; i++) {
+ ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 0, 128*4*4,
+ &screen->constbuf_parm[i]);
+ if (ret) {
+ nv50_screen_destroy(pscreen);
+ return NULL;
+ }
+ }
+
+ if (nouveau_resource_init(&screen->immd_heap[0], 0, 128) ||
+ nouveau_resource_init(&screen->parm_heap[0], 0, 128) ||
+ nouveau_resource_init(&screen->parm_heap[1], 0, 128))
+ {
+ NOUVEAU_ERR("Error initialising constant buffers.\n");
+ nv50_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ /*
+ // map constant buffers:
+ // B = buffer ID (maybe more than 1 byte)
+ // N = CB index used in shader instruction
+ // P = program type (0 = VP, 2 = GP, 3 = FP)
+ so_method(so, screen->tesla, 0x1694, 1);
+ so_data (so, 0x000BBNP1);
+ */
+
+ so_method(so, screen->tesla, NV50TCL_CB_DEF_ADDRESS_HIGH, 3);
+ so_reloc (so, screen->constbuf_misc[0], 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_RD | NOUVEAU_BO_HIGH, 0, 0);
+ so_reloc (so, screen->constbuf_misc[0], 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_RD | NOUVEAU_BO_LOW, 0, 0);
+ so_data (so, (NV50_CB_PMISC << 16) | 0x00000800);
+ so_method(so, screen->tesla, NV50TCL_SET_PROGRAM_CB, 1);
+ so_data (so, 0x00000001 | (NV50_CB_PMISC << 12));
+ so_method(so, screen->tesla, NV50TCL_SET_PROGRAM_CB, 1);
+ so_data (so, 0x00000031 | (NV50_CB_PMISC << 12));
+
+ so_method(so, screen->tesla, NV50TCL_CB_DEF_ADDRESS_HIGH, 3);
+ so_reloc (so, screen->constbuf_parm[0], 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_RD | NOUVEAU_BO_HIGH, 0, 0);
+ so_reloc (so, screen->constbuf_parm[0], 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_RD | NOUVEAU_BO_LOW, 0, 0);
+ so_data (so, (NV50_CB_PVP << 16) | 0x00000800);
+ so_method(so, screen->tesla, NV50TCL_SET_PROGRAM_CB, 1);
+ so_data (so, 0x00000101 | (NV50_CB_PVP << 12));
+
+ so_method(so, screen->tesla, NV50TCL_CB_DEF_ADDRESS_HIGH, 3);
+ so_reloc (so, screen->constbuf_parm[1], 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_RD | NOUVEAU_BO_HIGH, 0, 0);
+ so_reloc (so, screen->constbuf_parm[1], 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_RD | NOUVEAU_BO_LOW, 0, 0);
+ so_data (so, (NV50_CB_PFP << 16) | 0x00000800);
+ so_method(so, screen->tesla, NV50TCL_SET_PROGRAM_CB, 1);
+ so_data (so, 0x00000131 | (NV50_CB_PFP << 12));
+
+ /* Texture sampler/image unit setup: for now we abuse the constant buffer
+ * upload mechanism to upload data to the texture config (TIC/TSC) blocks.
+ * At some point we may want to switch to the way the NVIDIA driver does
+ * this.
+ */
+ ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 0, 32*8*4, &screen->tic);
+ if (ret) {
+ nv50_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ so_method(so, screen->tesla, NV50TCL_CB_DEF_ADDRESS_HIGH, 3);
+ so_reloc (so, screen->tic, 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_RD | NOUVEAU_BO_HIGH, 0, 0);
+ so_reloc (so, screen->tic, 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_RD | NOUVEAU_BO_LOW, 0, 0);
+ so_data (so, (NV50_CB_TIC << 16) | 0x0800);
+ so_method(so, screen->tesla, NV50TCL_TIC_ADDRESS_HIGH, 3);
+ so_reloc (so, screen->tic, 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_RD | NOUVEAU_BO_HIGH, 0, 0);
+ so_reloc (so, screen->tic, 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_RD | NOUVEAU_BO_LOW, 0, 0);
+ so_data (so, 0x00000800);
+
+ ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 0, 32*8*4, &screen->tsc);
+ if (ret) {
+ nv50_screen_destroy(pscreen);
+ return NULL;
+ }
+
+ so_method(so, screen->tesla, NV50TCL_CB_DEF_ADDRESS_HIGH, 3);
+ so_reloc (so, screen->tsc, 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_RD | NOUVEAU_BO_HIGH, 0, 0);
+ so_reloc (so, screen->tsc, 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_RD | NOUVEAU_BO_LOW, 0, 0);
+ so_data (so, (NV50_CB_TSC << 16) | 0x0800);
+ so_method(so, screen->tesla, NV50TCL_TSC_ADDRESS_HIGH, 3);
+ so_reloc (so, screen->tsc, 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_RD | NOUVEAU_BO_HIGH, 0, 0);
+ so_reloc (so, screen->tsc, 0, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_RD | NOUVEAU_BO_LOW, 0, 0);
+ so_data (so, 0x00000800);
+
+
+ /* Vertex array limits - max them out */
+ for (i = 0; i < 16; i++) {
+ so_method(so, screen->tesla, NV50TCL_UNK1080_OFFSET_HIGH(i), 2);
+ so_data (so, 0x000000ff);
+ so_data (so, 0xffffffff);
+ }
+
+ so_method(so, screen->tesla, NV50TCL_DEPTH_RANGE_NEAR, 2);
+ so_data (so, fui(0.0));
+ so_data (so, fui(1.0));
+
+ so_method(so, screen->tesla, 0x1234, 1);
+ so_data (so, 1);
+
+ /* activate first scissor rectangle */
+ so_method(so, screen->tesla, NV50TCL_SCISSOR_ENABLE, 1);
+ so_data (so, 1);
+
+ so_emit(chan, so);
+ so_ref (so, &screen->static_init);
+ so_ref (NULL, &so);
+ nouveau_pushbuf_flush(chan, 0);
+
+ return pscreen;
+}
+
diff --git a/src/gallium/drivers/nv50/nv50_screen.h b/src/gallium/drivers/nv50/nv50_screen.h
new file mode 100644
index 0000000000..61e24a5b57
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv50_screen.h
@@ -0,0 +1,38 @@
+#ifndef __NV50_SCREEN_H__
+#define __NV50_SCREEN_H__
+
+#include "nouveau/nouveau_screen.h"
+
+struct nv50_screen {
+ struct nouveau_screen base;
+
+ struct nouveau_winsys *nvws;
+
+ unsigned cur_pctx;
+
+ struct nouveau_grobj *tesla;
+ struct nouveau_grobj *eng2d;
+ struct nouveau_grobj *m2mf;
+ struct nouveau_notifier *sync;
+
+ struct nouveau_bo *constbuf_misc[1];
+ struct nouveau_bo *constbuf_parm[2];
+
+ struct nouveau_resource *immd_heap[1];
+ struct nouveau_resource *parm_heap[2];
+
+ struct nouveau_bo *tic;
+ struct nouveau_bo *tsc;
+
+ struct nouveau_stateobj *static_init;
+};
+
+static INLINE struct nv50_screen *
+nv50_screen(struct pipe_screen *screen)
+{
+ return (struct nv50_screen *)screen;
+}
+
+void nv50_transfer_init_screen_functions(struct pipe_screen *);
+
+#endif
diff --git a/src/gallium/drivers/nv50/nv50_state.c b/src/gallium/drivers/nv50/nv50_state.c
new file mode 100644
index 0000000000..4283808ed9
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv50_state.c
@@ -0,0 +1,683 @@
+/*
+ * Copyright 2008 Ben Skeggs
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "pipe/p_state.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+
+#include "tgsi/tgsi_parse.h"
+
+#include "nv50_context.h"
+#include "nv50_texture.h"
+
+#include "nouveau/nouveau_stateobj.h"
+
+static void *
+nv50_blend_state_create(struct pipe_context *pipe,
+ const struct pipe_blend_state *cso)
+{
+ struct nouveau_stateobj *so = so_new(64, 0);
+ struct nouveau_grobj *tesla = nv50_context(pipe)->screen->tesla;
+ struct nv50_blend_stateobj *bso = CALLOC_STRUCT(nv50_blend_stateobj);
+ unsigned cmask = 0, i;
+
+ /*XXX ignored:
+ * - dither
+ */
+
+ if (cso->blend_enable == 0) {
+ so_method(so, tesla, NV50TCL_BLEND_ENABLE(0), 8);
+ for (i = 0; i < 8; i++)
+ so_data(so, 0);
+ } else {
+ so_method(so, tesla, NV50TCL_BLEND_ENABLE(0), 8);
+ for (i = 0; i < 8; i++)
+ so_data(so, 1);
+ so_method(so, tesla, NV50TCL_BLEND_EQUATION_RGB, 5);
+ so_data (so, nvgl_blend_eqn(cso->rgb_func));
+ so_data (so, 0x4000 | nvgl_blend_func(cso->rgb_src_factor));
+ so_data (so, 0x4000 | nvgl_blend_func(cso->rgb_dst_factor));
+ so_data (so, nvgl_blend_eqn(cso->alpha_func));
+ so_data (so, 0x4000 | nvgl_blend_func(cso->alpha_src_factor));
+ so_method(so, tesla, NV50TCL_BLEND_FUNC_DST_ALPHA, 1);
+ so_data (so, 0x4000 | nvgl_blend_func(cso->alpha_dst_factor));
+ }
+
+ if (cso->logicop_enable == 0) {
+ so_method(so, tesla, NV50TCL_LOGIC_OP_ENABLE, 1);
+ so_data (so, 0);
+ } else {
+ so_method(so, tesla, NV50TCL_LOGIC_OP_ENABLE, 2);
+ so_data (so, 1);
+ so_data (so, nvgl_logicop_func(cso->logicop_func));
+ }
+
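+ /* component write enables live in separate nibbles (R bit 0, G bit 4,
+ * B bit 8, A bit 12); the same mask is written for all 8 RTs
+ */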
+ if (cso->colormask & PIPE_MASK_R)
+ cmask |= (1 << 0);
+ if (cso->colormask & PIPE_MASK_G)
+ cmask |= (1 << 4);
+ if (cso->colormask & PIPE_MASK_B)
+ cmask |= (1 << 8);
+ if (cso->colormask & PIPE_MASK_A)
+ cmask |= (1 << 12);
+ so_method(so, tesla, NV50TCL_COLOR_MASK(0), 8);
+ for (i = 0; i < 8; i++)
+ so_data(so, cmask);
+
+ bso->pipe = *cso;
+ so_ref(so, &bso->so);
+ so_ref(NULL, &so);
+ return (void *)bso;
+}
+
+static void
+nv50_blend_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+
+ nv50->blend = hwcso;
+ nv50->dirty |= NV50_NEW_BLEND;
+}
+
+static void
+nv50_blend_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv50_blend_stateobj *bso = hwcso;
+
+ so_ref(NULL, &bso->so);
+ FREE(bso);
+}
+
+static INLINE unsigned
+wrap_mode(unsigned wrap)
+{
+ switch (wrap) {
+ case PIPE_TEX_WRAP_REPEAT:
+ return NV50TSC_1_0_WRAPS_REPEAT;
+ case PIPE_TEX_WRAP_MIRROR_REPEAT:
+ return NV50TSC_1_0_WRAPS_MIRROR_REPEAT;
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ return NV50TSC_1_0_WRAPS_CLAMP_TO_EDGE;
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ return NV50TSC_1_0_WRAPS_CLAMP_TO_BORDER;
+ case PIPE_TEX_WRAP_CLAMP:
+ return NV50TSC_1_0_WRAPS_CLAMP;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
+ return NV50TSC_1_0_WRAPS_MIRROR_CLAMP_TO_EDGE;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
+ return NV50TSC_1_0_WRAPS_MIRROR_CLAMP_TO_BORDER;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP:
+ return NV50TSC_1_0_WRAPS_MIRROR_CLAMP;
+ default:
+ NOUVEAU_ERR("unknown wrap mode: %d\n", wrap);
+ return NV50TSC_1_0_WRAPS_REPEAT;
+ }
+}
+
+static void *
+nv50_sampler_state_create(struct pipe_context *pipe,
+ const struct pipe_sampler_state *cso)
+{
+ struct nv50_sampler_stateobj *sso = CALLOC(1, sizeof(*sso));
+ unsigned *tsc = sso->tsc;
+ float limit;
+
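+ /* tsc[] is the raw texture sampler control block; it is uploaded
+ * verbatim into the TSC constant buffer by nv50_state_validate()
+ */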
+ tsc[0] = (0x00026000 |
+ (wrap_mode(cso->wrap_s) << 0) |
+ (wrap_mode(cso->wrap_t) << 3) |
+ (wrap_mode(cso->wrap_r) << 6));
+
+ switch (cso->mag_img_filter) {
+ case PIPE_TEX_FILTER_LINEAR:
+ tsc[1] |= NV50TSC_1_1_MAGF_LINEAR;
+ break;
+ case PIPE_TEX_FILTER_NEAREST:
+ default:
+ tsc[1] |= NV50TSC_1_1_MAGF_NEAREST;
+ break;
+ }
+
+ switch (cso->min_img_filter) {
+ case PIPE_TEX_FILTER_LINEAR:
+ tsc[1] |= NV50TSC_1_1_MINF_LINEAR;
+ break;
+ case PIPE_TEX_FILTER_NEAREST:
+ default:
+ tsc[1] |= NV50TSC_1_1_MINF_NEAREST;
+ break;
+ }
+
+ switch (cso->min_mip_filter) {
+ case PIPE_TEX_MIPFILTER_LINEAR:
+ tsc[1] |= NV50TSC_1_1_MIPF_LINEAR;
+ break;
+ case PIPE_TEX_MIPFILTER_NEAREST:
+ tsc[1] |= NV50TSC_1_1_MIPF_NEAREST;
+ break;
+ case PIPE_TEX_MIPFILTER_NONE:
+ default:
+ tsc[1] |= NV50TSC_1_1_MIPF_NONE;
+ break;
+ }
+
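+ /* max anisotropy is a 3-bit field at bit 20
+ * (NV50TSC_1_0_MAX_ANISOTROPY_MASK); map the requested level onto
+ * the supported steps
+ */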
+ if (cso->max_anisotropy >= 16.0)
+ tsc[0] |= (7 << 20);
+ else
+ if (cso->max_anisotropy >= 12.0)
+ tsc[0] |= (6 << 20);
+ else
+ if (cso->max_anisotropy >= 10.0)
+ tsc[0] |= (5 << 20);
+ else
+ if (cso->max_anisotropy >= 8.0)
+ tsc[0] |= (4 << 20);
+ else
+ if (cso->max_anisotropy >= 6.0)
+ tsc[0] |= (3 << 20);
+ else
+ if (cso->max_anisotropy >= 4.0)
+ tsc[0] |= (2 << 20);
+ else
+ if (cso->max_anisotropy >= 2.0)
+ tsc[0] |= (1 << 20);
+
+ if (cso->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) {
+ tsc[0] |= (1 << 8);
+ tsc[0] |= (nvgl_comparison_op(cso->compare_func) & 0x7);
+ }
+
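+ /* lod bias is a signed fixed-point value with 8 fractional bits,
+ * packed into the 13-bit field given by NV50TSC_1_1_LOD_BIAS_MASK
+ */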
+ limit = CLAMP(cso->lod_bias, -16.0, 15.0);
+ tsc[1] |= ((int)(limit * 256.0) & 0x1fff) << 12;
+
+ tsc[2] |= ((int)CLAMP(cso->max_lod, 0.0, 15.0) << 20) |
+ ((int)CLAMP(cso->min_lod, 0.0, 15.0) << 8);
+
+ tsc[4] = fui(cso->border_color[0]);
+ tsc[5] = fui(cso->border_color[1]);
+ tsc[6] = fui(cso->border_color[2]);
+ tsc[7] = fui(cso->border_color[3]);
+
+ sso->normalized = cso->normalized_coords;
+ return (void *)sso;
+}
+
+static void
+nv50_sampler_state_bind(struct pipe_context *pipe, unsigned nr, void **sampler)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+ int i;
+
+ nv50->sampler_nr = nr;
+ for (i = 0; i < nv50->sampler_nr; i++)
+ nv50->sampler[i] = sampler[i];
+
+ nv50->dirty |= NV50_NEW_SAMPLER;
+}
+
+static void
+nv50_sampler_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ FREE(hwcso);
+}
+
+static void
+nv50_set_sampler_texture(struct pipe_context *pipe, unsigned nr,
+ struct pipe_texture **pt)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+ int i;
+
+ for (i = 0; i < nr; i++)
+ pipe_texture_reference((void *)&nv50->miptree[i], pt[i]);
+ for (i = nr; i < nv50->miptree_nr; i++)
+ pipe_texture_reference((void *)&nv50->miptree[i], NULL);
+
+ nv50->miptree_nr = nr;
+ nv50->dirty |= NV50_NEW_TEXTURE;
+}
+
+static void *
+nv50_rasterizer_state_create(struct pipe_context *pipe,
+ const struct pipe_rasterizer_state *cso)
+{
+ struct nouveau_stateobj *so = so_new(64, 0);
+ struct nouveau_grobj *tesla = nv50_context(pipe)->screen->tesla;
+ struct nv50_rasterizer_stateobj *rso =
+ CALLOC_STRUCT(nv50_rasterizer_stateobj);
+
+ /*XXX: ignored
+ * - light_twoside
+ * - point_smooth
+ * - multisample
+ * - point_sprite / sprite_coord_mode
+ */
+
+ so_method(so, tesla, NV50TCL_SHADE_MODEL, 1);
+ so_data (so, cso->flatshade ? NV50TCL_SHADE_MODEL_FLAT :
+ NV50TCL_SHADE_MODEL_SMOOTH);
+ so_method(so, tesla, 0x1684, 1);
+ so_data (so, cso->flatshade_first ? 0 : 1);
+
+ so_method(so, tesla, NV50TCL_LINE_WIDTH, 1);
+ so_data (so, fui(cso->line_width));
+ so_method(so, tesla, NV50TCL_LINE_SMOOTH_ENABLE, 1);
+ so_data (so, cso->line_smooth ? 1 : 0);
+ if (cso->line_stipple_enable) {
+ so_method(so, tesla, NV50TCL_LINE_STIPPLE_ENABLE, 1);
+ so_data (so, 1);
+ so_method(so, tesla, NV50TCL_LINE_STIPPLE_PATTERN, 1);
+ so_data (so, (cso->line_stipple_pattern << 8) |
+ cso->line_stipple_factor);
+ } else {
+ so_method(so, tesla, NV50TCL_LINE_STIPPLE_ENABLE, 1);
+ so_data (so, 0);
+ }
+
+ so_method(so, tesla, NV50TCL_POINT_SIZE, 1);
+ so_data (so, fui(cso->point_size));
+
+ so_method(so, tesla, NV50TCL_POLYGON_MODE_FRONT, 3);
+ if (cso->front_winding == PIPE_WINDING_CCW) {
+ so_data(so, nvgl_polygon_mode(cso->fill_ccw));
+ so_data(so, nvgl_polygon_mode(cso->fill_cw));
+ } else {
+ so_data(so, nvgl_polygon_mode(cso->fill_cw));
+ so_data(so, nvgl_polygon_mode(cso->fill_ccw));
+ }
+ so_data(so, cso->poly_smooth ? 1 : 0);
+
+ so_method(so, tesla, NV50TCL_CULL_FACE_ENABLE, 3);
+ so_data (so, cso->cull_mode != PIPE_WINDING_NONE);
+ if (cso->front_winding == PIPE_WINDING_CCW) {
+ so_data(so, NV50TCL_FRONT_FACE_CCW);
+ switch (cso->cull_mode) {
+ case PIPE_WINDING_CCW:
+ so_data(so, NV50TCL_CULL_FACE_FRONT);
+ break;
+ case PIPE_WINDING_CW:
+ so_data(so, NV50TCL_CULL_FACE_BACK);
+ break;
+ case PIPE_WINDING_BOTH:
+ so_data(so, NV50TCL_CULL_FACE_FRONT_AND_BACK);
+ break;
+ default:
+ so_data(so, NV50TCL_CULL_FACE_BACK);
+ break;
+ }
+ } else {
+ so_data(so, NV50TCL_FRONT_FACE_CW);
+ switch (cso->cull_mode) {
+ case PIPE_WINDING_CCW:
+ so_data(so, NV50TCL_CULL_FACE_BACK);
+ break;
+ case PIPE_WINDING_CW:
+ so_data(so, NV50TCL_CULL_FACE_FRONT);
+ break;
+ case PIPE_WINDING_BOTH:
+ so_data(so, NV50TCL_CULL_FACE_FRONT_AND_BACK);
+ break;
+ default:
+ so_data(so, NV50TCL_CULL_FACE_BACK);
+ break;
+ }
+ }
+
+ so_method(so, tesla, NV50TCL_POLYGON_STIPPLE_ENABLE, 1);
+ so_data (so, cso->poly_stipple_enable ? 1 : 0);
+
+ so_method(so, tesla, NV50TCL_POLYGON_OFFSET_POINT_ENABLE, 3);
+ if ((cso->offset_cw && cso->fill_cw == PIPE_POLYGON_MODE_POINT) ||
+ (cso->offset_ccw && cso->fill_ccw == PIPE_POLYGON_MODE_POINT))
+ so_data(so, 1);
+ else
+ so_data(so, 0);
+ if ((cso->offset_cw && cso->fill_cw == PIPE_POLYGON_MODE_LINE) ||
+ (cso->offset_ccw && cso->fill_ccw == PIPE_POLYGON_MODE_LINE))
+ so_data(so, 1);
+ else
+ so_data(so, 0);
+ if ((cso->offset_cw && cso->fill_cw == PIPE_POLYGON_MODE_FILL) ||
+ (cso->offset_ccw && cso->fill_ccw == PIPE_POLYGON_MODE_FILL))
+ so_data(so, 1);
+ else
+ so_data(so, 0);
+
+ if (cso->offset_cw || cso->offset_ccw) {
+ so_method(so, tesla, NV50TCL_POLYGON_OFFSET_FACTOR, 1);
+ so_data (so, fui(cso->offset_scale));
+ so_method(so, tesla, NV50TCL_POLYGON_OFFSET_UNITS, 1);
+ so_data (so, fui(cso->offset_units));
+ }
+
+ rso->pipe = *cso;
+ so_ref(so, &rso->so);
+ so_ref(NULL, &so);
+ return (void *)rso;
+}
+
+static void
+nv50_rasterizer_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+
+ nv50->rasterizer = hwcso;
+ nv50->dirty |= NV50_NEW_RASTERIZER;
+}
+
+static void
+nv50_rasterizer_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv50_rasterizer_stateobj *rso = hwcso;
+
+ so_ref(NULL, &rso->so);
+ FREE(rso);
+}
+
+static void *
+nv50_depth_stencil_alpha_state_create(struct pipe_context *pipe,
+ const struct pipe_depth_stencil_alpha_state *cso)
+{
+ struct nouveau_grobj *tesla = nv50_context(pipe)->screen->tesla;
+ struct nv50_zsa_stateobj *zsa = CALLOC_STRUCT(nv50_zsa_stateobj);
+ struct nouveau_stateobj *so = so_new(64, 0);
+
+ so_method(so, tesla, NV50TCL_DEPTH_WRITE_ENABLE, 1);
+ so_data (so, cso->depth.writemask ? 1 : 0);
+ if (cso->depth.enabled) {
+ so_method(so, tesla, NV50TCL_DEPTH_TEST_ENABLE, 1);
+ so_data (so, 1);
+ so_method(so, tesla, NV50TCL_DEPTH_TEST_FUNC, 1);
+ so_data (so, nvgl_comparison_op(cso->depth.func));
+ } else {
+ so_method(so, tesla, NV50TCL_DEPTH_TEST_ENABLE, 1);
+ so_data (so, 0);
+ }
+
+ /* XXX: keep hex values until header is updated (names reversed) */
+ if (cso->stencil[0].enabled) {
+ so_method(so, tesla, 0x1380, 8);
+ so_data (so, 1);
+ so_data (so, nvgl_stencil_op(cso->stencil[0].fail_op));
+ so_data (so, nvgl_stencil_op(cso->stencil[0].zfail_op));
+ so_data (so, nvgl_stencil_op(cso->stencil[0].zpass_op));
+ so_data (so, nvgl_comparison_op(cso->stencil[0].func));
+ so_data (so, cso->stencil[0].ref_value);
+ so_data (so, cso->stencil[0].writemask);
+ so_data (so, cso->stencil[0].valuemask);
+ } else {
+ so_method(so, tesla, 0x1380, 1);
+ so_data (so, 0);
+ }
+
+ if (cso->stencil[1].enabled) {
+ so_method(so, tesla, 0x1594, 5);
+ so_data (so, 1);
+ so_data (so, nvgl_stencil_op(cso->stencil[1].fail_op));
+ so_data (so, nvgl_stencil_op(cso->stencil[1].zfail_op));
+ so_data (so, nvgl_stencil_op(cso->stencil[1].zpass_op));
+ so_data (so, nvgl_comparison_op(cso->stencil[1].func));
+ so_method(so, tesla, 0x0f54, 3);
+ so_data (so, cso->stencil[1].ref_value);
+ so_data (so, cso->stencil[1].writemask);
+ so_data (so, cso->stencil[1].valuemask);
+ } else {
+ so_method(so, tesla, 0x1594, 1);
+ so_data (so, 0);
+ }
+
+ if (cso->alpha.enabled) {
+ so_method(so, tesla, NV50TCL_ALPHA_TEST_ENABLE, 1);
+ so_data (so, 1);
+ so_method(so, tesla, NV50TCL_ALPHA_TEST_REF, 2);
+ so_data (so, fui(cso->alpha.ref_value));
+ so_data (so, nvgl_comparison_op(cso->alpha.func));
+ } else {
+ so_method(so, tesla, NV50TCL_ALPHA_TEST_ENABLE, 1);
+ so_data (so, 0);
+ }
+
+ zsa->pipe = *cso;
+ so_ref(so, &zsa->so);
+ so_ref(NULL, &so);
+ return (void *)zsa;
+}
+
+static void
+nv50_depth_stencil_alpha_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+
+ nv50->zsa = hwcso;
+ nv50->dirty |= NV50_NEW_ZSA;
+}
+
+static void
+nv50_depth_stencil_alpha_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv50_zsa_stateobj *zsa = hwcso;
+
+ so_ref(NULL, &zsa->so);
+ FREE(zsa);
+}
+
+static void *
+nv50_vp_state_create(struct pipe_context *pipe,
+ const struct pipe_shader_state *cso)
+{
+ struct nv50_program *p = CALLOC_STRUCT(nv50_program);
+
+ p->pipe.tokens = tgsi_dup_tokens(cso->tokens);
+ p->type = PIPE_SHADER_VERTEX;
+ tgsi_scan_shader(p->pipe.tokens, &p->info);
+ return (void *)p;
+}
+
+static void
+nv50_vp_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+
+ nv50->vertprog = hwcso;
+ nv50->dirty |= NV50_NEW_VERTPROG;
+}
+
+static void
+nv50_vp_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+ struct nv50_program *p = hwcso;
+
+ nv50_program_destroy(nv50, p);
+ FREE((void*)p->pipe.tokens);
+ FREE(p);
+}
+
+static void *
+nv50_fp_state_create(struct pipe_context *pipe,
+ const struct pipe_shader_state *cso)
+{
+ struct nv50_program *p = CALLOC_STRUCT(nv50_program);
+
+ p->pipe.tokens = tgsi_dup_tokens(cso->tokens);
+ p->type = PIPE_SHADER_FRAGMENT;
+ tgsi_scan_shader(p->pipe.tokens, &p->info);
+ return (void *)p;
+}
+
+static void
+nv50_fp_state_bind(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+
+ nv50->fragprog = hwcso;
+ nv50->dirty |= NV50_NEW_FRAGPROG;
+}
+
+static void
+nv50_fp_state_delete(struct pipe_context *pipe, void *hwcso)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+ struct nv50_program *p = hwcso;
+
+ nv50_program_destroy(nv50, p);
+ FREE((void*)p->pipe.tokens);
+ FREE(p);
+}
+
+static void
+nv50_set_blend_color(struct pipe_context *pipe,
+ const struct pipe_blend_color *bcol)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+
+ nv50->blend_colour = *bcol;
+ nv50->dirty |= NV50_NEW_BLEND_COLOUR;
+}
+
+static void
+nv50_set_clip_state(struct pipe_context *pipe,
+ const struct pipe_clip_state *clip)
+{
+}
+
+static void
+nv50_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
+ const struct pipe_constant_buffer *buf )
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+
+ if (shader == PIPE_SHADER_VERTEX) {
+ nv50->constbuf[PIPE_SHADER_VERTEX] = buf->buffer;
+ nv50->dirty |= NV50_NEW_VERTPROG_CB;
+ } else
+ if (shader == PIPE_SHADER_FRAGMENT) {
+ nv50->constbuf[PIPE_SHADER_FRAGMENT] = buf->buffer;
+ nv50->dirty |= NV50_NEW_FRAGPROG_CB;
+ }
+}
+
+static void
+nv50_set_framebuffer_state(struct pipe_context *pipe,
+ const struct pipe_framebuffer_state *fb)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+
+ nv50->framebuffer = *fb;
+ nv50->dirty |= NV50_NEW_FRAMEBUFFER;
+}
+
+static void
+nv50_set_polygon_stipple(struct pipe_context *pipe,
+ const struct pipe_poly_stipple *stipple)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+
+ nv50->stipple = *stipple;
+ nv50->dirty |= NV50_NEW_STIPPLE;
+}
+
+static void
+nv50_set_scissor_state(struct pipe_context *pipe,
+ const struct pipe_scissor_state *s)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+
+ nv50->scissor = *s;
+ nv50->dirty |= NV50_NEW_SCISSOR;
+}
+
+static void
+nv50_set_viewport_state(struct pipe_context *pipe,
+ const struct pipe_viewport_state *vpt)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+
+ nv50->viewport = *vpt;
+ nv50->dirty |= NV50_NEW_VIEWPORT;
+}
+
+static void
+nv50_set_vertex_buffers(struct pipe_context *pipe, unsigned count,
+ const struct pipe_vertex_buffer *vb)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+
+ memcpy(nv50->vtxbuf, vb, sizeof(*vb) * count);
+ nv50->vtxbuf_nr = count;
+
+ nv50->dirty |= NV50_NEW_ARRAYS;
+}
+
+static void
+nv50_set_vertex_elements(struct pipe_context *pipe, unsigned count,
+ const struct pipe_vertex_element *ve)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+
+ memcpy(nv50->vtxelt, ve, sizeof(*ve) * count);
+ nv50->vtxelt_nr = count;
+
+ nv50->dirty |= NV50_NEW_ARRAYS;
+}
+
+void
+nv50_init_state_functions(struct nv50_context *nv50)
+{
+ nv50->pipe.create_blend_state = nv50_blend_state_create;
+ nv50->pipe.bind_blend_state = nv50_blend_state_bind;
+ nv50->pipe.delete_blend_state = nv50_blend_state_delete;
+
+ nv50->pipe.create_sampler_state = nv50_sampler_state_create;
+ nv50->pipe.bind_sampler_states = nv50_sampler_state_bind;
+ nv50->pipe.delete_sampler_state = nv50_sampler_state_delete;
+ nv50->pipe.set_sampler_textures = nv50_set_sampler_texture;
+
+ nv50->pipe.create_rasterizer_state = nv50_rasterizer_state_create;
+ nv50->pipe.bind_rasterizer_state = nv50_rasterizer_state_bind;
+ nv50->pipe.delete_rasterizer_state = nv50_rasterizer_state_delete;
+
+ nv50->pipe.create_depth_stencil_alpha_state =
+ nv50_depth_stencil_alpha_state_create;
+ nv50->pipe.bind_depth_stencil_alpha_state =
+ nv50_depth_stencil_alpha_state_bind;
+ nv50->pipe.delete_depth_stencil_alpha_state =
+ nv50_depth_stencil_alpha_state_delete;
+
+ nv50->pipe.create_vs_state = nv50_vp_state_create;
+ nv50->pipe.bind_vs_state = nv50_vp_state_bind;
+ nv50->pipe.delete_vs_state = nv50_vp_state_delete;
+
+ nv50->pipe.create_fs_state = nv50_fp_state_create;
+ nv50->pipe.bind_fs_state = nv50_fp_state_bind;
+ nv50->pipe.delete_fs_state = nv50_fp_state_delete;
+
+ nv50->pipe.set_blend_color = nv50_set_blend_color;
+ nv50->pipe.set_clip_state = nv50_set_clip_state;
+ nv50->pipe.set_constant_buffer = nv50_set_constant_buffer;
+ nv50->pipe.set_framebuffer_state = nv50_set_framebuffer_state;
+ nv50->pipe.set_polygon_stipple = nv50_set_polygon_stipple;
+ nv50->pipe.set_scissor_state = nv50_set_scissor_state;
+ nv50->pipe.set_viewport_state = nv50_set_viewport_state;
+
+ nv50->pipe.set_vertex_buffers = nv50_set_vertex_buffers;
+ nv50->pipe.set_vertex_elements = nv50_set_vertex_elements;
+}
+
diff --git a/src/gallium/drivers/nv50/nv50_state_validate.c b/src/gallium/drivers/nv50/nv50_state_validate.c
new file mode 100644
index 0000000000..d294356f75
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv50_state_validate.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright 2008 Ben Skeggs
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "nv50_context.h"
+#include "nouveau/nouveau_stateobj.h"
+
+static void
+nv50_state_validate_fb(struct nv50_context *nv50)
+{
+ struct nouveau_grobj *tesla = nv50->screen->tesla;
+ struct nouveau_stateobj *so = so_new(128, 18);
+ struct pipe_framebuffer_state *fb = &nv50->framebuffer;
+ unsigned i, w, h, gw = 0;
+
+ for (i = 0; i < fb->nr_cbufs; i++) {
+ struct pipe_texture *pt = fb->cbufs[i]->texture;
+ struct nouveau_bo *bo = nv50_miptree(pt)->base.bo;
+
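+ /* all bound surfaces must share the same dimensions; remember the
+ * first width/height seen and assert the rest match
+ */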
+ if (!gw) {
+ w = fb->cbufs[i]->width;
+ h = fb->cbufs[i]->height;
+ gw = 1;
+ } else {
+ assert(w == fb->cbufs[i]->width);
+ assert(h == fb->cbufs[i]->height);
+ }
+
+ so_method(so, tesla, NV50TCL_RT_HORIZ(i), 2);
+ so_data (so, fb->cbufs[i]->width);
+ so_data (so, fb->cbufs[i]->height);
+
+ so_method(so, tesla, NV50TCL_RT_ADDRESS_HIGH(i), 5);
+ so_reloc (so, bo, fb->cbufs[i]->offset, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_HIGH | NOUVEAU_BO_RDWR, 0, 0);
+ so_reloc (so, bo, fb->cbufs[i]->offset, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_LOW | NOUVEAU_BO_RDWR, 0, 0);
+ switch (fb->cbufs[i]->format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ so_data(so, NV50TCL_RT_FORMAT_A8R8G8B8_UNORM);
+ break;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ so_data(so, NV50TCL_RT_FORMAT_R5G6B5_UNORM);
+ break;
+ default:
+ NOUVEAU_ERR("AIIII unknown format %s\n",
+ pf_name(fb->cbufs[i]->format));
+ so_data(so, NV50TCL_RT_FORMAT_X8R8G8B8_UNORM);
+ break;
+ }
+ so_data(so, nv50_miptree(pt)->
+ level[fb->cbufs[i]->level].tile_mode << 4);
+ so_data(so, 0x00000000);
+
+ so_method(so, tesla, 0x1224, 1);
+ so_data (so, 1);
+ }
+
+ if (fb->zsbuf) {
+ struct pipe_texture *pt = fb->zsbuf->texture;
+ struct nouveau_bo *bo = nv50_miptree(pt)->base.bo;
+
+ if (!gw) {
+ w = fb->zsbuf->width;
+ h = fb->zsbuf->height;
+ gw = 1;
+ } else {
+ assert(w == fb->zsbuf->width);
+ assert(h == fb->zsbuf->height);
+ }
+
+ so_method(so, tesla, NV50TCL_ZETA_ADDRESS_HIGH, 5);
+ so_reloc (so, bo, fb->zsbuf->offset, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_HIGH | NOUVEAU_BO_RDWR, 0, 0);
+ so_reloc (so, bo, fb->zsbuf->offset, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_LOW | NOUVEAU_BO_RDWR, 0, 0);
+ switch (fb->zsbuf->format) {
+ case PIPE_FORMAT_Z32_FLOAT:
+ so_data(so, NV50TCL_ZETA_FORMAT_Z32_FLOAT);
+ break;
+ case PIPE_FORMAT_Z24S8_UNORM:
+ so_data(so, NV50TCL_ZETA_FORMAT_Z24S8_UNORM);
+ break;
+ case PIPE_FORMAT_X8Z24_UNORM:
+ so_data(so, NV50TCL_ZETA_FORMAT_X8Z24_UNORM);
+ break;
+ case PIPE_FORMAT_S8Z24_UNORM:
+ so_data(so, NV50TCL_ZETA_FORMAT_S8Z24_UNORM);
+ break;
+ default:
+ NOUVEAU_ERR("AIIII unknown format %s\n",
+ pf_name(fb->zsbuf->format));
+ so_data(so, NV50TCL_ZETA_FORMAT_S8Z24_UNORM);
+ break;
+ }
+ so_data(so, nv50_miptree(pt)->
+ level[fb->zsbuf->level].tile_mode << 4);
+ so_data(so, 0x00000000);
+
+ so_method(so, tesla, 0x1538, 1);
+ so_data (so, 1);
+ so_method(so, tesla, NV50TCL_ZETA_HORIZ, 3);
+ so_data (so, fb->zsbuf->width);
+ so_data (so, fb->zsbuf->height);
+ so_data (so, 0x00010001);
+ }
+
+ so_method(so, tesla, NV50TCL_VIEWPORT_HORIZ, 2);
+ so_data (so, w << 16);
+ so_data (so, h << 16);
+ /* set window lower left corner */
+ so_method(so, tesla, NV50TCL_WINDOW_LEFT, 2);
+ so_data (so, 0);
+ so_data (so, 0);
+ /* set screen scissor rectangle */
+ so_method(so, tesla, NV50TCL_SCREEN_SCISSOR_HORIZ, 2);
+ so_data (so, w << 16);
+ so_data (so, h << 16);
+
+ /* we set scissors to framebuffer size when they're 'turned off' */
+ nv50->dirty |= NV50_NEW_SCISSOR;
+ so_ref(NULL, &nv50->state.scissor);
+
+ so_ref(so, &nv50->state.fb);
+ so_ref(NULL, &so);
+}
+
+static void
+nv50_state_emit(struct nv50_context *nv50)
+{
+ struct nv50_screen *screen = nv50->screen;
+ struct nouveau_channel *chan = screen->base.channel;
+
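+ /* if another context was the last to emit on this channel, everything
+ * we are holding needs to be re-emitted, so mark it all dirty
+ */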
+ if (nv50->pctx_id != screen->cur_pctx) {
+ if (nv50->state.fb)
+ nv50->state.dirty |= NV50_NEW_FRAMEBUFFER;
+ if (nv50->state.blend)
+ nv50->state.dirty |= NV50_NEW_BLEND;
+ if (nv50->state.zsa)
+ nv50->state.dirty |= NV50_NEW_ZSA;
+ if (nv50->state.vertprog)
+ nv50->state.dirty |= NV50_NEW_VERTPROG;
+ if (nv50->state.fragprog)
+ nv50->state.dirty |= NV50_NEW_FRAGPROG;
+ if (nv50->state.rast)
+ nv50->state.dirty |= NV50_NEW_RASTERIZER;
+ if (nv50->state.blend_colour)
+ nv50->state.dirty |= NV50_NEW_BLEND_COLOUR;
+ if (nv50->state.stipple)
+ nv50->state.dirty |= NV50_NEW_STIPPLE;
+ if (nv50->state.scissor)
+ nv50->state.dirty |= NV50_NEW_SCISSOR;
+ if (nv50->state.viewport)
+ nv50->state.dirty |= NV50_NEW_VIEWPORT;
+ if (nv50->state.tsc_upload)
+ nv50->state.dirty |= NV50_NEW_SAMPLER;
+ if (nv50->state.tic_upload)
+ nv50->state.dirty |= NV50_NEW_TEXTURE;
+ if (nv50->state.vtxfmt && nv50->state.vtxbuf)
+ nv50->state.dirty |= NV50_NEW_ARRAYS;
+ screen->cur_pctx = nv50->pctx_id;
+ }
+
+ if (nv50->state.dirty & NV50_NEW_FRAMEBUFFER)
+ so_emit(chan, nv50->state.fb);
+ if (nv50->state.dirty & NV50_NEW_BLEND)
+ so_emit(chan, nv50->state.blend);
+ if (nv50->state.dirty & NV50_NEW_ZSA)
+ so_emit(chan, nv50->state.zsa);
+ if (nv50->state.dirty & NV50_NEW_VERTPROG)
+ so_emit(chan, nv50->state.vertprog);
+ if (nv50->state.dirty & NV50_NEW_FRAGPROG)
+ so_emit(chan, nv50->state.fragprog);
+ if (nv50->state.dirty & NV50_NEW_RASTERIZER)
+ so_emit(chan, nv50->state.rast);
+ if (nv50->state.dirty & NV50_NEW_BLEND_COLOUR)
+ so_emit(chan, nv50->state.blend_colour);
+ if (nv50->state.dirty & NV50_NEW_STIPPLE)
+ so_emit(chan, nv50->state.stipple);
+ if (nv50->state.dirty & NV50_NEW_SCISSOR)
+ so_emit(chan, nv50->state.scissor);
+ if (nv50->state.dirty & NV50_NEW_VIEWPORT)
+ so_emit(chan, nv50->state.viewport);
+ if (nv50->state.dirty & NV50_NEW_SAMPLER)
+ so_emit(chan, nv50->state.tsc_upload);
+ if (nv50->state.dirty & NV50_NEW_TEXTURE)
+ so_emit(chan, nv50->state.tic_upload);
+ if (nv50->state.dirty & NV50_NEW_ARRAYS) {
+ so_emit(chan, nv50->state.vtxfmt);
+ so_emit(chan, nv50->state.vtxbuf);
+ if (nv50->state.vtxattr)
+ so_emit(chan, nv50->state.vtxattr);
+ }
+ nv50->state.dirty = 0;
+
+ so_emit_reloc_markers(chan, nv50->state.fb);
+ so_emit_reloc_markers(chan, nv50->state.vertprog);
+ so_emit_reloc_markers(chan, nv50->state.fragprog);
+ so_emit_reloc_markers(chan, nv50->state.vtxbuf);
+ so_emit_reloc_markers(chan, nv50->screen->static_init);
+}
+
+boolean
+nv50_state_validate(struct nv50_context *nv50)
+{
+ struct nouveau_grobj *tesla = nv50->screen->tesla;
+ struct nouveau_stateobj *so;
+ unsigned i;
+
+ if (nv50->dirty & NV50_NEW_FRAMEBUFFER)
+ nv50_state_validate_fb(nv50);
+
+ if (nv50->dirty & NV50_NEW_BLEND)
+ so_ref(nv50->blend->so, &nv50->state.blend);
+
+ if (nv50->dirty & NV50_NEW_ZSA)
+ so_ref(nv50->zsa->so, &nv50->state.zsa);
+
+ if (nv50->dirty & (NV50_NEW_VERTPROG | NV50_NEW_VERTPROG_CB))
+ nv50_vertprog_validate(nv50);
+
+ if (nv50->dirty & (NV50_NEW_FRAGPROG | NV50_NEW_FRAGPROG_CB))
+ nv50_fragprog_validate(nv50);
+
+ if (nv50->dirty & NV50_NEW_RASTERIZER)
+ so_ref(nv50->rasterizer->so, &nv50->state.rast);
+
+ if (nv50->dirty & NV50_NEW_BLEND_COLOUR) {
+ so = so_new(5, 0);
+ so_method(so, tesla, NV50TCL_BLEND_COLOR(0), 4);
+ so_data (so, fui(nv50->blend_colour.color[0]));
+ so_data (so, fui(nv50->blend_colour.color[1]));
+ so_data (so, fui(nv50->blend_colour.color[2]));
+ so_data (so, fui(nv50->blend_colour.color[3]));
+ so_ref(so, &nv50->state.blend_colour);
+ so_ref(NULL, &so);
+ }
+
+ if (nv50->dirty & NV50_NEW_STIPPLE) {
+ so = so_new(33, 0);
+ so_method(so, tesla, NV50TCL_POLYGON_STIPPLE_PATTERN(0), 32);
+ for (i = 0; i < 32; i++)
+ so_data(so, nv50->stipple.stipple[i]);
+ so_ref(so, &nv50->state.stipple);
+ so_ref(NULL, &so);
+ }
+
+ if (nv50->dirty & (NV50_NEW_SCISSOR | NV50_NEW_RASTERIZER)) {
+ struct pipe_rasterizer_state *rast = &nv50->rasterizer->pipe;
+ struct pipe_scissor_state *s = &nv50->scissor;
+
+ if (nv50->state.scissor &&
+ (rast->scissor == 0 && nv50->state.scissor_enabled == 0))
+ goto scissor_uptodate;
+ nv50->state.scissor_enabled = rast->scissor;
+
+ so = so_new(3, 0);
+ so_method(so, tesla, NV50TCL_SCISSOR_HORIZ, 2);
+ if (nv50->state.scissor_enabled) {
+ so_data(so, (s->maxx << 16) | s->minx);
+ so_data(so, (s->maxy << 16) | s->miny);
+ } else {
+ so_data(so, (nv50->framebuffer.width << 16));
+ so_data(so, (nv50->framebuffer.height << 16));
+ }
+ so_ref(so, &nv50->state.scissor);
+ so_ref(NULL, &so);
+ nv50->state.dirty |= NV50_NEW_SCISSOR;
+ }
+scissor_uptodate:
+
+ if (nv50->dirty & (NV50_NEW_VIEWPORT | NV50_NEW_RASTERIZER)) {
+ unsigned bypass;
+
+ if (!nv50->rasterizer->pipe.bypass_vs_clip_and_viewport)
+ bypass = 0;
+ else
+ bypass = 1;
+
+ if (nv50->state.viewport &&
+ (bypass || !(nv50->dirty & NV50_NEW_VIEWPORT)) &&
+ nv50->state.viewport_bypass == bypass)
+ goto viewport_uptodate;
+ nv50->state.viewport_bypass = bypass;
+
+ so = so_new(12, 0);
+ if (!bypass) {
+ so_method(so, tesla, NV50TCL_VIEWPORT_TRANSLATE(0), 3);
+ so_data (so, fui(nv50->viewport.translate[0]));
+ so_data (so, fui(nv50->viewport.translate[1]));
+ so_data (so, fui(nv50->viewport.translate[2]));
+ so_method(so, tesla, NV50TCL_VIEWPORT_SCALE(0), 3);
+ so_data (so, fui(nv50->viewport.scale[0]));
+ so_data (so, fui(nv50->viewport.scale[1]));
+ so_data (so, fui(nv50->viewport.scale[2]));
+
+ so_method(so, tesla, NV50TCL_VIEWPORT_TRANSFORM_EN, 1);
+ so_data (so, 1);
+ /* no idea what 0f90 does */
+ so_method(so, tesla, 0x0f90, 1);
+ so_data (so, 0);
+ } else {
+ so_method(so, tesla, NV50TCL_VIEWPORT_TRANSFORM_EN, 1);
+ so_data (so, 0);
+ so_method(so, tesla, 0x0f90, 1);
+ so_data (so, 1);
+ }
+
+ so_ref(so, &nv50->state.viewport);
+ so_ref(NULL, &so);
+ nv50->state.dirty |= NV50_NEW_VIEWPORT;
+ }
+viewport_uptodate:
+
+ if (nv50->dirty & NV50_NEW_SAMPLER) {
+ int i;
+
+ so = so_new(nv50->sampler_nr * 8 + 3, 0);
+ so_method(so, tesla, NV50TCL_CB_ADDR, 1);
+ so_data (so, NV50_CB_TSC);
+ so_method(so, tesla, NV50TCL_CB_DATA(0) | 0x40000000,
+ nv50->sampler_nr * 8);
+ for (i = 0; i < nv50->sampler_nr; i++)
+ so_datap (so, nv50->sampler[i]->tsc, 8);
+ so_ref(so, &nv50->state.tsc_upload);
+ so_ref(NULL, &so);
+ }
+
+ if (nv50->dirty & (NV50_NEW_TEXTURE | NV50_NEW_SAMPLER))
+ nv50_tex_validate(nv50);
+
+ if (nv50->dirty & NV50_NEW_ARRAYS)
+ nv50_vbo_validate(nv50);
+
+ nv50->state.dirty |= nv50->dirty;
+ nv50->dirty = 0;
+ nv50_state_emit(nv50);
+
+ return TRUE;
+}
+
diff --git a/src/gallium/drivers/nv50/nv50_surface.c b/src/gallium/drivers/nv50/nv50_surface.c
new file mode 100644
index 0000000000..6bf6f773b0
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv50_surface.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2008 Ben Skeggs
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define __NOUVEAU_PUSH_H__
+#include <stdint.h>
+#include "nouveau/nouveau_pushbuf.h"
+#include "nv50_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_inlines.h"
+
+#include "util/u_tile.h"
+
+static INLINE int
+nv50_format(enum pipe_format format)
+{
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ return NV50_2D_DST_FORMAT_A8R8G8B8_UNORM;
+ case PIPE_FORMAT_X8R8G8B8_UNORM:
+ return NV50_2D_DST_FORMAT_X8R8G8B8_UNORM;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ return NV50_2D_DST_FORMAT_R5G6B5_UNORM;
+ case PIPE_FORMAT_A8_UNORM:
+ return NV50_2D_DST_FORMAT_R8_UNORM;
+ default:
+ return -1;
+ }
+}
+
+static int
+nv50_surface_set(struct nv50_screen *screen, struct pipe_surface *ps, int dst)
+{
+ struct nv50_miptree *mt = nv50_miptree(ps->texture);
+ struct nouveau_channel *chan = screen->eng2d->channel;
+ struct nouveau_grobj *eng2d = screen->eng2d;
+ struct nouveau_bo *bo = nv50_miptree(ps->texture)->base.bo;
+ int format, mthd = dst ? NV50_2D_DST_FORMAT : NV50_2D_SRC_FORMAT;
+ int flags = NOUVEAU_BO_VRAM | (dst ? NOUVEAU_BO_WR : NOUVEAU_BO_RD);
+
+ format = nv50_format(ps->format);
+ if (format < 0)
+ return 1;
+
+ if (!bo->tile_flags) {
+ BEGIN_RING(chan, eng2d, mthd, 2);
+ OUT_RING (chan, format);
+ OUT_RING (chan, 1);
+ BEGIN_RING(chan, eng2d, mthd + 0x14, 5);
+ OUT_RING (chan, mt->level[ps->level].pitch);
+ OUT_RING (chan, ps->width);
+ OUT_RING (chan, ps->height);
+ OUT_RELOCh(chan, bo, ps->offset, flags);
+ OUT_RELOCl(chan, bo, ps->offset, flags);
+ } else {
+ BEGIN_RING(chan, eng2d, mthd, 5);
+ OUT_RING (chan, format);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, mt->level[ps->level].tile_mode << 4);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ BEGIN_RING(chan, eng2d, mthd + 0x18, 4);
+ OUT_RING (chan, ps->width);
+ OUT_RING (chan, ps->height);
+ OUT_RELOCh(chan, bo, ps->offset, flags);
+ OUT_RELOCl(chan, bo, ps->offset, flags);
+ }
+
+#if 0
+ if (dst) {
+ BEGIN_RING(chan, eng2d, NV50_2D_CLIP_X, 4);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, ps->width);
+ OUT_RING (chan, ps->height);
+ }
+#endif
+
+ return 0;
+}
+
+int
+nv50_surface_do_copy(struct nv50_screen *screen, struct pipe_surface *dst,
+ int dx, int dy, struct pipe_surface *src, int sx, int sy,
+ int w, int h)
+{
+ struct nouveau_channel *chan = screen->eng2d->channel;
+ struct nouveau_grobj *eng2d = screen->eng2d;
+ int ret;
+
+ WAIT_RING (chan, 32);
+
+ ret = nv50_surface_set(screen, dst, 1);
+ if (ret)
+ return ret;
+
+ ret = nv50_surface_set(screen, src, 0);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, eng2d, 0x088c, 1);
+ OUT_RING (chan, 0);
+ BEGIN_RING(chan, eng2d, NV50_2D_BLIT_DST_X, 4);
+ OUT_RING (chan, dx);
+ OUT_RING (chan, dy);
+ OUT_RING (chan, w);
+ OUT_RING (chan, h);
+ BEGIN_RING(chan, eng2d, 0x08c0, 4);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ BEGIN_RING(chan, eng2d, 0x08d0, 4);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, sx);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, sy);
+
+ return 0;
+}
+
+static void
+nv50_surface_copy(struct pipe_context *pipe,
+ struct pipe_surface *dest, unsigned destx, unsigned desty,
+ struct pipe_surface *src, unsigned srcx, unsigned srcy,
+ unsigned width, unsigned height)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+ struct nv50_screen *screen = nv50->screen;
+
+ assert(src->format == dest->format);
+
+ nv50_surface_do_copy(screen, dest, destx, desty, src, srcx,
+ srcy, width, height);
+}
+
+static void
+nv50_surface_fill(struct pipe_context *pipe, struct pipe_surface *dest,
+ unsigned destx, unsigned desty, unsigned width,
+ unsigned height, unsigned value)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+ struct nv50_screen *screen = nv50->screen;
+ struct nouveau_channel *chan = screen->eng2d->channel;
+ struct nouveau_grobj *eng2d = screen->eng2d;
+ int format, ret;
+
+ format = nv50_format(dest->format);
+ if (format < 0)
+ return;
+
+ WAIT_RING (chan, 32);
+
+ ret = nv50_surface_set(screen, dest, 1);
+ if (ret)
+ return;
+
+ BEGIN_RING(chan, eng2d, 0x0580, 3);
+ OUT_RING (chan, 4);
+ OUT_RING (chan, format);
+ OUT_RING (chan, value);
+ BEGIN_RING(chan, eng2d, NV50_2D_RECT_X1, 4);
+ OUT_RING (chan, destx);
+ OUT_RING (chan, desty);
+ OUT_RING (chan, width);
+ OUT_RING (chan, height);
+}
+
+void
+nv50_init_surface_functions(struct nv50_context *nv50)
+{
+ nv50->pipe.surface_copy = nv50_surface_copy;
+ nv50->pipe.surface_fill = nv50_surface_fill;
+}
+
diff --git a/src/gallium/drivers/nv50/nv50_tex.c b/src/gallium/drivers/nv50/nv50_tex.c
new file mode 100644
index 0000000000..033cb50c11
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv50_tex.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2008 Ben Skeggs
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "nv50_context.h"
+#include "nv50_texture.h"
+
+#include "nouveau/nouveau_stateobj.h"
+
+static int
+nv50_tex_construct(struct nv50_context *nv50, struct nouveau_stateobj *so,
+ struct nv50_miptree *mt, int unit)
+{
+ switch (mt->base.base.format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ so_data(so, NV50TIC_0_0_MAPA_C3 | NV50TIC_0_0_TYPEA_UNORM |
+ NV50TIC_0_0_MAPR_C2 | NV50TIC_0_0_TYPER_UNORM |
+ NV50TIC_0_0_MAPG_C1 | NV50TIC_0_0_TYPEG_UNORM |
+ NV50TIC_0_0_MAPB_C0 | NV50TIC_0_0_TYPEB_UNORM |
+ NV50TIC_0_0_FMT_8_8_8_8);
+ break;
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ so_data(so, NV50TIC_0_0_MAPA_C3 | NV50TIC_0_0_TYPEA_UNORM |
+ NV50TIC_0_0_MAPR_C2 | NV50TIC_0_0_TYPER_UNORM |
+ NV50TIC_0_0_MAPG_C1 | NV50TIC_0_0_TYPEG_UNORM |
+ NV50TIC_0_0_MAPB_C0 | NV50TIC_0_0_TYPEB_UNORM |
+ NV50TIC_0_0_FMT_1_5_5_5);
+ break;
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ so_data(so, NV50TIC_0_0_MAPA_C3 | NV50TIC_0_0_TYPEA_UNORM |
+ NV50TIC_0_0_MAPR_C2 | NV50TIC_0_0_TYPER_UNORM |
+ NV50TIC_0_0_MAPG_C1 | NV50TIC_0_0_TYPEG_UNORM |
+ NV50TIC_0_0_MAPB_C0 | NV50TIC_0_0_TYPEB_UNORM |
+ NV50TIC_0_0_FMT_4_4_4_4);
+ break;
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ so_data(so, NV50TIC_0_0_MAPA_ONE | NV50TIC_0_0_TYPEA_UNORM |
+ NV50TIC_0_0_MAPR_C2 | NV50TIC_0_0_TYPER_UNORM |
+ NV50TIC_0_0_MAPG_C1 | NV50TIC_0_0_TYPEG_UNORM |
+ NV50TIC_0_0_MAPB_C0 | NV50TIC_0_0_TYPEB_UNORM |
+ NV50TIC_0_0_FMT_5_6_5);
+ break;
+ case PIPE_FORMAT_L8_UNORM:
+ so_data(so, NV50TIC_0_0_MAPA_ONE | NV50TIC_0_0_TYPEA_UNORM |
+ NV50TIC_0_0_MAPR_C0 | NV50TIC_0_0_TYPER_UNORM |
+ NV50TIC_0_0_MAPG_C0 | NV50TIC_0_0_TYPEG_UNORM |
+ NV50TIC_0_0_MAPB_C0 | NV50TIC_0_0_TYPEB_UNORM |
+ NV50TIC_0_0_FMT_8);
+ break;
+ case PIPE_FORMAT_A8_UNORM:
+ so_data(so, NV50TIC_0_0_MAPA_C0 | NV50TIC_0_0_TYPEA_UNORM |
+ NV50TIC_0_0_MAPR_ZERO | NV50TIC_0_0_TYPER_UNORM |
+ NV50TIC_0_0_MAPG_ZERO | NV50TIC_0_0_TYPEG_UNORM |
+ NV50TIC_0_0_MAPB_ZERO | NV50TIC_0_0_TYPEB_UNORM |
+ NV50TIC_0_0_FMT_8);
+ break;
+ case PIPE_FORMAT_I8_UNORM:
+ so_data(so, NV50TIC_0_0_MAPA_C0 | NV50TIC_0_0_TYPEA_UNORM |
+ NV50TIC_0_0_MAPR_C0 | NV50TIC_0_0_TYPER_UNORM |
+ NV50TIC_0_0_MAPG_C0 | NV50TIC_0_0_TYPEG_UNORM |
+ NV50TIC_0_0_MAPB_C0 | NV50TIC_0_0_TYPEB_UNORM |
+ NV50TIC_0_0_FMT_8);
+ break;
+ case PIPE_FORMAT_A8L8_UNORM:
+ so_data(so, NV50TIC_0_0_MAPA_C1 | NV50TIC_0_0_TYPEA_UNORM |
+ NV50TIC_0_0_MAPR_C0 | NV50TIC_0_0_TYPER_UNORM |
+ NV50TIC_0_0_MAPG_C0 | NV50TIC_0_0_TYPEG_UNORM |
+ NV50TIC_0_0_MAPB_C0 | NV50TIC_0_0_TYPEB_UNORM |
+ NV50TIC_0_0_FMT_8_8);
+ break;
+ case PIPE_FORMAT_DXT1_RGB:
+ so_data(so, NV50TIC_0_0_MAPA_ONE | NV50TIC_0_0_TYPEA_UNORM |
+ NV50TIC_0_0_MAPR_C0 | NV50TIC_0_0_TYPER_UNORM |
+ NV50TIC_0_0_MAPG_C1 | NV50TIC_0_0_TYPEG_UNORM |
+ NV50TIC_0_0_MAPB_C2 | NV50TIC_0_0_TYPEB_UNORM |
+ NV50TIC_0_0_FMT_DXT1);
+ break;
+ case PIPE_FORMAT_DXT1_RGBA:
+ so_data(so, NV50TIC_0_0_MAPA_C3 | NV50TIC_0_0_TYPEA_UNORM |
+ NV50TIC_0_0_MAPR_C0 | NV50TIC_0_0_TYPER_UNORM |
+ NV50TIC_0_0_MAPG_C1 | NV50TIC_0_0_TYPEG_UNORM |
+ NV50TIC_0_0_MAPB_C2 | NV50TIC_0_0_TYPEB_UNORM |
+ NV50TIC_0_0_FMT_DXT1);
+ break;
+ case PIPE_FORMAT_DXT3_RGBA:
+ so_data(so, NV50TIC_0_0_MAPA_C3 | NV50TIC_0_0_TYPEA_UNORM |
+ NV50TIC_0_0_MAPR_C0 | NV50TIC_0_0_TYPER_UNORM |
+ NV50TIC_0_0_MAPG_C1 | NV50TIC_0_0_TYPEG_UNORM |
+ NV50TIC_0_0_MAPB_C2 | NV50TIC_0_0_TYPEB_UNORM |
+ NV50TIC_0_0_FMT_DXT3);
+ break;
+ case PIPE_FORMAT_DXT5_RGBA:
+ so_data(so, NV50TIC_0_0_MAPA_C3 | NV50TIC_0_0_TYPEA_UNORM |
+ NV50TIC_0_0_MAPR_C0 | NV50TIC_0_0_TYPER_UNORM |
+ NV50TIC_0_0_MAPG_C1 | NV50TIC_0_0_TYPEG_UNORM |
+ NV50TIC_0_0_MAPB_C2 | NV50TIC_0_0_TYPEB_UNORM |
+ NV50TIC_0_0_FMT_DXT5);
+ break;
+ default:
+ return 1;
+ }
+
+ so_reloc(so, mt->base.bo, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_LOW |
+ NOUVEAU_BO_RD, 0, 0);
+ if (nv50->sampler[unit]->normalized)
+ so_data (so, 0xd0005000 | mt->base.bo->tile_mode << 22);
+ else
+ so_data (so, 0x5001d000 | mt->base.bo->tile_mode << 22);
+ so_data (so, 0x00300000);
+ so_data (so, mt->base.base.width[0]);
+ so_data (so, (mt->base.base.last_level << 28) |
+ (mt->base.base.depth[0] << 16) | mt->base.base.height[0]);
+ so_data (so, 0x03000000);
+ so_data (so, mt->base.base.last_level << 4);
+
+ return 0;
+}
+
+void
+nv50_tex_validate(struct nv50_context *nv50)
+{
+ struct nouveau_grobj *tesla = nv50->screen->tesla;
+ struct nouveau_stateobj *so;
+ int unit, push;
+
+ push = nv50->miptree_nr * 9 + 2;
+ push += MAX2(nv50->miptree_nr, nv50->state.miptree_nr) * 2;
+
+ so = so_new(push, nv50->miptree_nr * 2);
+ so_method(so, tesla, NV50TCL_CB_ADDR, 1);
+ so_data (so, NV50_CB_TIC);
+ for (unit = 0; unit < nv50->miptree_nr; unit++) {
+ struct nv50_miptree *mt = nv50->miptree[unit];
+
+ so_method(so, tesla, NV50TCL_CB_DATA(0) | 0x40000000, 8);
+ if (nv50_tex_construct(nv50, so, mt, unit)) {
+ NOUVEAU_ERR("failed tex validate\n");
+ so_ref(NULL, &so);
+ return;
+ }
+
+ so_method(so, tesla, NV50TCL_SET_SAMPLER_TEX, 1);
+ so_data (so, (unit << NV50TCL_SET_SAMPLER_TEX_TIC_SHIFT) |
+ (unit << NV50TCL_SET_SAMPLER_TEX_SAMPLER_SHIFT) |
+ NV50TCL_SET_SAMPLER_TEX_VALID);
+ }
+
+ for (; unit < nv50->state.miptree_nr; unit++) {
+ so_method(so, tesla, NV50TCL_SET_SAMPLER_TEX, 1);
+ so_data (so,
+ (unit << NV50TCL_SET_SAMPLER_TEX_SAMPLER_SHIFT) | 0);
+ }
+
+ so_ref(so, &nv50->state.tic_upload);
+ so_ref(NULL, &so);
+ nv50->state.miptree_nr = nv50->miptree_nr;
+}
+
diff --git a/src/gallium/drivers/nv50/nv50_texture.h b/src/gallium/drivers/nv50/nv50_texture.h
new file mode 100644
index 0000000000..207fb039f7
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv50_texture.h
@@ -0,0 +1,150 @@
+#ifndef __NV50_TEXTURE_H__
+#define __NV50_TEXTURE_H__
+
+/* It'd be really nice to have these in nouveau_class.h, generated by
+ * renouveau like the rest of the object headers - but it's not clear it
+ * can handle non-object stuff nicely - need to look into it.
+ */
+
+/* Texture image control block */
+#define NV50TIC_0_0_MAPA_MASK 0x38000000
+#define NV50TIC_0_0_MAPA_ZERO 0x00000000
+#define NV50TIC_0_0_MAPA_C0 0x10000000
+#define NV50TIC_0_0_MAPA_C1 0x18000000
+#define NV50TIC_0_0_MAPA_C2 0x20000000
+#define NV50TIC_0_0_MAPA_C3 0x28000000
+#define NV50TIC_0_0_MAPA_ONE 0x38000000
+#define NV50TIC_0_0_MAPB_MASK 0x07000000
+#define NV50TIC_0_0_MAPB_ZERO 0x00000000
+#define NV50TIC_0_0_MAPB_C0 0x02000000
+#define NV50TIC_0_0_MAPB_C1 0x03000000
+#define NV50TIC_0_0_MAPB_C2 0x04000000
+#define NV50TIC_0_0_MAPB_C3 0x05000000
+#define NV50TIC_0_0_MAPB_ONE 0x07000000
+#define NV50TIC_0_0_MAPG_MASK 0x00e00000
+#define NV50TIC_0_0_MAPG_ZERO 0x00000000
+#define NV50TIC_0_0_MAPG_C0 0x00400000
+#define NV50TIC_0_0_MAPG_C1 0x00600000
+#define NV50TIC_0_0_MAPG_C2 0x00800000
+#define NV50TIC_0_0_MAPG_C3 0x00a00000
+#define NV50TIC_0_0_MAPG_ONE 0x00e00000
+#define NV50TIC_0_0_MAPR_MASK 0x001c0000
+#define NV50TIC_0_0_MAPR_ZERO 0x00000000
+#define NV50TIC_0_0_MAPR_C0 0x00080000
+#define NV50TIC_0_0_MAPR_C1 0x000c0000
+#define NV50TIC_0_0_MAPR_C2 0x00100000
+#define NV50TIC_0_0_MAPR_C3 0x00140000
+#define NV50TIC_0_0_MAPR_ONE 0x001c0000
+#define NV50TIC_0_0_TYPEA_MASK 0x00038000
+#define NV50TIC_0_0_TYPEA_UNORM 0x00010000
+#define NV50TIC_0_0_TYPEA_SNORM 0x00008000
+#define NV50TIC_0_0_TYPEA_FLOAT 0x00038000
+#define NV50TIC_0_0_TYPEB_MASK 0x00007000
+#define NV50TIC_0_0_TYPEB_UNORM 0x00002000
+#define NV50TIC_0_0_TYPEB_SNORM 0x00001000
+#define NV50TIC_0_0_TYPEB_FLOAT 0x00007000
+#define NV50TIC_0_0_TYPEG_MASK 0x00000e00
+#define NV50TIC_0_0_TYPEG_UNORM 0x00000400
+#define NV50TIC_0_0_TYPEG_SNORM 0x00000200
+#define NV50TIC_0_0_TYPEG_FLOAT 0x00000e00
+#define NV50TIC_0_0_TYPER_MASK 0x000001c0
+#define NV50TIC_0_0_TYPER_UNORM 0x00000080
+#define NV50TIC_0_0_TYPER_SNORM 0x00000040
+#define NV50TIC_0_0_TYPER_FLOAT 0x000001c0
+#define NV50TIC_0_0_FMT_MASK 0x0000003f
+#define NV50TIC_0_0_FMT_32_32_32_32 0x00000001
+#define NV50TIC_0_0_FMT_16_16_16_16 0x00000003
+#define NV50TIC_0_0_FMT_32_32 0x00000004
+#define NV50TIC_0_0_FMT_8_8_8_8 0x00000008
+#define NV50TIC_0_0_FMT_2_10_10_10 0x00000009
+#define NV50TIC_0_0_FMT_32 0x0000000f
+#define NV50TIC_0_0_FMT_4_4_4_4 0x00000012
+/* #define NV50TIC_0_0_FMT_1_5_5_5 0x00000013 */
+#define NV50TIC_0_0_FMT_1_5_5_5 0x00000014
+#define NV50TIC_0_0_FMT_5_6_5 0x00000015
+#define NV50TIC_0_0_FMT_8_8 0x00000018
+#define NV50TIC_0_0_FMT_16 0x0000001b
+#define NV50TIC_0_0_FMT_8 0x0000001d
+#define NV50TIC_0_0_FMT_10_11_11 0x00000021
+#define NV50TIC_0_0_FMT_DXT1 0x00000024
+#define NV50TIC_0_0_FMT_DXT3 0x00000025
+#define NV50TIC_0_0_FMT_DXT5 0x00000026
+#define NV50TIC_0_0_FMT_RGTC1 0x00000027
+#define NV50TIC_0_0_FMT_RGTC2 0x00000028
+
+#define NV50TIC_0_1_OFFSET_LOW_MASK 0xffffffff
+#define NV50TIC_0_1_OFFSET_LOW_SHIFT 0
+
+#define NV50TIC_0_2_UNKNOWN_MASK 0xffffffff
+
+#define NV50TIC_0_3_UNKNOWN_MASK 0xffffffff
+
+#define NV50TIC_0_4_WIDTH_MASK 0x0000ffff
+#define NV50TIC_0_4_WIDTH_SHIFT 0
+
+#define NV50TIC_0_5_DEPTH_MASK 0xffff0000
+#define NV50TIC_0_5_DEPTH_SHIFT 16
+#define NV50TIC_0_5_HEIGHT_MASK 0x0000ffff
+#define NV50TIC_0_5_HEIGHT_SHIFT 0
+
+#define NV50TIC_0_6_UNKNOWN_MASK 0xffffffff
+
+#define NV50TIC_0_7_OFFSET_HIGH_MASK 0xffffffff
+#define NV50TIC_0_7_OFFSET_HIGH_SHIFT 0
+
+/* Texture sampler control block */
+#define NV50TSC_1_0_WRAPS_MASK 0x00000007
+#define NV50TSC_1_0_WRAPS_REPEAT 0x00000000
+#define NV50TSC_1_0_WRAPS_MIRROR_REPEAT 0x00000001
+#define NV50TSC_1_0_WRAPS_CLAMP_TO_EDGE 0x00000002
+#define NV50TSC_1_0_WRAPS_CLAMP_TO_BORDER 0x00000003
+#define NV50TSC_1_0_WRAPS_CLAMP 0x00000004
+#define NV50TSC_1_0_WRAPS_MIRROR_CLAMP_TO_EDGE 0x00000005
+#define NV50TSC_1_0_WRAPS_MIRROR_CLAMP_TO_BORDER 0x00000006
+#define NV50TSC_1_0_WRAPS_MIRROR_CLAMP 0x00000007
+#define NV50TSC_1_0_WRAPT_MASK 0x00000038
+#define NV50TSC_1_0_WRAPT_REPEAT 0x00000000
+#define NV50TSC_1_0_WRAPT_MIRROR_REPEAT 0x00000008
+#define NV50TSC_1_0_WRAPT_CLAMP_TO_EDGE 0x00000010
+#define NV50TSC_1_0_WRAPT_CLAMP_TO_BORDER 0x00000018
+#define NV50TSC_1_0_WRAPT_CLAMP 0x00000020
+#define NV50TSC_1_0_WRAPT_MIRROR_CLAMP_TO_EDGE 0x00000028
+#define NV50TSC_1_0_WRAPT_MIRROR_CLAMP_TO_BORDER 0x00000030
+#define NV50TSC_1_0_WRAPT_MIRROR_CLAMP 0x00000038
+#define NV50TSC_1_0_WRAPR_MASK 0x000001c0
+#define NV50TSC_1_0_WRAPR_REPEAT 0x00000000
+#define NV50TSC_1_0_WRAPR_MIRROR_REPEAT 0x00000040
+#define NV50TSC_1_0_WRAPR_CLAMP_TO_EDGE 0x00000080
+#define NV50TSC_1_0_WRAPR_CLAMP_TO_BORDER 0x000000c0
+#define NV50TSC_1_0_WRAPR_CLAMP 0x00000100
+#define NV50TSC_1_0_WRAPR_MIRROR_CLAMP_TO_EDGE 0x00000140
+#define NV50TSC_1_0_WRAPR_MIRROR_CLAMP_TO_BORDER 0x00000180
+#define NV50TSC_1_0_WRAPR_MIRROR_CLAMP 0x000001c0
+#define NV50TSC_1_0_MAX_ANISOTROPY_MASK 0x00700000
+
+#define NV50TSC_1_1_MAGF_MASK 0x00000003
+#define NV50TSC_1_1_MAGF_NEAREST 0x00000001
+#define NV50TSC_1_1_MAGF_LINEAR 0x00000002
+#define NV50TSC_1_1_MINF_MASK 0x00000030
+#define NV50TSC_1_1_MINF_NEAREST 0x00000010
+#define NV50TSC_1_1_MINF_LINEAR 0x00000020
+#define NV50TSC_1_1_MIPF_MASK 0x000000c0
+#define NV50TSC_1_1_MIPF_NONE 0x00000040
+#define NV50TSC_1_1_MIPF_NEAREST 0x00000080
+#define NV50TSC_1_1_MIPF_LINEAR 0x000000c0
+#define NV50TSC_1_1_LOD_BIAS_MASK 0x01fff000
+
+#define NV50TSC_1_2_MIN_LOD_MASK 0x00000f00
+#define NV50TSC_1_2_MAX_LOD_MASK 0x00f00000
+
+#define NV50TSC_1_3_UNKNOWN_MASK 0xffffffff
+
+#define NV50TSC_1_4_BORDER_COLOR_RED_MASK 0xffffffff
+
+#define NV50TSC_1_5_BORDER_COLOR_GREEN_MASK 0xffffffff
+
+#define NV50TSC_1_6_BORDER_COLOR_BLUE_MASK 0xffffffff
+
+#define NV50TSC_1_7_BORDER_COLOR_ALPHA_MASK 0xffffffff
+
+#endif
diff --git a/src/gallium/drivers/nv50/nv50_transfer.c b/src/gallium/drivers/nv50/nv50_transfer.c
new file mode 100644
index 0000000000..e9c3562194
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv50_transfer.c
@@ -0,0 +1,239 @@
+
+#include "pipe/p_context.h"
+#include "pipe/p_inlines.h"
+
+#include "nv50_context.h"
+
+struct nv50_transfer {
+ struct pipe_transfer base;
+ struct nouveau_bo *bo;
+ unsigned level_offset;
+ unsigned level_tiling;
+ int level_pitch;
+ int level_width;
+ int level_height;
+ int level_x;
+ int level_y;
+};
+
+static void
+nv50_transfer_rect_m2mf(struct pipe_screen *pscreen,
+ struct nouveau_bo *src_bo, unsigned src_offset,
+ int src_pitch, unsigned src_tile_mode,
+ int sx, int sy, int sw, int sh,
+ struct nouveau_bo *dst_bo, unsigned dst_offset,
+ int dst_pitch, unsigned dst_tile_mode,
+ int dx, int dy, int dw, int dh,
+ int cpp, int width, int height,
+ unsigned src_reloc, unsigned dst_reloc)
+{
+ struct nv50_screen *screen = nv50_screen(pscreen);
+ struct nouveau_channel *chan = screen->m2mf->channel;
+ struct nouveau_grobj *m2mf = screen->m2mf;
+
+ src_reloc |= NOUVEAU_BO_RD;
+ dst_reloc |= NOUVEAU_BO_WR;
+
+ WAIT_RING (chan, 14);
+
+ if (!src_bo->tile_flags) {
+ BEGIN_RING(chan, m2mf,
+ NV50_MEMORY_TO_MEMORY_FORMAT_LINEAR_IN, 1);
+ OUT_RING (chan, 1);
+ BEGIN_RING(chan, m2mf,
+ NV50_MEMORY_TO_MEMORY_FORMAT_PITCH_IN, 1);
+ OUT_RING (chan, src_pitch);
+ src_offset += (sy * src_pitch) + (sx * cpp);
+ } else {
+ BEGIN_RING(chan, m2mf,
+ NV50_MEMORY_TO_MEMORY_FORMAT_LINEAR_IN, 6);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, src_tile_mode << 4);
+ OUT_RING (chan, sw * cpp);
+ OUT_RING (chan, sh);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ }
+
+ if (!dst_bo->tile_flags) {
+ BEGIN_RING(chan, m2mf,
+ NV50_MEMORY_TO_MEMORY_FORMAT_LINEAR_OUT, 1);
+ OUT_RING (chan, 1);
+ BEGIN_RING(chan, m2mf,
+ NV50_MEMORY_TO_MEMORY_FORMAT_PITCH_OUT, 1);
+ OUT_RING (chan, dst_pitch);
+ dst_offset += (dy * dst_pitch) + (dx * cpp);
+ } else {
+ BEGIN_RING(chan, m2mf,
+ NV50_MEMORY_TO_MEMORY_FORMAT_LINEAR_OUT, 6);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, dst_tile_mode << 4);
+ OUT_RING (chan, dw * cpp);
+ OUT_RING (chan, dh);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ }
+
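+ /* the line count per M2MF operation is limited to 2047, so large
+ * rectangles are transferred in chunks
+ */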
+ while (height) {
+ int line_count = height > 2047 ? 2047 : height;
+
+ WAIT_RING (chan, 15);
+ BEGIN_RING(chan, m2mf,
+ NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH, 2);
+ OUT_RELOCh(chan, src_bo, src_offset, src_reloc);
+ OUT_RELOCh(chan, dst_bo, dst_offset, dst_reloc);
+ BEGIN_RING(chan, m2mf,
+ NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 2);
+ OUT_RELOCl(chan, src_bo, src_offset, src_reloc);
+ OUT_RELOCl(chan, dst_bo, dst_offset, dst_reloc);
+ if (src_bo->tile_flags) {
+ BEGIN_RING(chan, m2mf,
+ NV50_MEMORY_TO_MEMORY_FORMAT_TILING_POSITION_IN, 1);
+ OUT_RING (chan, (sy << 16) | sx);
+ } else {
+ src_offset += (line_count * src_pitch);
+ }
+ if (dst_bo->tile_flags) {
+ BEGIN_RING(chan, m2mf,
+ NV50_MEMORY_TO_MEMORY_FORMAT_TILING_POSITION_OUT, 1);
+ OUT_RING (chan, (dy << 16) | dx);
+ } else {
+ dst_offset += (line_count * dst_pitch);
+ }
+ BEGIN_RING(chan, m2mf,
+ NV50_MEMORY_TO_MEMORY_FORMAT_LINE_LENGTH_IN, 4);
+ OUT_RING (chan, width * cpp);
+ OUT_RING (chan, line_count);
+ OUT_RING (chan, 0x00000101);
+ OUT_RING (chan, 0);
+ FIRE_RING (chan);
+
+ height -= line_count;
+ sy += line_count;
+ dy += line_count;
+ }
+}
+
+static struct pipe_transfer *
+nv50_transfer_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ enum pipe_transfer_usage usage,
+ unsigned x, unsigned y, unsigned w, unsigned h)
+{
+ struct nouveau_device *dev = nouveau_screen(pscreen)->device;
+ struct nv50_miptree *mt = nv50_miptree(pt);
+ struct nv50_miptree_level *lvl = &mt->level[level];
+ struct nv50_transfer *tx;
+ unsigned image = 0;
+ int ret;
+
+ if (pt->target == PIPE_TEXTURE_CUBE)
+ image = face;
+ else
+ if (pt->target == PIPE_TEXTURE_3D)
+ image = zslice;
+
+ tx = CALLOC_STRUCT(nv50_transfer);
+ if (!tx)
+ return NULL;
+
+ pipe_texture_reference(&tx->base.texture, pt);
+ tx->base.format = pt->format;
+ tx->base.width = w;
+ tx->base.height = h;
+ tx->base.block = pt->block;
+ tx->base.nblocksx = pt->nblocksx[level];
+ tx->base.nblocksy = pt->nblocksy[level];
+ tx->base.stride = (w * pt->block.size);
+ tx->base.usage = usage;
+
+ tx->level_pitch = lvl->pitch;
+ tx->level_width = mt->base.base.width[level];
+ tx->level_height = mt->base.base.height[level];
+ tx->level_offset = lvl->image_offset[image];
+ tx->level_tiling = lvl->tile_mode;
+ tx->level_x = x;
+ tx->level_y = y;
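+ /* allocate a linear staging buffer in GART; the M2MF engine copies
+ * between it and the (possibly tiled) texture in VRAM
+ */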
+ ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0,
+ w * pt->block.size * h, &tx->bo);
+ if (ret) {
+ FREE(tx);
+ return NULL;
+ }
+
+ if (usage != PIPE_TRANSFER_WRITE) {
+ nv50_transfer_rect_m2mf(pscreen, mt->base.bo, tx->level_offset,
+ tx->level_pitch, tx->level_tiling,
+ x, y,
+ tx->level_width, tx->level_height,
+ tx->bo, 0, tx->base.stride,
+ tx->bo->tile_mode, 0, 0,
+ tx->base.width, tx->base.height,
+ tx->base.block.size, w, h,
+ NOUVEAU_BO_VRAM | NOUVEAU_BO_GART,
+ NOUVEAU_BO_GART);
+ }
+
+ return &tx->base;
+}
+
+static void
+nv50_transfer_del(struct pipe_transfer *ptx)
+{
+ struct nv50_transfer *tx = (struct nv50_transfer *)ptx;
+ struct nv50_miptree *mt = nv50_miptree(ptx->texture);
+
+ if (ptx->usage != PIPE_TRANSFER_READ) {
+ struct pipe_screen *pscreen = ptx->texture->screen;
+ nv50_transfer_rect_m2mf(pscreen, tx->bo, 0, tx->base.stride,
+ tx->bo->tile_mode, 0, 0,
+ tx->base.width, tx->base.height,
+ mt->base.bo, tx->level_offset,
+ tx->level_pitch, tx->level_tiling,
+ tx->level_x, tx->level_y,
+ tx->level_width, tx->level_height,
+ tx->base.block.size, tx->base.width,
+ tx->base.height,
+ NOUVEAU_BO_GART, NOUVEAU_BO_VRAM |
+ NOUVEAU_BO_GART);
+ }
+
+ nouveau_bo_ref(NULL, &tx->bo);
+ pipe_texture_reference(&ptx->texture, NULL);
+ FREE(ptx);
+}
+
+static void *
+nv50_transfer_map(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv50_transfer *tx = (struct nv50_transfer *)ptx;
+ unsigned flags = 0;
+ int ret;
+
+ if (ptx->usage & PIPE_TRANSFER_WRITE)
+ flags |= NOUVEAU_BO_WR;
+ if (ptx->usage & PIPE_TRANSFER_READ)
+ flags |= NOUVEAU_BO_RD;
+
+ ret = nouveau_bo_map(tx->bo, flags);
+ if (ret)
+ return NULL;
+ return tx->bo->map;
+}
+
+static void
+nv50_transfer_unmap(struct pipe_screen *pscreen, struct pipe_transfer *ptx)
+{
+ struct nv50_transfer *tx = (struct nv50_transfer *)ptx;
+
+ nouveau_bo_unmap(tx->bo);
+}
+
+void
+nv50_transfer_init_screen_functions(struct pipe_screen *pscreen)
+{
+ pscreen->get_tex_transfer = nv50_transfer_new;
+ pscreen->tex_transfer_destroy = nv50_transfer_del;
+ pscreen->transfer_map = nv50_transfer_map;
+ pscreen->transfer_unmap = nv50_transfer_unmap;
+}
diff --git a/src/gallium/drivers/nv50/nv50_vbo.c b/src/gallium/drivers/nv50/nv50_vbo.c
new file mode 100644
index 0000000000..eeed148c7b
--- /dev/null
+++ b/src/gallium/drivers/nv50/nv50_vbo.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright 2008 Ben Skeggs
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+
+#include "nv50_context.h"
+
+static INLINE unsigned
+nv50_prim(unsigned mode)
+{
+ switch (mode) {
+ case PIPE_PRIM_POINTS: return NV50TCL_VERTEX_BEGIN_POINTS;
+ case PIPE_PRIM_LINES: return NV50TCL_VERTEX_BEGIN_LINES;
+ case PIPE_PRIM_LINE_LOOP: return NV50TCL_VERTEX_BEGIN_LINE_LOOP;
+ case PIPE_PRIM_LINE_STRIP: return NV50TCL_VERTEX_BEGIN_LINE_STRIP;
+ case PIPE_PRIM_TRIANGLES: return NV50TCL_VERTEX_BEGIN_TRIANGLES;
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ return NV50TCL_VERTEX_BEGIN_TRIANGLE_STRIP;
+ case PIPE_PRIM_TRIANGLE_FAN: return NV50TCL_VERTEX_BEGIN_TRIANGLE_FAN;
+ case PIPE_PRIM_QUADS: return NV50TCL_VERTEX_BEGIN_QUADS;
+ case PIPE_PRIM_QUAD_STRIP: return NV50TCL_VERTEX_BEGIN_QUAD_STRIP;
+ case PIPE_PRIM_POLYGON: return NV50TCL_VERTEX_BEGIN_POLYGON;
+ default:
+ break;
+ }
+
+ NOUVEAU_ERR("invalid primitive type %d\n", mode);
+ return NV50TCL_VERTEX_BEGIN_POINTS;
+}
+
+static INLINE uint32_t
+nv50_vbo_type_to_hw(unsigned type)
+{
+ switch (type) {
+ case PIPE_FORMAT_TYPE_FLOAT:
+ return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_FLOAT;
+ case PIPE_FORMAT_TYPE_UNORM:
+ return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_UNORM;
+ case PIPE_FORMAT_TYPE_SNORM:
+ return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SNORM;
+ case PIPE_FORMAT_TYPE_USCALED:
+ return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_USCALED;
+ case PIPE_FORMAT_TYPE_SSCALED:
+ return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SSCALED;
+ /*
+ case PIPE_FORMAT_TYPE_UINT:
+ return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_UINT;
+ case PIPE_FORMAT_TYPE_SINT:
+ return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SINT; */
+ default:
+ return 0;
+ }
+}
+
+static INLINE uint32_t
+nv50_vbo_size_to_hw(unsigned size, unsigned nr_c)
+{
+ static const uint32_t hw_values[] = {
+ 0, 0, 0, 0,
+ NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_8,
+ NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_8_8,
+ NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_8_8_8,
+ NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_8_8_8_8,
+ NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_16,
+ NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_16_16,
+ NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_16_16_16,
+ NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_16_16_16_16,
+ 0, 0, 0, 0,
+ NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_32,
+ NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_32_32,
+ NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_32_32_32,
+ NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_32_32_32_32 };
+
+ /* we'd also have R11G11B10 and R10G10B10A2 */
+
+ assert(nr_c > 0 && nr_c <= 4);
+
+ if (size > 32)
+ return 0;
+ size >>= (3 - 2); /* bits -> bytes (>> 3), 4 table entries per size (<< 2) */
+
+ return hw_values[size + (nr_c - 1)];
+}
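+
+/* For example, a vertex element with three 32-bit components gives
+ * size >> 1 == 16, so hw_values[16 + (3 - 1)] selects
+ * NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_32_32_32. */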
+
+static INLINE uint32_t
+nv50_vbo_vtxelt_to_hw(struct pipe_vertex_element *ve)
+{
+ uint32_t hw_type, hw_size;
+ enum pipe_format pf = ve->src_format;
+ unsigned size = pf_size_x(pf) << pf_exp2(pf);
+
+ hw_type = nv50_vbo_type_to_hw(pf_type(pf));
+ hw_size = nv50_vbo_size_to_hw(size, ve->nr_components);
+
+ if (!hw_type || !hw_size) {
+ NOUVEAU_ERR("unsupported vbo format: %s\n", pf_name(pf));
+ abort();
+ return 0x24e80000;
+ }
+
+ if (pf_swizzle_x(pf) == 2) /* BGRA */
+ hw_size |= (1 << 31); /* no real swizzle bits :-( */
+
+ return (hw_type | hw_size);
+}
+
+boolean
+nv50_draw_arrays(struct pipe_context *pipe, unsigned mode, unsigned start,
+ unsigned count)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+ struct nouveau_channel *chan = nv50->screen->tesla->channel;
+ struct nouveau_grobj *tesla = nv50->screen->tesla;
+
+ nv50_state_validate(nv50);
+
+ BEGIN_RING(chan, tesla, 0x142c, 1);
+ OUT_RING (chan, 0);
+ BEGIN_RING(chan, tesla, 0x142c, 1);
+ OUT_RING (chan, 0);
+ BEGIN_RING(chan, tesla, 0x1440, 1);
+ OUT_RING (chan, 0);
+ BEGIN_RING(chan, tesla, 0x1334, 1);
+ OUT_RING (chan, 0);
+
+ BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
+ OUT_RING (chan, nv50_prim(mode));
+ BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BUFFER_FIRST, 2);
+ OUT_RING (chan, start);
+ OUT_RING (chan, count);
+ BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
+ OUT_RING (chan, 0);
+
+ pipe->flush(pipe, 0, NULL);
+ return TRUE;
+}
+
+static INLINE void
+nv50_draw_elements_inline_u08(struct nv50_context *nv50, uint8_t *map,
+ unsigned start, unsigned count)
+{
+ struct nouveau_channel *chan = nv50->screen->tesla->channel;
+ struct nouveau_grobj *tesla = nv50->screen->tesla;
+
+ map += start;
+
+ if (count & 1) {
+ BEGIN_RING(chan, tesla, 0x15e8, 1);
+ OUT_RING (chan, map[0]);
+ map++;
+ count--;
+ }
+
+ while (count) {
+ unsigned nr = count > 2046 ? 2046 : count;
+ int i;
+
+ BEGIN_RING(chan, tesla, 0x400015f0, nr >> 1);
+ for (i = 0; i < nr; i += 2)
+ OUT_RING (chan, (map[i + 1] << 16) | map[i]);
+
+ count -= nr;
+ map += nr;
+ }
+}
+
+static INLINE void
+nv50_draw_elements_inline_u16(struct nv50_context *nv50, uint16_t *map,
+ unsigned start, unsigned count)
+{
+ struct nouveau_channel *chan = nv50->screen->tesla->channel;
+ struct nouveau_grobj *tesla = nv50->screen->tesla;
+
+ map += start;
+
+ if (count & 1) {
+ BEGIN_RING(chan, tesla, 0x15e8, 1);
+ OUT_RING (chan, map[0]);
+ map++;
+ count--;
+ }
+
+ while (count) {
+ unsigned nr = count > 2046 ? 2046 : count;
+ int i;
+
+ BEGIN_RING(chan, tesla, 0x400015f0, nr >> 1);
+ for (i = 0; i < nr; i += 2)
+ OUT_RING (chan, (map[i + 1] << 16) | map[i]);
+
+ count -= nr;
+ map += nr;
+ }
+}
+
+static INLINE void
+nv50_draw_elements_inline_u32(struct nv50_context *nv50, uint32_t *map,
+ unsigned start, unsigned count)
+{
+ struct nouveau_channel *chan = nv50->screen->tesla->channel;
+ struct nouveau_grobj *tesla = nv50->screen->tesla;
+
+ map += start;
+
+ while (count) {
+ unsigned nr = count > 2047 ? 2047 : count;
+
+ BEGIN_RING(chan, tesla, 0x400015e8, nr);
+ OUT_RINGp (chan, map, nr);
+
+ count -= nr;
+ map += nr;
+ }
+}
+
+boolean
+nv50_draw_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer, unsigned indexSize,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct nv50_context *nv50 = nv50_context(pipe);
+ struct nouveau_channel *chan = nv50->screen->tesla->channel;
+ struct nouveau_grobj *tesla = nv50->screen->tesla;
+ struct pipe_screen *pscreen = pipe->screen;
+ void *map;
+
+ map = pipe_buffer_map(pscreen, indexBuffer, PIPE_BUFFER_USAGE_CPU_READ);
+
+ nv50_state_validate(nv50);
+
+ BEGIN_RING(chan, tesla, 0x142c, 1);
+ OUT_RING (chan, 0);
+ BEGIN_RING(chan, tesla, 0x142c, 1);
+ OUT_RING (chan, 0);
+
+ BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
+ OUT_RING (chan, nv50_prim(mode));
+ switch (indexSize) {
+ case 1:
+ nv50_draw_elements_inline_u08(nv50, map, start, count);
+ break;
+ case 2:
+ nv50_draw_elements_inline_u16(nv50, map, start, count);
+ break;
+ case 4:
+ nv50_draw_elements_inline_u32(nv50, map, start, count);
+ break;
+ default:
+ assert(0);
+ }
+ BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);
+ OUT_RING (chan, 0);
+
+ pipe_buffer_unmap(pscreen, indexBuffer);
+ pipe->flush(pipe, 0, NULL);
+ return TRUE;
+}
+
+static INLINE boolean
+nv50_vbo_static_attrib(struct nv50_context *nv50, unsigned attrib,
+ struct nouveau_stateobj **pso,
+ struct pipe_vertex_element *ve,
+ struct pipe_vertex_buffer *vb)
+{
+ struct nouveau_stateobj *so;
+ struct nouveau_grobj *tesla = nv50->screen->tesla;
+ struct nouveau_bo *bo = nouveau_bo(vb->buffer);
+ float *v;
+ int ret;
+ enum pipe_format pf = ve->src_format;
+
+ if ((pf_type(pf) != PIPE_FORMAT_TYPE_FLOAT) ||
+ (pf_size_x(pf) << pf_exp2(pf)) != 32)
+ return FALSE;
+
+ ret = nouveau_bo_map(bo, NOUVEAU_BO_RD);
+ if (ret)
+ return FALSE;
+ v = (float *)(bo->map + (vb->buffer_offset + ve->src_offset));
+
+ so = *pso;
+ if (!so)
+ *pso = so = so_new(nv50->vtxelt_nr * 5, 0);
+
+ switch (ve->nr_components) {
+ case 4:
+ so_method(so, tesla, NV50TCL_VTX_ATTR_4F_X(attrib), 4);
+ so_data (so, fui(v[0]));
+ so_data (so, fui(v[1]));
+ so_data (so, fui(v[2]));
+ so_data (so, fui(v[3]));
+ break;
+ case 3:
+ so_method(so, tesla, NV50TCL_VTX_ATTR_3F_X(attrib), 3);
+ so_data (so, fui(v[0]));
+ so_data (so, fui(v[1]));
+ so_data (so, fui(v[2]));
+ break;
+ case 2:
+ so_method(so, tesla, NV50TCL_VTX_ATTR_2F_X(attrib), 2);
+ so_data (so, fui(v[0]));
+ so_data (so, fui(v[1]));
+ break;
+ case 1:
+ so_method(so, tesla, NV50TCL_VTX_ATTR_1F(attrib), 1);
+ so_data (so, fui(v[0]));
+ break;
+ default:
+ nouveau_bo_unmap(bo);
+ return FALSE;
+ }
+
+ nouveau_bo_unmap(bo);
+ return TRUE;
+}
+
+void
+nv50_vbo_validate(struct nv50_context *nv50)
+{
+ struct nouveau_grobj *tesla = nv50->screen->tesla;
+ struct nouveau_stateobj *vtxbuf, *vtxfmt, *vtxattr;
+ unsigned i;
+
+ /* don't validate if Gallium took away our buffers */
+ if (nv50->vtxbuf_nr == 0)
+ return;
+
+ vtxattr = NULL;
+ vtxbuf = so_new(nv50->vtxelt_nr * 7, nv50->vtxelt_nr * 4);
+ vtxfmt = so_new(nv50->vtxelt_nr + 1, 0);
+ so_method(vtxfmt, tesla, NV50TCL_VERTEX_ARRAY_ATTRIB(0),
+ nv50->vtxelt_nr);
+
+ for (i = 0; i < nv50->vtxelt_nr; i++) {
+ struct pipe_vertex_element *ve = &nv50->vtxelt[i];
+ struct pipe_vertex_buffer *vb =
+ &nv50->vtxbuf[ve->vertex_buffer_index];
+ struct nouveau_bo *bo = nouveau_bo(vb->buffer);
+ uint32_t hw = nv50_vbo_vtxelt_to_hw(ve);
+
+ if (!vb->stride &&
+ nv50_vbo_static_attrib(nv50, i, &vtxattr, ve, vb)) {
+ so_data(vtxfmt, hw | (1 << 4));
+
+ so_method(vtxbuf, tesla,
+ NV50TCL_VERTEX_ARRAY_FORMAT(i), 1);
+ so_data (vtxbuf, 0);
+ continue;
+ }
+ so_data(vtxfmt, hw | i);
+
+ so_method(vtxbuf, tesla, NV50TCL_VERTEX_ARRAY_FORMAT(i), 3);
+ so_data (vtxbuf, 0x20000000 | vb->stride);
+ so_reloc (vtxbuf, bo, vb->buffer_offset +
+ ve->src_offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_GART |
+ NOUVEAU_BO_RD | NOUVEAU_BO_HIGH, 0, 0);
+ so_reloc (vtxbuf, bo, vb->buffer_offset +
+ ve->src_offset, NOUVEAU_BO_VRAM | NOUVEAU_BO_GART |
+ NOUVEAU_BO_RD | NOUVEAU_BO_LOW, 0, 0);
+
+ /* vertex array limits */
+ so_method(vtxbuf, tesla, 0x1080 + (i * 8), 2);
+ so_reloc (vtxbuf, bo, vb->buffer->size - 1,
+ NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD |
+ NOUVEAU_BO_HIGH, 0, 0);
+ so_reloc (vtxbuf, bo, vb->buffer->size - 1,
+ NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD |
+ NOUVEAU_BO_LOW, 0, 0);
+ }
+
+ so_ref (vtxfmt, &nv50->state.vtxfmt);
+ so_ref (vtxbuf, &nv50->state.vtxbuf);
+ so_ref (vtxattr, &nv50->state.vtxattr);
+ so_ref (NULL, &vtxbuf);
+ so_ref (NULL, &vtxfmt);
+ so_ref (NULL, &vtxattr);
+}
+
diff --git a/src/gallium/drivers/r300/Makefile b/src/gallium/drivers/r300/Makefile
new file mode 100644
index 0000000000..93c2152edc
--- /dev/null
+++ b/src/gallium/drivers/r300/Makefile
@@ -0,0 +1,42 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = r300
+
+C_SOURCES = \
+ r3xx_fs.c \
+ r5xx_fs.c \
+ r300_chipset.c \
+ r300_clear.c \
+ r300_context.c \
+ r300_debug.c \
+ r300_emit.c \
+ r300_flush.c \
+ r300_fs.c \
+ r300_query.c \
+ r300_render.c \
+ r300_screen.c \
+ r300_state.c \
+ r300_state_derived.c \
+ r300_state_invariant.c \
+ r300_vs.c \
+ r300_surface.c \
+ r300_texture.c \
+ r300_tgsi_to_rc.c
+
+LIBRARY_INCLUDES = \
+ -I$(TOP)/src/mesa/drivers/dri/r300/compiler \
+ -I$(TOP)/src/mesa \
+ -I$(TOP)/include
+
+COMPILER_ARCHIVE = $(TOP)/src/mesa/drivers/dri/r300/compiler/libr300compiler.a
+
+EXTRA_OBJECTS = \
+ $(COMPILER_ARCHIVE)
+
+include ../../Makefile.template
+
+.PHONY : $(COMPILER_ARCHIVE)
+
+$(COMPILER_ARCHIVE):
+ cd $(TOP)/src/mesa/drivers/dri/r300/compiler; make
diff --git a/src/gallium/drivers/r300/SConscript b/src/gallium/drivers/r300/SConscript
new file mode 100644
index 0000000000..493d7b28bc
--- /dev/null
+++ b/src/gallium/drivers/r300/SConscript
@@ -0,0 +1,29 @@
+Import('*')
+
+env = env.Clone()
+
+r300 = env.ConvenienceLibrary(
+ target = 'r300',
+ source = [
+ 'r3xx_fs.c',
+ 'r5xx_fs.c',
+ 'r300_chipset.c',
+ 'r300_clear.c',
+ 'r300_context.c',
+ 'r300_debug.c',
+ 'r300_emit.c',
+ 'r300_flush.c',
+ 'r300_fs.c',
+ 'r300_query.c',
+ 'r300_render.c',
+ 'r300_screen.c',
+ 'r300_state.c',
+ 'r300_state_derived.c',
+ 'r300_state_invariant.c',
+ 'r300_vs.c',
+ 'r300_surface.c',
+ 'r300_texture.c',
+ ])
+
+Export('r300')
+
diff --git a/src/gallium/drivers/r300/r300_chipset.c b/src/gallium/drivers/r300/r300_chipset.c
new file mode 100644
index 0000000000..d138866d33
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_chipset.c
@@ -0,0 +1,361 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "r300_chipset.h"
+#include "util/u_debug.h"
+
+/* r300_chipset: A file all to itself for deducing the various properties of
+ * Radeons. */
+
+/* Parse a PCI ID and fill an r300_capabilities struct with information. */
+void r300_parse_chipset(struct r300_capabilities* caps)
+{
+ /* Reasonable defaults */
+ caps->num_vert_fpus = 4;
+ caps->has_tcl = getenv("RADEON_NO_TCL") ? FALSE : TRUE;
+ caps->is_r500 = FALSE;
+ caps->high_second_pipe = FALSE;
+
+ /* Note: These are not ordered by PCI ID. I leave that task to GCC,
+ * which will perform the ordering while collating jump tables. Instead,
+ * I've tried to group them according to capabilities and age. */
+ switch (caps->pci_id) {
+ case 0x4144:
+ caps->family = CHIP_FAMILY_R300;
+ caps->high_second_pipe = TRUE;
+ break;
+
+ case 0x4145:
+ case 0x4146:
+ case 0x4147:
+ case 0x4E44:
+ case 0x4E45:
+ case 0x4E46:
+ case 0x4E47:
+ caps->family = CHIP_FAMILY_R300;
+ caps->high_second_pipe = TRUE;
+ break;
+
+ case 0x4150:
+ case 0x4151:
+ case 0x4152:
+ case 0x4153:
+ case 0x4154:
+ case 0x4155:
+ case 0x4156:
+ case 0x4E50:
+ case 0x4E51:
+ case 0x4E52:
+ case 0x4E53:
+ case 0x4E54:
+ case 0x4E56:
+ caps->family = CHIP_FAMILY_RV350;
+ caps->high_second_pipe = TRUE;
+ break;
+
+ case 0x4148:
+ case 0x4149:
+ case 0x414A:
+ case 0x414B:
+ case 0x4E48:
+ case 0x4E49:
+ case 0x4E4B:
+ caps->family = CHIP_FAMILY_R350;
+ caps->high_second_pipe = TRUE;
+ break;
+
+ case 0x4E4A:
+ caps->family = CHIP_FAMILY_R360;
+ caps->high_second_pipe = TRUE;
+ break;
+
+ case 0x5460:
+ case 0x5462:
+ case 0x5464:
+ case 0x5B60:
+ case 0x5B62:
+ case 0x5B63:
+ case 0x5B64:
+ case 0x5B65:
+ caps->family = CHIP_FAMILY_RV370;
+ caps->high_second_pipe = TRUE;
+ break;
+
+ case 0x3150:
+ case 0x3152:
+ case 0x3154:
+ case 0x3E50:
+ case 0x3E54:
+ caps->family = CHIP_FAMILY_RV380;
+ caps->high_second_pipe = TRUE;
+ break;
+
+ case 0x4A48:
+ case 0x4A49:
+ case 0x4A4A:
+ case 0x4A4B:
+ case 0x4A4C:
+ case 0x4A4D:
+ case 0x4A4E:
+ case 0x4A4F:
+ case 0x4A50:
+ case 0x4A54:
+ caps->family = CHIP_FAMILY_R420;
+ caps->num_vert_fpus = 6;
+ break;
+
+ case 0x5548:
+ case 0x5549:
+ case 0x554A:
+ case 0x554B:
+ case 0x5550:
+ case 0x5551:
+ case 0x5552:
+ case 0x5554:
+ case 0x5D57:
+ caps->family = CHIP_FAMILY_R423;
+ caps->num_vert_fpus = 6;
+ break;
+
+ case 0x554C:
+ case 0x554D:
+ case 0x554E:
+ case 0x554F:
+ case 0x5D48:
+ case 0x5D49:
+ case 0x5D4A:
+ caps->family = CHIP_FAMILY_R430;
+ caps->num_vert_fpus = 6;
+ break;
+
+ case 0x5D4C:
+ case 0x5D4D:
+ case 0x5D4E:
+ case 0x5D4F:
+ case 0x5D50:
+ case 0x5D52:
+ caps->family = CHIP_FAMILY_R480;
+ caps->num_vert_fpus = 6;
+ break;
+
+ case 0x4B48:
+ case 0x4B49:
+ case 0x4B4A:
+ case 0x4B4B:
+ case 0x4B4C:
+ caps->family = CHIP_FAMILY_R481;
+ caps->num_vert_fpus = 6;
+ break;
+
+ case 0x5E4C:
+ case 0x5E4F:
+ case 0x564A:
+ case 0x564B:
+ case 0x564F:
+ case 0x5652:
+ case 0x5653:
+ case 0x5657:
+ case 0x5E48:
+ case 0x5E4A:
+ case 0x5E4B:
+ case 0x5E4D:
+ caps->family = CHIP_FAMILY_RV410;
+ caps->num_vert_fpus = 6;
+ break;
+
+ case 0x5954:
+ case 0x5955:
+ caps->family = CHIP_FAMILY_RS480;
+ caps->has_tcl = FALSE;
+ break;
+
+ case 0x5974:
+ case 0x5975:
+ caps->family = CHIP_FAMILY_RS482;
+ caps->has_tcl = FALSE;
+ break;
+
+ case 0x5A41:
+ case 0x5A42:
+ caps->family = CHIP_FAMILY_RS400;
+ caps->has_tcl = FALSE;
+ break;
+
+ case 0x5A61:
+ case 0x5A62:
+ caps->family = CHIP_FAMILY_RC410;
+ caps->has_tcl = FALSE;
+ break;
+
+ case 0x791E:
+ case 0x791F:
+ caps->family = CHIP_FAMILY_RS690;
+ caps->has_tcl = FALSE;
+ break;
+
+ case 0x793F:
+ case 0x7941:
+ case 0x7942:
+ caps->family = CHIP_FAMILY_RS600;
+ caps->has_tcl = FALSE;
+ break;
+
+ case 0x796C:
+ case 0x796D:
+ case 0x796E:
+ case 0x796F:
+ caps->family = CHIP_FAMILY_RS740;
+ caps->has_tcl = FALSE;
+ break;
+
+ case 0x7100:
+ case 0x7101:
+ case 0x7102:
+ case 0x7103:
+ case 0x7104:
+ case 0x7105:
+ case 0x7106:
+ case 0x7108:
+ case 0x7109:
+ case 0x710A:
+ case 0x710B:
+ case 0x710C:
+ case 0x710E:
+ case 0x710F:
+ caps->family = CHIP_FAMILY_R520;
+ caps->num_vert_fpus = 8;
+ caps->is_r500 = TRUE;
+ break;
+
+ case 0x7140:
+ case 0x7141:
+ case 0x7142:
+ case 0x7143:
+ case 0x7144:
+ case 0x7145:
+ case 0x7146:
+ case 0x7147:
+ case 0x7149:
+ case 0x714A:
+ case 0x714B:
+ case 0x714C:
+ case 0x714D:
+ case 0x714E:
+ case 0x714F:
+ case 0x7151:
+ case 0x7152:
+ case 0x7153:
+ case 0x715E:
+ case 0x715F:
+ case 0x7180:
+ case 0x7181:
+ case 0x7183:
+ case 0x7186:
+ case 0x7187:
+ case 0x7188:
+ case 0x718A:
+ case 0x718B:
+ case 0x718C:
+ case 0x718D:
+ case 0x718F:
+ case 0x7193:
+ case 0x7196:
+ case 0x719B:
+ case 0x719F:
+ case 0x7200:
+ case 0x7210:
+ case 0x7211:
+ caps->family = CHIP_FAMILY_RV515;
+ caps->num_vert_fpus = 2;
+ caps->is_r500 = TRUE;
+ break;
+
+ case 0x71C0:
+ case 0x71C1:
+ case 0x71C2:
+ case 0x71C3:
+ case 0x71C4:
+ case 0x71C5:
+ case 0x71C6:
+ case 0x71C7:
+ case 0x71CD:
+ case 0x71CE:
+ case 0x71D2:
+ case 0x71D4:
+ case 0x71D5:
+ case 0x71D6:
+ case 0x71DA:
+ case 0x71DE:
+ caps->family = CHIP_FAMILY_RV530;
+ caps->num_vert_fpus = 5;
+ caps->is_r500 = TRUE;
+ break;
+
+ case 0x7240:
+ case 0x7243:
+ case 0x7244:
+ case 0x7245:
+ case 0x7246:
+ case 0x7247:
+ case 0x7248:
+ case 0x7249:
+ case 0x724A:
+ case 0x724B:
+ case 0x724C:
+ case 0x724D:
+ case 0x724E:
+ case 0x724F:
+ case 0x7284:
+ caps->family = CHIP_FAMILY_R580;
+ caps->num_vert_fpus = 8;
+ caps->is_r500 = TRUE;
+ break;
+
+ case 0x7280:
+ caps->family = CHIP_FAMILY_RV570;
+ caps->num_vert_fpus = 5;
+ caps->is_r500 = TRUE;
+ break;
+
+ case 0x7281:
+ case 0x7283:
+ case 0x7287:
+ case 0x7288:
+ case 0x7289:
+ case 0x728B:
+ case 0x728C:
+ case 0x7290:
+ case 0x7291:
+ case 0x7293:
+ case 0x7297:
+ caps->family = CHIP_FAMILY_RV560;
+ caps->num_vert_fpus = 5;
+ caps->is_r500 = TRUE;
+ break;
+
+ default:
+ debug_printf("r300: Warning: Unknown chipset 0x%x\n",
+ caps->pci_id);
+ break;
+ }
+}
diff --git a/src/gallium/drivers/r300/r300_chipset.h b/src/gallium/drivers/r300/r300_chipset.h
new file mode 100644
index 0000000000..322d4a57e4
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_chipset.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_CHIPSET_H
+#define R300_CHIPSET_H
+
+#include "pipe/p_compiler.h"
+
+/* Structure containing all the possible information about a specific Radeon
+ * in the R3xx, R4xx, and R5xx families. */
+struct r300_capabilities {
+ /* PCI ID */
+ uint32_t pci_id;
+ /* Chipset family */
+ int family;
+ /* The number of vertex floating-point units */
+ int num_vert_fpus;
+ /* The number of fragment pipes */
+ int num_frag_pipes;
+ /* Whether or not TCL is physically present */
+ boolean has_tcl;
+ /* Whether or not this is an RV515 or newer; R500s have many differences
+ * that require extra consideration, compared to their R3xx cousins:
+ * - Extra bit of width and height on texture sizes
+ * - Blend color is split across two registers
+ * - Universal Shader (US) block used for fragment shaders */
+ boolean is_r500;
+ /* Whether or not the second pixel pipe is accessed with the high bit */
+ boolean high_second_pipe;
+};
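+
+/* For example, r300_parse_chipset() fills this in for an RV530 (PCI ID 0x71C0,
+ * among others) as family = CHIP_FAMILY_RV530, num_vert_fpus = 5 and
+ * is_r500 = TRUE. */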
+
+/* Enumerations for legibility and telling which card we're running on. */
+enum {
+ CHIP_FAMILY_R300 = 0,
+ CHIP_FAMILY_R350,
+ CHIP_FAMILY_R360,
+ CHIP_FAMILY_RV350,
+ CHIP_FAMILY_RV370,
+ CHIP_FAMILY_RV380,
+ CHIP_FAMILY_R420,
+ CHIP_FAMILY_R423,
+ CHIP_FAMILY_R430,
+ CHIP_FAMILY_R480,
+ CHIP_FAMILY_R481,
+ CHIP_FAMILY_RV410,
+ CHIP_FAMILY_RS400,
+ CHIP_FAMILY_RC410,
+ CHIP_FAMILY_RS480,
+ CHIP_FAMILY_RS482,
+ CHIP_FAMILY_RS600,
+ CHIP_FAMILY_RS690,
+ CHIP_FAMILY_RS740,
+ CHIP_FAMILY_RV515,
+ CHIP_FAMILY_R520,
+ CHIP_FAMILY_RV530,
+ CHIP_FAMILY_R580,
+ CHIP_FAMILY_RV560,
+ CHIP_FAMILY_RV570
+};
+
+void r300_parse_chipset(struct r300_capabilities* caps);
+
+#endif /* R300_CHIPSET_H */
diff --git a/src/gallium/drivers/r300/r300_clear.c b/src/gallium/drivers/r300/r300_clear.c
new file mode 100644
index 0000000000..8b9cb819ae
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_clear.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "r300_clear.h"
+
+/* Clears currently bound buffers. */
+void r300_clear(struct pipe_context* pipe,
+ unsigned buffers,
+ const float* rgba,
+ double depth,
+ unsigned stencil)
+{
+ /* XXX we can and should do one clear if both color and zs are set */
+ util_clear(pipe, &r300_context(pipe)->framebuffer_state,
+ buffers, rgba, depth, stencil);
+}
diff --git a/src/gallium/drivers/r300/r300_clear.h b/src/gallium/drivers/r300/r300_clear.h
new file mode 100644
index 0000000000..cd5900565e
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_clear.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_CLEAR_H
+#define R300_CLEAR_H
+
+#include "util/u_clear.h"
+
+#include "r300_context.h"
+
+void r300_clear(struct pipe_context* pipe,
+ unsigned buffers,
+ const float* rgba,
+ double depth,
+ unsigned stencil);
+
+#endif /* R300_CLEAR_H */
diff --git a/src/gallium/drivers/r300/r300_context.c b/src/gallium/drivers/r300/r300_context.c
new file mode 100644
index 0000000000..9cc455135d
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_context.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "r300_context.h"
+
+#include "r300_flush.h"
+#include "r300_state_invariant.h"
+
+static boolean r300_draw_range_elements(struct pipe_context* pipe,
+ struct pipe_buffer* indexBuffer,
+ unsigned indexSize,
+ unsigned minIndex,
+ unsigned maxIndex,
+ unsigned mode,
+ unsigned start,
+ unsigned count)
+{
+ struct r300_context* r300 = r300_context(pipe);
+ int i;
+
+ for (i = 0; i < r300->vertex_buffer_count; i++) {
+ void* buf = pipe_buffer_map(pipe->screen,
+ r300->vertex_buffers[i].buffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ draw_set_mapped_vertex_buffer(r300->draw, i, buf);
+ }
+
+ if (indexBuffer) {
+ void* indices = pipe_buffer_map(pipe->screen, indexBuffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ draw_set_mapped_element_buffer_range(r300->draw, indexSize,
+ minIndex, maxIndex, indices);
+ } else {
+ draw_set_mapped_element_buffer(r300->draw, 0, NULL);
+ }
+
+ draw_set_mapped_constant_buffer(r300->draw,
+ r300->shader_constants[PIPE_SHADER_VERTEX].constants,
+ r300->shader_constants[PIPE_SHADER_VERTEX].count *
+ (sizeof(float) * 4));
+
+ draw_arrays(r300->draw, mode, start, count);
+
+ for (i = 0; i < r300->vertex_buffer_count; i++) {
+ pipe_buffer_unmap(pipe->screen, r300->vertex_buffers[i].buffer);
+ draw_set_mapped_vertex_buffer(r300->draw, i, NULL);
+ }
+
+ if (indexBuffer) {
+ pipe_buffer_unmap(pipe->screen, indexBuffer);
+ draw_set_mapped_element_buffer_range(r300->draw, 0, start,
+ start + count - 1, NULL);
+ }
+
+ return TRUE;
+}
+
+static boolean r300_draw_elements(struct pipe_context* pipe,
+ struct pipe_buffer* indexBuffer,
+ unsigned indexSize, unsigned mode,
+ unsigned start, unsigned count)
+{
+ return r300_draw_range_elements(pipe, indexBuffer, indexSize, 0, ~0,
+ mode, start, count);
+}
+
+static boolean r300_draw_arrays(struct pipe_context* pipe, unsigned mode,
+ unsigned start, unsigned count)
+{
+ return r300_draw_elements(pipe, NULL, 0, mode, start, count);
+}
+
+static void r300_destroy_context(struct pipe_context* context) {
+ struct r300_context* r300 = r300_context(context);
+ struct r300_query* query, * temp;
+
+ draw_destroy(r300->draw);
+
+ /* Free the OQ BO. */
+ context->screen->buffer_destroy(r300->oqbo);
+
+ /* If there are any queries pending or not destroyed, remove them now. */
+ if (r300->query_list) {
+ foreach_s(query, temp, r300->query_list) {
+ remove_from_list(query);
+ FREE(query);
+ }
+ }
+
+ FREE(r300->blend_color_state);
+ FREE(r300->rs_block);
+ FREE(r300->scissor_state);
+ FREE(r300->viewport_state);
+ FREE(r300);
+}
+
+static unsigned int
+r300_is_texture_referenced( struct pipe_context *pipe,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+static unsigned int
+r300_is_buffer_referenced( struct pipe_context *pipe,
+ struct pipe_buffer *buf)
+{
+ /**
+ * FIXME: Optimize.
+ */
+
+ return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+}
+
+struct pipe_context* r300_create_context(struct pipe_screen* screen,
+ struct r300_winsys* r300_winsys)
+{
+ struct r300_context* r300 = CALLOC_STRUCT(r300_context);
+
+ if (!r300)
+ return NULL;
+
+ r300->winsys = r300_winsys;
+
+ r300->context.winsys = (struct pipe_winsys*)r300_winsys;
+ r300->context.screen = r300_screen(screen);
+
+ r300_init_debug(r300);
+
+ r300->context.destroy = r300_destroy_context;
+
+ r300->context.clear = r300_clear;
+
+ r300->context.draw_arrays = r300_draw_arrays;
+ r300->context.draw_elements = r300_draw_elements;
+ r300->context.draw_range_elements = r300_draw_range_elements;
+
+ r300->context.is_texture_referenced = r300_is_texture_referenced;
+ r300->context.is_buffer_referenced = r300_is_buffer_referenced;
+
+ r300->blend_color_state = CALLOC_STRUCT(r300_blend_color_state);
+ r300->rs_block = CALLOC_STRUCT(r300_rs_block);
+ r300->scissor_state = CALLOC_STRUCT(r300_scissor_state);
+ r300->viewport_state = CALLOC_STRUCT(r300_viewport_state);
+
+ /* Create a Draw. This is used for vert collation and SW TCL. */
+ r300->draw = draw_create();
+ /* Enable our renderer. */
+ draw_set_rasterize_stage(r300->draw, r300_draw_stage(r300));
+ /* Disable Draw's clipping if TCL is present. */
+ draw_set_driver_clipping(r300->draw, r300_screen(screen)->caps->has_tcl);
+ /* Force Draw to never do viewport transform, since (again) we can do
+ * transform in hardware, always. */
+ draw_set_viewport_state(r300->draw, &r300_viewport_identity);
+
+ /* Open up the OQ BO. */
+ r300->oqbo = screen->buffer_create(screen, 4096,
+ PIPE_BUFFER_USAGE_VERTEX, 4096);
+
+ r300_init_flush_functions(r300);
+
+ r300_init_query_functions(r300);
+
+ r300_init_surface_functions(r300);
+
+ r300_init_state_functions(r300);
+
+ r300_emit_invariant_state(r300);
+ r300->dirty_state = R300_NEW_KITCHEN_SINK;
+ r300->dirty_hw++;
+
+ return &r300->context;
+}
diff --git a/src/gallium/drivers/r300/r300_context.h b/src/gallium/drivers/r300/r300_context.h
new file mode 100644
index 0000000000..6c5914baa3
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_context.h
@@ -0,0 +1,327 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_CONTEXT_H
+#define R300_CONTEXT_H
+
+#include "draw/draw_context.h"
+#include "draw/draw_vertex.h"
+
+#include "pipe/p_context.h"
+
+#include "tgsi/tgsi_scan.h"
+
+#include "util/u_memory.h"
+#include "util/u_simple_list.h"
+
+#include "r300_clear.h"
+#include "r300_query.h"
+#include "r300_screen.h"
+#include "r300_winsys.h"
+
+struct r300_fragment_shader;
+struct r300_vertex_shader;
+
+struct r300_blend_state {
+ uint32_t blend_control; /* R300_RB3D_CBLEND: 0x4e04 */
+ uint32_t alpha_blend_control; /* R300_RB3D_ABLEND: 0x4e08 */
+ uint32_t rop; /* R300_RB3D_ROPCNTL: 0x4e18 */
+ uint32_t dither; /* R300_RB3D_DITHER_CTL: 0x4e50 */
+};
+
+struct r300_blend_color_state {
+ /* RV515 and earlier */
+ uint32_t blend_color; /* R300_RB3D_BLEND_COLOR: 0x4e10 */
+ /* R520 and newer */
+ uint32_t blend_color_red_alpha; /* R500_RB3D_CONSTANT_COLOR_AR: 0x4ef8 */
+ uint32_t blend_color_green_blue; /* R500_RB3D_CONSTANT_COLOR_GB: 0x4efc */
+};
+
+struct r300_dsa_state {
+ uint32_t alpha_function; /* R300_FG_ALPHA_FUNC: 0x4bd4 */
+ uint32_t alpha_reference; /* R500_FG_ALPHA_VALUE: 0x4be0 */
+ uint32_t z_buffer_control; /* R300_ZB_CNTL: 0x4f00 */
+ uint32_t z_stencil_control; /* R300_ZB_ZSTENCILCNTL: 0x4f04 */
+ uint32_t stencil_ref_mask; /* R300_ZB_STENCILREFMASK: 0x4f08 */
+ uint32_t z_buffer_top; /* R300_ZB_ZTOP: 0x4f14 */
+ uint32_t stencil_ref_bf; /* R500_ZB_STENCILREFMASK_BF: 0x4fd4 */
+};
+
+struct r300_rs_state {
+ /* Draw-specific rasterizer state */
+ struct pipe_rasterizer_state rs;
+
+ /* Whether or not to enable the VTE. This is referenced at the very
+ * last moment during emission of VTE state, to decide whether or not
+ * the VTE should be used for transformation. */
+ boolean enable_vte;
+
+ uint32_t vap_control_status; /* R300_VAP_CNTL_STATUS: 0x2140 */
+ uint32_t point_size; /* R300_GA_POINT_SIZE: 0x421c */
+ uint32_t point_minmax; /* R300_GA_POINT_MINMAX: 0x4230 */
+ uint32_t line_control; /* R300_GA_LINE_CNTL: 0x4234 */
+ uint32_t depth_scale_front; /* R300_SU_POLY_OFFSET_FRONT_SCALE: 0x42a4 */
+ uint32_t depth_offset_front;/* R300_SU_POLY_OFFSET_FRONT_OFFSET: 0x42a8 */
+ uint32_t depth_scale_back; /* R300_SU_POLY_OFFSET_BACK_SCALE: 0x42ac */
+ uint32_t depth_offset_back; /* R300_SU_POLY_OFFSET_BACK_OFFSET: 0x42b0 */
+ uint32_t polygon_offset_enable; /* R300_SU_POLY_OFFSET_ENABLE: 0x42b4 */
+ uint32_t cull_mode; /* R300_SU_CULL_MODE: 0x42b8 */
+ uint32_t line_stipple_config; /* R300_GA_LINE_STIPPLE_CONFIG: 0x4328 */
+ uint32_t line_stipple_value; /* R300_GA_LINE_STIPPLE_VALUE: 0x4260 */
+ uint32_t color_control; /* R300_GA_COLOR_CONTROL: 0x4278 */
+};
+
+struct r300_rs_block {
+ uint32_t ip[8]; /* R300_RS_IP_[0-7], R500_RS_IP_[0-7] */
+ uint32_t count; /* R300_RS_COUNT */
+ uint32_t inst_count; /* R300_RS_INST_COUNT */
+ uint32_t inst[8]; /* R300_RS_INST_[0-7] */
+};
+
+struct r300_sampler_state {
+ uint32_t filter0; /* R300_TX_FILTER0: 0x4400 */
+ uint32_t filter1; /* R300_TX_FILTER1: 0x4440 */
+ uint32_t border_color; /* R300_TX_BORDER_COLOR: 0x45c0 */
+};
+
+struct r300_scissor_state {
+ uint32_t scissor_top_left; /* R300_SC_SCISSORS_TL: 0x43e0 */
+ uint32_t scissor_bottom_right; /* R300_SC_SCISSORS_BR: 0x43e4 */
+};
+
+struct r300_texture_state {
+ uint32_t format0; /* R300_TX_FORMAT0: 0x4480 */
+ uint32_t format1; /* R300_TX_FORMAT1: 0x44c0 */
+ uint32_t format2; /* R300_TX_FORMAT2: 0x4500 */
+};
+
+struct r300_viewport_state {
+ float xscale; /* R300_VAP_VPORT_XSCALE: 0x2098 */
+ float xoffset; /* R300_VAP_VPORT_XOFFSET: 0x209c */
+ float yscale; /* R300_VAP_VPORT_YSCALE: 0x20a0 */
+ float yoffset; /* R300_VAP_VPORT_YOFFSET: 0x20a4 */
+ float zscale; /* R300_VAP_VPORT_ZSCALE: 0x20a8 */
+ float zoffset; /* R300_VAP_VPORT_ZOFFSET: 0x20ac */
+ uint32_t vte_control; /* R300_VAP_VTE_CNTL: 0x20b0 */
+};
+
+#define R300_NEW_BLEND 0x00000001
+#define R300_NEW_BLEND_COLOR 0x00000002
+#define R300_NEW_CLIP 0x00000004
+#define R300_NEW_CONSTANTS 0x00000008
+#define R300_NEW_DSA 0x00000010
+#define R300_NEW_FRAMEBUFFERS 0x00000020
+#define R300_NEW_FRAGMENT_SHADER 0x00000040
+#define R300_NEW_RASTERIZER 0x00000080
+#define R300_NEW_RS_BLOCK 0x00000100
+#define R300_NEW_SAMPLER 0x00000200
+#define R300_ANY_NEW_SAMPLERS 0x0001fe00
+#define R300_NEW_SCISSOR 0x00020000
+#define R300_NEW_TEXTURE 0x00040000
+#define R300_ANY_NEW_TEXTURES 0x03fc0000
+#define R300_NEW_VERTEX_FORMAT 0x04000000
+#define R300_NEW_VERTEX_SHADER 0x08000000
+#define R300_NEW_VIEWPORT 0x10000000
+#define R300_NEW_KITCHEN_SINK 0x1fffffff
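+
+/* These flags are OR'd into r300_context::dirty_state; context creation marks
+ * everything with R300_NEW_KITCHEN_SINK, and a state setter would typically do
+ * something like r300->dirty_state |= R300_NEW_BLEND; (illustrative). */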
+
+/* The next several objects are not pure Radeon state; they inherit from
+ * various Gallium classes. */
+
+struct r300_constant_buffer {
+ /* Buffer of constants */
+ /* XXX first number should be raised */
+ float constants[32][4];
+ /* Total number of constants */
+ unsigned count;
+};
+
+/* Query object.
+ *
+ * This is not a subclass of pipe_query because pipe_query is never
+ * actually fully defined. So, rather than have it as a member, and do
+ * subclass-style casting, we treat pipe_query as an opaque, and just
+ * trust that our state tracker does not ever mess up query objects.
+ */
+struct r300_query {
+ /* The kind of query. Currently only OQ is supported. */
+ unsigned type;
+ /* Whether this query is currently active. Only active queries will
+ * get emitted into the command stream, and only active queries get
+ * tallied. */
+ boolean active;
+ /* The current count of this query. Required to be at least 32 bits. */
+ unsigned int count;
+ /* The offset of this query into the query buffer, in bytes. */
+ unsigned offset;
+ /* Linked list members. */
+ struct r300_query* prev;
+ struct r300_query* next;
+};
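+
+/* Since pipe_query is opaque, the intended usage is a plain cast, e.g.
+ * (illustrative sketch): struct r300_query* q = (struct r300_query*)query; */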
+
+struct r300_texture {
+ /* Parent class */
+ struct pipe_texture tex;
+
+ /* Offsets into the buffer. */
+ unsigned offset[PIPE_MAX_TEXTURE_LEVELS];
+
+ /* Stride (pitch?) of this texture in bytes */
+ unsigned stride;
+
+ /* Total size of this texture, in bytes. */
+ unsigned size;
+
+ /* Pipe buffer backing this texture. */
+ struct pipe_buffer* buffer;
+
+ /* Registers carrying texture format data. */
+ struct r300_texture_state state;
+};
+
+struct r300_vertex_format {
+ /* Parent class */
+ struct vertex_info vinfo;
+ /* R300_VAP_PROG_STREAM_CNTL_[0-7] */
+ uint32_t vap_prog_stream_cntl[8];
+ /* R300_VAP_PROG_STREAM_CNTL_EXT_[0-7] */
+ uint32_t vap_prog_stream_cntl_ext[8];
+ /* Map of vertex attributes into PVS memory for HW TCL,
+ * or GA memory for SW TCL. */
+ int vs_tab[16];
+ /* Map of rasterizer attributes from GB through RS to US. */
+ int fs_tab[16];
+};
+
+extern struct pipe_viewport_state r300_viewport_identity;
+
+struct r300_context {
+ /* Parent class */
+ struct pipe_context context;
+
+ /* The interface to the windowing system, etc. */
+ struct r300_winsys* winsys;
+ /* Draw module. Used mostly for SW TCL. */
+ struct draw_context* draw;
+
+ /* Vertex buffer for rendering. */
+ struct pipe_buffer* vbo;
+ /* Offset into the VBO. */
+ size_t vbo_offset;
+
+ /* Occlusion query buffer. */
+ struct pipe_buffer* oqbo;
+ /* Query list. */
+ struct r300_query* query_list;
+
+ /* Various CSO state objects. */
+ /* Blend state. */
+ struct r300_blend_state* blend_state;
+ /* Blend color state. */
+ struct r300_blend_color_state* blend_color_state;
+ /* User clip planes. */
+ struct pipe_clip_state clip_state;
+ /* Shader constants. */
+ struct r300_constant_buffer shader_constants[PIPE_SHADER_TYPES];
+ /* Depth, stencil, and alpha state. */
+ struct r300_dsa_state* dsa_state;
+ /* Fragment shader. */
+ struct r300_fragment_shader* fs;
+ /* Framebuffer state. We currently don't need our own version of this. */
+ struct pipe_framebuffer_state framebuffer_state;
+ /* Rasterizer state. */
+ struct r300_rs_state* rs_state;
+ /* RS block state. */
+ struct r300_rs_block* rs_block;
+ /* Sampler states. */
+ struct r300_sampler_state* sampler_states[8];
+ int sampler_count;
+ /* Scissor state. */
+ struct r300_scissor_state* scissor_state;
+ /* Texture states. */
+ struct r300_texture* textures[8];
+ int texture_count;
+ /* Vertex buffers for Gallium. */
+ struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
+ int vertex_buffer_count;
+ /* Vertex information. */
+ struct r300_vertex_format vertex_info;
+ /* Vertex shader. */
+ struct r300_vertex_shader* vs;
+ /* Viewport state. */
+ struct r300_viewport_state* viewport_state;
+ /* Bitmask of dirty state objects. */
+ uint32_t dirty_state;
+ /* Flag indicating whether or not the HW is dirty. */
+ uint32_t dirty_hw;
+
+ /** Combination of DBG_xxx flags */
+ unsigned debug;
+};
+
+/* Convenience cast wrapper. */
+static INLINE struct r300_context* r300_context(struct pipe_context* context)
+{
+ return (struct r300_context*)context;
+}
+
+/* Context initialization. */
+struct draw_stage* r300_draw_stage(struct r300_context* r300);
+void r300_init_state_functions(struct r300_context* r300);
+void r300_init_surface_functions(struct r300_context* r300);
+
+/* Debug functionality. */
+
+/**
+ * Debug flags to disable/enable certain groups of debugging outputs.
+ *
+ * \note These may be rather coarse, and the grouping may be impractical.
+ * If you find, while debugging the driver, that a different grouping
+ * of these flags would be beneficial, just feel free to change them
+ * but make sure to update the documentation in r300_debug.c to reflect
+ * those changes.
+ */
+/*@{*/
+#define DBG_HELP 0x0000001
+#define DBG_FP 0x0000002
+#define DBG_VP 0x0000004
+#define DBG_CS 0x0000008
+#define DBG_DRAW 0x0000010
+/*@}*/
+
+static INLINE boolean DBG_ON(struct r300_context * ctx, unsigned flags)
+{
+ return (ctx->debug & flags) ? true : false;
+}
+
+static INLINE void DBG(struct r300_context * ctx, unsigned flags, const char * fmt, ...)
+{
+ if (DBG_ON(ctx, flags)) {
+ va_list va;
+ va_start(va, fmt);
+ debug_vprintf(fmt, va);
+ va_end(va);
+ }
+}
+
+void r300_init_debug(struct r300_context * ctx);
+
+#endif /* R300_CONTEXT_H */
diff --git a/src/gallium/drivers/r300/r300_cs.h b/src/gallium/drivers/r300/r300_cs.h
new file mode 100644
index 0000000000..0a7e470363
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_cs.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_CS_H
+#define R300_CS_H
+
+#include "util/u_math.h"
+
+#include "r300_reg.h"
+#include "r300_winsys.h"
+
+/* Yes, I know macros are ugly. However, they are much prettier than the code
+ * that they neatly hide away, and don't have the cost of function setup, so
+ * we're going to use them. */
+
+#define MAX_CS_SIZE (64 * 1024 / 4)
+
+#define VERY_VERBOSE_CS 0
+#define VERY_VERBOSE_REGISTERS 0
+
+/* XXX stolen from radeon_drm.h */
+#define RADEON_GEM_DOMAIN_CPU 0x1
+#define RADEON_GEM_DOMAIN_GTT 0x2
+#define RADEON_GEM_DOMAIN_VRAM 0x4
+
+/* XXX stolen from radeon_reg.h */
+#define RADEON_CP_PACKET0 0x0
+
+#define CP_PACKET0(register, count) \
+ (RADEON_CP_PACKET0 | ((count) << 16) | ((register) >> 2))
+
+#define CS_LOCALS(context) \
+ struct r300_context* const cs_context_copy = (context); \
+ struct r300_winsys* cs_winsys = cs_context_copy->winsys; \
+ int cs_count = 0;
+
+#define CHECK_CS(size) \
+ cs_winsys->check_cs(cs_winsys, (size))
+
+#define BEGIN_CS(size) do { \
+ CHECK_CS(size); \
+ if (VERY_VERBOSE_CS) { \
+ DBG(cs_context_copy, DBG_CS, "r300: BEGIN_CS, count %d, in %s (%s:%d)\n", \
+ size, __FUNCTION__, __FILE__, __LINE__); \
+ } \
+ cs_winsys->begin_cs(cs_winsys, (size), \
+ __FILE__, __FUNCTION__, __LINE__); \
+ cs_count = size; \
+} while (0)
+
+#define OUT_CS(value) do { \
+ cs_winsys->write_cs_dword(cs_winsys, (value)); \
+ cs_count--; \
+} while (0)
+
+#define OUT_CS_32F(value) do { \
+ cs_winsys->write_cs_dword(cs_winsys, fui(value)); \
+ cs_count--; \
+} while (0)
+
+#define OUT_CS_REG(register, value) do { \
+ if (VERY_VERBOSE_REGISTERS) \
+ DBG(cs_context_copy, DBG_CS, "r300: writing 0x%08X to register 0x%04X\n", \
+ value, register); \
+ assert(register); \
+ OUT_CS(CP_PACKET0(register, 0)); \
+ OUT_CS(value); \
+} while (0)
+
+/* Note: This expects count to be the number of registers,
+ * not the actual packet0 count! */
+#define OUT_CS_REG_SEQ(register, count) do { \
+ if (VERY_VERBOSE_REGISTERS) \
+ DBG(cs_context_copy, DBG_CS, "r300: writing register sequence of %d to 0x%04X\n", \
+ count, register); \
+ assert(register); \
+ OUT_CS(CP_PACKET0(register, ((count) - 1))); \
+} while (0)
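+
+/* Example: OUT_CS_REG_SEQ(R300_ZB_CNTL, 3) followed by three OUT_CS() calls
+ * (as in r300_emit_dsa_state in r300_emit.c) writes R300_ZB_CNTL and the two
+ * consecutive registers after it; the packet0 count field is count - 1. */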
+
+#define OUT_CS_RELOC(bo, offset, rd, wd, flags) do { \
+ DBG(cs_context_copy, DBG_CS, "r300: writing relocation for buffer %p, offset %d, " \
+ "domains (%d, %d, %d)\n", \
+ bo, offset, rd, wd, flags); \
+ assert(bo); \
+ OUT_CS(offset); \
+ cs_winsys->write_cs_reloc(cs_winsys, bo, rd, wd, flags); \
+ cs_count -= 2; \
+} while (0)
+
+#define END_CS do { \
+ if (VERY_VERBOSE_CS) { \
+ DBG(cs_context_copy, DBG_CS, "r300: END_CS in %s (%s:%d)\n", __FUNCTION__, \
+ __FILE__, __LINE__); \
+ } \
+ if (cs_count != 0) \
+ debug_printf("r300: Warning: cs_count off by %d\n", cs_count); \
+ cs_winsys->end_cs(cs_winsys, __FILE__, __FUNCTION__, __LINE__); \
+} while (0)
+
+#define FLUSH_CS do { \
+ if (VERY_VERBOSE_CS) { \
+ DBG(cs_context_copy, DBG_CS, "r300: FLUSH_CS in %s (%s:%d)\n\n", __FUNCTION__, \
+ __FILE__, __LINE__); \
+ } \
+ cs_winsys->flush_cs(cs_winsys); \
+} while (0)
+
+#define RADEON_ONE_REG_WR (1 << 15)
+
+#define OUT_CS_ONE_REG(register, count) do { \
+ if (VERY_VERBOSE_REGISTERS) \
+ DBG(cs_context_copy, DBG_CS, "r300: writing data sequence of %d to 0x%04X\n", \
+ count, register); \
+ assert(register); \
+ OUT_CS(CP_PACKET0(register, ((count) - 1)) | RADEON_ONE_REG_WR); \
+} while (0)
+
+#define CP_PACKET3(op, count) \
+ (RADEON_CP_PACKET3 | (op) | ((count) << 16))
+
+#define OUT_CS_PKT3(op, count) do { \
+ OUT_CS(CP_PACKET3(op, count)); \
+} while (0)
+
+#define OUT_CS_INDEX_RELOC(bo, offset, count, rd, wd, flags) do { \
+ DBG(cs_context_copy, DBG_CS, "r300: writing relocation for index buffer %p," \
+ "offset %d\n", bo, offset); \
+ assert(bo); \
+ OUT_CS(offset); \
+ OUT_CS(count); \
+ cs_winsys->write_cs_reloc(cs_winsys, bo, rd, wd, flags); \
+ cs_count -= 2; \
+} while (0)
+
+#endif /* R300_CS_H */
diff --git a/src/gallium/drivers/r300/r300_debug.c b/src/gallium/drivers/r300/r300_debug.c
new file mode 100644
index 0000000000..15308dda1d
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_debug.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2009 Nicolai Haehnle <nhaehnle@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "r300_context.h"
+
+#include <ctype.h>
+
+
+struct debug_option {
+ const char * name;
+ unsigned flag;
+ const char * description;
+};
+
+static struct debug_option debug_options[] = {
+ { "help", DBG_HELP, "Helpful meta-information about the driver" },
+ { "fp", DBG_FP, "Fragment program handling" },
+ { "vp", DBG_VP, "Vertex program handling" },
+ { "cs", DBG_CS, "Command submissions" },
+ { "draw", DBG_DRAW, "Draw and emit" },
+
+ { "all", ~0, "Convenience option that enables all debug flags" },
+
+ /* must be last */
+ { 0, 0, 0 }
+};
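+
+/* Usage example: RADEON_DEBUG="fp,cs" enables fragment program and command
+ * submission output; option names are separated by commas and/or spaces. */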
+
+void r300_init_debug(struct r300_context * ctx)
+{
+ const char * options = debug_get_option("RADEON_DEBUG", 0);
+ boolean printhint = false;
+
+ if (options) {
+ while(*options) {
+ if (*options == ' ' || *options == ',') {
+ options++;
+ continue;
+ }
+
+ size_t length = strcspn(options, " ,");
+ struct debug_option * opt;
+
+ for(opt = debug_options; opt->name; ++opt) {
+ if (!strncmp(options, opt->name, length)) {
+ ctx->debug |= opt->flag;
+ break;
+ }
+ }
+
+ if (!opt->name) {
+ debug_printf("Unknown debug option: %s\n", options);
+ printhint = true;
+ }
+
+ options += length;
+ }
+
+ if (!ctx->debug)
+ printhint = true;
+ }
+
+ if (printhint || ctx->debug & DBG_HELP) {
+ debug_printf("You can enable debug output by setting the RADEON_DEBUG environment variable\n"
+ "to a comma-separated list of debug options. Available options are:\n");
+ for(struct debug_option * opt = debug_options; opt->name; ++opt) {
+ debug_printf(" %s: %s\n", opt->name, opt->description);
+ }
+ }
+}
diff --git a/src/gallium/drivers/r300/r300_emit.c b/src/gallium/drivers/r300/r300_emit.c
new file mode 100644
index 0000000000..1bc35c2486
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_emit.c
@@ -0,0 +1,813 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+/* r300_emit: Functions for emitting state. */
+
+#include "r300_emit.h"
+
+#include "r300_fs.h"
+#include "r300_state_derived.h"
+#include "r300_vs.h"
+
+void r300_emit_blend_state(struct r300_context* r300,
+ struct r300_blend_state* blend)
+{
+ CS_LOCALS(r300);
+ BEGIN_CS(7);
+ OUT_CS_REG_SEQ(R300_RB3D_CBLEND, 2);
+ OUT_CS(blend->blend_control);
+ OUT_CS(blend->alpha_blend_control);
+ OUT_CS_REG(R300_RB3D_ROPCNTL, blend->rop);
+ OUT_CS_REG(R300_RB3D_DITHER_CTL, blend->dither);
+ END_CS;
+}
+
+void r300_emit_blend_color_state(struct r300_context* r300,
+ struct r300_blend_color_state* bc)
+{
+ struct r300_screen* r300screen = r300_screen(r300->context.screen);
+ CS_LOCALS(r300);
+
+ if (r300screen->caps->is_r500) {
+ BEGIN_CS(3);
+ OUT_CS_REG_SEQ(R500_RB3D_CONSTANT_COLOR_AR, 2);
+ OUT_CS(bc->blend_color_red_alpha);
+ OUT_CS(bc->blend_color_green_blue);
+ END_CS;
+ } else {
+ BEGIN_CS(2);
+ OUT_CS_REG(R300_RB3D_BLEND_COLOR, bc->blend_color);
+ END_CS;
+ }
+}
+
+void r300_emit_clip_state(struct r300_context* r300,
+ struct pipe_clip_state* clip)
+{
+ int i;
+ struct r300_screen* r300screen = r300_screen(r300->context.screen);
+ CS_LOCALS(r300);
+
+ if (r300screen->caps->has_tcl) {
+ BEGIN_CS(5 + (6 * 4));
+ OUT_CS_REG(R300_VAP_PVS_VECTOR_INDX_REG,
+ (r300screen->caps->is_r500 ?
+ R500_PVS_UCP_START : R300_PVS_UCP_START));
+ OUT_CS_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, 6 * 4);
+ for (i = 0; i < 6; i++) {
+ OUT_CS_32F(clip->ucp[i][0]);
+ OUT_CS_32F(clip->ucp[i][1]);
+ OUT_CS_32F(clip->ucp[i][2]);
+ OUT_CS_32F(clip->ucp[i][3]);
+ }
+ OUT_CS_REG(R300_VAP_CLIP_CNTL, ((1 << clip->nr) - 1) |
+ R300_PS_UCP_MODE_CLIP_AS_TRIFAN);
+ END_CS;
+ } else {
+ BEGIN_CS(2);
+ OUT_CS_REG(R300_VAP_CLIP_CNTL, R300_CLIP_DISABLE);
+ END_CS;
+ }
+
+}
+
+void r300_emit_dsa_state(struct r300_context* r300,
+ struct r300_dsa_state* dsa)
+{
+ struct r300_screen* r300screen = r300_screen(r300->context.screen);
+ CS_LOCALS(r300);
+
+ BEGIN_CS(r300screen->caps->is_r500 ? 8 : 8); /* same size for now; the R500-only writes below are commented out */
+ OUT_CS_REG(R300_FG_ALPHA_FUNC, dsa->alpha_function);
+ /* XXX figure out the r300 counterpart for this */
+ if (r300screen->caps->is_r500) {
+ /* OUT_CS_REG(R500_FG_ALPHA_VALUE, dsa->alpha_reference); */
+ }
+ OUT_CS_REG_SEQ(R300_ZB_CNTL, 3);
+ OUT_CS(dsa->z_buffer_control);
+ OUT_CS(dsa->z_stencil_control);
+ OUT_CS(dsa->stencil_ref_mask);
+ OUT_CS_REG(R300_ZB_ZTOP, dsa->z_buffer_top);
+ if (r300screen->caps->is_r500) {
+ /* OUT_CS_REG(R500_ZB_STENCILREFMASK_BF, dsa->stencil_ref_bf); */
+ }
+ END_CS;
+}
+
+static const float * get_shader_constant(
+ struct r300_context * r300,
+ struct rc_constant * constant,
+ struct r300_constant_buffer * externals)
+{
+ static const float zero[4] = { 0.0, 0.0, 0.0, 0.0 };
+ switch(constant->Type) {
+ case RC_CONSTANT_EXTERNAL:
+ return externals->constants[constant->u.External];
+
+ case RC_CONSTANT_IMMEDIATE:
+ return constant->u.Immediate;
+
+ default:
+ debug_printf("r300: Implementation error: Unhandled constant type %i\n",
+ constant->Type);
+ return zero;
+ }
+}
+
+/* Convert a normal single-precision float into the 7.16 format
+ * used by the R300 fragment shader.
+ */
+static uint32_t pack_float24(float f)
+{
+ union {
+ float fl;
+ uint32_t u;
+ } u;
+ float mantissa;
+ int exponent;
+ uint32_t float24 = 0;
+
+ if (f == 0.0)
+ return 0;
+
+ u.fl = f;
+
+ mantissa = frexpf(f, &exponent);
+
+ /* Handle negative values */
+ if (mantissa < 0) {
+ float24 |= (1 << 23);
+ mantissa = mantissa * -1.0;
+ }
+ /* Handle exponent, bias of 63 (frexpf gives a mantissa in [0.5, 1), hence += 62) */
+ exponent += 62;
+ float24 |= (exponent << 16);
+ /* Kill 7 LSB of mantissa */
+ float24 |= (u.u & 0x7FFFFF) >> 7;
+
+ return float24;
+}
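+
+/* Worked example: pack_float24(1.0f): frexpf() returns exponent 1, biased to
+ * 63 (0x3F << 16), and the 16 kept mantissa bits of 0x3F800000 are all zero,
+ * so the result is 0x003F0000. */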
+
+void r300_emit_fragment_program_code(struct r300_context* r300,
+ struct rX00_fragment_program_code* generic_code,
+ struct r300_constant_buffer* externals)
+{
+ struct r300_fragment_program_code * code = &generic_code->code.r300;
+ struct rc_constant_list * constants = &generic_code->constants;
+ int i;
+ CS_LOCALS(r300);
+
+ BEGIN_CS(15 +
+ code->alu.length * 4 +
+ (code->tex.length ? (1 + code->tex.length) : 0) +
+ (constants->Count ? (1 + constants->Count * 4) : 0));
+
+ OUT_CS_REG(R300_US_CONFIG, code->config);
+ OUT_CS_REG(R300_US_PIXSIZE, code->pixsize);
+ OUT_CS_REG(R300_US_CODE_OFFSET, code->code_offset);
+
+ OUT_CS_REG_SEQ(R300_US_CODE_ADDR_0, 4);
+ for(i = 0; i < 4; ++i)
+ OUT_CS(code->code_addr[i]);
+
+ OUT_CS_REG_SEQ(R300_US_ALU_RGB_INST_0, code->alu.length);
+ for (i = 0; i < code->alu.length; i++)
+ OUT_CS(code->alu.inst[i].rgb_inst);
+
+ OUT_CS_REG_SEQ(R300_US_ALU_RGB_ADDR_0, code->alu.length);
+ for (i = 0; i < code->alu.length; i++)
+ OUT_CS(code->alu.inst[i].rgb_addr);
+
+ OUT_CS_REG_SEQ(R300_US_ALU_ALPHA_INST_0, code->alu.length);
+ for (i = 0; i < code->alu.length; i++)
+ OUT_CS(code->alu.inst[i].alpha_inst);
+
+ OUT_CS_REG_SEQ(R300_US_ALU_ALPHA_ADDR_0, code->alu.length);
+ for (i = 0; i < code->alu.length; i++)
+ OUT_CS(code->alu.inst[i].alpha_addr);
+
+ if (code->tex.length) {
+ OUT_CS_REG_SEQ(R300_US_TEX_INST_0, code->tex.length);
+ for(i = 0; i < code->tex.length; ++i)
+ OUT_CS(code->tex.inst[i]);
+ }
+
+ if (constants->Count) {
+ OUT_CS_ONE_REG(R300_PFS_PARAM_0_X, constants->Count * 4);
+ for(i = 0; i < constants->Count; ++i) {
+ const float * data = get_shader_constant(r300, &constants->Constants[i], externals);
+ OUT_CS(pack_float24(data[0]));
+ OUT_CS(pack_float24(data[1]));
+ OUT_CS(pack_float24(data[2]));
+ OUT_CS(pack_float24(data[3]));
+ }
+ }
+
+ END_CS;
+}
+
+void r500_emit_fragment_program_code(struct r300_context* r300,
+ struct rX00_fragment_program_code* generic_code,
+ struct r300_constant_buffer* externals)
+{
+ struct r500_fragment_program_code * code = &generic_code->code.r500;
+ struct rc_constant_list * constants = &generic_code->constants;
+ int i;
+ CS_LOCALS(r300);
+
+ BEGIN_CS(13 +
+ ((code->inst_end + 1) * 6) +
+ (constants->Count ? (3 + (constants->Count * 4)) : 0));
+ OUT_CS_REG(R500_US_CONFIG, 0);
+ OUT_CS_REG(R500_US_PIXSIZE, code->max_temp_idx);
+ OUT_CS_REG(R500_US_CODE_RANGE,
+ R500_US_CODE_RANGE_ADDR(0) | R500_US_CODE_RANGE_SIZE(code->inst_end));
+ OUT_CS_REG(R500_US_CODE_OFFSET, 0);
+ OUT_CS_REG(R500_US_CODE_ADDR,
+ R500_US_CODE_START_ADDR(0) | R500_US_CODE_END_ADDR(code->inst_end));
+
+ OUT_CS_REG(R500_GA_US_VECTOR_INDEX, R500_GA_US_VECTOR_INDEX_TYPE_INSTR);
+ OUT_CS_ONE_REG(R500_GA_US_VECTOR_DATA, (code->inst_end + 1) * 6);
+ for (i = 0; i <= code->inst_end; i++) {
+ OUT_CS(code->inst[i].inst0);
+ OUT_CS(code->inst[i].inst1);
+ OUT_CS(code->inst[i].inst2);
+ OUT_CS(code->inst[i].inst3);
+ OUT_CS(code->inst[i].inst4);
+ OUT_CS(code->inst[i].inst5);
+ }
+
+ if (constants->Count) {
+ OUT_CS_REG(R500_GA_US_VECTOR_INDEX, R500_GA_US_VECTOR_INDEX_TYPE_CONST);
+ OUT_CS_ONE_REG(R500_GA_US_VECTOR_DATA, constants->Count * 4);
+ for (i = 0; i < constants->Count; i++) {
+ const float * data = get_shader_constant(r300, &constants->Constants[i], externals);
+ OUT_CS_32F(data[0]);
+ OUT_CS_32F(data[1]);
+ OUT_CS_32F(data[2]);
+ OUT_CS_32F(data[3]);
+ }
+ }
+
+ END_CS;
+}
+
+void r300_emit_fb_state(struct r300_context* r300,
+ struct pipe_framebuffer_state* fb)
+{
+ struct r300_texture* tex;
+ unsigned pixpitch;
+ int i;
+ CS_LOCALS(r300);
+
+ BEGIN_CS((10 * fb->nr_cbufs) + (fb->zsbuf ? 10 : 0) + 4);
+ for (i = 0; i < fb->nr_cbufs; i++) {
+ tex = (struct r300_texture*)fb->cbufs[i]->texture;
+ assert(tex && tex->buffer && "cbuf is marked, but NULL!");
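+        /* Pitch in pixels: the byte stride divided by the format's block
+         * size (bytes per pixel for uncompressed formats). */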
+ pixpitch = tex->stride / tex->tex.block.size;
+
+ OUT_CS_REG_SEQ(R300_RB3D_COLOROFFSET0 + (4 * i), 1);
+ OUT_CS_RELOC(tex->buffer, 0, 0, RADEON_GEM_DOMAIN_VRAM, 0);
+
+ OUT_CS_REG_SEQ(R300_RB3D_COLORPITCH0 + (4 * i), 1);
+ OUT_CS_RELOC(tex->buffer, pixpitch |
+ r300_translate_colorformat(tex->tex.format), 0,
+ RADEON_GEM_DOMAIN_VRAM, 0);
+
+ OUT_CS_REG(R300_US_OUT_FMT_0 + (4 * i),
+ r300_translate_out_fmt(fb->cbufs[i]->format));
+ }
+
+ if (fb->zsbuf) {
+ tex = (struct r300_texture*)fb->zsbuf->texture;
+ assert(tex && tex->buffer && "zsbuf is marked, but NULL!");
+ pixpitch = tex->stride / tex->tex.block.size;
+
+ OUT_CS_REG_SEQ(R300_ZB_DEPTHOFFSET, 1);
+ OUT_CS_RELOC(tex->buffer, 0, 0, RADEON_GEM_DOMAIN_VRAM, 0);
+
+ OUT_CS_REG(R300_ZB_FORMAT, r300_translate_zsformat(tex->tex.format));
+
+ OUT_CS_REG_SEQ(R300_ZB_DEPTHPITCH, 1);
+ OUT_CS_RELOC(tex->buffer, pixpitch, 0, RADEON_GEM_DOMAIN_VRAM, 0);
+ }
+
+ OUT_CS_REG(R300_RB3D_DSTCACHE_CTLSTAT,
+ R300_RB3D_DSTCACHE_CTLSTAT_DC_FREE_FREE_3D_TAGS |
+ R300_RB3D_DSTCACHE_CTLSTAT_DC_FLUSH_FLUSH_DIRTY_3D);
+ OUT_CS_REG(R300_ZB_ZCACHE_CTLSTAT,
+ R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE |
+ R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE);
+ END_CS;
+}
+
+void r300_emit_query_begin(struct r300_context* r300,
+ struct r300_query* query)
+{
+ CS_LOCALS(r300);
+
+ /* XXX This will almost certainly not return good results
+ * for overlapping queries. */
+ BEGIN_CS(2);
+ OUT_CS_REG(R300_ZB_ZPASS_DATA, 0);
+ END_CS;
+}
+
+void r300_emit_query_end(struct r300_context* r300,
+ struct r300_query* query)
+{
+ struct r300_capabilities* caps = r300_screen(r300->context.screen)->caps;
+ CS_LOCALS(r300);
+
+ if (!r300->winsys->add_buffer(r300->winsys, r300->oqbo,
+ 0, RADEON_GEM_DOMAIN_GTT)) {
+ debug_printf("r300: There wasn't room for the OQ buffer!?"
+ " Oh noes!\n");
+ }
+
+ assert(caps->num_frag_pipes);
+ BEGIN_CS(6 * caps->num_frag_pipes + 2);
+ /* I'm not so sure I like this switch, but it's hard to be elegant
+     * when there are so many special cases...
+ *
+ * So here's the basic idea. For each pipe, enable writes to it only,
+ * then put out the relocation for ZPASS_ADDR, taking into account a
+ * 4-byte offset for each pipe. RV380 and older are special; they have
+ * only two pipes, and the second pipe's enable is on bit 3, not bit 1,
+ * so there's a chipset cap for that. */
+ switch (caps->num_frag_pipes) {
+ case 4:
+ /* pipe 3 only */
+ OUT_CS_REG(R300_SU_REG_DEST, 1 << 3);
+ OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
+ OUT_CS_RELOC(r300->oqbo, query->offset + (sizeof(uint32_t) * 3),
+ 0, RADEON_GEM_DOMAIN_GTT, 0);
+ case 3:
+ /* pipe 2 only */
+ OUT_CS_REG(R300_SU_REG_DEST, 1 << 2);
+ OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
+ OUT_CS_RELOC(r300->oqbo, query->offset + (sizeof(uint32_t) * 2),
+ 0, RADEON_GEM_DOMAIN_GTT, 0);
+ case 2:
+ /* pipe 1 only */
+        /* As mentioned above, accommodate RV380 and older. */
+ OUT_CS_REG(R300_SU_REG_DEST,
+ 1 << (caps->high_second_pipe ? 3 : 1));
+ OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
+ OUT_CS_RELOC(r300->oqbo, query->offset + (sizeof(uint32_t) * 1),
+ 0, RADEON_GEM_DOMAIN_GTT, 0);
+ case 1:
+ /* pipe 0 only */
+ OUT_CS_REG(R300_SU_REG_DEST, 1 << 0);
+ OUT_CS_REG_SEQ(R300_ZB_ZPASS_ADDR, 1);
+ OUT_CS_RELOC(r300->oqbo, query->offset + (sizeof(uint32_t) * 0),
+            0, RADEON_GEM_DOMAIN_GTT, 0);
+        break;
+ default:
+ debug_printf("r300: Implementation error: Chipset reports %d"
+ " pixel pipes!\n", caps->num_frag_pipes);
+ assert(0);
+ }
+
+ /* And, finally, reset it to normal... */
+ OUT_CS_REG(R300_SU_REG_DEST, 0xF);
+ END_CS;
+}
+
+void r300_emit_rs_state(struct r300_context* r300, struct r300_rs_state* rs)
+{
+ CS_LOCALS(r300);
+
+ BEGIN_CS(20);
+ OUT_CS_REG(R300_VAP_CNTL_STATUS, rs->vap_control_status);
+ OUT_CS_REG(R300_GA_POINT_SIZE, rs->point_size);
+ OUT_CS_REG_SEQ(R300_GA_POINT_MINMAX, 2);
+ OUT_CS(rs->point_minmax);
+ OUT_CS(rs->line_control);
+ OUT_CS_REG_SEQ(R300_SU_POLY_OFFSET_FRONT_SCALE, 6);
+ OUT_CS(rs->depth_scale_front);
+ OUT_CS(rs->depth_offset_front);
+ OUT_CS(rs->depth_scale_back);
+ OUT_CS(rs->depth_offset_back);
+ OUT_CS(rs->polygon_offset_enable);
+ OUT_CS(rs->cull_mode);
+ OUT_CS_REG(R300_GA_LINE_STIPPLE_CONFIG, rs->line_stipple_config);
+ OUT_CS_REG(R300_GA_LINE_STIPPLE_VALUE, rs->line_stipple_value);
+ OUT_CS_REG(R300_GA_COLOR_CONTROL, rs->color_control);
+ END_CS;
+}
+
+void r300_emit_rs_block_state(struct r300_context* r300,
+ struct r300_rs_block* rs)
+{
+ int i;
+ struct r300_screen* r300screen = r300_screen(r300->context.screen);
+ CS_LOCALS(r300);
+
+ BEGIN_CS(21);
+ if (r300screen->caps->is_r500) {
+ OUT_CS_REG_SEQ(R500_RS_IP_0, 8);
+ } else {
+ OUT_CS_REG_SEQ(R300_RS_IP_0, 8);
+ }
+ for (i = 0; i < 8; i++) {
+ OUT_CS(rs->ip[i]);
+ /* debug_printf("ip %d: 0x%08x\n", i, rs->ip[i]); */
+ }
+
+ OUT_CS_REG_SEQ(R300_RS_COUNT, 2);
+ OUT_CS(rs->count);
+ OUT_CS(rs->inst_count);
+
+ if (r300screen->caps->is_r500) {
+ OUT_CS_REG_SEQ(R500_RS_INST_0, 8);
+ } else {
+ OUT_CS_REG_SEQ(R300_RS_INST_0, 8);
+ }
+ for (i = 0; i < 8; i++) {
+ OUT_CS(rs->inst[i]);
+ /* debug_printf("inst %d: 0x%08x\n", i, rs->inst[i]); */
+ }
+
+ /* debug_printf("count: 0x%08x inst_count: 0x%08x\n", rs->count,
+ * rs->inst_count); */
+
+ END_CS;
+}
+
+void r300_emit_scissor_state(struct r300_context* r300,
+ struct r300_scissor_state* scissor)
+{
+ CS_LOCALS(r300);
+
+ BEGIN_CS(3);
+ OUT_CS_REG_SEQ(R300_SC_SCISSORS_TL, 2);
+ OUT_CS(scissor->scissor_top_left);
+ OUT_CS(scissor->scissor_bottom_right);
+ END_CS;
+}
+
+void r300_emit_texture(struct r300_context* r300,
+ struct r300_sampler_state* sampler,
+ struct r300_texture* tex,
+ unsigned offset)
+{
+ CS_LOCALS(r300);
+
+ BEGIN_CS(16);
+ OUT_CS_REG(R300_TX_FILTER0_0 + (offset * 4), sampler->filter0);
+ OUT_CS_REG(R300_TX_FILTER1_0 + (offset * 4), sampler->filter1);
+ OUT_CS_REG(R300_TX_BORDER_COLOR_0 + (offset * 4), sampler->border_color);
+
+ OUT_CS_REG(R300_TX_FORMAT0_0 + (offset * 4), tex->state.format0);
+ OUT_CS_REG(R300_TX_FORMAT1_0 + (offset * 4), tex->state.format1);
+ OUT_CS_REG(R300_TX_FORMAT2_0 + (offset * 4), tex->state.format2);
+ OUT_CS_REG_SEQ(R300_TX_OFFSET_0 + (offset * 4), 1);
+ OUT_CS_RELOC(tex->buffer, 0,
+ RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0, 0);
+ END_CS;
+}
+
+void r300_emit_vertex_buffer(struct r300_context* r300)
+{
+ CS_LOCALS(r300);
+
+ DBG(r300, DBG_DRAW, "r300: Preparing vertex buffer %p for render, "
+ "vertex size %d\n", r300->vbo,
+ r300->vertex_info.vinfo.size);
+    /* Set the pointer to our vertex buffer. The emitted values are as follows:
+ * PACKET3 [3D_LOAD_VBPNTR]
+ * COUNT [1]
+ * FORMAT [size | stride << 8]
+ * OFFSET [offset into BO]
+ * VBPNTR [relocated BO]
+ */
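+    /* Note that the vertex size doubles as the stride below; the emitted
+     * vertices are tightly packed in the buffer. */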
+ BEGIN_CS(7);
+ OUT_CS_PKT3(R300_PACKET3_3D_LOAD_VBPNTR, 3);
+ OUT_CS(1);
+ OUT_CS(r300->vertex_info.vinfo.size |
+ (r300->vertex_info.vinfo.size << 8));
+ OUT_CS(r300->vbo_offset);
+ OUT_CS_RELOC(r300->vbo, 0, RADEON_GEM_DOMAIN_GTT, 0, 0);
+ END_CS;
+}
+
+void r300_emit_vertex_format_state(struct r300_context* r300)
+{
+ int i;
+ CS_LOCALS(r300);
+
+ BEGIN_CS(26);
+ OUT_CS_REG(R300_VAP_VTX_SIZE, r300->vertex_info.vinfo.size);
+
+ OUT_CS_REG_SEQ(R300_VAP_VTX_STATE_CNTL, 2);
+ OUT_CS(r300->vertex_info.vinfo.hwfmt[0]);
+ OUT_CS(r300->vertex_info.vinfo.hwfmt[1]);
+ OUT_CS_REG_SEQ(R300_VAP_OUTPUT_VTX_FMT_0, 2);
+ OUT_CS(r300->vertex_info.vinfo.hwfmt[2]);
+ OUT_CS(r300->vertex_info.vinfo.hwfmt[3]);
+ /* for (i = 0; i < 4; i++) {
+ * debug_printf("hwfmt%d: 0x%08x\n", i,
+ * r300->vertex_info.vinfo.hwfmt[i]);
+ * } */
+
+ OUT_CS_REG_SEQ(R300_VAP_PROG_STREAM_CNTL_0, 8);
+ for (i = 0; i < 8; i++) {
+ OUT_CS(r300->vertex_info.vap_prog_stream_cntl[i]);
+ /* debug_printf("prog_stream_cntl%d: 0x%08x\n", i,
+ * r300->vertex_info.vap_prog_stream_cntl[i]); */
+ }
+ OUT_CS_REG_SEQ(R300_VAP_PROG_STREAM_CNTL_EXT_0, 8);
+ for (i = 0; i < 8; i++) {
+ OUT_CS(r300->vertex_info.vap_prog_stream_cntl_ext[i]);
+ /* debug_printf("prog_stream_cntl_ext%d: 0x%08x\n", i,
+ * r300->vertex_info.vap_prog_stream_cntl_ext[i]); */
+ }
+ END_CS;
+}
+
+void r300_emit_vertex_program_code(struct r300_context* r300,
+ struct r300_vertex_program_code* code,
+ struct r300_constant_buffer* constants)
+{
+ int i;
+ struct r300_screen* r300screen = r300_screen(r300->context.screen);
+ unsigned instruction_count = code->length / 4;
+ CS_LOCALS(r300);
+
+ if (!r300screen->caps->has_tcl) {
+ debug_printf("r300: Implementation error: emit_vertex_shader called,"
+ " but has_tcl is FALSE!\n");
+ return;
+ }
+
+ if (code->constants.Count) {
+ BEGIN_CS(14 + code->length + (code->constants.Count * 4));
+ } else {
+ BEGIN_CS(11 + code->length);
+ }
+
+ /* R300_VAP_PVS_CODE_CNTL_0
+ * R300_VAP_PVS_CONST_CNTL
+ * R300_VAP_PVS_CODE_CNTL_1
+ * See the r5xx docs for instructions on how to use these.
+ * XXX these could be optimized to select better values... */
+ OUT_CS_REG_SEQ(R300_VAP_PVS_CODE_CNTL_0, 3);
+ OUT_CS(R300_PVS_FIRST_INST(0) |
+ R300_PVS_XYZW_VALID_INST(instruction_count - 1) |
+ R300_PVS_LAST_INST(instruction_count - 1));
+ OUT_CS(R300_PVS_MAX_CONST_ADDR(code->constants.Count - 1));
+ OUT_CS(instruction_count - 1);
+
+ OUT_CS_REG(R300_VAP_PVS_VECTOR_INDX_REG, 0);
+ OUT_CS_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, code->length);
+ for (i = 0; i < code->length; i++)
+ OUT_CS(code->body.d[i]);
+
+ if (code->constants.Count) {
+ OUT_CS_REG(R300_VAP_PVS_VECTOR_INDX_REG,
+ (r300screen->caps->is_r500 ?
+ R500_PVS_CONST_START : R300_PVS_CONST_START));
+ OUT_CS_ONE_REG(R300_VAP_PVS_UPLOAD_DATA, code->constants.Count * 4);
+ for (i = 0; i < code->constants.Count; i++) {
+ const float * data = get_shader_constant(r300, &code->constants.Constants[i], constants);
+ OUT_CS_32F(data[0]);
+ OUT_CS_32F(data[1]);
+ OUT_CS_32F(data[2]);
+ OUT_CS_32F(data[3]);
+ }
+ }
+
+ OUT_CS_REG(R300_VAP_CNTL, R300_PVS_NUM_SLOTS(10) |
+ R300_PVS_NUM_CNTLRS(5) |
+ R300_PVS_NUM_FPUS(r300screen->caps->num_vert_fpus) |
+ R300_PVS_VF_MAX_VTX_NUM(12));
+ OUT_CS_REG(R300_VAP_PVS_STATE_FLUSH_REG, 0x0);
+ END_CS;
+}
+
+void r300_emit_vertex_shader(struct r300_context* r300,
+ struct r300_vertex_shader* vs)
+{
+ r300_emit_vertex_program_code(r300, &vs->code, &r300->shader_constants[PIPE_SHADER_VERTEX]);
+}
+
+void r300_emit_viewport_state(struct r300_context* r300,
+ struct r300_viewport_state* viewport)
+{
+ CS_LOCALS(r300);
+
+ BEGIN_CS(9);
+ OUT_CS_REG_SEQ(R300_SE_VPORT_XSCALE, 6);
+ OUT_CS_32F(viewport->xscale);
+ OUT_CS_32F(viewport->xoffset);
+ OUT_CS_32F(viewport->yscale);
+ OUT_CS_32F(viewport->yoffset);
+ OUT_CS_32F(viewport->zscale);
+ OUT_CS_32F(viewport->zoffset);
+
+ if (r300->rs_state->enable_vte) {
+ OUT_CS_REG(R300_VAP_VTE_CNTL, viewport->vte_control);
+ } else {
+ OUT_CS_REG(R300_VAP_VTE_CNTL, 0);
+ }
+ END_CS;
+}
+
+void r300_flush_textures(struct r300_context* r300)
+{
+ CS_LOCALS(r300);
+
+ BEGIN_CS(4);
+ OUT_CS_REG(R300_TX_INVALTAGS, 0);
+ OUT_CS_REG(R300_TX_ENABLE, (1 << r300->texture_count) - 1);
+ END_CS;
+}
+
+/* Emit all dirty state. */
+void r300_emit_dirty_state(struct r300_context* r300)
+{
+ struct r300_screen* r300screen = r300_screen(r300->context.screen);
+ struct r300_texture* tex;
+ int i, dirty_tex = 0;
+ boolean invalid = FALSE;
+
+ if (!(r300->dirty_state)) {
+ return;
+ }
+
+ r300_update_derived_state(r300);
+
+ /* XXX check size */
+validate:
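+    /* Add every buffer referenced by the current state to the winsys
+     * validation list. If any addition fails, flush and retry once;
+     * if validation still fails after a flush, give up. */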
+ /* Color buffers... */
+ for (i = 0; i < r300->framebuffer_state.nr_cbufs; i++) {
+ tex = (struct r300_texture*)r300->framebuffer_state.cbufs[i]->texture;
+ assert(tex && tex->buffer && "cbuf is marked, but NULL!");
+ if (!r300->winsys->add_buffer(r300->winsys, tex->buffer,
+ 0, RADEON_GEM_DOMAIN_VRAM)) {
+ r300->context.flush(&r300->context, 0, NULL);
+ goto validate;
+ }
+ }
+ /* ...depth buffer... */
+ if (r300->framebuffer_state.zsbuf) {
+ tex = (struct r300_texture*)r300->framebuffer_state.zsbuf->texture;
+ assert(tex && tex->buffer && "zsbuf is marked, but NULL!");
+ if (!r300->winsys->add_buffer(r300->winsys, tex->buffer,
+ 0, RADEON_GEM_DOMAIN_VRAM)) {
+ r300->context.flush(&r300->context, 0, NULL);
+ goto validate;
+ }
+ }
+ /* ...textures... */
+ for (i = 0; i < r300->texture_count; i++) {
+ tex = r300->textures[i];
+ assert(tex && tex->buffer && "texture is marked, but NULL!");
+ if (!r300->winsys->add_buffer(r300->winsys, tex->buffer,
+ RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0)) {
+ r300->context.flush(&r300->context, 0, NULL);
+ goto validate;
+ }
+ }
+ /* ...occlusion query buffer... */
+ if (!r300->winsys->add_buffer(r300->winsys, r300->oqbo,
+ 0, RADEON_GEM_DOMAIN_GTT)) {
+ r300->context.flush(&r300->context, 0, NULL);
+ goto validate;
+ }
+ /* ...and vertex buffer. */
+ if (r300->vbo) {
+ if (!r300->winsys->add_buffer(r300->winsys, r300->vbo,
+ RADEON_GEM_DOMAIN_GTT, 0)) {
+ r300->context.flush(&r300->context, 0, NULL);
+ goto validate;
+ }
+ } else {
+ debug_printf("No VBO while emitting dirty state!\n");
+ }
+ if (!r300->winsys->validate(r300->winsys)) {
+ r300->context.flush(&r300->context, 0, NULL);
+ if (invalid) {
+ /* Well, hell. */
+ debug_printf("r300: Stuck in validation loop, gonna quit now.");
+ exit(1);
+ }
+ invalid = TRUE;
+ goto validate;
+ }
+
+ if (r300->dirty_state & R300_NEW_BLEND) {
+ r300_emit_blend_state(r300, r300->blend_state);
+ r300->dirty_state &= ~R300_NEW_BLEND;
+ }
+
+ if (r300->dirty_state & R300_NEW_BLEND_COLOR) {
+ r300_emit_blend_color_state(r300, r300->blend_color_state);
+ r300->dirty_state &= ~R300_NEW_BLEND_COLOR;
+ }
+
+ if (r300->dirty_state & R300_NEW_CLIP) {
+ r300_emit_clip_state(r300, &r300->clip_state);
+ r300->dirty_state &= ~R300_NEW_CLIP;
+ }
+
+ if (r300->dirty_state & R300_NEW_DSA) {
+ r300_emit_dsa_state(r300, r300->dsa_state);
+ r300->dirty_state &= ~R300_NEW_DSA;
+ }
+
+ if (r300->dirty_state & R300_NEW_FRAGMENT_SHADER) {
+ if (r300screen->caps->is_r500) {
+ r500_emit_fragment_program_code(r300, &r300->fs->code, &r300->shader_constants[PIPE_SHADER_FRAGMENT]);
+ } else {
+ r300_emit_fragment_program_code(r300, &r300->fs->code, &r300->shader_constants[PIPE_SHADER_FRAGMENT]);
+ }
+ r300->dirty_state &= ~R300_NEW_FRAGMENT_SHADER;
+ }
+
+ if (r300->dirty_state & R300_NEW_FRAMEBUFFERS) {
+ r300_emit_fb_state(r300, &r300->framebuffer_state);
+ r300->dirty_state &= ~R300_NEW_FRAMEBUFFERS;
+ }
+
+ if (r300->dirty_state & R300_NEW_RASTERIZER) {
+ r300_emit_rs_state(r300, r300->rs_state);
+ r300->dirty_state &= ~R300_NEW_RASTERIZER;
+ }
+
+ if (r300->dirty_state & R300_NEW_RS_BLOCK) {
+ r300_emit_rs_block_state(r300, r300->rs_block);
+ r300->dirty_state &= ~R300_NEW_RS_BLOCK;
+ }
+
+ if (r300->dirty_state & R300_NEW_SCISSOR) {
+ r300_emit_scissor_state(r300, r300->scissor_state);
+ r300->dirty_state &= ~R300_NEW_SCISSOR;
+ }
+
+ /* Samplers and textures are tracked separately but emitted together. */
+ if (r300->dirty_state &
+ (R300_ANY_NEW_SAMPLERS | R300_ANY_NEW_TEXTURES)) {
+ for (i = 0; i < MIN2(r300->sampler_count, r300->texture_count); i++) {
+ if (r300->dirty_state &
+ ((R300_NEW_SAMPLER << i) | (R300_NEW_TEXTURE << i))) {
+ r300_emit_texture(r300,
+ r300->sampler_states[i],
+ r300->textures[i],
+ i);
+ r300->dirty_state &=
+ ~((R300_NEW_SAMPLER << i) | (R300_NEW_TEXTURE << i));
+ dirty_tex++;
+ }
+ }
+ r300->dirty_state &= ~(R300_ANY_NEW_SAMPLERS | R300_ANY_NEW_TEXTURES);
+ }
+
+ if (r300->dirty_state & R300_NEW_VIEWPORT) {
+ r300_emit_viewport_state(r300, r300->viewport_state);
+ r300->dirty_state &= ~R300_NEW_VIEWPORT;
+ }
+
+ if (dirty_tex) {
+ r300_flush_textures(r300);
+ }
+
+ if (r300->dirty_state & R300_NEW_VERTEX_FORMAT) {
+ r300_emit_vertex_format_state(r300);
+ r300->dirty_state &= ~R300_NEW_VERTEX_FORMAT;
+ }
+
+ if (r300->dirty_state & R300_NEW_VERTEX_SHADER) {
+ r300_emit_vertex_shader(r300, r300->vs);
+ r300->dirty_state &= ~R300_NEW_VERTEX_SHADER;
+ }
+
+ /* XXX
+ assert(r300->dirty_state == 0);
+ */
+
+ /* Finally, emit the VBO. */
+ r300_emit_vertex_buffer(r300);
+
+ r300->dirty_hw++;
+}
diff --git a/src/gallium/drivers/r300/r300_emit.h b/src/gallium/drivers/r300/r300_emit.h
new file mode 100644
index 0000000000..c4002b8e5d
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_emit.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_EMIT_H
+#define R300_EMIT_H
+
+#include "util/u_math.h"
+
+#include "r300_context.h"
+#include "r300_cs.h"
+#include "r300_screen.h"
+#include "r300_state_inlines.h"
+
+struct rX00_fragment_program_code;
+struct r300_vertex_program_code;
+
+void r300_emit_blend_state(struct r300_context* r300,
+ struct r300_blend_state* blend);
+
+void r300_emit_blend_color_state(struct r300_context* r300,
+ struct r300_blend_color_state* bc);
+
+void r300_emit_clip_state(struct r300_context* r300,
+ struct pipe_clip_state* clip);
+
+void r300_emit_dsa_state(struct r300_context* r300,
+ struct r300_dsa_state* dsa);
+
+void r300_emit_fragment_program_code(struct r300_context* r300,
+ struct rX00_fragment_program_code* generic_code,
+ struct r300_constant_buffer* externals);
+
+void r500_emit_fragment_program_code(struct r300_context* r300,
+ struct rX00_fragment_program_code* generic_code,
+ struct r300_constant_buffer* externals);
+
+void r300_emit_fb_state(struct r300_context* r300,
+ struct pipe_framebuffer_state* fb);
+
+void r300_emit_query_begin(struct r300_context* r300,
+ struct r300_query* query);
+void r300_emit_query_end(struct r300_context* r300,
+ struct r300_query* query);
+
+void r300_emit_rs_state(struct r300_context* r300, struct r300_rs_state* rs);
+
+void r300_emit_rs_block_state(struct r300_context* r300,
+ struct r300_rs_block* rs);
+
+void r300_emit_scissor_state(struct r300_context* r300,
+ struct r300_scissor_state* scissor);
+
+void r300_emit_texture(struct r300_context* r300,
+ struct r300_sampler_state* sampler,
+ struct r300_texture* tex,
+ unsigned offset);
+
+void r300_emit_vertex_buffer(struct r300_context* r300);
+
+void r300_emit_vertex_format_state(struct r300_context* r300);
+
+void r300_emit_vertex_program_code(struct r300_context* r300,
+ struct r300_vertex_program_code* code,
+ struct r300_constant_buffer* constants);
+
+void r300_emit_vertex_shader(struct r300_context* r300,
+ struct r300_vertex_shader* vs);
+
+void r300_emit_viewport_state(struct r300_context* r300,
+ struct r300_viewport_state* viewport);
+
+void r300_flush_textures(struct r300_context* r300);
+
+/* Emit all dirty state. */
+void r300_emit_dirty_state(struct r300_context* r300);
+
+#endif /* R300_EMIT_H */
diff --git a/src/gallium/drivers/r300/r300_flush.c b/src/gallium/drivers/r300/r300_flush.c
new file mode 100644
index 0000000000..0dff1c6f4f
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_flush.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "r300_flush.h"
+
+static void r300_flush(struct pipe_context* pipe,
+ unsigned flags,
+ struct pipe_fence_handle** fence)
+{
+ struct r300_context* r300 = r300_context(pipe);
+ CS_LOCALS(r300);
+
+ /* We probably need to flush Draw, but we may have been called from
+ * within Draw. This feels kludgy, but it might be the best thing. */
+ if (!r300->draw->flushing) {
+ draw_flush(r300->draw);
+ }
+
+ if (r300->dirty_hw) {
+ FLUSH_CS;
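+        /* After the CS is flushed, re-emit the invariant state and mark
+         * all cached state dirty so it gets re-emitted as well. */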
+ r300_emit_invariant_state(r300);
+ r300->dirty_state = R300_NEW_KITCHEN_SINK;
+ r300->dirty_hw = 0;
+ }
+}
+
+void r300_init_flush_functions(struct r300_context* r300)
+{
+ r300->context.flush = r300_flush;
+}
diff --git a/src/gallium/drivers/r300/r300_flush.h b/src/gallium/drivers/r300/r300_flush.h
new file mode 100644
index 0000000000..9a83d89daa
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_flush.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_FLUSH_H
+#define R300_FLUSH_H
+
+#include "draw/draw_private.h"
+
+#include "pipe/p_context.h"
+
+#include "r300_context.h"
+#include "r300_cs.h"
+
+void r300_init_flush_functions(struct r300_context* r300);
+
+#endif /* R300_FLUSH_H */
diff --git a/src/gallium/drivers/r300/r300_fs.c b/src/gallium/drivers/r300/r300_fs.c
new file mode 100644
index 0000000000..a0e848a59a
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_fs.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ * Joakim Sindholt <opensource@zhasha.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "r300_fs.h"
+
+#include "r300_tgsi_to_rc.h"
+
+#include "radeon_compiler.h"
+
+static void find_output_registers(struct r300_fragment_program_compiler * compiler,
+ struct r300_fragment_shader * fs)
+{
+ unsigned i;
+
+ /* Mark the outputs as not present initially */
+ compiler->OutputColor = fs->info.num_outputs;
+ compiler->OutputDepth = fs->info.num_outputs;
+
+ /* Now see where they really are. */
+ for(i = 0; i < fs->info.num_outputs; ++i) {
+ switch(fs->info.output_semantic_name[i]) {
+ case TGSI_SEMANTIC_COLOR:
+ compiler->OutputColor = i;
+ break;
+ case TGSI_SEMANTIC_POSITION:
+ compiler->OutputDepth = i;
+ break;
+ }
+ }
+}
+
+static void allocate_hardware_inputs(
+ struct r300_fragment_program_compiler * c,
+ void (*allocate)(void * data, unsigned input, unsigned hwreg),
+ void * mydata)
+{
+ struct tgsi_shader_info* info = &((struct r300_fragment_shader*)c->UserData)->info;
+ int total_colors = 0;
+ int colors = 0;
+ int total_generic = 0;
+ int generic = 0;
+ int i;
+
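+    /* First pass: count colors and generics so that the second pass can
+     * assign hardware registers with all colors first, followed by the
+     * fog and generic inputs. */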
+ for (i = 0; i < info->num_inputs; i++) {
+ switch (info->input_semantic_name[i]) {
+ case TGSI_SEMANTIC_COLOR:
+ total_colors++;
+ break;
+ case TGSI_SEMANTIC_FOG:
+ case TGSI_SEMANTIC_GENERIC:
+ total_generic++;
+ break;
+ }
+ }
+
+ for(i = 0; i < info->num_inputs; i++) {
+ switch (info->input_semantic_name[i]) {
+ case TGSI_SEMANTIC_COLOR:
+ allocate(mydata, i, colors);
+ colors++;
+ break;
+ case TGSI_SEMANTIC_FOG:
+ case TGSI_SEMANTIC_GENERIC:
+ allocate(mydata, i, total_colors + generic);
+ generic++;
+ break;
+ }
+ }
+}
+
+void r300_translate_fragment_shader(struct r300_context* r300,
+ struct r300_fragment_shader* fs)
+{
+ struct r300_fragment_program_compiler compiler;
+ struct tgsi_to_rc ttr;
+
+ memset(&compiler, 0, sizeof(compiler));
+ rc_init(&compiler.Base);
+ compiler.Base.Debug = DBG_ON(r300, DBG_FP);
+
+ compiler.code = &fs->code;
+ compiler.is_r500 = r300_screen(r300->context.screen)->caps->is_r500;
+ compiler.AllocateHwInputs = &allocate_hardware_inputs;
+ compiler.UserData = fs;
+
+ /* TODO: Program compilation depends on texture compare modes,
+ * which are sampler state. Therefore, programs need to be recompiled
+ * depending on this state as in the classic Mesa driver.
+ *
+ * This is not yet handled correctly.
+ */
+
+ find_output_registers(&compiler, fs);
+
+ if (compiler.Base.Debug) {
+ debug_printf("r300: Initial fragment program\n");
+ tgsi_dump(fs->state.tokens, 0);
+ }
+
+ /* Translate TGSI to our internal representation */
+ ttr.compiler = &compiler.Base;
+ ttr.info = &fs->info;
+
+ r300_tgsi_to_rc(&ttr, fs->state.tokens);
+
+ /* Invoke the compiler */
+ r3xx_compile_fragment_program(&compiler);
+ if (compiler.Base.Error) {
+ /* Todo: Fail gracefully */
+ fprintf(stderr, "r300 FP: Compiler error\n");
+ abort();
+ }
+
+ /* And, finally... */
+ rc_destroy(&compiler.Base);
+ fs->translated = TRUE;
+}
diff --git a/src/gallium/drivers/r300/r300_fs.h b/src/gallium/drivers/r300/r300_fs.h
new file mode 100644
index 0000000000..9fab789402
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_fs.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ * Joakim Sindholt <opensource@zhasha.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_FS_H
+#define R300_FS_H
+
+#include "tgsi/tgsi_dump.h"
+
+#include "r300_context.h"
+#include "r3xx_fs.h"
+#include "r5xx_fs.h"
+
+#include "radeon_code.h"
+
+struct r300_fragment_shader {
+ /* Parent class */
+ struct pipe_shader_state state;
+ struct tgsi_shader_info info;
+
+ /* Has this shader been translated yet? */
+ boolean translated;
+
+ /* Compiled code */
+ struct rX00_fragment_program_code code;
+};
+
+void r300_translate_fragment_shader(struct r300_context* r300,
+ struct r300_fragment_shader* fs);
+
+#endif /* R300_FS_H */
diff --git a/src/gallium/drivers/r300/r300_query.c b/src/gallium/drivers/r300/r300_query.c
new file mode 100644
index 0000000000..2880d34877
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_query.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "r300_query.h"
+
+#include "r300_emit.h"
+
+static struct pipe_query* r300_create_query(struct pipe_context* pipe,
+ unsigned query_type)
+{
+ struct r300_context* r300 = r300_context(pipe);
+ struct r300_screen* r300screen = r300_screen(r300->context.screen);
+ unsigned query_size = r300screen->caps->num_frag_pipes * 4;
+ struct r300_query* q, * qptr;
+
+ q = CALLOC_STRUCT(r300_query);
+
+ q->type = query_type;
+ assert(q->type == PIPE_QUERY_OCCLUSION_COUNTER);
+
+ q->active = FALSE;
+
+ if (!r300->query_list) {
+ r300->query_list = q;
+ } else if (!is_empty_list(r300->query_list)) {
+ qptr = last_elem(r300->query_list);
+ q->offset = qptr->offset + query_size;
+ insert_at_tail(r300->query_list, q);
+ }
+
+ /* XXX */
+ if (q->offset >= 4096) {
+ q->offset = 0;
+ }
+
+ return (struct pipe_query*)q;
+}
+
+static void r300_destroy_query(struct pipe_context* pipe,
+ struct pipe_query* query)
+{
+ struct r300_query* q = (struct r300_query*)query;
+
+ remove_from_list(q);
+ FREE(query);
+}
+
+static void r300_begin_query(struct pipe_context* pipe,
+ struct pipe_query* query)
+{
+ uint32_t* map;
+ struct r300_context* r300 = r300_context(pipe);
+ struct r300_query* q = (struct r300_query*)query;
+
+ map = pipe->screen->buffer_map(pipe->screen, r300->oqbo,
+ PIPE_BUFFER_USAGE_CPU_WRITE);
+ map += q->offset / 4;
+ *map = ~0;
+ pipe->screen->buffer_unmap(pipe->screen, r300->oqbo);
+
+ r300_emit_dirty_state(r300);
+ r300_emit_query_begin(r300, q);
+}
+
+static void r300_end_query(struct pipe_context* pipe,
+ struct pipe_query* query)
+{
+ struct r300_context* r300 = r300_context(pipe);
+ struct r300_query* q = (struct r300_query*)query;
+
+ r300_emit_dirty_state(r300);
+ r300_emit_query_end(r300, q);
+}
+
+static boolean r300_get_query_result(struct pipe_context* pipe,
+ struct pipe_query* query,
+ boolean wait,
+ uint64_t* result)
+{
+ struct r300_context* r300 = r300_context(pipe);
+ struct r300_screen* r300screen = r300_screen(r300->context.screen);
+ struct r300_query* q = (struct r300_query*)query;
+ unsigned flags = PIPE_BUFFER_USAGE_CPU_READ;
+ uint32_t* map;
+    uint32_t temp = 0;
+ unsigned i;
+
+ if (wait) {
+ pipe->flush(pipe, 0, NULL);
+ } else {
+ flags |= PIPE_BUFFER_USAGE_DONTBLOCK;
+ }
+
+ map = pipe->screen->buffer_map(pipe->screen, r300->oqbo, flags);
+ map += q->offset / 4;
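+    /* Sum the ZPASS counts written by each pipe. A value of ~0 is the
+     * sentinel set in r300_begin_query and means the result has not been
+     * written yet. */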
+ for (i = 0; i < r300screen->caps->num_frag_pipes; i++) {
+ if (*map == ~0) {
+ /* Looks like our results aren't ready yet. */
+ if (wait) {
+ debug_printf("r300: Despite waiting, OQ results haven't"
+ " come in yet.\n");
+ }
+ temp = ~0;
+ break;
+ }
+ temp += *map;
+ map++;
+ }
+ pipe->screen->buffer_unmap(pipe->screen, r300->oqbo);
+
+ if (temp == ~0) {
+ /* Our results haven't been written yet... */
+ return FALSE;
+ }
+
+ *result = temp;
+ return TRUE;
+}
+
+void r300_init_query_functions(struct r300_context* r300)
+{
+ r300->context.create_query = r300_create_query;
+ r300->context.destroy_query = r300_destroy_query;
+ r300->context.begin_query = r300_begin_query;
+ r300->context.end_query = r300_end_query;
+ r300->context.get_query_result = r300_get_query_result;
+}
diff --git a/src/gallium/drivers/r300/r300_query.h b/src/gallium/drivers/r300/r300_query.h
new file mode 100644
index 0000000000..4f50e8f844
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_query.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_QUERY_H
+#define R300_QUERY_H
+
+#include "r300_context.h"
+#include "r300_cs.h"
+#include "r300_reg.h"
+
+struct r300_context;
+
+static INLINE struct r300_query* r300_query(struct pipe_query* q)
+{
+ return (struct r300_query*)q;
+}
+
+void r300_init_query_functions(struct r300_context* r300);
+
+#endif /* R300_QUERY_H */
diff --git a/src/gallium/drivers/r300/r300_reg.h b/src/gallium/drivers/r300/r300_reg.h
new file mode 100644
index 0000000000..03cd219cde
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_reg.h
@@ -0,0 +1,3323 @@
+/**************************************************************************
+
+Copyright (C) 2004-2005 Nicolai Haehnle et al.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+on the rights to use, copy, modify, merge, publish, distribute, sub
+license, and/or sell copies of the Software, and to permit persons to whom
+the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the next
+paragraph) shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+/* *INDENT-OFF* */
+
+#ifndef _R300_REG_H
+#define _R300_REG_H
+
+#define R300_MC_INIT_MISC_LAT_TIMER 0x180
+# define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0
+# define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT 4
+# define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT 8
+# define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT 12
+# define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT 16
+# define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT 20
+# define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT 24
+# define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT 28
+
+
+#define R300_MC_INIT_GFX_LAT_TIMER 0x154
+# define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT 0
+# define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT 4
+# define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT 8
+# define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT 12
+# define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT 16
+# define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT 20
+# define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT 24
+# define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT 28
+
+/*
+ * This file contains registers and constants for the R300. They have been
+ * found mostly by examining command buffers captured using glxtest, as well
+ * as by extrapolating some known registers and constants from the R200.
+ * I am fairly certain that they are correct unless stated otherwise
+ * in comments.
+ */
+
+#define R300_SE_VPORT_XSCALE 0x1D98
+#define R300_SE_VPORT_XOFFSET 0x1D9C
+#define R300_SE_VPORT_YSCALE 0x1DA0
+#define R300_SE_VPORT_YOFFSET 0x1DA4
+#define R300_SE_VPORT_ZSCALE 0x1DA8
+#define R300_SE_VPORT_ZOFFSET 0x1DAC
+
+#define R300_VAP_PORT_IDX0 0x2040
+/*
+ * Vertex Array Processing (VAP) Control
+ */
+#define R300_VAP_CNTL 0x2080
+# define R300_PVS_NUM_SLOTS_SHIFT 0
+# define R300_PVS_NUM_CNTLRS_SHIFT 4
+# define R300_PVS_NUM_FPUS_SHIFT 8
+# define R300_VF_MAX_VTX_NUM_SHIFT 18
+# define R300_PVS_NUM_SLOTS(x) ((x) << 0)
+# define R300_PVS_NUM_CNTLRS(x) ((x) << 4)
+# define R300_PVS_NUM_FPUS(x) ((x) << 8)
+# define R300_PVS_VF_MAX_VTX_NUM(x) ((x) << 18)
+# define R300_GL_CLIP_SPACE_DEF (0 << 22)
+# define R300_DX_CLIP_SPACE_DEF (1 << 22)
+# define R500_TCL_STATE_OPTIMIZATION (1 << 23)
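+/* As an illustration, r300_emit_vertex_program_code in r300_emit.c programs
+ * this register with R300_PVS_NUM_SLOTS(10) | R300_PVS_NUM_CNTLRS(5) |
+ * R300_PVS_NUM_FPUS(caps->num_vert_fpus) | R300_PVS_VF_MAX_VTX_NUM(12). */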
+
+/* This register is written directly and also starts data section
+ * in many 3d CP_PACKET3's
+ */
+#define R300_VAP_VF_CNTL 0x2084
+# define R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT 0
+# define R300_VAP_VF_CNTL__PRIM_NONE (0<<0)
+# define R300_VAP_VF_CNTL__PRIM_POINTS (1<<0)
+# define R300_VAP_VF_CNTL__PRIM_LINES (2<<0)
+# define R300_VAP_VF_CNTL__PRIM_LINE_STRIP (3<<0)
+# define R300_VAP_VF_CNTL__PRIM_TRIANGLES (4<<0)
+# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN (5<<0)
+# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP (6<<0)
+# define R300_VAP_VF_CNTL__PRIM_LINE_LOOP (12<<0)
+# define R300_VAP_VF_CNTL__PRIM_QUADS (13<<0)
+# define R300_VAP_VF_CNTL__PRIM_QUAD_STRIP (14<<0)
+# define R300_VAP_VF_CNTL__PRIM_POLYGON (15<<0)
+
+# define R300_VAP_VF_CNTL__PRIM_WALK__SHIFT 4
+ /* State based - direct writes to registers trigger vertex
+ generation */
+# define R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED (0<<4)
+# define R300_VAP_VF_CNTL__PRIM_WALK_INDICES (1<<4)
+# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST (2<<4)
+# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED (3<<4)
+
+ /* I don't think I saw these three used.. */
+# define R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT 6
+# define R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT 9
+# define R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT 10
+
+ /* index size - when not set the indices are assumed to be 16 bit */
+# define R300_VAP_VF_CNTL__INDEX_SIZE_32bit (1<<11)
+ /* number of vertices */
+# define R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT 16
+
+#define R500_VAP_INDEX_OFFSET 0x208c
+
+#define R300_VAP_OUTPUT_VTX_FMT_0 0x2090
+# define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT (1<<0)
+# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_0_PRESENT (1<<1)
+# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2)
+# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3)
+# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4)
+# define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16)
+
+#define R300_VAP_OUTPUT_VTX_FMT_1 0x2094
+ /* each of the following is 3 bits wide, specifies number
+ of components */
+# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
+# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
+# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
+# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
+# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
+# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
+# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
+# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
+# define R300_VAP_OUTPUT_VTX_FMT_1__NOT_PRESENT 0
+# define R300_VAP_OUTPUT_VTX_FMT_1__1_COMPONENT 1
+# define R300_VAP_OUTPUT_VTX_FMT_1__2_COMPONENTS 2
+# define R300_VAP_OUTPUT_VTX_FMT_1__3_COMPONENTS 3
+# define R300_VAP_OUTPUT_VTX_FMT_1__4_COMPONENTS 4
+
+#define R300_VAP_VPORT_XSCALE 0x2098
+#define R300_VAP_VPORT_XOFFSET 0x209c
+#define R300_VAP_VPORT_YSCALE 0x20a0
+#define R300_VAP_VPORT_YOFFSET 0x20a4
+#define R300_VAP_VPORT_ZSCALE 0x20a8
+#define R300_VAP_VPORT_ZOFFSET 0x20ac
+
+#define R300_VAP_VTE_CNTL 0x20b0
+#define R300_SE_VTE_CNTL R300_VAP_VTE_CNTL
+# define R300_VPORT_X_SCALE_ENA (1 << 0)
+# define R300_VPORT_X_OFFSET_ENA (1 << 1)
+# define R300_VPORT_Y_SCALE_ENA (1 << 2)
+# define R300_VPORT_Y_OFFSET_ENA (1 << 3)
+# define R300_VPORT_Z_SCALE_ENA (1 << 4)
+# define R300_VPORT_Z_OFFSET_ENA (1 << 5)
+# define R300_VTX_XY_FMT (1 << 8)
+# define R300_VTX_Z_FMT (1 << 9)
+# define R300_VTX_W0_FMT (1 << 10)
+# define R300_SERIAL_PROC_ENA (1 << 11)
+
+#define R300_VAP_VTX_SIZE 0x20b4
+
+/* BEGIN: Vertex data assembly - lots of uncertainties */
+
+/* gap */
+
+/* Maximum Vertex Indx Clamp */
+#define R300_VAP_VF_MAX_VTX_INDX 0x2134
+/* Minimum Vertex Indx Clamp */
+#define R300_VAP_VF_MIN_VTX_INDX 0x2138
+
+/** Vertex assembler/processor control status */
+#define R300_VAP_CNTL_STATUS 0x2140
+/* No swap at all (default) */
+# define R300_VC_NO_SWAP (0 << 0)
+/* 16-bit swap: 0xAABBCCDD becomes 0xBBAADDCC */
+# define R300_VC_16BIT_SWAP (1 << 0)
+/* 32-bit swap: 0xAABBCCDD becomes 0xDDCCBBAA */
+# define R300_VC_32BIT_SWAP (2 << 0)
+/* Half-dword swap: 0xAABBCCDD becomes 0xCCDDAABB */
+# define R300_VC_HALF_DWORD_SWAP (3 << 0)
+/* The TCL engine will not be used (as it is logically or even physically removed) */
+# define R300_VAP_TCL_BYPASS (1 << 8)
+/* Read only flag if TCL engine is busy. */
+# define R300_VAP_PVS_BUSY (1 << 11)
+/* TODO: gap for MAX_MPS */
+/* Read only flag if the vertex store is busy. */
+# define R300_VAP_VS_BUSY (1 << 24)
+/* Read only flag if the reciprocal engine is busy. */
+# define R300_VAP_RCP_BUSY (1 << 25)
+/* Read only flag if the viewport transform engine is busy. */
+# define R300_VAP_VTE_BUSY (1 << 26)
+/* Read only flag if the memory interface unit is busy. */
+# define R300_VAP_MUI_BUSY (1 << 27)
+/* Read only flag if the vertex cache is busy. */
+# define R300_VAP_VC_BUSY (1 << 28)
+/* Read only flag if the vertex fetcher is busy. */
+# define R300_VAP_VF_BUSY (1 << 29)
+/* Read only flag if the register pipeline is busy. */
+# define R300_VAP_REGPIPE_BUSY (1 << 30)
+/* Read only flag if the VAP engine is busy. */
+# define R300_VAP_VAP_BUSY (1 << 31)
+
+/* gap */
+
+/* Where do we get our vertex data?
+ *
+ * Vertex data comes either from immediate mode registers or from
+ * vertex arrays.
+ * There appears to be no mixed mode (though we can force the pitch of
+ * vertex arrays to 0, effectively reusing the same element over and over
+ * again).
+ *
+ * Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
+ * if these registers influence vertex array processing.
+ *
+ * Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
+ *
+ * In both cases, vertex attributes are then passed through INPUT_ROUTE.
+ *
+ * Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
+ * into the vertex processor's input registers.
+ * The first word routes the first input, the second word the second, etc.
+ * The corresponding input is routed into the register with the given index.
+ * The list is ended by a word with INPUT_ROUTE_END set.
+ *
+ * Always set COMPONENTS_4 in immediate mode.
+ */
+
+#define R300_VAP_PROG_STREAM_CNTL_0 0x2150
+# define R300_DATA_TYPE_0_SHIFT 0
+# define R300_DATA_TYPE_FLOAT_1 0
+# define R300_DATA_TYPE_FLOAT_2 1
+# define R300_DATA_TYPE_FLOAT_3 2
+# define R300_DATA_TYPE_FLOAT_4 3
+# define R300_DATA_TYPE_BYTE 4
+# define R300_DATA_TYPE_D3DCOLOR 5
+# define R300_DATA_TYPE_SHORT_2 6
+# define R300_DATA_TYPE_SHORT_4 7
+# define R300_DATA_TYPE_VECTOR_3_TTT 8
+# define R300_DATA_TYPE_VECTOR_3_EET 9
+# define R300_SKIP_DWORDS_SHIFT 4
+# define R300_DST_VEC_LOC_SHIFT 8
+# define R300_LAST_VEC (1 << 13)
+# define R300_SIGNED (1 << 14)
+# define R300_NORMALIZE (1 << 15)
+# define R300_DATA_TYPE_1_SHIFT 16
+#define R300_VAP_PROG_STREAM_CNTL_1 0x2154
+#define R300_VAP_PROG_STREAM_CNTL_2 0x2158
+#define R300_VAP_PROG_STREAM_CNTL_3 0x215C
+#define R300_VAP_PROG_STREAM_CNTL_4 0x2160
+#define R300_VAP_PROG_STREAM_CNTL_5 0x2164
+#define R300_VAP_PROG_STREAM_CNTL_6 0x2168
+#define R300_VAP_PROG_STREAM_CNTL_7 0x216C
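+
+/* Illustrative sketch from the field layout above (not a value captured
+ * from hardware): a lone three-component float attribute routed to
+ * destination vector 0 would presumably be described by
+ *   (R300_DATA_TYPE_FLOAT_3 << R300_DATA_TYPE_0_SHIFT) |
+ *   (0 << R300_DST_VEC_LOC_SHIFT) | R300_LAST_VEC
+ * in the low half of one of these registers. */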
+/* gap */
+
+/* Notes:
+ * - always set up to produce at least two attributes:
+ *   if the vertex program uses only position, fglrx will set the normal, too
+ * - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal.
+ */
+#define R300_VAP_VTX_STATE_CNTL 0x2180
+# define R300_COLOR_0_ASSEMBLY_SHIFT 0
+# define R300_SEL_COLOR 0
+# define R300_SEL_USER_COLOR_0 1
+# define R300_SEL_USER_COLOR_1 2
+# define R300_COLOR_1_ASSEMBLY_SHIFT 2
+# define R300_COLOR_2_ASSEMBLY_SHIFT 4
+# define R300_COLOR_3_ASSEMBLY_SHIFT 6
+# define R300_COLOR_4_ASSEMBLY_SHIFT 8
+# define R300_COLOR_5_ASSEMBLY_SHIFT 10
+# define R300_COLOR_6_ASSEMBLY_SHIFT 12
+# define R300_COLOR_7_ASSEMBLY_SHIFT 14
+# define R300_UPDATE_USER_COLOR_0_ENA (1 << 16)
+
+/*
+ * Each bit in this field applies to the corresponding vector in the VSM
+ * memory (i.e. Bit 0 applies to VECTOR_0 (POSITION), etc.). If the bit
+ * is set, then the corresponding 4-Dword Vector is output into the Vertex Stream.
+ */
+#define R300_VAP_VSM_VTX_ASSM 0x2184
+# define R300_INPUT_CNTL_POS 0x00000001
+# define R300_INPUT_CNTL_NORMAL 0x00000002
+# define R300_INPUT_CNTL_COLOR 0x00000004
+# define R300_INPUT_CNTL_TC0 0x00000400
+# define R300_INPUT_CNTL_TC1 0x00000800
+# define R300_INPUT_CNTL_TC2 0x00001000 /* GUESS */
+# define R300_INPUT_CNTL_TC3 0x00002000 /* GUESS */
+# define R300_INPUT_CNTL_TC4 0x00004000 /* GUESS */
+# define R300_INPUT_CNTL_TC5 0x00008000 /* GUESS */
+# define R300_INPUT_CNTL_TC6 0x00010000 /* GUESS */
+# define R300_INPUT_CNTL_TC7 0x00020000 /* GUESS */
+
+/* Programmable Stream Control Signed Normalize Control */
+#define R300_VAP_PSC_SGN_NORM_CNTL 0x21dc
+# define SGN_NORM_ZERO 0
+# define SGN_NORM_ZERO_CLAMP_MINUS_ONE 1
+# define SGN_NORM_NO_ZERO 2
+# define R300_SGN_NORM_NO_ZERO (SGN_NORM_NO_ZERO | \
+ (SGN_NORM_NO_ZERO << 2) | (SGN_NORM_NO_ZERO << 4) | \
+ (SGN_NORM_NO_ZERO << 6) | (SGN_NORM_NO_ZERO << 8) | \
+ (SGN_NORM_NO_ZERO << 10) | (SGN_NORM_NO_ZERO << 12) | \
+ (SGN_NORM_NO_ZERO << 14) | (SGN_NORM_NO_ZERO << 16) | \
+ (SGN_NORM_NO_ZERO << 18) | (SGN_NORM_NO_ZERO << 20) | \
+ (SGN_NORM_NO_ZERO << 22) | (SGN_NORM_NO_ZERO << 24) | \
+ (SGN_NORM_NO_ZERO << 26) | (SGN_NORM_NO_ZERO << 28) | \
+ (SGN_NORM_NO_ZERO << 30))
+
+/* gap */
+
+/* Words parallel to INPUT_ROUTE_0; All words that are active in INPUT_ROUTE_0
+ * are set to a swizzling bit pattern, other words are 0.
+ *
+ * In immediate mode, the pattern is always set to xyzw. In vertex array
+ * mode, the swizzling pattern is e.g. used to set zw components in texture
+ * coordinates with only two components.
+ */
+#define R300_VAP_PROG_STREAM_CNTL_EXT_0 0x21e0
+# define R300_SWIZZLE0_SHIFT 0
+# define R300_SWIZZLE_SELECT_X_SHIFT 0
+# define R300_SWIZZLE_SELECT_Y_SHIFT 3
+# define R300_SWIZZLE_SELECT_Z_SHIFT 6
+# define R300_SWIZZLE_SELECT_W_SHIFT 9
+
+# define R300_SWIZZLE_SELECT_X 0
+# define R300_SWIZZLE_SELECT_Y 1
+# define R300_SWIZZLE_SELECT_Z 2
+# define R300_SWIZZLE_SELECT_W 3
+# define R300_SWIZZLE_SELECT_FP_ZERO 4
+# define R300_SWIZZLE_SELECT_FP_ONE 5
+/* alternate forms for r300_emit.c */
+# define R300_INPUT_ROUTE_SELECT_X 0
+# define R300_INPUT_ROUTE_SELECT_Y 1
+# define R300_INPUT_ROUTE_SELECT_Z 2
+# define R300_INPUT_ROUTE_SELECT_W 3
+# define R300_INPUT_ROUTE_SELECT_ZERO 4
+# define R300_INPUT_ROUTE_SELECT_ONE 5
+
+# define R300_WRITE_ENA_SHIFT 12
+# define R300_WRITE_ENA_X 1
+# define R300_WRITE_ENA_Y 2
+# define R300_WRITE_ENA_Z 4
+# define R300_WRITE_ENA_W 8
+# define R300_SWIZZLE1_SHIFT 16
+
+# define R300_VAP_SWIZZLE_XYZW \
+ ((R300_SWIZZLE_SELECT_X << R300_SWIZZLE_SELECT_X_SHIFT) | \
+ (R300_SWIZZLE_SELECT_Y << R300_SWIZZLE_SELECT_Y_SHIFT) | \
+ (R300_SWIZZLE_SELECT_Z << R300_SWIZZLE_SELECT_Z_SHIFT) | \
+ (R300_SWIZZLE_SELECT_W << R300_SWIZZLE_SELECT_W_SHIFT) | \
+ (0xf << R300_WRITE_ENA_SHIFT))
+
+#define R300_VAP_PROG_STREAM_CNTL_EXT_1 0x21e4
+#define R300_VAP_PROG_STREAM_CNTL_EXT_2 0x21e8
+#define R300_VAP_PROG_STREAM_CNTL_EXT_3 0x21ec
+#define R300_VAP_PROG_STREAM_CNTL_EXT_4 0x21f0
+#define R300_VAP_PROG_STREAM_CNTL_EXT_5 0x21f4
+#define R300_VAP_PROG_STREAM_CNTL_EXT_6 0x21f8
+#define R300_VAP_PROG_STREAM_CNTL_EXT_7 0x21fc
+
+/* END: Vertex data assembly */
+
+/* gap */
+
+/* BEGIN: Upload vertex program and data */
+
+/*
+ * The programmable vertex shader unit has a memory bank of unknown size
+ * that can be written to in 16 byte units by writing the address into
+ * UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
+ *
+ * Pointers into the memory bank are always in multiples of 16 bytes.
+ *
+ * The memory bank is divided into areas with fixed meaning.
+ *
+ * Starting at address UPLOAD_PROGRAM: Vertex program instructions.
+ * Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
+ * whereas the difference between known addresses suggests size 512.
+ *
+ * Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
+ * Native reported limits and the VPI layout suggest size 256, whereas
+ * difference between known addresses suggests size 512.
+ *
+ * At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
+ * floating point pointsize. The exact purpose of this state is uncertain,
+ * as there is also the R300_RE_POINTSIZE register.
+ *
+ * Multiple vertex programs and parameter sets can be loaded at once,
+ * which could explain the size discrepancy.
+ */
+#define R300_VAP_PVS_VECTOR_INDX_REG 0x2200
+# define R300_PVS_CODE_START 0
+# define R300_MAX_PVS_CODE_LINES 256
+# define R500_MAX_PVS_CODE_LINES 1024
+# define R300_PVS_CONST_START 512
+# define R500_PVS_CONST_START 1024
+# define R300_MAX_PVS_CONST_VECS 256
+# define R500_MAX_PVS_CONST_VECS 1024
+# define R300_PVS_UCP_START 1024
+# define R500_PVS_UCP_START 1536
+# define R300_POINT_VPORT_SCALE_OFFSET 1030
+# define R500_POINT_VPORT_SCALE_OFFSET 1542
+# define R300_POINT_GEN_TEX_OFFSET 1031
+# define R500_POINT_GEN_TEX_OFFSET 1543
+
+/*
+ * These are obsolete defines from r300_context.h, but they might give some
+ * clues when investigating the addresses further...
+ */
+#if 0
+#define VSF_DEST_PROGRAM 0x0
+#define VSF_DEST_MATRIX0 0x200
+#define VSF_DEST_MATRIX1 0x204
+#define VSF_DEST_MATRIX2 0x208
+#define VSF_DEST_VECTOR0 0x20c
+#define VSF_DEST_VECTOR1 0x20d
+#define VSF_DEST_UNKNOWN1 0x400
+#define VSF_DEST_UNKNOWN2 0x406
+#endif
+
+/* gap */
+
+#define R300_VAP_PVS_UPLOAD_DATA 0x2208
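+/* Typical upload sequence (see r300_emit_vertex_program_code in r300_emit.c):
+ * write the destination slot into R300_VAP_PVS_VECTOR_INDX_REG, e.g. 0 for
+ * code or R300_PVS_CONST_START for constants, then stream the vectors as
+ * groups of four DWORDs through R300_VAP_PVS_UPLOAD_DATA. */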
+
+/* END: Upload vertex program and data */
+
+/* gap */
+
+/* I do not know the purpose of this register. However, I do know that
+ * it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
+ * for normal rendering.
+ *
+ * 2007-11-05: This register is the user clip plane control register, but there
+ * also seems to be a rendering mode control; the NORMAL/CLEAR defines.
+ *
+ * See bug #9871. http://bugs.freedesktop.org/attachment.cgi?id=10672&action=view
+ */
+#define R300_VAP_CLIP_CNTL 0x221C
+# define R300_VAP_UCP_ENABLE_0 (1 << 0)
+# define R300_VAP_UCP_ENABLE_1 (1 << 1)
+# define R300_VAP_UCP_ENABLE_2 (1 << 2)
+# define R300_VAP_UCP_ENABLE_3 (1 << 3)
+# define R300_VAP_UCP_ENABLE_4 (1 << 4)
+# define R300_VAP_UCP_ENABLE_5 (1 << 5)
+# define R300_PS_UCP_MODE_DIST_COP (0 << 14)
+# define R300_PS_UCP_MODE_RADIUS_COP (1 << 14)
+# define R300_PS_UCP_MODE_RADIUS_COP_CLIP (2 << 14)
+# define R300_PS_UCP_MODE_CLIP_AS_TRIFAN (3 << 14)
+# define R300_CLIP_DISABLE (1 << 16)
+# define R300_UCP_CULL_ONLY_ENABLE (1 << 17)
+# define R300_BOUNDARY_EDGE_FLAG_ENABLE (1 << 18)
+# define R500_COLOR2_IS_TEXTURE (1 << 20)
+# define R500_COLOR3_IS_TEXTURE (1 << 21)
+
+/* These seem to be per-pixel and per-vertex X and Y clipping planes. The first
+ * plane is per-pixel and the second plane is per-vertex.
+ *
+ * This was determined by experimentation alone but I believe it is correct.
+ *
+ * These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest.
+ */
+#define R300_VAP_GB_VERT_CLIP_ADJ 0x2220
+#define R300_VAP_GB_VERT_DISC_ADJ 0x2224
+#define R300_VAP_GB_HORZ_CLIP_ADJ 0x2228
+#define R300_VAP_GB_HORZ_DISC_ADJ 0x222c
+
+/* gap */
+
+/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
+ * rendering commands and overwriting vertex program parameters.
+ * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
+ * avoids bugs caused by still-running shaders reading bad data from memory.
+ */
+#define R300_VAP_PVS_STATE_FLUSH_REG 0x2284
+
+/* This register is used to define the number of core clocks to wait for a
+ * vertex to be received by the VAP input controller (while the primitive
+ * path is backed up) before forcing any accumulated vertices to be submitted
+ * to the vertex processing path.
+ */
+#define VAP_PVS_VTX_TIMEOUT_REG 0x2288
+# define R300_2288_R300 0x00750000 /* -- nh */
+# define R300_2288_RV350 0x0000FFFF /* -- Vladimir */
+
+/* gap */
+
+/* Addresses are relative to the vertex program instruction area of the
+ * memory bank. PROGRAM_END points to the last instruction of the active
+ * program.
+ *
+ * The meaning of the two UNKNOWN fields is obviously not known. However,
+ * experiments so far have shown that both *must* point to an instruction
+ * inside the vertex program, otherwise the GPU locks up.
+ *
+ * fglrx usually sets CNTL_3_UNKNOWN to the end of the program, and
+ * R300_PVS_CNTL_1_POS_END_SHIFT points to the instruction where the last
+ * write to the position register takes place.
+ *
+ * Most likely this is used to skip the rest of the program when a group of
+ * vertices is not visible. For some reason this "section" sometimes also
+ * accepts other instructions that have no relationship to position
+ * calculations.
+ */
+#define R300_VAP_PVS_CODE_CNTL_0 0x22D0
+# define R300_PVS_FIRST_INST_SHIFT 0
+# define R300_PVS_XYZW_VALID_INST_SHIFT 10
+# define R300_PVS_LAST_INST_SHIFT 20
+# define R300_PVS_FIRST_INST(x) ((x) << 0)
+# define R300_PVS_XYZW_VALID_INST(x) ((x) << 10)
+# define R300_PVS_LAST_INST(x) ((x) << 20)
+/* Addresses are relative to the vertex program parameters area. */
+#define R300_VAP_PVS_CONST_CNTL 0x22D4
+# define R300_PVS_CONST_BASE_OFFSET_SHIFT 0
+# define R300_PVS_MAX_CONST_ADDR_SHIFT 16
+# define R300_PVS_MAX_CONST_ADDR(x) ((x) << 16)
+#define R300_VAP_PVS_CODE_CNTL_1 0x22D8
+# define R300_PVS_LAST_VTX_SRC_INST_SHIFT 0
+#define R300_VAP_PVS_FLOW_CNTL_OPC 0x22DC
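+
+/* Illustrative sketch only: a plausible way to program the PVS code and
+ * constant control registers for a vertex program with ninst instructions
+ * (ninst >= 1) and nconst constant vectors. The particular field values are
+ * assumptions for illustration, and write_reg() is the hypothetical helper
+ * from the upload sketch above.
+ */
+#if 0
+static void setup_pvs_control(unsigned ninst, unsigned nconst)
+{
+    /* First instruction at 0; assume position is written by the last one. */
+    write_reg(R300_VAP_PVS_CODE_CNTL_0,
+              R300_PVS_FIRST_INST(0) |
+              R300_PVS_XYZW_VALID_INST(ninst - 1) |
+              R300_PVS_LAST_INST(ninst - 1));
+    write_reg(R300_VAP_PVS_CODE_CNTL_1,
+              (ninst - 1) << R300_PVS_LAST_VTX_SRC_INST_SHIFT);
+
+    /* Constants start at offset 0 of the parameter area. */
+    write_reg(R300_VAP_PVS_CONST_CNTL,
+              (0 << R300_PVS_CONST_BASE_OFFSET_SHIFT) |
+              R300_PVS_MAX_CONST_ADDR(nconst ? nconst - 1 : 0));
+}
+#endif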
+
+/* The entire range from 0x2300 to 0x24AC inclusive seems to be used for
+ * immediate vertices.
+ */
+#define R300_VAP_VTX_COLOR_R 0x2464
+#define R300_VAP_VTX_COLOR_G 0x2468
+#define R300_VAP_VTX_COLOR_B 0x246C
+#define R300_VAP_VTX_POS_0_X_1 0x2490 /* used for glVertex2*() */
+#define R300_VAP_VTX_POS_0_Y_1 0x2494
+#define R300_VAP_VTX_COLOR_PKD 0x249C /* RGBA */
+#define R300_VAP_VTX_POS_0_X_2 0x24A0 /* used for glVertex3*() */
+#define R300_VAP_VTX_POS_0_Y_2 0x24A4
+#define R300_VAP_VTX_POS_0_Z_2 0x24A8
+/* write 0 to indicate end of packet? */
+#define R300_VAP_VTX_END_OF_PKT 0x24AC
+
+/* gap */
+
+/* These are values from r300_reg/r300_reg.h - they are known to be correct
+ * and are here so we can use one register file instead of several
+ * - Vladimir
+ */
+#define R300_GB_VAP_RASTER_VTX_FMT_0 0x4000
+# define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT (1<<0)
+# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT (1<<1)
+# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT (1<<2)
+# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT (1<<3)
+# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT (1<<4)
+# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE (0xf<<5)
+# define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT (0x1<<16)
+
+#define R300_GB_VAP_RASTER_VTX_FMT_1 0x4004
+ /* each of the following is 3 bits wide, specifies number
+ of components */
+# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
+# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
+# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
+# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
+# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
+# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
+# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
+# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
+
+/* UNK30 seems to enable point-to-quad transformation on textures
+ * (or something closely related to that).
+ * This bit is rather fatal at the moment due to shortcomings on the pixel
+ * shader side.
+ *
+ * Specifies the top-level Raster pipe specific enable controls.
+ */
+#define R300_GB_ENABLE 0x4008
+# define R300_GB_POINT_STUFF_DISABLE (0 << 0)
+# define R300_GB_POINT_STUFF_ENABLE (1 << 0) /* Specifies if points will have stuffed texture coordinates. */
+# define R300_GB_LINE_STUFF_DISABLE (0 << 1)
+# define R300_GB_LINE_STUFF_ENABLE (1 << 1) /* Specifies if lines will have stuffed texture coordinates. */
+# define R300_GB_TRIANGLE_STUFF_DISABLE (0 << 2)
+# define R300_GB_TRIANGLE_STUFF_ENABLE (1 << 2) /* Specifies if triangles will have stuffed texture coordinates. */
+# define R300_GB_STENCIL_AUTO_DISABLE (0 << 4)
+# define R300_GB_STENCIL_AUTO_ENABLE (1 << 4) /* Enable stencil auto inc/dec based on triangle cw/ccw, force into dzy low bit. */
+# define R300_GB_STENCIL_AUTO_FORCE (2 << 4) /* Force 0 into dzy low bit. */
+
+ /* each of the following is 2 bits wide */
+#define R300_GB_TEX_REPLICATE 0 /* Replicate VAP source texture coordinates (S,T,[R,Q]). */
+#define R300_GB_TEX_ST 1 /* Stuff with source texture coordinates (S,T). */
+#define R300_GB_TEX_STR 2 /* Stuff with source texture coordinates (S,T,R). */
+# define R300_GB_TEX0_SOURCE_SHIFT 16
+# define R300_GB_TEX1_SOURCE_SHIFT 18
+# define R300_GB_TEX2_SOURCE_SHIFT 20
+# define R300_GB_TEX3_SOURCE_SHIFT 22
+# define R300_GB_TEX4_SOURCE_SHIFT 24
+# define R300_GB_TEX5_SOURCE_SHIFT 26
+# define R300_GB_TEX6_SOURCE_SHIFT 28
+# define R300_GB_TEX7_SOURCE_SHIFT 30
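+
+/* Illustrative sketch only: composing a GB_ENABLE value that turns on point
+ * texture coordinate stuffing and selects (S,T) stuffing for texture 0, as a
+ * point-sprite setup might do. This is an assumption about typical usage, not
+ * a documented recipe.
+ */
+#if 0
+static uint32_t gb_enable_point_sprites(void)
+{
+    return R300_GB_POINT_STUFF_ENABLE |
+           R300_GB_LINE_STUFF_DISABLE |
+           R300_GB_TRIANGLE_STUFF_DISABLE |
+           (R300_GB_TEX_ST << R300_GB_TEX0_SOURCE_SHIFT);
+}
+#endif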
+
+/* MSPOS - positions for multisample antialiasing (?) */
+#define R300_GB_MSPOS0 0x4010
+ /* shifts - each of the fields is 4 bits */
+# define R300_GB_MSPOS0__MS_X0_SHIFT 0
+# define R300_GB_MSPOS0__MS_Y0_SHIFT 4
+# define R300_GB_MSPOS0__MS_X1_SHIFT 8
+# define R300_GB_MSPOS0__MS_Y1_SHIFT 12
+# define R300_GB_MSPOS0__MS_X2_SHIFT 16
+# define R300_GB_MSPOS0__MS_Y2_SHIFT 20
+# define R300_GB_MSPOS0__MSBD0_Y 24
+# define R300_GB_MSPOS0__MSBD0_X 28
+
+#define R300_GB_MSPOS1 0x4014
+# define R300_GB_MSPOS1__MS_X3_SHIFT 0
+# define R300_GB_MSPOS1__MS_Y3_SHIFT 4
+# define R300_GB_MSPOS1__MS_X4_SHIFT 8
+# define R300_GB_MSPOS1__MS_Y4_SHIFT 12
+# define R300_GB_MSPOS1__MS_X5_SHIFT 16
+# define R300_GB_MSPOS1__MS_Y5_SHIFT 20
+# define R300_GB_MSPOS1__MSBD1 24
+
+/* Specifies the graphics pipeline configuration for rasterization. */
+#define R300_GB_TILE_CONFIG 0x4018
+# define R300_GB_TILE_DISABLE (0 << 0)
+# define R300_GB_TILE_ENABLE (1 << 0)
+# define R300_GB_TILE_PIPE_COUNT_RV300 (0 << 1) /* RV350 (1 pipe, 1 ctx) */
+# define R300_GB_TILE_PIPE_COUNT_R300 (3 << 1) /* R300 (2 pipes, 1 ctx) */
+# define R300_GB_TILE_PIPE_COUNT_R420_3P (6 << 1) /* R420-3P (3 pipes, 1 ctx) */
+# define R300_GB_TILE_PIPE_COUNT_R420 (7 << 1) /* R420 (4 pipes, 1 ctx) */
+# define R300_GB_TILE_SIZE_8 (0 << 4)
+# define R300_GB_TILE_SIZE_16 (1 << 4)
+# define R300_GB_TILE_SIZE_32 (2 << 4)
+# define R300_GB_SUPER_SIZE_1 (0 << 6)
+# define R300_GB_SUPER_SIZE_2 (1 << 6)
+# define R300_GB_SUPER_SIZE_4 (2 << 6)
+# define R300_GB_SUPER_SIZE_8 (3 << 6)
+# define R300_GB_SUPER_SIZE_16 (4 << 6)
+# define R300_GB_SUPER_SIZE_32 (5 << 6)
+# define R300_GB_SUPER_SIZE_64 (6 << 6)
+# define R300_GB_SUPER_SIZE_128 (7 << 6)
+# define R300_GB_SUPER_X_SHIFT 9 /* 3 bits wide */
+# define R300_GB_SUPER_Y_SHIFT 12 /* 3 bits wide */
+# define R300_GB_SUPER_TILE_A (0 << 15)
+# define R300_GB_SUPER_TILE_B (1 << 15)
+# define R300_GB_SUBPIXEL_1_12 (0 << 16)
+# define R300_GB_SUBPIXEL_1_16 (1 << 16)
+# define GB_TILE_CONFIG_QUADS_PER_RAS_4 (0 << 17)
+# define GB_TILE_CONFIG_QUADS_PER_RAS_8 (1 << 17)
+# define GB_TILE_CONFIG_QUADS_PER_RAS_16 (2 << 17)
+# define GB_TILE_CONFIG_QUADS_PER_RAS_32 (3 << 17)
+# define GB_TILE_CONFIG_BB_SCAN_INTERCEPT (0 << 19)
+# define GB_TILE_CONFIG_BB_SCAN_BOUND_BOX (1 << 19)
+# define GB_TILE_CONFIG_ALT_SCAN_EN_LR (0 << 20)
+# define GB_TILE_CONFIG_ALT_SCAN_EN_LRL (1 << 20)
+# define GB_TILE_CONFIG_ALT_OFFSET (0 << 21)
+# define GB_TILE_CONFIG_SUBPRECISION (0 << 22)
+# define GB_TILE_CONFIG_ALT_TILING_DEF (0 << 23)
+# define GB_TILE_CONFIG_ALT_TILING_3_2 (1 << 23)
+# define GB_TILE_CONFIG_Z_EXTENDED_24_1 (0 << 24)
+# define GB_TILE_CONFIG_Z_EXTENDED_S25_1 (1 << 24)
+
+/* Specifies the sizes of the various FIFOs in the sc/rs/us. This register must be the first one written. */
+#define R300_GB_FIFO_SIZE 0x4024
+ /* each of the following is 2 bits wide */
+#define R300_GB_FIFO_SIZE_32 0
+#define R300_GB_FIFO_SIZE_64 1
+#define R300_GB_FIFO_SIZE_128 2
+#define R300_GB_FIFO_SIZE_256 3
+# define R300_SC_IFIFO_SIZE_SHIFT 0
+# define R300_SC_TZFIFO_SIZE_SHIFT 2
+# define R300_SC_BFIFO_SIZE_SHIFT 4
+
+# define R300_US_OFIFO_SIZE_SHIFT 12
+# define R300_US_WFIFO_SIZE_SHIFT 14
+ /* the following use the same constants as above, but the meaning
+    is times 2 (i.e. instead of 32 words it means 64 words) */
+# define R300_RS_TFIFO_SIZE_SHIFT 6
+# define R300_RS_CFIFO_SIZE_SHIFT 8
+# define R300_US_RAM_SIZE_SHIFT 10
+ /* watermarks, 3 bits wide */
+# define R300_RS_HIGHWATER_COL_SHIFT 16
+# define R300_RS_HIGHWATER_TEX_SHIFT 19
+# define R300_OFIFO_HIGHWATER_SHIFT 22 /* two bits only */
+# define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT 24
+
+#define GB_Z_PEQ_CONFIG 0x4028
+# define GB_Z_PEQ_CONFIG_Z_PEQ_SIZE_4_4 (0 << 0)
+# define GB_Z_PEQ_CONFIG_Z_PEQ_SIZE_8_8 (1 << 0)
+
+/* Specifies various polygon specific selects (fog, depth, perspective). */
+#define R300_GB_SELECT 0x401c
+# define R300_GB_FOG_SELECT_C0A (0 << 0)
+# define R300_GB_FOG_SELECT_C1A (1 << 0)
+# define R300_GB_FOG_SELECT_C2A (2 << 0)
+# define R300_GB_FOG_SELECT_C3A (3 << 0)
+# define R300_GB_FOG_SELECT_1_1_W (4 << 0)
+# define R300_GB_FOG_SELECT_Z (5 << 0)
+# define R300_GB_DEPTH_SELECT_Z (0 << 3)
+# define R300_GB_DEPTH_SELECT_1_1_W (1 << 3)
+# define R300_GB_W_SELECT_1_W (0 << 4)
+# define R300_GB_W_SELECT_1 (1 << 4)
+# define R300_GB_FOG_STUFF_DISABLE (0 << 5)
+# define R300_GB_FOG_STUFF_ENABLE (1 << 5)
+# define R300_GB_FOG_STUFF_TEX_SHIFT 6
+# define R300_GB_FOG_STUFF_TEX_MASK 0x000003c0
+# define R300_GB_FOG_STUFF_COMP_SHIFT 10
+# define R300_GB_FOG_STUFF_COMP_MASK 0x00000c00
+
+/* Specifies the graphics pipeline configuration for antialiasing. */
+#define R300_GB_AA_CONFIG 0x4020
+# define GB_AA_CONFIG_AA_DISABLE (0 << 0)
+# define GB_AA_CONFIG_AA_ENABLE (1 << 0)
+# define GB_AA_CONFIG_NUM_AA_SUBSAMPLES_2 (0 << 1)
+# define GB_AA_CONFIG_NUM_AA_SUBSAMPLES_3 (1 << 1)
+# define GB_AA_CONFIG_NUM_AA_SUBSAMPLES_4 (2 << 1)
+# define GB_AA_CONFIG_NUM_AA_SUBSAMPLES_6 (3 << 1)
+
+/* Selects which of 4 pipes are active. */
+#define GB_PIPE_SELECT 0x402c
+# define GB_PIPE_SELECT_PIPE0_ID_SHIFT 0
+# define GB_PIPE_SELECT_PIPE1_ID_SHIFT 2
+# define GB_PIPE_SELECT_PIPE2_ID_SHIFT 4
+# define GB_PIPE_SELECT_PIPE3_ID_SHIFT 6
+# define GB_PIPE_SELECT_PIPE_MASK_SHIFT 8
+# define GB_PIPE_SELECT_MAX_PIPE 12
+# define GB_PIPE_SELECT_BAD_PIPES 14
+# define GB_PIPE_SELECT_CONFIG_PIPES 18
+
+
+/* Specifies the sizes of the various FIFOs in the sc/rs. */
+#define GB_FIFO_SIZE1 0x4070
+/* High water mark for SC input fifo */
+# define GB_FIFO_SIZE1_SC_HIGHWATER_IFIFO_SHIFT 0
+# define GB_FIFO_SIZE1_SC_HIGHWATER_IFIFO_MASK 0x0000003f
+/* High water mark for SC input fifo (B) */
+# define GB_FIFO_SIZE1_SC_HIGHWATER_BFIFO_SHIFT 6
+# define GB_FIFO_SIZE1_SC_HIGHWATER_BFIFO_MASK 0x00000fc0
+/* High water mark for RS colors' fifo */
+# define GB_FIFO_SIZE1_SC_HIGHWATER_COL_SHIFT 12
+# define GB_FIFO_SIZE1_SC_HIGHWATER_COL_MASK 0x0003f000
+/* High water mark for RS textures' fifo */
+# define GB_FIFO_SIZE1_SC_HIGHWATER_TEX_SHIFT 18
+# define GB_FIFO_SIZE1_SC_HIGHWATER_TEX_MASK 0x00fc0000
+
+/* This table specifies the source location and format for up to 16 texture
+ * addresses (i[0]:i[15]) and four colors (c[0]:c[3])
+ */
+#define R500_RS_IP_0 0x4074
+#define R500_RS_IP_1 0x4078
+#define R500_RS_IP_2 0x407C
+#define R500_RS_IP_3 0x4080
+#define R500_RS_IP_4 0x4084
+#define R500_RS_IP_5 0x4088
+#define R500_RS_IP_6 0x408C
+#define R500_RS_IP_7 0x4090
+#define R500_RS_IP_8 0x4094
+#define R500_RS_IP_9 0x4098
+#define R500_RS_IP_10 0x409C
+#define R500_RS_IP_11 0x40A0
+#define R500_RS_IP_12 0x40A4
+#define R500_RS_IP_13 0x40A8
+#define R500_RS_IP_14 0x40AC
+#define R500_RS_IP_15 0x40B0
+#define R500_RS_IP_PTR_K0 62
+#define R500_RS_IP_PTR_K1 63
+#define R500_RS_IP_TEX_PTR_S_SHIFT 0
+#define R500_RS_IP_TEX_PTR_T_SHIFT 6
+#define R500_RS_IP_TEX_PTR_R_SHIFT 12
+#define R500_RS_IP_TEX_PTR_Q_SHIFT 18
+#define R500_RS_IP_COL_PTR_SHIFT 24
+#define R500_RS_IP_COL_FMT_SHIFT 27
+# define R500_RS_SEL_S(x) ((x) << 0)
+# define R500_RS_SEL_T(x) ((x) << 6)
+# define R500_RS_SEL_R(x) ((x) << 12)
+# define R500_RS_SEL_Q(x) ((x) << 18)
+# define R500_RS_COL_PTR(x) ((x) << 24)
+# define R500_RS_COL_FMT(x) ((x) << 27)
+/* gap */
+#define R500_RS_IP_OFFSET_DIS (0 << 31)
+#define R500_RS_IP_OFFSET_EN (1 << 31)
+
+/* gap */
+
+/* Zero to flush caches. */
+#define R300_TX_INVALTAGS 0x4100
+#define R300_TX_FLUSH 0x0
+
+/* The upper enable bits are guessed, based on fglrx reported limits. */
+#define R300_TX_ENABLE 0x4104
+# define R300_TX_ENABLE_0 (1 << 0)
+# define R300_TX_ENABLE_1 (1 << 1)
+# define R300_TX_ENABLE_2 (1 << 2)
+# define R300_TX_ENABLE_3 (1 << 3)
+# define R300_TX_ENABLE_4 (1 << 4)
+# define R300_TX_ENABLE_5 (1 << 5)
+# define R300_TX_ENABLE_6 (1 << 6)
+# define R300_TX_ENABLE_7 (1 << 7)
+# define R300_TX_ENABLE_8 (1 << 8)
+# define R300_TX_ENABLE_9 (1 << 9)
+# define R300_TX_ENABLE_10 (1 << 10)
+# define R300_TX_ENABLE_11 (1 << 11)
+# define R300_TX_ENABLE_12 (1 << 12)
+# define R300_TX_ENABLE_13 (1 << 13)
+# define R300_TX_ENABLE_14 (1 << 14)
+# define R300_TX_ENABLE_15 (1 << 15)
+
+#define R500_TX_FILTER_4 0x4110
+# define R500_TX_WEIGHT_1_SHIFT (0)
+# define R500_TX_WEIGHT_0_SHIFT (11)
+# define R500_TX_WEIGHT_PAIR (1<<22)
+# define R500_TX_PHASE_SHIFT (23)
+# define R500_TX_DIRECTION_HORIZONTAL (0<<27)
+# define R500_TX_DIRECTION_VERITCAL (1<<27)
+
+/* S Texture Coordinate of Vertex 0 for Point texture stuffing (LLC) */
+#define R300_GA_POINT_S0 0x4200
+
+/* T Texture Coordinate of Vertex 0 for Point texture stuffing (LLC) */
+#define R300_GA_POINT_T0 0x4204
+
+/* S Texture Coordinate of Vertex 2 for Point texture stuffing (URC) */
+#define R300_GA_POINT_S1 0x4208
+
+/* T Texture Coordinate of Vertex 2 for Point texture stuffing (URC) */
+#define R300_GA_POINT_T1 0x420c
+
+/* Specifies amount to shift integer position of vertex (screen space) before
+ * converting to float for triangle stipple.
+ */
+#define R300_GA_TRIANGLE_STIPPLE 0x4214
+# define R300_GA_TRIANGLE_STIPPLE_X_SHIFT_SHIFT 0
+# define R300_GA_TRIANGLE_STIPPLE_X_SHIFT_MASK 0x0000000f
+# define R300_GA_TRIANGLE_STIPPLE_Y_SHIFT_SHIFT 16
+# define R300_GA_TRIANGLE_STIPPLE_Y_SHIFT_MASK 0x000f0000
+
+/* The pointsize is given in multiples of 6. The pointsize can be enormous:
+ * Clear() renders a single point that fills the entire framebuffer.
+ * 1/2 Height of point; fixed (16.0), subpixel format (1/12 or 1/16, even if in
+ * 8b precision).
+ */
+#define R300_GA_POINT_SIZE 0x421C
+# define R300_POINTSIZE_Y_SHIFT 0
+# define R300_POINTSIZE_Y_MASK 0x0000ffff
+# define R300_POINTSIZE_X_SHIFT 16
+# define R300_POINTSIZE_X_MASK 0xffff0000
+# define R300_POINTSIZE_MAX (R300_POINTSIZE_Y_MASK / 6)
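+
+/* Illustrative sketch only: packing a point diameter (in pixels) into
+ * GA_POINT_SIZE. Following the "multiples of 6" note above, the fields take
+ * half the point size in 1/12-subpixel units, i.e. size * 6; this is an
+ * interpretation of the comment, not a documented formula.
+ */
+#if 0
+static uint32_t ga_point_size(float size_in_pixels)
+{
+    uint32_t half = (uint32_t)(size_in_pixels * 6.0f);
+
+    return ((half << R300_POINTSIZE_X_SHIFT) & R300_POINTSIZE_X_MASK) |
+           ((half << R300_POINTSIZE_Y_SHIFT) & R300_POINTSIZE_Y_MASK);
+}
+#endif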
+
+/* Red fill color */
+#define R500_GA_FILL_R 0x4220
+
+/* Green fill color */
+#define R500_GA_FILL_G 0x4224
+
+/* Blue fill color */
+#define R500_GA_FILL_B 0x4228
+
+/* Alpha fill color */
+#define R500_GA_FILL_A 0x422c
+
+
+/* Specifies maximum and minimum point & sprite sizes for per vertex size
+ * specification. The lower part (15:0) is MIN and (31:16) is max.
+ */
+#define R300_GA_POINT_MINMAX 0x4230
+# define R300_GA_POINT_MINMAX_MIN_SHIFT 0
+# define R300_GA_POINT_MINMAX_MIN_MASK (0xFFFF << 0)
+# define R300_GA_POINT_MINMAX_MAX_SHIFT 16
+# define R300_GA_POINT_MINMAX_MAX_MASK (0xFFFF << 16)
+
+/* 1/2 width of line, in subpixels (1/12 or 1/16 only, even in 8b
+ * subprecision); (16.0) fixed format.
+ *
+ * The line width is given in multiples of 6.
+ * In default mode lines are classified as vertical lines.
+ * HO: horizontal
+ * VE: vertical or horizontal
+ * HO & VE: no classification
+ */
+#define R300_GA_LINE_CNTL 0x4234
+# define R300_GA_LINE_CNTL_WIDTH_SHIFT 0
+# define R300_GA_LINE_CNTL_WIDTH_MASK 0x0000ffff
+# define R300_GA_LINE_CNTL_END_TYPE_HOR (0 << 16)
+# define R300_GA_LINE_CNTL_END_TYPE_VER (1 << 16)
+# define R300_GA_LINE_CNTL_END_TYPE_SQR (2 << 16) /* horizontal or vertical depending upon slope */
+# define R300_GA_LINE_CNTL_END_TYPE_COMP (3 << 16) /* Computed (perpendicular to slope) */
+# define R500_GA_LINE_CNTL_SORT_NO (0 << 18)
+# define R500_GA_LINE_CNTL_SORT_MINX_MINY (1 << 18)
+/** TODO: looks wrong */
+# define R300_LINESIZE_MAX (R300_GA_LINE_CNTL_WIDTH_MASK / 6)
+/** TODO: looks wrong */
+# define R300_LINE_CNT_HO (1 << 16)
+/** TODO: looks wrong */
+# define R300_LINE_CNT_VE (1 << 17)
+
+/* Line Stipple configuration information. */
+#define R300_GA_LINE_STIPPLE_CONFIG 0x4238
+# define R300_GA_LINE_STIPPLE_CONFIG_LINE_RESET_NO (0 << 0)
+# define R300_GA_LINE_STIPPLE_CONFIG_LINE_RESET_LINE (1 << 0)
+# define R300_GA_LINE_STIPPLE_CONFIG_LINE_RESET_PACKET (2 << 0)
+# define R300_GA_LINE_STIPPLE_CONFIG_STIPPLE_SCALE_SHIFT 2
+# define R300_GA_LINE_STIPPLE_CONFIG_STIPPLE_SCALE_MASK 0xfffffffc
+
+/* Used to load US instructions and constants */
+#define R500_GA_US_VECTOR_INDEX 0x4250
+# define R500_GA_US_VECTOR_INDEX_SHIFT 0
+# define R500_GA_US_VECTOR_INDEX_MASK 0x000000ff
+# define R500_GA_US_VECTOR_INDEX_TYPE_INSTR (0 << 16)
+# define R500_GA_US_VECTOR_INDEX_TYPE_CONST (1 << 16)
+# define R500_GA_US_VECTOR_INDEX_CLAMP_NO (0 << 17)
+# define R500_GA_US_VECTOR_INDEX_CLAMP_CONST (1 << 17)
+
+/* Data register for loading US instructions and constants */
+#define R500_GA_US_VECTOR_DATA 0x4254
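+
+/* Illustrative sketch only: uploading fragment shader (US) constants on R500
+ * through GA_US_VECTOR_INDEX/GA_US_VECTOR_DATA, analogous to the PVS upload
+ * mechanism near the top of the file. Usage is assumed; write_reg() is the
+ * hypothetical helper from the first sketch.
+ */
+#if 0
+static void r500_upload_us_consts(const uint32_t *consts, unsigned nvec)
+{
+    unsigned i;
+
+    write_reg(R500_GA_US_VECTOR_INDEX,
+              R500_GA_US_VECTOR_INDEX_TYPE_CONST |
+              (0 << R500_GA_US_VECTOR_INDEX_SHIFT));
+    for (i = 0; i < nvec * 4; i++)
+        write_reg(R500_GA_US_VECTOR_DATA, consts[i]);
+}
+#endif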
+
+/* Specifies color properties and mappings of textures. */
+#define R500_GA_COLOR_CONTROL_PS3 0x4258
+# define R500_TEX0_SHADING_PS3_SOLID (0 << 0)
+# define R500_TEX0_SHADING_PS3_FLAT (1 << 0)
+# define R500_TEX0_SHADING_PS3_GOURAUD (2 << 0)
+# define R500_TEX1_SHADING_PS3_SOLID (0 << 2)
+# define R500_TEX1_SHADING_PS3_FLAT (1 << 2)
+# define R500_TEX1_SHADING_PS3_GOURAUD (2 << 2)
+# define R500_TEX2_SHADING_PS3_SOLID (0 << 4)
+# define R500_TEX2_SHADING_PS3_FLAT (1 << 4)
+# define R500_TEX2_SHADING_PS3_GOURAUD (2 << 4)
+# define R500_TEX3_SHADING_PS3_SOLID (0 << 6)
+# define R500_TEX3_SHADING_PS3_FLAT (1 << 6)
+# define R500_TEX3_SHADING_PS3_GOURAUD (2 << 6)
+# define R500_TEX4_SHADING_PS3_SOLID (0 << 8)
+# define R500_TEX4_SHADING_PS3_FLAT (1 << 8)
+# define R500_TEX4_SHADING_PS3_GOURAUD (2 << 8)
+# define R500_TEX5_SHADING_PS3_SOLID (0 << 10)
+# define R500_TEX5_SHADING_PS3_FLAT (1 << 10)
+# define R500_TEX5_SHADING_PS3_GOURAUD (2 << 10)
+# define R500_TEX6_SHADING_PS3_SOLID (0 << 12)
+# define R500_TEX6_SHADING_PS3_FLAT (1 << 12)
+# define R500_TEX6_SHADING_PS3_GOURAUD (2 << 12)
+# define R500_TEX7_SHADING_PS3_SOLID (0 << 14)
+# define R500_TEX7_SHADING_PS3_FLAT (1 << 14)
+# define R500_TEX7_SHADING_PS3_GOURAUD (2 << 14)
+# define R500_TEX8_SHADING_PS3_SOLID (0 << 16)
+# define R500_TEX8_SHADING_PS3_FLAT (1 << 16)
+# define R500_TEX8_SHADING_PS3_GOURAUD (2 << 16)
+# define R500_TEX9_SHADING_PS3_SOLID (0 << 18)
+# define R500_TEX9_SHADING_PS3_FLAT (1 << 18)
+# define R500_TEX9_SHADING_PS3_GOURAUD (2 << 18)
+# define R500_TEX10_SHADING_PS3_SOLID (0 << 20)
+# define R500_TEX10_SHADING_PS3_FLAT (1 << 20)
+# define R500_TEX10_SHADING_PS3_GOURAUD (2 << 20)
+# define R500_COLOR0_TEX_OVERRIDE_NO (0 << 22)
+# define R500_COLOR0_TEX_OVERRIDE_TEX_0 (1 << 22)
+# define R500_COLOR0_TEX_OVERRIDE_TEX_1 (2 << 22)
+# define R500_COLOR0_TEX_OVERRIDE_TEX_2 (3 << 22)
+# define R500_COLOR0_TEX_OVERRIDE_TEX_3 (4 << 22)
+# define R500_COLOR0_TEX_OVERRIDE_TEX_4 (5 << 22)
+# define R500_COLOR0_TEX_OVERRIDE_TEX_5 (6 << 22)
+# define R500_COLOR0_TEX_OVERRIDE_TEX_6 (7 << 22)
+# define R500_COLOR0_TEX_OVERRIDE_TEX_7 (8 << 22)
+# define R500_COLOR0_TEX_OVERRIDE_TEX_8_C2 (9 << 22)
+# define R500_COLOR0_TEX_OVERRIDE_TEX_9_C3 (10 << 22)
+# define R500_COLOR1_TEX_OVERRIDE_NO (0 << 26)
+# define R500_COLOR1_TEX_OVERRIDE_TEX_0 (1 << 26)
+# define R500_COLOR1_TEX_OVERRIDE_TEX_1 (2 << 26)
+# define R500_COLOR1_TEX_OVERRIDE_TEX_2 (3 << 26)
+# define R500_COLOR1_TEX_OVERRIDE_TEX_3 (4 << 26)
+# define R500_COLOR1_TEX_OVERRIDE_TEX_4 (5 << 26)
+# define R500_COLOR1_TEX_OVERRIDE_TEX_5 (6 << 26)
+# define R500_COLOR1_TEX_OVERRIDE_TEX_6 (7 << 26)
+# define R500_COLOR1_TEX_OVERRIDE_TEX_7 (8 << 26)
+# define R500_COLOR1_TEX_OVERRIDE_TEX_8_C2 (9 << 26)
+# define R500_COLOR1_TEX_OVERRIDE_TEX_9_C3 (10 << 26)
+
+/* Returns the idle status of the various G3D blocks, captured when GA_IDLE is
+ * written or when a hard or soft reset is asserted.
+ */
+#define R500_GA_IDLE 0x425c
+# define R500_GA_IDLE_PIPE3_Z_IDLE (0 << 0)
+# define R500_GA_IDLE_PIPE2_Z_IDLE (0 << 1)
+# define R500_GA_IDLE_PIPE3_CD_IDLE (0 << 2)
+# define R500_GA_IDLE_PIPE2_CD_IDLE (0 << 3)
+# define R500_GA_IDLE_PIPE3_FG_IDLE (0 << 4)
+# define R500_GA_IDLE_PIPE2_FG_IDLE (0 << 5)
+# define R500_GA_IDLE_PIPE3_US_IDLE (0 << 6)
+# define R500_GA_IDLE_PIPE2_US_IDLE (0 << 7)
+# define R500_GA_IDLE_PIPE3_SC_IDLE (0 << 8)
+# define R500_GA_IDLE_PIPE2_SC_IDLE (0 << 9)
+# define R500_GA_IDLE_PIPE3_RS_IDLE (0 << 10)
+# define R500_GA_IDLE_PIPE2_RS_IDLE (0 << 11)
+# define R500_GA_IDLE_PIPE1_Z_IDLE (0 << 12)
+# define R500_GA_IDLE_PIPE0_Z_IDLE (0 << 13)
+# define R500_GA_IDLE_PIPE1_CD_IDLE (0 << 14)
+# define R500_GA_IDLE_PIPE0_CD_IDLE (0 << 15)
+# define R500_GA_IDLE_PIPE1_FG_IDLE (0 << 16)
+# define R500_GA_IDLE_PIPE0_FG_IDLE (0 << 17)
+# define R500_GA_IDLE_PIPE1_US_IDLE (0 << 18)
+# define R500_GA_IDLE_PIPE0_US_IDLE (0 << 19)
+# define R500_GA_IDLE_PIPE1_SC_IDLE (0 << 20)
+# define R500_GA_IDLE_PIPE0_SC_IDLE (0 << 21)
+# define R500_GA_IDLE_PIPE1_RS_IDLE (0 << 22)
+# define R500_GA_IDLE_PIPE0_RS_IDLE (0 << 23)
+# define R500_GA_IDLE_SU_IDLE (0 << 24)
+# define R500_GA_IDLE_GA_IDLE (0 << 25)
+# define R500_GA_IDLE_GA_UNIT2_IDLE (0 << 26)
+
+/* Current value of stipple accumulator. */
+#define R300_GA_LINE_STIPPLE_VALUE 0x4260
+
+/* S Texture Coordinate Value for Vertex 0 of Line (stuff textures -- i.e. AA) */
+#define R300_GA_LINE_S0 0x4264
+/* S Texture Coordinate Value for Vertex 1 of Lines (V2 of parallelogram -- stuff textures -- i.e. AA) */
+#define R300_GA_LINE_S1 0x4268
+
+/* GA Input fifo high water marks */
+#define R500_GA_FIFO_CNTL 0x4270
+# define R500_GA_FIFO_CNTL_VERTEX_FIFO_MASK 0x00000007
+# define R500_GA_FIFO_CNTL_VERTEX_FIFO_SHIFT 0
+# define R500_GA_FIFO_CNTL_VERTEX_INDEX_MASK 0x00000038
+# define R500_GA_FIFO_CNTL_VERTEX_INDEX_SHIFT 3
+# define R500_GA_FIFO_CNTL_VERTEX_REG_MASK 0x00003fc0
+# define R500_GA_FIFO_CNTL_VERTEX_REG_SHIFT 6
+
+/* GA enhance/tweaks */
+#define R300_GA_ENHANCE 0x4274
+# define R300_GA_ENHANCE_DEADLOCK_CNTL_NO_EFFECT (0 << 0)
+# define R300_GA_ENHANCE_DEADLOCK_CNTL_PREVENT_TCL (1 << 0) /* Prevents TCL interface from deadlocking on GA side. */
+# define R300_GA_ENHANCE_FASTSYNC_CNTL_NO_EFFECT (0 << 1)
+# define R300_GA_ENHANCE_FASTSYNC_CNTL_ENABLE (1 << 1) /* Enables high-performance register/primitive switching. */
+# define R500_GA_ENHANCE_REG_READWRITE_NO_EFFECT (0 << 2) /* R520+ only */
+# define R500_GA_ENHANCE_REG_READWRITE_ENABLE (1 << 2) /* R520+ only, Enables GA support of simultaneous register reads and writes. */
+# define R500_GA_ENHANCE_REG_NOSTALL_NO_EFFECT (0 << 3)
+# define R500_GA_ENHANCE_REG_NOSTALL_ENABLE (1 << 3) /* Enables GA support of no-stall reads for register read back. */
+
+#define R300_GA_COLOR_CONTROL 0x4278
+# define R300_GA_COLOR_CONTROL_RGB0_SHADING_SOLID (0 << 0)
+# define R300_GA_COLOR_CONTROL_RGB0_SHADING_FLAT (1 << 0)
+# define R300_GA_COLOR_CONTROL_RGB0_SHADING_GOURAUD (2 << 0)
+# define R300_GA_COLOR_CONTROL_ALPHA0_SHADING_SOLID (0 << 2)
+# define R300_GA_COLOR_CONTROL_ALPHA0_SHADING_FLAT (1 << 2)
+# define R300_GA_COLOR_CONTROL_ALPHA0_SHADING_GOURAUD (2 << 2)
+# define R300_GA_COLOR_CONTROL_RGB1_SHADING_SOLID (0 << 4)
+# define R300_GA_COLOR_CONTROL_RGB1_SHADING_FLAT (1 << 4)
+# define R300_GA_COLOR_CONTROL_RGB1_SHADING_GOURAUD (2 << 4)
+# define R300_GA_COLOR_CONTROL_ALPHA1_SHADING_SOLID (0 << 6)
+# define R300_GA_COLOR_CONTROL_ALPHA1_SHADING_FLAT (1 << 6)
+# define R300_GA_COLOR_CONTROL_ALPHA1_SHADING_GOURAUD (2 << 6)
+# define R300_GA_COLOR_CONTROL_RGB2_SHADING_SOLID (0 << 8)
+# define R300_GA_COLOR_CONTROL_RGB2_SHADING_FLAT (1 << 8)
+# define R300_GA_COLOR_CONTROL_RGB2_SHADING_GOURAUD (2 << 8)
+# define R300_GA_COLOR_CONTROL_ALPHA2_SHADING_SOLID (0 << 10)
+# define R300_GA_COLOR_CONTROL_ALPHA2_SHADING_FLAT (1 << 10)
+# define R300_GA_COLOR_CONTROL_ALPHA2_SHADING_GOURAUD (2 << 10)
+# define R300_GA_COLOR_CONTROL_RGB3_SHADING_SOLID (0 << 12)
+# define R300_GA_COLOR_CONTROL_RGB3_SHADING_FLAT (1 << 12)
+# define R300_GA_COLOR_CONTROL_RGB3_SHADING_GOURAUD (2 << 12)
+# define R300_GA_COLOR_CONTROL_ALPHA3_SHADING_SOLID (0 << 14)
+# define R300_GA_COLOR_CONTROL_ALPHA3_SHADING_FLAT (1 << 14)
+# define R300_GA_COLOR_CONTROL_ALPHA3_SHADING_GOURAUD (2 << 14)
+# define R300_GA_COLOR_CONTROL_PROVOKING_VERTEX_FIRST (0 << 16)
+# define R300_GA_COLOR_CONTROL_PROVOKING_VERTEX_SECOND (1 << 16)
+# define R300_GA_COLOR_CONTROL_PROVOKING_VERTEX_THIRD (2 << 16)
+# define R300_GA_COLOR_CONTROL_PROVOKING_VERTEX_LAST (3 << 16)
+
+# define R300_SHADE_MODEL_FLAT ( \
+ R300_GA_COLOR_CONTROL_RGB0_SHADING_FLAT | \
+ R300_GA_COLOR_CONTROL_ALPHA0_SHADING_FLAT | \
+ R300_GA_COLOR_CONTROL_RGB1_SHADING_FLAT | \
+ R300_GA_COLOR_CONTROL_ALPHA1_SHADING_FLAT | \
+ R300_GA_COLOR_CONTROL_RGB2_SHADING_FLAT | \
+ R300_GA_COLOR_CONTROL_ALPHA2_SHADING_FLAT | \
+ R300_GA_COLOR_CONTROL_RGB3_SHADING_FLAT | \
+ R300_GA_COLOR_CONTROL_ALPHA3_SHADING_FLAT )
+
+# define R300_SHADE_MODEL_SMOOTH ( \
+ R300_GA_COLOR_CONTROL_RGB0_SHADING_GOURAUD | \
+ R300_GA_COLOR_CONTROL_ALPHA0_SHADING_GOURAUD | \
+ R300_GA_COLOR_CONTROL_RGB1_SHADING_GOURAUD | \
+ R300_GA_COLOR_CONTROL_ALPHA1_SHADING_GOURAUD | \
+ R300_GA_COLOR_CONTROL_RGB2_SHADING_GOURAUD | \
+ R300_GA_COLOR_CONTROL_ALPHA2_SHADING_GOURAUD | \
+ R300_GA_COLOR_CONTROL_RGB3_SHADING_GOURAUD | \
+ R300_GA_COLOR_CONTROL_ALPHA3_SHADING_GOURAUD )
+
+/* Specifies red & green components of fill color -- S312 format -- Backwards comp. */
+#define R300_GA_SOLID_RG 0x427c
+# define GA_SOLID_RG_COLOR_GREEN_SHIFT 0
+# define GA_SOLID_RG_COLOR_GREEN_MASK 0x0000ffff
+# define GA_SOLID_RG_COLOR_RED_SHIFT 16
+# define GA_SOLID_RG_COLOR_RED_MASK 0xffff0000
+/* Specifies blue & alpha components of fill color -- S312 format -- Backwards comp. */
+#define R300_GA_SOLID_BA 0x4280
+# define GA_SOLID_BA_COLOR_ALPHA_SHIFT 0
+# define GA_SOLID_BA_COLOR_ALPHA_MASK 0x0000ffff
+# define GA_SOLID_BA_COLOR_BLUE_SHIFT 16
+# define GA_SOLID_BA_COLOR_BLUE_MASK 0xffff0000
+
+/* Polygon Mode
+ * Dangerous
+ */
+#define R300_GA_POLY_MODE 0x4288
+# define R300_GA_POLY_MODE_DISABLE (0 << 0)
+# define R300_GA_POLY_MODE_DUAL (1 << 0) /* send 2 sets of 3 polys with specified poly type */
+/* reserved */
+# define R300_GA_POLY_MODE_FRONT_PTYPE_POINT (0 << 4)
+# define R300_GA_POLY_MODE_FRONT_PTYPE_LINE (1 << 4)
+# define R300_GA_POLY_MODE_FRONT_PTYPE_TRI (2 << 4)
+/* reserved */
+# define R300_GA_POLY_MODE_BACK_PTYPE_POINT (0 << 7)
+# define R300_GA_POLY_MODE_BACK_PTYPE_LINE (1 << 7)
+# define R300_GA_POLY_MODE_BACK_PTYPE_TRI (2 << 7)
+/* reserved */
+
+/* Specifies the rounding mode for geometry & color SPFP to FP conversions. */
+#define R300_GA_ROUND_MODE 0x428c
+# define R300_GA_ROUND_MODE_GEOMETRY_ROUND_TRUNC (0 << 0)
+# define R300_GA_ROUND_MODE_GEOMETRY_ROUND_NEAREST (1 << 0)
+# define R300_GA_ROUND_MODE_COLOR_ROUND_TRUNC (0 << 2)
+# define R300_GA_ROUND_MODE_COLOR_ROUND_NEAREST (1 << 2)
+# define R300_GA_ROUND_MODE_RGB_CLAMP_RGB (0 << 4)
+# define R300_GA_ROUND_MODE_RGB_CLAMP_FP20 (1 << 4)
+# define R300_GA_ROUND_MODE_ALPHA_CLAMP_RGB (0 << 5)
+# define R300_GA_ROUND_MODE_ALPHA_CLAMP_FP20 (1 << 5)
+# define R500_GA_ROUND_MODE_GEOMETRY_MASK_SHIFT 6
+# define R500_GA_ROUND_MODE_GEOMETRY_MASK_MASK 0x000003c0
+
+/* Specifies x & y offsets for vertex data after conversion to FP.
+ * Offsets are in S15 format (subpixels -- 1/12 or 1/16, even in 8b
+ * subprecision).
+ */
+#define R300_GA_OFFSET 0x4290
+# define R300_GA_OFFSET_X_OFFSET_SHIFT 0
+# define R300_GA_OFFSET_X_OFFSET_MASK 0x0000ffff
+# define R300_GA_OFFSET_Y_OFFSET_SHIFT 16
+# define R300_GA_OFFSET_Y_OFFSET_MASK 0xffff0000
+
+/* Specifies the scale to apply to fog. */
+#define R300_GA_FOG_SCALE 0x4294
+/* Specifies the offset to apply to fog. */
+#define R300_GA_FOG_OFFSET 0x4298
+/* Specifies number of cycles to assert reset, and also causes RB3D soft reset to assert. */
+#define R300_GA_SOFT_RESET 0x429c
+
+/* Not sure why there are duplicates of the factor and constant values.
+ * My best guess so far is that there are separate zbiases for test and write.
+ * Ordering might be wrong.
+ * Some of the tests indicate that fgl has a fallback implementation of zbias
+ * via pixel shaders.
+ */
+#define R300_SU_TEX_WRAP 0x42A0
+#define R300_SU_POLY_OFFSET_FRONT_SCALE 0x42A4
+#define R300_SU_POLY_OFFSET_FRONT_OFFSET 0x42A8
+#define R300_SU_POLY_OFFSET_BACK_SCALE 0x42AC
+#define R300_SU_POLY_OFFSET_BACK_OFFSET 0x42B0
+
+/* This register needs to be set to (1<<1) for RV350 to correctly
+ * perform depth test (see --vb-triangles in r300_demo)
+ * Don't know about other chips. - Vladimir
+ * This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
+ * My guess is that there are two bits for each zbias primitive
+ * (FILL, LINE, POINT).
+ * One to enable depth test and one for depth write.
+ * Yet this doesn't explain why depth writes work ...
+ */
+#define R300_SU_POLY_OFFSET_ENABLE 0x42B4
+# define R300_FRONT_ENABLE (1 << 0)
+# define R300_BACK_ENABLE (1 << 1)
+# define R300_PARA_ENABLE (1 << 2)
+
+#define R300_SU_CULL_MODE 0x42B8
+# define R300_CULL_FRONT (1 << 0)
+# define R300_CULL_BACK (1 << 1)
+# define R300_FRONT_FACE_CCW (0 << 2)
+# define R300_FRONT_FACE_CW (1 << 2)
+
+/* SU Depth Scale value */
+#define R300_SU_DEPTH_SCALE 0x42c0
+/* SU Depth Offset value */
+#define R300_SU_DEPTH_OFFSET 0x42c4
+
+
+/* BEGIN: Rasterization / Interpolators - many guesses */
+
+/*
+ * TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
+ * on the vertex program, *not* the fragment program)
+ */
+#define R300_RS_COUNT 0x4300
+# define R300_IT_COUNT_SHIFT 0
+# define R300_IT_COUNT_MASK 0x0000007f
+# define R300_IC_COUNT_SHIFT 7
+# define R300_IC_COUNT_MASK 0x00000780
+# define R300_W_ADDR_SHIFT 12
+# define R300_W_ADDR_MASK 0x0003f000
+# define R300_HIRES_DIS (0 << 18)
+# define R300_HIRES_EN (1 << 18)
+# define R300_IT_COUNT(x) ((x) << 0)
+# define R300_IC_COUNT(x) ((x) << 7)
+# define R300_W_COUNT(x) ((x) << 12)
+
+#define R300_RS_INST_COUNT 0x4304
+# define R300_RS_INST_COUNT_SHIFT 0
+# define R300_RS_INST_COUNT_MASK 0x0000000f
+# define R300_RS_TX_OFFSET_SHIFT 5
+# define R300_RS_TX_OFFSET_MASK 0x000000e0
+# define R300_RS_TX_OFFSET(x) ((x) << 5)
+
+/* gap */
+
+/* Only used for texture coordinates.
+ * Use the source field to route texture coordinate input from the
+ * vertex program to the desired interpolator. Note that the source
+ * field is relative to the outputs the vertex program *actually*
+ * writes. If a vertex program only writes texcoord[1], this will
+ * be source index 0.
+ * Set INTERP_USED on all interpolators that produce data used by
+ * the fragment program. INTERP_USED looks like a swizzling mask,
+ * but I haven't seen it used that way.
+ *
+ * Note: The _UNKNOWN constants are always set in their respective
+ * register. I don't know if this is necessary.
+ */
+#define R300_RS_IP_0 0x4310
+#define R300_RS_IP_1 0x4314
+#define R300_RS_IP_2 0x4318
+#define R300_RS_IP_3 0x431C
+# define R300_RS_INTERP_SRC_SHIFT 2 /* TODO: check for removal */
+# define R300_RS_INTERP_SRC_MASK (7 << 2) /* TODO: check for removal */
+# define R300_RS_TEX_PTR(x)		((x) << 0)
+# define R300_RS_COL_PTR(x) ((x) << 6)
+# define R300_RS_COL_FMT(x) ((x) << 9)
+# define R300_RS_COL_FMT_RGBA 0
+# define R300_RS_COL_FMT_RGB0 1
+# define R300_RS_COL_FMT_RGB1 2
+# define R300_RS_COL_FMT_000A 4
+# define R300_RS_COL_FMT_0000 5
+# define R300_RS_COL_FMT_0001 6
+# define R300_RS_COL_FMT_111A 8
+# define R300_RS_COL_FMT_1110 9
+# define R300_RS_COL_FMT_1111 10
+# define R300_RS_SEL_S(x) ((x) << 13)
+# define R300_RS_SEL_T(x) ((x) << 16)
+# define R300_RS_SEL_R(x) ((x) << 19)
+# define R300_RS_SEL_Q(x) ((x) << 22)
+# define R300_RS_SEL_C0 0
+# define R300_RS_SEL_C1 1
+# define R300_RS_SEL_C2 2
+# define R300_RS_SEL_C3 3
+# define R300_RS_SEL_K0 4
+# define R300_RS_SEL_K1 5
+
+
+/* R500 rasterizer instructions; the R500 counterparts of the R300_RS_INST_*
+ * registers described below.
+ */
+#define R500_RS_INST_0 0x4320
+#define R500_RS_INST_1 0x4324
+#define R500_RS_INST_2 0x4328
+#define R500_RS_INST_3 0x432c
+#define R500_RS_INST_4 0x4330
+#define R500_RS_INST_5 0x4334
+#define R500_RS_INST_6 0x4338
+#define R500_RS_INST_7 0x433c
+#define R500_RS_INST_8 0x4340
+#define R500_RS_INST_9 0x4344
+#define R500_RS_INST_10 0x4348
+#define R500_RS_INST_11 0x434c
+#define R500_RS_INST_12 0x4350
+#define R500_RS_INST_13 0x4354
+#define R500_RS_INST_14 0x4358
+#define R500_RS_INST_15 0x435c
+#define R500_RS_INST_TEX_ID_SHIFT 0
+# define R500_RS_INST_TEX_ID(x) ((x) << 0)
+#define R500_RS_INST_TEX_CN_WRITE (1 << 4)
+#define R500_RS_INST_TEX_ADDR_SHIFT 5
+# define R500_RS_INST_TEX_ADDR(x)		((x) << 5)
+#define R500_RS_INST_COL_ID_SHIFT 12
+# define R500_RS_INST_COL_ID(x) ((x) << 12)
+#define R500_RS_INST_COL_CN_NO_WRITE (0 << 16)
+#define R500_RS_INST_COL_CN_WRITE (1 << 16)
+#define R500_RS_INST_COL_CN_WRITE_FBUFFER (2 << 16)
+#define R500_RS_INST_COL_CN_WRITE_BACKFACE (3 << 16)
+#define R500_RS_INST_COL_ADDR_SHIFT 18
+# define R500_RS_INST_COL_ADDR(x) ((x) << 18)
+#define R500_RS_INST_TEX_ADJ (1 << 25)
+#define R500_RS_INST_W_CN (1 << 26)
+
+/* These DWORDs control how vertex data is routed into fragment program
+ * registers, after interpolators.
+ */
+#define R300_RS_INST_0 0x4330
+#define R300_RS_INST_1 0x4334
+#define R300_RS_INST_2 0x4338
+#define R300_RS_INST_3 0x433C
+#define R300_RS_INST_4 0x4340
+#define R300_RS_INST_5 0x4344
+#define R300_RS_INST_6 0x4348
+#define R300_RS_INST_7 0x434C
+# define R300_RS_INST_TEX_ID(x) ((x) << 0)
+# define R300_RS_INST_TEX_CN_WRITE (1 << 3)
+# define R300_RS_INST_TEX_ADDR(x) ((x) << 6)
+# define R300_RS_INST_TEX_ADDR_SHIFT 6
+# define R300_RS_INST_COL_ID(x) ((x) << 11)
+# define R300_RS_INST_COL_CN_WRITE (1 << 14)
+# define R300_RS_INST_COL_ADDR(x) ((x) << 17)
+# define R300_RS_INST_COL_ADDR_SHIFT 17
+# define R300_RS_INST_TEX_ADJ (1 << 22)
+# define R300_RS_COL_BIAS_UNUSED_SHIFT 23
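+
+/* Illustrative sketch only: one plausible encoding that routes texture
+ * coordinate set 0 (no swizzle) through interpolator 0 into fragment program
+ * register 0, and color 0 (RGBA) into register 1. The field usage here is an
+ * interpretation of the comments above, not documented behaviour, and
+ * RS_COUNT/RS_INST_COUNT must be programmed consistently as well. write_reg()
+ * is the hypothetical helper from the first sketch.
+ */
+#if 0
+static void r300_route_tex0_col0(void)
+{
+    write_reg(R300_RS_IP_0,
+              R300_RS_SEL_S(R300_RS_SEL_C0) |
+              R300_RS_SEL_T(R300_RS_SEL_C1) |
+              R300_RS_SEL_R(R300_RS_SEL_C2) |
+              R300_RS_SEL_Q(R300_RS_SEL_C3) |
+              R300_RS_COL_PTR(0) |
+              R300_RS_COL_FMT(R300_RS_COL_FMT_RGBA));
+
+    write_reg(R300_RS_INST_0,
+              R300_RS_INST_TEX_ID(0) | R300_RS_INST_TEX_CN_WRITE |
+              R300_RS_INST_TEX_ADDR(0) |
+              R300_RS_INST_COL_ID(0) | R300_RS_INST_COL_CN_WRITE |
+              R300_RS_INST_COL_ADDR(1));
+}
+#endif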
+
+/* END: Rasterization / Interpolators - many guesses */
+
+/* Hierarchical Z Enable */
+#define R300_SC_HYPERZ 0x43a4
+# define R300_SC_HYPERZ_DISABLE (0 << 0)
+# define R300_SC_HYPERZ_ENABLE (1 << 0)
+# define R300_SC_HYPERZ_MIN (0 << 1)
+# define R300_SC_HYPERZ_MAX (1 << 1)
+# define R300_SC_HYPERZ_ADJ_256 (0 << 2)
+# define R300_SC_HYPERZ_ADJ_128 (1 << 2)
+# define R300_SC_HYPERZ_ADJ_64 (2 << 2)
+# define R300_SC_HYPERZ_ADJ_32 (3 << 2)
+# define R300_SC_HYPERZ_ADJ_16 (4 << 2)
+# define R300_SC_HYPERZ_ADJ_8 (5 << 2)
+# define R300_SC_HYPERZ_ADJ_4 (6 << 2)
+# define R300_SC_HYPERZ_ADJ_2 (7 << 2)
+# define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5)
+# define R300_SC_HYPERZ_HZ_Z0MIN (1 << 5)
+# define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6)
+# define R300_SC_HYPERZ_HZ_Z0MAX (1 << 6)
+
+#define R300_SC_EDGERULE 0x43a8
+
+/* BEGIN: Scissors and cliprects */
+
+/* There are four clipping rectangles. Their corner coordinates are inclusive.
+ * Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending
+ * on whether the pixel is inside cliprects 0-3, respectively. For example,
+ * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
+ * the number 3 (binary 0011).
+ * Iff the bit corresponding to the pixel's number in SC_CLIP_RULE (formerly
+ * called RE_CLIPRECT_CNTL) is set, the pixel is rasterized.
+ *
+ * In addition to this, there is a scissors rectangle. Only pixels inside the
+ * scissors rectangle are drawn. (coordinates are inclusive)
+ *
+ * For some reason, the top-left corner of the framebuffer is at (1440, 1440)
+ * for the purpose of clipping and scissors.
+ */
+#define R300_SC_CLIPRECT_TL_0 0x43B0
+#define R300_SC_CLIPRECT_BR_0 0x43B4
+#define R300_SC_CLIPRECT_TL_1 0x43B8
+#define R300_SC_CLIPRECT_BR_1 0x43BC
+#define R300_SC_CLIPRECT_TL_2 0x43C0
+#define R300_SC_CLIPRECT_BR_2 0x43C4
+#define R300_SC_CLIPRECT_TL_3 0x43C8
+#define R300_SC_CLIPRECT_BR_3 0x43CC
+# define R300_CLIPRECT_OFFSET 1440
+# define R300_CLIPRECT_MASK 0x1FFF
+# define R300_CLIPRECT_X_SHIFT 0
+# define R300_CLIPRECT_X_MASK (0x1FFF << 0)
+# define R300_CLIPRECT_Y_SHIFT 13
+# define R300_CLIPRECT_Y_MASK (0x1FFF << 13)
+#define R300_SC_CLIP_RULE 0x43D0
+# define R300_CLIP_OUT (1 << 0)
+# define R300_CLIP_0 (1 << 1)
+# define R300_CLIP_1 (1 << 2)
+# define R300_CLIP_10 (1 << 3)
+# define R300_CLIP_2 (1 << 4)
+# define R300_CLIP_20 (1 << 5)
+# define R300_CLIP_21 (1 << 6)
+# define R300_CLIP_210 (1 << 7)
+# define R300_CLIP_3 (1 << 8)
+# define R300_CLIP_30 (1 << 9)
+# define R300_CLIP_31 (1 << 10)
+# define R300_CLIP_310 (1 << 11)
+# define R300_CLIP_32 (1 << 12)
+# define R300_CLIP_320 (1 << 13)
+# define R300_CLIP_321 (1 << 14)
+# define R300_CLIP_3210 (1 << 15)
+
+/* gap */
+
+#define R300_SC_SCISSORS_TL 0x43E0
+#define R300_SC_SCISSORS_BR 0x43E4
+# define R300_SCISSORS_OFFSET 1440
+# define R300_SCISSORS_X_SHIFT 0
+# define R300_SCISSORS_X_MASK (0x1FFF << 0)
+# define R300_SCISSORS_Y_SHIFT 13
+# define R300_SCISSORS_Y_MASK (0x1FFF << 13)
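+
+/* Illustrative sketch only: programming cliprect 0 and the scissors to cover
+ * a w x h framebuffer, applying the 1440 coordinate offset described above,
+ * and accepting exactly the pixels inside cliprect 0 (every pixel number with
+ * bit 0 set, i.e. rule mask 0xAAAA). The values are assumptions for
+ * illustration; write_reg() is the hypothetical helper from the first sketch.
+ */
+#if 0
+static void r300_set_clip_and_scissors(unsigned w, unsigned h)
+{
+    uint32_t tl = ((0 + R300_CLIPRECT_OFFSET) << R300_CLIPRECT_X_SHIFT) |
+                  ((0 + R300_CLIPRECT_OFFSET) << R300_CLIPRECT_Y_SHIFT);
+    uint32_t br = ((w - 1 + R300_CLIPRECT_OFFSET) << R300_CLIPRECT_X_SHIFT) |
+                  ((h - 1 + R300_CLIPRECT_OFFSET) << R300_CLIPRECT_Y_SHIFT);
+
+    write_reg(R300_SC_CLIPRECT_TL_0, tl);
+    write_reg(R300_SC_CLIPRECT_BR_0, br);
+    write_reg(R300_SC_CLIP_RULE, 0xAAAA);  /* rasterize iff inside cliprect 0 */
+
+    /* The scissors registers use the same field layout and offset. */
+    write_reg(R300_SC_SCISSORS_TL, tl);
+    write_reg(R300_SC_SCISSORS_BR, br);
+}
+#endif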
+
+/* Screen door sample mask */
+#define R300_SC_SCREENDOOR 0x43e8
+
+/* END: Scissors and cliprects */
+
+/* BEGIN: Texture specification */
+
+/*
+ * The texture specification dwords are grouped by meaning and not by texture
+ * unit. This means that e.g. the offset for texture image unit N is found in
+ * register TX_OFFSET_0 + (4*N)
+ */
+#define R300_TX_FILTER0_0 0x4400
+#define R300_TX_FILTER0_1 0x4404
+#define R300_TX_FILTER0_2 0x4408
+#define R300_TX_FILTER0_3 0x440c
+#define R300_TX_FILTER0_4 0x4410
+#define R300_TX_FILTER0_5 0x4414
+#define R300_TX_FILTER0_6 0x4418
+#define R300_TX_FILTER0_7 0x441c
+#define R300_TX_FILTER0_8 0x4420
+#define R300_TX_FILTER0_9 0x4424
+#define R300_TX_FILTER0_10 0x4428
+#define R300_TX_FILTER0_11 0x442c
+#define R300_TX_FILTER0_12 0x4430
+#define R300_TX_FILTER0_13 0x4434
+#define R300_TX_FILTER0_14 0x4438
+#define R300_TX_FILTER0_15 0x443c
+# define R300_TX_REPEAT 0
+# define R300_TX_MIRRORED 1
+# define R300_TX_CLAMP_TO_EDGE 2
+# define R300_TX_MIRROR_ONCE_TO_EDGE 3
+# define R300_TX_CLAMP 4
+# define R300_TX_MIRROR_ONCE 5
+# define R300_TX_CLAMP_TO_BORDER 6
+# define R300_TX_MIRROR_ONCE_TO_BORDER 7
+# define R300_TX_WRAP_S_SHIFT 0
+# define R300_TX_WRAP_S_MASK (7 << 0)
+# define R300_TX_WRAP_T_SHIFT 3
+# define R300_TX_WRAP_T_MASK (7 << 3)
+# define R300_TX_WRAP_R_SHIFT 6
+# define R300_TX_WRAP_R_MASK (7 << 6)
+# define R300_TX_MAG_FILTER_4 (0 << 9)
+# define R300_TX_MAG_FILTER_NEAREST (1 << 9)
+# define R300_TX_MAG_FILTER_LINEAR (2 << 9)
+# define R300_TX_MAG_FILTER_ANISO (3 << 9)
+# define R300_TX_MAG_FILTER_MASK (3 << 9)
+# define R300_TX_MIN_FILTER_NEAREST (1 << 11)
+# define R300_TX_MIN_FILTER_LINEAR (2 << 11)
+# define R300_TX_MIN_FILTER_ANISO (3 << 11)
+# define R300_TX_MIN_FILTER_MASK (3 << 11)
+# define R300_TX_MIN_FILTER_MIP_NONE (0 << 13)
+# define R300_TX_MIN_FILTER_MIP_NEAREST (1 << 13)
+# define R300_TX_MIN_FILTER_MIP_LINEAR (2 << 13)
+# define R300_TX_MIN_FILTER_MIP_MASK (3 << 13)
+# define R300_TX_MAX_ANISO_1_TO_1 (0 << 21)
+# define R300_TX_MAX_ANISO_2_TO_1 (1 << 21)
+# define R300_TX_MAX_ANISO_4_TO_1 (2 << 21)
+# define R300_TX_MAX_ANISO_8_TO_1 (3 << 21)
+# define R300_TX_MAX_ANISO_16_TO_1 (4 << 21)
+# define R300_TX_MAX_ANISO_MASK (7 << 21)
+# define R300_TX_WRAP_S(x) ((x) << 0)
+# define R300_TX_WRAP_T(x) ((x) << 3)
+
+#define R300_TX_FILTER1_0 0x4440
+# define R300_CHROMA_KEY_MODE_DISABLE 0
+# define R300_CHROMA_KEY_FORCE 1
+# define R300_CHROMA_KEY_BLEND 2
+# define R300_MC_ROUND_NORMAL (0<<2)
+# define R300_MC_ROUND_MPEG4 (1<<2)
+# define R300_LOD_BIAS_SHIFT 3
+# define R300_LOD_BIAS_MASK 0x1ff8
+# define R300_EDGE_ANISO_EDGE_DIAG (0<<13)
+# define R300_EDGE_ANISO_EDGE_ONLY (1<<13)
+# define R300_MC_COORD_TRUNCATE_DISABLE (0<<14)
+# define R300_MC_COORD_TRUNCATE_MPEG (1<<14)
+# define R300_TX_TRI_PERF_0_8 (0<<15)
+# define R300_TX_TRI_PERF_1_8 (1<<15)
+# define R300_TX_TRI_PERF_1_4 (2<<15)
+# define R300_TX_TRI_PERF_3_8 (3<<15)
+# define R300_ANISO_THRESHOLD_MASK (7<<17)
+
+# define R500_MACRO_SWITCH (1<<22)
+# define R500_BORDER_FIX (1<<31)
+
+#define R300_TX_FORMAT0_0 0x4480
+# define R300_TX_WIDTHMASK_SHIFT 0
+# define R300_TX_WIDTHMASK_MASK (2047 << 0)
+# define R300_TX_HEIGHTMASK_SHIFT 11
+# define R300_TX_HEIGHTMASK_MASK (2047 << 11)
+# define R300_TX_DEPTHMASK_SHIFT 22
+# define R300_TX_DEPTHMASK_MASK (0xf << 22)
+# define R300_TX_MAX_MIP_LEVEL_SHIFT 26
+# define R300_TX_MAX_MIP_LEVEL_MASK (0xf << 26)
+# define R300_TX_SIZE_PROJECTED (1 << 30)
+# define R300_TX_PITCH_EN (1 << 31)
+# define R300_TX_WIDTH(x) ((x) << 0)
+# define R300_TX_HEIGHT(x) ((x) << 11)
+# define R300_TX_NUM_LEVELS(x) ((x) << 26)
+
+#define R300_TX_FORMAT1_0 0x44C0
+ /* The interpretation of the format word by Wladimir van der Laan */
+ /* The X, Y, Z and W refer to the layout of the components.
+ They are given meanings as R, G, B and Alpha by the swizzle
+ specification */
+# define R300_TX_FORMAT_X8 0x0
+# define R300_TX_FORMAT_X16 0x1
+# define R300_TX_FORMAT_Y4X4 0x2
+# define R300_TX_FORMAT_Y8X8 0x3
+# define R300_TX_FORMAT_Y16X16 0x4
+# define R300_TX_FORMAT_Z3Y3X2 0x5
+# define R300_TX_FORMAT_Z5Y6X5 0x6
+# define R300_TX_FORMAT_Z6Y5X5 0x7
+# define R300_TX_FORMAT_Z11Y11X10 0x8
+# define R300_TX_FORMAT_Z10Y11X11 0x9
+# define R300_TX_FORMAT_W4Z4Y4X4 0xA
+# define R300_TX_FORMAT_W1Z5Y5X5 0xB
+# define R300_TX_FORMAT_W8Z8Y8X8 0xC
+# define R300_TX_FORMAT_W2Z10Y10X10 0xD
+# define R300_TX_FORMAT_W16Z16Y16X16 0xE
+# define R300_TX_FORMAT_DXT1 0xF
+# define R300_TX_FORMAT_DXT3 0x10
+# define R300_TX_FORMAT_DXT5 0x11
+# define R300_TX_FORMAT_Y8 0x12
+# define R300_TX_FORMAT_AVYU444 0x13
+# define R300_TX_FORMAT_VYUY422 0x14
+# define R300_TX_FORMAT_YVYU422 0x15
+# define R300_TX_FORMAT_16_MPEG 0x16
+# define R300_TX_FORMAT_16_16_MPEG 0x17
+# define R300_TX_FORMAT_16F 0x18
+# define R300_TX_FORMAT_16F_16F 0x19
+# define R300_TX_FORMAT_16F_16F_16F_16F 0x1A
+# define R300_TX_FORMAT_32F 0x1B
+# define R300_TX_FORMAT_32F_32F 0x1C
+# define R300_TX_FORMAT_32F_32F_32F_32F 0x1D
+# define R300_TX_FORMAT_W24_FP 0x1E
+
+# define R300_TX_FORMAT_SIGNED_W (1 << 5)
+# define R300_TX_FORMAT_SIGNED_Z (1 << 6)
+# define R300_TX_FORMAT_SIGNED_Y (1 << 7)
+# define R300_TX_FORMAT_SIGNED_X (1 << 8)
+# define R300_TX_FORMAT_SIGNED (0xf << 5)
+
+# define R300_TX_FORMAT_3D (1 << 25)
+# define R300_TX_FORMAT_CUBIC_MAP (2 << 25)
+
+ /* alpha modes, convenience mostly */
+ /* if you have alpha, pick the constant appropriate to the
+    number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc.) */
+# define R300_TX_FORMAT_ALPHA_1CH 0x000
+# define R300_TX_FORMAT_ALPHA_2CH 0x200
+# define R300_TX_FORMAT_ALPHA_4CH 0x600
+# define R300_TX_FORMAT_ALPHA_NONE 0xA00
+ /* Swizzling */
+ /* constants */
+# define R300_TX_FORMAT_X 0
+# define R300_TX_FORMAT_Y 1
+# define R300_TX_FORMAT_Z 2
+# define R300_TX_FORMAT_W 3
+# define R300_TX_FORMAT_ZERO 4
+# define R300_TX_FORMAT_ONE 5
+ /* 2.0*Z, everything above 1.0 is set to 0.0 */
+# define R300_TX_FORMAT_CUT_Z 6
+ /* 2.0*W, everything above 1.0 is set to 0.0 */
+# define R300_TX_FORMAT_CUT_W 7
+
+# define R300_TX_FORMAT_B_SHIFT 18
+# define R300_TX_FORMAT_G_SHIFT 15
+# define R300_TX_FORMAT_R_SHIFT 12
+# define R300_TX_FORMAT_A_SHIFT 9
+ /* Convenience macro to take care of layout and swizzling */
+# define R300_EASY_TX_FORMAT(B, G, R, A, FMT) ( \
+ ((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT) \
+ | ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT) \
+ | ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT) \
+ | ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT) \
+ | (R300_TX_FORMAT_##FMT) \
+ )
+ /* These can be ORed with the result of R300_EASY_TX_FORMAT().
+    We don't really know what they do; perhaps they take values from a
+    constant color? */
+# define R300_TX_FORMAT_CONST_X (1<<5)
+# define R300_TX_FORMAT_CONST_Y (2<<5)
+# define R300_TX_FORMAT_CONST_Z (4<<5)
+# define R300_TX_FORMAT_CONST_W (8<<5)
+
+# define R300_TX_FORMAT_GAMMA (1 << 21)
+# define R300_TX_FORMAT_YUV_TO_RGB (1 << 22)
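+
+/* Illustrative sketch only: using R300_EASY_TX_FORMAT() to describe an
+ * 8-bit-per-channel texture stored as W8Z8Y8X8 and sampled with an identity
+ * swizzle (R from X, G from Y, B from Z, A from W). Which memory layout this
+ * corresponds to for a given GL format depends on component order and
+ * endianness, which is not asserted here.
+ */
+#if 0
+static uint32_t tx_format1_rgba8888_identity(void)
+{
+    return R300_EASY_TX_FORMAT(Z, Y, X, W, W8Z8Y8X8);
+}
+#endif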
+
+#define R300_TX_FORMAT2_0 0x4500 /* obviously missing in the gap */
+# define R300_TX_PITCHMASK_SHIFT 0
+# define R300_TX_PITCHMASK_MASK (2047 << 0)
+# define R500_TXFORMAT_MSB (1 << 14)
+# define R500_TXWIDTH_BIT11 (1 << 15)
+# define R500_TXHEIGHT_BIT11 (1 << 16)
+# define R500_POW2FIX2FLT (1 << 17)
+# define R500_SEL_FILTER4_TC0 (0 << 18)
+# define R500_SEL_FILTER4_TC1 (1 << 18)
+# define R500_SEL_FILTER4_TC2 (2 << 18)
+# define R500_SEL_FILTER4_TC3 (3 << 18)
+
+#define R300_TX_OFFSET_0 0x4540
+#define R300_TX_OFFSET_1 0x4544
+#define R300_TX_OFFSET_2 0x4548
+#define R300_TX_OFFSET_3 0x454C
+#define R300_TX_OFFSET_4 0x4550
+#define R300_TX_OFFSET_5 0x4554
+#define R300_TX_OFFSET_6 0x4558
+#define R300_TX_OFFSET_7 0x455C
+ /* BEGIN: Guess from R200 */
+# define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
+# define R300_TXO_ENDIAN_BYTE_SWAP (1 << 0)
+# define R300_TXO_ENDIAN_WORD_SWAP (2 << 0)
+# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
+# define R300_TXO_MACRO_TILE (1 << 2)
+# define R300_TXO_MICRO_TILE_LINEAR (0 << 3)
+# define R300_TXO_MICRO_TILE (1 << 3)
+# define R300_TXO_MICRO_TILE_SQUARE (2 << 3)
+# define R300_TXO_OFFSET_MASK 0xffffffe0
+# define R300_TXO_OFFSET_SHIFT 5
+ /* END: Guess from R200 */
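+
+/* Illustrative sketch only: writing the texture base address register for
+ * unit `unit`, using the "TX_OFFSET_0 + (4*N)" addressing rule from the
+ * comment at the start of this section, with a linear (untiled) layout. The
+ * offset is assumed to already be aligned as required by TXO_OFFSET_MASK;
+ * write_reg() is the hypothetical helper from the first sketch.
+ */
+#if 0
+static void r300_set_tx_offset(unsigned unit, uint32_t gpu_addr)
+{
+    uint32_t value = (gpu_addr & R300_TXO_OFFSET_MASK) |
+                     R300_TXO_ENDIAN_NO_SWAP |
+                     R300_TXO_MICRO_TILE_LINEAR;
+
+    write_reg(R300_TX_OFFSET_0 + 4 * unit, value);
+}
+#endif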
+
+/* 32 bit chroma key */
+#define R300_TX_CHROMA_KEY_0 0x4580
+#define R300_TX_CHROMA_KEY_1 0x4584
+#define R300_TX_CHROMA_KEY_2 0x4588
+#define R300_TX_CHROMA_KEY_3 0x458c
+#define R300_TX_CHROMA_KEY_4 0x4590
+#define R300_TX_CHROMA_KEY_5 0x4594
+#define R300_TX_CHROMA_KEY_6 0x4598
+#define R300_TX_CHROMA_KEY_7 0x459c
+#define R300_TX_CHROMA_KEY_8 0x45a0
+#define R300_TX_CHROMA_KEY_9 0x45a4
+#define R300_TX_CHROMA_KEY_10 0x45a8
+#define R300_TX_CHROMA_KEY_11 0x45ac
+#define R300_TX_CHROMA_KEY_12 0x45b0
+#define R300_TX_CHROMA_KEY_13 0x45b4
+#define R300_TX_CHROMA_KEY_14 0x45b8
+#define R300_TX_CHROMA_KEY_15 0x45bc
+/* ff00ff00 == { 0, 1.0, 0, 1.0 } */
+
+/* Border Color */
+#define R300_TX_BORDER_COLOR_0 0x45c0
+#define R300_TX_BORDER_COLOR_1 0x45c4
+#define R300_TX_BORDER_COLOR_2 0x45c8
+#define R300_TX_BORDER_COLOR_3 0x45cc
+#define R300_TX_BORDER_COLOR_4 0x45d0
+#define R300_TX_BORDER_COLOR_5 0x45d4
+#define R300_TX_BORDER_COLOR_6 0x45d8
+#define R300_TX_BORDER_COLOR_7 0x45dc
+#define R300_TX_BORDER_COLOR_8 0x45e0
+#define R300_TX_BORDER_COLOR_9 0x45e4
+#define R300_TX_BORDER_COLOR_10 0x45e8
+#define R300_TX_BORDER_COLOR_11 0x45ec
+#define R300_TX_BORDER_COLOR_12 0x45f0
+#define R300_TX_BORDER_COLOR_13 0x45f4
+#define R300_TX_BORDER_COLOR_14 0x45f8
+#define R300_TX_BORDER_COLOR_15 0x45fc
+
+
+/* END: Texture specification */
+
+/* BEGIN: Fragment program instruction set */
+
+/* Fragment programs are written directly into register space.
+ * There are separate instruction streams for texture instructions and ALU
+ * instructions.
+ * In order to synchronize these streams, the program is divided into up
+ * to 4 nodes. Each node begins with a number of TEX operations, followed
+ * by a number of ALU operations.
+ * The first node can have zero TEX ops; all subsequent nodes must have at
+ * least one TEX op.
+ * All nodes must have at least one ALU op.
+ *
+ * The index of the last node is stored in PFS_CNTL_0: A value of 0 means
+ * 1 node, a value of 3 means 4 nodes.
+ * The total number of instructions is defined in PFS_CNTL_2. The offsets are
+ * offsets into the respective instruction streams, while *_END points to the
+ * last instruction relative to this offset.
+ */
+#define R300_US_CONFIG 0x4600
+# define R300_PFS_CNTL_LAST_NODES_SHIFT 0
+# define R300_PFS_CNTL_LAST_NODES_MASK (3 << 0)
+# define R300_PFS_CNTL_FIRST_NODE_HAS_TEX (1 << 3)
+#define R300_US_PIXSIZE 0x4604
+/* There is an unshifted value here which has so far always been equal to the
+ * index of the highest used temporary register.
+ */
+#define R300_US_CODE_OFFSET 0x4608
+# define R300_PFS_CNTL_ALU_OFFSET_SHIFT 0
+# define R300_PFS_CNTL_ALU_OFFSET_MASK (63 << 0)
+# define R300_PFS_CNTL_ALU_END_SHIFT 6
+# define R300_PFS_CNTL_ALU_END_MASK (63 << 6)
+# define R300_PFS_CNTL_TEX_OFFSET_SHIFT 13
+# define R300_PFS_CNTL_TEX_OFFSET_MASK (31 << 13)
+# define R300_PFS_CNTL_TEX_END_SHIFT 18
+# define R300_PFS_CNTL_TEX_END_MASK (31 << 18)
+
+/* gap */
+
+/* Nodes are stored backwards. The last active node is always stored in
+ * PFS_NODE_3.
+ * Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
+ * first node is stored in NODE_2, the second node is stored in NODE_3.
+ *
+ * Offsets are relative to the master offset from PFS_CNTL_2.
+ */
+#define R300_US_CODE_ADDR_0 0x4610
+#define R300_US_CODE_ADDR_1 0x4614
+#define R300_US_CODE_ADDR_2 0x4618
+#define R300_US_CODE_ADDR_3 0x461C
+# define R300_ALU_START_SHIFT 0
+# define R300_ALU_START_MASK (63 << 0)
+# define R300_ALU_SIZE_SHIFT 6
+# define R300_ALU_SIZE_MASK (63 << 6)
+# define R300_TEX_START_SHIFT 12
+# define R300_TEX_START_MASK (31 << 12)
+# define R300_TEX_SIZE_SHIFT 17
+# define R300_TEX_SIZE_MASK (31 << 17)
+# define R300_RGBA_OUT (1 << 22)
+# define R300_W_OUT (1 << 23)
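+
+/* Illustrative sketch only: a plausible single-node fragment program setup,
+ * following the node layout described above (the only node goes into
+ * CODE_ADDR_3, offsets start at 0, and the *_END/*_SIZE fields hold the index
+ * of the last instruction). The exact conventions are assumptions, nalu is
+ * assumed >= 1, and write_reg() is the hypothetical helper from the first
+ * sketch.
+ */
+#if 0
+static void r300_setup_single_node_fp(unsigned nalu, unsigned ntex)
+{
+    write_reg(R300_US_CONFIG,
+              (0 << R300_PFS_CNTL_LAST_NODES_SHIFT) |         /* one node */
+              (ntex ? R300_PFS_CNTL_FIRST_NODE_HAS_TEX : 0));
+
+    write_reg(R300_US_CODE_OFFSET,
+              (0 << R300_PFS_CNTL_ALU_OFFSET_SHIFT) |
+              ((nalu - 1) << R300_PFS_CNTL_ALU_END_SHIFT) |
+              (0 << R300_PFS_CNTL_TEX_OFFSET_SHIFT) |
+              ((ntex ? ntex - 1 : 0) << R300_PFS_CNTL_TEX_END_SHIFT));
+
+    /* Unused nodes are zeroed; the single active node lives in ADDR_3. */
+    write_reg(R300_US_CODE_ADDR_0, 0);
+    write_reg(R300_US_CODE_ADDR_1, 0);
+    write_reg(R300_US_CODE_ADDR_2, 0);
+    write_reg(R300_US_CODE_ADDR_3,
+              (0 << R300_ALU_START_SHIFT) |
+              ((nalu - 1) << R300_ALU_SIZE_SHIFT) |
+              (0 << R300_TEX_START_SHIFT) |
+              ((ntex ? ntex - 1 : 0) << R300_TEX_SIZE_SHIFT) |
+              R300_RGBA_OUT);
+}
+#endif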
+
+/* TEX
+ * As far as I can tell, texture instructions cannot write into output
+ * registers directly. A subsequent ALU instruction is always necessary,
+ * even if it's just MAD o0, r0, 1, 0
+ */
+#define R300_US_TEX_INST_0 0x4620
+# define R300_SRC_ADDR_SHIFT 0
+# define R300_SRC_ADDR_MASK (31 << 0)
+# define R300_DST_ADDR_SHIFT 6
+# define R300_DST_ADDR_MASK (31 << 6)
+# define R300_TEX_ID_SHIFT 11
+# define R300_TEX_ID_MASK (15 << 11)
+# define R300_TEX_INST_SHIFT 15
+# define R300_TEX_OP_NOP 0
+# define R300_TEX_OP_LD 1
+# define R300_TEX_OP_KIL 2
+# define R300_TEX_OP_TXP 3
+# define R300_TEX_OP_TXB 4
+# define R300_TEX_INST_MASK (7 << 15)
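+
+/* Illustrative sketch only: encoding one texture instruction that samples
+ * texture `tex` with the coordinates in temporary `src` and writes the result
+ * to temporary `dst` (a plain lookup, R300_TEX_OP_LD), composed from the
+ * fields above.
+ */
+#if 0
+static uint32_t r300_tex_ld(unsigned dst, unsigned src, unsigned tex)
+{
+    return (src << R300_SRC_ADDR_SHIFT) |
+           (dst << R300_DST_ADDR_SHIFT) |
+           (tex << R300_TEX_ID_SHIFT) |
+           (R300_TEX_OP_LD << R300_TEX_INST_SHIFT);
+}
+#endif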
+
+/* Output format from the unified shader */
+#define R300_US_OUT_FMT_0 0x46A4
+# define R300_US_OUT_FMT_C4_8 (0 << 0)
+# define R300_US_OUT_FMT_C4_10 (1 << 0)
+# define R300_US_OUT_FMT_C4_10_GAMMA (2 << 0)
+# define R300_US_OUT_FMT_C_16 (3 << 0)
+# define R300_US_OUT_FMT_C2_16 (4 << 0)
+# define R300_US_OUT_FMT_C4_16 (5 << 0)
+# define R300_US_OUT_FMT_C_16_MPEG (6 << 0)
+# define R300_US_OUT_FMT_C2_16_MPEG (7 << 0)
+# define R300_US_OUT_FMT_C2_4 (8 << 0)
+# define R300_US_OUT_FMT_C_3_3_2 (9 << 0)
+# define R300_US_OUT_FMT_C_6_5_6 (10 << 0)
+# define R300_US_OUT_FMT_C_11_11_10 (11 << 0)
+# define R300_US_OUT_FMT_C_10_11_11 (12 << 0)
+# define R300_US_OUT_FMT_C_2_10_10_10 (13 << 0)
+/* reserved */
+# define R300_US_OUT_FMT_UNUSED (15 << 0)
+# define R300_US_OUT_FMT_C_16_FP (16 << 0)
+# define R300_US_OUT_FMT_C2_16_FP (17 << 0)
+# define R300_US_OUT_FMT_C4_16_FP (18 << 0)
+# define R300_US_OUT_FMT_C_32_FP (19 << 0)
+# define R300_US_OUT_FMT_C2_32_FP (20 << 0)
+# define R300_US_OUT_FMT_C4_32_FP (21 << 0)
+# define R300_C0_SEL_A (0 << 8)
+# define R300_C0_SEL_R (1 << 8)
+# define R300_C0_SEL_G (2 << 8)
+# define R300_C0_SEL_B (3 << 8)
+# define R300_C1_SEL_A (0 << 10)
+# define R300_C1_SEL_R (1 << 10)
+# define R300_C1_SEL_G (2 << 10)
+# define R300_C1_SEL_B (3 << 10)
+# define R300_C2_SEL_A (0 << 12)
+# define R300_C2_SEL_R (1 << 12)
+# define R300_C2_SEL_G (2 << 12)
+# define R300_C2_SEL_B (3 << 12)
+# define R300_C3_SEL_A (0 << 14)
+# define R300_C3_SEL_R (1 << 14)
+# define R300_C3_SEL_G (2 << 14)
+# define R300_C3_SEL_B (3 << 14)
+# define R300_OUT_SIGN(x) ((x) << 16)
+# define R500_ROUND_ADJ (1 << 20)
+
+/* ALU
+ * The ALU instruction register blocks are enumerated according to the order
+ * in which fglrx writes them. I assume there is space for 64 instructions, since
+ * each block has space for a maximum of 64 DWORDs, and this matches reported
+ * native limits.
+ *
+ * The basic functional block seems to be one MAD for each color and alpha,
+ * and an adder that adds all components after the MUL.
+ * - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
+ * - DP4: Use OUTC_DP4, OUTA_DP4
+ * - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
+ * - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
+ * - CMPH: If ARG2 > 0.5, return ARG0, else return ARG1
+ * - CMP: If ARG2 < 0, return ARG1, else return ARG0
+ * - FLR: use FRC+MAD
+ * - XPD: use MAD+MAD
+ * - SGE, SLT: use MAD+CMP
+ * - RSQ: use ABS modifier for argument
+ * - Use OUTC_REPL_ALPHA to write results of an alpha-only operation
+ * (e.g. RCP) into color register
+ * - apparently, there's no quick DST operation
+ * - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
+ * - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
+ * - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
+ *
+ * Operand selection
+ * First stage selects three sources from the available registers and
+ * constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
+ * fglrx sorts the three source fields: Registers before constants,
+ * lower indices before higher indices; I do not know whether this is
+ * necessary.
+ *
+ * fglrx fills unused sources with "read constant 0".
+ * According to specs, you cannot select more than two different constants.
+ *
+ * Second stage selects the operands from the sources. This is defined in
+ * INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
+ * zero and one.
+ * Swizzling and negation happens in this stage, as well.
+ *
+ * Important: Color and alpha seem to be mostly separate, i.e. their sources
+ * selection appears to be fully independent (the register storage is probably
+ * physically split into a color and an alpha section).
+ * However (because of the apparent physical split), there is some interaction
+ * WRT swizzling. If, for example, you want to load an R component into an
+ * Alpha operand, this R component is taken from a *color* source, not from
+ * an alpha source. The corresponding register doesn't even have to appear in
+ * the alpha sources list. (I hope this all makes sense to you)
+ *
+ * Destination selection
+ * The destination register index is in FPI1 (color) and FPI3 (alpha)
+ * together with enable bits.
+ * There are separate enable bits for writing into temporary registers
+ * (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_*
+ * /DSTA_OUTPUT). You can write to both at once, or not write at all (the
+ * same index must be used for both).
+ *
+ * Note: There is a special form for LRP
+ * - Argument order is the same as in ARB_fragment_program.
+ * - Operation is MAD
+ * - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
+ * - Set FPI0/FPI2_SPECIAL_LRP
+ * Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD
+ */
+#define R300_US_ALU_RGB_ADDR_0 0x46C0
+# define R300_ALU_SRC0C_SHIFT 0
+# define R300_ALU_SRC0C_MASK (31 << 0)
+# define R300_ALU_SRC0C_CONST (1 << 5)
+# define R300_ALU_SRC1C_SHIFT 6
+# define R300_ALU_SRC1C_MASK (31 << 6)
+# define R300_ALU_SRC1C_CONST (1 << 11)
+# define R300_ALU_SRC2C_SHIFT 12
+# define R300_ALU_SRC2C_MASK (31 << 12)
+# define R300_ALU_SRC2C_CONST (1 << 17)
+# define R300_ALU_SRC_MASK 0x0003ffff
+# define R300_ALU_DSTC_SHIFT 18
+# define R300_ALU_DSTC_MASK (31 << 18)
+# define R300_ALU_DSTC_REG_MASK_SHIFT 23
+# define R300_ALU_DSTC_REG_X (1 << 23)
+# define R300_ALU_DSTC_REG_Y (1 << 24)
+# define R300_ALU_DSTC_REG_Z (1 << 25)
+# define R300_ALU_DSTC_OUTPUT_MASK_SHIFT 26
+# define R300_ALU_DSTC_OUTPUT_X (1 << 26)
+# define R300_ALU_DSTC_OUTPUT_Y (1 << 27)
+# define R300_ALU_DSTC_OUTPUT_Z (1 << 28)
+# define R300_ALU_DSTC_OUTPUT_XYZ (7 << 26)
+# define R300_RGB_ADDR0(x) ((x) << 0)
+# define R300_RGB_ADDR1(x) ((x) << 6)
+# define R300_RGB_ADDR2(x) ((x) << 12)
+
+#define R300_US_ALU_ALPHA_ADDR_0 0x47C0
+# define R300_ALU_SRC0A_SHIFT 0
+# define R300_ALU_SRC0A_MASK (31 << 0)
+# define R300_ALU_SRC0A_CONST (1 << 5)
+# define R300_ALU_SRC1A_SHIFT 6
+# define R300_ALU_SRC1A_MASK (31 << 6)
+# define R300_ALU_SRC1A_CONST (1 << 11)
+# define R300_ALU_SRC2A_SHIFT 12
+# define R300_ALU_SRC2A_MASK (31 << 12)
+# define R300_ALU_SRC2A_CONST (1 << 17)
+# define R300_ALU_SRC_MASK 0x0003ffff
+# define R300_ALU_DSTA_SHIFT 18
+# define R300_ALU_DSTA_MASK (31 << 18)
+# define R300_ALU_DSTA_REG (1 << 23)
+# define R300_ALU_DSTA_OUTPUT (1 << 24)
+# define R300_ALU_DSTA_DEPTH (1 << 27)
+# define R300_ALPHA_ADDR0(x) ((x) << 0)
+# define R300_ALPHA_ADDR1(x) ((x) << 6)
+# define R300_ALPHA_ADDR2(x) ((x) << 12)
+
+#define R300_US_ALU_RGB_INST_0 0x48C0
+# define R300_ALU_ARGC_SRC0C_XYZ 0
+# define R300_ALU_ARGC_SRC0C_XXX 1
+# define R300_ALU_ARGC_SRC0C_YYY 2
+# define R300_ALU_ARGC_SRC0C_ZZZ 3
+# define R300_ALU_ARGC_SRC1C_XYZ 4
+# define R300_ALU_ARGC_SRC1C_XXX 5
+# define R300_ALU_ARGC_SRC1C_YYY 6
+# define R300_ALU_ARGC_SRC1C_ZZZ 7
+# define R300_ALU_ARGC_SRC2C_XYZ 8
+# define R300_ALU_ARGC_SRC2C_XXX 9
+# define R300_ALU_ARGC_SRC2C_YYY 10
+# define R300_ALU_ARGC_SRC2C_ZZZ 11
+# define R300_ALU_ARGC_SRC0A 12
+# define R300_ALU_ARGC_SRC1A 13
+# define R300_ALU_ARGC_SRC2A 14
+# define R300_ALU_ARGC_SRCP_XYZ 15
+# define R300_ALU_ARGC_SRCP_XXX 16
+# define R300_ALU_ARGC_SRCP_YYY 17
+# define R300_ALU_ARGC_SRCP_ZZZ 18
+# define R300_ALU_ARGC_SRCP_WWW 19
+# define R300_ALU_ARGC_ZERO 20
+# define R300_ALU_ARGC_ONE 21
+# define R300_ALU_ARGC_HALF 22
+# define R300_ALU_ARGC_SRC0C_YZX 23
+# define R300_ALU_ARGC_SRC1C_YZX 24
+# define R300_ALU_ARGC_SRC2C_YZX 25
+# define R300_ALU_ARGC_SRC0C_ZXY 26
+# define R300_ALU_ARGC_SRC1C_ZXY 27
+# define R300_ALU_ARGC_SRC2C_ZXY 28
+# define R300_ALU_ARGC_SRC0CA_WZY 29
+# define R300_ALU_ARGC_SRC1CA_WZY 30
+# define R300_ALU_ARGC_SRC2CA_WZY 31
+# define R300_RGB_SWIZA(x) ((x) << 0)
+# define R300_RGB_SWIZB(x) ((x) << 7)
+# define R300_RGB_SWIZC(x) ((x) << 14)
+
+# define R300_ALU_ARG0C_SHIFT 0
+# define R300_ALU_ARG0C_MASK (31 << 0)
+# define R300_ALU_ARG0C_NOP (0 << 5)
+# define R300_ALU_ARG0C_NEG (1 << 5)
+# define R300_ALU_ARG0C_ABS (2 << 5)
+# define R300_ALU_ARG0C_NAB (3 << 5)
+# define R300_ALU_ARG1C_SHIFT 7
+# define R300_ALU_ARG1C_MASK (31 << 7)
+# define R300_ALU_ARG1C_NOP (0 << 12)
+# define R300_ALU_ARG1C_NEG (1 << 12)
+# define R300_ALU_ARG1C_ABS (2 << 12)
+# define R300_ALU_ARG1C_NAB (3 << 12)
+# define R300_ALU_ARG2C_SHIFT 14
+# define R300_ALU_ARG2C_MASK (31 << 14)
+# define R300_ALU_ARG2C_NOP (0 << 19)
+# define R300_ALU_ARG2C_NEG (1 << 19)
+# define R300_ALU_ARG2C_ABS (2 << 19)
+# define R300_ALU_ARG2C_NAB (3 << 19)
+# define R300_ALU_SRCP_1_MINUS_2_SRC0 (0 << 21)
+# define R300_ALU_SRCP_SRC1_MINUS_SRC0 (1 << 21)
+# define R300_ALU_SRCP_SRC1_PLUS_SRC0 (2 << 21)
+# define R300_ALU_SRCP_1_MINUS_SRC0 (3 << 21)
+
+# define R300_ALU_OUTC_MAD (0 << 23)
+# define R300_ALU_OUTC_DP3 (1 << 23)
+# define R300_ALU_OUTC_DP4 (2 << 23)
+# define R300_ALU_OUTC_D2A (3 << 23)
+# define R300_ALU_OUTC_MIN (4 << 23)
+# define R300_ALU_OUTC_MAX (5 << 23)
+# define R300_ALU_OUTC_CMPH (7 << 23)
+# define R300_ALU_OUTC_CMP (8 << 23)
+# define R300_ALU_OUTC_FRC (9 << 23)
+# define R300_ALU_OUTC_REPL_ALPHA (10 << 23)
+
+# define R300_ALU_OUTC_MOD_NOP (0 << 27)
+# define R300_ALU_OUTC_MOD_MUL2 (1 << 27)
+# define R300_ALU_OUTC_MOD_MUL4 (2 << 27)
+# define R300_ALU_OUTC_MOD_MUL8 (3 << 27)
+# define R300_ALU_OUTC_MOD_DIV2 (4 << 27)
+# define R300_ALU_OUTC_MOD_DIV4 (5 << 27)
+# define R300_ALU_OUTC_MOD_DIV8 (6 << 27)
+
+# define R300_ALU_OUTC_CLAMP (1 << 30)
+# define R300_ALU_INSERT_NOP (1 << 31)
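+
+/* Illustrative sketch only (not taken from documentation, and not verified
+ * against fglrx): one way the macros above might be combined to encode a
+ * color MAD "temp0.rgb = temp0 * temp1 + temp2". The register and field
+ * choices are assumptions made for demonstration purposes.
+ */
+#if 0
+uint32_t rgb_addr = R300_RGB_ADDR0(0) | R300_RGB_ADDR1(1) | R300_RGB_ADDR2(2) |
+                    (0 << R300_ALU_DSTC_SHIFT) |
+                    R300_ALU_DSTC_REG_X | R300_ALU_DSTC_REG_Y | R300_ALU_DSTC_REG_Z;
+uint32_t rgb_inst = R300_RGB_SWIZA(R300_ALU_ARGC_SRC0C_XYZ) |
+                    R300_RGB_SWIZB(R300_ALU_ARGC_SRC1C_XYZ) |
+                    R300_RGB_SWIZC(R300_ALU_ARGC_SRC2C_XYZ) |
+                    R300_ALU_OUTC_MAD;
+#endif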
+
+#define R300_US_ALU_ALPHA_INST_0 0x49C0
+# define R300_ALU_ARGA_SRC0C_X 0
+# define R300_ALU_ARGA_SRC0C_Y 1
+# define R300_ALU_ARGA_SRC0C_Z 2
+# define R300_ALU_ARGA_SRC1C_X 3
+# define R300_ALU_ARGA_SRC1C_Y 4
+# define R300_ALU_ARGA_SRC1C_Z 5
+# define R300_ALU_ARGA_SRC2C_X 6
+# define R300_ALU_ARGA_SRC2C_Y 7
+# define R300_ALU_ARGA_SRC2C_Z 8
+# define R300_ALU_ARGA_SRC0A 9
+# define R300_ALU_ARGA_SRC1A 10
+# define R300_ALU_ARGA_SRC2A 11
+# define R300_ALU_ARGA_SRCP_X 12
+# define R300_ALU_ARGA_SRCP_Y 13
+# define R300_ALU_ARGA_SRCP_Z 14
+# define R300_ALU_ARGA_SRCP_W 15
+# define R300_ALU_ARGA_ZERO 16
+# define R300_ALU_ARGA_ONE 17
+# define R300_ALU_ARGA_HALF 18
+# define R300_ALPHA_SWIZA(x) ((x) << 0)
+# define R300_ALPHA_SWIZB(x) ((x) << 7)
+# define R300_ALPHA_SWIZC(x) ((x) << 14)
+
+# define R300_ALU_ARG0A_SHIFT 0
+# define R300_ALU_ARG0A_MASK (31 << 0)
+# define R300_ALU_ARG0A_NOP (0 << 5)
+# define R300_ALU_ARG0A_NEG (1 << 5)
+# define R300_ALU_ARG0A_ABS (2 << 5)
+# define R300_ALU_ARG0A_NAB (3 << 5)
+# define R300_ALU_ARG1A_SHIFT 7
+# define R300_ALU_ARG1A_MASK (31 << 7)
+# define R300_ALU_ARG1A_NOP (0 << 12)
+# define R300_ALU_ARG1A_NEG (1 << 12)
+# define R300_ALU_ARG1A_ABS (2 << 12)
+# define R300_ALU_ARG1A_NAB (3 << 12)
+# define R300_ALU_ARG2A_SHIFT 14
+# define R300_ALU_ARG2A_MASK (31 << 14)
+# define R300_ALU_ARG2A_NOP (0 << 19)
+# define R300_ALU_ARG2A_NEG (1 << 19)
+# define R300_ALU_ARG2A_ABS (2 << 19)
+# define R300_ALU_ARG2A_NAB (3 << 19)
+# define R300_ALU_SRCP_1_MINUS_2_SRC0 (0 << 21)
+# define R300_ALU_SRCP_SRC1_MINUS_SRC0 (1 << 21)
+# define R300_ALU_SRCP_SRC1_PLUS_SRC0 (2 << 21)
+# define R300_ALU_SRCP_1_MINUS_SRC0 (3 << 21)
+
+# define R300_ALU_OUTA_MAD (0 << 23)
+# define R300_ALU_OUTA_DP4 (1 << 23)
+# define R300_ALU_OUTA_MIN (2 << 23)
+# define R300_ALU_OUTA_MAX (3 << 23)
+# define R300_ALU_OUTA_CND (5 << 23)
+# define R300_ALU_OUTA_CMP (6 << 23)
+# define R300_ALU_OUTA_FRC (7 << 23)
+# define R300_ALU_OUTA_EX2 (8 << 23)
+# define R300_ALU_OUTA_LG2 (9 << 23)
+# define R300_ALU_OUTA_RCP (10 << 23)
+# define R300_ALU_OUTA_RSQ (11 << 23)
+
+# define R300_ALU_OUTA_MOD_NOP (0 << 27)
+# define R300_ALU_OUTA_MOD_MUL2 (1 << 27)
+# define R300_ALU_OUTA_MOD_MUL4 (2 << 27)
+# define R300_ALU_OUTA_MOD_MUL8 (3 << 27)
+# define R300_ALU_OUTA_MOD_DIV2 (4 << 27)
+# define R300_ALU_OUTA_MOD_DIV4 (5 << 27)
+# define R300_ALU_OUTA_MOD_DIV8 (6 << 27)
+
+# define R300_ALU_OUTA_CLAMP (1 << 30)
+/* END: Fragment program instruction set */
+
+/* Fog: Fog Blending Enable */
+#define R300_FG_FOG_BLEND 0x4bc0
+# define R300_FG_FOG_BLEND_DISABLE (0 << 0)
+# define R300_FG_FOG_BLEND_ENABLE (1 << 0)
+# define R300_FG_FOG_BLEND_FN_LINEAR (0 << 1)
+# define R300_FG_FOG_BLEND_FN_EXP (1 << 1)
+# define R300_FG_FOG_BLEND_FN_EXP2 (2 << 1)
+# define R300_FG_FOG_BLEND_FN_CONSTANT (3 << 1)
+# define R300_FG_FOG_BLEND_FN_MASK (3 << 1)
+
+/* Fog: Red Component of Fog Color */
+#define R300_FG_FOG_COLOR_R 0x4bc8
+/* Fog: Green Component of Fog Color */
+#define R300_FG_FOG_COLOR_G 0x4bcc
+/* Fog: Blue Component of Fog Color */
+#define R300_FG_FOG_COLOR_B 0x4bd0
+# define R300_FG_FOG_COLOR_MASK 0x000003ff
+
+/* Fog: Constant Factor for Fog Blending */
+#define R300_FG_FOG_FACTOR 0x4bc4
+# define FG_FOG_FACTOR_MASK 0x000003ff
+
+/* Fog: Alpha function */
+#define R300_FG_ALPHA_FUNC 0x4bd4
+# define R300_FG_ALPHA_FUNC_VAL_MASK 0x000000ff
+# define R300_FG_ALPHA_FUNC_NEVER (0 << 8)
+# define R300_FG_ALPHA_FUNC_LESS (1 << 8)
+# define R300_FG_ALPHA_FUNC_EQUAL (2 << 8)
+# define R300_FG_ALPHA_FUNC_LE (3 << 8)
+# define R300_FG_ALPHA_FUNC_GREATER (4 << 8)
+# define R300_FG_ALPHA_FUNC_NOTEQUAL (5 << 8)
+# define R300_FG_ALPHA_FUNC_GE (6 << 8)
+# define R300_FG_ALPHA_FUNC_ALWAYS (7 << 8)
+# define R300_ALPHA_TEST_OP_MASK (7 << 8)
+# define R300_FG_ALPHA_FUNC_DISABLE (0 << 11)
+# define R300_FG_ALPHA_FUNC_ENABLE (1 << 11)
+
+# define R500_FG_ALPHA_FUNC_10BIT (0 << 12)
+# define R500_FG_ALPHA_FUNC_8BIT (1 << 12)
+
+# define R300_FG_ALPHA_FUNC_MASK_DISABLE (0 << 16)
+# define R300_FG_ALPHA_FUNC_MASK_ENABLE (1 << 16)
+# define R300_FG_ALPHA_FUNC_CFG_2_OF_4 (0 << 17)
+# define R300_FG_ALPHA_FUNC_CFG_3_OF_6 (1 << 17)
+
+# define R300_FG_ALPHA_FUNC_DITH_DISABLE (0 << 20)
+# define R300_FG_ALPHA_FUNC_DITH_ENABLE (1 << 20)
+
+# define R500_FG_ALPHA_FUNC_OFFSET_DISABLE (0 << 24)
+# define R500_FG_ALPHA_FUNC_OFFSET_ENABLE (1 << 24) /* Not supported in R520 */
+# define R500_FG_ALPHA_FUNC_DISC_ZERO_MASK_DISABLE (0 << 25)
+# define R500_FG_ALPHA_FUNC_DISC_ZERO_MASK_ENABLE (1 << 25)
+
+# define R500_FG_ALPHA_FUNC_FP16_DISABLE (0 << 28)
+# define R500_FG_ALPHA_FUNC_FP16_ENABLE (1 << 28)
+
+
+/* Fog: Where does the depth come from? */
+#define R300_FG_DEPTH_SRC 0x4bd8
+# define R300_FG_DEPTH_SRC_SCAN (0 << 0)
+# define R300_FG_DEPTH_SRC_SHADER (1 << 0)
+
+/* Fog: Alpha Compare Value */
+#define R500_FG_ALPHA_VALUE 0x4be0
+# define R500_FG_ALPHA_VALUE_MASK 0x0000ffff
+
+/* gap */
+
+/* Fragment program parameters in 7.16 floating point */
+#define R300_PFS_PARAM_0_X 0x4C00
+#define R300_PFS_PARAM_0_Y 0x4C04
+#define R300_PFS_PARAM_0_Z 0x4C08
+#define R300_PFS_PARAM_0_W 0x4C0C
+/* last consts */
+#define R300_PFS_PARAM_31_X 0x4DF0
+#define R300_PFS_PARAM_31_Y 0x4DF4
+#define R300_PFS_PARAM_31_Z 0x4DF8
+#define R300_PFS_PARAM_31_W 0x4DFC
+
+/* Unpipelined. */
+#define R300_RB3D_CCTL 0x4e00
+# define R300_RB3D_CCTL_NUM_MULTIWRITES_1_BUFFER (0 << 5)
+# define R300_RB3D_CCTL_NUM_MULTIWRITES_2_BUFFERS (1 << 5)
+# define R300_RB3D_CCTL_NUM_MULTIWRITES_3_BUFFERS (2 << 5)
+# define R300_RB3D_CCTL_NUM_MULTIWRITES_4_BUFFERS (3 << 5)
+# define R300_RB3D_CCTL_CLRCMP_FLIPE_DISABLE (0 << 7)
+# define R300_RB3D_CCTL_CLRCMP_FLIPE_ENABLE (1 << 7)
+# define R300_RB3D_CCTL_AA_COMPRESSION_DISABLE (0 << 9)
+# define R300_RB3D_CCTL_AA_COMPRESSION_ENABLE (1 << 9)
+# define R300_RB3D_CCTL_CMASK_DISABLE (0 << 10)
+# define R300_RB3D_CCTL_CMASK_ENABLE (1 << 10)
+/* reserved */
+# define R300_RB3D_CCTL_INDEPENDENT_COLOR_CHANNEL_MASK_DISABLE (0 << 12)
+# define R300_RB3D_CCTL_INDEPENDENT_COLOR_CHANNEL_MASK_ENABLE (1 << 12)
+# define R300_RB3D_CCTL_WRITE_COMPRESSION_ENABLE (0 << 13)
+# define R300_RB3D_CCTL_WRITE_COMPRESSION_DISABLE (1 << 13)
+# define R300_RB3D_CCTL_INDEPENDENT_COLORFORMAT_ENABLE_DISABLE (0 << 14)
+# define R300_RB3D_CCTL_INDEPENDENT_COLORFORMAT_ENABLE_ENABLE (1 << 14)
+
+
+/* Notes:
+ * - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in
+ * the application
+ * - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND
+ *   are set to the same function
+ *   (both registers are always set up completely in any case)
+ * - Most blend flags are simply copied from R200 and not tested yet
+ */
+#define R300_RB3D_CBLEND 0x4E04
+#define R300_RB3D_ABLEND 0x4E08
+/* the following only appear in CBLEND */
+# define R300_ALPHA_BLEND_ENABLE (1 << 0)
+# define R300_SEPARATE_ALPHA_ENABLE (1 << 1)
+# define R300_READ_ENABLE (1 << 2)
+# define R300_DISCARD_SRC_PIXELS_DIS (0 << 3)
+# define R300_DISCARD_SRC_PIXELS_SRC_ALPHA_0 (1 << 3)
+# define R300_DISCARD_SRC_PIXELS_SRC_COLOR_0 (2 << 3)
+# define R300_DISCARD_SRC_PIXELS_SRC_ALPHA_COLOR_0 (3 << 3)
+# define R300_DISCARD_SRC_PIXELS_SRC_ALPHA_1 (4 << 3)
+# define R300_DISCARD_SRC_PIXELS_SRC_COLOR_1 (5 << 3)
+# define R300_DISCARD_SRC_PIXELS_SRC_ALPHA_COLOR_1 (6 << 3)
+
+/* the following are shared between CBLEND and ABLEND */
+# define R300_FCN_MASK (3 << 12)
+# define R300_COMB_FCN_ADD_CLAMP (0 << 12)
+# define R300_COMB_FCN_ADD_NOCLAMP (1 << 12)
+# define R300_COMB_FCN_SUB_CLAMP (2 << 12)
+# define R300_COMB_FCN_SUB_NOCLAMP (3 << 12)
+# define R300_COMB_FCN_MIN (4 << 12)
+# define R300_COMB_FCN_MAX (5 << 12)
+# define R300_COMB_FCN_RSUB_CLAMP (6 << 12)
+# define R300_COMB_FCN_RSUB_NOCLAMP (7 << 12)
+# define R300_BLEND_GL_ZERO (32)
+# define R300_BLEND_GL_ONE (33)
+# define R300_BLEND_GL_SRC_COLOR (34)
+# define R300_BLEND_GL_ONE_MINUS_SRC_COLOR (35)
+# define R300_BLEND_GL_DST_COLOR (36)
+# define R300_BLEND_GL_ONE_MINUS_DST_COLOR (37)
+# define R300_BLEND_GL_SRC_ALPHA (38)
+# define R300_BLEND_GL_ONE_MINUS_SRC_ALPHA (39)
+# define R300_BLEND_GL_DST_ALPHA (40)
+# define R300_BLEND_GL_ONE_MINUS_DST_ALPHA (41)
+# define R300_BLEND_GL_SRC_ALPHA_SATURATE (42)
+# define R300_BLEND_GL_CONST_COLOR (43)
+# define R300_BLEND_GL_ONE_MINUS_CONST_COLOR (44)
+# define R300_BLEND_GL_CONST_ALPHA (45)
+# define R300_BLEND_GL_ONE_MINUS_CONST_ALPHA (46)
+# define R300_BLEND_MASK (63)
+# define R300_SRC_BLEND_SHIFT (16)
+# define R300_DST_BLEND_SHIFT (24)
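+
+/* Hedged example (a sketch, not the canonical driver setup): standard
+ * "source alpha, one minus source alpha" blending composed from the shared
+ * fields above. The exact set of enable bits fglrx uses may differ.
+ */
+#if 0
+uint32_t cblend = R300_ALPHA_BLEND_ENABLE | R300_READ_ENABLE |
+                  R300_COMB_FCN_ADD_CLAMP |
+                  (R300_BLEND_GL_SRC_ALPHA << R300_SRC_BLEND_SHIFT) |
+                  (R300_BLEND_GL_ONE_MINUS_SRC_ALPHA << R300_DST_BLEND_SHIFT);
+uint32_t ablend = R300_COMB_FCN_ADD_CLAMP |
+                  (R300_BLEND_GL_SRC_ALPHA << R300_SRC_BLEND_SHIFT) |
+                  (R300_BLEND_GL_ONE_MINUS_SRC_ALPHA << R300_DST_BLEND_SHIFT);
+#endif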
+
+/* Constant color used by the blender. Pipelined through the blender.
+ * Note: For R520, this field is ignored, use RB3D_CONSTANT_COLOR_GB__BLUE,
+ * RB3D_CONSTANT_COLOR_GB__GREEN, etc. instead.
+ */
+#define R300_RB3D_BLEND_COLOR 0x4E10
+
+
+/* 3D Color Channel Mask. If all the channels used in the current color format
+ * are disabled, then the cb will discard all the incoming quads. Pipelined
+ * through the blender.
+ */
+#define RB3D_COLOR_CHANNEL_MASK 0x4E0C
+# define RB3D_COLOR_CHANNEL_MASK_BLUE_MASK0 (1 << 0)
+# define RB3D_COLOR_CHANNEL_MASK_GREEN_MASK0 (1 << 1)
+# define RB3D_COLOR_CHANNEL_MASK_RED_MASK0 (1 << 2)
+# define RB3D_COLOR_CHANNEL_MASK_ALPHA_MASK0 (1 << 3)
+# define RB3D_COLOR_CHANNEL_MASK_BLUE_MASK1 (1 << 4)
+# define RB3D_COLOR_CHANNEL_MASK_GREEN_MASK1 (1 << 5)
+# define RB3D_COLOR_CHANNEL_MASK_RED_MASK1 (1 << 6)
+# define RB3D_COLOR_CHANNEL_MASK_ALPHA_MASK1 (1 << 7)
+# define RB3D_COLOR_CHANNEL_MASK_BLUE_MASK2 (1 << 8)
+# define RB3D_COLOR_CHANNEL_MASK_GREEN_MASK2 (1 << 9)
+# define RB3D_COLOR_CHANNEL_MASK_RED_MASK2 (1 << 10)
+# define RB3D_COLOR_CHANNEL_MASK_ALPHA_MASK2 (1 << 11)
+# define RB3D_COLOR_CHANNEL_MASK_BLUE_MASK3 (1 << 12)
+# define RB3D_COLOR_CHANNEL_MASK_GREEN_MASK3 (1 << 13)
+# define RB3D_COLOR_CHANNEL_MASK_RED_MASK3 (1 << 14)
+# define RB3D_COLOR_CHANNEL_MASK_ALPHA_MASK3 (1 << 15)
+
+/* Clear color that is used when the color mask is set to 00. Unpipelined.
+ * Program this register with a 32-bit value in ARGB8888 or ARGB2101010
+ * formats, ignoring the fields.
+ */
+#define RB3D_COLOR_CLEAR_VALUE 0x4e14
+
+/* gap */
+
+/* Color Compare Color. Stalls the 2d/3d datapath until it is idle. */
+#define RB3D_CLRCMP_CLR 0x4e20
+
+/* Color Compare Mask. Stalls the 2d/3d datapath until it is idle. */
+#define RB3D_CLRCMP_MSK 0x4e24
+
+/* Color Buffer Address Offset of multibuffer 0. Unpipelined. */
+#define R300_RB3D_COLOROFFSET0 0x4E28
+# define R300_COLOROFFSET_MASK 0xFFFFFFE0
+/* Color Buffer Address Offset of multibuffer 1. Unpipelined. */
+#define R300_RB3D_COLOROFFSET1 0x4E2C
+/* Color Buffer Address Offset of multibuffer 2. Unpipelined. */
+#define R300_RB3D_COLOROFFSET2 0x4E30
+/* Color Buffer Address Offset of multibuffer 3. Unpipelined. */
+#define R300_RB3D_COLOROFFSET3 0x4E34
+
+/* Color buffer format and tiling control for all the multibuffers and the
+ * pitch of multibuffer 0 to 3. Unpipelined. The cache must be empty before any
+ * of the registers are changed.
+ *
+ * Bit 16: Larger tiles
+ * Bit 17: 4x2 tiles
+ * Bit 18: Extremely weird tile-like layout, but some pixels duplicated?
+ */
+#define R300_RB3D_COLORPITCH0 0x4E38
+# define R300_COLORPITCH_MASK 0x00003FFE
+# define R300_COLOR_TILE_DISABLE (0 << 16)
+# define R300_COLOR_TILE_ENABLE (1 << 16)
+# define R300_COLOR_MICROTILE_DISABLE (0 << 17)
+# define R300_COLOR_MICROTILE_ENABLE (1 << 17)
+# define R300_COLOR_MICROTILE_ENABLE_SQUARE (2 << 17) /* Only available in 16-bit */
+# define R300_COLOR_ENDIAN_NO_SWAP (0 << 19)
+# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 19)
+# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 19)
+# define R300_COLOR_ENDIAN_HALF_DWORD_SWAP (3 << 19)
+# define R500_COLOR_FORMAT_ARGB10101010 (0 << 21)
+# define R500_COLOR_FORMAT_UV1010 (1 << 21)
+# define R500_COLOR_FORMAT_CI8 (2 << 21) /* 2D only */
+# define R300_COLOR_FORMAT_ARGB1555 (3 << 21)
+# define R300_COLOR_FORMAT_RGB565 (4 << 21)
+# define R500_COLOR_FORMAT_ARGB2101010 (5 << 21)
+# define R300_COLOR_FORMAT_ARGB8888 (6 << 21)
+# define R300_COLOR_FORMAT_ARGB32323232 (7 << 21)
+/* reserved */
+# define R300_COLOR_FORMAT_I8 (9 << 21)
+# define R300_COLOR_FORMAT_ARGB16161616 (10 << 21)
+# define R300_COLOR_FORMAT_VYUY (11 << 21)
+# define R300_COLOR_FORMAT_YVYU (12 << 21)
+# define R300_COLOR_FORMAT_UV88 (13 << 21)
+# define R500_COLOR_FORMAT_I10 (14 << 21)
+# define R300_COLOR_FORMAT_ARGB4444 (15 << 21)
+#define R300_RB3D_COLORPITCH1 0x4E3C
+#define R300_RB3D_COLORPITCH2 0x4E40
+#define R300_RB3D_COLORPITCH3 0x4E44
+
+/* gap */
+
+/* Destination Color Buffer Cache Control/Status. If the cb is in e2 mode, then
+ * a flush or free will not occur upon a write to this register, but a sync
+ * will be immediately sent if one is requested. If both DC_FLUSH and DC_FREE
+ * are zero but DC_FINISH is one, then a sync will be sent immediately -- the
+ * cb will not wait for all the previous operations to complete before sending
+ * the sync. Unpipelined except when DC_FINISH and DC_FREE are both set to
+ * zero.
+ *
+ * Set to 0A before 3D operations, set to 02 afterwards.
+ */
+#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c
+# define R300_RB3D_DSTCACHE_CTLSTAT_DC_FLUSH_NO_EFFECT (0 << 0)
+# define R300_RB3D_DSTCACHE_CTLSTAT_DC_FLUSH_NO_EFFECT_1 (1 << 0)
+# define R300_RB3D_DSTCACHE_CTLSTAT_DC_FLUSH_FLUSH_DIRTY_3D (2 << 0)
+# define R300_RB3D_DSTCACHE_CTLSTAT_DC_FLUSH_FLUSH_DIRTY_3D_1 (3 << 0)
+# define R300_RB3D_DSTCACHE_CTLSTAT_DC_FREE_NO_EFFECT (0 << 2)
+# define R300_RB3D_DSTCACHE_CTLSTAT_DC_FREE_NO_EFFECT_1 (1 << 2)
+# define R300_RB3D_DSTCACHE_CTLSTAT_DC_FREE_FREE_3D_TAGS (2 << 2)
+# define R300_RB3D_DSTCACHE_CTLSTAT_DC_FREE_FREE_3D_TAGS_1 (3 << 2)
+# define R300_RB3D_DSTCACHE_CTLSTAT_DC_FINISH_NO_SIGNAL (0 << 4)
+# define R300_RB3D_DSTCACHE_CTLSTAT_DC_FINISH_SIGNAL (1 << 4)
+
+#define R300_RB3D_DITHER_CTL 0x4E50
+# define R300_RB3D_DITHER_CTL_DITHER_MODE_TRUNCATE (0 << 0)
+# define R300_RB3D_DITHER_CTL_DITHER_MODE_ROUND (1 << 0)
+# define R300_RB3D_DITHER_CTL_DITHER_MODE_LUT (2 << 0)
+/* reserved */
+# define R300_RB3D_DITHER_CTL_ALPHA_DITHER_MODE_TRUNCATE (0 << 2)
+# define R300_RB3D_DITHER_CTL_ALPHA_DITHER_MODE_ROUND (1 << 2)
+# define R300_RB3D_DITHER_CTL_ALPHA_DITHER_MODE_LUT (2 << 2)
+/* reserved */
+
+/* Resolve buffer destination address. The cache must be empty before changing
+ * this register if the cb is in resolve mode. Unpipelined
+ */
+#define R300_RB3D_AARESOLVE_OFFSET 0x4e80
+# define R300_RB3D_AARESOLVE_OFFSET_SHIFT 5
+# define R300_RB3D_AARESOLVE_OFFSET_MASK 0xffffffe0 /* At least according to the calculations of Christoph Brill */
+
+/* Resolve Buffer Pitch and Tiling Control. The cache must be empty before
+ * changing this register if the cb is in resolve mode. Unpipelined
+ */
+#define R300_RB3D_AARESOLVE_PITCH 0x4e84
+# define R300_RB3D_AARESOLVE_PITCH_SHIFT 1
+# define R300_RB3D_AARESOLVE_PITCH_MASK 0x00003ffe /* At least according to the calculations of Christoph Brill */
+
+/* Resolve Buffer Control. Unpipelined */
+#define R300_RB3D_AARESOLVE_CTL 0x4e88
+# define R300_RB3D_AARESOLVE_CTL_AARESOLVE_MODE_NORMAL (0 << 0)
+# define R300_RB3D_AARESOLVE_CTL_AARESOLVE_MODE_RESOLVE (1 << 0)
+# define R300_RB3D_AARESOLVE_CTL_AARESOLVE_GAMMA_10 (0 << 1)
+# define R300_RB3D_AARESOLVE_CTL_AARESOLVE_GAMMA_22 (1 << 1)
+# define R300_RB3D_AARESOLVE_CTL_AARESOLVE_ALPHA_SAMPLE0 (0 << 2)
+# define R300_RB3D_AARESOLVE_CTL_AARESOLVE_ALPHA_AVERAGE (1 << 2)
+
+
+/* Discard src pixels less than or equal to threshold. */
+#define R500_RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD 0x4ea0
+/* Discard src pixels greater than or equal to threshold. */
+#define R500_RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD 0x4ea4
+# define R500_RB3D_DISCARD_SRC_PIXEL_THRESHOLD_BLUE_SHIFT 0
+# define R500_RB3D_DISCARD_SRC_PIXEL_THRESHOLD_BLUE_MASK 0x000000ff
+# define R500_RB3D_DISCARD_SRC_PIXEL_THRESHOLD_GREEN_SHIFT 8
+# define R500_RB3D_DISCARD_SRC_PIXEL_THRESHOLD_GREEN_MASK 0x0000ff00
+# define R500_RB3D_DISCARD_SRC_PIXEL_THRESHOLD_RED_SHIFT 16
+# define R500_RB3D_DISCARD_SRC_PIXEL_THRESHOLD_RED_MASK 0x00ff0000
+# define R500_RB3D_DISCARD_SRC_PIXEL_THRESHOLD_ALPHA_SHIFT 24
+# define R500_RB3D_DISCARD_SRC_PIXEL_THRESHOLD_ALPHA_MASK 0xff000000
+
+/* 3D ROP Control. Stalls the 2d/3d datapath until it is idle. */
+#define R300_RB3D_ROPCNTL 0x4e18
+# define R300_RB3D_ROPCNTL_ROP_ENABLE 0x00000004
+# define R300_RB3D_ROPCNTL_ROP_MASK (15 << 8)
+# define R300_RB3D_ROPCNTL_ROP_SHIFT 8
+
+/* Color Compare Flip. Stalls the 2d/3d datapath until it is idle. */
+#define R300_RB3D_CLRCMP_FLIPE 0x4e1c
+
+/* Sets the fifo sizes */
+#define R500_RB3D_FIFO_SIZE 0x4ef4
+# define R500_RB3D_FIFO_SIZE_OP_FIFO_SIZE_FULL (0 << 0)
+# define R500_RB3D_FIFO_SIZE_OP_FIFO_SIZE_HALF (1 << 0)
+# define R500_RB3D_FIFO_SIZE_OP_FIFO_SIZE_QUATER (2 << 0)
+# define R500_RB3D_FIFO_SIZE_OP_FIFO_SIZE_EIGTHS (3 << 0)
+
+/* Constant color used by the blender. Pipelined through the blender. */
+#define R500_RB3D_CONSTANT_COLOR_AR 0x4ef8
+# define R500_RB3D_CONSTANT_COLOR_AR_RED_MASK 0x0000ffff
+# define R500_RB3D_CONSTANT_COLOR_AR_RED_SHIFT 0
+# define R500_RB3D_CONSTANT_COLOR_AR_ALPHA_MASK 0xffff0000
+# define R500_RB3D_CONSTANT_COLOR_AR_ALPHA_SHIFT 16
+
+/* Constant color used by the blender. Pipelined through the blender. */
+#define R500_RB3D_CONSTANT_COLOR_GB 0x4efc
+# define R500_RB3D_CONSTANT_COLOR_AR_BLUE_MASK 0x0000ffff
+# define R500_RB3D_CONSTANT_COLOR_AR_BLUE_SHIFT 0
+# define R500_RB3D_CONSTANT_COLOR_AR_GREEN_MASK 0xffff0000
+# define R500_RB3D_CONSTANT_COLOR_AR_GREEN_SHIFT 16
+
+/* gap */
+/* There seems to be no "write only" setting, so use Z-test = ALWAYS
+ * for this.
+ * Bit (1<<8) is the "test" bit, so a plain write is 6. - vd
+ */
+#define R300_ZB_CNTL 0x4F00
+# define R300_STENCIL_ENABLE (1 << 0)
+# define R300_Z_ENABLE (1 << 1)
+# define R300_Z_WRITE_ENABLE (1 << 2)
+# define R300_Z_SIGNED_COMPARE (1 << 3)
+# define R300_STENCIL_FRONT_BACK (1 << 4)
+
+#define R300_ZB_ZSTENCILCNTL 0x4f04
+ /* functions */
+# define R300_ZS_NEVER 0
+# define R300_ZS_LESS 1
+# define R300_ZS_LEQUAL 2
+# define R300_ZS_EQUAL 3
+# define R300_ZS_GEQUAL 4
+# define R300_ZS_GREATER 5
+# define R300_ZS_NOTEQUAL 6
+# define R300_ZS_ALWAYS 7
+# define R300_ZS_MASK 7
+ /* operations */
+# define R300_ZS_KEEP 0
+# define R300_ZS_ZERO 1
+# define R300_ZS_REPLACE 2
+# define R300_ZS_INCR 3
+# define R300_ZS_DECR 4
+# define R300_ZS_INVERT 5
+# define R300_ZS_INCR_WRAP 6
+# define R300_ZS_DECR_WRAP 7
+# define R300_Z_FUNC_SHIFT 0
+ /* front and back refer to operations done for front
+ and back faces, i.e. separate stencil function support */
+# define R300_S_FRONT_FUNC_SHIFT 3
+# define R300_S_FRONT_SFAIL_OP_SHIFT 6
+# define R300_S_FRONT_ZPASS_OP_SHIFT 9
+# define R300_S_FRONT_ZFAIL_OP_SHIFT 12
+# define R300_S_BACK_FUNC_SHIFT 15
+# define R300_S_BACK_SFAIL_OP_SHIFT 18
+# define R300_S_BACK_ZPASS_OP_SHIFT 21
+# define R300_S_BACK_ZFAIL_OP_SHIFT 24
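+
+/* Hedged example (illustrative only): depth test "less", depth writes on,
+ * stencil disabled, composed from the function/operation codes and shifts
+ * above.
+ */
+#if 0
+uint32_t zb_cntl = R300_Z_ENABLE | R300_Z_WRITE_ENABLE;
+uint32_t zstencil = (R300_ZS_LESS << R300_Z_FUNC_SHIFT) |
+                    (R300_ZS_ALWAYS << R300_S_FRONT_FUNC_SHIFT) |
+                    (R300_ZS_KEEP << R300_S_FRONT_SFAIL_OP_SHIFT) |
+                    (R300_ZS_KEEP << R300_S_FRONT_ZPASS_OP_SHIFT) |
+                    (R300_ZS_KEEP << R300_S_FRONT_ZFAIL_OP_SHIFT);
+#endif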
+
+#define R300_ZB_STENCILREFMASK 0x4f08
+# define R300_STENCILREF_SHIFT 0
+# define R300_STENCILREF_MASK 0x000000ff
+# define R300_STENCILMASK_SHIFT 8
+# define R300_STENCILMASK_MASK 0x0000ff00
+# define R300_STENCILWRITEMASK_SHIFT 16
+# define R300_STENCILWRITEMASK_MASK 0x00ff0000
+
+/* gap */
+
+#define R300_ZB_FORMAT 0x4f10
+# define R300_DEPTHFORMAT_16BIT_INT_Z (0 << 0)
+# define R300_DEPTHFORMAT_16BIT_13E3 (1 << 0)
+# define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL (2 << 0)
+/* reserved up to (15 << 0) */
+# define R300_INVERT_13E3_LEADING_ONES (0 << 4)
+# define R300_INVERT_13E3_LEADING_ZEROS (1 << 4)
+
+#define R300_ZB_ZTOP 0x4F14
+# define R300_ZTOP_DISABLE (0 << 0)
+# define R300_ZTOP_ENABLE (1 << 0)
+
+/* gap */
+
+#define R300_ZB_ZCACHE_CTLSTAT 0x4f18
+# define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT (0 << 0)
+# define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0)
+# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT (0 << 1)
+# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE (1 << 1)
+# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE (0 << 31)
+# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY (1 << 31)
+
+#define R300_ZB_BW_CNTL 0x4f1c
+# define R300_HIZ_DISABLE (0 << 0)
+# define R300_HIZ_ENABLE (1 << 0)
+# define R300_HIZ_MIN (0 << 1)
+# define R300_HIZ_MAX (1 << 1)
+# define R300_FAST_FILL_DISABLE (0 << 2)
+# define R300_FAST_FILL_ENABLE (1 << 2)
+# define R300_RD_COMP_DISABLE (0 << 3)
+# define R300_RD_COMP_ENABLE (1 << 3)
+# define R300_WR_COMP_DISABLE (0 << 4)
+# define R300_WR_COMP_ENABLE (1 << 4)
+# define R300_ZB_CB_CLEAR_RMW (0 << 5)
+# define R300_ZB_CB_CLEAR_CACHE_LINEAR (1 << 5)
+# define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE (0 << 6)
+# define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE (1 << 6)
+
+# define R500_ZEQUAL_OPTIMIZE_ENABLE (0 << 7)
+# define R500_ZEQUAL_OPTIMIZE_DISABLE (1 << 7)
+# define R500_SEQUAL_OPTIMIZE_ENABLE (0 << 8)
+# define R500_SEQUAL_OPTIMIZE_DISABLE (1 << 8)
+
+# define R500_BMASK_ENABLE (0 << 10)
+# define R500_BMASK_DISABLE (1 << 10)
+# define R500_HIZ_EQUAL_REJECT_DISABLE (0 << 11)
+# define R500_HIZ_EQUAL_REJECT_ENABLE (1 << 11)
+# define R500_HIZ_FP_EXP_BITS_DISABLE (0 << 12)
+# define R500_HIZ_FP_EXP_BITS_1 (1 << 12)
+# define R500_HIZ_FP_EXP_BITS_2 (2 << 12)
+# define R500_HIZ_FP_EXP_BITS_3 (3 << 12)
+# define R500_HIZ_FP_EXP_BITS_4 (4 << 12)
+# define R500_HIZ_FP_EXP_BITS_5 (5 << 12)
+# define R500_HIZ_FP_INVERT_LEADING_ONES (0 << 15)
+# define R500_HIZ_FP_INVERT_LEADING_ZEROS (1 << 15)
+# define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE (0 << 16)
+# define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE (1 << 16)
+# define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE (0 << 17)
+# define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE (1 << 17)
+# define R500_PEQ_PACKING_DISABLE (0 << 18)
+# define R500_PEQ_PACKING_ENABLE (1 << 18)
+# define R500_COVERED_PTR_MASKING_DISABLE (0 << 18)
+# define R500_COVERED_PTR_MASKING_ENABLE (1 << 18)
+
+
+/* gap */
+
+/* Z Buffer Address Offset.
+ * Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles.
+ */
+#define R300_ZB_DEPTHOFFSET 0x4f20
+
+/* Z Buffer Pitch and Endian Control */
+#define R300_ZB_DEPTHPITCH 0x4f24
+# define R300_DEPTHPITCH_MASK 0x00003FFC
+# define R300_DEPTHMACROTILE_DISABLE (0 << 16)
+# define R300_DEPTHMACROTILE_ENABLE (1 << 16)
+# define R300_DEPTHMICROTILE_LINEAR (0 << 17)
+# define R300_DEPTHMICROTILE_TILED (1 << 17)
+# define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17)
+# define R300_DEPTHENDIAN_NO_SWAP (0 << 18)
+# define R300_DEPTHENDIAN_WORD_SWAP (1 << 18)
+# define R300_DEPTHENDIAN_DWORD_SWAP (2 << 18)
+# define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18)
+
+/* Z Buffer Clear Value */
+#define R300_ZB_DEPTHCLEARVALUE 0x4f28
+
+/* Hierarchical Z Memory Offset */
+#define R300_ZB_HIZ_OFFSET 0x4f44
+
+/* Hierarchical Z Write Index */
+#define R300_ZB_HIZ_WRINDEX 0x4f48
+
+/* Hierarchical Z Data */
+#define R300_ZB_HIZ_DWORD 0x4f4c
+
+/* Hierarchical Z Read Index */
+#define R300_ZB_HIZ_RDINDEX 0x4f50
+
+/* Hierarchical Z Pitch */
+#define R300_ZB_HIZ_PITCH 0x4f54
+
+/* Z Buffer Z Pass Counter Data */
+#define R300_ZB_ZPASS_DATA 0x4f58
+
+/* Z Buffer Z Pass Counter Address */
+#define R300_ZB_ZPASS_ADDR 0x4f5c
+
+/* Depth buffer X and Y coordinate offset */
+#define R300_ZB_DEPTHXY_OFFSET 0x4f60
+# define R300_DEPTHX_OFFSET_SHIFT 1
+# define R300_DEPTHX_OFFSET_MASK 0x000007FE
+# define R300_DEPTHY_OFFSET_SHIFT 17
+# define R300_DEPTHY_OFFSET_MASK 0x07FE0000
+
+/* Sets the fifo sizes */
+#define R500_ZB_FIFO_SIZE 0x4fd0
+# define R500_OP_FIFO_SIZE_FULL (0 << 0)
+# define R500_OP_FIFO_SIZE_HALF (1 << 0)
+# define R500_OP_FIFO_SIZE_QUATER (2 << 0)
+# define R500_OP_FIFO_SIZE_EIGTHS (4 << 0)
+
+/* Stencil Reference Value and Mask for backfacing quads */
+/* R300_ZB_STENCILREFMASK handles front face */
+#define R500_ZB_STENCILREFMASK_BF 0x4fd4
+# define R500_STENCILREF_SHIFT 0
+# define R500_STENCILREF_MASK 0x000000ff
+# define R500_STENCILMASK_SHIFT 8
+# define R500_STENCILMASK_MASK 0x0000ff00
+# define R500_STENCILWRITEMASK_SHIFT 16
+# define R500_STENCILWRITEMASK_MASK 0x00ff0000
+
+/**
+ * \defgroup R3XX_R5XX_PROGRAMMABLE_VERTEX_SHADER_DESCRIPTION R3XX-R5XX PROGRAMMABLE VERTEX SHADER DESCRIPTION
+ *
+ * The PVS_DST_MATH_INST is used to identify whether the instruction is a Vector
+ * Engine instruction or a Math Engine instruction.
+ */
+
+/*\{*/
+
+enum {
+ /* R3XX */
+ VECTOR_NO_OP = 0,
+ VE_DOT_PRODUCT = 1,
+ VE_MULTIPLY = 2,
+ VE_ADD = 3,
+ VE_MULTIPLY_ADD = 4,
+ VE_DISTANCE_VECTOR = 5,
+ VE_FRACTION = 6,
+ VE_MAXIMUM = 7,
+ VE_MINIMUM = 8,
+ VE_SET_GREATER_THAN_EQUAL = 9,
+ VE_SET_LESS_THAN = 10,
+ VE_MULTIPLYX2_ADD = 11,
+ VE_MULTIPLY_CLAMP = 12,
+ VE_FLT2FIX_DX = 13,
+ VE_FLT2FIX_DX_RND = 14,
+ /* R5XX */
+ VE_PRED_SET_EQ_PUSH = 15,
+ VE_PRED_SET_GT_PUSH = 16,
+ VE_PRED_SET_GTE_PUSH = 17,
+ VE_PRED_SET_NEQ_PUSH = 18,
+ VE_COND_WRITE_EQ = 19,
+ VE_COND_WRITE_GT = 20,
+ VE_COND_WRITE_GTE = 21,
+ VE_COND_WRITE_NEQ = 22,
+ VE_COND_MUX_EQ = 23,
+ VE_COND_MUX_GT = 24,
+ VE_COND_MUX_GTE = 25,
+ VE_SET_GREATER_THAN = 26,
+ VE_SET_EQUAL = 27,
+ VE_SET_NOT_EQUAL = 28,
+};
+
+enum {
+ /* R3XX */
+ MATH_NO_OP = 0,
+ ME_EXP_BASE2_DX = 1,
+ ME_LOG_BASE2_DX = 2,
+ ME_EXP_BASEE_FF = 3,
+ ME_LIGHT_COEFF_DX = 4,
+ ME_POWER_FUNC_FF = 5,
+ ME_RECIP_DX = 6,
+ ME_RECIP_FF = 7,
+ ME_RECIP_SQRT_DX = 8,
+ ME_RECIP_SQRT_FF = 9,
+ ME_MULTIPLY = 10,
+ ME_EXP_BASE2_FULL_DX = 11,
+ ME_LOG_BASE2_FULL_DX = 12,
+ ME_POWER_FUNC_FF_CLAMP_B = 13,
+ ME_POWER_FUNC_FF_CLAMP_B1 = 14,
+ ME_POWER_FUNC_FF_CLAMP_01 = 15,
+ ME_SIN = 16,
+ ME_COS = 17,
+ /* R5XX */
+ ME_LOG_BASE2_IEEE = 18,
+ ME_RECIP_IEEE = 19,
+ ME_RECIP_SQRT_IEEE = 20,
+ ME_PRED_SET_EQ = 21,
+ ME_PRED_SET_GT = 22,
+ ME_PRED_SET_GTE = 23,
+ ME_PRED_SET_NEQ = 24,
+ ME_PRED_SET_CLR = 25,
+ ME_PRED_SET_INV = 26,
+ ME_PRED_SET_POP = 27,
+ ME_PRED_SET_RESTORE = 28,
+};
+
+enum {
+ /* R3XX */
+ PVS_MACRO_OP_2CLK_MADD = 0,
+ PVS_MACRO_OP_2CLK_M2X_ADD = 1,
+};
+
+enum {
+ PVS_SRC_REG_TEMPORARY = 0, /* Intermediate Storage */
+ PVS_SRC_REG_INPUT = 1, /* Input Vertex Storage */
+ PVS_SRC_REG_CONSTANT = 2, /* Constant State Storage */
+ PVS_SRC_REG_ALT_TEMPORARY = 3, /* Alternate Intermediate Storage */
+};
+
+enum {
+ PVS_DST_REG_TEMPORARY = 0, /* Intermediate Storage */
+ PVS_DST_REG_A0 = 1, /* Address Register Storage */
+ PVS_DST_REG_OUT = 2, /* Output Memory. Used for all outputs */
+ PVS_DST_REG_OUT_REPL_X = 3, /* Output Memory & Replicate X to all channels */
+ PVS_DST_REG_ALT_TEMPORARY = 4, /* Alternate Intermediate Storage */
+ PVS_DST_REG_INPUT = 5, /* Input Memory */
+};
+
+enum {
+ PVS_SRC_SELECT_X = 0, /* Select X Component */
+ PVS_SRC_SELECT_Y = 1, /* Select Y Component */
+ PVS_SRC_SELECT_Z = 2, /* Select Z Component */
+ PVS_SRC_SELECT_W = 3, /* Select W Component */
+ PVS_SRC_SELECT_FORCE_0 = 4, /* Force Component to 0.0 */
+ PVS_SRC_SELECT_FORCE_1 = 5, /* Force Component to 1.0 */
+};
+
+/* PVS Opcode & Destination Operand Description */
+
+enum {
+ PVS_DST_OPCODE_MASK = 0x3f,
+ PVS_DST_OPCODE_SHIFT = 0,
+ PVS_DST_MATH_INST_MASK = 0x1,
+ PVS_DST_MATH_INST_SHIFT = 6,
+ PVS_DST_MACRO_INST_MASK = 0x1,
+ PVS_DST_MACRO_INST_SHIFT = 7,
+ PVS_DST_REG_TYPE_MASK = 0xf,
+ PVS_DST_REG_TYPE_SHIFT = 8,
+ PVS_DST_ADDR_MODE_1_MASK = 0x1,
+ PVS_DST_ADDR_MODE_1_SHIFT = 12,
+ PVS_DST_OFFSET_MASK = 0x7f,
+ PVS_DST_OFFSET_SHIFT = 13,
+ PVS_DST_WE_X_MASK = 0x1,
+ PVS_DST_WE_X_SHIFT = 20,
+ PVS_DST_WE_Y_MASK = 0x1,
+ PVS_DST_WE_Y_SHIFT = 21,
+ PVS_DST_WE_Z_MASK = 0x1,
+ PVS_DST_WE_Z_SHIFT = 22,
+ PVS_DST_WE_W_MASK = 0x1,
+ PVS_DST_WE_W_SHIFT = 23,
+ PVS_DST_VE_SAT_MASK = 0x1,
+ PVS_DST_VE_SAT_SHIFT = 24,
+ PVS_DST_ME_SAT_MASK = 0x1,
+ PVS_DST_ME_SAT_SHIFT = 25,
+ PVS_DST_PRED_ENABLE_MASK = 0x1,
+ PVS_DST_PRED_ENABLE_SHIFT = 26,
+ PVS_DST_PRED_SENSE_MASK = 0x1,
+ PVS_DST_PRED_SENSE_SHIFT = 27,
+ PVS_DST_DUAL_MATH_OP_MASK = 0x3,
+ PVS_DST_DUAL_MATH_OP_SHIFT = 27,
+ PVS_DST_ADDR_SEL_MASK = 0x3,
+ PVS_DST_ADDR_SEL_SHIFT = 29,
+ PVS_DST_ADDR_MODE_0_MASK = 0x1,
+ PVS_DST_ADDR_MODE_0_SHIFT = 31,
+};
+
+/* PVS Source Operand Description */
+
+enum {
+ PVS_SRC_REG_TYPE_MASK = 0x3,
+ PVS_SRC_REG_TYPE_SHIFT = 0,
+ SPARE_0_MASK = 0x1,
+ SPARE_0_SHIFT = 2,
+ PVS_SRC_ABS_XYZW_MASK = 0x1,
+ PVS_SRC_ABS_XYZW_SHIFT = 3,
+ PVS_SRC_ADDR_MODE_0_MASK = 0x1,
+ PVS_SRC_ADDR_MODE_0_SHIFT = 4,
+ PVS_SRC_OFFSET_MASK = 0xff,
+ PVS_SRC_OFFSET_SHIFT = 5,
+ PVS_SRC_SWIZZLE_X_MASK = 0x7,
+ PVS_SRC_SWIZZLE_X_SHIFT = 13,
+ PVS_SRC_SWIZZLE_Y_MASK = 0x7,
+ PVS_SRC_SWIZZLE_Y_SHIFT = 16,
+ PVS_SRC_SWIZZLE_Z_MASK = 0x7,
+ PVS_SRC_SWIZZLE_Z_SHIFT = 19,
+ PVS_SRC_SWIZZLE_W_MASK = 0x7,
+ PVS_SRC_SWIZZLE_W_SHIFT = 22,
+ PVS_SRC_MODIFIER_X_MASK = 0x1,
+ PVS_SRC_MODIFIER_X_SHIFT = 25,
+ PVS_SRC_MODIFIER_Y_MASK = 0x1,
+ PVS_SRC_MODIFIER_Y_SHIFT = 26,
+ PVS_SRC_MODIFIER_Z_MASK = 0x1,
+ PVS_SRC_MODIFIER_Z_SHIFT = 27,
+ PVS_SRC_MODIFIER_W_MASK = 0x1,
+ PVS_SRC_MODIFIER_W_SHIFT = 28,
+ PVS_SRC_ADDR_SEL_MASK = 0x3,
+ PVS_SRC_ADDR_SEL_SHIFT = 29,
+ PVS_SRC_ADDR_MODE_1_MASK = 0x0,
+ PVS_SRC_ADDR_MODE_1_SHIFT = 32,
+};
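+
+/* Hedged example (a sketch only): packing a PVS source operand that reads
+ * temporary register 3 with an identity .xyzw swizzle, using the masks and
+ * shifts above.
+ */
+#if 0
+uint32_t pvs_src = (PVS_SRC_REG_TEMPORARY << PVS_SRC_REG_TYPE_SHIFT) |
+                   (3 << PVS_SRC_OFFSET_SHIFT) |
+                   (PVS_SRC_SELECT_X << PVS_SRC_SWIZZLE_X_SHIFT) |
+                   (PVS_SRC_SELECT_Y << PVS_SRC_SWIZZLE_Y_SHIFT) |
+                   (PVS_SRC_SELECT_Z << PVS_SRC_SWIZZLE_Z_SHIFT) |
+                   (PVS_SRC_SELECT_W << PVS_SRC_SWIZZLE_W_SHIFT);
+#endif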
+
+/*\}*/
+
+/* BEGIN: Packet 3 commands */
+
+/* A primitive emission dword. */
+#define R300_PRIM_TYPE_NONE (0 << 0)
+#define R300_PRIM_TYPE_POINT (1 << 0)
+#define R300_PRIM_TYPE_LINE (2 << 0)
+#define R300_PRIM_TYPE_LINE_STRIP (3 << 0)
+#define R300_PRIM_TYPE_TRI_LIST (4 << 0)
+#define R300_PRIM_TYPE_TRI_FAN (5 << 0)
+#define R300_PRIM_TYPE_TRI_STRIP (6 << 0)
+#define R300_PRIM_TYPE_TRI_TYPE2 (7 << 0)
+#define R300_PRIM_TYPE_RECT_LIST (8 << 0)
+#define R300_PRIM_TYPE_3VRT_POINT_LIST (9 << 0)
+#define R300_PRIM_TYPE_3VRT_LINE_LIST (10 << 0)
+ /* GUESS (based on r200) */
+#define R300_PRIM_TYPE_POINT_SPRITES (11 << 0)
+#define R300_PRIM_TYPE_LINE_LOOP (12 << 0)
+#define R300_PRIM_TYPE_QUADS (13 << 0)
+#define R300_PRIM_TYPE_QUAD_STRIP (14 << 0)
+#define R300_PRIM_TYPE_POLYGON (15 << 0)
+#define R300_PRIM_TYPE_MASK 0xF
+#define R300_PRIM_WALK_IND (1 << 4)
+#define R300_PRIM_WALK_LIST (2 << 4)
+#define R300_PRIM_WALK_RING (3 << 4)
+#define R300_PRIM_WALK_MASK (3 << 4)
+ /* GUESS (based on r200) */
+#define R300_PRIM_COLOR_ORDER_BGRA (0 << 6)
+#define R300_PRIM_COLOR_ORDER_RGBA (1 << 6)
+#define R300_PRIM_NUM_VERTICES_SHIFT 16
+#define R300_PRIM_NUM_VERTICES_MASK 0xffff
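+
+/* Hedged example (illustrative only): a primitive emission dword for an
+ * indexed triangle list with 36 vertices.
+ */
+#if 0
+uint32_t vap_vf_cntl = R300_PRIM_TYPE_TRI_LIST | R300_PRIM_WALK_IND |
+                       (36 << R300_PRIM_NUM_VERTICES_SHIFT);
+#endif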
+
+
+
+/*
+ * The R500 unified shader (US) registers come in banks of 512 each, one
+ * for each instruction slot in the shader. You can't touch them directly.
+ * R500_US_VECTOR_INDEX() sets the base instruction to modify; successive
+ * writes to R500_GA_US_VECTOR_DATA autoincrement the index after the
+ * instruction is fully specified.
+ */
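+/* A hedged sketch (not from documentation) of the indexed upload scheme
+ * described above, assuming the CS helpers from r300_cs.h and that each
+ * instruction slot is specified by a fixed group of dwords written
+ * back-to-back; first_slot, ndwords and inst_dwords are placeholders.
+ */
+#if 0
+    OUT_CS_REG(R500_US_VECTOR_INDEX, first_slot);
+    for (i = 0; i < ndwords; i++) {
+        /* the index autoincrements once a slot has been fully specified */
+        OUT_CS_REG(R500_GA_US_VECTOR_DATA, inst_dwords[i]);
+    }
+#endif
+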
+#define R500_US_ALU_ALPHA_INST_0 0xa800
+# define R500_ALPHA_OP_MAD 0
+# define R500_ALPHA_OP_DP 1
+# define R500_ALPHA_OP_MIN 2
+# define R500_ALPHA_OP_MAX 3
+/* #define R500_ALPHA_OP_RESERVED 4 */
+# define R500_ALPHA_OP_CND 5
+# define R500_ALPHA_OP_CMP 6
+# define R500_ALPHA_OP_FRC 7
+# define R500_ALPHA_OP_EX2 8
+# define R500_ALPHA_OP_LN2 9
+# define R500_ALPHA_OP_RCP 10
+# define R500_ALPHA_OP_RSQ 11
+# define R500_ALPHA_OP_SIN 12
+# define R500_ALPHA_OP_COS 13
+# define R500_ALPHA_OP_MDH 14
+# define R500_ALPHA_OP_MDV 15
+# define R500_ALPHA_ADDRD(x) ((x) << 4)
+# define R500_ALPHA_ADDRD_REL (1 << 11)
+# define R500_ALPHA_SEL_A_SHIFT 12
+# define R500_ALPHA_SEL_A_SRC0 (0 << 12)
+# define R500_ALPHA_SEL_A_SRC1 (1 << 12)
+# define R500_ALPHA_SEL_A_SRC2 (2 << 12)
+# define R500_ALPHA_SEL_A_SRCP (3 << 12)
+# define R500_ALPHA_SWIZ_A_R (0 << 14)
+# define R500_ALPHA_SWIZ_A_G (1 << 14)
+# define R500_ALPHA_SWIZ_A_B (2 << 14)
+# define R500_ALPHA_SWIZ_A_A (3 << 14)
+# define R500_ALPHA_SWIZ_A_0 (4 << 14)
+# define R500_ALPHA_SWIZ_A_HALF (5 << 14)
+# define R500_ALPHA_SWIZ_A_1 (6 << 14)
+/* #define R500_ALPHA_SWIZ_A_UNUSED (7 << 14) */
+# define R500_ALPHA_MOD_A_NOP (0 << 17)
+# define R500_ALPHA_MOD_A_NEG (1 << 17)
+# define R500_ALPHA_MOD_A_ABS (2 << 17)
+# define R500_ALPHA_MOD_A_NAB (3 << 17)
+# define R500_ALPHA_SEL_B_SHIFT 19
+# define R500_ALPHA_SEL_B_SRC0 (0 << 19)
+# define R500_ALPHA_SEL_B_SRC1 (1 << 19)
+# define R500_ALPHA_SEL_B_SRC2 (2 << 19)
+# define R500_ALPHA_SEL_B_SRCP (3 << 19)
+# define R500_ALPHA_SWIZ_B_R (0 << 21)
+# define R500_ALPHA_SWIZ_B_G (1 << 21)
+# define R500_ALPHA_SWIZ_B_B (2 << 21)
+# define R500_ALPHA_SWIZ_B_A (3 << 21)
+# define R500_ALPHA_SWIZ_B_0 (4 << 21)
+# define R500_ALPHA_SWIZ_B_HALF (5 << 21)
+# define R500_ALPHA_SWIZ_B_1 (6 << 21)
+/* #define R500_ALPHA_SWIZ_B_UNUSED (7 << 21) */
+# define R500_ALPHA_MOD_B_NOP (0 << 24)
+# define R500_ALPHA_MOD_B_NEG (1 << 24)
+# define R500_ALPHA_MOD_B_ABS (2 << 24)
+# define R500_ALPHA_MOD_B_NAB (3 << 24)
+# define R500_ALPHA_OMOD_IDENTITY (0 << 26)
+# define R500_ALPHA_OMOD_MUL_2 (1 << 26)
+# define R500_ALPHA_OMOD_MUL_4 (2 << 26)
+# define R500_ALPHA_OMOD_MUL_8 (3 << 26)
+# define R500_ALPHA_OMOD_DIV_2 (4 << 26)
+# define R500_ALPHA_OMOD_DIV_4 (5 << 26)
+# define R500_ALPHA_OMOD_DIV_8 (6 << 26)
+# define R500_ALPHA_OMOD_DISABLE (7 << 26)
+# define R500_ALPHA_TARGET(x) ((x) << 29)
+# define R500_ALPHA_W_OMASK (1 << 31)
+#define R500_US_ALU_ALPHA_ADDR_0 0x9800
+# define R500_ALPHA_ADDR0(x) ((x) << 0)
+# define R500_ALPHA_ADDR0_CONST (1 << 8)
+# define R500_ALPHA_ADDR0_REL (1 << 9)
+# define R500_ALPHA_ADDR1(x) ((x) << 10)
+# define R500_ALPHA_ADDR1_CONST (1 << 18)
+# define R500_ALPHA_ADDR1_REL (1 << 19)
+# define R500_ALPHA_ADDR2(x) ((x) << 20)
+# define R500_ALPHA_ADDR2_CONST (1 << 28)
+# define R500_ALPHA_ADDR2_REL (1 << 29)
+# define R500_ALPHA_SRCP_OP_1_MINUS_2A0 (0 << 30)
+# define R500_ALPHA_SRCP_OP_A1_MINUS_A0 (1 << 30)
+# define R500_ALPHA_SRCP_OP_A1_PLUS_A0 (2 << 30)
+# define R500_ALPHA_SRCP_OP_1_MINUS_A0 (3 << 30)
+#define R500_US_ALU_RGBA_INST_0 0xb000
+# define R500_ALU_RGBA_OP_MAD (0 << 0)
+# define R500_ALU_RGBA_OP_DP3 (1 << 0)
+# define R500_ALU_RGBA_OP_DP4 (2 << 0)
+# define R500_ALU_RGBA_OP_D2A (3 << 0)
+# define R500_ALU_RGBA_OP_MIN (4 << 0)
+# define R500_ALU_RGBA_OP_MAX (5 << 0)
+/* #define R500_ALU_RGBA_OP_RESERVED (6 << 0) */
+# define R500_ALU_RGBA_OP_CND (7 << 0)
+# define R500_ALU_RGBA_OP_CMP (8 << 0)
+# define R500_ALU_RGBA_OP_FRC (9 << 0)
+# define R500_ALU_RGBA_OP_SOP (10 << 0)
+# define R500_ALU_RGBA_OP_MDH (11 << 0)
+# define R500_ALU_RGBA_OP_MDV (12 << 0)
+# define R500_ALU_RGBA_ADDRD(x) ((x) << 4)
+# define R500_ALU_RGBA_ADDRD_REL (1 << 11)
+# define R500_ALU_RGBA_SEL_C_SHIFT 12
+# define R500_ALU_RGBA_SEL_C_SRC0 (0 << 12)
+# define R500_ALU_RGBA_SEL_C_SRC1 (1 << 12)
+# define R500_ALU_RGBA_SEL_C_SRC2 (2 << 12)
+# define R500_ALU_RGBA_SEL_C_SRCP (3 << 12)
+# define R500_ALU_RGBA_R_SWIZ_R (0 << 14)
+# define R500_ALU_RGBA_R_SWIZ_G (1 << 14)
+# define R500_ALU_RGBA_R_SWIZ_B (2 << 14)
+# define R500_ALU_RGBA_R_SWIZ_A (3 << 14)
+# define R500_ALU_RGBA_R_SWIZ_0 (4 << 14)
+# define R500_ALU_RGBA_R_SWIZ_HALF (5 << 14)
+# define R500_ALU_RGBA_R_SWIZ_1 (6 << 14)
+/* #define R500_ALU_RGBA_R_SWIZ_UNUSED (7 << 14) */
+# define R500_ALU_RGBA_G_SWIZ_R (0 << 17)
+# define R500_ALU_RGBA_G_SWIZ_G (1 << 17)
+# define R500_ALU_RGBA_G_SWIZ_B (2 << 17)
+# define R500_ALU_RGBA_G_SWIZ_A (3 << 17)
+# define R500_ALU_RGBA_G_SWIZ_0 (4 << 17)
+# define R500_ALU_RGBA_G_SWIZ_HALF (5 << 17)
+# define R500_ALU_RGBA_G_SWIZ_1 (6 << 17)
+/* #define R500_ALU_RGBA_G_SWIZ_UNUSED (7 << 17) */
+# define R500_ALU_RGBA_B_SWIZ_R (0 << 20)
+# define R500_ALU_RGBA_B_SWIZ_G (1 << 20)
+# define R500_ALU_RGBA_B_SWIZ_B (2 << 20)
+# define R500_ALU_RGBA_B_SWIZ_A (3 << 20)
+# define R500_ALU_RGBA_B_SWIZ_0 (4 << 20)
+# define R500_ALU_RGBA_B_SWIZ_HALF (5 << 20)
+# define R500_ALU_RGBA_B_SWIZ_1 (6 << 20)
+/* #define R500_ALU_RGBA_B_SWIZ_UNUSED (7 << 20) */
+# define R500_ALU_RGBA_MOD_C_NOP (0 << 23)
+# define R500_ALU_RGBA_MOD_C_NEG (1 << 23)
+# define R500_ALU_RGBA_MOD_C_ABS (2 << 23)
+# define R500_ALU_RGBA_MOD_C_NAB (3 << 23)
+# define R500_ALU_RGBA_ALPHA_SEL_C_SHIFT 25
+# define R500_ALU_RGBA_ALPHA_SEL_C_SRC0 (0 << 25)
+# define R500_ALU_RGBA_ALPHA_SEL_C_SRC1 (1 << 25)
+# define R500_ALU_RGBA_ALPHA_SEL_C_SRC2 (2 << 25)
+# define R500_ALU_RGBA_ALPHA_SEL_C_SRCP (3 << 25)
+# define R500_ALU_RGBA_A_SWIZ_R (0 << 27)
+# define R500_ALU_RGBA_A_SWIZ_G (1 << 27)
+# define R500_ALU_RGBA_A_SWIZ_B (2 << 27)
+# define R500_ALU_RGBA_A_SWIZ_A (3 << 27)
+# define R500_ALU_RGBA_A_SWIZ_0 (4 << 27)
+# define R500_ALU_RGBA_A_SWIZ_HALF (5 << 27)
+# define R500_ALU_RGBA_A_SWIZ_1 (6 << 27)
+/* #define R500_ALU_RGBA_A_SWIZ_UNUSED (7 << 27) */
+# define R500_ALU_RGBA_ALPHA_MOD_C_NOP (0 << 30)
+# define R500_ALU_RGBA_ALPHA_MOD_C_NEG (1 << 30)
+# define R500_ALU_RGBA_ALPHA_MOD_C_ABS (2 << 30)
+# define R500_ALU_RGBA_ALPHA_MOD_C_NAB (3 << 30)
+#define R500_US_ALU_RGB_INST_0 0xa000
+# define R500_ALU_RGB_SEL_A_SHIFT 0
+# define R500_ALU_RGB_SEL_A_SRC0 (0 << 0)
+# define R500_ALU_RGB_SEL_A_SRC1 (1 << 0)
+# define R500_ALU_RGB_SEL_A_SRC2 (2 << 0)
+# define R500_ALU_RGB_SEL_A_SRCP (3 << 0)
+# define R500_ALU_RGB_R_SWIZ_A_R (0 << 2)
+# define R500_ALU_RGB_R_SWIZ_A_G (1 << 2)
+# define R500_ALU_RGB_R_SWIZ_A_B (2 << 2)
+# define R500_ALU_RGB_R_SWIZ_A_A (3 << 2)
+# define R500_ALU_RGB_R_SWIZ_A_0 (4 << 2)
+# define R500_ALU_RGB_R_SWIZ_A_HALF (5 << 2)
+# define R500_ALU_RGB_R_SWIZ_A_1 (6 << 2)
+/* #define R500_ALU_RGB_R_SWIZ_A_UNUSED (7 << 2) */
+# define R500_ALU_RGB_G_SWIZ_A_R (0 << 5)
+# define R500_ALU_RGB_G_SWIZ_A_G (1 << 5)
+# define R500_ALU_RGB_G_SWIZ_A_B (2 << 5)
+# define R500_ALU_RGB_G_SWIZ_A_A (3 << 5)
+# define R500_ALU_RGB_G_SWIZ_A_0 (4 << 5)
+# define R500_ALU_RGB_G_SWIZ_A_HALF (5 << 5)
+# define R500_ALU_RGB_G_SWIZ_A_1 (6 << 5)
+/* #define R500_ALU_RGB_G_SWIZ_A_UNUSED (7 << 5) */
+# define R500_ALU_RGB_B_SWIZ_A_R (0 << 8)
+# define R500_ALU_RGB_B_SWIZ_A_G (1 << 8)
+# define R500_ALU_RGB_B_SWIZ_A_B (2 << 8)
+# define R500_ALU_RGB_B_SWIZ_A_A (3 << 8)
+# define R500_ALU_RGB_B_SWIZ_A_0 (4 << 8)
+# define R500_ALU_RGB_B_SWIZ_A_HALF (5 << 8)
+# define R500_ALU_RGB_B_SWIZ_A_1 (6 << 8)
+/* #define R500_ALU_RGB_B_SWIZ_A_UNUSED (7 << 8) */
+# define R500_ALU_RGB_MOD_A_NOP (0 << 11)
+# define R500_ALU_RGB_MOD_A_NEG (1 << 11)
+# define R500_ALU_RGB_MOD_A_ABS (2 << 11)
+# define R500_ALU_RGB_MOD_A_NAB (3 << 11)
+# define R500_ALU_RGB_SEL_B_SHIFT 13
+# define R500_ALU_RGB_SEL_B_SRC0 (0 << 13)
+# define R500_ALU_RGB_SEL_B_SRC1 (1 << 13)
+# define R500_ALU_RGB_SEL_B_SRC2 (2 << 13)
+# define R500_ALU_RGB_SEL_B_SRCP (3 << 13)
+# define R500_ALU_RGB_R_SWIZ_B_R (0 << 15)
+# define R500_ALU_RGB_R_SWIZ_B_G (1 << 15)
+# define R500_ALU_RGB_R_SWIZ_B_B (2 << 15)
+# define R500_ALU_RGB_R_SWIZ_B_A (3 << 15)
+# define R500_ALU_RGB_R_SWIZ_B_0 (4 << 15)
+# define R500_ALU_RGB_R_SWIZ_B_HALF (5 << 15)
+# define R500_ALU_RGB_R_SWIZ_B_1 (6 << 15)
+/* #define R500_ALU_RGB_R_SWIZ_B_UNUSED (7 << 15) */
+# define R500_ALU_RGB_G_SWIZ_B_R (0 << 18)
+# define R500_ALU_RGB_G_SWIZ_B_G (1 << 18)
+# define R500_ALU_RGB_G_SWIZ_B_B (2 << 18)
+# define R500_ALU_RGB_G_SWIZ_B_A (3 << 18)
+# define R500_ALU_RGB_G_SWIZ_B_0 (4 << 18)
+# define R500_ALU_RGB_G_SWIZ_B_HALF (5 << 18)
+# define R500_ALU_RGB_G_SWIZ_B_1 (6 << 18)
+/* #define R500_ALU_RGB_G_SWIZ_B_UNUSED (7 << 18) */
+# define R500_ALU_RGB_B_SWIZ_B_R (0 << 21)
+# define R500_ALU_RGB_B_SWIZ_B_G (1 << 21)
+# define R500_ALU_RGB_B_SWIZ_B_B (2 << 21)
+# define R500_ALU_RGB_B_SWIZ_B_A (3 << 21)
+# define R500_ALU_RGB_B_SWIZ_B_0 (4 << 21)
+# define R500_ALU_RGB_B_SWIZ_B_HALF (5 << 21)
+# define R500_ALU_RGB_B_SWIZ_B_1 (6 << 21)
+/* #define R500_ALU_RGB_B_SWIZ_B_UNUSED (7 << 21) */
+# define R500_ALU_RGB_MOD_B_NOP (0 << 24)
+# define R500_ALU_RGB_MOD_B_NEG (1 << 24)
+# define R500_ALU_RGB_MOD_B_ABS (2 << 24)
+# define R500_ALU_RGB_MOD_B_NAB (3 << 24)
+# define R500_ALU_RGB_OMOD_IDENTITY (0 << 26)
+# define R500_ALU_RGB_OMOD_MUL_2 (1 << 26)
+# define R500_ALU_RGB_OMOD_MUL_4 (2 << 26)
+# define R500_ALU_RGB_OMOD_MUL_8 (3 << 26)
+# define R500_ALU_RGB_OMOD_DIV_2 (4 << 26)
+# define R500_ALU_RGB_OMOD_DIV_4 (5 << 26)
+# define R500_ALU_RGB_OMOD_DIV_8 (6 << 26)
+# define R500_ALU_RGB_OMOD_DISABLE (7 << 26)
+# define R500_ALU_RGB_TARGET(x) ((x) << 29)
+# define R500_ALU_RGB_WMASK (1 << 31)
+#define R500_US_ALU_RGB_ADDR_0 0x9000
+# define R500_RGB_ADDR0(x) ((x) << 0)
+# define R500_RGB_ADDR0_CONST (1 << 8)
+# define R500_RGB_ADDR0_REL (1 << 9)
+# define R500_RGB_ADDR1(x) ((x) << 10)
+# define R500_RGB_ADDR1_CONST (1 << 18)
+# define R500_RGB_ADDR1_REL (1 << 19)
+# define R500_RGB_ADDR2(x) ((x) << 20)
+# define R500_RGB_ADDR2_CONST (1 << 28)
+# define R500_RGB_ADDR2_REL (1 << 29)
+# define R500_RGB_SRCP_OP_1_MINUS_2RGB0 (0 << 30)
+# define R500_RGB_SRCP_OP_RGB1_MINUS_RGB0 (1 << 30)
+# define R500_RGB_SRCP_OP_RGB1_PLUS_RGB0 (2 << 30)
+# define R500_RGB_SRCP_OP_1_MINUS_RGB0 (3 << 30)
+#define R500_US_CMN_INST_0 0xb800
+# define R500_INST_TYPE_MASK (3 << 0)
+# define R500_INST_TYPE_ALU (0 << 0)
+# define R500_INST_TYPE_OUT (1 << 0)
+# define R500_INST_TYPE_FC (2 << 0)
+# define R500_INST_TYPE_TEX (3 << 0)
+# define R500_INST_TEX_SEM_WAIT (1 << 2)
+# define R500_INST_RGB_PRED_SEL_NONE (0 << 3)
+# define R500_INST_RGB_PRED_SEL_RGBA (1 << 3)
+# define R500_INST_RGB_PRED_SEL_RRRR (2 << 3)
+# define R500_INST_RGB_PRED_SEL_GGGG (3 << 3)
+# define R500_INST_RGB_PRED_SEL_BBBB (4 << 3)
+# define R500_INST_RGB_PRED_SEL_AAAA (5 << 3)
+# define R500_INST_RGB_PRED_INV (1 << 6)
+# define R500_INST_WRITE_INACTIVE (1 << 7)
+# define R500_INST_LAST (1 << 8)
+# define R500_INST_NOP (1 << 9)
+# define R500_INST_ALU_WAIT (1 << 10)
+# define R500_INST_RGB_WMASK_R (1 << 11)
+# define R500_INST_RGB_WMASK_G (1 << 12)
+# define R500_INST_RGB_WMASK_B (1 << 13)
+# define R500_INST_RGB_WMASK_RGB (7 << 11)
+# define R500_INST_ALPHA_WMASK (1 << 14)
+# define R500_INST_RGB_OMASK_R (1 << 15)
+# define R500_INST_RGB_OMASK_G (1 << 16)
+# define R500_INST_RGB_OMASK_B (1 << 17)
+# define R500_INST_RGB_OMASK_RGB (7 << 15)
+# define R500_INST_ALPHA_OMASK (1 << 18)
+# define R500_INST_RGB_CLAMP (1 << 19)
+# define R500_INST_ALPHA_CLAMP (1 << 20)
+# define R500_INST_ALU_RESULT_SEL (1 << 21)
+# define R500_INST_ALPHA_PRED_INV (1 << 22)
+# define R500_INST_ALU_RESULT_OP_EQ (0 << 23)
+# define R500_INST_ALU_RESULT_OP_LT (1 << 23)
+# define R500_INST_ALU_RESULT_OP_GE (2 << 23)
+# define R500_INST_ALU_RESULT_OP_NE (3 << 23)
+# define R500_INST_ALPHA_PRED_SEL_NONE (0 << 25)
+# define R500_INST_ALPHA_PRED_SEL_RGBA (1 << 25)
+# define R500_INST_ALPHA_PRED_SEL_RRRR (2 << 25)
+# define R500_INST_ALPHA_PRED_SEL_GGGG (3 << 25)
+# define R500_INST_ALPHA_PRED_SEL_BBBB (4 << 25)
+# define R500_INST_ALPHA_PRED_SEL_AAAA (5 << 25)
+/* XXX next four are kind of guessed */
+# define R500_INST_STAT_WE_R (1 << 28)
+# define R500_INST_STAT_WE_G (1 << 29)
+# define R500_INST_STAT_WE_B (1 << 30)
+# define R500_INST_STAT_WE_A (1 << 31)
+
+/* note that these are 8-bit lengths, despite the offsets, at least for R500 */
+#define R500_US_CODE_ADDR 0x4630
+# define R500_US_CODE_START_ADDR(x) ((x) << 0)
+# define R500_US_CODE_END_ADDR(x) ((x) << 16)
+#define R500_US_CODE_OFFSET 0x4638
+# define R500_US_CODE_OFFSET_ADDR(x) ((x) << 0)
+#define R500_US_CODE_RANGE 0x4634
+# define R500_US_CODE_RANGE_ADDR(x) ((x) << 0)
+# define R500_US_CODE_RANGE_SIZE(x) ((x) << 16)
+#define R500_US_CONFIG 0x4600
+# define R500_ZERO_TIMES_ANYTHING_EQUALS_ZERO (1 << 1)
+#define R500_US_FC_ADDR_0 0xa000
+# define R500_FC_BOOL_ADDR(x) ((x) << 0)
+# define R500_FC_INT_ADDR(x) ((x) << 8)
+# define R500_FC_JUMP_ADDR(x) ((x) << 16)
+# define R500_FC_JUMP_GLOBAL (1 << 31)
+#define R500_US_FC_BOOL_CONST 0x4620
+# define R500_FC_KBOOL(x) (x)
+#define R500_US_FC_CTRL 0x4624
+# define R500_FC_TEST_EN (1 << 30)
+# define R500_FC_FULL_FC_EN (1 << 31)
+#define R500_US_FC_INST_0 0x9800
+# define R500_FC_OP_JUMP (0 << 0)
+# define R500_FC_OP_LOOP (1 << 0)
+# define R500_FC_OP_ENDLOOP (2 << 0)
+# define R500_FC_OP_REP (3 << 0)
+# define R500_FC_OP_ENDREP (4 << 0)
+# define R500_FC_OP_BREAKLOOP (5 << 0)
+# define R500_FC_OP_BREAKREP (6 << 0)
+# define R500_FC_OP_CONTINUE (7 << 0)
+# define R500_FC_B_ELSE (1 << 4)
+# define R500_FC_JUMP_ANY (1 << 5)
+# define R500_FC_A_OP_NONE (0 << 6)
+# define R500_FC_A_OP_POP (1 << 6)
+# define R500_FC_A_OP_PUSH (2 << 6)
+# define R500_FC_JUMP_FUNC(x) ((x) << 8)
+# define R500_FC_B_POP_CNT(x) ((x) << 16)
+# define R500_FC_B_OP0_NONE (0 << 24)
+# define R500_FC_B_OP0_DECR (1 << 24)
+# define R500_FC_B_OP0_INCR (2 << 24)
+# define R500_FC_B_OP1_DECR (0 << 26)
+# define R500_FC_B_OP1_NONE (1 << 26)
+# define R500_FC_B_OP1_INCR (2 << 26)
+# define R500_FC_IGNORE_UNCOVERED (1 << 28)
+#define R500_US_FC_INT_CONST_0 0x4c00
+# define R500_FC_INT_CONST_KR(x) ((x) << 0)
+# define R500_FC_INT_CONST_KG(x) ((x) << 8)
+# define R500_FC_INT_CONST_KB(x) ((x) << 16)
+/* _0 through _15 */
+#define R500_US_FORMAT0_0 0x4640
+# define R500_FORMAT_TXWIDTH(x) ((x) << 0)
+# define R500_FORMAT_TXHEIGHT(x) ((x) << 11)
+# define R500_FORMAT_TXDEPTH(x) ((x) << 22)
+#define R500_US_PIXSIZE 0x4604
+# define R500_PIX_SIZE(x) (x)
+#define R500_US_TEX_ADDR_0 0x9800
+# define R500_TEX_SRC_ADDR(x) ((x) << 0)
+# define R500_TEX_SRC_ADDR_REL (1 << 7)
+# define R500_TEX_SRC_S_SWIZ_R (0 << 8)
+# define R500_TEX_SRC_S_SWIZ_G (1 << 8)
+# define R500_TEX_SRC_S_SWIZ_B (2 << 8)
+# define R500_TEX_SRC_S_SWIZ_A (3 << 8)
+# define R500_TEX_SRC_T_SWIZ_R (0 << 10)
+# define R500_TEX_SRC_T_SWIZ_G (1 << 10)
+# define R500_TEX_SRC_T_SWIZ_B (2 << 10)
+# define R500_TEX_SRC_T_SWIZ_A (3 << 10)
+# define R500_TEX_SRC_R_SWIZ_R (0 << 12)
+# define R500_TEX_SRC_R_SWIZ_G (1 << 12)
+# define R500_TEX_SRC_R_SWIZ_B (2 << 12)
+# define R500_TEX_SRC_R_SWIZ_A (3 << 12)
+# define R500_TEX_SRC_Q_SWIZ_R (0 << 14)
+# define R500_TEX_SRC_Q_SWIZ_G (1 << 14)
+# define R500_TEX_SRC_Q_SWIZ_B (2 << 14)
+# define R500_TEX_SRC_Q_SWIZ_A (3 << 14)
+# define R500_TEX_DST_ADDR(x) ((x) << 16)
+# define R500_TEX_DST_ADDR_REL (1 << 23)
+# define R500_TEX_DST_R_SWIZ_R (0 << 24)
+# define R500_TEX_DST_R_SWIZ_G (1 << 24)
+# define R500_TEX_DST_R_SWIZ_B (2 << 24)
+# define R500_TEX_DST_R_SWIZ_A (3 << 24)
+# define R500_TEX_DST_G_SWIZ_R (0 << 26)
+# define R500_TEX_DST_G_SWIZ_G (1 << 26)
+# define R500_TEX_DST_G_SWIZ_B (2 << 26)
+# define R500_TEX_DST_G_SWIZ_A (3 << 26)
+# define R500_TEX_DST_B_SWIZ_R (0 << 28)
+# define R500_TEX_DST_B_SWIZ_G (1 << 28)
+# define R500_TEX_DST_B_SWIZ_B (2 << 28)
+# define R500_TEX_DST_B_SWIZ_A (3 << 28)
+# define R500_TEX_DST_A_SWIZ_R (0 << 30)
+# define R500_TEX_DST_A_SWIZ_G (1 << 30)
+# define R500_TEX_DST_A_SWIZ_B (2 << 30)
+# define R500_TEX_DST_A_SWIZ_A (3 << 30)
+#define R500_US_TEX_ADDR_DXDY_0 0xa000
+# define R500_DX_ADDR(x) ((x) << 0)
+# define R500_DX_ADDR_REL (1 << 7)
+# define R500_DX_S_SWIZ_R (0 << 8)
+# define R500_DX_S_SWIZ_G (1 << 8)
+# define R500_DX_S_SWIZ_B (2 << 8)
+# define R500_DX_S_SWIZ_A (3 << 8)
+# define R500_DX_T_SWIZ_R (0 << 10)
+# define R500_DX_T_SWIZ_G (1 << 10)
+# define R500_DX_T_SWIZ_B (2 << 10)
+# define R500_DX_T_SWIZ_A (3 << 10)
+# define R500_DX_R_SWIZ_R (0 << 12)
+# define R500_DX_R_SWIZ_G (1 << 12)
+# define R500_DX_R_SWIZ_B (2 << 12)
+# define R500_DX_R_SWIZ_A (3 << 12)
+# define R500_DX_Q_SWIZ_R (0 << 14)
+# define R500_DX_Q_SWIZ_G (1 << 14)
+# define R500_DX_Q_SWIZ_B (2 << 14)
+# define R500_DX_Q_SWIZ_A (3 << 14)
+# define R500_DY_ADDR(x) ((x) << 16)
+# define R500_DY_ADDR_REL (1 << 17)
+# define R500_DY_S_SWIZ_R (0 << 24)
+# define R500_DY_S_SWIZ_G (1 << 24)
+# define R500_DY_S_SWIZ_B (2 << 24)
+# define R500_DY_S_SWIZ_A (3 << 24)
+# define R500_DY_T_SWIZ_R (0 << 26)
+# define R500_DY_T_SWIZ_G (1 << 26)
+# define R500_DY_T_SWIZ_B (2 << 26)
+# define R500_DY_T_SWIZ_A (3 << 26)
+# define R500_DY_R_SWIZ_R (0 << 28)
+# define R500_DY_R_SWIZ_G (1 << 28)
+# define R500_DY_R_SWIZ_B (2 << 28)
+# define R500_DY_R_SWIZ_A (3 << 28)
+# define R500_DY_Q_SWIZ_R (0 << 30)
+# define R500_DY_Q_SWIZ_G (1 << 30)
+# define R500_DY_Q_SWIZ_B (2 << 30)
+# define R500_DY_Q_SWIZ_A (3 << 30)
+#define R500_US_TEX_INST_0 0x9000
+# define R500_TEX_ID(x) ((x) << 16)
+# define R500_TEX_INST_NOP (0 << 22)
+# define R500_TEX_INST_LD (1 << 22)
+# define R500_TEX_INST_TEXKILL (2 << 22)
+# define R500_TEX_INST_PROJ (3 << 22)
+# define R500_TEX_INST_LODBIAS (4 << 22)
+# define R500_TEX_INST_LOD (5 << 22)
+# define R500_TEX_INST_DXDY (6 << 22)
+# define R500_TEX_SEM_ACQUIRE (1 << 25)
+# define R500_TEX_IGNORE_UNCOVERED (1 << 26)
+# define R500_TEX_UNSCALED (1 << 27)
+#define R300_US_W_FMT 0x46b4
+# define R300_W_FMT_W0 (0 << 0)
+# define R300_W_FMT_W24 (1 << 0)
+# define R300_W_FMT_W24FP (2 << 0)
+# define R300_W_SRC_US (0 << 2)
+# define R300_W_SRC_RAS (1 << 2)
+
+
+/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
+ * Two parameter dwords:
+ * 0. VAP_VTX_FMT: The first parameter is not written to hardware
+ * 1. VAP_VF_CTL: The second parameter is a standard primitive emission dword.
+ */
+#define R300_PACKET3_3D_DRAW_VBUF 0x00002800
+
+/* Draw a primitive from immediate vertices in this packet
+ * Up to 16382 dwords:
+ * 0. VAP_VTX_FMT: The first parameter is not written to hardware
+ * 1. VAP_VF_CTL: The second parameter is a standard primitive emission dword.
+ * 2 to end: Up to 16380 dwords of vertex data.
+ */
+#define R300_PACKET3_3D_DRAW_IMMD 0x00002900
+
+/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR and
+ * immediate vertices in this packet
+ * Up to 16382 dwords:
+ * 0. VAP_VTX_FMT: The first parameter is not written to hardware
+ * 1. VAP_VF_CTL: The second parameter is a standard primitive emission dword.
+ * 2 to end: Up to 16380 dwords of vertex data.
+ */
+#define R300_PACKET3_3D_DRAW_INDX 0x00002A00
+
+
+/* Specify the full set of vertex arrays as (address, stride).
+ * The first parameter is the number of vertex arrays specified.
+ * The rest of the command is a variable length list of blocks, where
+ * each block is three dwords long and specifies two arrays.
+ * The first dword of a block is split into two words: the less significant
+ * word refers to the first array, the more significant word to the second
+ * array in the block.
+ * The low byte of each word contains the size of an array entry in dwords,
+ * the high byte contains the stride of the array.
+ * The second dword of a block contains the pointer to the first array,
+ * the third dword of a block contains the pointer to the second array.
+ * Note that if the total number of arrays is odd, the third dword of
+ * the last block is omitted.
+ */
+#define R300_PACKET3_3D_LOAD_VBPNTR 0x00002F00
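+
+/* Hedged example (a restatement of the layout above in code, not verified):
+ * packing one block that describes two arrays. size0/stride0/size1/stride1
+ * and the array offsets are placeholders.
+ */
+#if 0
+uint32_t dword0 = (size0 & 0xff) | ((stride0 & 0xff) << 8) |
+                  ((size1 & 0xff) << 16) | ((stride1 & 0xff) << 24);
+uint32_t dword1 = array0_offset; /* pointer to the first array */
+uint32_t dword2 = array1_offset; /* pointer to the second array */
+#endif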
+
+#define R300_PACKET3_INDX_BUFFER 0x00003300
+# define R300_INDX_BUFFER_DST_SHIFT 0
+# define R300_INDX_BUFFER_SKIP_SHIFT 16
+# define R300_INDX_BUFFER_ONE_REG_WR (1<<31)
+
+/* Same as R300_PACKET3_3D_DRAW_VBUF but without VAP_VTX_FMT */
+#define R300_PACKET3_3D_DRAW_VBUF_2 0x00003400
+/* Same as R300_PACKET3_3D_DRAW_IMMD but without VAP_VTX_FMT */
+#define R300_PACKET3_3D_DRAW_IMMD_2 0x00003500
+/* Same as R300_PACKET3_3D_DRAW_INDX but without VAP_VTX_FMT */
+#define R300_PACKET3_3D_DRAW_INDX_2 0x00003600
+
+/* Clears a portion of hierarchical Z RAM
+ * 3 dword parameters
+ * 0. START
+ * 1. COUNT: 13:0 (max is 0x3FFF)
+ * 2. CLEAR_VALUE: Value to write into HIZ RAM.
+ */
+#define R300_PACKET3_3D_CLEAR_HIZ 0x00003700
+
+/* Draws a set of primitives using vertex buffers pointed by the state data.
+ * At least 2 Parameters:
+ * 0. VAP_VF_CNTL: The first parameter is a standard primitive emission dword.
+ * 2 to end: Data or indices (see other 3D_DRAW_* packets for details)
+ */
+#define R300_PACKET3_3D_DRAW_128 0x00003900
+
+/* END: Packet 3 commands */
+
+
+/* Color formats for 2d packets
+ */
+#define R300_CP_COLOR_FORMAT_CI8 2
+#define R300_CP_COLOR_FORMAT_ARGB1555 3
+#define R300_CP_COLOR_FORMAT_RGB565 4
+#define R300_CP_COLOR_FORMAT_ARGB8888 6
+#define R300_CP_COLOR_FORMAT_RGB332 7
+#define R300_CP_COLOR_FORMAT_RGB8 9
+#define R300_CP_COLOR_FORMAT_ARGB4444 15
+
+/*
+ * CP type-3 packets
+ */
+#define R300_CP_CMD_BITBLT_MULTI 0xC0009B00
+
+/* XXX Corbin's stuff from radeon and r200 */
+
+#define RADEON_WAIT_UNTIL 0x1720
+# define RADEON_WAIT_CRTC_PFLIP (1 << 0)
+# define RADEON_WAIT_2D_IDLECLEAN (1 << 16)
+# define RADEON_WAIT_3D_IDLECLEAN (1 << 17)
+# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18)
+
+#define RADEON_CP_PACKET3 0xC0000000
+
+#define R200_3D_DRAW_IMMD_2 0xC0003500
+
+/* XXX Oh look, stuff not brought over from docs yet */
+
+#define R300_SU_REG_DEST 0x42C8
+
+#endif /* _R300_REG_H */
+
+/* *INDENT-ON* */
+
+/* vim: set foldenable foldmarker=\\{,\\} foldmethod=marker : */
diff --git a/src/gallium/drivers/r300/r300_render.c b/src/gallium/drivers/r300/r300_render.c
new file mode 100644
index 0000000000..737396d8d9
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_render.c
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "draw/draw_pipe.h"
+#include "draw/draw_vbuf.h"
+#include "util/u_memory.h"
+
+#include "r300_cs.h"
+#include "r300_context.h"
+#include "r300_emit.h"
+#include "r300_reg.h"
+#include "r300_state_derived.h"
+
+/* r300_render: Vertex and index buffer primitive emission. */
+#define R300_MAX_VBO_SIZE (1024 * 1024)
+
+struct r300_render {
+ /* Parent class */
+ struct vbuf_render base;
+
+ /* Pipe context */
+ struct r300_context* r300;
+
+ /* Vertex information */
+ size_t vertex_size;
+ unsigned prim;
+ unsigned hwprim;
+
+ /* VBO */
+ struct pipe_buffer* vbo;
+ size_t vbo_size;
+ size_t vbo_offset;
+ size_t vbo_max_used;
+ void * vbo_ptr;
+};
+
+static INLINE struct r300_render*
+r300_render(struct vbuf_render* render)
+{
+ return (struct r300_render*)render;
+}
+
+static const struct vertex_info*
+r300_render_get_vertex_info(struct vbuf_render* render)
+{
+ struct r300_render* r300render = r300_render(render);
+ struct r300_context* r300 = r300render->r300;
+
+ r300_update_derived_state(r300);
+
+ return &r300->vertex_info.vinfo;
+}
+
+static boolean r300_render_allocate_vertices(struct vbuf_render* render,
+ ushort vertex_size,
+ ushort count)
+{
+ struct r300_render* r300render = r300_render(render);
+ struct r300_context* r300 = r300render->r300;
+ struct pipe_screen* screen = r300->context.screen;
+ size_t size = (size_t)vertex_size * (size_t)count;
+
+ if (size + r300render->vbo_offset > r300render->vbo_size)
+ {
+ r300render->vbo = pipe_buffer_create(screen,
+ 64,
+ PIPE_BUFFER_USAGE_VERTEX,
+ R300_MAX_VBO_SIZE);
+ r300render->vbo_size = R300_MAX_VBO_SIZE;
+ }
+
+ r300render->vertex_size = vertex_size;
+ r300->vbo = r300render->vbo;
+ r300->vbo_offset = r300render->vbo_offset;
+
+ return (r300render->vbo) ? TRUE : FALSE;
+}
+
+static void* r300_render_map_vertices(struct vbuf_render* render)
+{
+ struct r300_render* r300render = r300_render(render);
+ struct pipe_screen* screen = r300render->r300->context.screen;
+
+ r300render->vbo_ptr = pipe_buffer_map(screen, r300render->vbo,
+ PIPE_BUFFER_USAGE_CPU_WRITE);
+
+ return (r300render->vbo_ptr + r300render->vbo_offset);
+}
+
+static void r300_render_unmap_vertices(struct vbuf_render* render,
+ ushort min,
+ ushort max)
+{
+ struct r300_render* r300render = r300_render(render);
+ struct pipe_screen* screen = r300render->r300->context.screen;
+ CS_LOCALS(r300render->r300);
+ BEGIN_CS(2);
+ OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, max);
+ END_CS;
+
+ r300render->vbo_max_used = MAX2(r300render->vbo_max_used,
+ r300render->vertex_size * (max + 1));
+ pipe_buffer_unmap(screen, r300render->vbo);
+}
+
+static void r300_render_release_vertices(struct vbuf_render* render)
+{
+ struct r300_render* r300render = r300_render(render);
+ struct r300_context* r300 = r300render->r300;
+
+ r300render->vbo_offset += r300render->vbo_max_used;
+ r300render->vbo_max_used = 0;
+ r300->vbo = NULL;
+}
+
+static boolean r300_render_set_primitive(struct vbuf_render* render,
+ unsigned prim)
+{
+ struct r300_render* r300render = r300_render(render);
+ r300render->prim = prim;
+
+ switch (prim) {
+ case PIPE_PRIM_POINTS:
+ r300render->hwprim = R300_VAP_VF_CNTL__PRIM_POINTS;
+ break;
+ case PIPE_PRIM_LINES:
+ r300render->hwprim = R300_VAP_VF_CNTL__PRIM_LINES;
+ break;
+ case PIPE_PRIM_LINE_LOOP:
+ r300render->hwprim = R300_VAP_VF_CNTL__PRIM_LINE_LOOP;
+ break;
+ case PIPE_PRIM_LINE_STRIP:
+ r300render->hwprim = R300_VAP_VF_CNTL__PRIM_LINE_STRIP;
+ break;
+ case PIPE_PRIM_TRIANGLES:
+ r300render->hwprim = R300_VAP_VF_CNTL__PRIM_TRIANGLES;
+ break;
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ r300render->hwprim = R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP;
+ break;
+ case PIPE_PRIM_TRIANGLE_FAN:
+ r300render->hwprim = R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN;
+ break;
+ case PIPE_PRIM_QUADS:
+ r300render->hwprim = R300_VAP_VF_CNTL__PRIM_QUADS;
+ break;
+ case PIPE_PRIM_QUAD_STRIP:
+ r300render->hwprim = R300_VAP_VF_CNTL__PRIM_QUAD_STRIP;
+ break;
+ case PIPE_PRIM_POLYGON:
+ r300render->hwprim = R300_VAP_VF_CNTL__PRIM_POLYGON;
+ break;
+ default:
+ return FALSE;
+ break;
+ }
+
+ return TRUE;
+}
+
+static void r300_prepare_render(struct r300_render* render, unsigned count)
+{
+ struct r300_context* r300 = render->r300;
+
+ CS_LOCALS(r300);
+
+ r300_emit_dirty_state(r300);
+}
+
+static void r300_render_draw_arrays(struct vbuf_render* render,
+ unsigned start,
+ unsigned count)
+{
+ struct r300_render* r300render = r300_render(render);
+ struct r300_context* r300 = r300render->r300;
+
+ CS_LOCALS(r300);
+
+ r300_prepare_render(r300render, count);
+
+ DBG(r300, DBG_DRAW, "r300: Doing vbuf render, count %d\n", count);
+
+ BEGIN_CS(2);
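+    /* A single VAP_VF_CNTL dword follows the packet header: the vertex
+     * count goes in the upper 16 bits, with the walk mode and primitive
+     * type in the lower bits. */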
+ OUT_CS_PKT3(R300_PACKET3_3D_DRAW_VBUF_2, 0);
+ OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (count << 16) |
+ r300render->hwprim);
+ END_CS;
+}
+
+static void r300_render_draw(struct vbuf_render* render,
+ const ushort* indices,
+ uint count)
+{
+ struct r300_render* r300render = r300_render(render);
+ struct r300_context* r300 = r300render->r300;
+ struct pipe_screen* screen = r300->context.screen;
+ struct pipe_buffer* index_buffer;
+ void* index_map;
+ int i;
+ uint32_t index;
+
+ CS_LOCALS(r300);
+
+ r300_prepare_render(r300render, count);
+
+    /* XXX Allocate an index buffer for the indices; it is not filled or
+     * referenced yet, and the indices are emitted inline into the CS
+     * below instead. */
+ index_buffer = pipe_buffer_create(screen, 64, PIPE_BUFFER_USAGE_VERTEX,
+ count * 2);
+ if (!index_buffer) {
+ return;
+ }
+
+ BEGIN_CS(2 + (count+1)/2);
+ OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, (count+1)/2);
+ OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (count << 16) |
+ r300render->hwprim);
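+    /* Pack two 16-bit indices per dword, the first index in the low half;
+     * an odd trailing index occupies the low half of the final dword. */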
+ for (i = 0; i < count-1; i += 2) {
+ OUT_CS(indices[i+1] << 16 | indices[i]);
+ }
+ if (count % 2) {
+ OUT_CS(indices[count-1]);
+ }
+ END_CS;
+}
+
+static void r300_render_destroy(struct vbuf_render* render)
+{
+ FREE(render);
+}
+
+static struct vbuf_render* r300_render_create(struct r300_context* r300)
+{
+ struct r300_render* r300render = CALLOC_STRUCT(r300_render);
+
+ r300render->r300 = r300;
+
+ /* XXX find real numbers plz */
+ r300render->base.max_vertex_buffer_bytes = 128 * 1024;
+ r300render->base.max_indices = 16 * 1024;
+
+ r300render->base.get_vertex_info = r300_render_get_vertex_info;
+ r300render->base.allocate_vertices = r300_render_allocate_vertices;
+ r300render->base.map_vertices = r300_render_map_vertices;
+ r300render->base.unmap_vertices = r300_render_unmap_vertices;
+ r300render->base.set_primitive = r300_render_set_primitive;
+ r300render->base.draw = r300_render_draw;
+ r300render->base.draw_arrays = r300_render_draw_arrays;
+ r300render->base.release_vertices = r300_render_release_vertices;
+ r300render->base.destroy = r300_render_destroy;
+
+ r300render->vbo = NULL;
+ r300render->vbo_size = 0;
+ r300render->vbo_offset = 0;
+
+ return &r300render->base;
+}
+
+struct draw_stage* r300_draw_stage(struct r300_context* r300)
+{
+ struct vbuf_render* render;
+ struct draw_stage* stage;
+
+ render = r300_render_create(r300);
+
+ if (!render) {
+ return NULL;
+ }
+
+ stage = draw_vbuf_stage(r300->draw, render);
+
+ if (!stage) {
+ render->destroy(render);
+ return NULL;
+ }
+
+ draw_set_render(r300->draw, render);
+
+ return stage;
+}
diff --git a/src/gallium/drivers/r300/r300_screen.c b/src/gallium/drivers/r300/r300_screen.c
new file mode 100644
index 0000000000..15740f6125
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_screen.c
@@ -0,0 +1,416 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "r300_screen.h"
+
+/* Return the identifier behind whom the brave coders responsible for this
+ * amalgamation of code, sweat, and duct tape, routinely obscure their names.
+ *
+ * ...I should have just put "Corbin Simpson", but I'm not that cool.
+ *
+ * (Or egotistical. Yet.) */
+static const char* r300_get_vendor(struct pipe_screen* pscreen)
+{
+ return "X.Org R300 Project";
+}
+
+static const char* chip_families[] = {
+ "R300",
+ "R350",
+ "R360",
+ "RV350",
+ "RV370",
+ "RV380",
+ "R420",
+ "R423",
+ "R430",
+ "R480",
+ "R481",
+ "RV410",
+ "RS400",
+ "RC410",
+ "RS480",
+ "RS482",
+ "RS600",
+ "RS690",
+ "RS740",
+ "RV515",
+ "R520",
+ "RV530",
+ "R580",
+ "RV560",
+ "RV570"
+};
+
+static const char* r300_get_name(struct pipe_screen* pscreen)
+{
+ struct r300_screen* r300screen = r300_screen(pscreen);
+
+ return chip_families[r300screen->caps->family];
+}
+
+static int r300_get_param(struct pipe_screen* pscreen, int param)
+{
+ struct r300_screen* r300screen = r300_screen(pscreen);
+
+ switch (param) {
+ /* XXX cases marked "IN THEORY" are possible on the hardware,
+ * but haven't been implemented yet. */
+ case PIPE_CAP_MAX_TEXTURE_IMAGE_UNITS:
+ /* XXX I'm told this goes up to 16 */
+ return 8;
+ case PIPE_CAP_NPOT_TEXTURES:
+ /* IN THEORY */
+ return 0;
+ case PIPE_CAP_TWO_SIDED_STENCIL:
+ if (r300screen->caps->is_r500) {
+ return 1;
+ } else {
+ return 0;
+ }
+ case PIPE_CAP_GLSL:
+ if (r300screen->caps->is_r500) {
+ return 1;
+ } else {
+ return 0;
+ }
+ case PIPE_CAP_S3TC:
+ return 1;
+ case PIPE_CAP_ANISOTROPIC_FILTER:
+ return 1;
+ case PIPE_CAP_POINT_SPRITE:
+ /* IN THEORY */
+ return 0;
+ case PIPE_CAP_MAX_RENDER_TARGETS:
+ return 4;
+ case PIPE_CAP_OCCLUSION_QUERY:
+ /* IN THEORY */
+ return 0;
+ case PIPE_CAP_TEXTURE_SHADOW_MAP:
+ /* IN THEORY */
+ return 0;
+ case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
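+        /* The level count includes the base level, so n levels allows a
+         * base size of 2^(n-1) texels per dimension. */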
+ if (r300screen->caps->is_r500) {
+ /* 13 == 4096x4096 */
+ return 13;
+ } else {
+ /* 12 == 2048x2048 */
+ return 12;
+ }
+ case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
+        /* So, technically, the limit is the same as above, but some math
+         * shows why this is silly. Assuming RGBA, 4cpp, a full
+         * 4096x4096x4096 texture is 4096*4096*4096*4 bytes = 256 GiB,
+         * so it's not exactly practical. However, if at some point a game
+         * really wants this, then we can remove or raise this limit. */
+ if (r300screen->caps->is_r500) {
+ /* 9 == 256x256x256 */
+ return 9;
+ } else {
+            /* 8 == 128x128x128 */
+ return 8;
+ }
+ case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
+ if (r300screen->caps->is_r500) {
+ /* 13 == 4096x4096 */
+ return 13;
+ } else {
+ /* 12 == 2048x2048 */
+ return 12;
+ }
+ case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
+ return 1;
+ case PIPE_CAP_TEXTURE_MIRROR_REPEAT:
+ return 1;
+ case PIPE_CAP_MAX_VERTEX_TEXTURE_UNITS:
+ /* XXX guessing (what a terrible guess) */
+ return 2;
+ case PIPE_CAP_TGSI_CONT_SUPPORTED:
+ /* XXX */
+ return 0;
+ case PIPE_CAP_BLEND_EQUATION_SEPARATE:
+ return 1;
+ default:
+ debug_printf("r300: Implementation error: Bad param %d\n",
+ param);
+ return 0;
+ }
+}
+
+static float r300_get_paramf(struct pipe_screen* pscreen, int param)
+{
+ struct r300_screen* r300screen = r300_screen(pscreen);
+
+ switch (param) {
+ case PIPE_CAP_MAX_LINE_WIDTH:
+ case PIPE_CAP_MAX_LINE_WIDTH_AA:
+ case PIPE_CAP_MAX_POINT_WIDTH:
+ case PIPE_CAP_MAX_POINT_WIDTH_AA:
+ /* The maximum dimensions of the colorbuffer are our practical
+ * rendering limits. 2048 pixels should be enough for anybody. */
+ if (r300screen->caps->is_r500) {
+ return 4096.0f;
+ } else {
+ return 2048.0f;
+ }
+ case PIPE_CAP_MAX_TEXTURE_ANISOTROPY:
+ return 16.0f;
+ case PIPE_CAP_MAX_TEXTURE_LOD_BIAS:
+ return 16.0f;
+ default:
+ debug_printf("r300: Implementation error: Bad paramf %d\n",
+ param);
+ return 0.0f;
+ }
+}
+
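+/* Returns nonzero iff the requested usage bits intersect the set of usages
+ * supported for the given format. */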
+static boolean check_tex_2d_format(enum pipe_format format, uint32_t usage,
+ boolean is_r500)
+{
+ switch (format) {
+ /* Supported formats. */
+ /* Colorbuffer */
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ return usage &
+ (PIPE_TEXTURE_USAGE_RENDER_TARGET |
+ PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
+ PIPE_TEXTURE_USAGE_PRIMARY);
+
+ /* Texture */
+ case PIPE_FORMAT_A8R8G8B8_SRGB:
+ case PIPE_FORMAT_R8G8B8A8_SRGB:
+ case PIPE_FORMAT_DXT1_RGB:
+ case PIPE_FORMAT_DXT1_RGBA:
+ case PIPE_FORMAT_DXT3_RGBA:
+ case PIPE_FORMAT_DXT5_RGBA:
+ case PIPE_FORMAT_YCBCR:
+ return usage & PIPE_TEXTURE_USAGE_SAMPLER;
+
+ /* Colorbuffer or texture */
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ case PIPE_FORMAT_I8_UNORM:
+ return usage &
+ (PIPE_TEXTURE_USAGE_RENDER_TARGET |
+ PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
+ PIPE_TEXTURE_USAGE_PRIMARY |
+ PIPE_TEXTURE_USAGE_SAMPLER);
+
+ /* Z buffer */
+ case PIPE_FORMAT_Z16_UNORM:
+ return usage & PIPE_TEXTURE_USAGE_DEPTH_STENCIL;
+
+ /* Z buffer with stencil or texture */
+ case PIPE_FORMAT_Z24S8_UNORM:
+ return usage &
+ (PIPE_TEXTURE_USAGE_DEPTH_STENCIL |
+ PIPE_TEXTURE_USAGE_SAMPLER);
+
+ /* Definitely unsupported formats. */
+ /* Non-usable Z buffer/stencil formats. */
+ case PIPE_FORMAT_Z24X8_UNORM:
+ case PIPE_FORMAT_S8Z24_UNORM:
+ case PIPE_FORMAT_X8Z24_UNORM:
+ debug_printf("r300: Note: Got unsupported format: %s in %s\n",
+ pf_name(format), __FUNCTION__);
+ return FALSE;
+
+ /* XXX These don't even exist
+ case PIPE_FORMAT_A32R32G32B32:
+ case PIPE_FORMAT_A16R16G16B16: */
+ /* XXX Insert YUV422 packed VYUY and YVYU here */
+ /* XXX What the deuce is UV88? (r3xx accel page 14)
+ debug_printf("r300: Warning: Got unimplemented format: %s in %s\n",
+ pf_name(format), __FUNCTION__);
+ return FALSE; */
+
+ /* XXX Supported yet unimplemented r5xx formats: */
+ /* XXX Again, what is UV1010 this time? (r5xx accel page 148) */
+ /* XXX Even more that don't exist
+ case PIPE_FORMAT_A10R10G10B10_UNORM:
+ case PIPE_FORMAT_A2R10G10B10_UNORM:
+ case PIPE_FORMAT_I10_UNORM:
+ debug_printf(
+ "r300: Warning: Got unimplemented r500 format: %s in %s\n",
+ pf_name(format), __FUNCTION__);
+ return FALSE; */
+
+ default:
+ /* Unknown format... */
+ debug_printf("r300: Warning: Got unknown format: %s in %s\n",
+ pf_name(format), __FUNCTION__);
+ break;
+ }
+
+ return FALSE;
+}
+
+/* XXX moar targets */
+static boolean r300_is_format_supported(struct pipe_screen* pscreen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned tex_usage,
+ unsigned geom_flags)
+{
+ switch (target) {
+ case PIPE_TEXTURE_2D:
+ return check_tex_2d_format(format, tex_usage,
+ r300_screen(pscreen)->caps->is_r500);
+ case PIPE_TEXTURE_1D:
+ case PIPE_TEXTURE_3D:
+ case PIPE_TEXTURE_CUBE:
+ debug_printf("r300: Implementation error: Unsupported format "
+ "target: %d\n", target);
+ break;
+ default:
+ debug_printf("r300: Fatal: This is not a format target: %d\n",
+ target);
+ assert(0);
+ break;
+ }
+
+ return FALSE;
+}
+
+static struct pipe_transfer*
+r300_get_tex_transfer(struct pipe_screen *screen,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level, unsigned zslice,
+ enum pipe_transfer_usage usage, unsigned x, unsigned y,
+ unsigned w, unsigned h)
+{
+ struct r300_texture *tex = (struct r300_texture *)texture;
+ struct r300_transfer *trans;
+    unsigned offset = 0; /* in bytes */
+
+ /* XXX Add support for these things */
+ if (texture->target == PIPE_TEXTURE_CUBE) {
+ debug_printf("PIPE_TEXTURE_CUBE is not yet supported.\n");
+ /* offset = tex->image_offset[level][face]; */
+ }
+ else if (texture->target == PIPE_TEXTURE_3D) {
+ debug_printf("PIPE_TEXTURE_3D is not yet supported.\n");
+ /* offset = tex->image_offset[level][zslice]; */
+ }
+ else {
+ offset = tex->offset[level];
+ assert(face == 0);
+ assert(zslice == 0);
+ }
+
+ trans = CALLOC_STRUCT(r300_transfer);
+ if (trans) {
+ pipe_texture_reference(&trans->transfer.texture, texture);
+ trans->transfer.format = texture->format;
+ trans->transfer.width = w;
+ trans->transfer.height = h;
+ trans->transfer.block = texture->block;
+ trans->transfer.nblocksx = texture->nblocksx[level];
+ trans->transfer.nblocksy = texture->nblocksy[level];
+ trans->transfer.stride = align(pf_get_stride(&trans->transfer.block,
+ texture->width[level]), 32);
+ trans->transfer.usage = usage;
+ trans->offset = offset;
+ }
+ return &trans->transfer;
+}
+
+static void
+r300_tex_transfer_destroy(struct pipe_transfer *trans)
+{
+ pipe_texture_reference(&trans->texture, NULL);
+ FREE(trans);
+}
+
+static void* r300_transfer_map(struct pipe_screen* screen,
+ struct pipe_transfer* transfer)
+{
+ struct r300_texture* tex = (struct r300_texture*)transfer->texture;
+ char* map;
+ unsigned flags = 0;
+
+ if (transfer->usage != PIPE_TRANSFER_WRITE) {
+ flags |= PIPE_BUFFER_USAGE_CPU_READ;
+ }
+ if (transfer->usage != PIPE_TRANSFER_READ) {
+ flags |= PIPE_BUFFER_USAGE_CPU_WRITE;
+ }
+
+ map = pipe_buffer_map(screen, tex->buffer, flags);
+
+ if (!map) {
+ return NULL;
+ }
+
+ return map + r300_transfer(transfer)->offset +
+ transfer->y / transfer->block.height * transfer->stride +
+ transfer->x / transfer->block.width * transfer->block.size;
+}
+
+static void r300_transfer_unmap(struct pipe_screen* screen,
+ struct pipe_transfer* transfer)
+{
+ struct r300_texture* tex = (struct r300_texture*)transfer->texture;
+ pipe_buffer_unmap(screen, tex->buffer);
+}
+
+static void r300_destroy_screen(struct pipe_screen* pscreen)
+{
+ struct r300_screen* r300screen = r300_screen(pscreen);
+
+ FREE(r300screen->caps);
+ FREE(r300screen);
+}
+
+struct pipe_screen* r300_create_screen(struct r300_winsys* r300_winsys)
+{
+ struct r300_screen* r300screen = CALLOC_STRUCT(r300_screen);
+ struct r300_capabilities* caps = CALLOC_STRUCT(r300_capabilities);
+
+ if (!r300screen || !caps)
+ return NULL;
+
+ caps->pci_id = r300_winsys->pci_id;
+ caps->num_frag_pipes = r300_winsys->gb_pipes;
+
+ r300_parse_chipset(caps);
+
+ r300screen->caps = caps;
+ r300screen->screen.winsys = (struct pipe_winsys*)r300_winsys;
+ r300screen->screen.destroy = r300_destroy_screen;
+ r300screen->screen.get_name = r300_get_name;
+ r300screen->screen.get_vendor = r300_get_vendor;
+ r300screen->screen.get_param = r300_get_param;
+ r300screen->screen.get_paramf = r300_get_paramf;
+ r300screen->screen.is_format_supported = r300_is_format_supported;
+ r300screen->screen.get_tex_transfer = r300_get_tex_transfer;
+ r300screen->screen.tex_transfer_destroy = r300_tex_transfer_destroy;
+ r300screen->screen.transfer_map = r300_transfer_map;
+ r300screen->screen.transfer_unmap = r300_transfer_unmap;
+
+ r300_init_screen_texture_functions(&r300screen->screen);
+ u_simple_screen_init(&r300screen->screen);
+
+ return &r300screen->screen;
+}
diff --git a/src/gallium/drivers/r300/r300_screen.h b/src/gallium/drivers/r300/r300_screen.h
new file mode 100644
index 0000000000..2a0e41fbc3
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_screen.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_SCREEN_H
+#define R300_SCREEN_H
+
+#include "pipe/p_inlines.h"
+#include "pipe/p_screen.h"
+#include "util/u_memory.h"
+#include "util/u_simple_screen.h"
+
+#include "r300_chipset.h"
+#include "r300_texture.h"
+#include "r300_winsys.h"
+
+struct r300_screen {
+ /* Parent class */
+ struct pipe_screen screen;
+
+ /* Chipset capabilities */
+ struct r300_capabilities* caps;
+};
+
+struct r300_transfer {
+ /* Parent class */
+ struct pipe_transfer transfer;
+
+ /* Offset from start of buffer. */
+ unsigned offset;
+};
+
+/* Convenience cast wrapper. */
+static INLINE struct r300_screen* r300_screen(struct pipe_screen* screen) {
+ return (struct r300_screen*)screen;
+}
+
+/* Convenience cast wrapper. */
+static INLINE struct r300_transfer*
+r300_transfer(struct pipe_transfer* transfer)
+{
+ return (struct r300_transfer*)transfer;
+}
+
+/* Creates a new r300 screen. */
+struct pipe_screen* r300_create_screen(struct r300_winsys* r300_winsys);
+
+#endif /* R300_SCREEN_H */
diff --git a/src/gallium/drivers/r300/r300_shader_inlines.h b/src/gallium/drivers/r300/r300_shader_inlines.h
new file mode 100644
index 0000000000..a04f45b03e
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_shader_inlines.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
+ * Joakim Sindholt <opensource@zhasha.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_SHADER_INLINES_H
+#define R300_SHADER_INLINES_H
+
+/* TGSI constants. TGSI is like XML: If it can't solve your problems, you're
+ * not using enough of it. */
+static const struct tgsi_full_src_register r300_constant_zero = {
+ .SrcRegister.Extended = TRUE,
+ .SrcRegister.File = TGSI_FILE_NULL,
+ .SrcRegisterExtSwz.ExtSwizzleX = TGSI_EXTSWIZZLE_ZERO,
+ .SrcRegisterExtSwz.ExtSwizzleY = TGSI_EXTSWIZZLE_ZERO,
+ .SrcRegisterExtSwz.ExtSwizzleZ = TGSI_EXTSWIZZLE_ZERO,
+ .SrcRegisterExtSwz.ExtSwizzleW = TGSI_EXTSWIZZLE_ZERO,
+};
+
+static const struct tgsi_full_src_register r300_constant_one = {
+ .SrcRegister.Extended = TRUE,
+ .SrcRegister.File = TGSI_FILE_NULL,
+ .SrcRegisterExtSwz.ExtSwizzleX = TGSI_EXTSWIZZLE_ONE,
+ .SrcRegisterExtSwz.ExtSwizzleY = TGSI_EXTSWIZZLE_ONE,
+ .SrcRegisterExtSwz.ExtSwizzleZ = TGSI_EXTSWIZZLE_ONE,
+ .SrcRegisterExtSwz.ExtSwizzleW = TGSI_EXTSWIZZLE_ONE,
+};
+
+#endif /* R300_SHADER_INLINES_H */
diff --git a/src/gallium/drivers/r300/r300_state.c b/src/gallium/drivers/r300/r300_state.c
new file mode 100644
index 0000000000..88cb9af6fb
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_state.c
@@ -0,0 +1,734 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "util/u_debug.h"
+#include "util/u_math.h"
+#include "util/u_pack_color.h"
+
+#include "tgsi/tgsi_parse.h"
+
+#include "pipe/p_config.h"
+#include "pipe/internal/p_winsys_screen.h"
+
+#include "r300_context.h"
+#include "r300_reg.h"
+#include "r300_state_inlines.h"
+#include "r300_fs.h"
+#include "r300_vs.h"
+
+/* r300_state: Functions used to initialize the state context by translating
+ * Gallium state objects into semi-native r300 state objects. */
+
+/* Create a new blend state based on the CSO blend state.
+ *
+ * This encompasses alpha blending, logic/raster ops, and blend dithering. */
+static void* r300_create_blend_state(struct pipe_context* pipe,
+ const struct pipe_blend_state* state)
+{
+ struct r300_blend_state* blend = CALLOC_STRUCT(r300_blend_state);
+
+ if (state->blend_enable) {
+ /* XXX for now, always do separate alpha...
+ * is it faster to do it with one reg? */
+ blend->blend_control = R300_ALPHA_BLEND_ENABLE |
+ R300_SEPARATE_ALPHA_ENABLE |
+ R300_READ_ENABLE |
+ r300_translate_blend_function(state->rgb_func) |
+ (r300_translate_blend_factor(state->rgb_src_factor) <<
+ R300_SRC_BLEND_SHIFT) |
+ (r300_translate_blend_factor(state->rgb_dst_factor) <<
+ R300_DST_BLEND_SHIFT);
+ blend->alpha_blend_control =
+ r300_translate_blend_function(state->alpha_func) |
+ (r300_translate_blend_factor(state->alpha_src_factor) <<
+ R300_SRC_BLEND_SHIFT) |
+ (r300_translate_blend_factor(state->alpha_dst_factor) <<
+ R300_DST_BLEND_SHIFT);
+ }
+
+ /* PIPE_LOGICOP_* don't need to be translated, fortunately. */
+ if (state->logicop_enable) {
+ blend->rop = R300_RB3D_ROPCNTL_ROP_ENABLE |
+ (state->logicop_func) << R300_RB3D_ROPCNTL_ROP_SHIFT;
+ }
+
+ if (state->dither) {
+ blend->dither = R300_RB3D_DITHER_CTL_DITHER_MODE_LUT |
+ R300_RB3D_DITHER_CTL_ALPHA_DITHER_MODE_LUT;
+ }
+
+ return (void*)blend;
+}
+
+/* Bind blend state. */
+static void r300_bind_blend_state(struct pipe_context* pipe,
+ void* state)
+{
+ struct r300_context* r300 = r300_context(pipe);
+
+ r300->blend_state = (struct r300_blend_state*)state;
+ r300->dirty_state |= R300_NEW_BLEND;
+}
+
+/* Free blend state. */
+static void r300_delete_blend_state(struct pipe_context* pipe,
+ void* state)
+{
+ FREE(state);
+}
+
+/* Set blend color.
+ * Set up both R300 and R500 registers; figure out later which one to write. */
+static void r300_set_blend_color(struct pipe_context* pipe,
+ const struct pipe_blend_color* color)
+{
+ struct r300_context* r300 = r300_context(pipe);
+ ubyte ur, ug, ub, ua;
+
+ ur = float_to_ubyte(color->color[0]);
+ ug = float_to_ubyte(color->color[1]);
+ ub = float_to_ubyte(color->color[2]);
+ ua = float_to_ubyte(color->color[3]);
+
+ util_pack_color(color->color, PIPE_FORMAT_A8R8G8B8_UNORM,
+ &r300->blend_color_state->blend_color);
+
+ /* XXX this is wrong */
+ r300->blend_color_state->blend_color_red_alpha = ur | (ua << 16);
+ r300->blend_color_state->blend_color_green_blue = ub | (ug << 16);
+
+ r300->dirty_state |= R300_NEW_BLEND_COLOR;
+}
+
+static void r300_set_clip_state(struct pipe_context* pipe,
+ const struct pipe_clip_state* state)
+{
+ struct r300_context* r300 = r300_context(pipe);
+
+ if (r300_screen(pipe->screen)->caps->has_tcl) {
+ r300->clip_state = *state;
+ r300->dirty_state |= R300_NEW_CLIP;
+ } else {
+ draw_flush(r300->draw);
+ draw_set_clip_state(r300->draw, state);
+ }
+}
+
+static void
+ r300_set_constant_buffer(struct pipe_context* pipe,
+ uint shader, uint index,
+ const struct pipe_constant_buffer* buffer)
+{
+ struct r300_context* r300 = r300_context(pipe);
+
+ /* This entire chunk of code seems ever-so-slightly baked.
+ * It's as if I've got pipe_buffer* matryoshkas... */
+ if (buffer && buffer->buffer && buffer->buffer->size) {
+ void* map = pipe->winsys->buffer_map(pipe->winsys, buffer->buffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ memcpy(r300->shader_constants[shader].constants,
+ map, buffer->buffer->size);
+ pipe->winsys->buffer_unmap(pipe->winsys, buffer->buffer);
+
+ r300->shader_constants[shader].count =
+ buffer->buffer->size / (sizeof(float) * 4);
+ } else {
+ r300->shader_constants[shader].count = 0;
+ }
+
+ r300->dirty_state |= R300_NEW_CONSTANTS;
+}
+
+/* Create a new depth, stencil, and alpha state based on the CSO dsa state.
+ *
+ * This contains the depth buffer, stencil buffer, alpha test, and such.
+ * On the Radeon, depth and stencil buffer setup are intertwined, which is
+ * the reason for some of the strange-looking assignments across registers. */
+static void*
+ r300_create_dsa_state(struct pipe_context* pipe,
+ const struct pipe_depth_stencil_alpha_state* state)
+{
+ struct r300_dsa_state* dsa = CALLOC_STRUCT(r300_dsa_state);
+
+ /* Depth test setup. */
+ if (state->depth.enabled) {
+ dsa->z_buffer_control |= R300_Z_ENABLE;
+
+ if (state->depth.writemask) {
+ dsa->z_buffer_control |= R300_Z_WRITE_ENABLE;
+ }
+
+ dsa->z_stencil_control |=
+ (r300_translate_depth_stencil_function(state->depth.func) <<
+ R300_Z_FUNC_SHIFT);
+ }
+
+ /* Stencil buffer setup. */
+ if (state->stencil[0].enabled) {
+ dsa->z_buffer_control |= R300_STENCIL_ENABLE;
+ dsa->z_stencil_control |=
+ (r300_translate_depth_stencil_function(state->stencil[0].func) <<
+ R300_S_FRONT_FUNC_SHIFT) |
+ (r300_translate_stencil_op(state->stencil[0].fail_op) <<
+ R300_S_FRONT_SFAIL_OP_SHIFT) |
+ (r300_translate_stencil_op(state->stencil[0].zpass_op) <<
+ R300_S_FRONT_ZPASS_OP_SHIFT) |
+ (r300_translate_stencil_op(state->stencil[0].zfail_op) <<
+ R300_S_FRONT_ZFAIL_OP_SHIFT);
+
+ dsa->stencil_ref_mask = (state->stencil[0].ref_value) |
+ (state->stencil[0].valuemask << R300_STENCILMASK_SHIFT) |
+ (state->stencil[0].writemask << R300_STENCILWRITEMASK_SHIFT);
+
+ if (state->stencil[1].enabled) {
+ dsa->z_buffer_control |= R300_STENCIL_FRONT_BACK;
+ dsa->z_stencil_control |=
+ (r300_translate_depth_stencil_function(state->stencil[1].func) <<
+ R300_S_BACK_FUNC_SHIFT) |
+ (r300_translate_stencil_op(state->stencil[1].fail_op) <<
+ R300_S_BACK_SFAIL_OP_SHIFT) |
+ (r300_translate_stencil_op(state->stencil[1].zpass_op) <<
+ R300_S_BACK_ZPASS_OP_SHIFT) |
+ (r300_translate_stencil_op(state->stencil[1].zfail_op) <<
+ R300_S_BACK_ZFAIL_OP_SHIFT);
+
+ dsa->stencil_ref_bf = (state->stencil[1].ref_value) |
+ (state->stencil[1].valuemask << R300_STENCILMASK_SHIFT) |
+ (state->stencil[1].writemask << R300_STENCILWRITEMASK_SHIFT);
+ }
+ }
+
+ /* Alpha test setup. */
+ if (state->alpha.enabled) {
+ dsa->alpha_function =
+ r300_translate_alpha_function(state->alpha.func) |
+ R300_FG_ALPHA_FUNC_ENABLE;
+ dsa->alpha_reference = CLAMP(state->alpha.ref_value * 1023.0f,
+ 0, 1023);
+ } else {
+ /* XXX need to fix this to be dynamically set
+ dsa->z_buffer_top = R300_ZTOP_ENABLE; */
+ }
+
+ return (void*)dsa;
+}
+
+/* Bind DSA state. */
+static void r300_bind_dsa_state(struct pipe_context* pipe,
+ void* state)
+{
+ struct r300_context* r300 = r300_context(pipe);
+
+ r300->dsa_state = (struct r300_dsa_state*)state;
+ r300->dirty_state |= R300_NEW_DSA;
+}
+
+/* Free DSA state. */
+static void r300_delete_dsa_state(struct pipe_context* pipe,
+ void* state)
+{
+ FREE(state);
+}
+
+static void r300_set_edgeflags(struct pipe_context* pipe,
+ const unsigned* bitfield)
+{
+ /* XXX you know it's bad when i915 has this blank too */
+ /* XXX and even worse, I have no idea WTF the bitfield is */
+}
+
+static void
+ r300_set_framebuffer_state(struct pipe_context* pipe,
+ const struct pipe_framebuffer_state* state)
+{
+ struct r300_context* r300 = r300_context(pipe);
+
+ draw_flush(r300->draw);
+
+ r300->framebuffer_state = *state;
+
+ r300->dirty_state |= R300_NEW_FRAMEBUFFERS;
+}
+
+/* Create fragment shader state. */
+static void* r300_create_fs_state(struct pipe_context* pipe,
+ const struct pipe_shader_state* shader)
+{
+ struct r300_fragment_shader* fs = NULL;
+
+ fs = (struct r300_fragment_shader*)CALLOC_STRUCT(r300_fragment_shader);
+
+ /* Copy state directly into shader. */
+ fs->state = *shader;
+ fs->state.tokens = tgsi_dup_tokens(shader->tokens);
+
+ tgsi_scan_shader(shader->tokens, &fs->info);
+
+ return (void*)fs;
+}
+
+/* Bind fragment shader state. */
+static void r300_bind_fs_state(struct pipe_context* pipe, void* shader)
+{
+ struct r300_context* r300 = r300_context(pipe);
+ struct r300_fragment_shader* fs = (struct r300_fragment_shader*)shader;
+
+ if (fs == NULL) {
+ r300->fs = NULL;
+ return;
+ } else if (!fs->translated) {
+ r300_translate_fragment_shader(r300, fs);
+ }
+
+ r300->fs = fs;
+
+ r300->dirty_state |= R300_NEW_FRAGMENT_SHADER;
+}
+
+/* Delete fragment shader state. */
+static void r300_delete_fs_state(struct pipe_context* pipe, void* shader)
+{
+ struct r300_fragment_shader* fs = (struct r300_fragment_shader*)shader;
+ rc_constants_destroy(&fs->code.constants);
+ FREE(fs->state.tokens);
+ FREE(shader);
+}
+
+static void r300_set_polygon_stipple(struct pipe_context* pipe,
+ const struct pipe_poly_stipple* state)
+{
+ /* XXX no idea how to set this up, but not terribly important */
+}
+
+/* Create a new rasterizer state based on the CSO rasterizer state.
+ *
+ * This is a very large chunk of state, and covers most of the graphics
+ * backend (GB), geometry assembly (GA), and setup unit (SU) blocks.
+ *
+ * In a not entirely unironic sidenote, this state has nearly nothing to do
+ * with the actual block on the Radeon called the rasterizer (RS). */
+static void* r300_create_rs_state(struct pipe_context* pipe,
+ const struct pipe_rasterizer_state* state)
+{
+ struct r300_rs_state* rs = CALLOC_STRUCT(r300_rs_state);
+
+ /* Copy rasterizer state for Draw. */
+ rs->rs = *state;
+
+ rs->enable_vte = !state->bypass_vs_clip_and_viewport;
+
+#ifdef PIPE_ARCH_LITTLE_ENDIAN
+ rs->vap_control_status = R300_VC_NO_SWAP;
+#else
+ rs->vap_control_status = R300_VC_32BIT_SWAP;
+#endif
+
+ /* If bypassing TCL, or if no TCL engine is present, turn off the HW TCL.
+ * Else, enable HW TCL and force Draw's TCL off. */
+ if (state->bypass_vs_clip_and_viewport ||
+ !r300_screen(pipe->screen)->caps->has_tcl) {
+ rs->vap_control_status |= R300_VAP_TCL_BYPASS;
+ } else {
+ rs->rs.bypass_vs_clip_and_viewport = TRUE;
+ }
+
+ rs->point_size = pack_float_16_6x(state->point_size) |
+ (pack_float_16_6x(state->point_size) << R300_POINTSIZE_X_SHIFT);
+
+ rs->point_minmax =
+ ((int)(state->point_size_min * 6.0) <<
+ R300_GA_POINT_MINMAX_MIN_SHIFT) |
+ ((int)(state->point_size_max * 6.0) <<
+ R300_GA_POINT_MINMAX_MAX_SHIFT);
+
+ rs->line_control = pack_float_16_6x(state->line_width) |
+ R300_GA_LINE_CNTL_END_TYPE_COMP;
+
+ /* Radeons don't think in "CW/CCW", they think in "front/back". */
+ if (state->front_winding == PIPE_WINDING_CW) {
+ rs->cull_mode = R300_FRONT_FACE_CW;
+
+ if (state->offset_cw) {
+ rs->polygon_offset_enable |= R300_FRONT_ENABLE;
+ }
+ if (state->offset_ccw) {
+ rs->polygon_offset_enable |= R300_BACK_ENABLE;
+ }
+ } else {
+ rs->cull_mode = R300_FRONT_FACE_CCW;
+
+ if (state->offset_ccw) {
+ rs->polygon_offset_enable |= R300_FRONT_ENABLE;
+ }
+ if (state->offset_cw) {
+ rs->polygon_offset_enable |= R300_BACK_ENABLE;
+ }
+ }
+ if (state->front_winding & state->cull_mode) {
+ rs->cull_mode |= R300_CULL_FRONT;
+ }
+ if (~(state->front_winding) & state->cull_mode) {
+ rs->cull_mode |= R300_CULL_BACK;
+ }
+
+ if (rs->polygon_offset_enable) {
+ rs->depth_offset_front = rs->depth_offset_back =
+ fui(state->offset_units);
+ rs->depth_scale_front = rs->depth_scale_back =
+ fui(state->offset_scale);
+ }
+
+ if (state->line_stipple_enable) {
+ rs->line_stipple_config =
+ R300_GA_LINE_STIPPLE_CONFIG_LINE_RESET_LINE |
+ (fui((float)state->line_stipple_factor) &
+ R300_GA_LINE_STIPPLE_CONFIG_STIPPLE_SCALE_MASK);
+ /* XXX this might need to be scaled up */
+ rs->line_stipple_value = state->line_stipple_pattern;
+ }
+
+ if (state->flatshade) {
+ rs->color_control = R300_SHADE_MODEL_FLAT;
+ } else {
+ rs->color_control = R300_SHADE_MODEL_SMOOTH;
+ }
+
+ if (!state->flatshade_first) {
+ rs->color_control |= R300_GA_COLOR_CONTROL_PROVOKING_VERTEX_LAST;
+ }
+
+ return (void*)rs;
+}
+
+/* Bind rasterizer state. */
+static void r300_bind_rs_state(struct pipe_context* pipe, void* state)
+{
+ struct r300_context* r300 = r300_context(pipe);
+ struct r300_rs_state* rs = (struct r300_rs_state*)state;
+
+ draw_flush(r300->draw);
+ draw_set_rasterizer_state(r300->draw, &rs->rs);
+
+ r300->rs_state = rs;
+ r300->dirty_state |= R300_NEW_RASTERIZER;
+ r300->dirty_state |= R300_NEW_RS_BLOCK;
+ r300->dirty_state |= R300_NEW_SCISSOR;
+ r300->dirty_state |= R300_NEW_VIEWPORT;
+}
+
+/* Free rasterizer state. */
+static void r300_delete_rs_state(struct pipe_context* pipe, void* state)
+{
+ FREE(state);
+}
+
+static void*
+ r300_create_sampler_state(struct pipe_context* pipe,
+ const struct pipe_sampler_state* state)
+{
+ struct r300_context* r300 = r300_context(pipe);
+ struct r300_sampler_state* sampler = CALLOC_STRUCT(r300_sampler_state);
+ int lod_bias;
+
+ sampler->filter0 |=
+ (r300_translate_wrap(state->wrap_s) << R300_TX_WRAP_S_SHIFT) |
+ (r300_translate_wrap(state->wrap_t) << R300_TX_WRAP_T_SHIFT) |
+ (r300_translate_wrap(state->wrap_r) << R300_TX_WRAP_R_SHIFT);
+
+ sampler->filter0 |= r300_translate_tex_filters(state->min_img_filter,
+ state->mag_img_filter,
+ state->min_mip_filter);
+
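+    /* The LOD bias is stored as a signed 10-bit value in 1/32 steps. */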
+ lod_bias = CLAMP((int)(state->lod_bias * 32), -(1 << 9), (1 << 9) - 1);
+
+ sampler->filter1 |= lod_bias << R300_LOD_BIAS_SHIFT;
+
+ sampler->filter1 |= r300_anisotropy(state->max_anisotropy);
+
+ util_pack_color(state->border_color, PIPE_FORMAT_A8R8G8B8_UNORM,
+ &sampler->border_color);
+
+ /* R500-specific fixups and optimizations */
+ if (r300_screen(r300->context.screen)->caps->is_r500) {
+ sampler->filter1 |= R500_BORDER_FIX;
+ }
+
+ return (void*)sampler;
+}
+
+static void r300_bind_sampler_states(struct pipe_context* pipe,
+ unsigned count,
+ void** states)
+{
+ struct r300_context* r300 = r300_context(pipe);
+ int i;
+
+ if (count > 8) {
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (r300->sampler_states[i] != states[i]) {
+ r300->sampler_states[i] = (struct r300_sampler_state*)states[i];
+ r300->dirty_state |= (R300_NEW_SAMPLER << i);
+ }
+ }
+
+ r300->sampler_count = count;
+}
+
+static void r300_delete_sampler_state(struct pipe_context* pipe, void* state)
+{
+ FREE(state);
+}
+
+static void r300_set_sampler_textures(struct pipe_context* pipe,
+ unsigned count,
+ struct pipe_texture** texture)
+{
+ struct r300_context* r300 = r300_context(pipe);
+ int i;
+
+ /* XXX magic num */
+ if (count > 8) {
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (r300->textures[i] != (struct r300_texture*)texture[i]) {
+ pipe_texture_reference((struct pipe_texture**)&r300->textures[i],
+ texture[i]);
+ r300->dirty_state |= (R300_NEW_TEXTURE << i);
+ }
+ }
+
+ for (i = count; i < 8; i++) {
+ if (r300->textures[i]) {
+ pipe_texture_reference((struct pipe_texture**)&r300->textures[i],
+ NULL);
+ r300->dirty_state |= (R300_NEW_TEXTURE << i);
+ }
+ }
+
+ r300->texture_count = count;
+}
+
+static void r300_set_scissor_state(struct pipe_context* pipe,
+ const struct pipe_scissor_state* state)
+{
+ struct r300_context* r300 = r300_context(pipe);
+
+ if (r300_screen(r300->context.screen)->caps->is_r500) {
+ r300->scissor_state->scissor_top_left =
+ (state->minx << R300_SCISSORS_X_SHIFT) |
+ (state->miny << R300_SCISSORS_Y_SHIFT);
+ r300->scissor_state->scissor_bottom_right =
+ ((state->maxx - 1) << R300_SCISSORS_X_SHIFT) |
+ ((state->maxy - 1) << R300_SCISSORS_Y_SHIFT);
+ } else {
+ /* Offset of 1440 in non-R500 chipsets. */
+ r300->scissor_state->scissor_top_left =
+ ((state->minx + 1440) << R300_SCISSORS_X_SHIFT) |
+ ((state->miny + 1440) << R300_SCISSORS_Y_SHIFT);
+ r300->scissor_state->scissor_bottom_right =
+ (((state->maxx - 1) + 1440) << R300_SCISSORS_X_SHIFT) |
+ (((state->maxy - 1) + 1440) << R300_SCISSORS_Y_SHIFT);
+ }
+
+ r300->dirty_state |= R300_NEW_SCISSOR;
+}
+
+static void r300_set_viewport_state(struct pipe_context* pipe,
+ const struct pipe_viewport_state* state)
+{
+ struct r300_context* r300 = r300_context(pipe);
+
+ /* Do the transform in HW. */
+ r300->viewport_state->vte_control = R300_VTX_W0_FMT;
+
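+    /* Enable each VTE scale/offset stage only if it differs from the
+     * identity transform. */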
+ if (state->scale[0] != 1.0f) {
+ assert(state->scale[0] != 0.0f);
+ r300->viewport_state->xscale = state->scale[0];
+ r300->viewport_state->vte_control |= R300_VPORT_X_SCALE_ENA;
+ }
+ if (state->scale[1] != 1.0f) {
+ assert(state->scale[1] != 0.0f);
+ r300->viewport_state->yscale = state->scale[1];
+ r300->viewport_state->vte_control |= R300_VPORT_Y_SCALE_ENA;
+ }
+ if (state->scale[2] != 1.0f) {
+ assert(state->scale[2] != 0.0f);
+ r300->viewport_state->zscale = state->scale[2];
+ r300->viewport_state->vte_control |= R300_VPORT_Z_SCALE_ENA;
+ }
+ if (state->translate[0] != 0.0f) {
+ r300->viewport_state->xoffset = state->translate[0];
+ r300->viewport_state->vte_control |= R300_VPORT_X_OFFSET_ENA;
+ }
+ if (state->translate[1] != 0.0f) {
+ r300->viewport_state->yoffset = state->translate[1];
+ r300->viewport_state->vte_control |= R300_VPORT_Y_OFFSET_ENA;
+ }
+ if (state->translate[2] != 0.0f) {
+ r300->viewport_state->zoffset = state->translate[2];
+ r300->viewport_state->vte_control |= R300_VPORT_Z_OFFSET_ENA;
+ }
+
+ r300->dirty_state |= R300_NEW_VIEWPORT;
+}
+
+static void r300_set_vertex_buffers(struct pipe_context* pipe,
+ unsigned count,
+ const struct pipe_vertex_buffer* buffers)
+{
+ struct r300_context* r300 = r300_context(pipe);
+
+ memcpy(r300->vertex_buffers, buffers,
+ sizeof(struct pipe_vertex_buffer) * count);
+
+ r300->vertex_buffer_count = count;
+
+ draw_flush(r300->draw);
+ draw_set_vertex_buffers(r300->draw, count, buffers);
+}
+
+static void r300_set_vertex_elements(struct pipe_context* pipe,
+ unsigned count,
+ const struct pipe_vertex_element* elements)
+{
+ struct r300_context* r300 = r300_context(pipe);
+
+ draw_flush(r300->draw);
+ draw_set_vertex_elements(r300->draw, count, elements);
+}
+
+static void* r300_create_vs_state(struct pipe_context* pipe,
+ const struct pipe_shader_state* shader)
+{
+ struct r300_context* r300 = r300_context(pipe);
+
+ if (r300_screen(pipe->screen)->caps->has_tcl) {
+ struct r300_vertex_shader* vs = CALLOC_STRUCT(r300_vertex_shader);
+ /* Copy state directly into shader. */
+ vs->state = *shader;
+ vs->state.tokens = tgsi_dup_tokens(shader->tokens);
+
+ tgsi_scan_shader(shader->tokens, &vs->info);
+
+ /* Appease Draw. */
+ vs->draw = draw_create_vertex_shader(r300->draw, shader);
+
+ return (void*)vs;
+ } else {
+ return draw_create_vertex_shader(r300->draw, shader);
+ }
+}
+
+static void r300_bind_vs_state(struct pipe_context* pipe, void* shader)
+{
+ struct r300_context* r300 = r300_context(pipe);
+
+ draw_flush(r300->draw);
+
+ if (r300_screen(pipe->screen)->caps->has_tcl) {
+ struct r300_vertex_shader* vs = (struct r300_vertex_shader*)shader;
+
+ if (vs == NULL) {
+ r300->vs = NULL;
+ return;
+ } else if (!vs->translated) {
+ r300_translate_vertex_shader(r300, vs);
+ }
+
+ draw_bind_vertex_shader(r300->draw, vs->draw);
+ r300->vs = vs;
+ r300->dirty_state |= R300_NEW_VERTEX_SHADER;
+ } else {
+ draw_bind_vertex_shader(r300->draw,
+ (struct draw_vertex_shader*)shader);
+ }
+}
+
+static void r300_delete_vs_state(struct pipe_context* pipe, void* shader)
+{
+ struct r300_context* r300 = r300_context(pipe);
+
+ if (r300_screen(pipe->screen)->caps->has_tcl) {
+ struct r300_vertex_shader* vs = (struct r300_vertex_shader*)shader;
+
+ rc_constants_destroy(&vs->code.constants);
+ draw_delete_vertex_shader(r300->draw, vs->draw);
+ FREE(vs->state.tokens);
+ FREE(shader);
+ } else {
+ draw_delete_vertex_shader(r300->draw,
+ (struct draw_vertex_shader*)shader);
+ }
+}
+
+void r300_init_state_functions(struct r300_context* r300)
+{
+ r300->context.create_blend_state = r300_create_blend_state;
+ r300->context.bind_blend_state = r300_bind_blend_state;
+ r300->context.delete_blend_state = r300_delete_blend_state;
+
+ r300->context.set_blend_color = r300_set_blend_color;
+
+ r300->context.set_clip_state = r300_set_clip_state;
+
+ r300->context.set_constant_buffer = r300_set_constant_buffer;
+
+ r300->context.create_depth_stencil_alpha_state = r300_create_dsa_state;
+ r300->context.bind_depth_stencil_alpha_state = r300_bind_dsa_state;
+ r300->context.delete_depth_stencil_alpha_state = r300_delete_dsa_state;
+
+ r300->context.set_edgeflags = r300_set_edgeflags;
+
+ r300->context.set_framebuffer_state = r300_set_framebuffer_state;
+
+ r300->context.create_fs_state = r300_create_fs_state;
+ r300->context.bind_fs_state = r300_bind_fs_state;
+ r300->context.delete_fs_state = r300_delete_fs_state;
+
+ r300->context.set_polygon_stipple = r300_set_polygon_stipple;
+
+ r300->context.create_rasterizer_state = r300_create_rs_state;
+ r300->context.bind_rasterizer_state = r300_bind_rs_state;
+ r300->context.delete_rasterizer_state = r300_delete_rs_state;
+
+ r300->context.create_sampler_state = r300_create_sampler_state;
+ r300->context.bind_sampler_states = r300_bind_sampler_states;
+ r300->context.delete_sampler_state = r300_delete_sampler_state;
+
+ r300->context.set_sampler_textures = r300_set_sampler_textures;
+
+ r300->context.set_scissor_state = r300_set_scissor_state;
+
+ r300->context.set_viewport_state = r300_set_viewport_state;
+
+ r300->context.set_vertex_buffers = r300_set_vertex_buffers;
+ r300->context.set_vertex_elements = r300_set_vertex_elements;
+
+ r300->context.create_vs_state = r300_create_vs_state;
+ r300->context.bind_vs_state = r300_bind_vs_state;
+ r300->context.delete_vs_state = r300_delete_vs_state;
+}
diff --git a/src/gallium/drivers/r300/r300_state_derived.c b/src/gallium/drivers/r300/r300_state_derived.c
new file mode 100644
index 0000000000..5f6b225d34
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_state_derived.c
@@ -0,0 +1,465 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "r300_state_derived.h"
+
+#include "r300_fs.h"
+#include "r300_state_inlines.h"
+#include "r300_vs.h"
+
+/* r300_state_derived: Various bits of state which are dependent upon
+ * currently bound CSO data. */
+
+/* Set up the vs_tab and routes. */
+static void r300_vs_tab_routes(struct r300_context* r300,
+ struct r300_vertex_format* vformat)
+{
+ struct r300_screen* r300screen = r300_screen(r300->context.screen);
+ struct vertex_info* vinfo = &vformat->vinfo;
+ int* tab = vformat->vs_tab;
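+    /* tab[] maps each shader input slot to its destination vector location
+     * in the VAP (the DST_VEC_LOC field written in r300_vertex_psc). */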
+ boolean pos = FALSE, psize = FALSE, fog = FALSE;
+ int i, texs = 0, cols = 0;
+ struct tgsi_shader_info* info;
+
+ if (r300screen->caps->has_tcl) {
+ /* Use vertex shader to determine required routes. */
+ info = &r300->vs->info;
+ } else {
+ /* Use fragment shader to determine required routes. */
+ info = &r300->fs->info;
+ }
+
+ assert(info->num_inputs <= 16);
+
+ if (!r300screen->caps->has_tcl || !r300->rs_state->enable_vte)
+ {
+ for (i = 0; i < info->num_inputs; i++) {
+ switch (info->input_semantic_name[i]) {
+ case TGSI_SEMANTIC_POSITION:
+ pos = TRUE;
+ tab[i] = 0;
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ tab[i] = 2 + cols;
+ cols++;
+ break;
+ case TGSI_SEMANTIC_PSIZE:
+ psize = TRUE;
+ tab[i] = 15;
+ break;
+ case TGSI_SEMANTIC_FOG:
+ fog = TRUE;
+ /* Fall through */
+ case TGSI_SEMANTIC_GENERIC:
+ tab[i] = 6 + texs;
+ texs++;
+ break;
+ default:
+ debug_printf("r300: Unknown vertex input %d\n",
+ info->input_semantic_name[i]);
+ break;
+ }
+ }
+ }
+ else
+ {
+ /* Just copy vert attribs over as-is. */
+ for (i = 0; i < info->num_inputs; i++) {
+ tab[i] = i;
+ }
+
+ for (i = 0; i < info->num_outputs; i++) {
+ switch (info->output_semantic_name[i]) {
+ case TGSI_SEMANTIC_POSITION:
+ pos = TRUE;
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ cols++;
+ break;
+ case TGSI_SEMANTIC_PSIZE:
+ psize = TRUE;
+ break;
+ case TGSI_SEMANTIC_FOG:
+ fog = TRUE;
+ /* Fall through */
+ case TGSI_SEMANTIC_GENERIC:
+ texs++;
+ break;
+ default:
+ debug_printf("r300: Unknown vertex output %d\n",
+ info->output_semantic_name[i]);
+ break;
+ }
+ }
+ }
+
+ /* XXX magic */
+ assert(texs <= 8);
+
+ /* Do the actual vertex_info setup.
+ *
+ * vertex_info has four uints of hardware-specific data in it.
+ * vinfo.hwfmt[0] is R300_VAP_VTX_STATE_CNTL
+ * vinfo.hwfmt[1] is R300_VAP_VSM_VTX_ASSM
+ * vinfo.hwfmt[2] is R300_VAP_OUTPUT_VTX_FMT_0
+ * vinfo.hwfmt[3] is R300_VAP_OUTPUT_VTX_FMT_1 */
+
+ vinfo->hwfmt[0] = 0x5555; /* XXX this is classic Mesa bonghits */
+
+ if (!pos) {
+ debug_printf("r300: Forcing vertex position attribute emit...\n");
+ /* Make room for the position attribute
+ * at the beginning of the tab. */
+ for (i = 15; i > 0; i--) {
+ tab[i] = tab[i-1];
+ }
+ tab[0] = 0;
+ }
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_PERSPECTIVE,
+ draw_find_vs_output(r300->draw, TGSI_SEMANTIC_POSITION, 0));
+ vinfo->hwfmt[1] |= R300_INPUT_CNTL_POS;
+ vinfo->hwfmt[2] |= R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT;
+
+ if (psize) {
+ draw_emit_vertex_attr(vinfo, EMIT_1F_PSIZE, INTERP_POS,
+ draw_find_vs_output(r300->draw, TGSI_SEMANTIC_PSIZE, 0));
+ vinfo->hwfmt[2] |= R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT;
+ }
+
+ for (i = 0; i < cols; i++) {
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_LINEAR,
+ draw_find_vs_output(r300->draw, TGSI_SEMANTIC_COLOR, i));
+ vinfo->hwfmt[1] |= R300_INPUT_CNTL_COLOR;
+ vinfo->hwfmt[2] |= (R300_VAP_OUTPUT_VTX_FMT_0__COLOR_0_PRESENT << i);
+ }
+
+ /* Init i right here, increment it if fog is enabled.
+ * This gets around a double-increment problem. */
+ i = 0;
+
+ if (fog) {
+ i++;
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_PERSPECTIVE,
+ draw_find_vs_output(r300->draw, TGSI_SEMANTIC_FOG, 0));
+ vinfo->hwfmt[1] |= (R300_INPUT_CNTL_TC0 << i);
+ vinfo->hwfmt[3] |= (4 << (3 * i));
+ }
+
+    for (; i < texs; i++) {
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_PERSPECTIVE,
+ draw_find_vs_output(r300->draw, TGSI_SEMANTIC_GENERIC, i));
+ vinfo->hwfmt[1] |= (R300_INPUT_CNTL_TC0 << i);
+ vinfo->hwfmt[3] |= (4 << (3 * i));
+ }
+
+ /* Handle the case where the vertex shader will be generating some of
+ * the attribs based on its inputs. */
+ if (r300screen->caps->has_tcl &&
+ info->num_inputs < info->num_outputs) {
+ vinfo->num_attribs = info->num_inputs;
+ }
+
+ draw_compute_vertex_size(vinfo);
+}
+
+/* Update the PSC tables. */
+static void r300_vertex_psc(struct r300_context* r300,
+ struct r300_vertex_format* vformat)
+{
+ struct r300_screen* r300screen = r300_screen(r300->context.screen);
+ struct vertex_info* vinfo = &vformat->vinfo;
+ int* tab = vformat->vs_tab;
+ uint32_t temp;
+ int i, attrib_count;
+
+ /* Vertex shaders have no semantics on their inputs,
+ * so PSC should just route stuff based on their info,
+ * and not on attrib information. */
+ if (r300screen->caps->has_tcl) {
+ attrib_count = r300->vs->info.num_inputs;
+ DBG(r300, DBG_DRAW, "r300: routing %d attribs in psc for vs\n",
+ attrib_count);
+ } else {
+ attrib_count = vinfo->num_attribs;
+ DBG(r300, DBG_DRAW, "r300: attrib count: %d\n", attrib_count);
+ for (i = 0; i < attrib_count; i++) {
+ DBG(r300, DBG_DRAW, "r300: attrib: offset %d, interp %d, size %d,"
+ " tab %d\n", vinfo->attrib[i].src_index,
+ vinfo->attrib[i].interp_mode, vinfo->attrib[i].emit,
+ tab[i]);
+ }
+ }
+
+ for (i = 0; i < attrib_count; i++) {
+ /* Make sure we have a proper destination for our attribute */
+ assert(tab[i] != -1);
+
+ /* Add the attribute to the PSC table. */
+ temp = r300screen->caps->has_tcl ?
+ R300_DATA_TYPE_FLOAT_4 :
+ translate_vertex_data_type(vinfo->attrib[i].emit);
+ temp |= tab[i] << R300_DST_VEC_LOC_SHIFT;
+
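+        /* Two stream control entries are packed per dword: even attributes
+         * fill the low 16 bits, odd attributes the high 16 bits. */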
+ if (i & 1) {
+ vformat->vap_prog_stream_cntl[i >> 1] &= 0x0000ffff;
+ vformat->vap_prog_stream_cntl[i >> 1] |= temp << 16;
+
+ vformat->vap_prog_stream_cntl_ext[i >> 1] |=
+ (R300_VAP_SWIZZLE_XYZW << 16);
+ } else {
+ vformat->vap_prog_stream_cntl[i >> 1] &= 0xffff0000;
+ vformat->vap_prog_stream_cntl[i >> 1] |= temp << 0;
+
+ vformat->vap_prog_stream_cntl_ext[i >> 1] |=
+ (R300_VAP_SWIZZLE_XYZW << 0);
+ }
+ }
+
+ /* Set the last vector in the PSC. */
+ i--;
+ vformat->vap_prog_stream_cntl[i >> 1] |=
+ (R300_LAST_VEC << (i & 1 ? 16 : 0));
+}
+
+/* Update the vertex format. */
+static void r300_update_vertex_format(struct r300_context* r300)
+{
+ struct r300_vertex_format vformat;
+ int i;
+
+ memset(&vformat, 0, sizeof(struct r300_vertex_format));
+ for (i = 0; i < 16; i++) {
+ vformat.vs_tab[i] = -1;
+ vformat.fs_tab[i] = -1;
+ }
+
+ r300_vs_tab_routes(r300, &vformat);
+
+ r300_vertex_psc(r300, &vformat);
+
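+    /* Only flag the vertex format dirty if it actually changed. */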
+ if (memcmp(&r300->vertex_info, &vformat,
+ sizeof(struct r300_vertex_format))) {
+ memcpy(&r300->vertex_info, &vformat,
+ sizeof(struct r300_vertex_format));
+ r300->dirty_state |= R300_NEW_VERTEX_FORMAT;
+ }
+}
+
+/* Set up the mappings from GB to US, for RS block. */
+static void r300_update_fs_tab(struct r300_context* r300)
+{
+ struct r300_vertex_format* vformat = &r300->vertex_info;
+ struct tgsi_shader_info* info = &r300->fs->info;
+ int i, cols = 0, texs = 0, cols_emitted = 0;
+ int* tab = vformat->fs_tab;
+
+ for (i = 0; i < 16; i++) {
+ tab[i] = -1;
+ }
+
+ assert(info->num_inputs <= 16);
+ for (i = 0; i < info->num_inputs; i++) {
+ switch (info->input_semantic_name[i]) {
+ case TGSI_SEMANTIC_COLOR:
+ tab[i] = INTERP_LINEAR;
+ cols++;
+ break;
+ case TGSI_SEMANTIC_POSITION:
+ case TGSI_SEMANTIC_PSIZE:
+ debug_printf("r300: Implementation error: Can't use "
+ "pos attribs in fragshader yet!\n");
+ /* Pass through for now */
+ case TGSI_SEMANTIC_FOG:
+ case TGSI_SEMANTIC_GENERIC:
+ tab[i] = INTERP_PERSPECTIVE;
+ break;
+ default:
+            debug_printf("r300: Unknown fragment shader input %d\n",
+                info->input_semantic_name[i]);
+ break;
+ }
+ }
+
+ /* Now that we know where everything is... */
+ DBG(r300, DBG_DRAW, "r300: fp input count: %d\n", info->num_inputs);
+ for (i = 0; i < info->num_inputs; i++) {
+ switch (tab[i]) {
+ case INTERP_LINEAR:
+ DBG(r300, DBG_DRAW, "r300: attrib: "
+ "stack offset %d, color, tab %d\n",
+ i, cols_emitted);
+ tab[i] = cols_emitted;
+ cols_emitted++;
+ break;
+ case INTERP_PERSPECTIVE:
+ DBG(r300, DBG_DRAW, "r300: attrib: "
+ "stack offset %d, texcoord, tab %d\n",
+ i, cols + texs);
+ tab[i] = cols + texs;
+ texs++;
+ break;
+ case -1:
+ debug_printf("r300: Implementation error: Bad fp interp!\n");
+ default:
+ break;
+ }
+ }
+
+}
+
+/* Set up the RS block. This is the part of the chipset that actually does
+ * the rasterization of vertices into fragments. This is also the part of the
+ * chipset that locks up if any part of it is even slightly wrong. */
+static void r300_update_rs_block(struct r300_context* r300)
+{
+ struct r300_rs_block* rs = r300->rs_block;
+ struct tgsi_shader_info* info = &r300->fs->info;
+ int* tab = r300->vertex_info.fs_tab;
+ int col_count = 0, fp_offset = 0, i, memory_pos, tex_count = 0;
+
+ memset(rs, 0, sizeof(struct r300_rs_block));
+
+ if (r300_screen(r300->context.screen)->caps->is_r500) {
+ for (i = 0; i < info->num_inputs; i++) {
+ assert(tab[i] != -1);
+ memory_pos = tab[i] * 4;
+ switch (info->input_semantic_name[i]) {
+ case TGSI_SEMANTIC_COLOR:
+ rs->ip[col_count] |=
+ R500_RS_COL_PTR(memory_pos) |
+ R500_RS_COL_FMT(R300_RS_COL_FMT_RGBA);
+ col_count++;
+ break;
+ case TGSI_SEMANTIC_GENERIC:
+ rs->ip[tex_count] |=
+ R500_RS_SEL_S(memory_pos) |
+ R500_RS_SEL_T(memory_pos + 1) |
+ R500_RS_SEL_R(memory_pos + 2) |
+ R500_RS_SEL_Q(memory_pos + 3);
+ tex_count++;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (col_count == 0) {
+ rs->ip[0] |= R500_RS_COL_FMT(R300_RS_COL_FMT_0001);
+ }
+
+ if (tex_count == 0) {
+ rs->ip[0] |=
+ R500_RS_SEL_S(R500_RS_IP_PTR_K0) |
+ R500_RS_SEL_T(R500_RS_IP_PTR_K0) |
+ R500_RS_SEL_R(R500_RS_IP_PTR_K0) |
+ R500_RS_SEL_Q(R500_RS_IP_PTR_K1);
+ }
+
+ /* Rasterize at least one color, or bad things happen. */
+ if ((col_count == 0) && (tex_count == 0)) {
+ col_count++;
+ }
+
+ for (i = 0; i < tex_count; i++) {
+ rs->inst[i] |= R500_RS_INST_TEX_ID(i) |
+ R500_RS_INST_TEX_CN_WRITE | R500_RS_INST_TEX_ADDR(fp_offset);
+ fp_offset++;
+ }
+
+ for (i = 0; i < col_count; i++) {
+ rs->inst[i] |= R500_RS_INST_COL_ID(i) |
+ R500_RS_INST_COL_CN_WRITE | R500_RS_INST_COL_ADDR(fp_offset);
+ fp_offset++;
+ }
+ } else {
+ for (i = 0; i < info->num_inputs; i++) {
+ assert(tab[i] != -1);
+ memory_pos = tab[i] * 4;
+ switch (info->input_semantic_name[i]) {
+ case TGSI_SEMANTIC_COLOR:
+ rs->ip[col_count] |=
+ R300_RS_COL_PTR(memory_pos) |
+ R300_RS_COL_FMT(R300_RS_COL_FMT_RGBA);
+ col_count++;
+ break;
+ case TGSI_SEMANTIC_GENERIC:
+ rs->ip[tex_count] |=
+ R300_RS_TEX_PTR(memory_pos) |
+ R300_RS_SEL_S(R300_RS_SEL_C0) |
+ R300_RS_SEL_T(R300_RS_SEL_C1) |
+ R300_RS_SEL_R(R300_RS_SEL_C2) |
+ R300_RS_SEL_Q(R300_RS_SEL_C3);
+ tex_count++;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (col_count == 0) {
+ rs->ip[0] |= R300_RS_COL_FMT(R300_RS_COL_FMT_0001);
+ }
+
+ if (tex_count == 0) {
+ rs->ip[0] |=
+ R300_RS_SEL_S(R300_RS_SEL_K0) |
+ R300_RS_SEL_T(R300_RS_SEL_K0) |
+ R300_RS_SEL_R(R300_RS_SEL_K0) |
+ R300_RS_SEL_Q(R300_RS_SEL_K1);
+ }
+
+ /* Rasterize at least one color, or bad things happen. */
+ if ((col_count == 0) && (tex_count == 0)) {
+ col_count++;
+ }
+
+ for (i = 0; i < tex_count; i++) {
+ rs->inst[i] |= R300_RS_INST_TEX_ID(i) |
+ R300_RS_INST_TEX_CN_WRITE | R300_RS_INST_TEX_ADDR(fp_offset);
+ fp_offset++;
+ }
+
+ for (i = 0; i < col_count; i++) {
+ rs->inst[i] |= R300_RS_INST_COL_ID(i) |
+ R300_RS_INST_COL_CN_WRITE | R300_RS_INST_COL_ADDR(fp_offset);
+ fp_offset++;
+ }
+ }
+
+ rs->count = (tex_count * 4) | (col_count << R300_IC_COUNT_SHIFT) |
+ R300_HIRES_EN;
+
+ rs->inst_count = MAX2(MAX2(col_count - 1, tex_count - 1), 0);
+}
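+
+/* Worked example: one color plus two generic (texcoord) inputs gives
+ * rs->count = 8 | (1 << R300_IC_COUNT_SHIFT) | R300_HIRES_EN (eight
+ * interpolated texture components, one color) and
+ * rs->inst_count = MAX2(MAX2(0, 1), 0) = 1, i.e. two RS instructions;
+ * the texcoords land at fragment shader offsets 0-1 and the color at
+ * offset 2. */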
+
+void r300_update_derived_state(struct r300_context* r300)
+{
+ if (r300->dirty_state &
+ (R300_NEW_FRAGMENT_SHADER | R300_NEW_VERTEX_SHADER)) {
+ r300_update_vertex_format(r300);
+ }
+
+ if (r300->dirty_state & R300_NEW_VERTEX_FORMAT) {
+ r300_update_fs_tab(r300);
+ r300_update_rs_block(r300);
+ }
+}
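+
+/* The net effect is a two-stage dependency: binding a vertex or fragment
+ * shader only recomputes the vertex format, and the fs_tab/RS block rebuild
+ * runs only when that recomputation actually flips R300_NEW_VERTEX_FORMAT.
+ * So, for example, swapping in a fragment shader with an identical input
+ * signature costs one memcmp and no RS block rebuild. */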
diff --git a/src/gallium/drivers/r300/r300_state_derived.h b/src/gallium/drivers/r300/r300_state_derived.h
new file mode 100644
index 0000000000..71a4a47b00
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_state_derived.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_STATE_DERIVED_H
+#define R300_STATE_DERIVED_H
+
+struct r300_context;
+
+void r300_update_derived_state(struct r300_context* r300);
+
+#endif /* R300_STATE_DERIVED_H */
diff --git a/src/gallium/drivers/r300/r300_state_inlines.h b/src/gallium/drivers/r300/r300_state_inlines.h
new file mode 100644
index 0000000000..91b93fc367
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_state_inlines.h
@@ -0,0 +1,403 @@
+/*
+ * Copyright 2009 Joakim Sindholt <opensource@zhasha.com>
+ * Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_STATE_INLINES_H
+#define R300_STATE_INLINES_H
+
+#include "pipe/p_format.h"
+
+#include "r300_reg.h"
+
+/* Some maths. These should probably find their way to u_math, if needed. */
+
+static INLINE int pack_float_16_6x(float f) {
+ return ((int)(f * 6.0) & 0xffff);
+}
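+
+/* Point and line sizes on this hardware are given in sixths of a pixel, so
+ * for example pack_float_16_6x(12.5f) == 75 (0x004b), i.e. 12.5 px becomes
+ * 75 hardware units, truncated to 16 bits. */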
+
+/* Blend state. */
+
+static INLINE uint32_t r300_translate_blend_function(int blend_func)
+{
+ switch (blend_func) {
+ case PIPE_BLEND_ADD:
+ return R300_COMB_FCN_ADD_CLAMP;
+ case PIPE_BLEND_SUBTRACT:
+ return R300_COMB_FCN_SUB_CLAMP;
+ case PIPE_BLEND_REVERSE_SUBTRACT:
+ return R300_COMB_FCN_RSUB_CLAMP;
+ case PIPE_BLEND_MIN:
+ return R300_COMB_FCN_MIN;
+ case PIPE_BLEND_MAX:
+ return R300_COMB_FCN_MAX;
+ default:
+ debug_printf("r300: Unknown blend function %d\n", blend_func);
+ break;
+ }
+ return 0;
+}
+
+/* XXX we can also offer the D3D versions of some of these... */
+static INLINE uint32_t r300_translate_blend_factor(int blend_fact)
+{
+ switch (blend_fact) {
+ case PIPE_BLENDFACTOR_ONE:
+ return R300_BLEND_GL_ONE;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ return R300_BLEND_GL_SRC_COLOR;
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ return R300_BLEND_GL_SRC_ALPHA;
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ return R300_BLEND_GL_DST_ALPHA;
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ return R300_BLEND_GL_DST_COLOR;
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ return R300_BLEND_GL_SRC_ALPHA_SATURATE;
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ return R300_BLEND_GL_CONST_COLOR;
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ return R300_BLEND_GL_CONST_ALPHA;
+ /* XXX WTF are these?
+ case PIPE_BLENDFACTOR_SRC1_COLOR:
+ case PIPE_BLENDFACTOR_SRC1_ALPHA: */
+ case PIPE_BLENDFACTOR_ZERO:
+ return R300_BLEND_GL_ZERO;
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ return R300_BLEND_GL_ONE_MINUS_SRC_COLOR;
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ return R300_BLEND_GL_ONE_MINUS_SRC_ALPHA;
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ return R300_BLEND_GL_ONE_MINUS_DST_ALPHA;
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ return R300_BLEND_GL_ONE_MINUS_DST_COLOR;
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ return R300_BLEND_GL_ONE_MINUS_CONST_COLOR;
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ return R300_BLEND_GL_ONE_MINUS_CONST_ALPHA;
+ /* XXX see above
+ case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
+ case PIPE_BLENDFACTOR_INV_SRC1_ALPHA: */
+ default:
+ debug_printf("r300: Unknown blend factor %d\n", blend_fact);
+ break;
+ }
+ return 0;
+}
+
+/* DSA state. */
+
+static INLINE uint32_t r300_translate_depth_stencil_function(int zs_func)
+{
+ switch (zs_func) {
+ case PIPE_FUNC_NEVER:
+ return R300_ZS_NEVER;
+ case PIPE_FUNC_LESS:
+ return R300_ZS_LESS;
+ case PIPE_FUNC_EQUAL:
+ return R300_ZS_EQUAL;
+ case PIPE_FUNC_LEQUAL:
+ return R300_ZS_LEQUAL;
+ case PIPE_FUNC_GREATER:
+ return R300_ZS_GREATER;
+ case PIPE_FUNC_NOTEQUAL:
+ return R300_ZS_NOTEQUAL;
+ case PIPE_FUNC_GEQUAL:
+ return R300_ZS_GEQUAL;
+ case PIPE_FUNC_ALWAYS:
+ return R300_ZS_ALWAYS;
+ default:
+ debug_printf("r300: Unknown depth/stencil function %d\n",
+ zs_func);
+ break;
+ }
+ return 0;
+}
+
+static INLINE uint32_t r300_translate_stencil_op(int s_op)
+{
+ switch (s_op) {
+ case PIPE_STENCIL_OP_KEEP:
+ return R300_ZS_KEEP;
+ case PIPE_STENCIL_OP_ZERO:
+ return R300_ZS_ZERO;
+ case PIPE_STENCIL_OP_REPLACE:
+ return R300_ZS_REPLACE;
+ case PIPE_STENCIL_OP_INCR:
+ return R300_ZS_INCR;
+ case PIPE_STENCIL_OP_DECR:
+ return R300_ZS_DECR;
+ case PIPE_STENCIL_OP_INCR_WRAP:
+ return R300_ZS_INCR_WRAP;
+ case PIPE_STENCIL_OP_DECR_WRAP:
+ return R300_ZS_DECR_WRAP;
+ case PIPE_STENCIL_OP_INVERT:
+ return R300_ZS_INVERT;
+ default:
+ debug_printf("r300: Unknown stencil op %d", s_op);
+ break;
+ }
+ return 0;
+}
+
+static INLINE uint32_t r300_translate_alpha_function(int alpha_func)
+{
+ switch (alpha_func) {
+ case PIPE_FUNC_NEVER:
+ return R300_FG_ALPHA_FUNC_NEVER;
+ case PIPE_FUNC_LESS:
+ return R300_FG_ALPHA_FUNC_LESS;
+ case PIPE_FUNC_EQUAL:
+ return R300_FG_ALPHA_FUNC_EQUAL;
+ case PIPE_FUNC_LEQUAL:
+ return R300_FG_ALPHA_FUNC_LE;
+ case PIPE_FUNC_GREATER:
+ return R300_FG_ALPHA_FUNC_GREATER;
+ case PIPE_FUNC_NOTEQUAL:
+ return R300_FG_ALPHA_FUNC_NOTEQUAL;
+ case PIPE_FUNC_GEQUAL:
+ return R300_FG_ALPHA_FUNC_GE;
+ case PIPE_FUNC_ALWAYS:
+ return R300_FG_ALPHA_FUNC_ALWAYS;
+ default:
+ debug_printf("r300: Unknown alpha function %d", alpha_func);
+ break;
+ }
+ return 0;
+}
+
+/* Texture sampler state. */
+
+static INLINE uint32_t r300_translate_wrap(int wrap)
+{
+ switch (wrap) {
+ case PIPE_TEX_WRAP_REPEAT:
+ return R300_TX_REPEAT;
+ case PIPE_TEX_WRAP_CLAMP:
+ return R300_TX_CLAMP;
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ return R300_TX_CLAMP_TO_EDGE;
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ return R300_TX_CLAMP_TO_BORDER;
+ case PIPE_TEX_WRAP_MIRROR_REPEAT:
+ return R300_TX_REPEAT | R300_TX_MIRRORED;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP:
+ return R300_TX_CLAMP | R300_TX_MIRRORED;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
+ return R300_TX_CLAMP_TO_EDGE | R300_TX_MIRRORED;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
+ return R300_TX_CLAMP_TO_EDGE | R300_TX_MIRRORED;
+ default:
+ debug_printf("r300: Unknown texture wrap %d", wrap);
+ return 0;
+ }
+}
+
+static INLINE uint32_t r300_translate_tex_filters(int min, int mag, int mip)
+{
+ uint32_t retval = 0;
+ switch (min) {
+ case PIPE_TEX_FILTER_NEAREST:
+ retval |= R300_TX_MIN_FILTER_NEAREST;
+ break;
+ case PIPE_TEX_FILTER_LINEAR:
+ retval |= R300_TX_MIN_FILTER_LINEAR;
+ break;
+ case PIPE_TEX_FILTER_ANISO:
+ retval |= R300_TX_MIN_FILTER_ANISO;
+ break;
+ default:
+ debug_printf("r300: Unknown texture filter %d\n", min);
+ break;
+ }
+ switch (mag) {
+ case PIPE_TEX_FILTER_NEAREST:
+ retval |= R300_TX_MAG_FILTER_NEAREST;
+ break;
+ case PIPE_TEX_FILTER_LINEAR:
+ retval |= R300_TX_MAG_FILTER_LINEAR;
+ break;
+ case PIPE_TEX_FILTER_ANISO:
+ retval |= R300_TX_MAG_FILTER_ANISO;
+ break;
+ default:
+ debug_printf("r300: Unknown texture filter %d\n", mag);
+ break;
+ }
+ switch (mip) {
+ case PIPE_TEX_MIPFILTER_NONE:
+ retval |= R300_TX_MIN_FILTER_MIP_NONE;
+ break;
+ case PIPE_TEX_MIPFILTER_NEAREST:
+ retval |= R300_TX_MIN_FILTER_MIP_NEAREST;
+ break;
+ case PIPE_TEX_MIPFILTER_LINEAR:
+ retval |= R300_TX_MIN_FILTER_MIP_LINEAR;
+ break;
+ default:
+ debug_printf("r300: Unknown texture filter %d\n", mip);
+ break;
+ }
+
+ return retval;
+}
+
+static INLINE uint32_t r300_anisotropy(float max_aniso)
+{
+ if (max_aniso >= 16.0f) {
+ return R300_TX_MAX_ANISO_16_TO_1;
+ } else if (max_aniso >= 8.0f) {
+ return R300_TX_MAX_ANISO_8_TO_1;
+ } else if (max_aniso >= 4.0f) {
+ return R300_TX_MAX_ANISO_4_TO_1;
+ } else if (max_aniso >= 2.0f) {
+ return R300_TX_MAX_ANISO_2_TO_1;
+ } else {
+ return R300_TX_MAX_ANISO_1_TO_1;
+ }
+}
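+
+/* A sketch of how a sampler CSO would combine the two helpers above,
+ * assuming Gallium's pipe_sampler_state field names:
+ *
+ * uint32_t filter0 =
+ * r300_translate_tex_filters(state->min_img_filter,
+ * state->mag_img_filter,
+ * state->min_mip_filter) |
+ * r300_anisotropy(state->max_anisotropy);
+ */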
+
+/* Buffer formats. */
+
+static INLINE uint32_t r300_translate_colorformat(enum pipe_format format)
+{
+ switch (format) {
+ /* 8-bit buffers */
+ case PIPE_FORMAT_I8_UNORM:
+ return R300_COLOR_FORMAT_I8;
+ /* 16-bit buffers */
+ case PIPE_FORMAT_R5G6B5_UNORM:
+ return R300_COLOR_FORMAT_RGB565;
+ case PIPE_FORMAT_A1R5G5B5_UNORM:
+ return R300_COLOR_FORMAT_ARGB1555;
+ case PIPE_FORMAT_A4R4G4B4_UNORM:
+ return R300_COLOR_FORMAT_ARGB4444;
+ /* 32-bit buffers */
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_Z24S8_UNORM:
+ return R300_COLOR_FORMAT_ARGB8888;
+ /* XXX Not in pipe_format
+ case PIPE_FORMAT_A32R32G32B32:
+ return R300_COLOR_FORMAT_ARGB32323232;
+ case PIPE_FORMAT_A16R16G16B16:
+ return R300_COLOR_FORMAT_ARGB16161616;
+ case PIPE_FORMAT_A10R10G10B10_UNORM:
+ return R500_COLOR_FORMAT_ARGB10101010;
+ case PIPE_FORMAT_A2R10G10B10_UNORM:
+ return R500_COLOR_FORMAT_ARGB2101010;
+ case PIPE_FORMAT_I10_UNORM:
+ return R500_COLOR_FORMAT_I10; */
+ default:
+ debug_printf("r300: Implementation error: "
+ "Got unsupported color format %s in %s\n",
+ pf_name(format), __FUNCTION__);
+ break;
+ }
+ return 0;
+}
+
+static INLINE uint32_t r300_translate_zsformat(enum pipe_format format)
+{
+ switch (format) {
+ /* 16-bit depth, no stencil */
+ case PIPE_FORMAT_Z16_UNORM:
+ return R300_DEPTHFORMAT_16BIT_INT_Z;
+ /* 24-bit depth, 8-bit stencil */
+ case PIPE_FORMAT_Z24S8_UNORM:
+ return R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL;
+ default:
+ debug_printf("r300: Implementation error: "
+ "Got unsupported ZS format %s in %s\n",
+ pf_name(format), __FUNCTION__);
+ break;
+ }
+ return 0;
+}
+
+/* Translate pipe_format into US_OUT_FMT.
+ * Note that formats are stored from C3 to C0. */
+static INLINE uint32_t r300_translate_out_fmt(enum pipe_format format)
+{
+ switch (format) {
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ case PIPE_FORMAT_Z24S8_UNORM:
+ return R300_US_OUT_FMT_C4_8 |
+ R300_C0_SEL_B | R300_C1_SEL_G |
+ R300_C2_SEL_R | R300_C3_SEL_A;
+ default:
+ debug_printf("r300: Implementation error: "
+ "Got unsupported output format %s in %s\n",
+ pf_name(format), __FUNCTION__);
+ return R300_US_OUT_FMT_UNUSED;
+ }
+ return 0;
+}
+
+/* Non-CSO state. (For now.) */
+
+static INLINE uint32_t r300_translate_gb_pipes(int pipe_count)
+{
+ switch (pipe_count) {
+ case 1:
+ return R300_GB_TILE_PIPE_COUNT_RV300;
+ break;
+ case 2:
+ return R300_GB_TILE_PIPE_COUNT_R300;
+ break;
+ case 3:
+ return R300_GB_TILE_PIPE_COUNT_R420_3P;
+ break;
+ case 4:
+ return R300_GB_TILE_PIPE_COUNT_R420;
+ break;
+ }
+ return 0;
+}
+
+static INLINE uint32_t translate_vertex_data_type(int type) {
+ switch (type) {
+ case EMIT_1F:
+ case EMIT_1F_PSIZE:
+ return R300_DATA_TYPE_FLOAT_1;
+ break;
+ case EMIT_2F:
+ return R300_DATA_TYPE_FLOAT_2;
+ break;
+ case EMIT_3F:
+ return R300_DATA_TYPE_FLOAT_3;
+ break;
+ case EMIT_4F:
+ return R300_DATA_TYPE_FLOAT_4;
+ break;
+ case EMIT_4UB:
+ return R300_DATA_TYPE_BYTE;
+ break;
+ default:
+ debug_printf("r300: Implementation error: "
+ "Bad vertex data type!\n");
+ assert(0);
+ break;
+ }
+
+ return 0;
+}
+
+#endif /* R300_STATE_INLINES_H */
diff --git a/src/gallium/drivers/r300/r300_state_invariant.c b/src/gallium/drivers/r300/r300_state_invariant.c
new file mode 100644
index 0000000000..3865730d63
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_state_invariant.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2009 Joakim Sindholt <opensource@zhasha.com>
+ * Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "r300_state_invariant.h"
+
+
+struct pipe_viewport_state r300_viewport_identity = {
+ .scale = {1.0, 1.0, 1.0, 1.0},
+ .translate = {0.0, 0.0, 0.0, 0.0},
+};
+
+/* Calculate and emit invariant state. This is data that the 3D engine
+ * will probably want at the beginning of every CS, but it's not currently
+ * handled by any CSO setup, and in addition it doesn't really change much.
+ *
+ * Note that eventually this should be empty, but it's useful for development
+ * and general deduplication of code. */
+void r300_emit_invariant_state(struct r300_context* r300)
+{
+ struct r300_capabilities* caps = r300_screen(r300->context.screen)->caps;
+ CS_LOCALS(r300);
+
+ BEGIN_CS(24 + (caps->has_tcl ? 2: 0));
+
+ /*** Graphics Backend (GB) ***/
+ /* Various GB enables */
+ OUT_CS_REG(R300_GB_ENABLE, R300_GB_POINT_STUFF_ENABLE |
+ R300_GB_LINE_STUFF_ENABLE |
+ R300_GB_TRIANGLE_STUFF_ENABLE);
+ /* Subpixel multisampling for AA
+ * These are commented out because glisse's CS checker doesn't like them.
+ * I presume these will be re-enabled later.
+ * OUT_CS_REG(R300_GB_MSPOS0, 0x66666666);
+ * OUT_CS_REG(R300_GB_MSPOS1, 0x6666666);
+ */
+ /* Source of fog depth */
+ OUT_CS_REG(R300_GB_SELECT, R300_GB_FOG_SELECT_1_1_W);
+ /* AA enable */
+ OUT_CS_REG(R300_GB_AA_CONFIG, 0x0);
+
+ /*** Fog (FG) ***/
+ OUT_CS_REG(R300_FG_FOG_BLEND, 0x0);
+ OUT_CS_REG(R300_FG_FOG_COLOR_R, 0x0);
+ OUT_CS_REG(R300_FG_FOG_COLOR_G, 0x0);
+ OUT_CS_REG(R300_FG_FOG_COLOR_B, 0x0);
+ OUT_CS_REG(R300_FG_DEPTH_SRC, 0x0);
+ OUT_CS_REG(R300_US_W_FMT, 0x0);
+
+ /*** VAP ***/
+ /* Max and min vertex index clamp. */
+ OUT_CS_REG(R300_VAP_VF_MIN_VTX_INDX, 0x0);
+ OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, 0xffffff);
+ /* Sign/normalize control */
+ OUT_CS_REG(R300_VAP_PSC_SGN_NORM_CNTL, R300_SGN_NORM_NO_ZERO);
+ /* TCL-only stuff */
+ if (caps->has_tcl) {
+ /* Amount of time to wait for vertex fetches in PVS */
+ OUT_CS_REG(VAP_PVS_VTX_TIMEOUT_REG, 0xffff);
+ }
+
+ END_CS;
+
+ /* XXX unsorted stuff from surface_fill */
+ BEGIN_CS(64 + (caps->has_tcl ? 5 : 0) + (caps->is_r500 ? 4 : 0));
+ /* Flush PVS. */
+ OUT_CS_REG(R300_VAP_PVS_STATE_FLUSH_REG, 0x0);
+
+ OUT_CS_REG(R300_SE_VTE_CNTL, R300_VPORT_X_SCALE_ENA |
+ R300_VPORT_X_OFFSET_ENA | R300_VPORT_Y_SCALE_ENA |
+ R300_VPORT_Y_OFFSET_ENA | R300_VPORT_Z_SCALE_ENA |
+ R300_VPORT_Z_OFFSET_ENA | R300_VTX_W0_FMT);
+ if (caps->has_tcl) {
+ OUT_CS_REG_SEQ(R300_VAP_GB_VERT_CLIP_ADJ, 4);
+ OUT_CS_32F(1.0);
+ OUT_CS_32F(1.0);
+ OUT_CS_32F(1.0);
+ OUT_CS_32F(1.0);
+ }
+ /* XXX point tex stuffing */
+ OUT_CS_REG_SEQ(R300_GA_POINT_S0, 1);
+ OUT_CS_32F(0.0);
+ OUT_CS_REG_SEQ(R300_GA_POINT_S1, 1);
+ OUT_CS_32F(1.0);
+ /* XXX line tex stuffing */
+ OUT_CS_REG_SEQ(R300_GA_LINE_S0, 1);
+ OUT_CS_32F(0.0);
+ OUT_CS_REG_SEQ(R300_GA_LINE_S1, 1);
+ OUT_CS_32F(1.0);
+ OUT_CS_REG(R300_GA_TRIANGLE_STIPPLE, 0x5 |
+ (0x5 << R300_GA_TRIANGLE_STIPPLE_Y_SHIFT_SHIFT));
+ /* XXX this big chunk should be refactored into rs_state */
+ OUT_CS_REG(R300_GA_SOLID_RG, 0x00000000);
+ OUT_CS_REG(R300_GA_SOLID_BA, 0x00000000);
+ OUT_CS_REG(R300_GA_POLY_MODE, 0x00000000);
+ OUT_CS_REG(R300_GA_ROUND_MODE, 0x00000001);
+ OUT_CS_REG(R300_GA_OFFSET, 0x00000000);
+ OUT_CS_REG(R300_GA_FOG_SCALE, 0x3DBF1412);
+ OUT_CS_REG(R300_GA_FOG_OFFSET, 0x00000000);
+ OUT_CS_REG(R300_SU_TEX_WRAP, 0x00000000);
+ OUT_CS_REG(R300_SU_DEPTH_SCALE, 0x4B7FFFFF);
+ OUT_CS_REG(R300_SU_DEPTH_OFFSET, 0x00000000);
+ OUT_CS_REG(R300_SC_HYPERZ, 0x0000001C);
+ OUT_CS_REG(R300_SC_EDGERULE, 0x2DA49525);
+ OUT_CS_REG(R300_RB3D_CCTL, 0x00000000);
+ OUT_CS_REG(RB3D_COLOR_CHANNEL_MASK, 0x0000000F);
+ OUT_CS_REG(R300_RB3D_AARESOLVE_CTL, 0x00000000);
+ if (caps->is_r500) {
+ OUT_CS_REG(R500_RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD, 0x00000000);
+ OUT_CS_REG(R500_RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD, 0xFFFFFFFF);
+ }
+ OUT_CS_REG(R300_ZB_FORMAT, 0x00000002);
+ OUT_CS_REG(R300_ZB_ZCACHE_CTLSTAT, 0x00000003);
+ OUT_CS_REG(R300_ZB_BW_CNTL, 0x00000000);
+ OUT_CS_REG(R300_ZB_DEPTHCLEARVALUE, 0x00000000);
+ OUT_CS_REG(R300_ZB_HIZ_OFFSET, 0x00000000);
+ OUT_CS_REG(R300_ZB_HIZ_PITCH, 0x00000000);
+ OUT_CS_REG(R300_VAP_VTX_STATE_CNTL, 0x1);
+ OUT_CS_REG(R300_VAP_VSM_VTX_ASSM, 0x405);
+ OUT_CS_REG(R300_SE_VTE_CNTL, 0x0000043F);
+
+ /* XXX */
+ OUT_CS_REG(R300_SC_CLIP_RULE, 0xaaaa);
+
+ END_CS;
+}
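+
+/* A note on the BEGIN_CS sizes above: they count dwords, and OUT_CS_REG
+ * emits a packet header plus a value, i.e. two dwords per register. The
+ * first block writes 12 registers = 24 dwords, plus 2 more when the TCL-only
+ * VAP_PVS_VTX_TIMEOUT_REG write is emitted, which is exactly
+ * 24 + (caps->has_tcl ? 2 : 0). */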
diff --git a/src/gallium/drivers/r300/r300_state_invariant.h b/src/gallium/drivers/r300/r300_state_invariant.h
new file mode 100644
index 0000000000..5bea6779fe
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_state_invariant.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_STATE_INVARIANT_H
+#define R300_STATE_INVARIANT_H
+
+#include "r300_chipset.h"
+#include "r300_context.h"
+#include "r300_cs.h"
+#include "r300_reg.h"
+#include "r300_state_inlines.h"
+
+void r300_emit_invariant_state(struct r300_context* r300);
+
+#endif /* R300_STATE_INVARIANT_H */
diff --git a/src/gallium/drivers/r300/r300_surface.c b/src/gallium/drivers/r300/r300_surface.c
new file mode 100644
index 0000000000..96e6e4a77d
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_surface.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ * Joakim Sindholt <opensource@zhasha.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "r300_surface.h"
+
+static void r300_surface_setup(struct r300_context* r300,
+ struct r300_texture* dest,
+ unsigned x, unsigned y,
+ unsigned w, unsigned h)
+{
+ struct r300_capabilities* caps = r300_screen(r300->context.screen)->caps;
+ unsigned pixpitch = dest->stride / dest->tex.block.size;
+ CS_LOCALS(r300);
+
+ r300_emit_blend_state(r300, &blend_clear_state);
+ r300_emit_blend_color_state(r300, &blend_color_clear_state);
+ r300_emit_dsa_state(r300, &dsa_clear_state);
+ r300_emit_rs_state(r300, &rs_clear_state);
+
+ BEGIN_CS(26);
+
+ /* Viewport setup */
+ OUT_CS_REG_SEQ(R300_SE_VPORT_XSCALE, 6);
+ OUT_CS_32F((float)w);
+ OUT_CS_32F((float)x);
+ OUT_CS_32F((float)h);
+ OUT_CS_32F((float)y);
+ OUT_CS_32F(1.0);
+ OUT_CS_32F(0.0);
+
+ OUT_CS_REG(R300_VAP_VTE_CNTL, R300_VPORT_X_SCALE_ENA |
+ R300_VPORT_X_OFFSET_ENA |
+ R300_VPORT_Y_SCALE_ENA |
+ R300_VPORT_Y_OFFSET_ENA |
+ R300_VTX_XY_FMT | R300_VTX_Z_FMT);
+
+ /* Pixel scissors. */
+ OUT_CS_REG_SEQ(R300_SC_SCISSORS_TL, 2);
+ if (caps->is_r500) {
+ OUT_CS((x << R300_SCISSORS_X_SHIFT) | (y << R300_SCISSORS_Y_SHIFT));
+ OUT_CS(((w - 1) << R300_SCISSORS_X_SHIFT) | ((h - 1) << R300_SCISSORS_Y_SHIFT));
+ } else {
+ /* Non-R500 chipsets have an offset of 1440 in their scissors. */
+ OUT_CS(((x + 1440) << R300_SCISSORS_X_SHIFT) |
+ ((y + 1440) << R300_SCISSORS_Y_SHIFT));
+ OUT_CS((((w - 1) + 1440) << R300_SCISSORS_X_SHIFT) |
+ (((h - 1) + 1440) << R300_SCISSORS_Y_SHIFT));
+ }
+
+ /* Flush colorbuffer and blend caches. */
+ OUT_CS_REG(R300_RB3D_DSTCACHE_CTLSTAT,
+ R300_RB3D_DSTCACHE_CTLSTAT_DC_FLUSH_FLUSH_DIRTY_3D |
+ R300_RB3D_DSTCACHE_CTLSTAT_DC_FINISH_SIGNAL);
+ OUT_CS_REG(R300_ZB_ZCACHE_CTLSTAT,
+ R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE |
+ R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE);
+
+ /* Setup colorbuffer. */
+ OUT_CS_REG_SEQ(R300_RB3D_COLOROFFSET0, 1);
+ OUT_CS_RELOC(dest->buffer, 0, 0, RADEON_GEM_DOMAIN_VRAM, 0);
+ OUT_CS_REG_SEQ(R300_RB3D_COLORPITCH0, 1);
+ OUT_CS_RELOC(dest->buffer, pixpitch |
+ r300_translate_colorformat(dest->tex.format), 0,
+ RADEON_GEM_DOMAIN_VRAM, 0);
+ OUT_CS_REG(RB3D_COLOR_CHANNEL_MASK, 0xf);
+
+ END_CS;
+}
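+
+/* On non-R500 parts the scissor is biased by 1440, so clearing a full
+ * 640x480 surface at (0, 0) programs scissors TL = (1440, 1440) and
+ * BR = (639 + 1440, 479 + 1440) = (2079, 1919). */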
+
+/* Provides pipe_context's "surface_fill". Commonly used for clearing
+ * buffers. */
+static void r300_surface_fill(struct pipe_context* pipe,
+ struct pipe_surface* dest,
+ unsigned x, unsigned y,
+ unsigned w, unsigned h,
+ unsigned color)
+{
+ int i;
+ float r, g, b, a, depth;
+ struct r300_context* r300 = r300_context(pipe);
+ struct r300_capabilities* caps = r300_screen(pipe->screen)->caps;
+ struct r300_texture* tex = (struct r300_texture*)dest->texture;
+ unsigned pixpitch = tex->stride / tex->tex.block.size;
+ boolean invalid = FALSE;
+ CS_LOCALS(r300);
+
+ a = (float)((color >> 24) & 0xff) / 255.0f;
+ r = (float)((color >> 16) & 0xff) / 255.0f;
+ g = (float)((color >> 8) & 0xff) / 255.0f;
+ b = (float)((color >> 0) & 0xff) / 255.0f;
+ debug_printf("r300: Filling surface %p at (%d,%d),"
+ " dimensions %dx%d (pixel pitch %d), color 0x%x\n",
+ dest, x, y, w, h, pixpitch, color);
+
+ /* Fallback? */
+ if (FALSE) {
+fallback:
+ debug_printf("r300: Falling back on surface clear...");
+ util_surface_fill(pipe, dest, x, y, w, h, color);
+ return;
+ }
+
+ /* Make sure our target BO is okay. */
+validate:
+ if (!r300->winsys->add_buffer(r300->winsys, tex->buffer,
+ 0, RADEON_GEM_DOMAIN_VRAM)) {
+ r300->context.flush(&r300->context, 0, NULL);
+ goto validate;
+ }
+ if (!r300->winsys->validate(r300->winsys)) {
+ r300->context.flush(&r300->context, 0, NULL);
+ if (invalid) {
+ debug_printf("r300: Stuck in validation loop, gonna fallback.");
+ goto fallback;
+ }
+ invalid = TRUE;
+ goto validate;
+ }
+
+ r300_surface_setup(r300, tex, x, y, w, h);
+
+ /* Vertex shader setup */
+ if (caps->has_tcl) {
+ r300_emit_vertex_program_code(r300, &r300_passthrough_vertex_shader, 0);
+ } else {
+ BEGIN_CS(4);
+ OUT_CS_REG(R300_VAP_CNTL_STATUS,
+#ifdef PIPE_ARCH_BIG_ENDIAN
+ R300_VC_32BIT_SWAP |
+#endif
+ R300_VAP_TCL_BYPASS);
+ OUT_CS_REG(R300_VAP_CNTL, R300_PVS_NUM_SLOTS(5) |
+ R300_PVS_NUM_CNTLRS(5) |
+ R300_PVS_NUM_FPUS(caps->num_vert_fpus) |
+ R300_PVS_VF_MAX_VTX_NUM(12));
+ END_CS;
+ }
+
+ /* Fragment shader setup */
+ if (caps->is_r500) {
+ r500_emit_fragment_program_code(r300, &r5xx_passthrough_fragment_shader, 0);
+ r300_emit_rs_block_state(r300, &r5xx_rs_block_clear_state);
+ } else {
+ r300_emit_fragment_program_code(r300, &r3xx_passthrough_fragment_shader, 0);
+ r300_emit_rs_block_state(r300, &r3xx_rs_block_clear_state);
+ }
+
+ BEGIN_CS(26);
+
+ /* VAP stream control, mapping from input memory to PVS/RS memory */
+ if (caps->has_tcl) {
+ OUT_CS_REG(R300_VAP_PROG_STREAM_CNTL_0,
+ (R300_DATA_TYPE_FLOAT_4 << R300_DATA_TYPE_0_SHIFT) |
+ ((R300_LAST_VEC | (1 << R300_DST_VEC_LOC_SHIFT) |
+ R300_DATA_TYPE_FLOAT_4) << R300_DATA_TYPE_1_SHIFT));
+ } else {
+ OUT_CS_REG(R300_VAP_PROG_STREAM_CNTL_0,
+ (R300_DATA_TYPE_FLOAT_4 << R300_DATA_TYPE_0_SHIFT) |
+ ((R300_LAST_VEC | (2 << R300_DST_VEC_LOC_SHIFT) |
+ R300_DATA_TYPE_FLOAT_4) << R300_DATA_TYPE_1_SHIFT));
+ }
+ OUT_CS_REG(R300_VAP_PROG_STREAM_CNTL_EXT_0,
+ (R300_VAP_SWIZZLE_XYZW << R300_SWIZZLE0_SHIFT) |
+ (R300_VAP_SWIZZLE_XYZW << R300_SWIZZLE1_SHIFT));
+
+ /* VAP format controls */
+ OUT_CS_REG(R300_VAP_OUTPUT_VTX_FMT_0,
+ R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT |
+ R300_VAP_OUTPUT_VTX_FMT_0__COLOR_0_PRESENT);
+ OUT_CS_REG(R300_VAP_OUTPUT_VTX_FMT_1, 0x0);
+
+ /* Disable textures */
+ OUT_CS_REG(R300_TX_ENABLE, 0x0);
+
+ /* The size of the point we're about to draw, in sixths of pixels */
+ OUT_CS_REG(R300_GA_POINT_SIZE,
+ ((h * 6) & R300_POINTSIZE_Y_MASK) |
+ ((w * 6) << R300_POINTSIZE_X_SHIFT));
+
+ /* Vertex size. */
+ OUT_CS_REG(R300_VAP_VTX_SIZE, 0x8);
+
+ /* Packet3 with our point vertex */
+ OUT_CS_PKT3(R200_3D_DRAW_IMMD_2, 8);
+ OUT_CS(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
+ (1 << R300_PRIM_NUM_VERTICES_SHIFT));
+ /* Position */
+ OUT_CS_32F(0.5);
+ OUT_CS_32F(0.5);
+ OUT_CS_32F(1.0);
+ OUT_CS_32F(1.0);
+ /* Color */
+ OUT_CS_32F(r);
+ OUT_CS_32F(g);
+ OUT_CS_32F(b);
+ OUT_CS_32F(a);
+
+ OUT_CS_REG(R300_RB3D_DSTCACHE_CTLSTAT, 0xA);
+
+ END_CS;
+
+ r300->dirty_hw++;
+}
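+
+/* To make the single-point trick above concrete: a 100x50 fill at (8, 8)
+ * programs the viewport with scale (100, 50) and offset (8, 8), so the
+ * vertex emitted at (0.5, 0.5) lands at the rectangle's center (58, 33),
+ * and R300_GA_POINT_SIZE gets 50 * 6 = 300 in the Y field and
+ * 100 * 6 = 600 in the X field, i.e. a point large enough to cover the
+ * whole rectangle. */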
+
+static void r300_surface_copy(struct pipe_context* pipe,
+ struct pipe_surface* dest,
+ unsigned destx, unsigned desty,
+ struct pipe_surface* src,
+ unsigned srcx, unsigned srcy,
+ unsigned w, unsigned h)
+{
+ struct r300_context* r300 = r300_context(pipe);
+ struct r300_capabilities* caps = r300_screen(pipe->screen)->caps;
+ struct r300_texture* srctex = (struct r300_texture*)src->texture;
+ struct r300_texture* desttex = (struct r300_texture*)dest->texture;
+ unsigned pixpitch = srctex->stride / srctex->tex.block.size;
+ boolean invalid = FALSE;
+ float fsrcx = srcx, fsrcy = srcy, fdestx = destx, fdesty = desty;
+ CS_LOCALS(r300);
+
+ debug_printf("r300: Copying surface %p at (%d,%d) to %p at (%d, %d),"
+ " dimensions %dx%d (pixel pitch %d)\n",
+ src, srcx, srcy, dest, destx, desty, w, h, pixpitch);
+
+ if ((srctex->buffer == desttex->buffer) &&
+ ((destx < srcx + w) || (srcx < destx + w)) &&
+ ((desty < srcy + h) || (srcy < desty + h))) {
+fallback:
+ debug_printf("r300: Falling back on surface_copy\n");
+ util_surface_copy(pipe, FALSE, dest, destx, desty, src,
+ srcx, srcy, w, h);
+ return;
+ }
+
+ /* Add our target BOs to the list. */
+validate:
+ if (!r300->winsys->add_buffer(r300->winsys, srctex->buffer,
+ RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0)) {
+ r300->context.flush(&r300->context, 0, NULL);
+ goto validate;
+ }
+ if (!r300->winsys->add_buffer(r300->winsys, desttex->buffer,
+ 0, RADEON_GEM_DOMAIN_VRAM)) {
+ r300->context.flush(&r300->context, 0, NULL);
+ goto validate;
+ }
+ if (!r300->winsys->validate(r300->winsys)) {
+ r300->context.flush(&r300->context, 0, NULL);
+ if (invalid) {
+ debug_printf("r300: Stuck in validation loop, gonna fallback.");
+ goto fallback;
+ }
+ invalid = TRUE;
+ goto validate;
+ }
+
+ r300_surface_setup(r300, desttex, destx, desty, w, h);
+
+ /* Setup the texture. */
+ r300_emit_texture(r300, &r300_sampler_copy_state, srctex, 0);
+
+ /* Flush and enable. */
+ r300_flush_textures(r300);
+
+ /* Vertex shader setup */
+ if (caps->has_tcl) {
+ r300_emit_vertex_program_code(r300, &r300_passthrough_vertex_shader, 0);
+ } else {
+ BEGIN_CS(4);
+ OUT_CS_REG(R300_VAP_CNTL_STATUS,
+#ifdef PIPE_ARCH_BIG_ENDIAN
+ R300_VC_32BIT_SWAP |
+#endif
+ R300_VAP_TCL_BYPASS);
+ OUT_CS_REG(R300_VAP_CNTL, R300_PVS_NUM_SLOTS(5) |
+ R300_PVS_NUM_CNTLRS(5) |
+ R300_PVS_NUM_FPUS(caps->num_vert_fpus) |
+ R300_PVS_VF_MAX_VTX_NUM(12));
+ END_CS;
+ }
+
+ /* Fragment shader setup */
+ if (caps->is_r500) {
+ r500_emit_fragment_program_code(r300, &r5xx_texture_fragment_shader, 0);
+ r300_emit_rs_block_state(r300, &r5xx_rs_block_copy_state);
+ } else {
+ r300_emit_fragment_program_code(r300, &r3xx_texture_fragment_shader, 0);
+ r300_emit_rs_block_state(r300, &r3xx_rs_block_copy_state);
+ }
+
+ BEGIN_CS(30);
+ /* VAP stream control, mapping from input memory to PVS/RS memory */
+ if (caps->has_tcl) {
+ OUT_CS_REG(R300_VAP_PROG_STREAM_CNTL_0,
+ (R300_DATA_TYPE_FLOAT_2 << R300_DATA_TYPE_0_SHIFT) |
+ ((R300_LAST_VEC | (1 << R300_DST_VEC_LOC_SHIFT) |
+ R300_DATA_TYPE_FLOAT_2) << R300_DATA_TYPE_1_SHIFT));
+ } else {
+ OUT_CS_REG(R300_VAP_PROG_STREAM_CNTL_0,
+ (R300_DATA_TYPE_FLOAT_2 << R300_DATA_TYPE_0_SHIFT) |
+ ((R300_LAST_VEC | (6 << R300_DST_VEC_LOC_SHIFT) |
+ R300_DATA_TYPE_FLOAT_2) << R300_DATA_TYPE_1_SHIFT));
+ }
+ OUT_CS_REG(R300_VAP_PROG_STREAM_CNTL_EXT_0,
+ (R300_VAP_SWIZZLE_XYZW << R300_SWIZZLE0_SHIFT) |
+ (R300_VAP_SWIZZLE_XYZW << R300_SWIZZLE1_SHIFT));
+
+ /* VAP format controls */
+ OUT_CS_REG(R300_VAP_OUTPUT_VTX_FMT_0,
+ R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT);
+ /* Two components of texture 0 */
+ OUT_CS_REG(R300_VAP_OUTPUT_VTX_FMT_1, 0x2);
+
+ /* Vertex size. */
+ OUT_CS_REG(R300_VAP_VTX_SIZE, 0x4);
+
+ /* Packet3 with our texcoords */
+ OUT_CS_PKT3(R200_3D_DRAW_IMMD_2, 16);
+ OUT_CS(R300_PRIM_TYPE_QUADS | R300_PRIM_WALK_RING |
+ (4 << R300_PRIM_NUM_VERTICES_SHIFT));
+ /* (x , y ) */
+ OUT_CS_32F(fdestx / dest->width);
+ OUT_CS_32F(fdesty / dest->height);
+ OUT_CS_32F(fsrcx / src->width);
+ OUT_CS_32F(fsrcy / src->height);
+ /* (x , y + h) */
+ OUT_CS_32F(fdestx / dest->width);
+ OUT_CS_32F((fdesty + h) / dest->height);
+ OUT_CS_32F(fsrcx / src->width);
+ OUT_CS_32F((fsrcy + h) / src->height);
+ /* (x + w, y + h) */
+ OUT_CS_32F((fdestx + w) / dest->width);
+ OUT_CS_32F((fdesty + h) / dest->height);
+ OUT_CS_32F((fsrcx + w) / src->width);
+ OUT_CS_32F((fsrcy + h) / src->height);
+ /* (x + w, y ) */
+ OUT_CS_32F((fdestx + w) / dest->width);
+ OUT_CS_32F(fdesty / dest->height);
+ OUT_CS_32F((fsrcx + w) / src->width);
+ OUT_CS_32F(fsrcy / src->height);
+
+ OUT_CS_REG(R300_RB3D_DSTCACHE_CTLSTAT, 0xA);
+
+ END_CS;
+
+ r300->dirty_hw++;
+}
+
+void r300_init_surface_functions(struct r300_context* r300)
+{
+ r300->context.surface_fill = r300_surface_fill;
+ r300->context.surface_copy = r300_surface_copy;
+}
diff --git a/src/gallium/drivers/r300/r300_surface.h b/src/gallium/drivers/r300/r300_surface.h
new file mode 100644
index 0000000000..f9e98b2ec9
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_surface.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_SURFACE_H
+#define R300_SURFACE_H
+
+#include "pipe/p_context.h"
+#include "pipe/p_screen.h"
+
+#include "util/u_rect.h"
+
+#include "r300_context.h"
+#include "r300_cs.h"
+#include "r300_emit.h"
+#include "r300_fs.h"
+#include "r300_vs.h"
+#include "r300_state_inlines.h"
+
+static struct r300_blend_state blend_clear_state = {
+ .blend_control = 0x0,
+ .alpha_blend_control = 0x0,
+ .rop = 0x0,
+ .dither = 0x0,
+};
+
+static struct r300_blend_color_state blend_color_clear_state = {
+ .blend_color = 0x0,
+ .blend_color_red_alpha = 0x0,
+ .blend_color_green_blue = 0x0,
+};
+
+static struct r300_dsa_state dsa_clear_state = {
+ .alpha_function = 0x0,
+ .alpha_reference = 0x0,
+ .z_buffer_control = 0x0,
+ .z_stencil_control = 0x0,
+ .stencil_ref_mask = R300_STENCILWRITEMASK_MASK,
+ .z_buffer_top = R300_ZTOP_ENABLE,
+ .stencil_ref_bf = 0x0,
+};
+
+static struct r300_rs_state rs_clear_state = {
+ .point_minmax = 0x36000006,
+ .line_control = 0x00030006,
+ .depth_scale_front = 0x0,
+ .depth_offset_front = 0x0,
+ .depth_scale_back = 0x0,
+ .depth_offset_back = 0x0,
+ .polygon_offset_enable = 0x0,
+ .cull_mode = 0x0,
+ .line_stipple_config = 0x3BAAAAAB,
+ .line_stipple_value = 0x0,
+ .color_control = R300_SHADE_MODEL_FLAT,
+};
+
+static struct r300_rs_block r3xx_rs_block_clear_state = {
+ .ip[0] = R500_RS_SEL_S(R300_RS_SEL_C0) |
+ R500_RS_SEL_T(R300_RS_SEL_C0) |
+ R500_RS_SEL_R(R300_RS_SEL_C0) |
+ R500_RS_SEL_Q(R300_RS_SEL_K1),
+ .inst[0] = R300_RS_INST_COL_CN_WRITE,
+ .count = R300_IT_COUNT(0) | R300_IC_COUNT(1) | R300_HIRES_EN,
+ .inst_count = 0,
+};
+
+static struct r300_rs_block r5xx_rs_block_clear_state = {
+ .ip[0] = R500_RS_SEL_S(R500_RS_IP_PTR_K0) |
+ R500_RS_SEL_T(R500_RS_IP_PTR_K0) |
+ R500_RS_SEL_R(R500_RS_IP_PTR_K0) |
+ R500_RS_SEL_Q(R500_RS_IP_PTR_K1),
+ .inst[0] = R500_RS_INST_COL_CN_WRITE,
+ .count = R300_IT_COUNT(0) | R300_IC_COUNT(1) | R300_HIRES_EN,
+ .inst_count = 0,
+};
+
+/* The following state is used for surface_copy only. */
+
+static struct r300_rs_block r3xx_rs_block_copy_state = {
+ .ip[0] = R500_RS_SEL_S(R300_RS_SEL_K0) |
+ R500_RS_SEL_T(R300_RS_SEL_K0) |
+ R500_RS_SEL_R(R300_RS_SEL_K0) |
+ R500_RS_SEL_Q(R300_RS_SEL_K1),
+ .inst[0] = R300_RS_INST_COL_CN_WRITE,
+ .count = R300_IT_COUNT(2) | R300_IC_COUNT(0) | R300_HIRES_EN,
+ .inst_count = R300_RS_TX_OFFSET(0),
+};
+
+static struct r300_rs_block r5xx_rs_block_copy_state = {
+ .ip[0] = R500_RS_SEL_S(0) |
+ R500_RS_SEL_T(1) |
+ R500_RS_SEL_R(R500_RS_IP_PTR_K0) |
+ R500_RS_SEL_Q(R500_RS_IP_PTR_K1),
+ .inst[0] = R500_RS_INST_TEX_CN_WRITE,
+ .count = R300_IT_COUNT(2) | R300_IC_COUNT(0) | R300_HIRES_EN,
+ .inst_count = R300_RS_TX_OFFSET(0),
+};
+
+static struct r300_sampler_state r300_sampler_copy_state = {
+ .filter0 = R300_TX_WRAP_S(R300_TX_CLAMP) |
+ R300_TX_WRAP_T(R300_TX_CLAMP) |
+ R300_TX_MAG_FILTER_NEAREST |
+ R300_TX_MIN_FILTER_NEAREST,
+};
+
+#endif /* R300_SURFACE_H */
diff --git a/src/gallium/drivers/r300/r300_texture.c b/src/gallium/drivers/r300/r300_texture.c
new file mode 100644
index 0000000000..590052509c
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_texture.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "r300_texture.h"
+
+static void r300_setup_texture_state(struct r300_texture* tex,
+ unsigned width,
+ unsigned height,
+ unsigned pitch,
+ unsigned levels)
+{
+ struct r300_texture_state* state = &tex->state;
+
+ state->format0 = R300_TX_WIDTH((width - 1) & 0x7ff) |
+ R300_TX_HEIGHT((height - 1) & 0x7ff) |
+ R300_TX_NUM_LEVELS(levels) |
+ R300_TX_PITCH_EN;
+
+ /* XXX */
+ state->format1 = r300_translate_texformat(tex->tex.format);
+
+ state->format2 = pitch - 1;
+
+ /* Assume (somewhat foolishly) that oversized textures will
+ * not be permitted by the state tracker. */
+ if (width > 2048) {
+ state->format2 |= R500_TXWIDTH_BIT11;
+ }
+ if (height > 2048) {
+ state->format2 |= R500_TXHEIGHT_BIT11;
+ }
+
+ debug_printf("r300: Set texture state (%dx%d, pitch %d, %d levels)\n",
+ width, height, pitch, levels);
+}
+
+static void r300_setup_miptree(struct r300_texture* tex)
+{
+ struct pipe_texture* base = &tex->tex;
+ int stride, size, offset;
+ int i;
+
+ for (i = 0; i <= base->last_level; i++) {
+ if (i > 0) {
+ base->width[i] = minify(base->width[i-1]);
+ base->height[i] = minify(base->height[i-1]);
+ base->depth[i] = minify(base->depth[i-1]);
+ }
+
+ base->nblocksx[i] = pf_get_nblocksx(&base->block, base->width[i]);
+ base->nblocksy[i] = pf_get_nblocksy(&base->block, base->height[i]);
+
+ /* Radeons enjoy things in multiples of 64.
+ *
+ * XXX
+ * POT, uncompressed, unmipmapped textures can be aligned to 32,
+ * instead of 64. */
+ stride = align(pf_get_stride(&base->block, base->width[i]), 32);
+ size = stride * base->nblocksy[i] * base->depth[i];
+
+ tex->offset[i] = align(tex->size, 32);
+ tex->size = tex->offset[i] + size;
+
+ debug_printf("r300: Texture miptree: Level %d "
+ "(%dx%dx%d px, pitch %d bytes)\n",
+ i, base->width[i], base->height[i], base->depth[i],
+ stride);
+ /* Save stride of first level to the texture. */
+ if (i == 0) {
+ tex->stride = stride;
+ }
+ }
+}
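+
+/* Worked example, assuming a mipmapped 64x64 PIPE_FORMAT_A8R8G8B8_UNORM
+ * texture (4-byte 1x1 blocks):
+ *
+ * level 0: 64x64, stride = align(64 * 4, 32) = 256, size = 16384, offset = 0
+ * level 1: 32x32, stride = 128, size = 4096, offset = 16384
+ * level 2: 16x16, stride = 64, size = 1024, offset = 20480
+ *
+ * and so on down to 1x1; tex->size ends up as the running total and
+ * tex->stride as the level 0 stride (256). */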
+
+/* Create a new texture. */
+static struct pipe_texture*
+ r300_texture_create(struct pipe_screen* screen,
+ const struct pipe_texture* template)
+{
+ struct r300_texture* tex = CALLOC_STRUCT(r300_texture);
+
+ if (!tex) {
+ return NULL;
+ }
+
+ tex->tex = *template;
+ pipe_reference_init(&tex->tex.reference, 1);
+ tex->tex.screen = screen;
+
+ r300_setup_miptree(tex);
+
+ r300_setup_texture_state(tex, template->width[0], template->height[0],
+ template->width[0], template->last_level);
+
+ tex->buffer = screen->buffer_create(screen, 1024,
+ PIPE_BUFFER_USAGE_PIXEL,
+ tex->size);
+
+ if (!tex->buffer) {
+ FREE(tex);
+ return NULL;
+ }
+
+ return (struct pipe_texture*)tex;
+}
+
+static void r300_texture_destroy(struct pipe_texture* texture)
+{
+ struct r300_texture* tex = (struct r300_texture*)texture;
+
+ pipe_buffer_reference(&tex->buffer, NULL);
+
+ FREE(tex);
+}
+
+static struct pipe_surface* r300_get_tex_surface(struct pipe_screen* screen,
+ struct pipe_texture* texture,
+ unsigned face,
+ unsigned level,
+ unsigned zslice,
+ unsigned flags)
+{
+ struct r300_texture* tex = (struct r300_texture*)texture;
+ struct pipe_surface* surface = CALLOC_STRUCT(pipe_surface);
+ unsigned offset;
+
+ /* XXX this is certainly dependent on tex target */
+ offset = tex->offset[level];
+
+ if (surface) {
+ pipe_reference_init(&surface->reference, 1);
+ pipe_texture_reference(&surface->texture, texture);
+ surface->format = texture->format;
+ surface->width = texture->width[level];
+ surface->height = texture->height[level];
+ surface->offset = offset;
+ surface->usage = flags;
+ }
+
+ return surface;
+}
+
+static void r300_tex_surface_destroy(struct pipe_surface* s)
+{
+ pipe_texture_reference(&s->texture, NULL);
+ FREE(s);
+}
+
+static struct pipe_texture*
+ r300_texture_blanket(struct pipe_screen* screen,
+ const struct pipe_texture* base,
+ const unsigned* stride,
+ struct pipe_buffer* buffer)
+{
+ struct r300_texture* tex;
+
+ /* XXX we should start doing mips now... */
+ if (base->target != PIPE_TEXTURE_2D ||
+ base->last_level != 0 ||
+ base->depth[0] != 1) {
+ return NULL;
+ }
+
+ tex = CALLOC_STRUCT(r300_texture);
+ if (!tex) {
+ return NULL;
+ }
+
+ tex->tex = *base;
+ pipe_reference_init(&tex->tex.reference, 1);
+ tex->tex.screen = screen;
+
+ tex->stride = *stride;
+
+ /* XXX */
+ r300_setup_texture_state(tex, tex->tex.width[0], tex->tex.height[0],
+ tex->stride, 0);
+
+ pipe_buffer_reference(&tex->buffer, buffer);
+
+ return (struct pipe_texture*)tex;
+}
+
+void r300_init_screen_texture_functions(struct pipe_screen* screen)
+{
+ screen->texture_create = r300_texture_create;
+ screen->texture_destroy = r300_texture_destroy;
+ screen->get_tex_surface = r300_get_tex_surface;
+ screen->tex_surface_destroy = r300_tex_surface_destroy;
+ screen->texture_blanket = r300_texture_blanket;
+}
+
+boolean r300_get_texture_buffer(struct pipe_texture* texture,
+ struct pipe_buffer** buffer,
+ unsigned* stride)
+{
+ struct r300_texture* tex = (struct r300_texture*)texture;
+ if (!tex) {
+ return FALSE;
+ }
+
+ pipe_buffer_reference(buffer, tex->buffer);
+
+ if (stride) {
+ *stride = tex->stride;
+ }
+
+ return TRUE;
+}
diff --git a/src/gallium/drivers/r300/r300_texture.h b/src/gallium/drivers/r300/r300_texture.h
new file mode 100644
index 0000000000..3b56f0307c
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_texture.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_TEXTURE_H
+#define R300_TEXTURE_H
+
+#include "pipe/p_screen.h"
+
+#include "util/u_math.h"
+
+#include "r300_context.h"
+#include "r300_reg.h"
+
+void r300_init_screen_texture_functions(struct pipe_screen* screen);
+
+/* Note the signature of R300_EASY_TX_FORMAT(A, R, G, B, FORMAT)... */
+static INLINE uint32_t r300_translate_texformat(enum pipe_format format)
+{
+ switch (format) {
+ /* X8 */
+ case PIPE_FORMAT_I8_UNORM:
+ return R300_EASY_TX_FORMAT(X, X, X, X, X8);
+ /* W8Z8Y8X8 */
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ return R300_EASY_TX_FORMAT(X, Y, Z, W, W8Z8Y8X8);
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ return R300_EASY_TX_FORMAT(Y, Z, W, X, W8Z8Y8X8);
+ case PIPE_FORMAT_A8R8G8B8_SRGB:
+ return R300_EASY_TX_FORMAT(X, Y, Z, W, W8Z8Y8X8) |
+ R300_TX_FORMAT_GAMMA;
+ case PIPE_FORMAT_R8G8B8A8_SRGB:
+ return R300_EASY_TX_FORMAT(Y, Z, W, X, W8Z8Y8X8) |
+ R300_TX_FORMAT_GAMMA;
+ /* DXT1 */
+ case PIPE_FORMAT_DXT1_RGB:
+ return R300_EASY_TX_FORMAT(X, Y, Z, ONE, DXT1);
+ case PIPE_FORMAT_DXT1_RGBA:
+ return R300_EASY_TX_FORMAT(X, Y, Z, W, DXT1);
+ /* DXT3 */
+ case PIPE_FORMAT_DXT3_RGBA:
+ return R300_EASY_TX_FORMAT(X, Y, Z, W, DXT3);
+ /* DXT5 */
+ case PIPE_FORMAT_DXT5_RGBA:
+ return R300_EASY_TX_FORMAT(Y, Z, W, X, DXT5);
+ /* YVYU422 */
+ case PIPE_FORMAT_YCBCR:
+ return R300_EASY_TX_FORMAT(X, Y, Z, ONE, YVYU422) |
+ R300_TX_FORMAT_YUV_TO_RGB;
+ /* W24_FP */
+ case PIPE_FORMAT_Z24S8_UNORM:
+ return R300_EASY_TX_FORMAT(X, X, X, X, W24_FP);
+ default:
+ debug_printf("r300: Implementation error: "
+ "Got unsupported texture format %s in %s\n",
+ pf_name(format), __FUNCTION__);
+ assert(0);
+ break;
+ }
+ return 0;
+}
+
+#ifndef R300_WINSYS_H
+
+boolean r300_get_texture_buffer(struct pipe_texture* texture,
+ struct pipe_buffer** buffer,
+ unsigned* stride);
+
+#endif /* R300_WINSYS_H */
+
+#endif /* R300_TEXTURE_H */
diff --git a/src/gallium/drivers/r300/r300_tgsi_to_rc.c b/src/gallium/drivers/r300/r300_tgsi_to_rc.c
new file mode 100644
index 0000000000..d68a104106
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_tgsi_to_rc.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright 2009 Nicolai Hähnle <nhaehnle@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "r300_tgsi_to_rc.h"
+
+#include "radeon_compiler.h"
+#include "radeon_program.h"
+
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_scan.h"
+#include "tgsi/tgsi_util.h"
+
+
+static unsigned translate_opcode(unsigned opcode)
+{
+ switch(opcode) {
+ case TGSI_OPCODE_ARL: return OPCODE_ARL;
+ case TGSI_OPCODE_MOV: return OPCODE_MOV;
+ case TGSI_OPCODE_LIT: return OPCODE_LIT;
+ case TGSI_OPCODE_RCP: return OPCODE_RCP;
+ case TGSI_OPCODE_RSQ: return OPCODE_RSQ;
+ case TGSI_OPCODE_EXP: return OPCODE_EXP;
+ case TGSI_OPCODE_LOG: return OPCODE_LOG;
+ case TGSI_OPCODE_MUL: return OPCODE_MUL;
+ case TGSI_OPCODE_ADD: return OPCODE_ADD;
+ case TGSI_OPCODE_DP3: return OPCODE_DP3;
+ case TGSI_OPCODE_DP4: return OPCODE_DP4;
+ case TGSI_OPCODE_DST: return OPCODE_DST;
+ case TGSI_OPCODE_MIN: return OPCODE_MIN;
+ case TGSI_OPCODE_MAX: return OPCODE_MAX;
+ case TGSI_OPCODE_SLT: return OPCODE_SLT;
+ case TGSI_OPCODE_SGE: return OPCODE_SGE;
+ case TGSI_OPCODE_MAD: return OPCODE_MAD;
+ case TGSI_OPCODE_SUB: return OPCODE_SUB;
+ case TGSI_OPCODE_LRP: return OPCODE_LRP;
+ /* case TGSI_OPCODE_CND: return OPCODE_CND; */
+ case TGSI_OPCODE_DP2A: return OPCODE_DP2A;
+ /* gap */
+ case TGSI_OPCODE_FRC: return OPCODE_FRC;
+ /* case TGSI_OPCODE_CLAMP: return OPCODE_CLAMP; */
+ case TGSI_OPCODE_FLR: return OPCODE_FLR;
+ /* case TGSI_OPCODE_ROUND: return OPCODE_ROUND; */
+ case TGSI_OPCODE_EX2: return OPCODE_EX2;
+ case TGSI_OPCODE_LG2: return OPCODE_LG2;
+ case TGSI_OPCODE_POW: return OPCODE_POW;
+ case TGSI_OPCODE_XPD: return OPCODE_XPD;
+ /* gap */
+ case TGSI_OPCODE_ABS: return OPCODE_ABS;
+ case TGSI_OPCODE_RCC: return OPCODE_RCC;
+ case TGSI_OPCODE_DPH: return OPCODE_DPH;
+ case TGSI_OPCODE_COS: return OPCODE_COS;
+ case TGSI_OPCODE_DDX: return OPCODE_DDX;
+ case TGSI_OPCODE_DDY: return OPCODE_DDY;
+ /* case TGSI_OPCODE_KILP: return OPCODE_KILP; */
+ case TGSI_OPCODE_PK2H: return OPCODE_PK2H;
+ case TGSI_OPCODE_PK2US: return OPCODE_PK2US;
+ case TGSI_OPCODE_PK4B: return OPCODE_PK4B;
+ case TGSI_OPCODE_PK4UB: return OPCODE_PK4UB;
+ case TGSI_OPCODE_RFL: return OPCODE_RFL;
+ case TGSI_OPCODE_SEQ: return OPCODE_SEQ;
+ case TGSI_OPCODE_SFL: return OPCODE_SFL;
+ case TGSI_OPCODE_SGT: return OPCODE_SGT;
+ case TGSI_OPCODE_SIN: return OPCODE_SIN;
+ case TGSI_OPCODE_SLE: return OPCODE_SLE;
+ case TGSI_OPCODE_SNE: return OPCODE_SNE;
+ case TGSI_OPCODE_STR: return OPCODE_STR;
+ case TGSI_OPCODE_TEX: return OPCODE_TEX;
+ case TGSI_OPCODE_TXD: return OPCODE_TXD;
+ case TGSI_OPCODE_TXP: return OPCODE_TXP;
+ case TGSI_OPCODE_UP2H: return OPCODE_UP2H;
+ case TGSI_OPCODE_UP2US: return OPCODE_UP2US;
+ case TGSI_OPCODE_UP4B: return OPCODE_UP4B;
+ case TGSI_OPCODE_UP4UB: return OPCODE_UP4UB;
+ case TGSI_OPCODE_X2D: return OPCODE_X2D;
+ case TGSI_OPCODE_ARA: return OPCODE_ARA;
+ case TGSI_OPCODE_ARR: return OPCODE_ARR;
+ case TGSI_OPCODE_BRA: return OPCODE_BRA;
+ case TGSI_OPCODE_CAL: return OPCODE_CAL;
+ case TGSI_OPCODE_RET: return OPCODE_RET;
+ case TGSI_OPCODE_SSG: return OPCODE_SSG;
+ case TGSI_OPCODE_CMP: return OPCODE_CMP;
+ case TGSI_OPCODE_SCS: return OPCODE_SCS;
+ case TGSI_OPCODE_TXB: return OPCODE_TXB;
+ /* case TGSI_OPCODE_NRM: return OPCODE_NRM; */
+ /* case TGSI_OPCODE_DIV: return OPCODE_DIV; */
+ case TGSI_OPCODE_DP2: return OPCODE_DP2;
+ case TGSI_OPCODE_TXL: return OPCODE_TXL;
+ case TGSI_OPCODE_BRK: return OPCODE_BRK;
+ case TGSI_OPCODE_IF: return OPCODE_IF;
+ /* case TGSI_OPCODE_LOOP: return OPCODE_LOOP; */
+ /* case TGSI_OPCODE_REP: return OPCODE_REP; */
+ case TGSI_OPCODE_ELSE: return OPCODE_ELSE;
+ case TGSI_OPCODE_ENDIF: return OPCODE_ENDIF;
+ case TGSI_OPCODE_ENDLOOP: return OPCODE_ENDLOOP;
+ /* case TGSI_OPCODE_ENDREP: return OPCODE_ENDREP; */
+ case TGSI_OPCODE_PUSHA: return OPCODE_PUSHA;
+ case TGSI_OPCODE_POPA: return OPCODE_POPA;
+ /* case TGSI_OPCODE_CEIL: return OPCODE_CEIL; */
+ /* case TGSI_OPCODE_I2F: return OPCODE_I2F; */
+ case TGSI_OPCODE_NOT: return OPCODE_NOT;
+ case TGSI_OPCODE_TRUNC: return OPCODE_TRUNC;
+ /* case TGSI_OPCODE_SHL: return OPCODE_SHL; */
+ /* case TGSI_OPCODE_SHR: return OPCODE_SHR; */
+ case TGSI_OPCODE_AND: return OPCODE_AND;
+ case TGSI_OPCODE_OR: return OPCODE_OR;
+ /* case TGSI_OPCODE_MOD: return OPCODE_MOD; */
+ case TGSI_OPCODE_XOR: return OPCODE_XOR;
+ /* case TGSI_OPCODE_SAD: return OPCODE_SAD; */
+ /* case TGSI_OPCODE_TXF: return OPCODE_TXF; */
+ /* case TGSI_OPCODE_TXQ: return OPCODE_TXQ; */
+ case TGSI_OPCODE_CONT: return OPCODE_CONT;
+ /* case TGSI_OPCODE_EMIT: return OPCODE_EMIT; */
+ /* case TGSI_OPCODE_ENDPRIM: return OPCODE_ENDPRIM; */
+ /* case TGSI_OPCODE_BGNLOOP2: return OPCODE_BGNLOOP2; */
+ case TGSI_OPCODE_BGNSUB: return OPCODE_BGNSUB;
+ /* case TGSI_OPCODE_ENDLOOP2: return OPCODE_ENDLOOP2; */
+ case TGSI_OPCODE_ENDSUB: return OPCODE_ENDSUB;
+ case TGSI_OPCODE_NOISE1: return OPCODE_NOISE1;
+ case TGSI_OPCODE_NOISE2: return OPCODE_NOISE2;
+ case TGSI_OPCODE_NOISE3: return OPCODE_NOISE3;
+ case TGSI_OPCODE_NOISE4: return OPCODE_NOISE4;
+ case TGSI_OPCODE_NOP: return OPCODE_NOP;
+ /* gap */
+ case TGSI_OPCODE_NRM4: return OPCODE_NRM4;
+ /* case TGSI_OPCODE_CALLNZ: return OPCODE_CALLNZ; */
+ /* case TGSI_OPCODE_IFC: return OPCODE_IFC; */
+ /* case TGSI_OPCODE_BREAKC: return OPCODE_BREAKC; */
+ case TGSI_OPCODE_KIL: return OPCODE_KIL;
+ case TGSI_OPCODE_END: return OPCODE_END;
+ case TGSI_OPCODE_SWZ: return OPCODE_SWZ;
+ }
+
+ fprintf(stderr, "Unknown opcode: %i\n", opcode);
+ abort();
+}
+
+static unsigned translate_saturate(unsigned saturate)
+{
+ switch(saturate) {
+ case TGSI_SAT_NONE: return SATURATE_OFF;
+ case TGSI_SAT_ZERO_ONE: return SATURATE_ZERO_ONE;
+ case TGSI_SAT_MINUS_PLUS_ONE: return SATURATE_PLUS_MINUS_ONE;
+ }
+
+ fprintf(stderr, "Unknown saturate mode: %i\n", saturate);
+ abort();
+}
+
+static unsigned translate_register_file(unsigned file)
+{
+ switch(file) {
+ case TGSI_FILE_CONSTANT: return PROGRAM_CONSTANT;
+ case TGSI_FILE_IMMEDIATE: return PROGRAM_CONSTANT;
+ case TGSI_FILE_INPUT: return PROGRAM_INPUT;
+ case TGSI_FILE_OUTPUT: return PROGRAM_OUTPUT;
+ case TGSI_FILE_TEMPORARY: return PROGRAM_TEMPORARY;
+ case TGSI_FILE_ADDRESS: return PROGRAM_ADDRESS;
+ }
+
+ fprintf(stderr, "Unhandled register file: %i\n", file);
+ abort();
+}
+
+static int translate_register_index(
+ struct tgsi_to_rc * ttr,
+ unsigned file,
+ int index)
+{
+ if (file == TGSI_FILE_IMMEDIATE)
+ return ttr->immediate_offset + index;
+
+ return index;
+}
+
+static void transform_dstreg(
+ struct tgsi_to_rc * ttr,
+ struct prog_dst_register * dst,
+ struct tgsi_full_dst_register * src)
+{
+ dst->File = translate_register_file(src->DstRegister.File);
+ dst->Index = translate_register_index(ttr, src->DstRegister.File, src->DstRegister.Index);
+ dst->WriteMask = src->DstRegister.WriteMask;
+ dst->RelAddr = src->DstRegister.Indirect;
+}
+
+static void transform_srcreg(
+ struct tgsi_to_rc * ttr,
+ struct prog_src_register * dst,
+ struct tgsi_full_src_register * src)
+{
+ dst->File = translate_register_file(src->SrcRegister.File);
+ dst->Index = translate_register_index(ttr, src->SrcRegister.File, src->SrcRegister.Index);
+ dst->RelAddr = src->SrcRegister.Indirect;
+ dst->Swizzle = tgsi_util_get_full_src_register_extswizzle(src, 0);
+ dst->Swizzle |= tgsi_util_get_full_src_register_extswizzle(src, 1) << 3;
+ dst->Swizzle |= tgsi_util_get_full_src_register_extswizzle(src, 2) << 6;
+ dst->Swizzle |= tgsi_util_get_full_src_register_extswizzle(src, 3) << 9;
+ dst->Abs = src->SrcRegisterExtMod.Absolute;
+ dst->Negate =
+ src->SrcRegisterExtSwz.NegateX |
+ (src->SrcRegisterExtSwz.NegateY << 1) |
+ (src->SrcRegisterExtSwz.NegateZ << 2) |
+ (src->SrcRegisterExtSwz.NegateW << 3);
+ dst->Negate ^= src->SrcRegister.Negate ? NEGATE_XYZW : 0;
+}
+
+static void transform_texture(struct rc_instruction * dst, struct tgsi_instruction_ext_texture src)
+{
+ switch(src.Texture) {
+ case TGSI_TEXTURE_1D:
+ dst->I.TexSrcTarget = TEXTURE_1D_INDEX;
+ break;
+ case TGSI_TEXTURE_2D:
+ dst->I.TexSrcTarget = TEXTURE_2D_INDEX;
+ break;
+ case TGSI_TEXTURE_3D:
+ dst->I.TexSrcTarget = TEXTURE_3D_INDEX;
+ break;
+ case TGSI_TEXTURE_CUBE:
+ dst->I.TexSrcTarget = TEXTURE_CUBE_INDEX;
+ break;
+ case TGSI_TEXTURE_RECT:
+ dst->I.TexSrcTarget = TEXTURE_RECT_INDEX;
+ break;
+ case TGSI_TEXTURE_SHADOW1D:
+ dst->I.TexSrcTarget = TEXTURE_1D_INDEX;
+ dst->I.TexShadow = 1;
+ break;
+ case TGSI_TEXTURE_SHADOW2D:
+ dst->I.TexSrcTarget = TEXTURE_2D_INDEX;
+ dst->I.TexShadow = 1;
+ break;
+ case TGSI_TEXTURE_SHADOWRECT:
+ dst->I.TexSrcTarget = TEXTURE_RECT_INDEX;
+ dst->I.TexShadow = 1;
+ break;
+ }
+}
+
+static void transform_instruction(struct tgsi_to_rc * ttr, struct tgsi_full_instruction * src)
+{
+ if (src->Instruction.Opcode == TGSI_OPCODE_END)
+ return;
+
+ struct rc_instruction * dst = rc_insert_new_instruction(ttr->compiler, ttr->compiler->Program.Instructions.Prev);
+ int i;
+
+ dst->I.Opcode = translate_opcode(src->Instruction.Opcode);
+ dst->I.SaturateMode = translate_saturate(src->Instruction.Saturate);
+
+ if (src->Instruction.NumDstRegs)
+ transform_dstreg(ttr, &dst->I.DstReg, &src->FullDstRegisters[0]);
+
+ for(i = 0; i < src->Instruction.NumSrcRegs; ++i) {
+ if (src->FullSrcRegisters[i].SrcRegister.File == TGSI_FILE_SAMPLER)
+ dst->I.TexSrcUnit = src->FullSrcRegisters[i].SrcRegister.Index;
+ else
+ transform_srcreg(ttr, &dst->I.SrcReg[i], &src->FullSrcRegisters[i]);
+ }
+
+ /* Texturing. */
+ transform_texture(dst, src->InstructionExtTexture);
+}
+
+static void handle_immediate(struct tgsi_to_rc * ttr, struct tgsi_full_immediate * imm)
+{
+ struct rc_constant constant;
+ int i;
+
+ constant.Type = RC_CONSTANT_IMMEDIATE;
+ constant.Size = 4;
+ for(i = 0; i < 4; ++i)
+ constant.u.Immediate[i] = imm->u[i].Float;
+ rc_constants_add(&ttr->compiler->Program.Constants, &constant);
+}
+
+void r300_tgsi_to_rc(struct tgsi_to_rc * ttr, const struct tgsi_token * tokens)
+{
+ struct tgsi_parse_context parser;
+ int i;
+
+ /* Allocate constants placeholders.
+ *
+ * Note: What if declared constants are not contiguous? */
+ for(i = 0; i <= ttr->info->file_max[TGSI_FILE_CONSTANT]; ++i) {
+ struct rc_constant constant;
+ memset(&constant, 0, sizeof(constant));
+ constant.Type = RC_CONSTANT_EXTERNAL;
+ constant.Size = 4;
+ constant.u.External = i;
+ rc_constants_add(&ttr->compiler->Program.Constants, &constant);
+ }
+
+ ttr->immediate_offset = ttr->compiler->Program.Constants.Count;
+
+ tgsi_parse_init(&parser, tokens);
+
+ while (!tgsi_parse_end_of_tokens(&parser)) {
+ tgsi_parse_token(&parser);
+
+ switch (parser.FullToken.Token.Type) {
+ case TGSI_TOKEN_TYPE_DECLARATION:
+ break;
+ case TGSI_TOKEN_TYPE_IMMEDIATE:
+ handle_immediate(ttr, &parser.FullToken.FullImmediate);
+ break;
+ case TGSI_TOKEN_TYPE_INSTRUCTION:
+ transform_instruction(ttr, &parser.FullToken.FullInstruction);
+ break;
+ }
+ }
+
+ tgsi_parse_free(&parser);
+
+ rc_calculate_inputs_outputs(ttr->compiler);
+}
+
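For illustration, assuming a shader that declares CONST[0..2] and two immediates, the constant table built above would end up laid out as follows (hypothetical values, not part of the patch):

/* Constant-slot layout sketch:
 *
 *   TGSI CONST[0..2] -> Program.Constants[0..2]  (RC_CONSTANT_EXTERNAL)
 *   TGSI IMM[0]      -> Program.Constants[3]     (RC_CONSTANT_IMMEDIATE)
 *   TGSI IMM[1]      -> Program.Constants[4]
 *
 * so immediate_offset == 3, and translate_register_index() maps
 * TGSI_FILE_IMMEDIATE index n to slot 3 + n.
 */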
diff --git a/src/gallium/drivers/r300/r300_tgsi_to_rc.h b/src/gallium/drivers/r300/r300_tgsi_to_rc.h
new file mode 100644
index 0000000000..93e90ec6d2
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_tgsi_to_rc.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2009 Nicolai Hähnle <nhaehnle@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_TGSI_TO_RC_H
+#define R300_TGSI_TO_RC_H
+
+struct radeon_compiler;
+
+struct tgsi_full_declaration;
+struct tgsi_shader_info;
+struct tgsi_token;
+
+struct tgsi_to_rc {
+ struct radeon_compiler * compiler;
+ const struct tgsi_shader_info * info;
+
+ int immediate_offset;
+};
+
+void r300_tgsi_to_rc(struct tgsi_to_rc * ttr, const struct tgsi_token * tokens);
+
+#endif /* R300_TGSI_TO_RC_H */
diff --git a/src/gallium/drivers/r300/r300_vs.c b/src/gallium/drivers/r300/r300_vs.c
new file mode 100644
index 0000000000..12a6e37be6
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_vs.c
@@ -0,0 +1,234 @@
+/*
+ * Copyright 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "r300_vs.h"
+
+#include "r300_context.h"
+#include "r300_tgsi_to_rc.h"
+
+#include "tgsi/tgsi_dump.h"
+#include "tgsi/tgsi_parse.h"
+
+#include "radeon_compiler.h"
+
+
+static void set_vertex_inputs_outputs(struct r300_vertex_program_compiler * c)
+{
+ struct r300_vertex_shader * vs = c->UserData;
+ struct tgsi_shader_info* info = &vs->info;
+ boolean pointsize = false;
+ int out_colors = 0;
+ int colors = 0;
+ int out_generic = 0;
+ int generic = 0;
+ int i;
+
+ /* Fill in the input mapping */
+ for (i = 0; i < info->num_inputs; i++)
+ c->code->inputs[i] = i;
+
+ /* Fill in the output mapping */
+ for (i = 0; i < info->num_outputs; i++) {
+ switch (info->output_semantic_name[i]) {
+ case TGSI_SEMANTIC_PSIZE:
+ pointsize = true;
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ out_colors++;
+ break;
+ case TGSI_SEMANTIC_FOG:
+ case TGSI_SEMANTIC_GENERIC:
+ out_generic++;
+ break;
+ }
+ }
+
+ struct tgsi_parse_context parser;
+
+ tgsi_parse_init(&parser, vs->state.tokens);
+
+ while (!tgsi_parse_end_of_tokens(&parser)) {
+ tgsi_parse_token(&parser);
+
+ if (parser.FullToken.Token.Type != TGSI_TOKEN_TYPE_DECLARATION)
+ continue;
+
+ struct tgsi_full_declaration * decl = &parser.FullToken.FullDeclaration;
+
+ if (decl->Declaration.File != TGSI_FILE_OUTPUT)
+ continue;
+
+ switch (decl->Semantic.SemanticName) {
+ case TGSI_SEMANTIC_POSITION:
+ c->code->outputs[decl->DeclarationRange.First] = 0;
+ break;
+ case TGSI_SEMANTIC_PSIZE:
+ c->code->outputs[decl->DeclarationRange.First] = 1;
+ break;
+ case TGSI_SEMANTIC_COLOR:
+ c->code->outputs[decl->DeclarationRange.First] = 1 +
+ (pointsize ? 1 : 0) +
+ colors++;
+ break;
+ case TGSI_SEMANTIC_FOG:
+ case TGSI_SEMANTIC_GENERIC:
+ c->code->outputs[decl->DeclarationRange.First] = 1 +
+ (pointsize ? 1 : 0) +
+ out_colors +
+ generic++;
+ break;
+ default:
+ debug_printf("r300: vs: Bad semantic declaration %d\n",
+ decl->Semantic.SemanticName);
+ break;
+ }
+ }
+
+ tgsi_parse_free(&parser);
+}
+
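As a reading aid, the hardware output map that set_vertex_inputs_outputs() produces for a hypothetical shader writing position, point size, one color and one generic varying (example only, not part of the patch):

/* Output-map sketch:
 *
 *   TGSI output 0: POSITION -> c->code->outputs[0] = 0
 *   TGSI output 1: PSIZE    -> c->code->outputs[1] = 1
 *   TGSI output 2: COLOR    -> c->code->outputs[2] = 2  (1 + pointsize + colors++)
 *   TGSI output 3: GENERIC  -> c->code->outputs[3] = 3  (1 + pointsize + out_colors + generic++)
 *
 * Without a PSIZE output, the color would land in slot 1 and the
 * generic in slot 2.
 */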
+
+void r300_translate_vertex_shader(struct r300_context* r300,
+ struct r300_vertex_shader* vs)
+{
+ struct r300_vertex_program_compiler compiler;
+ struct tgsi_to_rc ttr;
+
+ /* Setup the compiler */
+ rc_init(&compiler.Base);
+
+ compiler.Base.Debug = DBG_ON(r300, DBG_VP);
+ compiler.code = &vs->code;
+ compiler.UserData = vs;
+
+ if (compiler.Base.Debug) {
+ debug_printf("r300: Initial vertex program\n");
+ tgsi_dump(vs->state.tokens, 0);
+ }
+
+ /* Translate TGSI to our internal representation */
+ ttr.compiler = &compiler.Base;
+ ttr.info = &vs->info;
+
+ r300_tgsi_to_rc(&ttr, vs->state.tokens);
+
+ compiler.RequiredOutputs = ~(~0 << vs->info.num_outputs);
+ compiler.SetHwInputOutput = &set_vertex_inputs_outputs;
+
+ /* Invoke the compiler */
+ r3xx_compile_vertex_program(&compiler);
+ if (compiler.Base.Error) {
+ /* Todo: Fail gracefully */
+ fprintf(stderr, "r300 VP: Compiler error\n");
+ abort();
+ }
+
+ /* And, finally... */
+ rc_destroy(&compiler.Base);
+ vs->translated = TRUE;
+}
+
+
+/* XXX get these to r300_reg */
+#define R300_PVS_DST_OPCODE(x) ((x) << 0)
+# define R300_VE_DOT_PRODUCT 1
+# define R300_VE_MULTIPLY 2
+# define R300_VE_ADD 3
+# define R300_VE_MAXIMUM 7
+# define R300_VE_SET_LESS_THAN 10
+#define R300_PVS_DST_MATH_INST (1 << 6)
+# define R300_ME_RECIP_DX 6
+#define R300_PVS_DST_MACRO_INST (1 << 7)
+# define R300_PVS_MACRO_OP_2CLK_MADD 0
+#define R300_PVS_DST_REG_TYPE(x) ((x) << 8)
+# define R300_PVS_DST_REG_TEMPORARY 0
+# define R300_PVS_DST_REG_A0 1
+# define R300_PVS_DST_REG_OUT 2
+# define R300_PVS_DST_REG_OUT_REPL_X 3
+# define R300_PVS_DST_REG_ALT_TEMPORARY 4
+# define R300_PVS_DST_REG_INPUT 5
+#define R300_PVS_DST_OFFSET(x) ((x) << 13)
+#define R300_PVS_DST_WE(x) ((x) << 20)
+#define R300_PVS_DST_WE_XYZW (0xf << 20)
+
+#define R300_PVS_SRC_REG_TYPE(x) ((x) << 0)
+# define R300_PVS_SRC_REG_TEMPORARY 0
+# define R300_PVS_SRC_REG_INPUT 1
+# define R300_PVS_SRC_REG_CONSTANT 2
+# define R300_PVS_SRC_REG_ALT_TEMPORARY 3
+#define R300_PVS_SRC_OFFSET(x) ((x) << 5)
+#define R300_PVS_SRC_SWIZZLE(x) ((x) << 13)
+# define R300_PVS_SRC_SELECT_X 0
+# define R300_PVS_SRC_SELECT_Y 1
+# define R300_PVS_SRC_SELECT_Z 2
+# define R300_PVS_SRC_SELECT_W 3
+# define R300_PVS_SRC_SELECT_FORCE_0 4
+# define R300_PVS_SRC_SELECT_FORCE_1 5
+# define R300_PVS_SRC_SWIZZLE_XYZW \
+ ((R300_PVS_SRC_SELECT_X | (R300_PVS_SRC_SELECT_Y << 3) | \
+ (R300_PVS_SRC_SELECT_Z << 6) | (R300_PVS_SRC_SELECT_W << 9)) << 13)
+# define R300_PVS_SRC_SWIZZLE_ZERO \
+ ((R300_PVS_SRC_SELECT_FORCE_0 | (R300_PVS_SRC_SELECT_FORCE_0 << 3) | \
+ (R300_PVS_SRC_SELECT_FORCE_0 << 6) | \
+ (R300_PVS_SRC_SELECT_FORCE_0 << 9)) << 13)
+# define R300_PVS_SRC_SWIZZLE_ONE \
+ ((R300_PVS_SRC_SELECT_FORCE_1 | (R300_PVS_SRC_SELECT_FORCE_1 << 3) | \
+ (R300_PVS_SRC_SELECT_FORCE_1 << 6) | \
+ (R300_PVS_SRC_SELECT_FORCE_1 << 9)) << 13)
+#define R300_PVS_MODIFIER_X (1 << 25)
+#define R300_PVS_MODIFIER_Y (1 << 26)
+#define R300_PVS_MODIFIER_Z (1 << 27)
+#define R300_PVS_MODIFIER_W (1 << 28)
+#define R300_PVS_NEGATE_XYZW \
+ (R300_PVS_MODIFIER_X | R300_PVS_MODIFIER_Y | \
+ R300_PVS_MODIFIER_Z | R300_PVS_MODIFIER_W)
+
+struct r300_vertex_program_code r300_passthrough_vertex_shader = {
+ .length = 8, /* two instructions */
+
+ /* MOV out[0], in[0] */
+ .body.d[0] = R300_PVS_DST_OPCODE(R300_VE_ADD) |
+ R300_PVS_DST_REG_TYPE(R300_PVS_DST_REG_OUT) |
+ R300_PVS_DST_OFFSET(0) | R300_PVS_DST_WE_XYZW,
+ .body.d[1] = R300_PVS_SRC_REG_TYPE(R300_PVS_SRC_REG_INPUT) |
+ R300_PVS_SRC_OFFSET(0) | R300_PVS_SRC_SWIZZLE_XYZW,
+ .body.d[2] = R300_PVS_SRC_SWIZZLE_ZERO,
+ .body.d[3] = 0x0,
+
+ /* MOV out[1], in[1] */
+ .body.d[4] = R300_PVS_DST_OPCODE(R300_VE_ADD) |
+ R300_PVS_DST_REG_TYPE(R300_PVS_DST_REG_OUT) |
+ R300_PVS_DST_OFFSET(1) | R300_PVS_DST_WE_XYZW,
+ .body.d[5] = R300_PVS_SRC_REG_TYPE(R300_PVS_SRC_REG_INPUT) |
+ R300_PVS_SRC_OFFSET(1) | R300_PVS_SRC_SWIZZLE_XYZW,
+ .body.d[6] = R300_PVS_SRC_SWIZZLE_ZERO,
+ .body.d[7] = 0x0,
+
+ .inputs[0] = 0,
+ .inputs[1] = 1,
+ .outputs[0] = 0,
+ .outputs[1] = 1,
+
+ .InputsRead = 3,
+ .OutputsWritten = 3
+};
+
diff --git a/src/gallium/drivers/r300/r300_vs.h b/src/gallium/drivers/r300/r300_vs.h
new file mode 100644
index 0000000000..2a4ce315e3
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_vs.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_VS_H
+#define R300_VS_H
+
+#include "pipe/p_state.h"
+#include "tgsi/tgsi_scan.h"
+
+#include "radeon_code.h"
+
+struct r300_context;
+
+struct r300_vertex_shader {
+ /* Parent class */
+ struct pipe_shader_state state;
+ struct tgsi_shader_info info;
+
+ /* Fallback shader, because Draw has issues */
+ struct draw_vertex_shader* draw;
+
+ /* Has this shader been translated yet? */
+ boolean translated;
+
+ /* Machine code (if translated) */
+ struct r300_vertex_program_code code;
+};
+
+
+extern struct r300_vertex_program_code r300_passthrough_vertex_shader;
+
+void r300_translate_vertex_shader(struct r300_context* r300,
+ struct r300_vertex_shader* vs);
+
+#endif /* R300_VS_H */
diff --git a/src/gallium/drivers/r300/r300_winsys.h b/src/gallium/drivers/r300/r300_winsys.h
new file mode 100644
index 0000000000..f18ad75a47
--- /dev/null
+++ b/src/gallium/drivers/r300/r300_winsys.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R300_WINSYS_H
+#define R300_WINSYS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* The public interface header for the r300 pipe driver.
+ * Any winsys hosting this pipe needs to implement r300_winsys and then
+ * call r300_create_context to start things. */
+
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/internal/p_winsys_screen.h"
+
+struct r300_winsys {
+ /* Parent class */
+ struct pipe_winsys base;
+
+ /* Opaque Radeon-specific winsys object. */
+ void* radeon_winsys;
+
+ /* PCI ID */
+ uint32_t pci_id;
+
+ /* GB pipe count */
+ uint32_t gb_pipes;
+
+ /* GART size. */
+ uint32_t gart_size;
+
+ /* VRAM size. */
+ uint32_t vram_size;
+
+ /* Add a pipe_buffer to the list of buffer objects to validate. */
+ boolean (*add_buffer)(struct r300_winsys* winsys,
+ struct pipe_buffer* pbuffer,
+ uint32_t rd,
+ uint32_t wd);
+
+ /* Revalidate all currently setup pipe_buffers.
+ * Returns TRUE if a flush is required. */
+ boolean (*validate)(struct r300_winsys* winsys);
+
+ /* Check to see if there's room for commands. */
+ boolean (*check_cs)(struct r300_winsys* winsys, int size);
+
+ /* Start a command emit. */
+ void (*begin_cs)(struct r300_winsys* winsys,
+ int size,
+ const char* file,
+ const char* function,
+ int line);
+
+ /* Write a dword to the command buffer. */
+ void (*write_cs_dword)(struct r300_winsys* winsys, uint32_t dword);
+
+ /* Write a relocated dword to the command buffer. */
+ void (*write_cs_reloc)(struct r300_winsys* winsys,
+ struct pipe_buffer* bo,
+ uint32_t rd,
+ uint32_t wd,
+ uint32_t flags);
+
+ /* Finish a command emit. */
+ void (*end_cs)(struct r300_winsys* winsys,
+ const char* file,
+ const char* function,
+ int line);
+
+ /* Flush the CS. */
+ void (*flush_cs)(struct r300_winsys* winsys);
+};
+
+struct pipe_context* r300_create_context(struct pipe_screen* screen,
+ struct r300_winsys* r300_winsys);
+
+boolean r300_get_texture_buffer(struct pipe_texture* texture,
+ struct pipe_buffer** buffer,
+ unsigned* stride);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* R300_WINSYS_H */
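A minimal sketch of how a winsys client might drive the command-stream callbacks declared above; the emit_one_reg() helper and its two-dword layout are assumptions for illustration only, and check_cs() is assumed to return FALSE when the buffer has no room:

/* Sketch only: assumes the r300_winsys callbacks above have been filled
 * in and a context was obtained via r300_create_context(). */
static void emit_one_reg(struct r300_winsys *winsys,
                         uint32_t header, uint32_t value)
{
    /* Reserve room, falling back to a flush when the CS is full. */
    if (!winsys->check_cs(winsys, 2))
        winsys->flush_cs(winsys);

    winsys->begin_cs(winsys, 2, __FILE__, __FUNCTION__, __LINE__);
    winsys->write_cs_dword(winsys, header); /* packet header (simplified) */
    winsys->write_cs_dword(winsys, value);  /* payload */
    winsys->end_cs(winsys, __FILE__, __FUNCTION__, __LINE__);
}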
diff --git a/src/gallium/drivers/r300/r3xx_fs.c b/src/gallium/drivers/r300/r3xx_fs.c
new file mode 100644
index 0000000000..c1c1194d58
--- /dev/null
+++ b/src/gallium/drivers/r300/r3xx_fs.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ * Joakim Sindholt <opensource@zhasha.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "r3xx_fs.h"
+
+#include "r300_reg.h"
+
+struct rX00_fragment_program_code r3xx_passthrough_fragment_shader = {
+ .code.r300.alu.length = 1,
+ .code.r300.tex.length = 0,
+
+ .code.r300.config = 0,
+ .code.r300.pixsize = 0,
+ .code.r300.code_offset = 0,
+ .code.r300.code_addr[3] = R300_RGBA_OUT,
+
+ .code.r300.alu.inst[0].rgb_inst = R300_RGB_SWIZA(R300_ALU_ARGC_SRC0C_XYZ) |
+ R300_RGB_SWIZB(R300_ALU_ARGC_SRC0C_XYZ) |
+ R300_RGB_SWIZC(R300_ALU_ARGC_ZERO) |
+ R300_ALU_OUTC_CMP,
+ .code.r300.alu.inst[0].rgb_addr = R300_RGB_ADDR0(0) | R300_RGB_ADDR1(0) |
+ R300_RGB_ADDR2(0) | R300_ALU_DSTC_OUTPUT_XYZ,
+ .code.r300.alu.inst[0].alpha_inst = R300_ALPHA_SWIZA(R300_ALU_ARGA_SRC0A) |
+ R300_ALPHA_SWIZB(R300_ALU_ARGA_SRC0A) |
+ R300_ALPHA_SWIZC(R300_ALU_ARGA_ZERO) |
+ R300_ALU_OUTA_CMP,
+ .code.r300.alu.inst[0].alpha_addr = R300_ALPHA_ADDR0(0) |
+ R300_ALPHA_ADDR1(0) | R300_ALPHA_ADDR2(0) | R300_ALU_DSTA_OUTPUT,
+};
+
+struct rX00_fragment_program_code r3xx_texture_fragment_shader = {
+ .code.r300.alu.length = 1,
+ .code.r300.tex.length = 1,
+
+ .code.r300.config = R300_PFS_CNTL_FIRST_NODE_HAS_TEX,
+ .code.r300.pixsize = 0,
+ .code.r300.code_offset = 0,
+ .code.r300.code_addr[3] = R300_RGBA_OUT,
+
+ .code.r300.tex.inst[0] = R300_TEX_OP_LD << R300_TEX_INST_SHIFT,
+
+ .code.r300.alu.inst[0].rgb_inst = R300_RGB_SWIZA(R300_ALU_ARGC_SRC0C_XYZ) |
+ R300_RGB_SWIZB(R300_ALU_ARGC_SRC0C_XYZ) |
+ R300_RGB_SWIZC(R300_ALU_ARGC_ZERO) |
+ R300_ALU_OUTC_CMP,
+ .code.r300.alu.inst[0].rgb_addr = R300_RGB_ADDR0(0) | R300_RGB_ADDR1(0) |
+ R300_RGB_ADDR2(0) | R300_ALU_DSTC_OUTPUT_XYZ,
+ .code.r300.alu.inst[0].alpha_inst = R300_ALPHA_SWIZA(R300_ALU_ARGA_SRC0A) |
+ R300_ALPHA_SWIZB(R300_ALU_ARGA_SRC0A) |
+ R300_ALPHA_SWIZC(R300_ALU_ARGA_ZERO) |
+ R300_ALU_OUTA_CMP,
+ .code.r300.alu.inst[0].alpha_addr = R300_ALPHA_ADDR0(0) |
+ R300_ALPHA_ADDR1(0) | R300_ALPHA_ADDR2(0) | R300_ALU_DSTA_OUTPUT,
+};
diff --git a/src/gallium/drivers/r300/r3xx_fs.h b/src/gallium/drivers/r300/r3xx_fs.h
new file mode 100644
index 0000000000..51cd245724
--- /dev/null
+++ b/src/gallium/drivers/r300/r3xx_fs.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ * Joakim Sindholt <opensource@zhasha.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R3XX_FS_H
+#define R3XX_FS_H
+
+#include "radeon_code.h"
+
+struct rX00_fragment_program_code r3xx_passthrough_fragment_shader;
+struct rX00_fragment_program_code r3xx_texture_fragment_shader;
+
+#endif /* R3XX_FS_H */
diff --git a/src/gallium/drivers/r300/r5xx_fs.c b/src/gallium/drivers/r300/r5xx_fs.c
new file mode 100644
index 0000000000..f072deab0d
--- /dev/null
+++ b/src/gallium/drivers/r300/r5xx_fs.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ * Joakim Sindholt <opensource@zhasha.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "r5xx_fs.h"
+
+#include "r300_reg.h"
+
+/* XXX this all should find its way back to r300_reg */
+/* Swizzle tools */
+#define R500_SWIZZLE_ZERO 4
+#define R500_SWIZZLE_HALF 5
+#define R500_SWIZZLE_ONE 6
+#define R500_SWIZ_RGB_ZERO ((4 << 0) | (4 << 3) | (4 << 6))
+#define R500_SWIZ_RGB_ONE ((6 << 0) | (6 << 3) | (6 << 6))
+#define R500_SWIZ_RGB_RGB ((0 << 0) | (1 << 3) | (2 << 6))
+#define R500_SWIZ_MOD_NEG 1
+#define R500_SWIZ_MOD_ABS 2
+#define R500_SWIZ_MOD_NEG_ABS 3
+/* Swizzles for inst2 */
+#define R500_SWIZ_TEX_STRQ(x) ((x) << 8)
+#define R500_SWIZ_TEX_RGBA(x) ((x) << 24)
+/* Swizzles for inst3 */
+#define R500_SWIZ_RGB_A(x) ((x) << 2)
+#define R500_SWIZ_RGB_B(x) ((x) << 15)
+/* Swizzles for inst4 */
+#define R500_SWIZ_ALPHA_A(x) ((x) << 14)
+#define R500_SWIZ_ALPHA_B(x) ((x) << 21)
+/* Swizzle for inst5 */
+#define R500_SWIZ_RGBA_C(x) ((x) << 14)
+#define R500_SWIZ_ALPHA_C(x) ((x) << 27)
+/* Writemasks */
+#define R500_TEX_WMASK(x) ((x) << 11)
+#define R500_ALU_WMASK(x) ((x) << 11)
+#define R500_ALU_OMASK(x) ((x) << 15)
+#define R500_W_OMASK (1 << 31)
+
+struct rX00_fragment_program_code r5xx_passthrough_fragment_shader = {
+ .code.r500.max_temp_idx = 0,
+ .code.r500.inst_end = 0,
+
+ .code.r500.inst[0].inst0 = R500_INST_TYPE_OUT |
+ R500_INST_TEX_SEM_WAIT | R500_INST_LAST |
+ R500_INST_RGB_OMASK_RGB | R500_INST_ALPHA_OMASK |
+ R500_INST_RGB_CLAMP | R500_INST_ALPHA_CLAMP,
+ .code.r500.inst[0].inst1 =
+ R500_RGB_ADDR0(0) | R500_RGB_ADDR1(0) | R500_RGB_ADDR1_CONST |
+ R500_RGB_ADDR2(0) | R500_RGB_ADDR2_CONST,
+ .code.r500.inst[0].inst2 =
+ R500_ALPHA_ADDR0(0) | R500_ALPHA_ADDR1(0) | R500_ALPHA_ADDR1_CONST |
+ R500_ALPHA_ADDR2(0) | R500_ALPHA_ADDR2_CONST,
+ .code.r500.inst[0].inst3 =
+ R500_ALU_RGB_SEL_A_SRC0 | R500_ALU_RGB_R_SWIZ_A_R |
+ R500_ALU_RGB_G_SWIZ_A_G | R500_ALU_RGB_B_SWIZ_A_B |
+ R500_ALU_RGB_SEL_B_SRC0 | R500_ALU_RGB_R_SWIZ_B_R |
+ R500_ALU_RGB_B_SWIZ_B_G | R500_ALU_RGB_G_SWIZ_B_B,
+ .code.r500.inst[0].inst4 =
+ R500_ALPHA_OP_CMP | R500_ALPHA_SWIZ_A_A | R500_ALPHA_SWIZ_B_A,
+ .code.r500.inst[0].inst5 =
+ R500_ALU_RGBA_OP_CMP | R500_ALU_RGBA_R_SWIZ_0 |
+ R500_ALU_RGBA_G_SWIZ_0 | R500_ALU_RGBA_B_SWIZ_0 |
+ R500_ALU_RGBA_A_SWIZ_0,
+};
+
+struct rX00_fragment_program_code r5xx_texture_fragment_shader = {
+ .code.r500.max_temp_idx = 0,
+ .code.r500.inst_end = 1,
+
+ .code.r500.inst[0].inst0 = R500_INST_TYPE_TEX |
+ R500_INST_TEX_SEM_WAIT |
+ R500_INST_RGB_WMASK_RGB | R500_INST_ALPHA_WMASK |
+ R500_INST_RGB_CLAMP | R500_INST_ALPHA_CLAMP,
+ .code.r500.inst[0].inst1 = R500_TEX_ID(0) | R500_TEX_INST_LD |
+ R500_TEX_SEM_ACQUIRE | R500_TEX_IGNORE_UNCOVERED,
+ .code.r500.inst[0].inst2 = R500_TEX_SRC_ADDR(0) |
+ R500_TEX_SRC_S_SWIZ_R | R500_TEX_SRC_T_SWIZ_G |
+ R500_TEX_SRC_R_SWIZ_B | R500_TEX_SRC_Q_SWIZ_A |
+ R500_TEX_DST_ADDR(0) |
+ R500_TEX_DST_R_SWIZ_R | R500_TEX_DST_G_SWIZ_G |
+ R500_TEX_DST_B_SWIZ_B | R500_TEX_DST_A_SWIZ_A,
+ .code.r500.inst[0].inst3 = 0x0,
+ .code.r500.inst[0].inst4 = 0x0,
+ .code.r500.inst[0].inst5 = 0x0,
+
+ .code.r500.inst[1].inst0 = R500_INST_TYPE_OUT |
+ R500_INST_TEX_SEM_WAIT | R500_INST_LAST |
+ R500_INST_RGB_OMASK_RGB | R500_INST_ALPHA_OMASK |
+ R500_INST_RGB_CLAMP | R500_INST_ALPHA_CLAMP,
+ .code.r500.inst[1].inst1 =
+ R500_RGB_ADDR0(0) | R500_RGB_ADDR1(0) | R500_RGB_ADDR1_CONST |
+ R500_RGB_ADDR2(0) | R500_RGB_ADDR2_CONST,
+ .code.r500.inst[1].inst2 =
+ R500_ALPHA_ADDR0(0) | R500_ALPHA_ADDR1(0) | R500_ALPHA_ADDR1_CONST |
+ R500_ALPHA_ADDR2(0) | R500_ALPHA_ADDR2_CONST,
+ .code.r500.inst[1].inst3 =
+ R500_ALU_RGB_SEL_A_SRC0 | R500_ALU_RGB_R_SWIZ_A_R |
+ R500_ALU_RGB_G_SWIZ_A_G | R500_ALU_RGB_B_SWIZ_A_B |
+ R500_ALU_RGB_SEL_B_SRC0 | R500_ALU_RGB_R_SWIZ_B_R |
+ R500_ALU_RGB_B_SWIZ_B_G | R500_ALU_RGB_G_SWIZ_B_B,
+ .code.r500.inst[1].inst4 =
+ R500_ALPHA_OP_CMP | R500_ALPHA_SWIZ_A_A | R500_ALPHA_SWIZ_B_A,
+ .code.r500.inst[1].inst5 =
+ R500_ALU_RGBA_OP_CMP | R500_ALU_RGBA_R_SWIZ_0 |
+ R500_ALU_RGBA_G_SWIZ_0 | R500_ALU_RGBA_B_SWIZ_0 |
+ R500_ALU_RGBA_A_SWIZ_0,
+};
diff --git a/src/gallium/drivers/r300/r5xx_fs.h b/src/gallium/drivers/r300/r5xx_fs.h
new file mode 100644
index 0000000000..a4addde32b
--- /dev/null
+++ b/src/gallium/drivers/r300/r5xx_fs.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ * Joakim Sindholt <opensource@zhasha.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef R5XX_FS_H
+#define R5XX_FS_H
+
+#include "radeon_code.h"
+
+struct rX00_fragment_program_code r5xx_passthrough_fragment_shader;
+struct rX00_fragment_program_code r5xx_texture_fragment_shader;
+
+#endif /* R5XX_FS_H */
diff --git a/src/gallium/drivers/softpipe/Makefile b/src/gallium/drivers/softpipe/Makefile
new file mode 100644
index 0000000000..516e3992fd
--- /dev/null
+++ b/src/gallium/drivers/softpipe/Makefile
@@ -0,0 +1,44 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = softpipe
+
+C_SOURCES = \
+ sp_fs_exec.c \
+ sp_fs_sse.c \
+ sp_fs_llvm.c \
+ sp_clear.c \
+ sp_flush.c \
+ sp_query.c \
+ sp_context.c \
+ sp_draw_arrays.c \
+ sp_prim_setup.c \
+ sp_prim_vbuf.c \
+ sp_quad_pipe.c \
+ sp_quad_alpha_test.c \
+ sp_quad_blend.c \
+ sp_quad_colormask.c \
+ sp_quad_coverage.c \
+ sp_quad_depth_test.c \
+ sp_quad_earlyz.c \
+ sp_quad_fs.c \
+ sp_quad_occlusion.c \
+ sp_quad_output.c \
+ sp_quad_stencil.c \
+ sp_quad_stipple.c \
+ sp_screen.c \
+ sp_setup.c \
+ sp_state_blend.c \
+ sp_state_clip.c \
+ sp_state_derived.c \
+ sp_state_fs.c \
+ sp_state_sampler.c \
+ sp_state_rasterizer.c \
+ sp_state_surface.c \
+ sp_state_vertex.c \
+ sp_texture.c \
+ sp_tex_sample.c \
+ sp_tile_cache.c \
+ sp_surface.c
+
+include ../../Makefile.template
diff --git a/src/gallium/drivers/softpipe/SConscript b/src/gallium/drivers/softpipe/SConscript
new file mode 100644
index 0000000000..f8720638a7
--- /dev/null
+++ b/src/gallium/drivers/softpipe/SConscript
@@ -0,0 +1,46 @@
+Import('*')
+
+env = env.Clone()
+
+softpipe = env.ConvenienceLibrary(
+ target = 'softpipe',
+ source = [
+ 'sp_fs_exec.c',
+ 'sp_fs_sse.c',
+ 'sp_fs_llvm.c',
+ 'sp_clear.c',
+ 'sp_context.c',
+ 'sp_draw_arrays.c',
+ 'sp_flush.c',
+ 'sp_prim_setup.c',
+ 'sp_prim_vbuf.c',
+ 'sp_setup.c',
+ 'sp_quad_alpha_test.c',
+ 'sp_quad_blend.c',
+ 'sp_quad_pipe.c',
+ 'sp_quad_colormask.c',
+ 'sp_quad_coverage.c',
+ 'sp_quad_depth_test.c',
+ 'sp_quad_earlyz.c',
+ 'sp_quad_fs.c',
+ 'sp_quad_occlusion.c',
+ 'sp_quad_output.c',
+ 'sp_quad_stencil.c',
+ 'sp_quad_stipple.c',
+ 'sp_query.c',
+ 'sp_screen.c',
+ 'sp_state_blend.c',
+ 'sp_state_clip.c',
+ 'sp_state_derived.c',
+ 'sp_state_fs.c',
+ 'sp_state_rasterizer.c',
+ 'sp_state_sampler.c',
+ 'sp_state_surface.c',
+ 'sp_state_vertex.c',
+ 'sp_surface.c',
+ 'sp_tex_sample.c',
+ 'sp_texture.c',
+ 'sp_tile_cache.c',
+ ])
+
+Export('softpipe')
\ No newline at end of file
diff --git a/src/gallium/drivers/softpipe/sp_clear.c b/src/gallium/drivers/softpipe/sp_clear.c
new file mode 100644
index 0000000000..8fac8e6e05
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_clear.c
@@ -0,0 +1,89 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Author:
+ * Brian Paul
+ * Michel Dänzer
+ */
+
+
+#include "pipe/p_defines.h"
+#include "util/u_pack_color.h"
+#include "sp_clear.h"
+#include "sp_context.h"
+#include "sp_tile_cache.h"
+
+
+/**
+ * Clear the given buffers to the specified values.
+ * No masking, no scissor (clear entire buffer).
+ */
+void
+softpipe_clear(struct pipe_context *pipe, unsigned buffers, const float *rgba,
+ double depth, unsigned stencil)
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+ unsigned cv;
+ uint i;
+
+ if (softpipe->no_rast)
+ return;
+
+#if 0
+ softpipe_update_derived(softpipe); /* not needed?? */
+#endif
+
+ if (buffers & PIPE_CLEAR_COLOR) {
+ for (i = 0; i < softpipe->framebuffer.nr_cbufs; i++) {
+ struct pipe_surface *ps = softpipe->framebuffer.cbufs[i];
+
+ util_pack_color(rgba, ps->format, &cv);
+ sp_tile_cache_clear(softpipe->cbuf_cache[i], rgba, cv);
+
+#if !TILE_CLEAR_OPTIMIZATION
+ /* non-cached surface */
+ pipe->surface_fill(pipe, ps, 0, 0, ps->width, ps->height, cv);
+#endif
+ }
+ }
+
+ if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
+ static const float zero[4] = { 0.0F, 0.0F, 0.0F, 0.0F };
+ struct pipe_surface *ps = softpipe->framebuffer.zsbuf;
+
+ cv = util_pack_z_stencil(ps->format, depth, stencil);
+ sp_tile_cache_clear(softpipe->zsbuf_cache, zero, cv);
+
+#if !TILE_CLEAR_OPTIMIZATION
+ /* non-cached surface */
+ pipe->surface_fill(pipe, ps, 0, 0, ps->width, ps->height, cv);
+#endif
+ }
+
+ softpipe->dirty_render_cache = TRUE;
+}
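A hypothetical caller-side sketch of this entry point, which sp_context.c below installs as pipe->clear (the clear_example() helper is an assumption, not part of the patch):

/* Sketch only: clear all bound color buffers to opaque black and the
 * depth/stencil buffer to depth 1.0, stencil 0. */
static void clear_example(struct pipe_context *pipe)
{
    static const float black[4] = { 0.0f, 0.0f, 0.0f, 1.0f };

    pipe->clear(pipe,
                PIPE_CLEAR_COLOR | PIPE_CLEAR_DEPTHSTENCIL,
                black,
                1.0,  /* depth */
                0);   /* stencil */
}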
diff --git a/src/gallium/drivers/softpipe/sp_clear.h b/src/gallium/drivers/softpipe/sp_clear.h
new file mode 100644
index 0000000000..9be3b86fe9
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_clear.h
@@ -0,0 +1,42 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Author:
+ * Brian Paul
+ */
+
+#ifndef SP_CLEAR_H
+#define SP_CLEAR_H
+
+struct pipe_context;
+
+extern void
+softpipe_clear(struct pipe_context *pipe, unsigned buffers, const float *rgba,
+ double depth, unsigned stencil);
+
+
+#endif /* SP_CLEAR_H */
diff --git a/src/gallium/drivers/softpipe/sp_context.c b/src/gallium/drivers/softpipe/sp_context.c
new file mode 100644
index 0000000000..c699c433e9
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_context.c
@@ -0,0 +1,330 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ * Copyright 2008 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Author:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "draw/draw_context.h"
+#include "pipe/p_defines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "sp_clear.h"
+#include "sp_context.h"
+#include "sp_flush.h"
+#include "sp_prim_setup.h"
+#include "sp_prim_vbuf.h"
+#include "sp_state.h"
+#include "sp_surface.h"
+#include "sp_tile_cache.h"
+#include "sp_texture.h"
+#include "sp_winsys.h"
+#include "sp_query.h"
+
+
+
+/**
+ * Map any drawing surfaces which aren't already mapped
+ */
+void
+softpipe_map_transfers(struct softpipe_context *sp)
+{
+ unsigned i;
+
+ for (i = 0; i < sp->framebuffer.nr_cbufs; i++) {
+ sp_tile_cache_map_transfers(sp->cbuf_cache[i]);
+ }
+
+ sp_tile_cache_map_transfers(sp->zsbuf_cache);
+}
+
+
+/**
+ * Unmap any mapped drawing surfaces
+ */
+void
+softpipe_unmap_transfers(struct softpipe_context *sp)
+{
+ uint i;
+
+ for (i = 0; i < sp->framebuffer.nr_cbufs; i++)
+ sp_flush_tile_cache(sp, sp->cbuf_cache[i]);
+ sp_flush_tile_cache(sp, sp->zsbuf_cache);
+
+ for (i = 0; i < sp->framebuffer.nr_cbufs; i++) {
+ sp_tile_cache_unmap_transfers(sp->cbuf_cache[i]);
+ }
+ sp_tile_cache_unmap_transfers(sp->zsbuf_cache);
+}
+
+
+static void
+softpipe_destroy( struct pipe_context *pipe )
+{
+ struct softpipe_context *softpipe = softpipe_context( pipe );
+ uint i;
+
+ if (softpipe->draw)
+ draw_destroy( softpipe->draw );
+
+ for (i = 0; i < SP_NUM_QUAD_THREADS; i++) {
+ softpipe->quad[i].polygon_stipple->destroy( softpipe->quad[i].polygon_stipple );
+ softpipe->quad[i].earlyz->destroy( softpipe->quad[i].earlyz );
+ softpipe->quad[i].shade->destroy( softpipe->quad[i].shade );
+ softpipe->quad[i].alpha_test->destroy( softpipe->quad[i].alpha_test );
+ softpipe->quad[i].depth_test->destroy( softpipe->quad[i].depth_test );
+ softpipe->quad[i].stencil_test->destroy( softpipe->quad[i].stencil_test );
+ softpipe->quad[i].occlusion->destroy( softpipe->quad[i].occlusion );
+ softpipe->quad[i].coverage->destroy( softpipe->quad[i].coverage );
+ softpipe->quad[i].blend->destroy( softpipe->quad[i].blend );
+ softpipe->quad[i].colormask->destroy( softpipe->quad[i].colormask );
+ softpipe->quad[i].output->destroy( softpipe->quad[i].output );
+ }
+
+ for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
+ sp_destroy_tile_cache(softpipe->cbuf_cache[i]);
+ sp_destroy_tile_cache(softpipe->zsbuf_cache);
+
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
+ sp_destroy_tile_cache(softpipe->tex_cache[i]);
+
+ for (i = 0; i < Elements(softpipe->constants); i++) {
+ if (softpipe->constants[i].buffer) {
+ pipe_buffer_reference(&softpipe->constants[i].buffer, NULL);
+ }
+ }
+
+ FREE( softpipe );
+}
+
+
+/**
+ * if (the texture is being used as a framebuffer surface)
+ * return PIPE_REFERENCED_FOR_WRITE
+ * else if (the texture is a bound texture source)
+ * return PIPE_REFERENCED_FOR_READ XXX not done yet
+ * else
+ * return PIPE_UNREFERENCED
+ */
+static unsigned int
+softpipe_is_texture_referenced( struct pipe_context *pipe,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level)
+{
+ struct softpipe_context *softpipe = softpipe_context( pipe );
+ unsigned i;
+
+ if (softpipe->dirty_render_cache) {
+ for (i = 0; i < softpipe->framebuffer.nr_cbufs; i++) {
+ if (softpipe->framebuffer.cbufs[i] &&
+ softpipe->framebuffer.cbufs[i]->texture == texture) {
+ return PIPE_REFERENCED_FOR_WRITE;
+ }
+ }
+ if (softpipe->framebuffer.zsbuf &&
+ softpipe->framebuffer.zsbuf->texture == texture) {
+ return PIPE_REFERENCED_FOR_WRITE;
+ }
+ }
+
+ /* FIXME: we also need to do the same for the texture cache */
+
+ return PIPE_UNREFERENCED;
+}
+
+
+static unsigned int
+softpipe_is_buffer_referenced( struct pipe_context *pipe,
+ struct pipe_buffer *buf)
+{
+ return PIPE_UNREFERENCED;
+}
+
+
+struct pipe_context *
+softpipe_create( struct pipe_screen *screen )
+{
+ struct softpipe_context *softpipe = CALLOC_STRUCT(softpipe_context);
+ uint i;
+
+ util_init_math();
+
+#ifdef PIPE_ARCH_X86
+ softpipe->use_sse = !debug_get_bool_option( "GALLIUM_NOSSE", FALSE );
+#else
+ softpipe->use_sse = FALSE;
+#endif
+
+ softpipe->dump_fs = debug_get_bool_option( "GALLIUM_DUMP_FS", FALSE );
+
+ softpipe->pipe.winsys = screen->winsys;
+ softpipe->pipe.screen = screen;
+ softpipe->pipe.destroy = softpipe_destroy;
+
+ /* state setters */
+ softpipe->pipe.create_blend_state = softpipe_create_blend_state;
+ softpipe->pipe.bind_blend_state = softpipe_bind_blend_state;
+ softpipe->pipe.delete_blend_state = softpipe_delete_blend_state;
+
+ softpipe->pipe.create_sampler_state = softpipe_create_sampler_state;
+ softpipe->pipe.bind_sampler_states = softpipe_bind_sampler_states;
+ softpipe->pipe.delete_sampler_state = softpipe_delete_sampler_state;
+
+ softpipe->pipe.create_depth_stencil_alpha_state = softpipe_create_depth_stencil_state;
+ softpipe->pipe.bind_depth_stencil_alpha_state = softpipe_bind_depth_stencil_state;
+ softpipe->pipe.delete_depth_stencil_alpha_state = softpipe_delete_depth_stencil_state;
+
+ softpipe->pipe.create_rasterizer_state = softpipe_create_rasterizer_state;
+ softpipe->pipe.bind_rasterizer_state = softpipe_bind_rasterizer_state;
+ softpipe->pipe.delete_rasterizer_state = softpipe_delete_rasterizer_state;
+
+ softpipe->pipe.create_fs_state = softpipe_create_fs_state;
+ softpipe->pipe.bind_fs_state = softpipe_bind_fs_state;
+ softpipe->pipe.delete_fs_state = softpipe_delete_fs_state;
+
+ softpipe->pipe.create_vs_state = softpipe_create_vs_state;
+ softpipe->pipe.bind_vs_state = softpipe_bind_vs_state;
+ softpipe->pipe.delete_vs_state = softpipe_delete_vs_state;
+
+ softpipe->pipe.set_blend_color = softpipe_set_blend_color;
+ softpipe->pipe.set_clip_state = softpipe_set_clip_state;
+ softpipe->pipe.set_constant_buffer = softpipe_set_constant_buffer;
+ softpipe->pipe.set_framebuffer_state = softpipe_set_framebuffer_state;
+ softpipe->pipe.set_polygon_stipple = softpipe_set_polygon_stipple;
+ softpipe->pipe.set_scissor_state = softpipe_set_scissor_state;
+ softpipe->pipe.set_sampler_textures = softpipe_set_sampler_textures;
+ softpipe->pipe.set_viewport_state = softpipe_set_viewport_state;
+
+ softpipe->pipe.set_vertex_buffers = softpipe_set_vertex_buffers;
+ softpipe->pipe.set_vertex_elements = softpipe_set_vertex_elements;
+
+ softpipe->pipe.draw_arrays = softpipe_draw_arrays;
+ softpipe->pipe.draw_elements = softpipe_draw_elements;
+ softpipe->pipe.draw_range_elements = softpipe_draw_range_elements;
+ softpipe->pipe.set_edgeflags = softpipe_set_edgeflags;
+
+
+ softpipe->pipe.clear = softpipe_clear;
+ softpipe->pipe.flush = softpipe_flush;
+
+ softpipe->pipe.is_texture_referenced = softpipe_is_texture_referenced;
+ softpipe->pipe.is_buffer_referenced = softpipe_is_buffer_referenced;
+
+ softpipe_init_query_funcs( softpipe );
+
+ /*
+ * Alloc caches for accessing drawing surfaces and textures.
+ * Must be before quad stage setup!
+ */
+ for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
+ softpipe->cbuf_cache[i] = sp_create_tile_cache( screen );
+ softpipe->zsbuf_cache = sp_create_tile_cache( screen );
+
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
+ softpipe->tex_cache[i] = sp_create_tile_cache( screen );
+
+
+ /* setup quad rendering stages */
+ for (i = 0; i < SP_NUM_QUAD_THREADS; i++) {
+ softpipe->quad[i].polygon_stipple = sp_quad_polygon_stipple_stage(softpipe);
+ softpipe->quad[i].earlyz = sp_quad_earlyz_stage(softpipe);
+ softpipe->quad[i].shade = sp_quad_shade_stage(softpipe);
+ softpipe->quad[i].alpha_test = sp_quad_alpha_test_stage(softpipe);
+ softpipe->quad[i].depth_test = sp_quad_depth_test_stage(softpipe);
+ softpipe->quad[i].stencil_test = sp_quad_stencil_test_stage(softpipe);
+ softpipe->quad[i].occlusion = sp_quad_occlusion_stage(softpipe);
+ softpipe->quad[i].coverage = sp_quad_coverage_stage(softpipe);
+ softpipe->quad[i].blend = sp_quad_blend_stage(softpipe);
+ softpipe->quad[i].colormask = sp_quad_colormask_stage(softpipe);
+ softpipe->quad[i].output = sp_quad_output_stage(softpipe);
+ }
+
+ /* vertex shader samplers */
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
+ softpipe->tgsi.vert_samplers[i].base.get_samples = sp_get_samples_vertex;
+ softpipe->tgsi.vert_samplers[i].unit = i;
+ softpipe->tgsi.vert_samplers[i].sp = softpipe;
+ softpipe->tgsi.vert_samplers[i].cache = softpipe->tex_cache[i];
+ softpipe->tgsi.vert_samplers_list[i] = &softpipe->tgsi.vert_samplers[i];
+ }
+
+ /* fragment shader samplers */
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
+ softpipe->tgsi.frag_samplers[i].base.get_samples = sp_get_samples_fragment;
+ softpipe->tgsi.frag_samplers[i].unit = i;
+ softpipe->tgsi.frag_samplers[i].sp = softpipe;
+ softpipe->tgsi.frag_samplers[i].cache = softpipe->tex_cache[i];
+ softpipe->tgsi.frag_samplers_list[i] = &softpipe->tgsi.frag_samplers[i];
+ }
+
+ /*
+ * Create drawing context and plug our rendering stage into it.
+ */
+ softpipe->draw = draw_create();
+ if (!softpipe->draw)
+ goto fail;
+
+ draw_texture_samplers(softpipe->draw,
+ PIPE_MAX_SAMPLERS,
+ (struct tgsi_sampler **)
+ softpipe->tgsi.vert_samplers_list);
+
+ softpipe->setup = sp_draw_render_stage(softpipe);
+ if (!softpipe->setup)
+ goto fail;
+
+ if (debug_get_bool_option( "SP_NO_RAST", FALSE ))
+ softpipe->no_rast = TRUE;
+
+ if (debug_get_bool_option( "SP_NO_VBUF", FALSE )) {
+ /* Deprecated path -- vbuf is the intended interface to the draw module:
+ */
+ draw_set_rasterize_stage(softpipe->draw, softpipe->setup);
+ }
+ else {
+ sp_init_vbuf(softpipe);
+ }
+
+ /* plug in AA line/point stages */
+ draw_install_aaline_stage(softpipe->draw, &softpipe->pipe);
+ draw_install_aapoint_stage(softpipe->draw, &softpipe->pipe);
+
+#if USE_DRAW_STAGE_PSTIPPLE
+ /* Do polygon stipple w/ texture map + frag prog? */
+ draw_install_pstipple_stage(softpipe->draw, &softpipe->pipe);
+#endif
+
+ sp_init_surface_functions(softpipe);
+
+ return &softpipe->pipe;
+
+ fail:
+ softpipe_destroy(&softpipe->pipe);
+ return NULL;
+}
+
diff --git a/src/gallium/drivers/softpipe/sp_context.h b/src/gallium/drivers/softpipe/sp_context.h
new file mode 100644
index 0000000000..7888c2f644
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_context.h
@@ -0,0 +1,168 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef SP_CONTEXT_H
+#define SP_CONTEXT_H
+
+#include "pipe/p_context.h"
+
+#include "draw/draw_vertex.h"
+
+#include "sp_quad_pipe.h"
+#include "sp_tex_sample.h"
+
+
+/**
+ * This is a temporary variable for testing draw-stage polygon stipple.
+ * If zero, do stipple in sp_quad_stipple.c
+ */
+#define USE_DRAW_STAGE_PSTIPPLE 1
+
+/* Number of threads working on individual quads.
+ * Setting to 1 disables this feature.
+ */
+#define SP_NUM_QUAD_THREADS 1
+
+struct softpipe_vbuf_render;
+struct draw_context;
+struct draw_stage;
+struct softpipe_tile_cache;
+struct sp_fragment_shader;
+struct sp_vertex_shader;
+
+
+struct softpipe_context {
+ struct pipe_context pipe; /**< base class */
+
+ /** Constant state objects */
+ const struct pipe_blend_state *blend;
+ const struct pipe_sampler_state *sampler[PIPE_MAX_SAMPLERS];
+ const struct pipe_depth_stencil_alpha_state *depth_stencil;
+ const struct pipe_rasterizer_state *rasterizer;
+ const struct sp_fragment_shader *fs;
+ const struct sp_vertex_shader *vs;
+
+ /** Other rendering state */
+ struct pipe_blend_color blend_color;
+ struct pipe_clip_state clip;
+ struct pipe_constant_buffer constants[PIPE_SHADER_TYPES];
+ struct pipe_framebuffer_state framebuffer;
+ struct pipe_poly_stipple poly_stipple;
+ struct pipe_scissor_state scissor;
+ struct pipe_texture *texture[PIPE_MAX_SAMPLERS];
+ struct pipe_viewport_state viewport;
+ struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
+ struct pipe_vertex_element vertex_element[PIPE_MAX_ATTRIBS];
+
+ unsigned num_samplers;
+ unsigned num_textures;
+ unsigned num_vertex_elements;
+ unsigned num_vertex_buffers;
+
+ unsigned dirty; /**< Mask of SP_NEW_x flags */
+
+ /* Counter for occlusion queries. Note this supports overlapping
+ * queries.
+ */
+ uint64_t occlusion_count;
+ unsigned active_query_count;
+
+ /** Mapped vertex buffers */
+ ubyte *mapped_vbuffer[PIPE_MAX_ATTRIBS];
+
+ /** Mapped constant buffers */
+ void *mapped_constants[PIPE_SHADER_TYPES];
+
+ /** Vertex format */
+ struct vertex_info vertex_info;
+ struct vertex_info vertex_info_vbuf;
+
+ /** Which vertex shader output slot contains point size */
+ int psize_slot;
+
+ unsigned reduced_api_prim; /**< PIPE_PRIM_POINTS, _LINES or _TRIANGLES */
+
+ /** Derived from scissor and surface bounds: */
+ struct pipe_scissor_state cliprect;
+
+ unsigned line_stipple_counter;
+
+ /** Software quad rendering pipeline */
+ struct {
+ struct quad_stage *polygon_stipple;
+ struct quad_stage *earlyz;
+ struct quad_stage *shade;
+ struct quad_stage *alpha_test;
+ struct quad_stage *stencil_test;
+ struct quad_stage *depth_test;
+ struct quad_stage *occlusion;
+ struct quad_stage *coverage;
+ struct quad_stage *blend;
+ struct quad_stage *colormask;
+ struct quad_stage *output;
+
+ struct quad_stage *first; /**< points to one of the above stages */
+ } quad[SP_NUM_QUAD_THREADS];
+
+ /** TGSI exec things */
+ struct {
+ struct sp_shader_sampler vert_samplers[PIPE_MAX_SAMPLERS];
+ struct sp_shader_sampler *vert_samplers_list[PIPE_MAX_SAMPLERS];
+ struct sp_shader_sampler frag_samplers[PIPE_MAX_SAMPLERS];
+ struct sp_shader_sampler *frag_samplers_list[PIPE_MAX_SAMPLERS];
+ } tgsi;
+
+ /** The primitive drawing context */
+ struct draw_context *draw;
+ struct draw_stage *setup;
+ struct draw_stage *vbuf;
+ struct softpipe_vbuf_render *vbuf_render;
+
+ boolean dirty_render_cache;
+
+ struct softpipe_tile_cache *cbuf_cache[PIPE_MAX_COLOR_BUFS];
+ struct softpipe_tile_cache *zsbuf_cache;
+
+ struct softpipe_tile_cache *tex_cache[PIPE_MAX_SAMPLERS];
+
+ unsigned use_sse : 1;
+ unsigned dump_fs : 1;
+ unsigned no_rast : 1;
+};
+
+
+static INLINE struct softpipe_context *
+softpipe_context( struct pipe_context *pipe )
+{
+ return (struct softpipe_context *)pipe;
+}
+
+#endif /* SP_CONTEXT_H */
+
diff --git a/src/gallium/drivers/softpipe/sp_draw_arrays.c b/src/gallium/drivers/softpipe/sp_draw_arrays.c
new file mode 100644
index 0000000000..d4045816d0
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_draw_arrays.c
@@ -0,0 +1,194 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors:
+ * Brian Paul
+ * Keith Whitwell
+ */
+
+
+#include "pipe/p_defines.h"
+#include "pipe/p_context.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_inlines.h"
+#include "util/u_prim.h"
+
+#include "sp_context.h"
+#include "sp_state.h"
+
+#include "draw/draw_context.h"
+
+
+
+static void
+softpipe_map_constant_buffers(struct softpipe_context *sp)
+{
+ struct pipe_winsys *ws = sp->pipe.winsys;
+ uint i, size;
+
+ for (i = 0; i < PIPE_SHADER_TYPES; i++) {
+ if (sp->constants[i].buffer && sp->constants[i].buffer->size)
+ sp->mapped_constants[i] = ws->buffer_map(ws, sp->constants[i].buffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ }
+
+ if (sp->constants[PIPE_SHADER_VERTEX].buffer)
+ size = sp->constants[PIPE_SHADER_VERTEX].buffer->size;
+ else
+ size = 0;
+
+ draw_set_mapped_constant_buffer(sp->draw,
+ sp->mapped_constants[PIPE_SHADER_VERTEX],
+ size);
+}
+
+
+static void
+softpipe_unmap_constant_buffers(struct softpipe_context *sp)
+{
+ struct pipe_winsys *ws = sp->pipe.winsys;
+ uint i;
+
+   /* We really need to flush all prims here since the vertex/fragment
+    * shader constant buffers are about to go away.
+    */
+ draw_flush(sp->draw);
+
+ draw_set_mapped_constant_buffer(sp->draw, NULL, 0);
+
+ for (i = 0; i < 2; i++) {
+ if (sp->constants[i].buffer && sp->constants[i].buffer->size)
+ ws->buffer_unmap(ws, sp->constants[i].buffer);
+ sp->mapped_constants[i] = NULL;
+ }
+}
+
+
+boolean
+softpipe_draw_arrays(struct pipe_context *pipe, unsigned mode,
+ unsigned start, unsigned count)
+{
+ return softpipe_draw_elements(pipe, NULL, 0, mode, start, count);
+}
+
+
+/**
+ * Draw vertex arrays, with optional indexing.
+ * Basically, map the vertex buffers (and drawing surfaces), then hand off
+ * the drawing to the 'draw' module.
+ */
+boolean
+softpipe_draw_range_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned min_index,
+ unsigned max_index,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct softpipe_context *sp = softpipe_context(pipe);
+ struct draw_context *draw = sp->draw;
+ unsigned i;
+
+ sp->reduced_api_prim = u_reduced_prim(mode);
+
+ if (sp->dirty)
+ softpipe_update_derived( sp );
+
+ softpipe_map_transfers(sp);
+ softpipe_map_constant_buffers(sp);
+
+ /*
+ * Map vertex buffers
+ */
+ for (i = 0; i < sp->num_vertex_buffers; i++) {
+ void *buf
+ = pipe_buffer_map(pipe->screen,
+ sp->vertex_buffer[i].buffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ draw_set_mapped_vertex_buffer(draw, i, buf);
+ }
+
+ /* Map index buffer, if present */
+ if (indexBuffer) {
+ void *mapped_indexes
+ = pipe_buffer_map(pipe->screen, indexBuffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ draw_set_mapped_element_buffer_range(draw, indexSize,
+ min_index,
+ max_index,
+ mapped_indexes);
+ }
+ else {
+ /* no index/element buffer */
+ draw_set_mapped_element_buffer_range(draw, 0, start,
+ start + count - 1, NULL);
+ }
+
+ /* draw! */
+ draw_arrays(draw, mode, start, count);
+
+ /*
+ * unmap vertex/index buffers - will cause draw module to flush
+ */
+ for (i = 0; i < sp->num_vertex_buffers; i++) {
+ draw_set_mapped_vertex_buffer(draw, i, NULL);
+ pipe_buffer_unmap(pipe->screen, sp->vertex_buffer[i].buffer);
+ }
+ if (indexBuffer) {
+ draw_set_mapped_element_buffer(draw, 0, NULL);
+ pipe_buffer_unmap(pipe->screen, indexBuffer);
+ }
+
+ /* Note: leave drawing surfaces mapped */
+ softpipe_unmap_constant_buffers(sp);
+
+ sp->dirty_render_cache = TRUE;
+
+ return TRUE;
+}
+
+
+boolean
+softpipe_draw_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned mode, unsigned start, unsigned count)
+{
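+   /* The index range is not known here, so pass the widest possible
+    * range down to the range-elements path.
+    */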
+ return softpipe_draw_range_elements( pipe, indexBuffer,
+ indexSize,
+ 0, 0xffffffff,
+ mode, start, count );
+}
+
+
+void
+softpipe_set_edgeflags(struct pipe_context *pipe, const unsigned *edgeflags)
+{
+ struct softpipe_context *sp = softpipe_context(pipe);
+ draw_set_edgeflags(sp->draw, edgeflags);
+}
diff --git a/src/gallium/drivers/softpipe/sp_flush.c b/src/gallium/drivers/softpipe/sp_flush.c
new file mode 100644
index 0000000000..4a14d49686
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_flush.c
@@ -0,0 +1,94 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Author:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "pipe/p_defines.h"
+#include "draw/draw_context.h"
+#include "sp_flush.h"
+#include "sp_context.h"
+#include "sp_surface.h"
+#include "sp_state.h"
+#include "sp_tile_cache.h"
+#include "sp_winsys.h"
+
+
+void
+softpipe_flush( struct pipe_context *pipe,
+ unsigned flags,
+ struct pipe_fence_handle **fence )
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+ uint i;
+
+ draw_flush(softpipe->draw);
+
+ if (flags & PIPE_FLUSH_TEXTURE_CACHE) {
+ for (i = 0; i < softpipe->num_textures; i++) {
+ sp_flush_tile_cache(softpipe, softpipe->tex_cache[i]);
+ }
+ }
+
+ if (flags & PIPE_FLUSH_RENDER_CACHE) {
+ for (i = 0; i < softpipe->framebuffer.nr_cbufs; i++)
+ if (softpipe->cbuf_cache[i])
+ sp_flush_tile_cache(softpipe, softpipe->cbuf_cache[i]);
+
+ if (softpipe->zsbuf_cache)
+ sp_flush_tile_cache(softpipe, softpipe->zsbuf_cache);
+
+      /* Need this call for hardware buffers before swapbuffers.
+       *
+       * There should probably be a separate flush-type function that's
+       * called before swapbuffers, because we don't always want to unmap
+       * surfaces when flushing.
+       */
+ softpipe_unmap_transfers(softpipe);
+
+ softpipe->dirty_render_cache = FALSE;
+ }
+
+ /* Enable to dump BMPs of the color/depth buffers each frame */
+#if 0
+ if(flags & PIPE_FLUSH_FRAME) {
+ static unsigned frame_no = 1;
+ static char filename[256];
+ util_snprintf(filename, sizeof(filename), "cbuf_%u.bmp", frame_no);
+ debug_dump_surface_bmp(filename, softpipe->framebuffer.cbufs[0]);
+ util_snprintf(filename, sizeof(filename), "zsbuf_%u.bmp", frame_no);
+ debug_dump_surface_bmp(filename, softpipe->framebuffer.zsbuf);
+ ++frame_no;
+ }
+#endif
+
+ if (fence)
+ *fence = NULL;
+}
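+
+/* Usage sketch (hypothetical caller, not part of this change):
+ *
+ *    struct pipe_fence_handle *fence = NULL;
+ *    pipe->flush(pipe, PIPE_FLUSH_RENDER_CACHE, &fence);
+ *    // softpipe never returns a real fence, so fence stays NULL here
+ */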
+
diff --git a/src/gallium/drivers/softpipe/sp_flush.h b/src/gallium/drivers/softpipe/sp_flush.h
new file mode 100644
index 0000000000..68d9b5fa83
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_flush.h
@@ -0,0 +1,37 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef SP_FLUSH_H
+#define SP_FLUSH_H
+
+struct pipe_context;
+struct pipe_fence_handle;
+
+void softpipe_flush(struct pipe_context *pipe, unsigned flags,
+ struct pipe_fence_handle **fence);
+
+#endif
diff --git a/src/gallium/drivers/softpipe/sp_fs.h b/src/gallium/drivers/softpipe/sp_fs.h
new file mode 100644
index 0000000000..4792ace3a3
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_fs.h
@@ -0,0 +1,54 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef SP_FS_H
+#define SP_FS_H
+
+struct sp_fragment_shader *
+softpipe_create_fs_exec(struct softpipe_context *softpipe,
+ const struct pipe_shader_state *templ);
+
+struct sp_fragment_shader *
+softpipe_create_fs_sse(struct softpipe_context *softpipe,
+ const struct pipe_shader_state *templ);
+
+struct sp_fragment_shader *
+softpipe_create_fs_llvm(struct softpipe_context *softpipe,
+ const struct pipe_shader_state *templ);
+
+struct tgsi_interp_coef;
+struct tgsi_exec_vector;
+
+void sp_setup_pos_vector(const struct tgsi_interp_coef *coef,
+ float x, float y,
+ struct tgsi_exec_vector *quadpos);
+
+
+#endif
diff --git a/src/gallium/drivers/softpipe/sp_fs_exec.c b/src/gallium/drivers/softpipe/sp_fs_exec.c
new file mode 100644
index 0000000000..9ee86fe787
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_fs_exec.c
@@ -0,0 +1,162 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Execute fragment shader using the TGSI interpreter.
+ */
+
+#include "sp_context.h"
+#include "sp_state.h"
+#include "sp_fs.h"
+#include "sp_quad.h"
+
+#include "pipe/p_state.h"
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "tgsi/tgsi_exec.h"
+#include "tgsi/tgsi_parse.h"
+
+
+/**
+ * Subclass of sp_fragment_shader
+ */
+struct sp_exec_fragment_shader
+{
+ struct sp_fragment_shader base;
+ /* No other members for now */
+};
+
+
+/** cast wrapper */
+static INLINE struct sp_exec_fragment_shader *
+sp_exec_fragment_shader(const struct sp_fragment_shader *base)
+{
+ return (struct sp_exec_fragment_shader *) base;
+}
+
+
+/**
+ * Compute quad X,Y,Z,W for the four fragments in a quad.
+ *
+ * This should really be part of the compiled shader.
+ */
+void
+sp_setup_pos_vector(const struct tgsi_interp_coef *coef,
+ float x, float y,
+ struct tgsi_exec_vector *quadpos)
+{
+ uint chan;
+ /* do X */
+ quadpos->xyzw[0].f[0] = x;
+ quadpos->xyzw[0].f[1] = x + 1;
+ quadpos->xyzw[0].f[2] = x;
+ quadpos->xyzw[0].f[3] = x + 1;
+
+ /* do Y */
+ quadpos->xyzw[1].f[0] = y;
+ quadpos->xyzw[1].f[1] = y;
+ quadpos->xyzw[1].f[2] = y + 1;
+ quadpos->xyzw[1].f[3] = y + 1;
+
+ /* do Z and W for all fragments in the quad */
+ for (chan = 2; chan < 4; chan++) {
+ const float dadx = coef->dadx[chan];
+ const float dady = coef->dady[chan];
+ const float a0 = coef->a0[chan] + dadx * x + dady * y;
+ quadpos->xyzw[chan].f[0] = a0;
+ quadpos->xyzw[chan].f[1] = a0 + dadx;
+ quadpos->xyzw[chan].f[2] = a0 + dady;
+ quadpos->xyzw[chan].f[3] = a0 + dadx + dady;
+ }
+}
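+
+/* For reference: each of Z and W is a plane equation
+ *    a(x, y) = a0 + dadx * x + dady * y
+ * so the four fragments of the quad at (x, y) evaluate to
+ *    a(x, y), a(x + 1, y), a(x, y + 1), a(x + 1, y + 1)
+ * which is the a0, a0 + dadx, a0 + dady, a0 + dadx + dady pattern above.
+ */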
+
+
+static void
+exec_prepare( const struct sp_fragment_shader *base,
+ struct tgsi_exec_machine *machine,
+ struct tgsi_sampler **samplers )
+{
+ /*
+ * Bind tokens/shader to the interpreter's machine state.
+ * Avoid redundant binding.
+ */
+ if (machine->Tokens != base->shader.tokens) {
+ tgsi_exec_machine_bind_shader( machine,
+ base->shader.tokens,
+ PIPE_MAX_SAMPLERS,
+ samplers );
+ }
+}
+
+
+/* TODO: hide the machine struct in here somewhere, remove from this
+ * interface:
+ */
+static unsigned
+exec_run( const struct sp_fragment_shader *base,
+ struct tgsi_exec_machine *machine,
+ struct quad_header *quad )
+{
+ /* Compute X, Y, Z, W vals for this quad */
+ sp_setup_pos_vector(quad->posCoef,
+ (float)quad->input.x0, (float)quad->input.y0,
+ &machine->QuadPos);
+
+ return tgsi_exec_machine_run( machine );
+}
+
+
+static void
+exec_delete( struct sp_fragment_shader *base )
+{
+ FREE((void *) base->shader.tokens);
+ FREE(base);
+}
+
+
+struct sp_fragment_shader *
+softpipe_create_fs_exec(struct softpipe_context *softpipe,
+ const struct pipe_shader_state *templ)
+{
+ struct sp_exec_fragment_shader *shader;
+
+   /* This is the interpreter variant: the TGSI tokens are executed
+    * directly, so no code generation happens here.
+    */
+
+ shader = CALLOC_STRUCT(sp_exec_fragment_shader);
+ if (!shader)
+ return NULL;
+
+ /* we need to keep a local copy of the tokens */
+ shader->base.shader.tokens = tgsi_dup_tokens(templ->tokens);
+ shader->base.prepare = exec_prepare;
+ shader->base.run = exec_run;
+ shader->base.delete = exec_delete;
+
+ return &shader->base;
+}
diff --git a/src/gallium/drivers/softpipe/sp_fs_llvm.c b/src/gallium/drivers/softpipe/sp_fs_llvm.c
new file mode 100644
index 0000000000..95c0d982d1
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_fs_llvm.c
@@ -0,0 +1,205 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Execute fragment shader using LLVM code generation.
+ * Authors:
+ * Zack Rusin
+ */
+
+#include "sp_context.h"
+#include "sp_state.h"
+#include "sp_fs.h"
+
+#include "pipe/p_state.h"
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "tgsi/tgsi_sse2.h"
+
+#if 0
+
+/**
+ * Subclass of sp_fragment_shader
+ */
+struct sp_llvm_fragment_shader
+{
+ struct sp_fragment_shader base;
+ struct gallivm_prog *llvm_prog;
+};
+
+
+static void
+shade_quad_llvm(struct quad_stage *qs,
+ struct quad_header *quad)
+{
+ struct quad_shade_stage *qss = quad_shade_stage(qs);
+ struct softpipe_context *softpipe = qs->softpipe;
+ float dests[4][16][4] ALIGN16_ATTRIB;
+ float inputs[4][16][4] ALIGN16_ATTRIB;
+ const float fx = (float) quad->x0;
+ const float fy = (float) quad->y0;
+ struct gallivm_prog *llvm = qss->llvm_prog;
+
+ inputs[0][0][0] = fx;
+ inputs[1][0][0] = fx + 1.0f;
+ inputs[2][0][0] = fx;
+ inputs[3][0][0] = fx + 1.0f;
+
+ inputs[0][0][1] = fy;
+ inputs[1][0][1] = fy;
+ inputs[2][0][1] = fy + 1.0f;
+ inputs[3][0][1] = fy + 1.0f;
+
+
+ gallivm_prog_inputs_interpolate(llvm, inputs, quad->coef);
+
+#if DLLVM
+ debug_printf("MASK = %d\n", quad->mask);
+ for (int i = 0; i < 4; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ debug_printf("IN(%d,%d) [%f %f %f %f]\n", i, j,
+ inputs[i][j][0], inputs[i][j][1], inputs[i][j][2], inputs[i][j][3]);
+ }
+ }
+#endif
+
+ quad->mask &=
+ gallivm_fragment_shader_exec(llvm, fx, fy, dests, inputs,
+ softpipe->mapped_constants[PIPE_SHADER_FRAGMENT],
+ qss->samplers);
+#if DLLVM
+ debug_printf("OUT LLVM = 1[%f %f %f %f], 2[%f %f %f %f]\n",
+ dests[0][0][0], dests[0][0][1], dests[0][0][2], dests[0][0][3],
+ dests[0][1][0], dests[0][1][1], dests[0][1][2], dests[0][1][3]);
+#endif
+
+ /* store result color */
+ if (qss->colorOutSlot >= 0) {
+ unsigned i;
+ /* XXX need to handle multiple color outputs someday */
+      assert(qss->stage.softpipe->fs->info.output_semantic_name[qss->colorOutSlot]
+ == TGSI_SEMANTIC_COLOR);
+ for (i = 0; i < QUAD_SIZE; ++i) {
+ quad->outputs.color[0][0][i] = dests[i][qss->colorOutSlot][0];
+ quad->outputs.color[0][1][i] = dests[i][qss->colorOutSlot][1];
+ quad->outputs.color[0][2][i] = dests[i][qss->colorOutSlot][2];
+ quad->outputs.color[0][3][i] = dests[i][qss->colorOutSlot][3];
+ }
+ }
+#if DLLVM
+ for (int i = 0; i < QUAD_SIZE; ++i) {
+ debug_printf("QLLVM%d(%d) [%f, %f, %f, %f]\n", i, qss->colorOutSlot,
+ quad->outputs.color[0][0][i],
+ quad->outputs.color[0][1][i],
+ quad->outputs.color[0][2][i],
+ quad->outputs.color[0][3][i]);
+ }
+#endif
+
+ /* store result Z */
+ if (qss->depthOutSlot >= 0) {
+ /* output[slot] is new Z */
+ uint i;
+ for (i = 0; i < 4; i++) {
+ quad->outputs.depth[i] = dests[i][0][2];
+ }
+ }
+ else {
+ /* copy input Z (which was interpolated by the executor) to output Z */
+ uint i;
+ for (i = 0; i < 4; i++) {
+ quad->outputs.depth[i] = inputs[i][0][2];
+ }
+ }
+#if DLLVM
+ debug_printf("D [%f, %f, %f, %f] mask = %d\n",
+ quad->outputs.depth[0],
+ quad->outputs.depth[1],
+ quad->outputs.depth[2],
+ quad->outputs.depth[3], quad->mask);
+#endif
+
+ /* shader may cull fragments */
+ if( quad->mask ) {
+ qs->next->run( qs->next, quad );
+ }
+}
+
+
+unsigned
+run_llvm_fs( const struct sp_fragment_shader *base,
+ struct foo *machine )
+{
+   /* never called -- the whole LLVM path is compiled out */
+   return 0;
+}
+
+
+void
+delete_llvm_fs( struct sp_fragment_shader *base )
+{
+ FREE(base);
+}
+
+
+struct sp_fragment_shader *
+softpipe_create_fs_llvm(struct softpipe_context *softpipe,
+ const struct pipe_shader_state *templ)
+{
+ struct sp_llvm_fragment_shader *shader = NULL;
+
+ /* LLVM fragment shaders currently disabled:
+ */
+   shader = CALLOC_STRUCT(sp_llvm_fragment_shader);
+   if (!shader)
+      return NULL;
+
+   shader->llvm_prog = NULL;
+
+   if (!gallivm_global_cpu_engine()) {
+      gallivm_cpu_engine_create(shader->llvm_prog);
+   }
+   else
+      gallivm_cpu_jit_compile(gallivm_global_cpu_engine(), shader->llvm_prog);
+
+ if (shader) {
+ shader->base.run = run_llvm_fs;
+ shader->base.delete = delete_llvm_fs;
+ }
+
+ return shader;
+}
+
+
+#else
+
+struct sp_fragment_shader *
+softpipe_create_fs_llvm(struct softpipe_context *softpipe,
+ const struct pipe_shader_state *templ)
+{
+ return NULL;
+}
+
+#endif
diff --git a/src/gallium/drivers/softpipe/sp_fs_sse.c b/src/gallium/drivers/softpipe/sp_fs_sse.c
new file mode 100644
index 0000000000..f4fa0905d7
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_fs_sse.c
@@ -0,0 +1,169 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Execute fragment shader using runtime SSE code generation.
+ */
+
+#include "sp_context.h"
+#include "sp_state.h"
+#include "sp_fs.h"
+#include "sp_quad.h"
+
+#include "pipe/p_state.h"
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "tgsi/tgsi_exec.h"
+#include "tgsi/tgsi_sse2.h"
+
+
+#if defined(PIPE_ARCH_X86)
+
+#include "rtasm/rtasm_x86sse.h"
+
+
+
+/**
+ * Subclass of sp_fragment_shader
+ */
+struct sp_sse_fragment_shader
+{
+ struct sp_fragment_shader base;
+ struct x86_function sse2_program;
+ tgsi_sse2_fs_function func;
+ float immediates[TGSI_EXEC_NUM_IMMEDIATES][4];
+};
+
+
+/** cast wrapper */
+static INLINE struct sp_sse_fragment_shader *
+sp_sse_fragment_shader(const struct sp_fragment_shader *base)
+{
+ return (struct sp_sse_fragment_shader *) base;
+}
+
+
+static void
+fs_sse_prepare( const struct sp_fragment_shader *base,
+ struct tgsi_exec_machine *machine,
+ struct tgsi_sampler **samplers )
+{
+ machine->Samplers = samplers;
+}
+
+
+/* TODO: codegenerate the whole run function, skip this wrapper.
+ * TODO: break dependency on tgsi_exec_machine struct
+ * TODO: push Position calculation into the generated shader
+ * TODO: process >1 quad at a time
+ */
+static unsigned
+fs_sse_run( const struct sp_fragment_shader *base,
+ struct tgsi_exec_machine *machine,
+ struct quad_header *quad )
+{
+ struct sp_sse_fragment_shader *shader = sp_sse_fragment_shader(base);
+
+ /* Compute X, Y, Z, W vals for this quad -- place in temp[0] for now */
+ sp_setup_pos_vector(quad->posCoef,
+ (float)quad->input.x0, (float)quad->input.y0,
+ machine->Temps);
+
+ /* init kill mask */
+ tgsi_set_kill_mask(machine, 0x0);
+ tgsi_set_exec_mask(machine, 1, 1, 1, 1);
+
+ shader->func( machine,
+ machine->Consts,
+ (const float (*)[4])shader->immediates,
+ machine->InterpCoefs
+ // , &machine->QuadPos
+ );
+
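+   /* The generated code accumulates a kill mask of discarded fragments;
+    * return its complement so the caller sees which fragments survive.
+    */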
+ return ~(machine->Temps[TGSI_EXEC_TEMP_KILMASK_I].xyzw[TGSI_EXEC_TEMP_KILMASK_C].u[0]);
+}
+
+
+static void
+fs_sse_delete( struct sp_fragment_shader *base )
+{
+ struct sp_sse_fragment_shader *shader = sp_sse_fragment_shader(base);
+
+ x86_release_func( &shader->sse2_program );
+ FREE(shader);
+}
+
+
+struct sp_fragment_shader *
+softpipe_create_fs_sse(struct softpipe_context *softpipe,
+ const struct pipe_shader_state *templ)
+{
+ struct sp_sse_fragment_shader *shader;
+
+ if (!softpipe->use_sse)
+ return NULL;
+
+ shader = CALLOC_STRUCT(sp_sse_fragment_shader);
+ if (!shader)
+ return NULL;
+
+ x86_init_func( &shader->sse2_program );
+
+ if (!tgsi_emit_sse2( templ->tokens, &shader->sse2_program,
+ shader->immediates, FALSE )) {
+ FREE(shader);
+ return NULL;
+ }
+
+ shader->func = (tgsi_sse2_fs_function) x86_get_func( &shader->sse2_program );
+ if (!shader->func) {
+ x86_release_func( &shader->sse2_program );
+ FREE(shader);
+ return NULL;
+ }
+
+ shader->base.shader.tokens = NULL; /* don't hold reference to templ->tokens */
+ shader->base.prepare = fs_sse_prepare;
+ shader->base.run = fs_sse_run;
+ shader->base.delete = fs_sse_delete;
+
+ return &shader->base;
+}
+
+
+#else
+
+/* Fallback stub for non-x86 builds.
+ * This variant could perhaps live in the header file.
+ */
+struct sp_fragment_shader *
+softpipe_create_fs_sse(struct softpipe_context *softpipe,
+ const struct pipe_shader_state *templ)
+{
+ return NULL;
+}
+
+#endif
diff --git a/src/gallium/drivers/softpipe/sp_prim_setup.c b/src/gallium/drivers/softpipe/sp_prim_setup.c
new file mode 100644
index 0000000000..038ff04d4f
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_prim_setup.c
@@ -0,0 +1,190 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \brief A draw stage that drives our triangle setup routines from
+ * within the draw pipeline. One of two ways to drive setup, the
+ * other being in sp_prim_vbuf.c.
+ *
+ * \author Keith Whitwell <keith@tungstengraphics.com>
+ * \author Brian Paul
+ */
+
+
+#include "sp_context.h"
+#include "sp_setup.h"
+#include "sp_state.h"
+#include "sp_prim_setup.h"
+#include "draw/draw_pipe.h"
+#include "draw/draw_vertex.h"
+#include "util/u_memory.h"
+
+/**
+ * Triangle setup info (derived from draw_stage).
+ * Also used for line drawing (taking some liberties).
+ */
+struct setup_stage {
+ struct draw_stage stage; /**< This must be first (base class) */
+
+ struct setup_context *setup;
+};
+
+
+
+/**
+ * Basically a cast wrapper.
+ */
+static INLINE struct setup_stage *setup_stage( struct draw_stage *stage )
+{
+ return (struct setup_stage *)stage;
+}
+
+
+typedef const float (*cptrf4)[4];
+
+static void
+do_tri(struct draw_stage *stage, struct prim_header *prim)
+{
+ struct setup_stage *setup = setup_stage( stage );
+
+ setup_tri( setup->setup,
+ (cptrf4)prim->v[0]->data,
+ (cptrf4)prim->v[1]->data,
+ (cptrf4)prim->v[2]->data );
+}
+
+static void
+do_line(struct draw_stage *stage, struct prim_header *prim)
+{
+ struct setup_stage *setup = setup_stage( stage );
+
+ setup_line( setup->setup,
+ (cptrf4)prim->v[0]->data,
+ (cptrf4)prim->v[1]->data );
+}
+
+static void
+do_point(struct draw_stage *stage, struct prim_header *prim)
+{
+ struct setup_stage *setup = setup_stage( stage );
+
+ setup_point( setup->setup,
+ (cptrf4)prim->v[0]->data );
+}
+
+
+
+
+static void setup_begin( struct draw_stage *stage )
+{
+ struct setup_stage *setup = setup_stage(stage);
+
+ setup_prepare( setup->setup );
+
+ stage->point = do_point;
+ stage->line = do_line;
+ stage->tri = do_tri;
+}
+
+
+static void setup_first_point( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ setup_begin(stage);
+ stage->point( stage, header );
+}
+
+static void setup_first_line( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ setup_begin(stage);
+ stage->line( stage, header );
+}
+
+
+static void setup_first_tri( struct draw_stage *stage,
+ struct prim_header *header )
+{
+ setup_begin(stage);
+ stage->tri( stage, header );
+}
+
+
+
+static void setup_flush( struct draw_stage *stage,
+ unsigned flags )
+{
+ stage->point = setup_first_point;
+ stage->line = setup_first_line;
+ stage->tri = setup_first_tri;
+}
+
+
+static void reset_stipple_counter( struct draw_stage *stage )
+{
+}
+
+
+static void render_destroy( struct draw_stage *stage )
+{
+ struct setup_stage *ssetup = setup_stage(stage);
+ setup_destroy_context(ssetup->setup);
+ FREE( stage );
+}
+
+
+/**
+ * Create a new primitive setup/render stage.
+ */
+struct draw_stage *sp_draw_render_stage( struct softpipe_context *softpipe )
+{
+ struct setup_stage *sstage = CALLOC_STRUCT(setup_stage);
+
+ sstage->setup = setup_create_context(softpipe);
+ sstage->stage.draw = softpipe->draw;
+ sstage->stage.point = setup_first_point;
+ sstage->stage.line = setup_first_line;
+ sstage->stage.tri = setup_first_tri;
+ sstage->stage.flush = setup_flush;
+ sstage->stage.reset_stipple_counter = reset_stipple_counter;
+ sstage->stage.destroy = render_destroy;
+
+ return (struct draw_stage *)sstage;
+}
+
+struct setup_context *
+sp_draw_setup_context( struct draw_stage *stage )
+{
+ struct setup_stage *ssetup = setup_stage(stage);
+ return ssetup->setup;
+}
+
+void
+sp_draw_flush( struct draw_stage *stage )
+{
+ stage->flush( stage, 0 );
+}
diff --git a/src/gallium/drivers/softpipe/sp_prim_setup.h b/src/gallium/drivers/softpipe/sp_prim_setup.h
new file mode 100644
index 0000000000..49bdd98ed8
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_prim_setup.h
@@ -0,0 +1,85 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef SP_PRIM_SETUP_H
+#define SP_PRIM_SETUP_H
+
+
+/**
+ * vbuf is a special stage to gather the stream of triangles, lines, points
+ * together and reconstruct vertex buffers for hardware upload.
+ *
+ * First attempt, work in progress.
+ *
+ * TODO:
+ * - separate out vertex buffer building and primitive emit, i.e. >1 draw per vb.
+ * - tell vbuf stage how to build hw vertices directly
+ * - pass vbuf stage a buffer pointer for direct emit to agp/vram.
+ *
+ *
+ *
+ * Vertices are just an array of floats, with all the attributes
+ * packed. We currently assume a layout like:
+ *
+ * attr[0][0..3] - window position
+ * attr[1..n][0..3] - remaining attributes.
+ *
+ * Attributes are assumed to be 4 floats wide but are packed so that
+ * all the enabled attributes run contiguously.
+ */
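+
+/* Illustrative sketch of that layout (not part of this change), assuming
+ * every enabled attribute really is emitted as 4 floats.  num_attrs is a
+ * hypothetical per-vertex attribute count:
+ *
+ *    const float *vtx  = (const float *) vertex_buffer;
+ *    const float *attr = vtx + (i * num_attrs + j) * 4;
+ *    // attr[0..3] hold x, y, z, w of attribute j of vertex i
+ *    // attribute 0 is the window position
+ */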
+
+
+struct draw_stage;
+struct softpipe_context;
+
+
+typedef void (*vbuf_draw_func)( struct pipe_context *pipe,
+ unsigned prim,
+ const ushort *elements,
+ unsigned nr_elements,
+ const void *vertex_buffer,
+ unsigned nr_vertices );
+
+
+extern struct draw_stage *
+sp_draw_render_stage( struct softpipe_context *softpipe );
+
+extern struct setup_context *
+sp_draw_setup_context( struct draw_stage * );
+
+extern void
+sp_draw_flush( struct draw_stage * );
+
+
+extern struct draw_stage *
+sp_draw_vbuf_stage( struct draw_context *draw_context,
+ struct pipe_context *pipe,
+ vbuf_draw_func draw );
+
+
+#endif /* SP_PRIM_SETUP_H */
diff --git a/src/gallium/drivers/softpipe/sp_prim_vbuf.c b/src/gallium/drivers/softpipe/sp_prim_vbuf.c
new file mode 100644
index 0000000000..42021789ea
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_prim_vbuf.c
@@ -0,0 +1,585 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Interface between 'draw' module's output and the softpipe rasterizer/setup
 + * code. When the 'draw' module has finished filling a vertex buffer, the
 + * draw() / draw_arrays() callbacks below are called. They loop over the
 + * vertices and call the point/line/tri setup functions.
 + *
 + * Authors:
+ * Brian Paul
+ */
+
+
+#include "sp_context.h"
+#include "sp_state.h"
+#include "sp_prim_vbuf.h"
+#include "sp_prim_setup.h"
+#include "sp_setup.h"
+#include "draw/draw_context.h"
+#include "draw/draw_vbuf.h"
+#include "util/u_memory.h"
+
+
+#define SP_MAX_VBUF_INDEXES 1024
+#define SP_MAX_VBUF_SIZE 4096
+
+typedef const float (*cptrf4)[4];
+
+/**
+ * Subclass of vbuf_render.
+ */
+struct softpipe_vbuf_render
+{
+ struct vbuf_render base;
+ struct softpipe_context *softpipe;
+ uint prim;
+ uint vertex_size;
+ uint nr_vertices;
+ uint vertex_buffer_size;
+ void *vertex_buffer;
+};
+
+
+/** cast wrapper */
+static struct softpipe_vbuf_render *
+softpipe_vbuf_render(struct vbuf_render *vbr)
+{
+ return (struct softpipe_vbuf_render *) vbr;
+}
+
+
+static const struct vertex_info *
+sp_vbuf_get_vertex_info(struct vbuf_render *vbr)
+{
+ struct softpipe_vbuf_render *cvbr = softpipe_vbuf_render(vbr);
+ return softpipe_get_vbuf_vertex_info(cvbr->softpipe);
+}
+
+
+static boolean
+sp_vbuf_allocate_vertices(struct vbuf_render *vbr,
+ ushort vertex_size, ushort nr_vertices)
+{
+ struct softpipe_vbuf_render *cvbr = softpipe_vbuf_render(vbr);
+ unsigned size = vertex_size * nr_vertices;
+
+ if (cvbr->vertex_buffer_size < size) {
+ align_free(cvbr->vertex_buffer);
+ cvbr->vertex_buffer = align_malloc(size, 16);
+ cvbr->vertex_buffer_size = size;
+ }
+
+ cvbr->vertex_size = vertex_size;
+ cvbr->nr_vertices = nr_vertices;
+
+ return cvbr->vertex_buffer != NULL;
+}
+
+static void
+sp_vbuf_release_vertices(struct vbuf_render *vbr)
+{
+#if 0
+ {
+ struct softpipe_vbuf_render *cvbr = softpipe_vbuf_render(vbr);
+ const struct vertex_info *info =
+ softpipe_get_vbuf_vertex_info(cvbr->softpipe);
+ const float *vtx = (const float *) cvbr->vertex_buffer;
+ uint i, j;
+ debug_printf("%s (vtx_size = %u, vtx_used = %u)\n",
+ __FUNCTION__, cvbr->vertex_size, cvbr->nr_vertices);
+ for (i = 0; i < cvbr->nr_vertices; i++) {
+ for (j = 0; j < info->num_attribs; j++) {
+ uint k;
+ switch (info->attrib[j].emit) {
+ case EMIT_4F: k = 4; break;
+ case EMIT_3F: k = 3; break;
+ case EMIT_2F: k = 2; break;
+ case EMIT_1F: k = 1; break;
+ default: assert(0);
+ }
+ debug_printf("Vert %u attr %u: ", i, j);
+ while (k-- > 0) {
+ debug_printf("%g ", vtx[0]);
+ vtx++;
+ }
+ debug_printf("\n");
+ }
+ }
+ }
+#endif
+
+ /* keep the old allocation for next time */
+}
+
+static void *
+sp_vbuf_map_vertices(struct vbuf_render *vbr)
+{
+ struct softpipe_vbuf_render *cvbr = softpipe_vbuf_render(vbr);
+ return cvbr->vertex_buffer;
+}
+
+static void
+sp_vbuf_unmap_vertices(struct vbuf_render *vbr,
+ ushort min_index,
+ ushort max_index )
+{
+ struct softpipe_vbuf_render *cvbr = softpipe_vbuf_render(vbr);
+ assert( cvbr->vertex_buffer_size >= (max_index+1) * cvbr->vertex_size );
+ /* do nothing */
+}
+
+
+static boolean
+sp_vbuf_set_primitive(struct vbuf_render *vbr, unsigned prim)
+{
+ struct softpipe_vbuf_render *cvbr = softpipe_vbuf_render(vbr);
+
+ /* XXX: break this dependency - make setup_context live under
+ * softpipe, rename the old "setup" draw stage to something else.
+ */
+ struct setup_context *setup_ctx = sp_draw_setup_context(cvbr->softpipe->setup);
+
+ setup_prepare( setup_ctx );
+
+ cvbr->prim = prim;
+   return TRUE;
+}
+
+
+static INLINE cptrf4 get_vert( const void *vertex_buffer,
+ int index,
+ int stride )
+{
+ return (cptrf4)((char *)vertex_buffer + index * stride);
+}
+
+
+/**
+ * draw elements / indexed primitives
+ */
+static void
+sp_vbuf_draw(struct vbuf_render *vbr, const ushort *indices, uint nr)
+{
+ struct softpipe_vbuf_render *cvbr = softpipe_vbuf_render(vbr);
+ struct softpipe_context *softpipe = cvbr->softpipe;
+ const unsigned stride = softpipe->vertex_info_vbuf.size * sizeof(float);
+ const void *vertex_buffer = cvbr->vertex_buffer;
+ unsigned i;
+
+ /* XXX: break this dependency - make setup_context live under
+ * softpipe, rename the old "setup" draw stage to something else.
+ */
+ struct draw_stage *setup = softpipe->setup;
+ struct setup_context *setup_ctx = sp_draw_setup_context(setup);
+
+ switch (cvbr->prim) {
+ case PIPE_PRIM_POINTS:
+ for (i = 0; i < nr; i++) {
+ setup_point( setup_ctx,
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ break;
+
+ case PIPE_PRIM_LINES:
+ for (i = 1; i < nr; i += 2) {
+ setup_line( setup_ctx,
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ break;
+
+ case PIPE_PRIM_LINE_STRIP:
+ for (i = 1; i < nr; i ++) {
+ setup_line( setup_ctx,
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ break;
+
+ case PIPE_PRIM_LINE_LOOP:
+ for (i = 1; i < nr; i ++) {
+ setup_line( setup_ctx,
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ if (nr) {
+ setup_line( setup_ctx,
+ get_vert(vertex_buffer, indices[nr-1], stride),
+ get_vert(vertex_buffer, indices[0], stride) );
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLES:
+ for (i = 2; i < nr; i += 3) {
+ if (softpipe->rasterizer->flatshade_first) {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride),
+ get_vert(vertex_buffer, indices[i-2], stride) );
+ }
+ else {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-2], stride),
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLE_STRIP:
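+      /* The (i&1) terms swap two of the vertices on odd-numbered
+       * triangles so every triangle in the strip keeps the same winding.
+       */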
+ for (i = 2; i < nr; i += 1) {
+ if (softpipe->rasterizer->flatshade_first) {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i+(i&1)-1], stride),
+ get_vert(vertex_buffer, indices[i-(i&1)], stride),
+ get_vert(vertex_buffer, indices[i-2], stride) );
+ }
+ else {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i+(i&1)-2], stride),
+ get_vert(vertex_buffer, indices[i-(i&1)-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLE_FAN:
+ for (i = 2; i < nr; i += 1) {
+ if (softpipe->rasterizer->flatshade_first) {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-0], stride),
+ get_vert(vertex_buffer, indices[0], stride),
+ get_vert(vertex_buffer, indices[i-1], stride) );
+ }
+ else {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[0], stride),
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_QUADS:
+ for (i = 3; i < nr; i += 4) {
+ if (softpipe->rasterizer->flatshade_first) {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-2], stride),
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-3], stride) );
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride),
+ get_vert(vertex_buffer, indices[i-3], stride) );
+ }
+ else {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-3], stride),
+ get_vert(vertex_buffer, indices[i-2], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-2], stride),
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_QUAD_STRIP:
+ for (i = 3; i < nr; i += 2) {
+ if (softpipe->rasterizer->flatshade_first) {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-0], stride),
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-3], stride));
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-2], stride),
+ get_vert(vertex_buffer, indices[i-0], stride),
+ get_vert(vertex_buffer, indices[i-3], stride) );
+ }
+ else {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-3], stride),
+ get_vert(vertex_buffer, indices[i-2], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[i-3], stride),
+ get_vert(vertex_buffer, indices[i-0], stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_POLYGON:
+      /* Almost the same as a tri fan, but the _first_ vertex specifies the
+       * flat-shading color. Note that the first polygon vertex is passed as
+       * the last triangle vertex here, so the flatshade_first state makes
+       * no difference.
+       */
+ for (i = 2; i < nr; i += 1) {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, indices[i-0], stride),
+ get_vert(vertex_buffer, indices[i-1], stride),
+ get_vert(vertex_buffer, indices[0], stride) );
+ }
+ break;
+
+ default:
+ assert(0);
+ }
+
+ /* XXX: why are we calling this??? If we had to call something, it
+ * would be a function in sp_setup.c:
+ */
+ sp_draw_flush( setup );
+}
+
+
+/**
+ * This function is hit when the draw module is working in pass-through mode.
+ * It's up to us to convert the vertex array into point/line/tri prims.
+ */
+static void
+sp_vbuf_draw_arrays(struct vbuf_render *vbr, uint start, uint nr)
+{
+ struct softpipe_vbuf_render *cvbr = softpipe_vbuf_render(vbr);
+ struct softpipe_context *softpipe = cvbr->softpipe;
+ const unsigned stride = softpipe->vertex_info_vbuf.size * sizeof(float);
+ const void *vertex_buffer =
+ (void *) get_vert(cvbr->vertex_buffer, start, stride);
+ unsigned i;
+
+ /* XXX: break this dependency - make setup_context live under
+ * softpipe, rename the old "setup" draw stage to something else.
+ */
+ struct draw_stage *setup = softpipe->setup;
+ struct setup_context *setup_ctx = sp_draw_setup_context(setup);
+
+ switch (cvbr->prim) {
+ case PIPE_PRIM_POINTS:
+ for (i = 0; i < nr; i++) {
+ setup_point( setup_ctx,
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ break;
+
+ case PIPE_PRIM_LINES:
+ for (i = 1; i < nr; i += 2) {
+ setup_line( setup_ctx,
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ break;
+
+ case PIPE_PRIM_LINE_STRIP:
+ for (i = 1; i < nr; i ++) {
+ setup_line( setup_ctx,
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ break;
+
+ case PIPE_PRIM_LINE_LOOP:
+ for (i = 1; i < nr; i ++) {
+ setup_line( setup_ctx,
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ if (nr) {
+ setup_line( setup_ctx,
+ get_vert(vertex_buffer, nr-1, stride),
+ get_vert(vertex_buffer, 0, stride) );
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLES:
+ for (i = 2; i < nr; i += 3) {
+ if (softpipe->rasterizer->flatshade_first) {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride),
+ get_vert(vertex_buffer, i-2, stride) );
+ }
+ else {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-2, stride),
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ for (i = 2; i < nr; i++) {
+ if (softpipe->rasterizer->flatshade_first) {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i+(i&1)-1, stride),
+ get_vert(vertex_buffer, i-(i&1), stride),
+ get_vert(vertex_buffer, i-2, stride) );
+ }
+ else {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i+(i&1)-2, stride),
+ get_vert(vertex_buffer, i-(i&1)-1, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_TRIANGLE_FAN:
+ for (i = 2; i < nr; i += 1) {
+ if (softpipe->rasterizer->flatshade_first) {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-0, stride),
+ get_vert(vertex_buffer, 0, stride),
+ get_vert(vertex_buffer, i-1, stride) );
+ }
+ else {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, 0, stride),
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_QUADS:
+ for (i = 3; i < nr; i += 4) {
+ if (softpipe->rasterizer->flatshade_first) {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-2, stride),
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-3, stride) );
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride),
+ get_vert(vertex_buffer, i-3, stride) );
+ }
+ else {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-3, stride),
+ get_vert(vertex_buffer, i-2, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-2, stride),
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_QUAD_STRIP:
+ for (i = 3; i < nr; i += 2) {
+ if (softpipe->rasterizer->flatshade_first) {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-0, stride),
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-3, stride) );
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-2, stride),
+ get_vert(vertex_buffer, i-0, stride),
+ get_vert(vertex_buffer, i-3, stride) );
+ }
+ else {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-3, stride),
+ get_vert(vertex_buffer, i-2, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-3, stride),
+ get_vert(vertex_buffer, i-0, stride) );
+ }
+ }
+ break;
+
+ case PIPE_PRIM_POLYGON:
+      /* Almost the same as a tri fan, but the _first_ vertex specifies the
+       * flat-shading color. Note that the first polygon vertex is passed as
+       * the last triangle vertex here, so the flatshade_first state makes
+       * no difference.
+       */
+ for (i = 2; i < nr; i += 1) {
+ setup_tri( setup_ctx,
+ get_vert(vertex_buffer, i-1, stride),
+ get_vert(vertex_buffer, i-0, stride),
+ get_vert(vertex_buffer, 0, stride) );
+ }
+ break;
+
+ default:
+ assert(0);
+ }
+}
+
+
+
+static void
+sp_vbuf_destroy(struct vbuf_render *vbr)
+{
+ struct softpipe_vbuf_render *cvbr = softpipe_vbuf_render(vbr);
+ cvbr->softpipe->vbuf_render = NULL;
+ FREE(cvbr);
+}
+
+
+/**
+ * Initialize the post-transform vertex buffer information for the given
+ * context.
+ */
+void
+sp_init_vbuf(struct softpipe_context *sp)
+{
+ assert(sp->draw);
+
+ sp->vbuf_render = CALLOC_STRUCT(softpipe_vbuf_render);
+
+ sp->vbuf_render->base.max_indices = SP_MAX_VBUF_INDEXES;
+ sp->vbuf_render->base.max_vertex_buffer_bytes = SP_MAX_VBUF_SIZE;
+
+ sp->vbuf_render->base.get_vertex_info = sp_vbuf_get_vertex_info;
+ sp->vbuf_render->base.allocate_vertices = sp_vbuf_allocate_vertices;
+ sp->vbuf_render->base.map_vertices = sp_vbuf_map_vertices;
+ sp->vbuf_render->base.unmap_vertices = sp_vbuf_unmap_vertices;
+ sp->vbuf_render->base.set_primitive = sp_vbuf_set_primitive;
+ sp->vbuf_render->base.draw = sp_vbuf_draw;
+ sp->vbuf_render->base.draw_arrays = sp_vbuf_draw_arrays;
+ sp->vbuf_render->base.release_vertices = sp_vbuf_release_vertices;
+ sp->vbuf_render->base.destroy = sp_vbuf_destroy;
+
+ sp->vbuf_render->softpipe = sp;
+
+ sp->vbuf = draw_vbuf_stage(sp->draw, &sp->vbuf_render->base);
+
+ draw_set_rasterize_stage(sp->draw, sp->vbuf);
+
+ draw_set_render(sp->draw, &sp->vbuf_render->base);
+}
diff --git a/src/gallium/drivers/softpipe/sp_prim_vbuf.h b/src/gallium/drivers/softpipe/sp_prim_vbuf.h
new file mode 100644
index 0000000000..1de9cc2a89
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_prim_vbuf.h
@@ -0,0 +1,38 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef SP_VBUF_H
+#define SP_VBUF_H
+
+
+struct softpipe_context;
+
+extern void
+sp_init_vbuf(struct softpipe_context *softpipe);
+
+
+#endif /* SP_VBUF_H */
diff --git a/src/gallium/drivers/softpipe/sp_quad.h b/src/gallium/drivers/softpipe/sp_quad.h
new file mode 100644
index 0000000000..bd6c6cb912
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_quad.h
@@ -0,0 +1,106 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef SP_QUAD_H
+#define SP_QUAD_H
+
+#include "pipe/p_state.h"
+#include "tgsi/tgsi_exec.h"
+
+
+#define QUAD_PRIM_POINT 1
+#define QUAD_PRIM_LINE 2
+#define QUAD_PRIM_TRI 3
+
+
+/* The rasterizer generates 2x2 quads of fragments and feeds them to
+ * the current fp_machine (see below).
+ * Remember that Y=0=top with Y increasing down the window.
+ */
+#define QUAD_TOP_LEFT 0
+#define QUAD_TOP_RIGHT 1
+#define QUAD_BOTTOM_LEFT 2
+#define QUAD_BOTTOM_RIGHT 3
+
+#define MASK_TOP_LEFT (1 << QUAD_TOP_LEFT)
+#define MASK_TOP_RIGHT (1 << QUAD_TOP_RIGHT)
+#define MASK_BOTTOM_LEFT (1 << QUAD_BOTTOM_LEFT)
+#define MASK_BOTTOM_RIGHT (1 << QUAD_BOTTOM_RIGHT)
+#define MASK_ALL 0xf
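+
+/* Illustrative example: fragment j of a quad at (x0, y0) covers the pixel
+ * (x0 + (j & 1), y0 + (j >> 1)), so QUAD_BOTTOM_RIGHT is (x0 + 1, y0 + 1).
+ * A stage that wanted to kill only that fragment could do:
+ *
+ *    quad->inout.mask &= ~MASK_BOTTOM_RIGHT;
+ */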
+
+
+/**
+ * Quad stage inputs (pos, coverage, front/back face, etc)
+ */
+struct quad_header_input
+{
+ int x0, y0; /**< quad window pos, always even */
+ float coverage[QUAD_SIZE]; /**< fragment coverage for antialiasing */
+ unsigned facing:1; /**< Front (0) or back (1) facing? */
+ unsigned prim:2; /**< QUAD_PRIM_POINT, LINE, TRI */
+};
+
+
+/**
+ * Quad stage inputs/outputs.
+ */
+struct quad_header_inout
+{
+ unsigned mask:4;
+};
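+
+/* Convention used throughout the quad stages: bit j of `mask' is set while
+ * fragment j is still live.  Each stage ANDs in its own pass mask and only
+ * forwards the quad to the next stage if the mask is still non-zero.
+ */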
+
+
+/**
+ * Quad stage outputs (color & depth).
+ */
+struct quad_header_output
+{
+ /** colors in SOA format (rrrr, gggg, bbbb, aaaa) */
+ float color[PIPE_MAX_COLOR_BUFS][NUM_CHANNELS][QUAD_SIZE];
+ float depth[QUAD_SIZE];
+};
+
+
+/**
+ * Encodes everything we need to know about a 2x2 pixel block. Uses
+ * "Channel-Serial" or "SoA" layout.
+ */
+struct quad_header {
+ struct quad_header_input input;
+ struct quad_header_inout inout;
+ struct quad_header_output output;
+
+ const struct tgsi_interp_coef *coef;
+ const struct tgsi_interp_coef *posCoef;
+
+ unsigned nr_attrs;
+};
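+
+/* Interpolation sketch (assuming the usual plane-equation layout of
+ * tgsi_interp_coef): a stage can evaluate attribute channel `chan' of
+ * input slot `slot' at fragment j as
+ *
+ *    val = quad->coef[slot].a0[chan]
+ *        + quad->coef[slot].dadx[chan] * (quad->input.x0 + (j & 1))
+ *        + quad->coef[slot].dady[chan] * (quad->input.y0 + (j >> 1));
+ *
+ * posCoef is used the same way for window position/Z (see sp_quad_earlyz.c).
+ */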
+
+#endif /* SP_QUAD_H */
diff --git a/src/gallium/drivers/softpipe/sp_quad_alpha_test.c b/src/gallium/drivers/softpipe/sp_quad_alpha_test.c
new file mode 100644
index 0000000000..0845bae0e6
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_quad_alpha_test.c
@@ -0,0 +1,108 @@
+
+/**
+ * \brief Quad alpha test stage
+ */
+
+#include "sp_context.h"
+#include "sp_quad.h"
+#include "sp_quad_pipe.h"
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+
+
+static void
+alpha_test_quad(struct quad_stage *qs, struct quad_header *quad)
+{
+ struct softpipe_context *softpipe = qs->softpipe;
+ const float ref = softpipe->depth_stencil->alpha.ref_value;
+ unsigned passMask = 0x0, j;
+ const uint cbuf = 0; /* only output[0].alpha is tested */
+ const float *aaaa = quad->output.color[cbuf][3];
+
+ switch (softpipe->depth_stencil->alpha.func) {
+ case PIPE_FUNC_NEVER:
+ break;
+ case PIPE_FUNC_LESS:
+ /*
+ * If mask were an array [4] we could do this SIMD-style:
+ * passMask = (quad->output.color[0][3] < vec4(ref));
+ */
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (aaaa[j] < ref) {
+ passMask |= (1 << j);
+ }
+ }
+ break;
+ case PIPE_FUNC_EQUAL:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (aaaa[j] == ref) {
+ passMask |= (1 << j);
+ }
+ }
+ break;
+ case PIPE_FUNC_LEQUAL:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (aaaa[j] <= ref) {
+ passMask |= (1 << j);
+ }
+ }
+ break;
+ case PIPE_FUNC_GREATER:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (aaaa[j] > ref) {
+ passMask |= (1 << j);
+ }
+ }
+ break;
+ case PIPE_FUNC_NOTEQUAL:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (aaaa[j] != ref) {
+ passMask |= (1 << j);
+ }
+ }
+ break;
+ case PIPE_FUNC_GEQUAL:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (aaaa[j] >= ref) {
+ passMask |= (1 << j);
+ }
+ }
+ break;
+ case PIPE_FUNC_ALWAYS:
+ passMask = MASK_ALL;
+ break;
+ default:
+ assert(0);
+ }
+
+ quad->inout.mask &= passMask;
+
+ if (quad->inout.mask)
+ qs->next->run(qs->next, quad);
+}
+
+
+static void alpha_test_begin(struct quad_stage *qs)
+{
+ qs->next->begin(qs->next);
+}
+
+
+static void alpha_test_destroy(struct quad_stage *qs)
+{
+ FREE( qs );
+}
+
+
+struct quad_stage *
+sp_quad_alpha_test_stage( struct softpipe_context *softpipe )
+{
+ struct quad_stage *stage = CALLOC_STRUCT(quad_stage);
+
+ stage->softpipe = softpipe;
+ stage->begin = alpha_test_begin;
+ stage->run = alpha_test_quad;
+ stage->destroy = alpha_test_destroy;
+
+ return stage;
+}
diff --git a/src/gallium/drivers/softpipe/sp_quad_blend.c b/src/gallium/drivers/softpipe/sp_quad_blend.c
new file mode 100644
index 0000000000..b1e18805c7
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_quad_blend.c
@@ -0,0 +1,777 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * quad blending
+ * \author Brian Paul
+ */
+
+#include "pipe/p_defines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "sp_context.h"
+#include "sp_quad.h"
+#include "sp_surface.h"
+#include "sp_tile_cache.h"
+#include "sp_quad_pipe.h"
+
+
+#define VEC4_COPY(DST, SRC) \
+do { \
+ DST[0] = SRC[0]; \
+ DST[1] = SRC[1]; \
+ DST[2] = SRC[2]; \
+ DST[3] = SRC[3]; \
+} while(0)
+
+#define VEC4_SCALAR(DST, SRC) \
+do { \
+ DST[0] = SRC; \
+ DST[1] = SRC; \
+ DST[2] = SRC; \
+ DST[3] = SRC; \
+} while(0)
+
+#define VEC4_ADD(R, A, B) \
+do { \
+ R[0] = A[0] + B[0]; \
+ R[1] = A[1] + B[1]; \
+ R[2] = A[2] + B[2]; \
+ R[3] = A[3] + B[3]; \
+} while (0)
+
+#define VEC4_SUB(R, A, B) \
+do { \
+ R[0] = A[0] - B[0]; \
+ R[1] = A[1] - B[1]; \
+ R[2] = A[2] - B[2]; \
+ R[3] = A[3] - B[3]; \
+} while (0)
+
+/** Add and limit result to ceiling of 1.0 */
+#define VEC4_ADD_SAT(R, A, B) \
+do { \
+ R[0] = A[0] + B[0]; if (R[0] > 1.0f) R[0] = 1.0f; \
+ R[1] = A[1] + B[1]; if (R[1] > 1.0f) R[1] = 1.0f; \
+ R[2] = A[2] + B[2]; if (R[2] > 1.0f) R[2] = 1.0f; \
+ R[3] = A[3] + B[3]; if (R[3] > 1.0f) R[3] = 1.0f; \
+} while (0)
+
+/** Subtract and limit result to floor of 0.0 */
+#define VEC4_SUB_SAT(R, A, B) \
+do { \
+ R[0] = A[0] - B[0]; if (R[0] < 0.0f) R[0] = 0.0f; \
+ R[1] = A[1] - B[1]; if (R[1] < 0.0f) R[1] = 0.0f; \
+ R[2] = A[2] - B[2]; if (R[2] < 0.0f) R[2] = 0.0f; \
+ R[3] = A[3] - B[3]; if (R[3] < 0.0f) R[3] = 0.0f; \
+} while (0)
+
+#define VEC4_MUL(R, A, B) \
+do { \
+ R[0] = A[0] * B[0]; \
+ R[1] = A[1] * B[1]; \
+ R[2] = A[2] * B[2]; \
+ R[3] = A[3] * B[3]; \
+} while (0)
+
+#define VEC4_MIN(R, A, B) \
+do { \
+ R[0] = (A[0] < B[0]) ? A[0] : B[0]; \
+ R[1] = (A[1] < B[1]) ? A[1] : B[1]; \
+ R[2] = (A[2] < B[2]) ? A[2] : B[2]; \
+ R[3] = (A[3] < B[3]) ? A[3] : B[3]; \
+} while (0)
+
+#define VEC4_MAX(R, A, B) \
+do { \
+ R[0] = (A[0] > B[0]) ? A[0] : B[0]; \
+ R[1] = (A[1] > B[1]) ? A[1] : B[1]; \
+ R[2] = (A[2] > B[2]) ? A[2] : B[2]; \
+ R[3] = (A[3] > B[3]) ? A[3] : B[3]; \
+} while (0)
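+
+/* For orientation, a sketch of how classic "over" blending falls out of
+ * these helpers (rgb_src_factor = SRC_ALPHA, rgb_dst_factor = INV_SRC_ALPHA,
+ * rgb_func = ADD), per RGB channel c:
+ *
+ *    float inv_alpha[4];
+ *    VEC4_MUL(source[c], quadColor[c], quadColor[3]);
+ *    VEC4_SUB(inv_alpha, one, quadColor[3]);
+ *    VEC4_MUL(dest[c], dest[c], inv_alpha);
+ *    VEC4_ADD_SAT(quadColor[c], source[c], dest[c]);
+ *
+ * which is the sequence blend_quad() below assembles from the per-factor
+ * switches.
+ */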
+
+
+
+static void
+logicop_quad(struct quad_stage *qs, struct quad_header *quad)
+{
+ struct softpipe_context *softpipe = qs->softpipe;
+ uint cbuf;
+
+ /* loop over colorbuffer outputs */
+ for (cbuf = 0; cbuf < softpipe->framebuffer.nr_cbufs; cbuf++) {
+ float dest[4][QUAD_SIZE];
+ ubyte src[4][4], dst[4][4], res[4][4];
+ uint *src4 = (uint *) src;
+ uint *dst4 = (uint *) dst;
+ uint *res4 = (uint *) res;
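+ /* src/dst/res are laid out [channel][fragment] as bytes, so each 32-bit
+ * word src4[j]/dst4[j]/res4[j] packs the four fragments' values for
+ * channel j; the bitwise ops below therefore process a whole quad's
+ * channel per operation.
+ */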
+ struct softpipe_cached_tile *
+ tile = sp_get_cached_tile(softpipe,
+ softpipe->cbuf_cache[cbuf],
+ quad->input.x0, quad->input.y0);
+ float (*quadColor)[4] = quad->output.color[cbuf];
+ uint i, j;
+
+ /* get/swizzle dest colors */
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = (quad->input.x0 & (TILE_SIZE-1)) + (j & 1);
+ int y = (quad->input.y0 & (TILE_SIZE-1)) + (j >> 1);
+ for (i = 0; i < 4; i++) {
+ dest[i][j] = tile->data.color[y][x][i];
+ }
+ }
+
+ /* convert to ubyte */
+ for (j = 0; j < 4; j++) { /* loop over R,G,B,A channels */
+ dst[j][0] = float_to_ubyte(dest[j][0]); /* P0 */
+ dst[j][1] = float_to_ubyte(dest[j][1]); /* P1 */
+ dst[j][2] = float_to_ubyte(dest[j][2]); /* P2 */
+ dst[j][3] = float_to_ubyte(dest[j][3]); /* P3 */
+
+ src[j][0] = float_to_ubyte(quadColor[j][0]); /* P0 */
+ src[j][1] = float_to_ubyte(quadColor[j][1]); /* P1 */
+ src[j][2] = float_to_ubyte(quadColor[j][2]); /* P2 */
+ src[j][3] = float_to_ubyte(quadColor[j][3]); /* P3 */
+ }
+
+ switch (softpipe->blend->logicop_func) {
+ case PIPE_LOGICOP_CLEAR:
+ for (j = 0; j < 4; j++)
+ res4[j] = 0;
+ break;
+ case PIPE_LOGICOP_NOR:
+ for (j = 0; j < 4; j++)
+ res4[j] = ~(src4[j] | dst4[j]);
+ break;
+ case PIPE_LOGICOP_AND_INVERTED:
+ for (j = 0; j < 4; j++)
+ res4[j] = ~src4[j] & dst4[j];
+ break;
+ case PIPE_LOGICOP_COPY_INVERTED:
+ for (j = 0; j < 4; j++)
+ res4[j] = ~src4[j];
+ break;
+ case PIPE_LOGICOP_AND_REVERSE:
+ for (j = 0; j < 4; j++)
+ res4[j] = src4[j] & ~dst4[j];
+ break;
+ case PIPE_LOGICOP_INVERT:
+ for (j = 0; j < 4; j++)
+ res4[j] = ~dst4[j];
+ break;
+ case PIPE_LOGICOP_XOR:
+ for (j = 0; j < 4; j++)
+ res4[j] = dst4[j] ^ src4[j];
+ break;
+ case PIPE_LOGICOP_NAND:
+ for (j = 0; j < 4; j++)
+ res4[j] = ~(src4[j] & dst4[j]);
+ break;
+ case PIPE_LOGICOP_AND:
+ for (j = 0; j < 4; j++)
+ res4[j] = src4[j] & dst4[j];
+ break;
+ case PIPE_LOGICOP_EQUIV:
+ for (j = 0; j < 4; j++)
+ res4[j] = ~(src4[j] ^ dst4[j]);
+ break;
+ case PIPE_LOGICOP_NOOP:
+ for (j = 0; j < 4; j++)
+ res4[j] = dst4[j];
+ break;
+ case PIPE_LOGICOP_OR_INVERTED:
+ for (j = 0; j < 4; j++)
+ res4[j] = ~src4[j] | dst4[j];
+ break;
+ case PIPE_LOGICOP_COPY:
+ for (j = 0; j < 4; j++)
+ res4[j] = src4[j];
+ break;
+ case PIPE_LOGICOP_OR_REVERSE:
+ for (j = 0; j < 4; j++)
+ res4[j] = src4[j] | ~dst4[j];
+ break;
+ case PIPE_LOGICOP_OR:
+ for (j = 0; j < 4; j++)
+ res4[j] = src4[j] | dst4[j];
+ break;
+ case PIPE_LOGICOP_SET:
+ for (j = 0; j < 4; j++)
+ res4[j] = ~0;
+ break;
+ default:
+ assert(0);
+ }
+
+ for (j = 0; j < 4; j++) {
+ quadColor[j][0] = ubyte_to_float(res[j][0]);
+ quadColor[j][1] = ubyte_to_float(res[j][1]);
+ quadColor[j][2] = ubyte_to_float(res[j][2]);
+ quadColor[j][3] = ubyte_to_float(res[j][3]);
+ }
+ }
+
+ /* pass quad to next stage */
+ qs->next->run(qs->next, quad);
+}
+
+
+
+
+static void
+blend_quad(struct quad_stage *qs, struct quad_header *quad)
+{
+ static const float zero[4] = { 0, 0, 0, 0 };
+ static const float one[4] = { 1, 1, 1, 1 };
+
+ struct softpipe_context *softpipe = qs->softpipe;
+ uint cbuf;
+
+ if (softpipe->blend->logicop_enable) {
+ logicop_quad(qs, quad);
+ return;
+ }
+
+ /* loop over colorbuffer outputs */
+ for (cbuf = 0; cbuf < softpipe->framebuffer.nr_cbufs; cbuf++) {
+ float source[4][QUAD_SIZE], dest[4][QUAD_SIZE];
+ struct softpipe_cached_tile *tile
+ = sp_get_cached_tile(softpipe,
+ softpipe->cbuf_cache[cbuf],
+ quad->input.x0, quad->input.y0);
+ float (*quadColor)[4] = quad->output.color[cbuf];
+ uint i, j;
+
+ /* get/swizzle dest colors */
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = (quad->input.x0 & (TILE_SIZE-1)) + (j & 1);
+ int y = (quad->input.y0 & (TILE_SIZE-1)) + (j >> 1);
+ for (i = 0; i < 4; i++) {
+ dest[i][j] = tile->data.color[y][x][i];
+ }
+ }
+
+ /*
+ * Compute src/first term RGB
+ */
+ switch (softpipe->blend->rgb_src_factor) {
+ case PIPE_BLENDFACTOR_ONE:
+ VEC4_COPY(source[0], quadColor[0]); /* R */
+ VEC4_COPY(source[1], quadColor[1]); /* G */
+ VEC4_COPY(source[2], quadColor[2]); /* B */
+ break;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ VEC4_MUL(source[0], quadColor[0], quadColor[0]); /* R */
+ VEC4_MUL(source[1], quadColor[1], quadColor[1]); /* G */
+ VEC4_MUL(source[2], quadColor[2], quadColor[2]); /* B */
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ {
+ const float *alpha = quadColor[3];
+ VEC4_MUL(source[0], quadColor[0], alpha); /* R */
+ VEC4_MUL(source[1], quadColor[1], alpha); /* G */
+ VEC4_MUL(source[2], quadColor[2], alpha); /* B */
+ }
+ break;
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ VEC4_MUL(source[0], quadColor[0], dest[0]); /* R */
+ VEC4_MUL(source[1], quadColor[1], dest[1]); /* G */
+ VEC4_MUL(source[2], quadColor[2], dest[2]); /* B */
+ break;
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ {
+ const float *alpha = dest[3];
+ VEC4_MUL(source[0], quadColor[0], alpha); /* R */
+ VEC4_MUL(source[1], quadColor[1], alpha); /* G */
+ VEC4_MUL(source[2], quadColor[2], alpha); /* B */
+ }
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ {
+ const float *alpha = quadColor[3];
+ float diff[4], temp[4];
+ VEC4_SUB(diff, one, dest[3]);
+ VEC4_MIN(temp, alpha, diff);
+ VEC4_MUL(source[0], quadColor[0], temp); /* R */
+ VEC4_MUL(source[1], quadColor[1], temp); /* G */
+ VEC4_MUL(source[2], quadColor[2], temp); /* B */
+ }
+ break;
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ {
+ float comp[4];
+ VEC4_SCALAR(comp, softpipe->blend_color.color[0]); /* R */
+ VEC4_MUL(source[0], quadColor[0], comp); /* R */
+ VEC4_SCALAR(comp, softpipe->blend_color.color[1]); /* G */
+ VEC4_MUL(source[1], quadColor[1], comp); /* G */
+ VEC4_SCALAR(comp, softpipe->blend_color.color[2]); /* B */
+ VEC4_MUL(source[2], quadColor[2], comp); /* B */
+ }
+ break;
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ {
+ float alpha[4];
+ VEC4_SCALAR(alpha, softpipe->blend_color.color[3]);
+ VEC4_MUL(source[0], quadColor[0], alpha); /* R */
+ VEC4_MUL(source[1], quadColor[1], alpha); /* G */
+ VEC4_MUL(source[2], quadColor[2], alpha); /* B */
+ }
+ break;
+ case PIPE_BLENDFACTOR_SRC1_COLOR:
+ assert(0); /* to do */
+ break;
+ case PIPE_BLENDFACTOR_SRC1_ALPHA:
+ assert(0); /* to do */
+ break;
+ case PIPE_BLENDFACTOR_ZERO:
+ VEC4_COPY(source[0], zero); /* R */
+ VEC4_COPY(source[1], zero); /* G */
+ VEC4_COPY(source[2], zero); /* B */
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ {
+ float inv_comp[4];
+ VEC4_SUB(inv_comp, one, quadColor[0]); /* R */
+ VEC4_MUL(source[0], quadColor[0], inv_comp); /* R */
+ VEC4_SUB(inv_comp, one, quadColor[1]); /* G */
+ VEC4_MUL(source[1], quadColor[1], inv_comp); /* G */
+ VEC4_SUB(inv_comp, one, quadColor[2]); /* B */
+ VEC4_MUL(source[2], quadColor[2], inv_comp); /* B */
+ }
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ {
+ float inv_alpha[4];
+ VEC4_SUB(inv_alpha, one, quadColor[3]);
+ VEC4_MUL(source[0], quadColor[0], inv_alpha); /* R */
+ VEC4_MUL(source[1], quadColor[1], inv_alpha); /* G */
+ VEC4_MUL(source[2], quadColor[2], inv_alpha); /* B */
+ }
+ break;
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ {
+ float inv_alpha[4];
+ VEC4_SUB(inv_alpha, one, dest[3]);
+ VEC4_MUL(source[0], quadColor[0], inv_alpha); /* R */
+ VEC4_MUL(source[1], quadColor[1], inv_alpha); /* G */
+ VEC4_MUL(source[2], quadColor[2], inv_alpha); /* B */
+ }
+ break;
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ {
+ float inv_comp[4];
+ VEC4_SUB(inv_comp, one, dest[0]); /* R */
+ VEC4_MUL(source[0], quadColor[0], inv_comp); /* R */
+ VEC4_SUB(inv_comp, one, dest[1]); /* G */
+ VEC4_MUL(source[1], quadColor[1], inv_comp); /* G */
+ VEC4_SUB(inv_comp, one, dest[2]); /* B */
+ VEC4_MUL(source[2], quadColor[2], inv_comp); /* B */
+ }
+ break;
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ {
+ float inv_comp[4];
+ /* R */
+ VEC4_SCALAR(inv_comp, 1.0f - softpipe->blend_color.color[0]);
+ VEC4_MUL(source[0], quadColor[0], inv_comp);
+ /* G */
+ VEC4_SCALAR(inv_comp, 1.0f - softpipe->blend_color.color[1]);
+ VEC4_MUL(source[1], quadColor[1], inv_comp);
+ /* B */
+ VEC4_SCALAR(inv_comp, 1.0f - softpipe->blend_color.color[2]);
+ VEC4_MUL(source[2], quadColor[2], inv_comp);
+ }
+ break;
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ {
+ float inv_alpha[4];
+ VEC4_SCALAR(inv_alpha, 1.0f - softpipe->blend_color.color[3]);
+ VEC4_MUL(source[0], quadColor[0], inv_alpha); /* R */
+ VEC4_MUL(source[1], quadColor[1], inv_alpha); /* G */
+ VEC4_MUL(source[2], quadColor[2], inv_alpha); /* B */
+ }
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
+ assert(0); /* to do */
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
+ assert(0); /* to do */
+ break;
+ default:
+ assert(0);
+ }
+
+ /*
+ * Compute src/first term A
+ */
+ switch (softpipe->blend->alpha_src_factor) {
+ case PIPE_BLENDFACTOR_ONE:
+ VEC4_COPY(source[3], quadColor[3]); /* A */
+ break;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ /* fall-through */
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ {
+ const float *alpha = quadColor[3];
+ VEC4_MUL(source[3], quadColor[3], alpha); /* A */
+ }
+ break;
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ /* fall-through */
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ VEC4_MUL(source[3], quadColor[3], dest[3]); /* A */
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ /* multiply alpha by 1.0 */
+ VEC4_COPY(source[3], quadColor[3]); /* A */
+ break;
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ /* fall-through */
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ {
+ float comp[4];
+ VEC4_SCALAR(comp, softpipe->blend_color.color[3]); /* A */
+ VEC4_MUL(source[3], quadColor[3], comp); /* A */
+ }
+ break;
+ case PIPE_BLENDFACTOR_ZERO:
+ VEC4_COPY(source[3], zero); /* A */
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ /* fall-through */
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ {
+ float inv_alpha[4];
+ VEC4_SUB(inv_alpha, one, quadColor[3]);
+ VEC4_MUL(source[3], quadColor[3], inv_alpha); /* A */
+ }
+ break;
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ /* fall-through */
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ {
+ float inv_alpha[4];
+ VEC4_SUB(inv_alpha, one, dest[3]);
+ VEC4_MUL(source[3], quadColor[3], inv_alpha); /* A */
+ }
+ break;
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ /* fall-through */
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ {
+ float inv_comp[4];
+ /* A */
+ VEC4_SCALAR(inv_comp, 1.0f - softpipe->blend_color.color[3]);
+ VEC4_MUL(source[3], quadColor[3], inv_comp);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+
+ /*
+ * Compute dest/second term RGB
+ */
+ switch (softpipe->blend->rgb_dst_factor) {
+ case PIPE_BLENDFACTOR_ONE:
+ /* dest = dest * 1 NO-OP, leave dest as-is */
+ break;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ VEC4_MUL(dest[0], dest[0], quadColor[0]); /* R */
+ VEC4_MUL(dest[1], dest[1], quadColor[1]); /* G */
+ VEC4_MUL(dest[2], dest[2], quadColor[2]); /* B */
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ VEC4_MUL(dest[0], dest[0], quadColor[3]); /* R * A */
+ VEC4_MUL(dest[1], dest[1], quadColor[3]); /* G * A */
+ VEC4_MUL(dest[2], dest[2], quadColor[3]); /* B * A */
+ break;
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ VEC4_MUL(dest[0], dest[0], dest[3]); /* R * A */
+ VEC4_MUL(dest[1], dest[1], dest[3]); /* G * A */
+ VEC4_MUL(dest[2], dest[2], dest[3]); /* B * A */
+ break;
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ VEC4_MUL(dest[0], dest[0], dest[0]); /* R */
+ VEC4_MUL(dest[1], dest[1], dest[1]); /* G */
+ VEC4_MUL(dest[2], dest[2], dest[2]); /* B */
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ assert(0); /* illegal */
+ break;
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ {
+ float comp[4];
+ VEC4_SCALAR(comp, softpipe->blend_color.color[0]); /* R */
+ VEC4_MUL(dest[0], dest[0], comp); /* R */
+ VEC4_SCALAR(comp, softpipe->blend_color.color[1]); /* G */
+ VEC4_MUL(dest[1], dest[1], comp); /* G */
+ VEC4_SCALAR(comp, softpipe->blend_color.color[2]); /* B */
+ VEC4_MUL(dest[2], dest[2], comp); /* B */
+ }
+ break;
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ {
+ float comp[4];
+ VEC4_SCALAR(comp, softpipe->blend_color.color[3]); /* A */
+ VEC4_MUL(dest[0], dest[0], comp); /* R */
+ VEC4_MUL(dest[1], dest[1], comp); /* G */
+ VEC4_MUL(dest[2], dest[2], comp); /* B */
+ }
+ break;
+ case PIPE_BLENDFACTOR_ZERO:
+ VEC4_COPY(dest[0], zero); /* R */
+ VEC4_COPY(dest[1], zero); /* G */
+ VEC4_COPY(dest[2], zero); /* B */
+ break;
+ case PIPE_BLENDFACTOR_SRC1_COLOR:
+ case PIPE_BLENDFACTOR_SRC1_ALPHA:
+ /* XXX dual-source (SRC1) blend factors; not implemented */
+ assert(0);
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ {
+ float inv_comp[4];
+ VEC4_SUB(inv_comp, one, quadColor[0]); /* R */
+ VEC4_MUL(dest[0], inv_comp, dest[0]); /* R */
+ VEC4_SUB(inv_comp, one, quadColor[1]); /* G */
+ VEC4_MUL(dest[1], inv_comp, dest[1]); /* G */
+ VEC4_SUB(inv_comp, one, quadColor[2]); /* B */
+ VEC4_MUL(dest[2], inv_comp, dest[2]); /* B */
+ }
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ {
+ float one_minus_alpha[QUAD_SIZE];
+ VEC4_SUB(one_minus_alpha, one, quadColor[3]);
+ VEC4_MUL(dest[0], dest[0], one_minus_alpha); /* R */
+ VEC4_MUL(dest[1], dest[1], one_minus_alpha); /* G */
+ VEC4_MUL(dest[2], dest[2], one_minus_alpha); /* B */
+ }
+ break;
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ {
+ float inv_comp[4];
+ VEC4_SUB(inv_comp, one, dest[3]); /* A */
+ VEC4_MUL(dest[0], inv_comp, dest[0]); /* R */
+ VEC4_MUL(dest[1], inv_comp, dest[1]); /* G */
+ VEC4_MUL(dest[2], inv_comp, dest[2]); /* B */
+ }
+ break;
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ {
+ float inv_comp[4];
+ VEC4_SUB(inv_comp, one, dest[0]); /* R */
+ VEC4_MUL(dest[0], dest[0], inv_comp); /* R */
+ VEC4_SUB(inv_comp, one, dest[1]); /* G */
+ VEC4_MUL(dest[1], dest[1], inv_comp); /* G */
+ VEC4_SUB(inv_comp, one, dest[2]); /* B */
+ VEC4_MUL(dest[2], dest[2], inv_comp); /* B */
+ }
+ break;
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ {
+ float inv_comp[4];
+ /* R */
+ VEC4_SCALAR(inv_comp, 1.0f - softpipe->blend_color.color[0]);
+ VEC4_MUL(dest[0], dest[0], inv_comp);
+ /* G */
+ VEC4_SCALAR(inv_comp, 1.0f - softpipe->blend_color.color[1]);
+ VEC4_MUL(dest[1], dest[1], inv_comp);
+ /* B */
+ VEC4_SCALAR(inv_comp, 1.0f - softpipe->blend_color.color[2]);
+ VEC4_MUL(dest[2], dest[2], inv_comp);
+ }
+ break;
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ {
+ float inv_comp[4];
+ VEC4_SCALAR(inv_comp, 1.0f - softpipe->blend_color.color[3]);
+ VEC4_MUL(dest[0], dest[0], inv_comp);
+ VEC4_MUL(dest[1], dest[1], inv_comp);
+ VEC4_MUL(dest[2], dest[2], inv_comp);
+ }
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
+ case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
+ /* XXX dual-source (SRC1) blend factors; not implemented */
+ assert(0);
+ break;
+ default:
+ assert(0);
+ }
+
+ /*
+ * Compute dest/second term A
+ */
+ switch (softpipe->blend->alpha_dst_factor) {
+ case PIPE_BLENDFACTOR_ONE:
+ /* dest = dest * 1 NO-OP, leave dest as-is */
+ break;
+ case PIPE_BLENDFACTOR_SRC_COLOR:
+ /* fall-through */
+ case PIPE_BLENDFACTOR_SRC_ALPHA:
+ VEC4_MUL(dest[3], dest[3], quadColor[3]); /* A * A */
+ break;
+ case PIPE_BLENDFACTOR_DST_COLOR:
+ /* fall-through */
+ case PIPE_BLENDFACTOR_DST_ALPHA:
+ VEC4_MUL(dest[3], dest[3], dest[3]); /* A */
+ break;
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ assert(0); /* illegal */
+ break;
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ /* fall-through */
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ {
+ float comp[4];
+ VEC4_SCALAR(comp, softpipe->blend_color.color[3]); /* A */
+ VEC4_MUL(dest[3], dest[3], comp); /* A */
+ }
+ break;
+ case PIPE_BLENDFACTOR_ZERO:
+ VEC4_COPY(dest[3], zero); /* A */
+ break;
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ /* fall-through */
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ {
+ float one_minus_alpha[QUAD_SIZE];
+ VEC4_SUB(one_minus_alpha, one, quadColor[3]);
+ VEC4_MUL(dest[3], dest[3], one_minus_alpha); /* A */
+ }
+ break;
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ /* fall-through */
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ {
+ float inv_comp[4];
+ VEC4_SUB(inv_comp, one, dest[3]); /* A */
+ VEC4_MUL(dest[3], inv_comp, dest[3]); /* A */
+ }
+ break;
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ /* fall-through */
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ {
+ float inv_comp[4];
+ VEC4_SCALAR(inv_comp, 1.0f - softpipe->blend_color.color[3]);
+ VEC4_MUL(dest[3], dest[3], inv_comp);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ /*
+ * Combine RGB terms
+ */
+ switch (softpipe->blend->rgb_func) {
+ case PIPE_BLEND_ADD:
+ VEC4_ADD_SAT(quadColor[0], source[0], dest[0]); /* R */
+ VEC4_ADD_SAT(quadColor[1], source[1], dest[1]); /* G */
+ VEC4_ADD_SAT(quadColor[2], source[2], dest[2]); /* B */
+ break;
+ case PIPE_BLEND_SUBTRACT:
+ VEC4_SUB_SAT(quadColor[0], source[0], dest[0]); /* R */
+ VEC4_SUB_SAT(quadColor[1], source[1], dest[1]); /* G */
+ VEC4_SUB_SAT(quadColor[2], source[2], dest[2]); /* B */
+ break;
+ case PIPE_BLEND_REVERSE_SUBTRACT:
+ VEC4_SUB_SAT(quadColor[0], dest[0], source[0]); /* R */
+ VEC4_SUB_SAT(quadColor[1], dest[1], source[1]); /* G */
+ VEC4_SUB_SAT(quadColor[2], dest[2], source[2]); /* B */
+ break;
+ case PIPE_BLEND_MIN:
+ VEC4_MIN(quadColor[0], source[0], dest[0]); /* R */
+ VEC4_MIN(quadColor[1], source[1], dest[1]); /* G */
+ VEC4_MIN(quadColor[2], source[2], dest[2]); /* B */
+ break;
+ case PIPE_BLEND_MAX:
+ VEC4_MAX(quadColor[0], source[0], dest[0]); /* R */
+ VEC4_MAX(quadColor[1], source[1], dest[1]); /* G */
+ VEC4_MAX(quadColor[2], source[2], dest[2]); /* B */
+ break;
+ default:
+ assert(0);
+ }
+
+ /*
+ * Combine A terms
+ */
+ switch (softpipe->blend->alpha_func) {
+ case PIPE_BLEND_ADD:
+ VEC4_ADD_SAT(quadColor[3], source[3], dest[3]); /* A */
+ break;
+ case PIPE_BLEND_SUBTRACT:
+ VEC4_SUB_SAT(quadColor[3], source[3], dest[3]); /* A */
+ break;
+ case PIPE_BLEND_REVERSE_SUBTRACT:
+ VEC4_SUB_SAT(quadColor[3], dest[3], source[3]); /* A */
+ break;
+ case PIPE_BLEND_MIN:
+ VEC4_MIN(quadColor[3], source[3], dest[3]); /* A */
+ break;
+ case PIPE_BLEND_MAX:
+ VEC4_MAX(quadColor[3], source[3], dest[3]); /* A */
+ break;
+ default:
+ assert(0);
+ }
+
+ } /* cbuf loop */
+
+ /* pass blended quad to next stage */
+ qs->next->run(qs->next, quad);
+}
+
+
+static void blend_begin(struct quad_stage *qs)
+{
+ qs->next->begin(qs->next);
+}
+
+
+static void blend_destroy(struct quad_stage *qs)
+{
+ FREE( qs );
+}
+
+
+struct quad_stage *sp_quad_blend_stage( struct softpipe_context *softpipe )
+{
+ struct quad_stage *stage = CALLOC_STRUCT(quad_stage);
+
+ stage->softpipe = softpipe;
+ stage->begin = blend_begin;
+ stage->run = blend_quad;
+ stage->destroy = blend_destroy;
+
+ return stage;
+}
diff --git a/src/gallium/drivers/softpipe/sp_quad_bufloop.c b/src/gallium/drivers/softpipe/sp_quad_bufloop.c
new file mode 100644
index 0000000000..953d8516b9
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_quad_bufloop.c
@@ -0,0 +1,74 @@
+
+#include "util/u_memory.h"
+#include "sp_context.h"
+#include "sp_quad.h"
+#include "sp_surface.h"
+#include "sp_quad_pipe.h"
+
+
+/**
+ * Loop over colorbuffers, passing quad to next stage each time.
+ */
+static void
+cbuf_loop_quad(struct quad_stage *qs, struct quad_header *quad)
+{
+ struct softpipe_context *softpipe = qs->softpipe;
+ float tmp[PIPE_MAX_COLOR_BUFS][4][QUAD_SIZE];
+ unsigned i;
+
+ assert(sizeof(quad->output.color) == sizeof(tmp));
+ assert(softpipe->framebuffer.nr_cbufs <= PIPE_MAX_COLOR_BUFS);
+
+ /* make copy of original colors since they can get modified
+ * by blending and masking.
+ * XXX we won't have to do this if the fragment program actually emits
+ * N separate colors and we're drawing to N color buffers (MRT).
+ * But if we emitted one color and glDrawBuffer(GL_FRONT_AND_BACK) is
+ * in effect, we need to save/restore colors like this.
+ */
+ memcpy(tmp, quad->output.color, sizeof(tmp));
+
+ for (i = 0; i < softpipe->framebuffer.nr_cbufs; i++) {
+ /* set current cbuffer */
+#if 0 /* obsolete & going away */
+ softpipe->current_cbuf = i;
+#endif
+
+ /* pass blended quad to next stage */
+ qs->next->run(qs->next, quad);
+
+ /* restore quad's colors for next buffer */
+ memcpy(quad->output.color, tmp, sizeof(tmp));
+ }
+}
+
+
+static void cbuf_loop_begin(struct quad_stage *qs)
+{
+ qs->next->begin(qs->next);
+}
+
+
+static void cbuf_loop_destroy(struct quad_stage *qs)
+{
+ FREE( qs );
+}
+
+
+/**
+ * Create the colorbuffer loop stage.
+ * This is used to implement multiple render targets and GL_FRONT_AND_BACK
+ * rendering.
+ */
+struct quad_stage *sp_quad_bufloop_stage( struct softpipe_context *softpipe )
+{
+ struct quad_stage *stage = CALLOC_STRUCT(quad_stage);
+
+ stage->softpipe = softpipe;
+ stage->begin = cbuf_loop_begin;
+ stage->run = cbuf_loop_quad;
+ stage->destroy = cbuf_loop_destroy;
+
+ return stage;
+}
+
diff --git a/src/gallium/drivers/softpipe/sp_quad_colormask.c b/src/gallium/drivers/softpipe/sp_quad_colormask.c
new file mode 100644
index 0000000000..dc90e5d5e9
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_quad_colormask.c
@@ -0,0 +1,116 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \brief quad colormask stage
+ * \author Brian Paul
+ */
+
+#include "pipe/p_defines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "sp_context.h"
+#include "sp_quad.h"
+#include "sp_surface.h"
+#include "sp_quad_pipe.h"
+#include "sp_tile_cache.h"
+
+
+
+/**
+ * XXX colormask could be rolled into blending...
+ */
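+
+/* For example, with colormask == (PIPE_MASK_R | PIPE_MASK_G | PIPE_MASK_B),
+ * only quadColor[3] is replaced by the previously fetched dest[3], so the
+ * framebuffer's alpha channel is preserved while RGB is updated.
+ */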
+static void
+colormask_quad(struct quad_stage *qs, struct quad_header *quad)
+{
+ struct softpipe_context *softpipe = qs->softpipe;
+ uint cbuf;
+
+ /* loop over colorbuffer outputs */
+ for (cbuf = 0; cbuf < softpipe->framebuffer.nr_cbufs; cbuf++) {
+ float dest[4][QUAD_SIZE];
+ struct softpipe_cached_tile *tile
+ = sp_get_cached_tile(softpipe,
+ softpipe->cbuf_cache[cbuf],
+ quad->input.x0, quad->input.y0);
+ float (*quadColor)[4] = quad->output.color[cbuf];
+ uint i, j;
+
+ /* get/swizzle dest colors */
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = (quad->input.x0 & (TILE_SIZE-1)) + (j & 1);
+ int y = (quad->input.y0 & (TILE_SIZE-1)) + (j >> 1);
+ for (i = 0; i < 4; i++) {
+ dest[i][j] = tile->data.color[y][x][i];
+ }
+ }
+
+ /* R */
+ if (!(softpipe->blend->colormask & PIPE_MASK_R))
+ COPY_4V(quadColor[0], dest[0]);
+
+ /* G */
+ if (!(softpipe->blend->colormask & PIPE_MASK_G))
+ COPY_4V(quadColor[1], dest[1]);
+
+ /* B */
+ if (!(softpipe->blend->colormask & PIPE_MASK_B))
+ COPY_4V(quadColor[2], dest[2]);
+
+ /* A */
+ if (!(softpipe->blend->colormask & PIPE_MASK_A))
+ COPY_4V(quadColor[3], dest[3]);
+ }
+
+ /* pass quad to next stage */
+ qs->next->run(qs->next, quad);
+}
+
+
+static void colormask_begin(struct quad_stage *qs)
+{
+ qs->next->begin(qs->next);
+}
+
+
+static void colormask_destroy(struct quad_stage *qs)
+{
+ FREE( qs );
+}
+
+
+struct quad_stage *sp_quad_colormask_stage( struct softpipe_context *softpipe )
+{
+ struct quad_stage *stage = CALLOC_STRUCT(quad_stage);
+
+ stage->softpipe = softpipe;
+ stage->begin = colormask_begin;
+ stage->run = colormask_quad;
+ stage->destroy = colormask_destroy;
+
+ return stage;
+}
diff --git a/src/gallium/drivers/softpipe/sp_quad_coverage.c b/src/gallium/drivers/softpipe/sp_quad_coverage.c
new file mode 100644
index 0000000000..4aeee85870
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_quad_coverage.c
@@ -0,0 +1,94 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * \brief Apply AA coverage to quad alpha values
+ * \author Brian Paul
+ */
+
+
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "sp_context.h"
+#include "sp_quad.h"
+#include "sp_quad_pipe.h"
+
+
+/**
+ * Multiply quad's alpha values by the fragment coverage.
+ */
+static void
+coverage_quad(struct quad_stage *qs, struct quad_header *quad)
+{
+ struct softpipe_context *softpipe = qs->softpipe;
+ const uint prim = quad->input.prim;
+
+ if ((softpipe->rasterizer->poly_smooth && prim == QUAD_PRIM_TRI) ||
+ (softpipe->rasterizer->line_smooth && prim == QUAD_PRIM_LINE) ||
+ (softpipe->rasterizer->point_smooth && prim == QUAD_PRIM_POINT)) {
+ uint cbuf;
+
+ /* loop over colorbuffer outputs */
+ for (cbuf = 0; cbuf < softpipe->framebuffer.nr_cbufs; cbuf++) {
+ float (*quadColor)[4] = quad->output.color[cbuf];
+ unsigned j;
+ for (j = 0; j < QUAD_SIZE; j++) {
+ assert(quad->input.coverage[j] >= 0.0);
+ assert(quad->input.coverage[j] <= 1.0);
+ quadColor[3][j] *= quad->input.coverage[j];
+ }
+ }
+ }
+
+ qs->next->run(qs->next, quad);
+}
+
+
+static void coverage_begin(struct quad_stage *qs)
+{
+ qs->next->begin(qs->next);
+}
+
+
+static void coverage_destroy(struct quad_stage *qs)
+{
+ FREE( qs );
+}
+
+
+struct quad_stage *sp_quad_coverage_stage( struct softpipe_context *softpipe )
+{
+ struct quad_stage *stage = CALLOC_STRUCT(quad_stage);
+
+ stage->softpipe = softpipe;
+ stage->begin = coverage_begin;
+ stage->run = coverage_quad;
+ stage->destroy = coverage_destroy;
+
+ return stage;
+}
diff --git a/src/gallium/drivers/softpipe/sp_quad_depth_test.c b/src/gallium/drivers/softpipe/sp_quad_depth_test.c
new file mode 100644
index 0000000000..d463930bae
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_quad_depth_test.c
@@ -0,0 +1,290 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \brief Quad depth testing
+ */
+
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "sp_context.h"
+#include "sp_quad.h"
+#include "sp_surface.h"
+#include "sp_quad_pipe.h"
+#include "sp_tile_cache.h"
+
+
+/**
+ * Do depth testing for a quad.
+ * Not static since it's used by the stencil code.
+ */
+
+/*
+ * To increase efficiency, we should probably have multiple versions
+ * of this function that are specifically for Z16, Z32 and FP Z buffers.
+ * Try to effectively do that with codegen...
+ */
+
+void
+sp_depth_test_quad(struct quad_stage *qs, struct quad_header *quad)
+{
+ struct softpipe_context *softpipe = qs->softpipe;
+ struct pipe_surface *ps = softpipe->framebuffer.zsbuf;
+ const enum pipe_format format = ps->format;
+ unsigned bzzzz[QUAD_SIZE]; /**< Z values fetched from depth buffer */
+ unsigned qzzzz[QUAD_SIZE]; /**< Z values from the quad */
+ unsigned zmask = 0;
+ unsigned j;
+ struct softpipe_cached_tile *tile
+ = sp_get_cached_tile(softpipe, softpipe->zsbuf_cache, quad->input.x0, quad->input.y0);
+
+ assert(ps); /* shouldn't get here if there's no zbuffer */
+
+ /*
+ * Convert quad's float depth values to int depth values (qzzzz).
+ * If the Z buffer stores integer values, we _have_ to do the depth
+ * compares with integers (not floats). Otherwise, the float->int->float
+ * conversion of Z values (which isn't an identity function) will cause
+ * Z-fighting errors.
+ *
+ * Also, get the zbuffer values (bzzzz) from the cached tile.
+ */
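+ /* Worked example of why the compare must be done in integers: with Z16, a
+ * shader depth of 0.5 becomes (unsigned) (0.5 * 65535.0) = 32767.
+ * Converting that back to float and re-quantizing does not round-trip
+ * exactly, so an EQUAL/LEQUAL test done in float against the stored
+ * 16-bit value could flip for fragments at logically the same depth.
+ */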
+ switch (format) {
+ case PIPE_FORMAT_Z16_UNORM:
+ {
+ float scale = 65535.0;
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ qzzzz[j] = (unsigned) (quad->output.depth[j] * scale);
+ }
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = quad->input.x0 % TILE_SIZE + (j & 1);
+ int y = quad->input.y0 % TILE_SIZE + (j >> 1);
+ bzzzz[j] = tile->data.depth16[y][x];
+ }
+ }
+ break;
+ case PIPE_FORMAT_Z32_UNORM:
+ {
+ double scale = (double) (uint) ~0UL;
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ qzzzz[j] = (unsigned) (quad->output.depth[j] * scale);
+ }
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = quad->input.x0 % TILE_SIZE + (j & 1);
+ int y = quad->input.y0 % TILE_SIZE + (j >> 1);
+ bzzzz[j] = tile->data.depth32[y][x];
+ }
+ }
+ break;
+ case PIPE_FORMAT_X8Z24_UNORM:
+ /* fall-through */
+ case PIPE_FORMAT_S8Z24_UNORM:
+ {
+ float scale = (float) ((1 << 24) - 1);
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ qzzzz[j] = (unsigned) (quad->output.depth[j] * scale);
+ }
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = quad->input.x0 % TILE_SIZE + (j & 1);
+ int y = quad->input.y0 % TILE_SIZE + (j >> 1);
+ bzzzz[j] = tile->data.depth32[y][x] & 0xffffff;
+ }
+ }
+ break;
+ case PIPE_FORMAT_Z24X8_UNORM:
+ /* fall-through */
+ case PIPE_FORMAT_Z24S8_UNORM:
+ {
+ float scale = (float) ((1 << 24) - 1);
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ qzzzz[j] = (unsigned) (quad->output.depth[j] * scale);
+ }
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = quad->input.x0 % TILE_SIZE + (j & 1);
+ int y = quad->input.y0 % TILE_SIZE + (j >> 1);
+ bzzzz[j] = tile->data.depth32[y][x] >> 8;
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ switch (softpipe->depth_stencil->depth.func) {
+ case PIPE_FUNC_NEVER:
+ /* zmask = 0 */
+ break;
+ case PIPE_FUNC_LESS:
+ /* Note this is pretty much a single sse or cell instruction.
+ * Like this: quad->inout.mask &= (qzzzz < bzzzz);
+ */
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (qzzzz[j] < bzzzz[j])
+ zmask |= 1 << j;
+ }
+ break;
+ case PIPE_FUNC_EQUAL:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (qzzzz[j] == bzzzz[j])
+ zmask |= 1 << j;
+ }
+ break;
+ case PIPE_FUNC_LEQUAL:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (qzzzz[j] <= bzzzz[j])
+ zmask |= (1 << j);
+ }
+ break;
+ case PIPE_FUNC_GREATER:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (qzzzz[j] > bzzzz[j])
+ zmask |= (1 << j);
+ }
+ break;
+ case PIPE_FUNC_NOTEQUAL:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (qzzzz[j] != bzzzz[j])
+ zmask |= (1 << j);
+ }
+ break;
+ case PIPE_FUNC_GEQUAL:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (qzzzz[j] >= bzzzz[j])
+ zmask |= (1 << j);
+ }
+ break;
+ case PIPE_FUNC_ALWAYS:
+ zmask = MASK_ALL;
+ break;
+ default:
+ assert(0);
+ }
+
+ quad->inout.mask &= zmask;
+
+ if (softpipe->depth_stencil->depth.writemask) {
+
+ /* This is also efficient with sse / spe instructions:
+ */
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (quad->inout.mask & (1 << j)) {
+ bzzzz[j] = qzzzz[j];
+ }
+ }
+
+ /* put updated Z values back into cached tile */
+ switch (format) {
+ case PIPE_FORMAT_Z16_UNORM:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = quad->input.x0 % TILE_SIZE + (j & 1);
+ int y = quad->input.y0 % TILE_SIZE + (j >> 1);
+ tile->data.depth16[y][x] = (ushort) bzzzz[j];
+ }
+ break;
+ case PIPE_FORMAT_X8Z24_UNORM:
+ /* fall-through */
+ /* (yes, this falls through to a different case than above) */
+ case PIPE_FORMAT_Z32_UNORM:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = quad->input.x0 % TILE_SIZE + (j & 1);
+ int y = quad->input.y0 % TILE_SIZE + (j >> 1);
+ tile->data.depth32[y][x] = bzzzz[j];
+ }
+ break;
+ case PIPE_FORMAT_S8Z24_UNORM:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = quad->input.x0 % TILE_SIZE + (j & 1);
+ int y = quad->input.y0 % TILE_SIZE + (j >> 1);
+ uint s8z24 = tile->data.depth32[y][x];
+ s8z24 = (s8z24 & 0xff000000) | bzzzz[j];
+ tile->data.depth32[y][x] = s8z24;
+ }
+ break;
+ case PIPE_FORMAT_Z24S8_UNORM:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = quad->input.x0 % TILE_SIZE + (j & 1);
+ int y = quad->input.y0 % TILE_SIZE + (j >> 1);
+ uint z24s8 = tile->data.depth32[y][x];
+ z24s8 = (z24s8 & 0xff) | (bzzzz[j] << 8);
+ tile->data.depth32[y][x] = z24s8;
+ }
+ break;
+ case PIPE_FORMAT_Z24X8_UNORM:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = quad->input.x0 % TILE_SIZE + (j & 1);
+ int y = quad->input.y0 % TILE_SIZE + (j >> 1);
+ tile->data.depth32[y][x] = bzzzz[j] << 8;
+ }
+ break;
+ default:
+ assert(0);
+ }
+ }
+}
+
+
+static void
+depth_test_quad(struct quad_stage *qs, struct quad_header *quad)
+{
+ sp_depth_test_quad(qs, quad);
+
+ if (quad->inout.mask)
+ qs->next->run(qs->next, quad);
+}
+
+
+static void depth_test_begin(struct quad_stage *qs)
+{
+ qs->next->begin(qs->next);
+}
+
+
+static void depth_test_destroy(struct quad_stage *qs)
+{
+ FREE( qs );
+}
+
+
+struct quad_stage *sp_quad_depth_test_stage( struct softpipe_context *softpipe )
+{
+ struct quad_stage *stage = CALLOC_STRUCT(quad_stage);
+
+ stage->softpipe = softpipe;
+ stage->begin = depth_test_begin;
+ stage->run = depth_test_quad;
+ stage->destroy = depth_test_destroy;
+
+ return stage;
+}
diff --git a/src/gallium/drivers/softpipe/sp_quad_earlyz.c b/src/gallium/drivers/softpipe/sp_quad_earlyz.c
new file mode 100644
index 0000000000..496fd39ed1
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_quad_earlyz.c
@@ -0,0 +1,88 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \brief Quad early-z testing
+ */
+
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "sp_quad.h"
+#include "sp_quad_pipe.h"
+
+
+/**
+ * All this stage does is compute the quad's Z values (which is normally
+ * done by the shading stage).
+ * The next stage will do the actual depth test.
+ */
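+
+/* Sketch of the math used below: Z comes from the position plane equation
+ *
+ *    z(x, y) = posCoef->a0[2] + posCoef->dadx[2] * x + posCoef->dady[2] * y
+ *
+ * evaluated at the quad's four pixels (x0, y0), (x0+1, y0), (x0, y0+1) and
+ * (x0+1, y0+1), matching the QUAD_TOP_LEFT..QUAD_BOTTOM_RIGHT order.
+ */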
+static void
+earlyz_quad(
+ struct quad_stage *qs,
+ struct quad_header *quad )
+{
+ const float fx = (float) quad->input.x0;
+ const float fy = (float) quad->input.y0;
+ const float dzdx = quad->posCoef->dadx[2];
+ const float dzdy = quad->posCoef->dady[2];
+ const float z0 = quad->posCoef->a0[2] + dzdx * fx + dzdy * fy;
+
+ quad->output.depth[0] = z0;
+ quad->output.depth[1] = z0 + dzdx;
+ quad->output.depth[2] = z0 + dzdy;
+ quad->output.depth[3] = z0 + dzdx + dzdy;
+
+ qs->next->run( qs->next, quad );
+}
+
+static void
+earlyz_begin(
+ struct quad_stage *qs )
+{
+ qs->next->begin( qs->next );
+}
+
+static void
+earlyz_destroy(
+ struct quad_stage *qs )
+{
+ FREE( qs );
+}
+
+struct quad_stage *
+sp_quad_earlyz_stage(
+ struct softpipe_context *softpipe )
+{
+ struct quad_stage *stage = CALLOC_STRUCT( quad_stage );
+
+ stage->softpipe = softpipe;
+ stage->begin = earlyz_begin;
+ stage->run = earlyz_quad;
+ stage->destroy = earlyz_destroy;
+
+ return stage;
+}
diff --git a/src/gallium/drivers/softpipe/sp_quad_fs.c b/src/gallium/drivers/softpipe/sp_quad_fs.c
new file mode 100644
index 0000000000..28f8d1a60e
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_quad_fs.c
@@ -0,0 +1,192 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ * Copyright 2008 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Vertices are just an array of floats, with all the attributes
+ * packed. We currently assume a layout like:
+ *
+ * attr[0][0..3] - window position
+ * attr[1..n][0..3] - remaining attributes.
+ *
+ * Attributes are assumed to be 4 floats wide but are packed so that
+ * all the enabled attributes run contiguously.
+ */
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_shader_tokens.h"
+
+#include "sp_context.h"
+#include "sp_state.h"
+#include "sp_quad.h"
+#include "sp_quad_pipe.h"
+#include "sp_texture.h"
+#include "sp_tex_sample.h"
+
+
+struct quad_shade_stage
+{
+ struct quad_stage stage; /**< base class */
+ struct tgsi_exec_machine *machine;
+ struct tgsi_exec_vector *inputs, *outputs;
+};
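+
+/* quad_shade_stage "derives" from quad_stage by embedding it as its first
+ * member, so the struct quad_stage pointer handed to the stage callbacks
+ * can be cast back with the quad_shade_stage() wrapper below.
+ */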
+
+
+/** cast wrapper */
+static INLINE struct quad_shade_stage *
+quad_shade_stage(struct quad_stage *qs)
+{
+ return (struct quad_shade_stage *) qs;
+}
+
+
+/**
+ * Execute fragment shader for the four fragments in the quad.
+ */
+static void
+shade_quad(struct quad_stage *qs, struct quad_header *quad)
+{
+ struct quad_shade_stage *qss = quad_shade_stage( qs );
+ struct softpipe_context *softpipe = qs->softpipe;
+ struct tgsi_exec_machine *machine = qss->machine;
+ boolean z_written;
+
+ /* Consts do not require 16 byte alignment. */
+ machine->Consts = softpipe->mapped_constants[PIPE_SHADER_FRAGMENT];
+
+ machine->InterpCoefs = quad->coef;
+
+ /* run shader */
+ quad->inout.mask &= softpipe->fs->run( softpipe->fs, machine, quad );
+
+ /* store outputs */
+ z_written = FALSE;
+ {
+ const ubyte *sem_name = softpipe->fs->info.output_semantic_name;
+ const ubyte *sem_index = softpipe->fs->info.output_semantic_index;
+ const uint n = qss->stage.softpipe->fs->info.num_outputs;
+ uint i;
+ for (i = 0; i < n; i++) {
+ switch (sem_name[i]) {
+ case TGSI_SEMANTIC_COLOR:
+ {
+ uint cbuf = sem_index[i];
+ memcpy(quad->output.color[cbuf],
+ &machine->Outputs[i].xyzw[0].f[0],
+ sizeof(quad->output.color[0]) );
+ }
+ break;
+ case TGSI_SEMANTIC_POSITION:
+ {
+ uint j;
+ for (j = 0; j < 4; j++) {
+ quad->output.depth[j] = machine->Outputs[i].xyzw[2].f[j];
+ }
+ z_written = TRUE;
+ }
+ break;
+ }
+ }
+ }
+
+ if (!z_written) {
+ /* compute Z values now, as in the quad earlyz stage */
+ /* XXX we should really only do this if the earlyz stage is not used */
+ const float fx = (float) quad->input.x0;
+ const float fy = (float) quad->input.y0;
+ const float dzdx = quad->posCoef->dadx[2];
+ const float dzdy = quad->posCoef->dady[2];
+ const float z0 = quad->posCoef->a0[2] + dzdx * fx + dzdy * fy;
+
+ quad->output.depth[0] = z0;
+ quad->output.depth[1] = z0 + dzdx;
+ quad->output.depth[2] = z0 + dzdy;
+ quad->output.depth[3] = z0 + dzdx + dzdy;
+ }
+
+ /* shader may cull fragments */
+ if (quad->inout.mask) {
+ qs->next->run( qs->next, quad );
+ }
+}
+
+
+/**
+ * Per-primitive (or per-begin?) setup
+ */
+static void
+shade_begin(struct quad_stage *qs)
+{
+ struct quad_shade_stage *qss = quad_shade_stage(qs);
+ struct softpipe_context *softpipe = qs->softpipe;
+
+ softpipe->fs->prepare( softpipe->fs,
+ qss->machine,
+ (struct tgsi_sampler **)
+ softpipe->tgsi.frag_samplers_list );
+
+ qs->next->begin(qs->next);
+}
+
+
+static void
+shade_destroy(struct quad_stage *qs)
+{
+ struct quad_shade_stage *qss = (struct quad_shade_stage *) qs;
+
+ tgsi_exec_machine_destroy(qss->machine);
+
+ FREE( qs );
+}
+
+
+struct quad_stage *
+sp_quad_shade_stage( struct softpipe_context *softpipe )
+{
+ struct quad_shade_stage *qss = CALLOC_STRUCT(quad_shade_stage);
+ if (!qss)
+ goto fail;
+
+ qss->stage.softpipe = softpipe;
+ qss->stage.begin = shade_begin;
+ qss->stage.run = shade_quad;
+ qss->stage.destroy = shade_destroy;
+
+ qss->machine = tgsi_exec_machine_create();
+ if (!qss->machine)
+ goto fail;
+
+ return &qss->stage;
+
+fail:
+ if (qss && qss->machine)
+ tgsi_exec_machine_destroy(qss->machine);
+
+ FREE(qss);
+ return NULL;
+}
diff --git a/src/gallium/drivers/softpipe/sp_quad_occlusion.c b/src/gallium/drivers/softpipe/sp_quad_occlusion.c
new file mode 100644
index 0000000000..dfa7ff3b1d
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_quad_occlusion.c
@@ -0,0 +1,85 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * \brief Quad occlusion counter stage
+ * \author Brian Paul
+ */
+
+
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "sp_context.h"
+#include "sp_quad.h"
+#include "sp_surface.h"
+#include "sp_quad_pipe.h"
+
+static unsigned count_bits( unsigned val )
+{
+ unsigned i;
+
+ for (i = 0; val ; val >>= 1)
+ i += (val & 1);
+
+ return i;
+}
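+
+/* For example, count_bits(MASK_ALL) == count_bits(0xf) == 4, so a fully
+ * covered quad adds four fragments to softpipe->occlusion_count while a
+ * quad with an empty mask contributes nothing.
+ */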
+
+static void
+occlusion_count_quad(struct quad_stage *qs, struct quad_header *quad)
+{
+ struct softpipe_context *softpipe = qs->softpipe;
+
+ softpipe->occlusion_count += count_bits(quad->inout.mask);
+
+ qs->next->run(qs->next, quad);
+}
+
+
+static void occlusion_begin(struct quad_stage *qs)
+{
+ qs->next->begin(qs->next);
+}
+
+
+static void occlusion_destroy(struct quad_stage *qs)
+{
+ FREE( qs );
+}
+
+
+struct quad_stage *sp_quad_occlusion_stage( struct softpipe_context *softpipe )
+{
+ struct quad_stage *stage = CALLOC_STRUCT(quad_stage);
+
+ stage->softpipe = softpipe;
+ stage->begin = occlusion_begin;
+ stage->run = occlusion_count_quad;
+ stage->destroy = occlusion_destroy;
+
+ return stage;
+}
diff --git a/src/gallium/drivers/softpipe/sp_quad_output.c b/src/gallium/drivers/softpipe/sp_quad_output.c
new file mode 100644
index 0000000000..92d5f9f3c1
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_quad_output.c
@@ -0,0 +1,103 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_memory.h"
+#include "sp_context.h"
+#include "sp_quad.h"
+#include "sp_surface.h"
+#include "sp_quad_pipe.h"
+#include "sp_tile_cache.h"
+
+
+/**
+ * Last step of quad processing: write quad colors to the framebuffer,
+ * taking mask into account.
+ */
+static void
+output_quad(struct quad_stage *qs, struct quad_header *quad)
+{
+ /* in-tile pos: */
+ const int itx = quad->input.x0 % TILE_SIZE;
+ const int ity = quad->input.y0 % TILE_SIZE;
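+ /* x0/y0 are always even (see quad_header_input), so (itx, ity) is the
+ * quad's top-left pixel within its cached TILE_SIZE x TILE_SIZE tile;
+ * fragment j is written at (itx + (j & 1), ity + (j >> 1)) below.
+ */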
+
+ struct softpipe_context *softpipe = qs->softpipe;
+ uint cbuf;
+
+ /* loop over colorbuffer outputs */
+ for (cbuf = 0; cbuf < softpipe->framebuffer.nr_cbufs; cbuf++) {
+ struct softpipe_cached_tile *tile
+ = sp_get_cached_tile(softpipe,
+ softpipe->cbuf_cache[cbuf],
+ quad->input.x0, quad->input.y0);
+ float (*quadColor)[4] = quad->output.color[cbuf];
+ int i, j;
+
+ /* write the quad's colors into the cached tile, honoring the quad mask */
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (quad->inout.mask & (1 << j)) {
+ int x = itx + (j & 1);
+ int y = ity + (j >> 1);
+ for (i = 0; i < 4; i++) { /* loop over color chans */
+ tile->data.color[y][x][i] = quadColor[i][j];
+ }
+ if (0) {
+ debug_printf("sp write pixel %d,%d: %g, %g, %g\n",
+ quad->input.x0 + x,
+ quad->input.y0 + y,
+ quadColor[0][j],
+ quadColor[1][j],
+ quadColor[2][j]);
+ }
+ }
+ }
+ }
+}
+
+
+static void output_begin(struct quad_stage *qs)
+{
+ assert(qs->next == NULL);
+}
+
+
+static void output_destroy(struct quad_stage *qs)
+{
+ FREE( qs );
+}
+
+
+struct quad_stage *sp_quad_output_stage( struct softpipe_context *softpipe )
+{
+ struct quad_stage *stage = CALLOC_STRUCT(quad_stage);
+
+ stage->softpipe = softpipe;
+ stage->begin = output_begin;
+ stage->run = output_quad;
+ stage->destroy = output_destroy;
+
+ return stage;
+}
diff --git a/src/gallium/drivers/softpipe/sp_quad_pipe.c b/src/gallium/drivers/softpipe/sp_quad_pipe.c
new file mode 100644
index 0000000000..b5f69b7426
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_quad_pipe.c
@@ -0,0 +1,118 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "sp_context.h"
+#include "sp_state.h"
+#include "pipe/p_shader_tokens.h"
+
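+/**
+ * Insert the given stage at the head of the i-th quad pipeline.  Since the
+ * pipeline is built back-to-front, the last stage pushed is run first.
+ */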
+static void
+sp_push_quad_first(
+ struct softpipe_context *sp,
+ struct quad_stage *quad,
+ uint i )
+{
+ quad->next = sp->quad[i].first;
+ sp->quad[i].first = quad;
+}
+
+static void
+sp_build_depth_stencil(
+ struct softpipe_context *sp,
+ uint i )
+{
+ if (sp->depth_stencil->stencil[0].enabled ||
+ sp->depth_stencil->stencil[1].enabled) {
+ sp_push_quad_first( sp, sp->quad[i].stencil_test, i );
+ }
+ else if (sp->depth_stencil->depth.enabled &&
+ sp->framebuffer.zsbuf) {
+ sp_push_quad_first( sp, sp->quad[i].depth_test, i );
+ }
+}
+
+void
+sp_build_quad_pipeline(struct softpipe_context *sp)
+{
+ uint i;
+
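+ /* Depth testing can run before the shader only when the shader can't
+ * kill fragments or write Z, and no alpha test can discard fragments.
+ */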
+ boolean early_depth_test =
+ sp->depth_stencil->depth.enabled &&
+ sp->framebuffer.zsbuf &&
+ !sp->depth_stencil->alpha.enabled &&
+ !sp->fs->info.uses_kill &&
+ !sp->fs->info.writes_z;
+
+ /* build up the pipeline in reverse order... */
+ for (i = 0; i < SP_NUM_QUAD_THREADS; i++) {
+ sp->quad[i].first = sp->quad[i].output;
+
+ if (sp->blend->colormask != 0xf) {
+ sp_push_quad_first( sp, sp->quad[i].colormask, i );
+ }
+
+ if (sp->blend->blend_enable ||
+ sp->blend->logicop_enable) {
+ sp_push_quad_first( sp, sp->quad[i].blend, i );
+ }
+
+ if (sp->active_query_count) {
+ sp_push_quad_first( sp, sp->quad[i].occlusion, i );
+ }
+
+ if (sp->rasterizer->poly_smooth ||
+ sp->rasterizer->line_smooth ||
+ sp->rasterizer->point_smooth) {
+ sp_push_quad_first( sp, sp->quad[i].coverage, i );
+ }
+
+ if (!early_depth_test) {
+ sp_build_depth_stencil( sp, i );
+ }
+
+ if (sp->depth_stencil->alpha.enabled) {
+ sp_push_quad_first( sp, sp->quad[i].alpha_test, i );
+ }
+
+ /* XXX always enable shader? */
+ if (1) {
+ sp_push_quad_first( sp, sp->quad[i].shade, i );
+ }
+
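+ /* pushing earlyz after the depth/stencil stages puts it ahead of them
+ * in the final (reversed) pipeline order.
+ */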
+ if (early_depth_test) {
+ sp_build_depth_stencil( sp, i );
+ sp_push_quad_first( sp, sp->quad[i].earlyz, i );
+ }
+
+#if !USE_DRAW_STAGE_PSTIPPLE
+ if (sp->rasterizer->poly_stipple_enable) {
+ sp_push_quad_first( sp, sp->quad[i].polygon_stipple, i );
+ }
+#endif
+ }
+}
+
diff --git a/src/gallium/drivers/softpipe/sp_quad_pipe.h b/src/gallium/drivers/softpipe/sp_quad_pipe.h
new file mode 100644
index 0000000000..0e40586ffc
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_quad_pipe.h
@@ -0,0 +1,74 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef SP_QUAD_PIPE_H
+#define SP_QUAD_PIPE_H
+
+
+struct softpipe_context;
+struct quad_header;
+
+
+/**
+ * Fragment processing is performed on 2x2 blocks of pixels called "quads".
+ * Quad processing is performed with a pipeline of stages represented by
+ * this type.
+ */
+struct quad_stage {
+ struct softpipe_context *softpipe;
+
+ struct quad_stage *next;
+
+ void (*begin)(struct quad_stage *qs);
+
+ /** the stage action */
+ void (*run)(struct quad_stage *qs, struct quad_header *quad);
+
+ void (*destroy)(struct quad_stage *qs);
+};
+
+
+struct quad_stage *sp_quad_polygon_stipple_stage( struct softpipe_context *softpipe );
+struct quad_stage *sp_quad_earlyz_stage( struct softpipe_context *softpipe );
+struct quad_stage *sp_quad_shade_stage( struct softpipe_context *softpipe );
+struct quad_stage *sp_quad_alpha_test_stage( struct softpipe_context *softpipe );
+struct quad_stage *sp_quad_stencil_test_stage( struct softpipe_context *softpipe );
+struct quad_stage *sp_quad_depth_test_stage( struct softpipe_context *softpipe );
+struct quad_stage *sp_quad_occlusion_stage( struct softpipe_context *softpipe );
+struct quad_stage *sp_quad_coverage_stage( struct softpipe_context *softpipe );
+struct quad_stage *sp_quad_blend_stage( struct softpipe_context *softpipe );
+struct quad_stage *sp_quad_colormask_stage( struct softpipe_context *softpipe );
+struct quad_stage *sp_quad_output_stage( struct softpipe_context *softpipe );
+
+void sp_build_quad_pipeline(struct softpipe_context *sp);
+
+void sp_depth_test_quad(struct quad_stage *qs, struct quad_header *quad);
+
+#endif /* SP_QUAD_PIPE_H */
diff --git a/src/gallium/drivers/softpipe/sp_quad_stencil.c b/src/gallium/drivers/softpipe/sp_quad_stencil.c
new file mode 100644
index 0000000000..5e9d447737
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_quad_stencil.c
@@ -0,0 +1,352 @@
+
+/**
+ * \brief Quad stencil testing
+ */
+
+
+#include "sp_context.h"
+#include "sp_quad.h"
+#include "sp_surface.h"
+#include "sp_tile_cache.h"
+#include "sp_quad_pipe.h"
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+
+
+/** Only 8-bit stencil supported */
+#define STENCIL_MAX 0xff
+
+
+/**
+ * Do the basic stencil test (compare stencil buffer values against the
+ * reference value).
+ *
+ * \param stencilVals the stencil values from the stencil buffer
+ * \param func the stencil func (PIPE_FUNC_x)
+ * \param ref the stencil reference value
+ * \param valMask the stencil value mask indicating which bits of the stencil
+ * values and ref value are to be used.
+ * \return mask indicating which pixels passed the stencil test
+ */
+static unsigned
+do_stencil_test(const ubyte stencilVals[QUAD_SIZE], unsigned func,
+ unsigned ref, unsigned valMask)
+{
+ unsigned passMask = 0x0;
+ unsigned j;
+
+ ref &= valMask;
+
+ switch (func) {
+ case PIPE_FUNC_NEVER:
+ /* passMask = 0x0 */
+ break;
+ case PIPE_FUNC_LESS:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (ref < (stencilVals[j] & valMask)) {
+ passMask |= (1 << j);
+ }
+ }
+ break;
+ case PIPE_FUNC_EQUAL:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (ref == (stencilVals[j] & valMask)) {
+ passMask |= (1 << j);
+ }
+ }
+ break;
+ case PIPE_FUNC_LEQUAL:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (ref <= (stencilVals[j] & valMask)) {
+ passMask |= (1 << j);
+ }
+ }
+ break;
+ case PIPE_FUNC_GREATER:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (ref > (stencilVals[j] & valMask)) {
+ passMask |= (1 << j);
+ }
+ }
+ break;
+ case PIPE_FUNC_NOTEQUAL:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (ref != (stencilVals[j] & valMask)) {
+ passMask |= (1 << j);
+ }
+ }
+ break;
+ case PIPE_FUNC_GEQUAL:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (ref >= (stencilVals[j] & valMask)) {
+ passMask |= (1 << j);
+ }
+ }
+ break;
+ case PIPE_FUNC_ALWAYS:
+ passMask = MASK_ALL;
+ break;
+ default:
+ assert(0);
+ }
+
+ return passMask;
+}
+
+
+/**
+ * Apply the stencil operator to stencil values.
+ *
+ * \param stencilVals the stencil buffer values (read and written)
+ * \param mask indicates which pixels to update
+ * \param op the stencil operator (PIPE_STENCIL_OP_x)
+ * \param ref the stencil reference value
+ * \param wrtMask writemask controlling which bits are changed in the
+ * stencil values
+ */
+static void
+apply_stencil_op(ubyte stencilVals[QUAD_SIZE],
+ unsigned mask, unsigned op, ubyte ref, ubyte wrtMask)
+{
+ unsigned j;
+ ubyte newstencil[QUAD_SIZE];
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ newstencil[j] = stencilVals[j];
+ }
+
+ switch (op) {
+ case PIPE_STENCIL_OP_KEEP:
+ /* no-op */
+ break;
+ case PIPE_STENCIL_OP_ZERO:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (mask & (1 << j)) {
+ newstencil[j] = 0;
+ }
+ }
+ break;
+ case PIPE_STENCIL_OP_REPLACE:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (mask & (1 << j)) {
+ newstencil[j] = ref;
+ }
+ }
+ break;
+ case PIPE_STENCIL_OP_INCR:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (mask & (1 << j)) {
+ if (stencilVals[j] < STENCIL_MAX) {
+ newstencil[j] = stencilVals[j] + 1;
+ }
+ }
+ }
+ break;
+ case PIPE_STENCIL_OP_DECR:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (mask & (1 << j)) {
+ if (stencilVals[j] > 0) {
+ newstencil[j] = stencilVals[j] - 1;
+ }
+ }
+ }
+ break;
+ case PIPE_STENCIL_OP_INCR_WRAP:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (mask & (1 << j)) {
+ newstencil[j] = stencilVals[j] + 1;
+ }
+ }
+ break;
+ case PIPE_STENCIL_OP_DECR_WRAP:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (mask & (1 << j)) {
+ newstencil[j] = stencilVals[j] - 1;
+ }
+ }
+ break;
+ case PIPE_STENCIL_OP_INVERT:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ if (mask & (1 << j)) {
+ newstencil[j] = ~stencilVals[j];
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ /*
+ * update the stencil values
+ */
+ if (wrtMask != STENCIL_MAX) {
+ /* apply bit-wise stencil buffer writemask */
+ for (j = 0; j < QUAD_SIZE; j++) {
+ stencilVals[j] = (wrtMask & newstencil[j]) | (~wrtMask & stencilVals[j]);
+ }
+ }
+ else {
+ for (j = 0; j < QUAD_SIZE; j++) {
+ stencilVals[j] = newstencil[j];
+ }
+ }
+}
+
+
+/**
+ * Do stencil (and depth) testing. Stenciling depends on the outcome of
+ * depth testing.
+ */
+static void
+stencil_test_quad(struct quad_stage *qs, struct quad_header *quad)
+{
+ struct softpipe_context *softpipe = qs->softpipe;
+ struct pipe_surface *ps = softpipe->framebuffer.zsbuf;
+ unsigned func, zFailOp, zPassOp, failOp;
+ ubyte ref, wrtMask, valMask;
+ ubyte stencilVals[QUAD_SIZE];
+ struct softpipe_cached_tile *tile
+ = sp_get_cached_tile(softpipe, softpipe->zsbuf_cache, quad->input.x0, quad->input.y0);
+ uint j;
+ uint face = quad->input.facing;
+
+ if (!softpipe->depth_stencil->stencil[1].enabled) {
+ /* single-sided stencil test, use front (face=0) state */
+ face = 0;
+ }
+
+ /* choose front or back face function, operator, etc */
+ /* XXX we could do these initializations once per primitive */
+ func = softpipe->depth_stencil->stencil[face].func;
+ failOp = softpipe->depth_stencil->stencil[face].fail_op;
+ zFailOp = softpipe->depth_stencil->stencil[face].zfail_op;
+ zPassOp = softpipe->depth_stencil->stencil[face].zpass_op;
+ ref = softpipe->depth_stencil->stencil[face].ref_value;
+ wrtMask = softpipe->depth_stencil->stencil[face].writemask;
+ valMask = softpipe->depth_stencil->stencil[face].valuemask;
+
+ assert(ps); /* shouldn't get here if there's no stencil buffer */
+
+ /* get stencil values from cached tile */
+ switch (ps->format) {
+ case PIPE_FORMAT_S8Z24_UNORM:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = quad->input.x0 % TILE_SIZE + (j & 1);
+ int y = quad->input.y0 % TILE_SIZE + (j >> 1);
+ stencilVals[j] = tile->data.depth32[y][x] >> 24;
+ }
+ break;
+ case PIPE_FORMAT_Z24S8_UNORM:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = quad->input.x0 % TILE_SIZE + (j & 1);
+ int y = quad->input.y0 % TILE_SIZE + (j >> 1);
+ stencilVals[j] = tile->data.depth32[y][x] & 0xff;
+ }
+ break;
+ case PIPE_FORMAT_S8_UNORM:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = quad->input.x0 % TILE_SIZE + (j & 1);
+ int y = quad->input.y0 % TILE_SIZE + (j >> 1);
+ stencilVals[j] = tile->data.stencil8[y][x];
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ /* do the stencil test first */
+ {
+ unsigned passMask, failMask;
+ passMask = do_stencil_test(stencilVals, func, ref, valMask);
+ failMask = quad->inout.mask & ~passMask;
+ quad->inout.mask &= passMask;
+
+ if (failOp != PIPE_STENCIL_OP_KEEP) {
+ apply_stencil_op(stencilVals, failMask, failOp, ref, wrtMask);
+ }
+ }
+
+ if (quad->inout.mask) {
+ /* now the pixels that passed the stencil test are depth tested */
+ if (softpipe->depth_stencil->depth.enabled) {
+ const unsigned origMask = quad->inout.mask;
+
+ sp_depth_test_quad(qs, quad); /* quad->mask is updated */
+
+ /* update stencil buffer values according to z pass/fail result */
+ if (zFailOp != PIPE_STENCIL_OP_KEEP) {
+ const unsigned failMask = origMask & ~quad->inout.mask;
+ apply_stencil_op(stencilVals, failMask, zFailOp, ref, wrtMask);
+ }
+
+ if (zPassOp != PIPE_STENCIL_OP_KEEP) {
+ const unsigned passMask = origMask & quad->inout.mask;
+ apply_stencil_op(stencilVals, passMask, zPassOp, ref, wrtMask);
+ }
+ }
+ else {
+ /* no depth test, apply Zpass operator to stencil buffer values */
+ apply_stencil_op(stencilVals, quad->inout.mask, zPassOp, ref, wrtMask);
+ }
+
+ }
+
+ /* put new stencil values into cached tile */
+ switch (ps->format) {
+ case PIPE_FORMAT_S8Z24_UNORM:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = quad->input.x0 % TILE_SIZE + (j & 1);
+ int y = quad->input.y0 % TILE_SIZE + (j >> 1);
+ uint s8z24 = tile->data.depth32[y][x];
+ s8z24 = (stencilVals[j] << 24) | (s8z24 & 0xffffff);
+ tile->data.depth32[y][x] = s8z24;
+ }
+ break;
+ case PIPE_FORMAT_Z24S8_UNORM:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = quad->input.x0 % TILE_SIZE + (j & 1);
+ int y = quad->input.y0 % TILE_SIZE + (j >> 1);
+ uint z24s8 = tile->data.depth32[y][x];
+ z24s8 = (z24s8 & 0xffffff00) | stencilVals[j];
+ tile->data.depth32[y][x] = z24s8;
+ }
+ break;
+ case PIPE_FORMAT_S8_UNORM:
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int x = quad->input.x0 % TILE_SIZE + (j & 1);
+ int y = quad->input.y0 % TILE_SIZE + (j >> 1);
+ tile->data.stencil8[y][x] = stencilVals[j];
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ if (quad->inout.mask)
+ qs->next->run(qs->next, quad);
+}
+
+
+static void stencil_begin(struct quad_stage *qs)
+{
+ qs->next->begin(qs->next);
+}
+
+
+static void stencil_destroy(struct quad_stage *qs)
+{
+ FREE( qs );
+}
+
+
+struct quad_stage *sp_quad_stencil_test_stage( struct softpipe_context *softpipe )
+{
+ struct quad_stage *stage = CALLOC_STRUCT(quad_stage);
+
+ stage->softpipe = softpipe;
+ stage->begin = stencil_begin;
+ stage->run = stencil_test_quad;
+ stage->destroy = stencil_destroy;
+
+ return stage;
+}
diff --git a/src/gallium/drivers/softpipe/sp_quad_stipple.c b/src/gallium/drivers/softpipe/sp_quad_stipple.c
new file mode 100644
index 0000000000..07162db7b6
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_quad_stipple.c
@@ -0,0 +1,77 @@
+
+/**
+ * quad polygon stipple stage
+ */
+
+#include "sp_context.h"
+#include "sp_quad.h"
+#include "sp_quad_pipe.h"
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+
+
+/**
+ * Apply polygon stipple to quads produced by triangle rasterization
+ */
+static void
+stipple_quad(struct quad_stage *qs, struct quad_header *quad)
+{
+ static const uint bit31 = 1 << 31;
+ static const uint bit30 = 1 << 30;
+
+ if (quad->input.prim == QUAD_PRIM_TRI) {
+ struct softpipe_context *softpipe = qs->softpipe;
+ /* need to invert Y to index into OpenGL's stipple pattern */
+ const int col0 = quad->input.x0 % 32;
+ const int y0 = quad->input.y0;
+ const int y1 = y0 + 1;
+ const uint stipple0 = softpipe->poly_stipple.stipple[y0 % 32];
+ const uint stipple1 = softpipe->poly_stipple.stipple[y1 % 32];
+
+ /* turn off quad mask bits that fail the stipple test */
+ if ((stipple0 & (bit31 >> col0)) == 0)
+ quad->inout.mask &= ~MASK_TOP_LEFT;
+
+ if ((stipple0 & (bit30 >> col0)) == 0)
+ quad->inout.mask &= ~MASK_TOP_RIGHT;
+
+ if ((stipple1 & (bit31 >> col0)) == 0)
+ quad->inout.mask &= ~MASK_BOTTOM_LEFT;
+
+ if ((stipple1 & (bit30 >> col0)) == 0)
+ quad->inout.mask &= ~MASK_BOTTOM_RIGHT;
+
+ if (!quad->inout.mask) {
+ /* all fragments failed stipple test, end of quad pipeline */
+ return;
+ }
+ }
+
+ qs->next->run(qs->next, quad);
+}
+
+
+static void stipple_begin(struct quad_stage *qs)
+{
+ qs->next->begin(qs->next);
+}
+
+
+static void stipple_destroy(struct quad_stage *qs)
+{
+ FREE( qs );
+}
+
+
+struct quad_stage *
+sp_quad_polygon_stipple_stage( struct softpipe_context *softpipe )
+{
+ struct quad_stage *stage = CALLOC_STRUCT(quad_stage);
+
+ stage->softpipe = softpipe;
+ stage->begin = stipple_begin;
+ stage->run = stipple_quad;
+ stage->destroy = stipple_destroy;
+
+ return stage;
+}
diff --git a/src/gallium/drivers/softpipe/sp_query.c b/src/gallium/drivers/softpipe/sp_query.c
new file mode 100644
index 0000000000..379cf4ad06
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_query.c
@@ -0,0 +1,111 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Author:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "draw/draw_context.h"
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "sp_context.h"
+#include "sp_query.h"
+#include "sp_state.h"
+
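+/** An occlusion query simply records the context's global occlusion counter
+ *  at begin_query and end_query; the result is the difference.
+ */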
+struct softpipe_query {
+ uint64_t start;
+ uint64_t end;
+};
+
+
+static struct softpipe_query *softpipe_query( struct pipe_query *p )
+{
+ return (struct softpipe_query *)p;
+}
+
+static struct pipe_query *
+softpipe_create_query(struct pipe_context *pipe,
+ unsigned type)
+{
+ assert(type == PIPE_QUERY_OCCLUSION_COUNTER);
+ return (struct pipe_query *)CALLOC_STRUCT( softpipe_query );
+}
+
+
+static void
+softpipe_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
+{
+ FREE(q);
+}
+
+
+static void
+softpipe_begin_query(struct pipe_context *pipe, struct pipe_query *q)
+{
+ struct softpipe_context *softpipe = softpipe_context( pipe );
+ struct softpipe_query *sq = softpipe_query(q);
+
+ sq->start = softpipe->occlusion_count;
+ softpipe->active_query_count++;
+ softpipe->dirty |= SP_NEW_QUERY;
+}
+
+
+static void
+softpipe_end_query(struct pipe_context *pipe, struct pipe_query *q)
+{
+ struct softpipe_context *softpipe = softpipe_context( pipe );
+ struct softpipe_query *sq = softpipe_query(q);
+
+ softpipe->active_query_count--;
+ sq->end = softpipe->occlusion_count;
+ softpipe->dirty |= SP_NEW_QUERY;
+}
+
+
+static boolean
+softpipe_get_query_result(struct pipe_context *pipe,
+ struct pipe_query *q,
+ boolean wait,
+ uint64_t *result )
+{
+ struct softpipe_query *sq = softpipe_query(q);
+ *result = sq->end - sq->start;
+ return TRUE;
+}
+
+
+void softpipe_init_query_funcs(struct softpipe_context *softpipe )
+{
+ softpipe->pipe.create_query = softpipe_create_query;
+ softpipe->pipe.destroy_query = softpipe_destroy_query;
+ softpipe->pipe.begin_query = softpipe_begin_query;
+ softpipe->pipe.end_query = softpipe_end_query;
+ softpipe->pipe.get_query_result = softpipe_get_query_result;
+}
+
+
diff --git a/src/gallium/drivers/softpipe/sp_query.h b/src/gallium/drivers/softpipe/sp_query.h
new file mode 100644
index 0000000000..05060a4575
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_query.h
@@ -0,0 +1,39 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Author:
+ * Keith Whitwell
+ */
+
+#ifndef SP_QUERY_H
+#define SP_QUERY_H
+
+struct softpipe_context;
+extern void softpipe_init_query_funcs(struct softpipe_context * );
+
+
+#endif /* SP_QUERY_H */
diff --git a/src/gallium/drivers/softpipe/sp_screen.c b/src/gallium/drivers/softpipe/sp_screen.c
new file mode 100644
index 0000000000..6178c4ac7e
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_screen.c
@@ -0,0 +1,190 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "util/u_memory.h"
+#include "util/u_simple_screen.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_screen.h"
+
+#include "sp_texture.h"
+#include "sp_winsys.h"
+#include "sp_screen.h"
+
+
+static const char *
+softpipe_get_vendor(struct pipe_screen *screen)
+{
+ return "Tungsten Graphics, Inc.";
+}
+
+
+static const char *
+softpipe_get_name(struct pipe_screen *screen)
+{
+ return "softpipe";
+}
+
+
+static int
+softpipe_get_param(struct pipe_screen *screen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_TEXTURE_IMAGE_UNITS:
+ return PIPE_MAX_SAMPLERS;
+ case PIPE_CAP_MAX_VERTEX_TEXTURE_UNITS:
+ return PIPE_MAX_SAMPLERS;
+ case PIPE_CAP_NPOT_TEXTURES:
+ return 1;
+ case PIPE_CAP_TWO_SIDED_STENCIL:
+ return 1;
+ case PIPE_CAP_GLSL:
+ return 1;
+ case PIPE_CAP_S3TC:
+ return 0;
+ case PIPE_CAP_ANISOTROPIC_FILTER:
+ return 0;
+ case PIPE_CAP_POINT_SPRITE:
+ return 1;
+ case PIPE_CAP_MAX_RENDER_TARGETS:
+ return PIPE_MAX_COLOR_BUFS;
+ case PIPE_CAP_OCCLUSION_QUERY:
+ return 1;
+ case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
+ return 1;
+ case PIPE_CAP_TEXTURE_MIRROR_REPEAT:
+ return 1;
+ case PIPE_CAP_TEXTURE_SHADOW_MAP:
+ return 1;
+ case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
+ return 13; /* max 4Kx4K */
+ case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
+ return 8; /* max 128x128x128 */
+ case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
+ return 13; /* max 4Kx4K */
+ case PIPE_CAP_TGSI_CONT_SUPPORTED:
+ return 1;
+ case PIPE_CAP_BLEND_EQUATION_SEPARATE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+
+static float
+softpipe_get_paramf(struct pipe_screen *screen, int param)
+{
+ switch (param) {
+ case PIPE_CAP_MAX_LINE_WIDTH:
+ /* fall-through */
+ case PIPE_CAP_MAX_LINE_WIDTH_AA:
+ return 255.0; /* arbitrary */
+ case PIPE_CAP_MAX_POINT_WIDTH:
+ /* fall-through */
+ case PIPE_CAP_MAX_POINT_WIDTH_AA:
+ return 255.0; /* arbitrary */
+ case PIPE_CAP_MAX_TEXTURE_ANISOTROPY:
+ return 16.0; /* not actually significant at this time */
+ case PIPE_CAP_MAX_TEXTURE_LOD_BIAS:
+ return 16.0; /* arbitrary */
+ default:
+ return 0;
+ }
+}
+
+
+/**
+ * Query format support for creating a texture, drawing surface, etc.
+ * \param format the format to test
+ * \param target one of the PIPE_TEXTURE_* targets
+ */
+static boolean
+softpipe_is_format_supported( struct pipe_screen *screen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned tex_usage,
+ unsigned geom_flags )
+{
+ assert(target == PIPE_TEXTURE_1D ||
+ target == PIPE_TEXTURE_2D ||
+ target == PIPE_TEXTURE_3D ||
+ target == PIPE_TEXTURE_CUBE);
+
+ switch(format) {
+ case PIPE_FORMAT_DXT1_RGB:
+ case PIPE_FORMAT_DXT1_RGBA:
+ case PIPE_FORMAT_DXT3_RGBA:
+ case PIPE_FORMAT_DXT5_RGBA:
+ return FALSE;
+ default:
+ return TRUE;
+ }
+}
+
+
+static void
+softpipe_destroy_screen( struct pipe_screen *screen )
+{
+ struct pipe_winsys *winsys = screen->winsys;
+
+ if(winsys->destroy)
+ winsys->destroy(winsys);
+
+ FREE(screen);
+}
+
+
+
+/**
+ * Create a new pipe_screen object.
+ * Note: the screen is a softpipe_screen, which subclasses pipe_screen.
+ */
+struct pipe_screen *
+softpipe_create_screen(struct pipe_winsys *winsys)
+{
+ struct softpipe_screen *screen = CALLOC_STRUCT(softpipe_screen);
+
+ if (!screen)
+ return NULL;
+
+ screen->base.winsys = winsys;
+
+ screen->base.destroy = softpipe_destroy_screen;
+
+ screen->base.get_name = softpipe_get_name;
+ screen->base.get_vendor = softpipe_get_vendor;
+ screen->base.get_param = softpipe_get_param;
+ screen->base.get_paramf = softpipe_get_paramf;
+ screen->base.is_format_supported = softpipe_is_format_supported;
+
+ softpipe_init_screen_texture_funcs(&screen->base);
+ u_simple_screen_init(&screen->base);
+
+ return &screen->base;
+}
diff --git a/src/gallium/drivers/softpipe/sp_screen.h b/src/gallium/drivers/softpipe/sp_screen.h
new file mode 100644
index 0000000000..3d4bfd3e84
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_screen.h
@@ -0,0 +1,58 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef SP_SCREEN_H
+#define SP_SCREEN_H
+
+#include "pipe/p_screen.h"
+#include "pipe/p_defines.h"
+
+
+
+struct softpipe_screen {
+ struct pipe_screen base;
+
+ /* Increments whenever textures are modified. Contexts can track
+ * this.
+ */
+ unsigned timestamp;
+};
+
+
+
+
+static INLINE struct softpipe_screen *
+softpipe_screen( struct pipe_screen *pipe )
+{
+ return (struct softpipe_screen *)pipe;
+}
+
+
+#endif /* SP_SCREEN_H */
diff --git a/src/gallium/drivers/softpipe/sp_setup.c b/src/gallium/drivers/softpipe/sp_setup.c
new file mode 100644
index 0000000000..de3ae3c369
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_setup.c
@@ -0,0 +1,1550 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \brief Primitive rasterization/rendering (points, lines, triangles)
+ *
+ * \author Keith Whitwell <keith@tungstengraphics.com>
+ * \author Brian Paul
+ */
+
+#include "sp_context.h"
+#include "sp_prim_setup.h"
+#include "sp_quad.h"
+#include "sp_quad_pipe.h"
+#include "sp_setup.h"
+#include "sp_state.h"
+#include "draw/draw_context.h"
+#include "draw/draw_private.h"
+#include "draw/draw_vertex.h"
+#include "pipe/p_shader_tokens.h"
+#include "pipe/p_thread.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+
+#define DEBUG_VERTS 0
+#define DEBUG_FRAGS 0
+
+/**
+ * Triangle edge info
+ */
+struct edge {
+ float dx; /**< X(v1) - X(v0), used only during setup */
+ float dy; /**< Y(v1) - Y(v0), used only during setup */
+ float dxdy; /**< dx/dy */
+ float sx, sy; /**< first sample point coord */
+ int lines; /**< number of lines on this edge */
+};
+
+#if SP_NUM_QUAD_THREADS > 1
+
+/* Set to 1 if you want other threads to be instantly
+ * notified of pending jobs.
+ */
+#define INSTANT_NOTEMPTY_NOTIFY 0
+
+struct thread_info
+{
+ struct setup_context *setup;
+ uint id;
+ pipe_thread handle;
+};
+
+struct quad_job;
+
+typedef void (* quad_job_routine)( struct setup_context *setup, uint thread, struct quad_job *job );
+
+struct quad_job
+{
+ struct quad_header_input input;
+ struct quad_header_inout inout;
+ quad_job_routine routine;
+};
+
+#define NUM_QUAD_JOBS 64
+
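+/* Fixed-size ring of pending quad jobs, shared between the setup code and
+ * the quad threads.  One slot is always left unused so that a full ring
+ * can be distinguished from an empty one.
+ */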
+struct quad_job_que
+{
+ struct quad_job jobs[NUM_QUAD_JOBS];
+ uint first;
+ uint last;
+ pipe_mutex que_mutex;
+ pipe_condvar que_notfull_condvar;
+ pipe_condvar que_notempty_condvar;
+ uint jobs_added;
+ uint jobs_done;
+ pipe_condvar que_done_condvar;
+};
+
+static void
+add_quad_job( struct quad_job_que *que, struct quad_header *quad, quad_job_routine routine )
+{
+#if INSTANT_NOTEMPTY_NOTIFY
+ boolean empty;
+#endif
+
+ /* Wait for empty slot, see if the que is empty.
+ */
+ pipe_mutex_lock( que->que_mutex );
+ while ((que->last + 1) % NUM_QUAD_JOBS == que->first) {
+#if !INSTANT_NOTEMPTY_NOTIFY
+ pipe_condvar_broadcast( que->que_notempty_condvar );
+#endif
+ pipe_condvar_wait( que->que_notfull_condvar, que->que_mutex );
+ }
+#if INSTANT_NOTEMPTY_NOTIFY
+ empty = que->last == que->first;
+#endif
+ que->jobs_added++;
+ pipe_mutex_unlock( que->que_mutex );
+
+ /* Submit new job.
+ */
+ que->jobs[que->last].input = quad->input;
+ que->jobs[que->last].inout = quad->inout;
+ que->jobs[que->last].routine = routine;
+ que->last = (que->last + 1) % NUM_QUAD_JOBS;
+
+#if INSTANT_NOTEMPTY_NOTIFY
+ /* If the que was empty, notify consumers there's a job to be done.
+ */
+ if (empty) {
+ pipe_mutex_lock( que->que_mutex );
+ pipe_condvar_broadcast( que->que_notempty_condvar );
+ pipe_mutex_unlock( que->que_mutex );
+ }
+#endif
+}
+
+#endif
+
+/**
+ * Triangle setup info (derived from draw_stage).
+ * Also used for line drawing (taking some liberties).
+ */
+struct setup_context {
+ struct softpipe_context *softpipe;
+
+ /* Vertices are just an array of floats making up each attribute in
+ * turn. Currently fixed at 4 floats, but should change in time.
+ * Codegen will help cope with this.
+ */
+ const float (*vmax)[4];
+ const float (*vmid)[4];
+ const float (*vmin)[4];
+ const float (*vprovoke)[4];
+
+ struct edge ebot;
+ struct edge etop;
+ struct edge emaj;
+
+ float oneoverarea;
+
+ struct tgsi_interp_coef coef[PIPE_MAX_SHADER_INPUTS];
+ struct tgsi_interp_coef posCoef; /* For Z, W */
+ struct quad_header quad;
+
+#if SP_NUM_QUAD_THREADS > 1
+ struct quad_job_que que;
+ struct thread_info threads[SP_NUM_QUAD_THREADS];
+#endif
+
+ struct {
+ int left[2]; /**< [0] = row0, [1] = row1 */
+ int right[2];
+ int y;
+ unsigned y_flags;
+ unsigned mask; /**< mask of MASK_BOTTOM/TOP_LEFT/RIGHT bits */
+ } span;
+
+#if DEBUG_FRAGS
+ uint numFragsEmitted; /**< per primitive */
+ uint numFragsWritten; /**< per primitive */
+#endif
+
+ unsigned winding; /* which winding to cull */
+};
+
+#if SP_NUM_QUAD_THREADS > 1
+
+static PIPE_THREAD_ROUTINE( quad_thread, param )
+{
+ struct thread_info *info = (struct thread_info *) param;
+ struct quad_job_que *que = &info->setup->que;
+
+ for (;;) {
+ struct quad_job job;
+ boolean full;
+
+ /* Wait for an available job.
+ */
+ pipe_mutex_lock( que->que_mutex );
+ while (que->last == que->first)
+ pipe_condvar_wait( que->que_notempty_condvar, que->que_mutex );
+
+ /* See if the que is full.
+ */
+ full = (que->last + 1) % NUM_QUAD_JOBS == que->first;
+
+ /* Take a job and remove it from que.
+ */
+ job = que->jobs[que->first];
+ que->first = (que->first + 1) % NUM_QUAD_JOBS;
+
+ /* Notify the producer if the que is not full.
+ */
+ if (full)
+ pipe_condvar_signal( que->que_notfull_condvar );
+ pipe_mutex_unlock( que->que_mutex );
+
+ job.routine( info->setup, info->id, &job );
+
+ /* Notify the producer if that's the last finished job.
+ */
+ pipe_mutex_lock( que->que_mutex );
+ que->jobs_done++;
+ if (que->jobs_added == que->jobs_done)
+ pipe_condvar_signal( que->que_done_condvar );
+ pipe_mutex_unlock( que->que_mutex );
+ }
+
+ return NULL;
+}
+
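+/* Wake the quad threads (if needed) and block until every queued job has
+ * been picked up and finished.
+ */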
+#define WAIT_FOR_COMPLETION(setup) \
+ do {\
+ pipe_mutex_lock( setup->que.que_mutex );\
+ if (!INSTANT_NOTEMPTY_NOTIFY)\
+ pipe_condvar_broadcast( setup->que.que_notempty_condvar );\
+ while (setup->que.jobs_added != setup->que.jobs_done)\
+ pipe_condvar_wait( setup->que.que_done_condvar, setup->que.que_mutex );\
+ pipe_mutex_unlock( setup->que.que_mutex );\
+ } while (0)
+
+#else
+
+#define WAIT_FOR_COMPLETION(setup) ((void) 0)
+
+#endif
+
+
+
+/**
+ * Do triangle cull test using tri determinant (sign indicates orientation)
+ * \return true if triangle is to be culled.
+ */
+static INLINE boolean
+cull_tri(const struct setup_context *setup, float det)
+{
+ if (det != 0) {
+ /* if det < 0 then Z points toward the camera and the triangle is
+ * counter-clockwise winding.
+ */
+ unsigned winding = (det < 0) ? PIPE_WINDING_CCW : PIPE_WINDING_CW;
+
+ if ((winding & setup->winding) == 0)
+ return FALSE;
+ }
+
+ /* Culled:
+ */
+ return TRUE;
+}
+
+
+
+/**
+ * Clip setup->quad against the scissor/surface bounds.
+ */
+static INLINE void
+quad_clip( struct setup_context *setup, struct quad_header *quad )
+{
+ const struct pipe_scissor_state *cliprect = &setup->softpipe->cliprect;
+ const int minx = (int) cliprect->minx;
+ const int maxx = (int) cliprect->maxx;
+ const int miny = (int) cliprect->miny;
+ const int maxy = (int) cliprect->maxy;
+
+ if (quad->input.x0 >= maxx ||
+ quad->input.y0 >= maxy ||
+ quad->input.x0 + 1 < minx ||
+ quad->input.y0 + 1 < miny) {
+ /* totally clipped */
+ quad->inout.mask = 0x0;
+ return;
+ }
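+ /* partially clipped: mask off the individual fragments that lie outside
+ * the cliprect
+ */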
+ if (quad->input.x0 < minx)
+ quad->inout.mask &= (MASK_BOTTOM_RIGHT | MASK_TOP_RIGHT);
+ if (quad->input.y0 < miny)
+ quad->inout.mask &= (MASK_BOTTOM_LEFT | MASK_BOTTOM_RIGHT);
+ if (quad->input.x0 == maxx - 1)
+ quad->inout.mask &= (MASK_BOTTOM_LEFT | MASK_TOP_LEFT);
+ if (quad->input.y0 == maxy - 1)
+ quad->inout.mask &= (MASK_TOP_LEFT | MASK_TOP_RIGHT);
+}
+
+
+/**
+ * Emit a quad (pass to next stage) with clipping.
+ */
+static INLINE void
+clip_emit_quad( struct setup_context *setup, struct quad_header *quad, uint thread )
+{
+ quad_clip( setup, quad );
+ if (quad->inout.mask) {
+ struct softpipe_context *sp = setup->softpipe;
+
+ sp->quad[thread].first->run( sp->quad[thread].first, quad );
+ }
+}
+
+#if SP_NUM_QUAD_THREADS > 1
+
+static void
+clip_emit_quad_job( struct setup_context *setup, uint thread, struct quad_job *job )
+{
+ struct quad_header quad;
+
+ quad.input = job->input;
+ quad.inout = job->inout;
+ quad.coef = setup->quad.coef;
+ quad.posCoef = setup->quad.posCoef;
+ quad.nr_attrs = setup->quad.nr_attrs;
+ clip_emit_quad( setup, &quad, thread );
+}
+
+#define CLIP_EMIT_QUAD(setup) add_quad_job( &setup->que, &setup->quad, clip_emit_quad_job )
+
+#else
+
+#define CLIP_EMIT_QUAD(setup) clip_emit_quad( setup, &setup->quad, 0 )
+
+#endif
+
+/**
+ * Emit a quad (pass to next stage). No clipping is done.
+ */
+static INLINE void
+emit_quad( struct setup_context *setup, struct quad_header *quad, uint thread )
+{
+ struct softpipe_context *sp = setup->softpipe;
+#if DEBUG_FRAGS
+ uint mask = quad->inout.mask;
+#endif
+
+#if DEBUG_FRAGS
+ if (mask & 1) setup->numFragsEmitted++;
+ if (mask & 2) setup->numFragsEmitted++;
+ if (mask & 4) setup->numFragsEmitted++;
+ if (mask & 8) setup->numFragsEmitted++;
+#endif
+ sp->quad[thread].first->run( sp->quad[thread].first, quad );
+#if DEBUG_FRAGS
+ mask = quad->inout.mask;
+ if (mask & 1) setup->numFragsWritten++;
+ if (mask & 2) setup->numFragsWritten++;
+ if (mask & 4) setup->numFragsWritten++;
+ if (mask & 8) setup->numFragsWritten++;
+#endif
+}
+
+#if SP_NUM_QUAD_THREADS > 1
+
+static void
+emit_quad_job( struct setup_context *setup, uint thread, struct quad_job *job )
+{
+ struct quad_header quad;
+
+ quad.input = job->input;
+ quad.inout = job->inout;
+ quad.coef = setup->quad.coef;
+ quad.posCoef = setup->quad.posCoef;
+ quad.nr_attrs = setup->quad.nr_attrs;
+ emit_quad( setup, &quad, thread );
+}
+
+#define EMIT_QUAD(setup,x,y,mask) do {\
+ setup->quad.input.x0 = x;\
+ setup->quad.input.y0 = y;\
+ setup->quad.inout.mask = mask;\
+ add_quad_job( &setup->que, &setup->quad, emit_quad_job );\
+ } while (0)
+
+#else
+
+#define EMIT_QUAD(setup,x,y,mask) do {\
+ setup->quad.input.x0 = x;\
+ setup->quad.input.y0 = y;\
+ setup->quad.inout.mask = mask;\
+ emit_quad( setup, &setup->quad, 0 );\
+ } while (0)
+
+#endif
+
+/**
+ * Given an X or Y coordinate, return the block/quad coordinate that it
+ * belongs to.
+ */
+static INLINE int block( int x )
+{
+ return x & ~1;
+}
+
+
+/**
+ * Render a horizontal span of quads
+ */
+static void flush_spans( struct setup_context *setup )
+{
+ const int xleft0 = setup->span.left[0];
+ const int xleft1 = setup->span.left[1];
+ const int xright0 = setup->span.right[0];
+ const int xright1 = setup->span.right[1];
+ int minleft, maxright;
+ int x;
+
+ switch (setup->span.y_flags) {
+ case 0x3:
+ /* both odd and even lines written (both quad rows) */
+ minleft = block(MIN2(xleft0, xleft1));
+ maxright = block(MAX2(xright0, xright1));
+ for (x = minleft; x <= maxright; x += 2) {
+ /* determine which of the four pixels is inside the span bounds */
+ uint mask = 0x0;
+ if (x >= xleft0 && x < xright0)
+ mask |= MASK_TOP_LEFT;
+ if (x >= xleft1 && x < xright1)
+ mask |= MASK_BOTTOM_LEFT;
+ if (x+1 >= xleft0 && x+1 < xright0)
+ mask |= MASK_TOP_RIGHT;
+ if (x+1 >= xleft1 && x+1 < xright1)
+ mask |= MASK_BOTTOM_RIGHT;
+ if (mask)
+ EMIT_QUAD( setup, x, setup->span.y, mask );
+ }
+ break;
+
+ case 0x1:
+ /* only even line written (quad top row) */
+ minleft = block(xleft0);
+ maxright = block(xright0);
+ for (x = minleft; x <= maxright; x += 2) {
+ uint mask = 0x0;
+ if (x >= xleft0 && x < xright0)
+ mask |= MASK_TOP_LEFT;
+ if (x+1 >= xleft0 && x+1 < xright0)
+ mask |= MASK_TOP_RIGHT;
+ if (mask)
+ EMIT_QUAD( setup, x, setup->span.y, mask );
+ }
+ break;
+
+ case 0x2:
+ /* only odd line written (quad bottom row) */
+ minleft = block(xleft1);
+ maxright = block(xright1);
+ for (x = minleft; x <= maxright; x += 2) {
+ uint mask = 0x0;
+ if (x >= xleft1 && x < xright1)
+ mask |= MASK_BOTTOM_LEFT;
+ if (x+1 >= xleft1 && x+1 < xright1)
+ mask |= MASK_BOTTOM_RIGHT;
+ if (mask)
+ EMIT_QUAD( setup, x, setup->span.y, mask );
+ }
+ break;
+
+ default:
+ return;
+ }
+
+ setup->span.y = 0;
+ setup->span.y_flags = 0;
+ setup->span.right[0] = 0;
+ setup->span.right[1] = 0;
+}
+
+
+#if DEBUG_VERTS
+static void print_vertex(const struct setup_context *setup,
+ const float (*v)[4])
+{
+ int i;
+ debug_printf(" Vertex: (%p)\n", v);
+ for (i = 0; i < setup->quad.nr_attrs; i++) {
+ debug_printf(" %d: %f %f %f %f\n", i,
+ v[i][0], v[i][1], v[i][2], v[i][3]);
+ if (util_is_inf_or_nan(v[i][0])) {
+ debug_printf(" NaN!\n");
+ }
+ }
+}
+#endif
+
+/**
+ * Sort the vertices from top to bottom order, setting up the triangle
+ * edge fields (ebot, emaj, etop).
+ * \return FALSE if coords are inf/nan (cull the tri), TRUE otherwise
+ */
+static boolean setup_sort_vertices( struct setup_context *setup,
+ float det,
+ const float (*v0)[4],
+ const float (*v1)[4],
+ const float (*v2)[4] )
+{
+ setup->vprovoke = v2;
+
+ /* determine bottom to top order of vertices */
+ {
+ float y0 = v0[0][1];
+ float y1 = v1[0][1];
+ float y2 = v2[0][1];
+ if (y0 <= y1) {
+ if (y1 <= y2) {
+ /* y0<=y1<=y2 */
+ setup->vmin = v0;
+ setup->vmid = v1;
+ setup->vmax = v2;
+ }
+ else if (y2 <= y0) {
+ /* y2<=y0<=y1 */
+ setup->vmin = v2;
+ setup->vmid = v0;
+ setup->vmax = v1;
+ }
+ else {
+ /* y0<=y2<=y1 */
+ setup->vmin = v0;
+ setup->vmid = v2;
+ setup->vmax = v1;
+ }
+ }
+ else {
+ if (y0 <= y2) {
+ /* y1<=y0<=y2 */
+ setup->vmin = v1;
+ setup->vmid = v0;
+ setup->vmax = v2;
+ }
+ else if (y2 <= y1) {
+ /* y2<=y1<=y0 */
+ setup->vmin = v2;
+ setup->vmid = v1;
+ setup->vmax = v0;
+ }
+ else {
+ /* y1<=y2<=y0 */
+ setup->vmin = v1;
+ setup->vmid = v2;
+ setup->vmax = v0;
+ }
+ }
+ }
+
+ setup->ebot.dx = setup->vmid[0][0] - setup->vmin[0][0];
+ setup->ebot.dy = setup->vmid[0][1] - setup->vmin[0][1];
+ setup->emaj.dx = setup->vmax[0][0] - setup->vmin[0][0];
+ setup->emaj.dy = setup->vmax[0][1] - setup->vmin[0][1];
+ setup->etop.dx = setup->vmax[0][0] - setup->vmid[0][0];
+ setup->etop.dy = setup->vmax[0][1] - setup->vmid[0][1];
+
+ /*
+ * Compute triangle's area. Use 1/area to compute partial
+ * derivatives of attributes later.
+ *
+ * The area will be the same as prim->det, but the sign may be
+ * different depending on how the vertices get sorted above.
+ *
+ * To determine whether the primitive is front or back facing we
+ * use the prim->det value because its sign is correct.
+ */
+ {
+ const float area = (setup->emaj.dx * setup->ebot.dy -
+ setup->ebot.dx * setup->emaj.dy);
+
+ setup->oneoverarea = 1.0f / area;
+
+ /*
+ debug_printf("%s one-over-area %f area %f det %f\n",
+ __FUNCTION__, setup->oneoverarea, area, det );
+ */
+ if (util_is_inf_or_nan(setup->oneoverarea))
+ return FALSE;
+ }
+
+ /* We need to know if this is a front or back-facing triangle for:
+ * - the GLSL gl_FrontFacing fragment attribute (bool)
+ * - two-sided stencil test
+ */
+ setup->quad.input.facing = (det > 0.0) ^ (setup->softpipe->rasterizer->front_winding == PIPE_WINDING_CW);
+
+ return TRUE;
+}
+
+
+/**
+ * Compute a0 for a constant-valued coefficient (GL_FLAT shading).
+ * The value comes from vertex[vertSlot][i].
+ * The result will be put into coef->a0[i].
+ * \param vertSlot which vertex attribute slot
+ * \param i which component of the slot (0..3)
+ */
+static void const_coeff( struct setup_context *setup,
+ struct tgsi_interp_coef *coef,
+ uint vertSlot, uint i)
+{
+ assert(i <= 3);
+
+ coef->dadx[i] = 0;
+ coef->dady[i] = 0;
+
+ /* need provoking vertex info!
+ */
+ coef->a0[i] = setup->vprovoke[vertSlot][i];
+}
+
+
+/**
+ * Compute a0, dadx and dady for a linearly interpolated coefficient,
+ * for a triangle.
+ */
+static void tri_linear_coeff( struct setup_context *setup,
+ struct tgsi_interp_coef *coef,
+ uint vertSlot, uint i)
+{
+ float botda = setup->vmid[vertSlot][i] - setup->vmin[vertSlot][i];
+ float majda = setup->vmax[vertSlot][i] - setup->vmin[vertSlot][i];
+ float a = setup->ebot.dy * majda - botda * setup->emaj.dy;
+ float b = setup->emaj.dx * botda - majda * setup->ebot.dx;
+ float dadx = a * setup->oneoverarea;
+ float dady = b * setup->oneoverarea;
+
+ assert(i <= 3);
+
+ coef->dadx[i] = dadx;
+ coef->dady[i] = dady;
+
+ /* calculate a0 as the value which would be sampled for the
+ * fragment at (0,0), taking into account that we want to sample at
+ * pixel centers, in other words (0.5, 0.5).
+ *
+ * this is neat but unfortunately not a good way to do things for
+ * triangles with very large values of dadx or dady as it will
+ * result in the subtraction and re-addition from a0 of a very
+ * large number, which means we'll end up losing a lot of the
+ * fractional bits and precision from a0. the way to fix this is
+ * to define a0 as the sample at a pixel center somewhere near vmin
+ * instead - i'll switch to this later.
+ */
+ coef->a0[i] = (setup->vmin[vertSlot][i] -
+ (dadx * (setup->vmin[0][0] - 0.5f) +
+ dady * (setup->vmin[0][1] - 0.5f)));
+
+ /*
+ debug_printf("attr[%d].%c: %f dx:%f dy:%f\n",
+ slot, "xyzw"[i],
+ setup->coef[slot].a0[i],
+ setup->coef[slot].dadx[i],
+ setup->coef[slot].dady[i]);
+ */
+}
+
+
+/**
+ * Compute a0, dadx and dady for a perspective-corrected interpolant,
+ * for a triangle.
+ * We basically multiply the vertex value by 1/w before computing
+ * the plane coefficients (a0, dadx, dady).
+ * Later, when we compute the value at a particular fragment position we'll
+ * divide the interpolated value by the interpolated W at that fragment.
+ */
+static void tri_persp_coeff( struct setup_context *setup,
+ struct tgsi_interp_coef *coef,
+ uint vertSlot, uint i)
+{
+ /* premultiply by 1/w (v[0][3] is always W):
+ */
+ float mina = setup->vmin[vertSlot][i] * setup->vmin[0][3];
+ float mida = setup->vmid[vertSlot][i] * setup->vmid[0][3];
+ float maxa = setup->vmax[vertSlot][i] * setup->vmax[0][3];
+ float botda = mida - mina;
+ float majda = maxa - mina;
+ float a = setup->ebot.dy * majda - botda * setup->emaj.dy;
+ float b = setup->emaj.dx * botda - majda * setup->ebot.dx;
+ float dadx = a * setup->oneoverarea;
+ float dady = b * setup->oneoverarea;
+
+ /*
+ debug_printf("tri persp %d,%d: %f %f %f\n", vertSlot, i,
+ setup->vmin[vertSlot][i],
+ setup->vmid[vertSlot][i],
+ setup->vmax[vertSlot][i]
+ );
+ */
+ assert(i <= 3);
+
+ coef->dadx[i] = dadx;
+ coef->dady[i] = dady;
+ coef->a0[i] = (mina -
+ (dadx * (setup->vmin[0][0] - 0.5f) +
+ dady * (setup->vmin[0][1] - 0.5f)));
+}
+
+
+/**
+ * Special coefficient setup for gl_FragCoord.
+ * X and Y are trivial, though Y has to be inverted for OpenGL.
+ * Z and W are copied from posCoef which should have already been computed.
+ * We could do a bit less work if we'd examine gl_FragCoord's swizzle mask.
+ */
+static void
+setup_fragcoord_coeff(struct setup_context *setup, uint slot)
+{
+ /*X*/
+ setup->coef[slot].a0[0] = 0;
+ setup->coef[slot].dadx[0] = 1.0;
+ setup->coef[slot].dady[0] = 0.0;
+ /*Y*/
+ setup->coef[slot].a0[1] = 0.0;
+ setup->coef[slot].dadx[1] = 0.0;
+ setup->coef[slot].dady[1] = 1.0;
+ /*Z*/
+ setup->coef[slot].a0[2] = setup->posCoef.a0[2];
+ setup->coef[slot].dadx[2] = setup->posCoef.dadx[2];
+ setup->coef[slot].dady[2] = setup->posCoef.dady[2];
+ /*W*/
+ setup->coef[slot].a0[3] = setup->posCoef.a0[3];
+ setup->coef[slot].dadx[3] = setup->posCoef.dadx[3];
+ setup->coef[slot].dady[3] = setup->posCoef.dady[3];
+}
+
+
+
+/**
+ * Compute the setup->coef[] array dadx, dady, a0 values.
+ * Must be called after setup->vmin,vmid,vmax,vprovoke are initialized.
+ */
+static void setup_tri_coefficients( struct setup_context *setup )
+{
+ struct softpipe_context *softpipe = setup->softpipe;
+ const struct sp_fragment_shader *spfs = softpipe->fs;
+ const struct vertex_info *vinfo = softpipe_get_vertex_info(softpipe);
+ uint fragSlot;
+
+ /* z and w are done by linear interpolation:
+ */
+ tri_linear_coeff(setup, &setup->posCoef, 0, 2);
+ tri_linear_coeff(setup, &setup->posCoef, 0, 3);
+
+ /* setup interpolation for all the remaining attributes:
+ */
+ for (fragSlot = 0; fragSlot < spfs->info.num_inputs; fragSlot++) {
+ const uint vertSlot = vinfo->attrib[fragSlot].src_index;
+ uint j;
+
+ switch (vinfo->attrib[fragSlot].interp_mode) {
+ case INTERP_CONSTANT:
+ for (j = 0; j < NUM_CHANNELS; j++)
+ const_coeff(setup, &setup->coef[fragSlot], vertSlot, j);
+ break;
+ case INTERP_LINEAR:
+ for (j = 0; j < NUM_CHANNELS; j++)
+ tri_linear_coeff(setup, &setup->coef[fragSlot], vertSlot, j);
+ break;
+ case INTERP_PERSPECTIVE:
+ for (j = 0; j < NUM_CHANNELS; j++)
+ tri_persp_coeff(setup, &setup->coef[fragSlot], vertSlot, j);
+ break;
+ case INTERP_POS:
+ setup_fragcoord_coeff(setup, fragSlot);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (spfs->info.input_semantic_name[fragSlot] == TGSI_SEMANTIC_FACE) {
+ setup->coef[fragSlot].a0[0] = 1.0f - setup->quad.input.facing;
+ setup->coef[fragSlot].dadx[0] = 0.0;
+ setup->coef[fragSlot].dady[0] = 0.0;
+ }
+ }
+}
+
+
+
+static void setup_tri_edges( struct setup_context *setup )
+{
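+ /* bias the vertex coords by half a pixel so the edges are walked at
+ * fragment sample positions (pixel centers).
+ */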
+ float vmin_x = setup->vmin[0][0] + 0.5f;
+ float vmid_x = setup->vmid[0][0] + 0.5f;
+
+ float vmin_y = setup->vmin[0][1] - 0.5f;
+ float vmid_y = setup->vmid[0][1] - 0.5f;
+ float vmax_y = setup->vmax[0][1] - 0.5f;
+
+ setup->emaj.sy = ceilf(vmin_y);
+ setup->emaj.lines = (int) ceilf(vmax_y - setup->emaj.sy);
+ setup->emaj.dxdy = setup->emaj.dx / setup->emaj.dy;
+ setup->emaj.sx = vmin_x + (setup->emaj.sy - vmin_y) * setup->emaj.dxdy;
+
+ setup->etop.sy = ceilf(vmid_y);
+ setup->etop.lines = (int) ceilf(vmax_y - setup->etop.sy);
+ setup->etop.dxdy = setup->etop.dx / setup->etop.dy;
+ setup->etop.sx = vmid_x + (setup->etop.sy - vmid_y) * setup->etop.dxdy;
+
+ setup->ebot.sy = ceilf(vmin_y);
+ setup->ebot.lines = (int) ceilf(vmid_y - setup->ebot.sy);
+ setup->ebot.dxdy = setup->ebot.dx / setup->ebot.dy;
+ setup->ebot.sx = vmin_x + (setup->ebot.sy - vmin_y) * setup->ebot.dxdy;
+}
+
+
+/**
+ * Render the upper or lower half of a triangle.
+ * Scissoring/cliprect is applied here too.
+ */
+static void subtriangle( struct setup_context *setup,
+ struct edge *eleft,
+ struct edge *eright,
+ unsigned lines )
+{
+ const struct pipe_scissor_state *cliprect = &setup->softpipe->cliprect;
+ const int minx = (int) cliprect->minx;
+ const int maxx = (int) cliprect->maxx;
+ const int miny = (int) cliprect->miny;
+ const int maxy = (int) cliprect->maxy;
+ int y, start_y, finish_y;
+ int sy = (int)eleft->sy;
+
+ assert((int)eleft->sy == (int) eright->sy);
+
+ /* clip top/bottom */
+ start_y = sy;
+ finish_y = sy + lines;
+
+ if (start_y < miny)
+ start_y = miny;
+
+ if (finish_y > maxy)
+ finish_y = maxy;
+
+ start_y -= sy;
+ finish_y -= sy;
+
+ /*
+ debug_printf("%s %d %d\n", __FUNCTION__, start_y, finish_y);
+ */
+
+ for (y = start_y; y < finish_y; y++) {
+
+ /* avoid accumulating adds as floats don't have the precision to
+ * accurately iterate large triangle edges that way. luckily we
+ * can just multiply these days.
+ *
+ * this is all drowned out by the attribute interpolation anyway.
+ */
+ int left = (int)(eleft->sx + y * eleft->dxdy);
+ int right = (int)(eright->sx + y * eright->dxdy);
+
+ /* clip left/right */
+ if (left < minx)
+ left = minx;
+ if (right > maxx)
+ right = maxx;
+
+ if (left < right) {
+ int _y = sy + y;
+ if (block(_y) != setup->span.y) {
+ flush_spans(setup);
+ setup->span.y = block(_y);
+ }
+
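+ /* record the span in row 0 (even y) or row 1 (odd y) of the
+ * current two-row span block.
+ */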
+ setup->span.left[_y&1] = left;
+ setup->span.right[_y&1] = right;
+ setup->span.y_flags |= 1<<(_y&1);
+ }
+ }
+
+
+ /* save the values so that emaj can be restarted:
+ */
+ eleft->sx += lines * eleft->dxdy;
+ eright->sx += lines * eright->dxdy;
+ eleft->sy += lines;
+ eright->sy += lines;
+}
+
+
+/**
+ * Recalculate the primitive's determinant.  This is needed because we
+ * don't get this information through the vbuf_render interface, so we
+ * must calculate it here.
+ */
+static float
+calc_det( const float (*v0)[4],
+ const float (*v1)[4],
+ const float (*v2)[4] )
+{
+ /* edge vectors e = v0 - v2, f = v1 - v2 */
+ const float ex = v0[0][0] - v2[0][0];
+ const float ey = v0[0][1] - v2[0][1];
+ const float fx = v1[0][0] - v2[0][0];
+ const float fy = v1[0][1] - v2[0][1];
+
+ /* det = cross(e,f).z */
+ return ex * fy - ey * fx;
+}
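+
+/*
+ * Worked example (illustrative): for v0=(0,0), v1=(4,0), v2=(0,4) the
+ * edge vectors are e=(0,-4) and f=(4,-4), so det = 0*(-4) - (-4)*4 = 16.
+ * The sign of det encodes the triangle's screen-space winding and its
+ * magnitude is twice the triangle's area; cull_tri() and
+ * setup_sort_vertices() below both consume this value.
+ */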
+
+
+/**
+ * Do setup for triangle rasterization, then render the triangle.
+ */
+void setup_tri( struct setup_context *setup,
+ const float (*v0)[4],
+ const float (*v1)[4],
+ const float (*v2)[4] )
+{
+ float det;
+
+#if DEBUG_VERTS
+ debug_printf("Setup triangle:\n");
+ print_vertex(setup, v0);
+ print_vertex(setup, v1);
+ print_vertex(setup, v2);
+#endif
+
+ if (setup->softpipe->no_rast)
+ return;
+
+ det = calc_det(v0, v1, v2);
+ /*
+ debug_printf("%s\n", __FUNCTION__ );
+ */
+
+#if DEBUG_FRAGS
+ setup->numFragsEmitted = 0;
+ setup->numFragsWritten = 0;
+#endif
+
+ if (cull_tri( setup, det ))
+ return;
+
+ if (!setup_sort_vertices( setup, det, v0, v1, v2 ))
+ return;
+ setup_tri_coefficients( setup );
+ setup_tri_edges( setup );
+
+ setup->quad.input.prim = QUAD_PRIM_TRI;
+
+ setup->span.y = 0;
+ setup->span.y_flags = 0;
+ setup->span.right[0] = 0;
+ setup->span.right[1] = 0;
+ /* setup->span.z_mode = tri_z_mode( setup->ctx ); */
+
+ /* init_constant_attribs( setup ); */
+
+ if (setup->oneoverarea < 0.0) {
+ /* emaj on left:
+ */
+ subtriangle( setup, &setup->emaj, &setup->ebot, setup->ebot.lines );
+ subtriangle( setup, &setup->emaj, &setup->etop, setup->etop.lines );
+ }
+ else {
+ /* emaj on right:
+ */
+ subtriangle( setup, &setup->ebot, &setup->emaj, setup->ebot.lines );
+ subtriangle( setup, &setup->etop, &setup->emaj, setup->etop.lines );
+ }
+
+ flush_spans( setup );
+
+ WAIT_FOR_COMPLETION(setup);
+
+#if DEBUG_FRAGS
+ printf("Tri: %u frags emitted, %u written\n",
+ setup->numFragsEmitted,
+ setup->numFragsWritten);
+#endif
+}
+
+
+
+/**
+ * Compute a0, dadx and dady for a linearly interpolated coefficient,
+ * for a line.
+ */
+static void
+line_linear_coeff(const struct setup_context *setup,
+ struct tgsi_interp_coef *coef,
+ uint vertSlot, uint i)
+{
+ const float da = setup->vmax[vertSlot][i] - setup->vmin[vertSlot][i];
+ const float dadx = da * setup->emaj.dx * setup->oneoverarea;
+ const float dady = da * setup->emaj.dy * setup->oneoverarea;
+ coef->dadx[i] = dadx;
+ coef->dady[i] = dady;
+ coef->a0[i] = (setup->vmin[vertSlot][i] -
+ (dadx * (setup->vmin[0][0] - 0.5f) +
+ dady * (setup->vmin[0][1] - 0.5f)));
+}
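+
+/*
+ * Informally, each coefficient triple describes a plane in window space:
+ * attr(x, y) = a0 + dadx * x + dady * y.  The (vmin - 0.5) terms above
+ * anchor that plane so that evaluating it at the first vertex's position
+ * (shifted by the half-pixel sampling offset) reproduces the vertex's
+ * attribute value exactly.
+ */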
+
+
+/**
+ * Compute a0, dadx and dady for a perspective-corrected interpolant,
+ * for a line.
+ */
+static void
+line_persp_coeff(const struct setup_context *setup,
+ struct tgsi_interp_coef *coef,
+ uint vertSlot, uint i)
+{
+ /* XXX double-check/verify this arithmetic */
+ const float a0 = setup->vmin[vertSlot][i] * setup->vmin[0][3];
+ const float a1 = setup->vmax[vertSlot][i] * setup->vmax[0][3];
+ const float da = a1 - a0;
+ const float dadx = da * setup->emaj.dx * setup->oneoverarea;
+ const float dady = da * setup->emaj.dy * setup->oneoverarea;
+ coef->dadx[i] = dadx;
+ coef->dady[i] = dady;
+ coef->a0[i] = (setup->vmin[vertSlot][i] -
+ (dadx * (setup->vmin[0][0] - 0.5f) +
+ dady * (setup->vmin[0][1] - 0.5f)));
+}
+
+
+/**
+ * Compute the setup->coef[] array's a0, dadx, dady values for a line.
+ * Also initializes setup->vmin/vmax/vprovoke from the two line vertices.
+ */
+static INLINE boolean
+setup_line_coefficients(struct setup_context *setup,
+ const float (*v0)[4],
+ const float (*v1)[4])
+{
+ struct softpipe_context *softpipe = setup->softpipe;
+ const struct sp_fragment_shader *spfs = softpipe->fs;
+ const struct vertex_info *vinfo = softpipe_get_vertex_info(softpipe);
+ uint fragSlot;
+ float area;
+
+ /* use setup->vmin, vmax to point to vertices */
+ if (softpipe->rasterizer->flatshade_first)
+ setup->vprovoke = v0;
+ else
+ setup->vprovoke = v1;
+ setup->vmin = v0;
+ setup->vmax = v1;
+
+ setup->emaj.dx = setup->vmax[0][0] - setup->vmin[0][0];
+ setup->emaj.dy = setup->vmax[0][1] - setup->vmin[0][1];
+
+   /* NOTE: this is not really an area; for a line it is the squared
+    * length of the major edge, but it plays the same role that one/area
+    * plays for triangles.
+    */
+ area = setup->emaj.dx * setup->emaj.dx + setup->emaj.dy * setup->emaj.dy;
+ if (area == 0.0f || util_is_inf_or_nan(area))
+ return FALSE;
+ setup->oneoverarea = 1.0f / area;
+
+ /* z and w are done by linear interpolation:
+ */
+ line_linear_coeff(setup, &setup->posCoef, 0, 2);
+ line_linear_coeff(setup, &setup->posCoef, 0, 3);
+
+ /* setup interpolation for all the remaining attributes:
+ */
+ for (fragSlot = 0; fragSlot < spfs->info.num_inputs; fragSlot++) {
+ const uint vertSlot = vinfo->attrib[fragSlot].src_index;
+ uint j;
+
+ switch (vinfo->attrib[fragSlot].interp_mode) {
+ case INTERP_CONSTANT:
+ for (j = 0; j < NUM_CHANNELS; j++)
+ const_coeff(setup, &setup->coef[fragSlot], vertSlot, j);
+ break;
+ case INTERP_LINEAR:
+ for (j = 0; j < NUM_CHANNELS; j++)
+ line_linear_coeff(setup, &setup->coef[fragSlot], vertSlot, j);
+ break;
+ case INTERP_PERSPECTIVE:
+ for (j = 0; j < NUM_CHANNELS; j++)
+ line_persp_coeff(setup, &setup->coef[fragSlot], vertSlot, j);
+ break;
+ case INTERP_POS:
+ setup_fragcoord_coeff(setup, fragSlot);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (spfs->info.input_semantic_name[fragSlot] == TGSI_SEMANTIC_FACE) {
+ setup->coef[fragSlot].a0[0] = 1.0f - setup->quad.input.facing;
+ setup->coef[fragSlot].dadx[0] = 0.0;
+ setup->coef[fragSlot].dady[0] = 0.0;
+ }
+ }
+ return TRUE;
+}
+
+
+/**
+ * Plot a pixel in a line segment.
+ */
+static INLINE void
+plot(struct setup_context *setup, int x, int y)
+{
+ const int iy = y & 1;
+ const int ix = x & 1;
+ const int quadX = x - ix;
+ const int quadY = y - iy;
+ const int mask = (1 << ix) << (2 * iy);
+
+ if (quadX != setup->quad.input.x0 ||
+ quadY != setup->quad.input.y0)
+ {
+ /* flush prev quad, start new quad */
+
+ if (setup->quad.input.x0 != -1)
+ CLIP_EMIT_QUAD(setup);
+
+ setup->quad.input.x0 = quadX;
+ setup->quad.input.y0 = quadY;
+ setup->quad.inout.mask = 0x0;
+ }
+
+ setup->quad.inout.mask |= mask;
+}
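+
+/*
+ * Example of the mask layout (assuming the usual softpipe 2x2 quad
+ * convention of bit 0 = top-left, 1 = top-right, 2 = bottom-left,
+ * 3 = bottom-right): pixel (5, 7) has ix=1, iy=1, so it lands in the
+ * quad at (4, 6) with mask = (1 << 1) << 2 = 0x8, the bottom-right bit.
+ */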
+
+
+/**
+ * Do setup for line rasterization, then render the line.
+ * Single-pixel width, no stipple, etc. We rely on the 'draw' module
+ * to handle stippling and wide lines.
+ */
+void
+setup_line(struct setup_context *setup,
+ const float (*v0)[4],
+ const float (*v1)[4])
+{
+ int x0 = (int) v0[0][0];
+ int x1 = (int) v1[0][0];
+ int y0 = (int) v0[0][1];
+ int y1 = (int) v1[0][1];
+ int dx = x1 - x0;
+ int dy = y1 - y0;
+ int xstep, ystep;
+
+#if DEBUG_VERTS
+ debug_printf("Setup line:\n");
+ print_vertex(setup, v0);
+ print_vertex(setup, v1);
+#endif
+
+ if (setup->softpipe->no_rast)
+ return;
+
+ if (dx == 0 && dy == 0)
+ return;
+
+ if (!setup_line_coefficients(setup, v0, v1))
+ return;
+
+ assert(v0[0][0] < 1.0e9);
+ assert(v0[0][1] < 1.0e9);
+ assert(v1[0][0] < 1.0e9);
+ assert(v1[0][1] < 1.0e9);
+
+ if (dx < 0) {
+ dx = -dx; /* make positive */
+ xstep = -1;
+ }
+ else {
+ xstep = 1;
+ }
+
+ if (dy < 0) {
+ dy = -dy; /* make positive */
+ ystep = -1;
+ }
+ else {
+ ystep = 1;
+ }
+
+ assert(dx >= 0);
+ assert(dy >= 0);
+
+ setup->quad.input.x0 = setup->quad.input.y0 = -1;
+ setup->quad.inout.mask = 0x0;
+ setup->quad.input.prim = QUAD_PRIM_LINE;
+ /* XXX temporary: set coverage to 1.0 so the line appears
+ * if AA mode happens to be enabled.
+ */
+ setup->quad.input.coverage[0] =
+ setup->quad.input.coverage[1] =
+ setup->quad.input.coverage[2] =
+ setup->quad.input.coverage[3] = 1.0;
+
+ if (dx > dy) {
+ /*** X-major line ***/
+ int i;
+ const int errorInc = dy + dy;
+ int error = errorInc - dx;
+ const int errorDec = error - dx;
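+
+      /* Standard Bresenham stepping (the numbers are illustrative): with
+       * dx=5, dy=2 the error starts at 2*dy - dx = -1.  Each x step adds
+       * errorInc (2*dy) while the error is negative, and errorDec
+       * (2*dy - 2*dx, also stepping y) once it becomes non-negative, so
+       * y advances dy times over the dx iterations.
+       */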
+
+ for (i = 0; i < dx; i++) {
+ plot(setup, x0, y0);
+
+ x0 += xstep;
+ if (error < 0) {
+ error += errorInc;
+ }
+ else {
+ error += errorDec;
+ y0 += ystep;
+ }
+ }
+ }
+ else {
+ /*** Y-major line ***/
+ int i;
+ const int errorInc = dx + dx;
+ int error = errorInc - dy;
+ const int errorDec = error - dy;
+
+ for (i = 0; i < dy; i++) {
+ plot(setup, x0, y0);
+
+ y0 += ystep;
+ if (error < 0) {
+ error += errorInc;
+ }
+ else {
+ error += errorDec;
+ x0 += xstep;
+ }
+ }
+ }
+
+ /* draw final quad */
+ if (setup->quad.inout.mask) {
+ CLIP_EMIT_QUAD(setup);
+ }
+
+ WAIT_FOR_COMPLETION(setup);
+}
+
+
+static void
+point_persp_coeff(const struct setup_context *setup,
+ const float (*vert)[4],
+ struct tgsi_interp_coef *coef,
+ uint vertSlot, uint i)
+{
+ assert(i <= 3);
+ coef->dadx[i] = 0.0F;
+ coef->dady[i] = 0.0F;
+ coef->a0[i] = vert[vertSlot][i] * vert[0][3];
+}
+
+
+/**
+ * Do setup for point rasterization, then render the point.
+ * Round or square points...
+ * XXX could optimize a lot for 1-pixel points.
+ */
+void
+setup_point( struct setup_context *setup,
+ const float (*v0)[4] )
+{
+ struct softpipe_context *softpipe = setup->softpipe;
+ const struct sp_fragment_shader *spfs = softpipe->fs;
+ const int sizeAttr = setup->softpipe->psize_slot;
+ const float size
+ = sizeAttr > 0 ? v0[sizeAttr][0]
+ : setup->softpipe->rasterizer->point_size;
+ const float halfSize = 0.5F * size;
+ const boolean round = (boolean) setup->softpipe->rasterizer->point_smooth;
+ const float x = v0[0][0]; /* Note: data[0] is always position */
+ const float y = v0[0][1];
+ const struct vertex_info *vinfo = softpipe_get_vertex_info(softpipe);
+ uint fragSlot;
+
+#if DEBUG_VERTS
+ debug_printf("Setup point:\n");
+ print_vertex(setup, v0);
+#endif
+
+ if (softpipe->no_rast)
+ return;
+
+   /* For points, all interpolants are constant-valued.
+    * However, for point sprites, we'll need to set up texcoords appropriately.
+    * XXX: which coefficients are the texcoords???
+    * We may do point sprites as textured quads...
+    *
+    * KW: We don't know which coefficients are texcoords - ultimately
+    * the choice of interpolation mode for each attribute should be
+    * determined by the fragment program, using per-attribute declaration
+    * statements that include interpolation mode as a parameter.  So
+    * either the fragment program will have to be adjusted for
+    * point-sprite vs. normal point behaviour, or a special interpolation
+    * mode will have to be defined which matches the required point-sprite
+    * behaviour.  But the latter is not a feature of normal hardware, and
+    * as such should probably be ruled out on that basis.
+    */
+ setup->vprovoke = v0;
+
+ /* setup Z, W */
+ const_coeff(setup, &setup->posCoef, 0, 2);
+ const_coeff(setup, &setup->posCoef, 0, 3);
+
+ for (fragSlot = 0; fragSlot < spfs->info.num_inputs; fragSlot++) {
+ const uint vertSlot = vinfo->attrib[fragSlot].src_index;
+ uint j;
+
+ switch (vinfo->attrib[fragSlot].interp_mode) {
+ case INTERP_CONSTANT:
+ /* fall-through */
+ case INTERP_LINEAR:
+ for (j = 0; j < NUM_CHANNELS; j++)
+ const_coeff(setup, &setup->coef[fragSlot], vertSlot, j);
+ break;
+ case INTERP_PERSPECTIVE:
+ for (j = 0; j < NUM_CHANNELS; j++)
+ point_persp_coeff(setup, setup->vprovoke,
+ &setup->coef[fragSlot], vertSlot, j);
+ break;
+ case INTERP_POS:
+ setup_fragcoord_coeff(setup, fragSlot);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (spfs->info.input_semantic_name[fragSlot] == TGSI_SEMANTIC_FACE) {
+ setup->coef[fragSlot].a0[0] = 1.0f - setup->quad.input.facing;
+ setup->coef[fragSlot].dadx[0] = 0.0;
+ setup->coef[fragSlot].dady[0] = 0.0;
+ }
+ }
+
+ setup->quad.input.prim = QUAD_PRIM_POINT;
+
+ if (halfSize <= 0.5 && !round) {
+ /* special case for 1-pixel points */
+ const int ix = ((int) x) & 1;
+ const int iy = ((int) y) & 1;
+ setup->quad.input.x0 = (int) x - ix;
+ setup->quad.input.y0 = (int) y - iy;
+ setup->quad.inout.mask = (1 << ix) << (2 * iy);
+ CLIP_EMIT_QUAD(setup);
+ }
+ else {
+ if (round) {
+ /* rounded points */
+ const int ixmin = block((int) (x - halfSize));
+ const int ixmax = block((int) (x + halfSize));
+ const int iymin = block((int) (y - halfSize));
+ const int iymax = block((int) (y + halfSize));
+ const float rmin = halfSize - 0.7071F; /* 0.7071 = sqrt(2)/2 */
+ const float rmax = halfSize + 0.7071F;
+ const float rmin2 = MAX2(0.0F, rmin * rmin);
+ const float rmax2 = rmax * rmax;
+ const float cscale = 1.0F / (rmax2 - rmin2);
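+         /* Fragments whose centers fall inside rmin get full coverage
+          * (the MIN2 below clamps to 1.0), those beyond rmax are skipped
+          * entirely, and coverage ramps down from 1 to 0 (linearly in
+          * squared distance) across the rmin..rmax band; the 0.7071
+          * half-width is the half-diagonal of a pixel, so the band spans
+          * roughly one pixel diagonal along the circle's edge.
+          */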
+ int ix, iy;
+
+ for (iy = iymin; iy <= iymax; iy += 2) {
+ for (ix = ixmin; ix <= ixmax; ix += 2) {
+ float dx, dy, dist2, cover;
+
+ setup->quad.inout.mask = 0x0;
+
+ dx = (ix + 0.5f) - x;
+ dy = (iy + 0.5f) - y;
+ dist2 = dx * dx + dy * dy;
+ if (dist2 <= rmax2) {
+ cover = 1.0F - (dist2 - rmin2) * cscale;
+ setup->quad.input.coverage[QUAD_TOP_LEFT] = MIN2(cover, 1.0f);
+ setup->quad.inout.mask |= MASK_TOP_LEFT;
+ }
+
+ dx = (ix + 1.5f) - x;
+ dy = (iy + 0.5f) - y;
+ dist2 = dx * dx + dy * dy;
+ if (dist2 <= rmax2) {
+ cover = 1.0F - (dist2 - rmin2) * cscale;
+ setup->quad.input.coverage[QUAD_TOP_RIGHT] = MIN2(cover, 1.0f);
+ setup->quad.inout.mask |= MASK_TOP_RIGHT;
+ }
+
+ dx = (ix + 0.5f) - x;
+ dy = (iy + 1.5f) - y;
+ dist2 = dx * dx + dy * dy;
+ if (dist2 <= rmax2) {
+ cover = 1.0F - (dist2 - rmin2) * cscale;
+ setup->quad.input.coverage[QUAD_BOTTOM_LEFT] = MIN2(cover, 1.0f);
+ setup->quad.inout.mask |= MASK_BOTTOM_LEFT;
+ }
+
+ dx = (ix + 1.5f) - x;
+ dy = (iy + 1.5f) - y;
+ dist2 = dx * dx + dy * dy;
+ if (dist2 <= rmax2) {
+ cover = 1.0F - (dist2 - rmin2) * cscale;
+ setup->quad.input.coverage[QUAD_BOTTOM_RIGHT] = MIN2(cover, 1.0f);
+ setup->quad.inout.mask |= MASK_BOTTOM_RIGHT;
+ }
+
+ if (setup->quad.inout.mask) {
+ setup->quad.input.x0 = ix;
+ setup->quad.input.y0 = iy;
+ CLIP_EMIT_QUAD(setup);
+ }
+ }
+ }
+ }
+ else {
+ /* square points */
+ const int xmin = (int) (x + 0.75 - halfSize);
+ const int ymin = (int) (y + 0.25 - halfSize);
+ const int xmax = xmin + (int) size;
+ const int ymax = ymin + (int) size;
+ /* XXX could apply scissor to xmin,ymin,xmax,ymax now */
+ const int ixmin = block(xmin);
+ const int ixmax = block(xmax - 1);
+ const int iymin = block(ymin);
+ const int iymax = block(ymax - 1);
+ int ix, iy;
+
+ /*
+ debug_printf("(%f, %f) -> X:%d..%d Y:%d..%d\n", x, y, xmin, xmax,ymin,ymax);
+ */
+ for (iy = iymin; iy <= iymax; iy += 2) {
+ uint rowMask = 0xf;
+ if (iy < ymin) {
+ /* above the top edge */
+ rowMask &= (MASK_BOTTOM_LEFT | MASK_BOTTOM_RIGHT);
+ }
+ if (iy + 1 >= ymax) {
+ /* below the bottom edge */
+ rowMask &= (MASK_TOP_LEFT | MASK_TOP_RIGHT);
+ }
+
+ for (ix = ixmin; ix <= ixmax; ix += 2) {
+ uint mask = rowMask;
+
+ if (ix < xmin) {
+ /* fragment is past left edge of point, turn off left bits */
+ mask &= (MASK_BOTTOM_RIGHT | MASK_TOP_RIGHT);
+ }
+ if (ix + 1 >= xmax) {
+ /* past the right edge */
+ mask &= (MASK_BOTTOM_LEFT | MASK_TOP_LEFT);
+ }
+
+ setup->quad.inout.mask = mask;
+ setup->quad.input.x0 = ix;
+ setup->quad.input.y0 = iy;
+ CLIP_EMIT_QUAD(setup);
+ }
+ }
+ }
+ }
+
+ WAIT_FOR_COMPLETION(setup);
+}
+
+void setup_prepare( struct setup_context *setup )
+{
+ struct softpipe_context *sp = setup->softpipe;
+ unsigned i;
+
+ if (sp->dirty) {
+ softpipe_update_derived(sp);
+ }
+
+ /* Note: nr_attrs is only used for debugging (vertex printing) */
+ setup->quad.nr_attrs = draw_num_vs_outputs(sp->draw);
+
+ for (i = 0; i < SP_NUM_QUAD_THREADS; i++) {
+ sp->quad[i].first->begin( sp->quad[i].first );
+ }
+
+ if (sp->reduced_api_prim == PIPE_PRIM_TRIANGLES &&
+ sp->rasterizer->fill_cw == PIPE_POLYGON_MODE_FILL &&
+ sp->rasterizer->fill_ccw == PIPE_POLYGON_MODE_FILL) {
+ /* we'll do culling */
+ setup->winding = sp->rasterizer->cull_mode;
+ }
+ else {
+ /* 'draw' will do culling */
+ setup->winding = PIPE_WINDING_NONE;
+ }
+}
+
+
+
+void setup_destroy_context( struct setup_context *setup )
+{
+ FREE( setup );
+}
+
+
+/**
+ * Create a new primitive setup/render stage.
+ */
+struct setup_context *setup_create_context( struct softpipe_context *softpipe )
+{
+ struct setup_context *setup = CALLOC_STRUCT(setup_context);
+#if SP_NUM_QUAD_THREADS > 1
+ uint i;
+#endif
+
+ setup->softpipe = softpipe;
+
+ setup->quad.coef = setup->coef;
+ setup->quad.posCoef = &setup->posCoef;
+
+#if SP_NUM_QUAD_THREADS > 1
+ setup->que.first = 0;
+ setup->que.last = 0;
+ pipe_mutex_init( setup->que.que_mutex );
+ pipe_condvar_init( setup->que.que_notfull_condvar );
+ pipe_condvar_init( setup->que.que_notempty_condvar );
+ setup->que.jobs_added = 0;
+ setup->que.jobs_done = 0;
+ pipe_condvar_init( setup->que.que_done_condvar );
+ for (i = 0; i < SP_NUM_QUAD_THREADS; i++) {
+ setup->threads[i].setup = setup;
+ setup->threads[i].id = i;
+ setup->threads[i].handle = pipe_thread_create( quad_thread, &setup->threads[i] );
+ }
+#endif
+
+ return setup;
+}
+
diff --git a/src/gallium/drivers/softpipe/sp_setup.h b/src/gallium/drivers/softpipe/sp_setup.h
new file mode 100644
index 0000000000..d54f334428
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_setup.h
@@ -0,0 +1,53 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef SP_SETUP_H
+#define SP_SETUP_H
+
+struct setup_context;
+struct softpipe_context;
+
+void
+setup_tri( struct setup_context *setup,
+ const float (*v0)[4],
+ const float (*v1)[4],
+ const float (*v2)[4] );
+
+void
+setup_line(struct setup_context *setup,
+ const float (*v0)[4],
+ const float (*v1)[4]);
+
+void
+setup_point( struct setup_context *setup,
+ const float (*v0)[4] );
+
+
+struct setup_context *setup_create_context( struct softpipe_context *softpipe );
+void setup_prepare( struct setup_context *setup );
+void setup_destroy_context( struct setup_context *setup );
+
+#endif
diff --git a/src/gallium/drivers/softpipe/sp_state.h b/src/gallium/drivers/softpipe/sp_state.h
new file mode 100644
index 0000000000..9776e978e3
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_state.h
@@ -0,0 +1,206 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef SP_STATE_H
+#define SP_STATE_H
+
+#include "pipe/p_state.h"
+#include "tgsi/tgsi_scan.h"
+
+
+#define SP_NEW_VIEWPORT 0x1
+#define SP_NEW_RASTERIZER 0x2
+#define SP_NEW_FS 0x4
+#define SP_NEW_BLEND 0x8
+#define SP_NEW_CLIP 0x10
+#define SP_NEW_SCISSOR 0x20
+#define SP_NEW_STIPPLE 0x40
+#define SP_NEW_FRAMEBUFFER 0x80
+#define SP_NEW_DEPTH_STENCIL_ALPHA 0x100
+#define SP_NEW_CONSTANTS 0x200
+#define SP_NEW_SAMPLER 0x400
+#define SP_NEW_TEXTURE 0x800
+#define SP_NEW_VERTEX 0x1000
+#define SP_NEW_VS 0x2000
+#define SP_NEW_QUERY 0x4000
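+
+/*
+ * Each state-setting function ORs one of these bits into softpipe->dirty;
+ * softpipe_update_derived() later inspects the accumulated mask to decide
+ * which derived state (vertex layout, cliprect, quad pipeline) must be
+ * rebuilt before rendering.
+ */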
+
+
+struct tgsi_sampler;
+struct tgsi_exec_machine;
+struct vertex_info;
+
+
+/**
+ * Subclass of pipe_shader_state (though it doesn't really need to be).
+ *
+ * This is starting to look an awful lot like a quad pipeline stage...
+ */
+struct sp_fragment_shader {
+ struct pipe_shader_state shader;
+
+ struct tgsi_shader_info info;
+
+ void (*prepare)( const struct sp_fragment_shader *shader,
+ struct tgsi_exec_machine *machine,
+ struct tgsi_sampler **samplers);
+
+ /* Run the shader - this interface will get cleaned up in the
+ * future:
+ */
+ unsigned (*run)( const struct sp_fragment_shader *shader,
+ struct tgsi_exec_machine *machine,
+ struct quad_header *quad );
+
+
+ void (*delete)( struct sp_fragment_shader * );
+};
+
+
+/** Subclass of pipe_shader_state */
+struct sp_vertex_shader {
+ struct pipe_shader_state shader;
+ struct draw_vertex_shader *draw_data;
+};
+
+
+
+void *
+softpipe_create_blend_state(struct pipe_context *,
+ const struct pipe_blend_state *);
+void softpipe_bind_blend_state(struct pipe_context *,
+ void *);
+void softpipe_delete_blend_state(struct pipe_context *,
+ void *);
+
+void *
+softpipe_create_sampler_state(struct pipe_context *,
+ const struct pipe_sampler_state *);
+void softpipe_bind_sampler_states(struct pipe_context *, unsigned, void **);
+void softpipe_delete_sampler_state(struct pipe_context *, void *);
+
+void *
+softpipe_create_depth_stencil_state(struct pipe_context *,
+ const struct pipe_depth_stencil_alpha_state *);
+void softpipe_bind_depth_stencil_state(struct pipe_context *, void *);
+void softpipe_delete_depth_stencil_state(struct pipe_context *, void *);
+
+void *
+softpipe_create_rasterizer_state(struct pipe_context *,
+ const struct pipe_rasterizer_state *);
+void softpipe_bind_rasterizer_state(struct pipe_context *, void *);
+void softpipe_delete_rasterizer_state(struct pipe_context *, void *);
+
+void softpipe_set_framebuffer_state( struct pipe_context *,
+ const struct pipe_framebuffer_state * );
+
+void softpipe_set_blend_color( struct pipe_context *pipe,
+ const struct pipe_blend_color *blend_color );
+
+void softpipe_set_clip_state( struct pipe_context *,
+ const struct pipe_clip_state * );
+
+void softpipe_set_constant_buffer(struct pipe_context *,
+ uint shader, uint index,
+ const struct pipe_constant_buffer *buf);
+
+void *softpipe_create_fs_state(struct pipe_context *,
+ const struct pipe_shader_state *);
+void softpipe_bind_fs_state(struct pipe_context *, void *);
+void softpipe_delete_fs_state(struct pipe_context *, void *);
+void *softpipe_create_vs_state(struct pipe_context *,
+ const struct pipe_shader_state *);
+void softpipe_bind_vs_state(struct pipe_context *, void *);
+void softpipe_delete_vs_state(struct pipe_context *, void *);
+
+void softpipe_set_polygon_stipple( struct pipe_context *,
+ const struct pipe_poly_stipple * );
+
+void softpipe_set_scissor_state( struct pipe_context *,
+ const struct pipe_scissor_state * );
+
+void softpipe_set_sampler_textures( struct pipe_context *,
+ unsigned num,
+ struct pipe_texture ** );
+
+void softpipe_set_viewport_state( struct pipe_context *,
+ const struct pipe_viewport_state * );
+
+void softpipe_set_vertex_elements(struct pipe_context *,
+ unsigned count,
+ const struct pipe_vertex_element *);
+
+void softpipe_set_vertex_buffers(struct pipe_context *,
+ unsigned count,
+ const struct pipe_vertex_buffer *);
+
+
+void softpipe_update_derived( struct softpipe_context *softpipe );
+
+
+boolean softpipe_draw_arrays(struct pipe_context *pipe, unsigned mode,
+ unsigned start, unsigned count);
+
+boolean softpipe_draw_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned mode, unsigned start, unsigned count);
+boolean
+softpipe_draw_range_elements(struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned min_index,
+ unsigned max_index,
+ unsigned mode, unsigned start, unsigned count);
+
+void
+softpipe_set_edgeflags(struct pipe_context *pipe, const unsigned *edgeflags);
+
+
+void
+softpipe_map_transfers(struct softpipe_context *sp);
+
+void
+softpipe_unmap_transfers(struct softpipe_context *sp);
+
+void
+softpipe_map_texture_surfaces(struct softpipe_context *sp);
+
+void
+softpipe_unmap_texture_surfaces(struct softpipe_context *sp);
+
+
+struct vertex_info *
+softpipe_get_vertex_info(struct softpipe_context *softpipe);
+
+struct vertex_info *
+softpipe_get_vbuf_vertex_info(struct softpipe_context *softpipe);
+
+
+#endif
diff --git a/src/gallium/drivers/softpipe/sp_state_blend.c b/src/gallium/drivers/softpipe/sp_state_blend.c
new file mode 100644
index 0000000000..384fe559af
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_state_blend.c
@@ -0,0 +1,98 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "util/u_memory.h"
+#include "sp_context.h"
+#include "sp_state.h"
+
+
+void *
+softpipe_create_blend_state(struct pipe_context *pipe,
+ const struct pipe_blend_state *blend)
+{
+ return mem_dup(blend, sizeof(*blend));
+}
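+
+/* The create/bind/delete triplets below follow the usual constant-state-
+ * object pattern: "create" returns an opaque heap copy of the template,
+ * "bind" just installs the pointer and flags the state dirty, and
+ * "delete" frees the copy once it is no longer bound.
+ */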
+
+void softpipe_bind_blend_state( struct pipe_context *pipe,
+ void *blend )
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+
+ softpipe->blend = (const struct pipe_blend_state *)blend;
+
+ softpipe->dirty |= SP_NEW_BLEND;
+}
+
+void softpipe_delete_blend_state(struct pipe_context *pipe,
+ void *blend)
+{
+ FREE( blend );
+}
+
+
+void softpipe_set_blend_color( struct pipe_context *pipe,
+ const struct pipe_blend_color *blend_color )
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+
+ softpipe->blend_color = *blend_color;
+
+ softpipe->dirty |= SP_NEW_BLEND;
+}
+
+
+/** XXX move someday? Or consolidate all these simple state setters
+ * into one file.
+ */
+
+
+void *
+softpipe_create_depth_stencil_state(struct pipe_context *pipe,
+ const struct pipe_depth_stencil_alpha_state *depth_stencil)
+{
+ return mem_dup(depth_stencil, sizeof(*depth_stencil));
+}
+
+void
+softpipe_bind_depth_stencil_state(struct pipe_context *pipe,
+ void *depth_stencil)
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+
+ softpipe->depth_stencil = (const struct pipe_depth_stencil_alpha_state *)depth_stencil;
+
+ softpipe->dirty |= SP_NEW_DEPTH_STENCIL_ALPHA;
+}
+
+void
+softpipe_delete_depth_stencil_state(struct pipe_context *pipe, void *depth)
+{
+ FREE( depth );
+}
diff --git a/src/gallium/drivers/softpipe/sp_state_clip.c b/src/gallium/drivers/softpipe/sp_state_clip.c
new file mode 100644
index 0000000000..4946c776e3
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_state_clip.c
@@ -0,0 +1,79 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+#include "sp_context.h"
+#include "sp_state.h"
+#include "draw/draw_context.h"
+
+
+void softpipe_set_clip_state( struct pipe_context *pipe,
+ const struct pipe_clip_state *clip )
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+
+ /* pass the clip state to the draw module */
+ draw_set_clip_state(softpipe->draw, clip);
+}
+
+
+void softpipe_set_viewport_state( struct pipe_context *pipe,
+ const struct pipe_viewport_state *viewport )
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+
+ /* pass the viewport info to the draw module */
+ draw_set_viewport_state(softpipe->draw, viewport);
+
+ softpipe->viewport = *viewport; /* struct copy */
+ softpipe->dirty |= SP_NEW_VIEWPORT;
+}
+
+
+void softpipe_set_scissor_state( struct pipe_context *pipe,
+ const struct pipe_scissor_state *scissor )
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+
+ draw_flush(softpipe->draw);
+
+ softpipe->scissor = *scissor; /* struct copy */
+ softpipe->dirty |= SP_NEW_SCISSOR;
+}
+
+
+void softpipe_set_polygon_stipple( struct pipe_context *pipe,
+ const struct pipe_poly_stipple *stipple )
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+
+ draw_flush(softpipe->draw);
+
+ softpipe->poly_stipple = *stipple; /* struct copy */
+ softpipe->dirty |= SP_NEW_STIPPLE;
+}
diff --git a/src/gallium/drivers/softpipe/sp_state_derived.c b/src/gallium/drivers/softpipe/sp_state_derived.c
new file mode 100644
index 0000000000..75551000c9
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_state_derived.c
@@ -0,0 +1,211 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "pipe/p_shader_tokens.h"
+#include "draw/draw_context.h"
+#include "draw/draw_vertex.h"
+#include "draw/draw_private.h"
+#include "sp_context.h"
+#include "sp_state.h"
+
+
+/**
+ * Mark the current vertex layout as "invalid".
+ * We'll validate the vertex layout later, when we start to actually
+ * render a point or line or tri.
+ */
+static void
+invalidate_vertex_layout(struct softpipe_context *softpipe)
+{
+ softpipe->vertex_info.num_attribs = 0;
+}
+
+
+/**
+ * The vertex info describes how to convert the post-transformed vertices
+ * (simple float[][4]) used by the 'draw' module into vertices for
+ * rasterization.
+ *
+ * This function validates the vertex layout and returns a pointer to a
+ * vertex_info object.
+ */
+struct vertex_info *
+softpipe_get_vertex_info(struct softpipe_context *softpipe)
+{
+ struct vertex_info *vinfo = &softpipe->vertex_info;
+
+ if (vinfo->num_attribs == 0) {
+ /* compute vertex layout now */
+ const struct sp_fragment_shader *spfs = softpipe->fs;
+ const enum interp_mode colorInterp
+ = softpipe->rasterizer->flatshade ? INTERP_CONSTANT : INTERP_LINEAR;
+ uint i;
+
+ if (softpipe->vbuf) {
+ /* if using the post-transform vertex buffer, tell draw_vbuf to
+ * simply emit the whole post-xform vertex as-is:
+ */
+ struct vertex_info *vinfo_vbuf = &softpipe->vertex_info_vbuf;
+ const uint num = draw_num_vs_outputs(softpipe->draw);
+ uint i;
+
+ /* No longer any need to try and emit draw vertex_header info.
+ */
+ vinfo_vbuf->num_attribs = 0;
+ for (i = 0; i < num; i++) {
+ draw_emit_vertex_attr(vinfo_vbuf, EMIT_4F, INTERP_PERSPECTIVE, i);
+ }
+ draw_compute_vertex_size(vinfo_vbuf);
+ }
+
+ /*
+ * Loop over fragment shader inputs, searching for the matching output
+ * from the vertex shader.
+ */
+ vinfo->num_attribs = 0;
+ for (i = 0; i < spfs->info.num_inputs; i++) {
+ int src;
+ switch (spfs->info.input_semantic_name[i]) {
+ case TGSI_SEMANTIC_POSITION:
+ src = draw_find_vs_output(softpipe->draw,
+ TGSI_SEMANTIC_POSITION, 0);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_POS, src);
+ break;
+
+ case TGSI_SEMANTIC_COLOR:
+ src = draw_find_vs_output(softpipe->draw, TGSI_SEMANTIC_COLOR,
+ spfs->info.input_semantic_index[i]);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, colorInterp, src);
+ break;
+
+ case TGSI_SEMANTIC_FOG:
+ src = draw_find_vs_output(softpipe->draw, TGSI_SEMANTIC_FOG, 0);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_PERSPECTIVE, src);
+ break;
+
+ case TGSI_SEMANTIC_GENERIC:
+ case TGSI_SEMANTIC_FACE:
+ /* this includes texcoords and varying vars */
+ src = draw_find_vs_output(softpipe->draw, TGSI_SEMANTIC_GENERIC,
+ spfs->info.input_semantic_index[i]);
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_PERSPECTIVE, src);
+ break;
+
+ default:
+ assert(0);
+ }
+ }
+
+ softpipe->psize_slot = draw_find_vs_output(softpipe->draw,
+ TGSI_SEMANTIC_PSIZE, 0);
+ if (softpipe->psize_slot > 0) {
+ draw_emit_vertex_attr(vinfo, EMIT_4F, INTERP_CONSTANT,
+ softpipe->psize_slot);
+ }
+
+ draw_compute_vertex_size(vinfo);
+ }
+
+ return vinfo;
+}
+
+
+/**
+ * Called from vbuf module.
+ *
+ * Note that there are actually two different vertex layouts in softpipe.
+ *
+ * The normal one is computed in softpipe_get_vertex_info() above and is
+ * used by the point/line/tri "setup" code.
+ *
+ * The other one (this one) is only used by the vbuf module (which is
+ * not normally used by default but used in testing). For the vbuf module,
+ * we basically want to pass-through the draw module's vertex layout as-is.
+ * When the softpipe vbuf code begins drawing, the normal vertex layout
+ * will come into play again.
+ */
+struct vertex_info *
+softpipe_get_vbuf_vertex_info(struct softpipe_context *softpipe)
+{
+ (void) softpipe_get_vertex_info(softpipe);
+ return &softpipe->vertex_info_vbuf;
+}
+
+
+/**
+ * Recompute cliprect from scissor bounds, scissor enable and surface size.
+ */
+static void
+compute_cliprect(struct softpipe_context *sp)
+{
+ uint surfWidth = sp->framebuffer.width;
+ uint surfHeight = sp->framebuffer.height;
+
+ if (sp->rasterizer->scissor) {
+ /* clip to scissor rect */
+ sp->cliprect.minx = MAX2(sp->scissor.minx, 0);
+ sp->cliprect.miny = MAX2(sp->scissor.miny, 0);
+ sp->cliprect.maxx = MIN2(sp->scissor.maxx, surfWidth);
+ sp->cliprect.maxy = MIN2(sp->scissor.maxy, surfHeight);
+ }
+ else {
+ /* clip to surface bounds */
+ sp->cliprect.minx = 0;
+ sp->cliprect.miny = 0;
+ sp->cliprect.maxx = surfWidth;
+ sp->cliprect.maxy = surfHeight;
+ }
+}
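+
+/* The resulting cliprect is the rectangle that subtriangle() and the
+ * point/line quad-emission paths clamp against during rasterization.
+ */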
+
+
+/* Hopefully this will remain quite simple; otherwise we'll need to pull
+ * in something like the state tracker mechanism.
+ */
+void softpipe_update_derived( struct softpipe_context *softpipe )
+{
+ if (softpipe->dirty & (SP_NEW_RASTERIZER |
+ SP_NEW_FS |
+ SP_NEW_VS))
+ invalidate_vertex_layout( softpipe );
+
+ if (softpipe->dirty & (SP_NEW_SCISSOR |
+ SP_NEW_DEPTH_STENCIL_ALPHA |
+ SP_NEW_FRAMEBUFFER))
+ compute_cliprect(softpipe);
+
+ if (softpipe->dirty & (SP_NEW_BLEND |
+ SP_NEW_DEPTH_STENCIL_ALPHA |
+ SP_NEW_FRAMEBUFFER |
+ SP_NEW_RASTERIZER |
+ SP_NEW_FS |
+ SP_NEW_QUERY))
+ sp_build_quad_pipeline(softpipe);
+
+ softpipe->dirty = 0;
+}
diff --git a/src/gallium/drivers/softpipe/sp_state_fs.c b/src/gallium/drivers/softpipe/sp_state_fs.c
new file mode 100644
index 0000000000..4330c20393
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_state_fs.c
@@ -0,0 +1,169 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "sp_context.h"
+#include "sp_state.h"
+#include "sp_fs.h"
+
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_shader_tokens.h"
+#include "draw/draw_context.h"
+#include "tgsi/tgsi_dump.h"
+#include "tgsi/tgsi_scan.h"
+#include "tgsi/tgsi_parse.h"
+
+
+void *
+softpipe_create_fs_state(struct pipe_context *pipe,
+ const struct pipe_shader_state *templ)
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+ struct sp_fragment_shader *state;
+
+ /* debug */
+ if (softpipe->dump_fs)
+ tgsi_dump(templ->tokens, 0);
+
+ /* codegen */
+ state = softpipe_create_fs_llvm( softpipe, templ );
+ if (!state) {
+ state = softpipe_create_fs_sse( softpipe, templ );
+ if (!state) {
+ state = softpipe_create_fs_exec( softpipe, templ );
+ }
+ }
+
+ assert(state);
+
+ /* get/save the summary info for this shader */
+ tgsi_scan_shader(templ->tokens, &state->info);
+
+ return state;
+}
+
+
+void
+softpipe_bind_fs_state(struct pipe_context *pipe, void *fs)
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+
+ softpipe->fs = (struct sp_fragment_shader *) fs;
+
+ softpipe->dirty |= SP_NEW_FS;
+}
+
+
+void
+softpipe_delete_fs_state(struct pipe_context *pipe, void *fs)
+{
+ struct sp_fragment_shader *state = fs;
+
+ assert(fs != softpipe_context(pipe)->fs);
+
+ state->delete( state );
+}
+
+
+void *
+softpipe_create_vs_state(struct pipe_context *pipe,
+ const struct pipe_shader_state *templ)
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+ struct sp_vertex_shader *state;
+
+ state = CALLOC_STRUCT(sp_vertex_shader);
+ if (state == NULL )
+ goto fail;
+
+   /* Copy the shader tokens; the ones passed in will go away.
+    */
+ state->shader.tokens = tgsi_dup_tokens(templ->tokens);
+ if (state->shader.tokens == NULL)
+ goto fail;
+
+ state->draw_data = draw_create_vertex_shader(softpipe->draw, templ);
+ if (state->draw_data == NULL)
+ goto fail;
+
+ return state;
+
+fail:
+ if (state) {
+ FREE( (void *)state->shader.tokens );
+ FREE( state->draw_data );
+ FREE( state );
+ }
+ return NULL;
+}
+
+
+void
+softpipe_bind_vs_state(struct pipe_context *pipe, void *vs)
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+
+ softpipe->vs = (const struct sp_vertex_shader *)vs;
+
+ draw_bind_vertex_shader(softpipe->draw,
+ (softpipe->vs ? softpipe->vs->draw_data : NULL));
+
+ softpipe->dirty |= SP_NEW_VS;
+}
+
+
+void
+softpipe_delete_vs_state(struct pipe_context *pipe, void *vs)
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+
+ struct sp_vertex_shader *state =
+ (struct sp_vertex_shader *)vs;
+
+ draw_delete_vertex_shader(softpipe->draw, state->draw_data);
+ FREE( state );
+}
+
+
+
+void
+softpipe_set_constant_buffer(struct pipe_context *pipe,
+ uint shader, uint index,
+ const struct pipe_constant_buffer *buf)
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+
+ assert(shader < PIPE_SHADER_TYPES);
+ assert(index == 0);
+
+ /* note: reference counting */
+ pipe_buffer_reference(&softpipe->constants[shader].buffer,
+ buf ? buf->buffer : NULL);
+
+ softpipe->dirty |= SP_NEW_CONSTANTS;
+}
diff --git a/src/gallium/drivers/softpipe/sp_state_rasterizer.c b/src/gallium/drivers/softpipe/sp_state_rasterizer.c
new file mode 100644
index 0000000000..87b7219683
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_state_rasterizer.c
@@ -0,0 +1,62 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "pipe/p_defines.h"
+#include "util/u_memory.h"
+#include "sp_context.h"
+#include "sp_state.h"
+#include "draw/draw_context.h"
+
+
+
+void *
+softpipe_create_rasterizer_state(struct pipe_context *pipe,
+ const struct pipe_rasterizer_state *rast)
+{
+ return mem_dup(rast, sizeof(*rast));
+}
+
+void softpipe_bind_rasterizer_state(struct pipe_context *pipe,
+ void *setup)
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+
+ /* pass-through to draw module */
+ draw_set_rasterizer_state(softpipe->draw, setup);
+
+ softpipe->rasterizer = (struct pipe_rasterizer_state *)setup;
+
+ softpipe->dirty |= SP_NEW_RASTERIZER;
+}
+
+void softpipe_delete_rasterizer_state(struct pipe_context *pipe,
+ void *rasterizer)
+{
+ FREE( rasterizer );
+}
+
+
diff --git a/src/gallium/drivers/softpipe/sp_state_sampler.c b/src/gallium/drivers/softpipe/sp_state_sampler.c
new file mode 100644
index 0000000000..cb517b02e4
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_state_sampler.c
@@ -0,0 +1,117 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors:
+ * Brian Paul
+ */
+
+#include "util/u_memory.h"
+
+#include "draw/draw_context.h"
+
+#include "sp_context.h"
+#include "sp_context.h"
+#include "sp_state.h"
+#include "sp_texture.h"
+#include "sp_tile_cache.h"
+#include "draw/draw_context.h"
+
+
+
+void *
+softpipe_create_sampler_state(struct pipe_context *pipe,
+ const struct pipe_sampler_state *sampler)
+{
+ return mem_dup(sampler, sizeof(*sampler));
+}
+
+
+void
+softpipe_bind_sampler_states(struct pipe_context *pipe,
+ unsigned num, void **sampler)
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+ unsigned i;
+
+ assert(num <= PIPE_MAX_SAMPLERS);
+
+ /* Check for no-op */
+ if (num == softpipe->num_samplers &&
+ !memcmp(softpipe->sampler, sampler, num * sizeof(void *)))
+ return;
+
+ draw_flush(softpipe->draw);
+
+ for (i = 0; i < num; ++i)
+ softpipe->sampler[i] = sampler[i];
+ for (i = num; i < PIPE_MAX_SAMPLERS; ++i)
+ softpipe->sampler[i] = NULL;
+
+ softpipe->num_samplers = num;
+
+ softpipe->dirty |= SP_NEW_SAMPLER;
+}
+
+
+void
+softpipe_set_sampler_textures(struct pipe_context *pipe,
+ unsigned num, struct pipe_texture **texture)
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+ uint i;
+
+ assert(num <= PIPE_MAX_SAMPLERS);
+
+ /* Check for no-op */
+ if (num == softpipe->num_textures &&
+ !memcmp(softpipe->texture, texture, num * sizeof(struct pipe_texture *)))
+ return;
+
+ draw_flush(softpipe->draw);
+
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
+ struct pipe_texture *tex = i < num ? texture[i] : NULL;
+
+ pipe_texture_reference(&softpipe->texture[i], tex);
+ sp_tile_cache_set_texture(pipe, softpipe->tex_cache[i], tex);
+ }
+
+ softpipe->num_textures = num;
+
+ softpipe->dirty |= SP_NEW_TEXTURE;
+}
+
+
+void
+softpipe_delete_sampler_state(struct pipe_context *pipe,
+ void *sampler)
+{
+ FREE( sampler );
+}
+
+
+
diff --git a/src/gallium/drivers/softpipe/sp_state_surface.c b/src/gallium/drivers/softpipe/sp_state_surface.c
new file mode 100644
index 0000000000..7c06d864a7
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_state_surface.c
@@ -0,0 +1,129 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "sp_context.h"
+#include "sp_state.h"
+#include "sp_surface.h"
+#include "sp_tile_cache.h"
+
+#include "draw/draw_context.h"
+
+
+/**
+ * XXX this might get moved someday
+ * Set the framebuffer surface info: color buffers, zbuffer, stencil buffer.
+ * Here, we flush the old surfaces and update the tile cache to point to the new
+ * surfaces.
+ */
+void
+softpipe_set_framebuffer_state(struct pipe_context *pipe,
+ const struct pipe_framebuffer_state *fb)
+{
+ struct softpipe_context *sp = softpipe_context(pipe);
+ uint i;
+
+ for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
+ /* check if changing cbuf */
+ if (sp->framebuffer.cbufs[i] != fb->cbufs[i]) {
+ /* flush old */
+ sp_flush_tile_cache(sp, sp->cbuf_cache[i]);
+
+ /* assign new */
+ sp->framebuffer.cbufs[i] = fb->cbufs[i];
+
+ /* update cache */
+ sp_tile_cache_set_surface(sp->cbuf_cache[i], fb->cbufs[i]);
+ }
+ }
+
+ sp->framebuffer.nr_cbufs = fb->nr_cbufs;
+
+ /* zbuf changing? */
+ if (sp->framebuffer.zsbuf != fb->zsbuf) {
+ /* flush old */
+ sp_flush_tile_cache(sp, sp->zsbuf_cache);
+
+ /* assign new */
+ sp->framebuffer.zsbuf = fb->zsbuf;
+
+ /* update cache */
+ sp_tile_cache_set_surface(sp->zsbuf_cache, fb->zsbuf);
+ }
+
+#if 0
+ /* XXX combined depth/stencil here */
+
+ /* sbuf changing? */
+ if (sp->framebuffer.sbuf != fb->sbuf) {
+ /* flush old */
+ sp_flush_tile_cache(sp, sp->sbuf_cache_sep);
+
+ /* assign new */
+ sp->framebuffer.sbuf = fb->sbuf;
+
+ /* update cache */
+ if (fb->sbuf != fb->zbuf) {
+ /* separate stencil buf */
+ sp->sbuf_cache = sp->sbuf_cache_sep;
+ sp_tile_cache_set_surface(sp->sbuf_cache, fb->sbuf);
+ }
+ else {
+ /* combined depth/stencil */
+ sp->sbuf_cache = sp->zbuf_cache;
+ sp_tile_cache_set_surface(sp->sbuf_cache, fb->sbuf);
+ }
+ }
+#endif
+
+ /* Tell draw module how deep the Z/depth buffer is */
+ {
+ int depth_bits;
+ double mrd;
+ if (sp->framebuffer.zsbuf) {
+ depth_bits = pf_get_component_bits(sp->framebuffer.zsbuf->format,
+ PIPE_FORMAT_COMP_Z);
+ }
+ else {
+ depth_bits = 0;
+ }
+ if (depth_bits > 16) {
+ mrd = 0.0000001;
+ }
+ else {
+ mrd = 0.00002;
+ }
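+      /* mrd is the "minimum resolvable difference" between depth values;
+       * the draw module uses it (notably to scale the polygon-offset
+       * 'units' term) so that deeper depth buffers get a proportionally
+       * smaller epsilon.  The exact constants above are heuristic.
+       */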
+ draw_set_mrd(sp->draw, mrd);
+ }
+
+ sp->framebuffer.width = fb->width;
+ sp->framebuffer.height = fb->height;
+
+ sp->dirty |= SP_NEW_FRAMEBUFFER;
+}
diff --git a/src/gallium/drivers/softpipe/sp_state_vertex.c b/src/gallium/drivers/softpipe/sp_state_vertex.c
new file mode 100644
index 0000000000..46b6991195
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_state_vertex.c
@@ -0,0 +1,73 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "sp_context.h"
+#include "sp_state.h"
+#include "sp_surface.h"
+
+#include "draw/draw_context.h"
+
+
+void
+softpipe_set_vertex_elements(struct pipe_context *pipe,
+ unsigned count,
+ const struct pipe_vertex_element *attribs)
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+
+ assert(count <= PIPE_MAX_ATTRIBS);
+
+ memcpy(softpipe->vertex_element, attribs,
+ count * sizeof(struct pipe_vertex_element));
+ softpipe->num_vertex_elements = count;
+
+ softpipe->dirty |= SP_NEW_VERTEX;
+
+ draw_set_vertex_elements(softpipe->draw, count, attribs);
+}
+
+
+void
+softpipe_set_vertex_buffers(struct pipe_context *pipe,
+ unsigned count,
+ const struct pipe_vertex_buffer *buffers)
+{
+ struct softpipe_context *softpipe = softpipe_context(pipe);
+
+ assert(count <= PIPE_MAX_ATTRIBS);
+
+ memcpy(softpipe->vertex_buffer, buffers, count * sizeof(buffers[0]));
+ softpipe->num_vertex_buffers = count;
+
+ softpipe->dirty |= SP_NEW_VERTEX;
+
+ draw_set_vertex_buffers(softpipe->draw, count, buffers);
+}
diff --git a/src/gallium/drivers/softpipe/sp_surface.c b/src/gallium/drivers/softpipe/sp_surface.c
new file mode 100644
index 0000000000..b04c2a63ad
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_surface.c
@@ -0,0 +1,50 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_rect.h"
+#include "sp_context.h"
+#include "sp_surface.h"
+
+
+static void
+sp_surface_copy(struct pipe_context *pipe,
+ struct pipe_surface *dest, unsigned destx, unsigned desty,
+ struct pipe_surface *src, unsigned srcx, unsigned srcy,
+ unsigned width, unsigned height)
+{
+ util_surface_copy(pipe, FALSE,
+ dest, destx, desty,
+ src, srcx, srcy,
+ width, height);
+}
+
+void
+sp_init_surface_functions(struct softpipe_context *sp)
+{
+ sp->pipe.surface_copy = sp_surface_copy;
+ sp->pipe.surface_fill = util_surface_fill;
+}
diff --git a/src/gallium/drivers/softpipe/sp_surface.h b/src/gallium/drivers/softpipe/sp_surface.h
new file mode 100644
index 0000000000..22de3ba43f
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_surface.h
@@ -0,0 +1,42 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Authors: Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef SP_SURFACE_H
+#define SP_SURFACE_H
+
+
+struct softpipe_context;
+
+
+extern void
+sp_init_surface_functions(struct softpipe_context *sp);
+
+
+#endif /* SP_SURFACE_H */
diff --git a/src/gallium/drivers/softpipe/sp_tex_sample.c b/src/gallium/drivers/softpipe/sp_tex_sample.c
new file mode 100644
index 0000000000..a1d3bade27
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_tex_sample.c
@@ -0,0 +1,1295 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ * Copyright 2008 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Texture sampling
+ *
+ * Authors:
+ * Brian Paul
+ */
+
+#include "sp_context.h"
+#include "sp_quad.h"
+#include "sp_surface.h"
+#include "sp_texture.h"
+#include "sp_tex_sample.h"
+#include "sp_tile_cache.h"
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+
+
+/*
+ * Note, the FRAC macro has to work perfectly. Otherwise you'll sometimes
+ * see 1-pixel bands of improperly weighted linear-filtered textures.
+ * The tests/texwrap.c demo is a good test.
+ * Also note, FRAC(x) doesn't truly return the fractional part of x for x < 0.
+ * Instead, if x < 0 then FRAC(x) = 1 - true_frac(x).
+ */
+#define FRAC(f) ((f) - util_ifloor(f))
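+/*
+ * For example, util_ifloor(-0.25) is -1, so FRAC(-0.25) = -0.25 - (-1) = 0.75,
+ * i.e. 1 - 0.25, whereas FRAC(0.25) = 0.25.  For REPEAT wrapping that is the
+ * desired result, since -0.25 and 0.75 address the same texel.
+ */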
+
+
+/**
+ * Linear interpolation macro
+ */
+static INLINE float
+lerp(float a, float v0, float v1)
+{
+ return v0 + a * (v1 - v0);
+}
+
+
+/**
+ * Do 2D/bilinear interpolation of float values.
+ * v00, v10, v01 and v11 are typically four texture samples in a square/box.
+ * a and b are the horizontal and vertical interpolants.
+ * It's important that this function is inlined when compiled with
+ * optimization! If we find that's not true on some systems, convert
+ * to a macro.
+ */
+static INLINE float
+lerp_2d(float a, float b,
+ float v00, float v10, float v01, float v11)
+{
+ const float temp0 = lerp(a, v00, v10);
+ const float temp1 = lerp(a, v01, v11);
+ return lerp(b, temp0, temp1);
+}
+
+
+/**
+ * As above, but 3D interpolation of 8 values.
+ */
+static INLINE float
+lerp_3d(float a, float b, float c,
+ float v000, float v100, float v010, float v110,
+ float v001, float v101, float v011, float v111)
+{
+ const float temp0 = lerp_2d(a, b, v000, v100, v010, v110);
+ const float temp1 = lerp_2d(a, b, v001, v101, v011, v111);
+ return lerp(c, temp0, temp1);
+}
+
+
+
+/**
+ * If A is a signed integer, A % B doesn't give the right value for A < 0
+ * (in terms of texture repeat). Just casting to unsigned fixes that.
+ */
+#define REMAINDER(A, B) ((unsigned) (A) % (unsigned) (B))
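+/*
+ * For example, with a power-of-two size of 8, plain (-1) % 8 evaluates to -1
+ * in C, which is not a usable texel index, while REMAINDER(-1, 8) computes
+ * ((unsigned) -1) % 8u == 7, the texel expected for repeat wrapping.
+ */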
+
+
+/**
+ * Apply texture coord wrapping mode and return integer texture indexes
+ * for a vector of four texcoords (S or T or P).
+ * \param wrapMode PIPE_TEX_WRAP_x
+ * \param s the incoming texcoords
+ * \param size the texture image size
+ * \param icoord returns the integer texcoords
+ */
+static INLINE void
+nearest_texcoord_4(unsigned wrapMode, const float s[4], unsigned size,
+ int icoord[4])
+{
+ uint ch;
+ switch (wrapMode) {
+ case PIPE_TEX_WRAP_REPEAT:
+ /* s limited to [0,1) */
+ /* i limited to [0,size-1] */
+ for (ch = 0; ch < 4; ch++) {
+ int i = util_ifloor(s[ch] * size);
+ icoord[ch] = REMAINDER(i, size);
+ }
+ return;
+ case PIPE_TEX_WRAP_CLAMP:
+ /* s limited to [0,1] */
+ /* i limited to [0,size-1] */
+ for (ch = 0; ch < 4; ch++) {
+ if (s[ch] <= 0.0F)
+ icoord[ch] = 0;
+ else if (s[ch] >= 1.0F)
+ icoord[ch] = size - 1;
+ else
+ icoord[ch] = util_ifloor(s[ch] * size);
+ }
+ return;
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ {
+ /* s limited to [min,max] */
+ /* i limited to [0, size-1] */
+ const float min = 1.0F / (2.0F * size);
+ const float max = 1.0F - min;
+ for (ch = 0; ch < 4; ch++) {
+ if (s[ch] < min)
+ icoord[ch] = 0;
+ else if (s[ch] > max)
+ icoord[ch] = size - 1;
+ else
+ icoord[ch] = util_ifloor(s[ch] * size);
+ }
+ }
+ return;
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ {
+ /* s limited to [min,max] */
+ /* i limited to [-1, size] */
+ const float min = -1.0F / (2.0F * size);
+ const float max = 1.0F - min;
+ for (ch = 0; ch < 4; ch++) {
+ if (s[ch] <= min)
+ icoord[ch] = -1;
+ else if (s[ch] >= max)
+ icoord[ch] = size;
+ else
+ icoord[ch] = util_ifloor(s[ch] * size);
+ }
+ }
+ return;
+ case PIPE_TEX_WRAP_MIRROR_REPEAT:
+ {
+ const float min = 1.0F / (2.0F * size);
+ const float max = 1.0F - min;
+ for (ch = 0; ch < 4; ch++) {
+ const int flr = util_ifloor(s[ch]);
+ float u;
+ if (flr & 1)
+ u = 1.0F - (s[ch] - (float) flr);
+ else
+ u = s[ch] - (float) flr;
+ if (u < min)
+ icoord[ch] = 0;
+ else if (u > max)
+ icoord[ch] = size - 1;
+ else
+ icoord[ch] = util_ifloor(u * size);
+ }
+ }
+ return;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP:
+ for (ch = 0; ch < 4; ch++) {
+ /* s limited to [0,1] */
+ /* i limited to [0,size-1] */
+ const float u = fabsf(s[ch]);
+ if (u <= 0.0F)
+ icoord[ch] = 0;
+ else if (u >= 1.0F)
+ icoord[ch] = size - 1;
+ else
+ icoord[ch] = util_ifloor(u * size);
+ }
+ return;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
+ {
+ /* s limited to [min,max] */
+ /* i limited to [0, size-1] */
+ const float min = 1.0F / (2.0F * size);
+ const float max = 1.0F - min;
+ for (ch = 0; ch < 4; ch++) {
+ const float u = fabsf(s[ch]);
+ if (u < min)
+ icoord[ch] = 0;
+ else if (u > max)
+ icoord[ch] = size - 1;
+ else
+ icoord[ch] = util_ifloor(u * size);
+ }
+ }
+ return;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
+ {
+ /* s limited to [min,max] */
+ /* i limited to [0, size-1] */
+ const float min = -1.0F / (2.0F * size);
+ const float max = 1.0F - min;
+ for (ch = 0; ch < 4; ch++) {
+ const float u = fabsf(s[ch]);
+ if (u < min)
+ icoord[ch] = -1;
+ else if (u > max)
+ icoord[ch] = size;
+ else
+ icoord[ch] = util_ifloor(u * size);
+ }
+ }
+ return;
+ default:
+ assert(0);
+ }
+}
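+/*
+ * A quick worked example for the REPEAT case above: with s[ch] = 1.25 and
+ * size = 8, i = util_ifloor(1.25 * 8) = 10 and REMAINDER(10, 8) = 2, so the
+ * coordinate wraps back into the [0, size-1] range as expected.
+ */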
+
+
+/**
+ * Used to compute texel locations for linear sampling for four texcoords.
+ * \param wrapMode PIPE_TEX_WRAP_x
+ * \param s the texcoords
+ * \param size the texture image size
+ * \param icoord0 returns first texture indexes
+ * \param icoord1 returns second texture indexes (usually icoord0 + 1)
+ * \param w returns blend factor/weight between texture indexes
+ */
+static INLINE void
+linear_texcoord_4(unsigned wrapMode, const float s[4], unsigned size,
+ int icoord0[4], int icoord1[4], float w[4])
+{
+ uint ch;
+
+ switch (wrapMode) {
+ case PIPE_TEX_WRAP_REPEAT:
+ for (ch = 0; ch < 4; ch++) {
+ float u = s[ch] * size - 0.5F;
+ icoord0[ch] = REMAINDER(util_ifloor(u), size);
+ icoord1[ch] = REMAINDER(icoord0[ch] + 1, size);
+ w[ch] = FRAC(u);
+ }
+ break;
+ case PIPE_TEX_WRAP_CLAMP:
+ for (ch = 0; ch < 4; ch++) {
+ float u = CLAMP(s[ch], 0.0F, 1.0F);
+ u = u * size - 0.5f;
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ w[ch] = FRAC(u);
+ }
+ break;
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ for (ch = 0; ch < 4; ch++) {
+ float u = CLAMP(s[ch], 0.0F, 1.0F);
+ u = u * size - 0.5f;
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ if (icoord0[ch] < 0)
+ icoord0[ch] = 0;
+ if (icoord1[ch] >= (int) size)
+ icoord1[ch] = size - 1;
+ w[ch] = FRAC(u);
+ }
+ break;
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ {
+ const float min = -1.0F / (2.0F * size);
+ const float max = 1.0F - min;
+ for (ch = 0; ch < 4; ch++) {
+ float u = CLAMP(s[ch], min, max);
+ u = u * size - 0.5f;
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ w[ch] = FRAC(u);
+ }
+ }
+ break;
+ case PIPE_TEX_WRAP_MIRROR_REPEAT:
+ for (ch = 0; ch < 4; ch++) {
+ const int flr = util_ifloor(s[ch]);
+ float u;
+ if (flr & 1)
+ u = 1.0F - (s[ch] - (float) flr);
+ else
+ u = s[ch] - (float) flr;
+ u = u * size - 0.5F;
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ if (icoord0[ch] < 0)
+ icoord0[ch] = 0;
+ if (icoord1[ch] >= (int) size)
+ icoord1[ch] = size - 1;
+ w[ch] = FRAC(u);
+ }
+ break;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP:
+ for (ch = 0; ch < 4; ch++) {
+ float u = fabsf(s[ch]);
+ if (u >= 1.0F)
+ u = (float) size;
+ else
+ u *= size;
+ u -= 0.5F;
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ w[ch] = FRAC(u);
+ }
+ break;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
+ for (ch = 0; ch < 4; ch++) {
+ float u = fabsf(s[ch]);
+ if (u >= 1.0F)
+ u = (float) size;
+ else
+ u *= size;
+ u -= 0.5F;
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ if (icoord0[ch] < 0)
+ icoord0[ch] = 0;
+ if (icoord1[ch] >= (int) size)
+ icoord1[ch] = size - 1;
+ w[ch] = FRAC(u);
+ }
+ break;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
+ {
+ const float min = -1.0F / (2.0F * size);
+ const float max = 1.0F - min;
+ for (ch = 0; ch < 4; ch++) {
+ float u = fabsf(s[ch]);
+ if (u <= min)
+ u = min * size;
+ else if (u >= max)
+ u = max * size;
+ else
+ u *= size;
+ u -= 0.5F;
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ w[ch] = FRAC(u);
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
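+/*
+ * A quick worked example for the REPEAT case above: with s[ch] = 0.5 and
+ * size = 8, u = 0.5 * 8 - 0.5 = 3.5, so icoord0 = 3, icoord1 = 4 and
+ * w = 0.5, i.e. the two texels straddling the sample point are blended
+ * with equal weight.
+ */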
+
+
+/**
+ * For RECT textures / unnormalized texcoords.
+ * Only a subset of wrap modes is supported.
+ */
+static INLINE void
+nearest_texcoord_unnorm_4(unsigned wrapMode, const float s[4], unsigned size,
+ int icoord[4])
+{
+ uint ch;
+ switch (wrapMode) {
+ case PIPE_TEX_WRAP_CLAMP:
+ for (ch = 0; ch < 4; ch++) {
+ int i = util_ifloor(s[ch]);
+ icoord[ch]= CLAMP(i, 0, (int) size-1);
+ }
+ return;
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ /* fall-through */
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ for (ch = 0; ch < 4; ch++) {
+ icoord[ch]= util_ifloor( CLAMP(s[ch], 0.5F, (float) size - 0.5F) );
+ }
+ return;
+ default:
+ assert(0);
+ }
+}
+
+
+/**
+ * For RECT textures / unnormalized texcoords.
+ * Only a subset of wrap modes is supported.
+ */
+static INLINE void
+linear_texcoord_unnorm_4(unsigned wrapMode, const float s[4], unsigned size,
+ int icoord0[4], int icoord1[4], float w[4])
+{
+ uint ch;
+ switch (wrapMode) {
+ case PIPE_TEX_WRAP_CLAMP:
+ for (ch = 0; ch < 4; ch++) {
+ /* Not exactly what the spec says, but it matches NVIDIA output */
+ float u = CLAMP(s[ch] - 0.5F, 0.0f, (float) size - 1.0f);
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ w[ch] = FRAC(u);
+ }
+ return;
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
+ /* fall-through */
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
+ for (ch = 0; ch < 4; ch++) {
+ float u = CLAMP(s[ch], 0.5F, (float) size - 0.5F);
+ u -= 0.5F;
+ icoord0[ch] = util_ifloor(u);
+ icoord1[ch] = icoord0[ch] + 1;
+ if (icoord1[ch] > (int) size - 1)
+ icoord1[ch] = size - 1;
+ w[ch] = FRAC(u);
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+
+static unsigned
+choose_cube_face(float rx, float ry, float rz, float *newS, float *newT)
+{
+ /*
+ major axis
+ direction target sc tc ma
+ ---------- ------------------------------- --- --- ---
+ +rx TEXTURE_CUBE_MAP_POSITIVE_X_EXT -rz -ry rx
+ -rx TEXTURE_CUBE_MAP_NEGATIVE_X_EXT +rz -ry rx
+ +ry TEXTURE_CUBE_MAP_POSITIVE_Y_EXT +rx +rz ry
+ -ry TEXTURE_CUBE_MAP_NEGATIVE_Y_EXT +rx -rz ry
+ +rz TEXTURE_CUBE_MAP_POSITIVE_Z_EXT +rx -ry rz
+ -rz TEXTURE_CUBE_MAP_NEGATIVE_Z_EXT -rx -ry rz
+ */
+ const float arx = fabsf(rx), ary = fabsf(ry), arz = fabsf(rz);
+ unsigned face;
+ float sc, tc, ma;
+
+ if (arx > ary && arx > arz) {
+ if (rx >= 0.0F) {
+ face = PIPE_TEX_FACE_POS_X;
+ sc = -rz;
+ tc = -ry;
+ ma = arx;
+ }
+ else {
+ face = PIPE_TEX_FACE_NEG_X;
+ sc = rz;
+ tc = -ry;
+ ma = arx;
+ }
+ }
+ else if (ary > arx && ary > arz) {
+ if (ry >= 0.0F) {
+ face = PIPE_TEX_FACE_POS_Y;
+ sc = rx;
+ tc = rz;
+ ma = ary;
+ }
+ else {
+ face = PIPE_TEX_FACE_NEG_Y;
+ sc = rx;
+ tc = -rz;
+ ma = ary;
+ }
+ }
+ else {
+ if (rz > 0.0F) {
+ face = PIPE_TEX_FACE_POS_Z;
+ sc = rx;
+ tc = -ry;
+ ma = arz;
+ }
+ else {
+ face = PIPE_TEX_FACE_NEG_Z;
+ sc = -rx;
+ tc = -ry;
+ ma = arz;
+ }
+ }
+
+ *newS = ( sc / ma + 1.0F ) * 0.5F;
+ *newT = ( tc / ma + 1.0F ) * 0.5F;
+
+ return face;
+}
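+/*
+ * A quick worked example of the table above: for the direction
+ * (rx, ry, rz) = (0.5, 1.0, -0.25), |ry| is the major axis, so the face is
+ * PIPE_TEX_FACE_POS_Y with sc = rx = 0.5, tc = rz = -0.25 and ma = 1.0,
+ * giving *newS = (0.5 + 1) * 0.5 = 0.75 and *newT = (-0.25 + 1) * 0.5 = 0.375.
+ */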
+
+
+/**
+ * Examine the quad's texture coordinates to compute the partial
+ * derivatives w.r.t X and Y, then compute lambda (level of detail).
+ *
+ * This is only done for fragment shaders, not vertex shaders.
+ */
+static float
+compute_lambda(const struct pipe_texture *tex,
+ const struct pipe_sampler_state *sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias)
+{
+ float rho, lambda;
+
+ assert(sampler->normalized_coords);
+
+ assert(s);
+ {
+ float dsdx = s[QUAD_BOTTOM_RIGHT] - s[QUAD_BOTTOM_LEFT];
+ float dsdy = s[QUAD_TOP_LEFT] - s[QUAD_BOTTOM_LEFT];
+ dsdx = fabsf(dsdx);
+ dsdy = fabsf(dsdy);
+ rho = MAX2(dsdx, dsdy) * tex->width[0];
+ }
+ if (t) {
+ float dtdx = t[QUAD_BOTTOM_RIGHT] - t[QUAD_BOTTOM_LEFT];
+ float dtdy = t[QUAD_TOP_LEFT] - t[QUAD_BOTTOM_LEFT];
+ float max;
+ dtdx = fabsf(dtdx);
+ dtdy = fabsf(dtdy);
+ max = MAX2(dtdx, dtdy) * tex->height[0];
+ rho = MAX2(rho, max);
+ }
+ if (p) {
+ float dpdx = p[QUAD_BOTTOM_RIGHT] - p[QUAD_BOTTOM_LEFT];
+ float dpdy = p[QUAD_TOP_LEFT] - p[QUAD_BOTTOM_LEFT];
+ float max;
+ dpdx = fabsf(dpdx);
+ dpdy = fabsf(dpdy);
+ max = MAX2(dpdx, dpdy) * tex->depth[0];
+ rho = MAX2(rho, max);
+ }
+
+ lambda = util_fast_log2(rho);
+ lambda += lodbias + sampler->lod_bias;
+ lambda = CLAMP(lambda, sampler->min_lod, sampler->max_lod);
+
+ return lambda;
+}
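+/*
+ * A quick worked example: for a 256x256 level-0 image, if s changes by 1/64
+ * across the quad and t by 1/128, then rho = MAX2(256/64, 256/128) = 4 and
+ * lambda is roughly log2(4) = 2 before the bias and LOD clamp are applied.
+ */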
+
+
+/**
+ * Do several things here:
+ * 1. Compute lambda from the texcoords, if needed
+ * 2. Determine if we're minifying or magnifying
+ * 3. If minifying, choose mipmap levels
+ * 4. Return image filter to use within mipmap images
+ * \param level0 Returns first mipmap level to sample from
+ * \param level1 Returns second mipmap level to sample from
+ * \param levelBlend Returns blend factor between levels, in [0,1]
+ * \param imgFilter Returns either the min or mag filter, depending on lambda
+ */
+static void
+choose_mipmap_levels(const struct pipe_texture *texture,
+ const struct pipe_sampler_state *sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ boolean computeLambda,
+ float lodbias,
+ unsigned *level0, unsigned *level1, float *levelBlend,
+ unsigned *imgFilter)
+{
+ if (sampler->min_mip_filter == PIPE_TEX_MIPFILTER_NONE) {
+ /* no mipmap selection needed */
+ *level0 = *level1 = CLAMP((int) sampler->min_lod,
+ 0, (int) texture->last_level);
+
+ if (sampler->min_img_filter != sampler->mag_img_filter) {
+ /* non-mipmapped texture, but still need to determine if doing
+ * minification or magnification.
+ */
+ float lambda = compute_lambda(texture, sampler, s, t, p, lodbias);
+ if (lambda <= 0.0) {
+ *imgFilter = sampler->mag_img_filter;
+ }
+ else {
+ *imgFilter = sampler->min_img_filter;
+ }
+ }
+ else {
+ *imgFilter = sampler->mag_img_filter;
+ }
+ }
+ else {
+ float lambda;
+
+ if (computeLambda)
+ /* fragment shader */
+ lambda = compute_lambda(texture, sampler, s, t, p, lodbias);
+ else
+ /* vertex shader */
+ lambda = lodbias; /* not really a bias, but absolute LOD */
+
+ if (lambda <= 0.0) { /* XXX threshold depends on the filter */
+ /* magnifying */
+ *imgFilter = sampler->mag_img_filter;
+ *level0 = *level1 = 0;
+ }
+ else {
+ /* minifying */
+ *imgFilter = sampler->min_img_filter;
+
+ /* choose mipmap level(s) and compute the blend factor between them */
+ if (sampler->min_mip_filter == PIPE_TEX_MIPFILTER_NEAREST) {
+ /* Nearest mipmap level */
+ const int lvl = (int) (lambda + 0.5);
+ *level0 =
+ *level1 = CLAMP(lvl, 0, (int) texture->last_level);
+ }
+ else {
+ /* Linear interpolation between mipmap levels */
+ const int lvl = (int) lambda;
+ *level0 = CLAMP(lvl, 0, (int) texture->last_level);
+ *level1 = CLAMP(lvl + 1, 0, (int) texture->last_level);
+ *levelBlend = FRAC(lambda); /* blending weight between levels */
+ }
+ }
+ }
+}
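+/*
+ * A quick worked example: with min_mip_filter = PIPE_TEX_MIPFILTER_LINEAR and
+ * lambda = 1.3, we are minifying, so *imgFilter = min_img_filter,
+ * *level0 = 1, *level1 = 2 and *levelBlend = 0.3 (subject to the
+ * last_level clamp).
+ */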
+
+
+/**
+ * Get a texel from a texture, using the texture tile cache.
+ *
+ * \param face the cube face in 0..5
+ * \param level the mipmap level
+ * \param x the x coord of texel within 2D image
+ * \param y the y coord of texel within 2D image
+ * \param z which slice of a 3D texture
+ * \param rgba the quad to put the texel/color into
+ * \param j which element of the rgba quad to write to
+ *
+ * XXX maybe move this into sp_tile_cache.c and merge with the
+ * sp_get_cached_tile_tex() function. Also, get 4 texels instead of 1...
+ */
+static void
+get_texel(const struct tgsi_sampler *tgsi_sampler,
+ unsigned face, unsigned level, int x, int y, int z,
+ float rgba[NUM_CHANNELS][QUAD_SIZE], unsigned j)
+{
+ const struct sp_shader_sampler *samp = sp_shader_sampler(tgsi_sampler);
+ struct softpipe_context *sp = samp->sp;
+ const uint unit = samp->unit;
+ const struct pipe_texture *texture = sp->texture[unit];
+ const struct pipe_sampler_state *sampler = sp->sampler[unit];
+
+ if (x < 0 || x >= (int) texture->width[level] ||
+ y < 0 || y >= (int) texture->height[level] ||
+ z < 0 || z >= (int) texture->depth[level]) {
+ rgba[0][j] = sampler->border_color[0];
+ rgba[1][j] = sampler->border_color[1];
+ rgba[2][j] = sampler->border_color[2];
+ rgba[3][j] = sampler->border_color[3];
+ }
+ else {
+ const int tx = x % TILE_SIZE;
+ const int ty = y % TILE_SIZE;
+ const struct softpipe_cached_tile *tile
+ = sp_get_cached_tile_tex(sp, samp->cache,
+ x, y, z, face, level);
+ rgba[0][j] = tile->data.color[ty][tx][0];
+ rgba[1][j] = tile->data.color[ty][tx][1];
+ rgba[2][j] = tile->data.color[ty][tx][2];
+ rgba[3][j] = tile->data.color[ty][tx][3];
+ if (0)
+ {
+ debug_printf("Get texel %f %f %f %f from %s\n",
+ rgba[0][j], rgba[1][j], rgba[2][j], rgba[3][j],
+ pf_name(texture->format));
+ }
+ }
+}
+
+
+/**
+ * Compare texcoord 'p' (aka R) against texture value 'rgba[0]'
+ * When we sampled the depth texture, the depth value was put into all
+ * RGBA channels. We look at the red channel here.
+ * \param rgba quad of (depth) texel values
+ * \param p texture 'P' components for four pixels in quad
+ * \param j which pixel in the quad to test [0..3]
+ */
+static INLINE void
+shadow_compare(const struct pipe_sampler_state *sampler,
+ float rgba[NUM_CHANNELS][QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ uint j)
+{
+ int k;
+ switch (sampler->compare_func) {
+ case PIPE_FUNC_LESS:
+ k = p[j] < rgba[0][j];
+ break;
+ case PIPE_FUNC_LEQUAL:
+ k = p[j] <= rgba[0][j];
+ break;
+ case PIPE_FUNC_GREATER:
+ k = p[j] > rgba[0][j];
+ break;
+ case PIPE_FUNC_GEQUAL:
+ k = p[j] >= rgba[0][j];
+ break;
+ case PIPE_FUNC_EQUAL:
+ k = p[j] == rgba[0][j];
+ break;
+ case PIPE_FUNC_NOTEQUAL:
+ k = p[j] != rgba[0][j];
+ break;
+ case PIPE_FUNC_ALWAYS:
+ k = 1;
+ break;
+ case PIPE_FUNC_NEVER:
+ k = 0;
+ break;
+ default:
+ k = 0;
+ assert(0);
+ break;
+ }
+
+ /* XXX returning result for default GL_DEPTH_TEXTURE_MODE = GL_LUMINANCE */
+ rgba[0][j] = rgba[1][j] = rgba[2][j] = (float) k;
+ rgba[3][j] = 1.0F;
+}
+
+
+/**
+ * As above, but do four z/texture comparisons.
+ */
+static INLINE void
+shadow_compare4(const struct pipe_sampler_state *sampler,
+ float rgba[NUM_CHANNELS][QUAD_SIZE],
+ const float p[QUAD_SIZE])
+{
+ int j, k0, k1, k2, k3;
+ float val;
+
+ /* compare four texcoords vs. four texture samples */
+ switch (sampler->compare_func) {
+ case PIPE_FUNC_LESS:
+ k0 = p[0] < rgba[0][0];
+ k1 = p[1] < rgba[0][1];
+ k2 = p[2] < rgba[0][2];
+ k3 = p[3] < rgba[0][3];
+ break;
+ case PIPE_FUNC_LEQUAL:
+ k0 = p[0] <= rgba[0][0];
+ k1 = p[1] <= rgba[0][1];
+ k2 = p[2] <= rgba[0][2];
+ k3 = p[3] <= rgba[0][3];
+ break;
+ case PIPE_FUNC_GREATER:
+ k0 = p[0] > rgba[0][0];
+ k1 = p[1] > rgba[0][1];
+ k2 = p[2] > rgba[0][2];
+ k3 = p[3] > rgba[0][3];
+ break;
+ case PIPE_FUNC_GEQUAL:
+ k0 = p[0] >= rgba[0][0];
+ k1 = p[1] >= rgba[0][1];
+ k2 = p[2] >= rgba[0][2];
+ k3 = p[3] >= rgba[0][3];
+ break;
+ case PIPE_FUNC_EQUAL:
+ k0 = p[0] == rgba[0][0];
+ k1 = p[1] == rgba[0][1];
+ k2 = p[2] == rgba[0][2];
+ k3 = p[3] == rgba[0][3];
+ break;
+ case PIPE_FUNC_NOTEQUAL:
+ k0 = p[0] != rgba[0][0];
+ k1 = p[1] != rgba[0][1];
+ k2 = p[2] != rgba[0][2];
+ k3 = p[3] != rgba[0][3];
+ break;
+ case PIPE_FUNC_ALWAYS:
+ k0 = k1 = k2 = k3 = 1;
+ break;
+ case PIPE_FUNC_NEVER:
+ k0 = k1 = k2 = k3 = 0;
+ break;
+ default:
+ k0 = k1 = k2 = k3 = 0;
+ assert(0);
+ break;
+ }
+
+ /* convert four pass/fail values to an intensity in [0,1] */
+ val = 0.25F * (k0 + k1 + k2 + k3);
+
+ /* XXX returning result for default GL_DEPTH_TEXTURE_MODE = GL_LUMINANCE */
+ for (j = 0; j < 4; j++) {
+ rgba[0][j] = rgba[1][j] = rgba[2][j] = val;
+ rgba[3][j] = 1.0F;
+ }
+}
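+/*
+ * A quick worked example: with compare_func = PIPE_FUNC_LEQUAL,
+ * p = {0.4, 0.6, 0.5, 0.5} and depth texels rgba[0] = {0.5, 0.5, 0.5, 0.5},
+ * the comparisons give k = {1, 0, 1, 1}, so val = 0.75 and each pixel in the
+ * quad receives RGB = 0.75, A = 1.0.
+ */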
+
+
+/**
+ * Common code for sampling 1D/2D/cube textures.
+ * Could probably extend for 3D...
+ */
+static void
+sp_get_samples_2d_common(const struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ boolean computeLambda,
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE],
+ const unsigned faces[4])
+{
+ const struct sp_shader_sampler *samp = sp_shader_sampler(tgsi_sampler);
+ const struct softpipe_context *sp = samp->sp;
+ const uint unit = samp->unit;
+ const struct pipe_texture *texture = sp->texture[unit];
+ const struct pipe_sampler_state *sampler = sp->sampler[unit];
+ unsigned level0, level1, j, imgFilter;
+ int width, height;
+ float levelBlend;
+
+ choose_mipmap_levels(texture, sampler, s, t, p, computeLambda, lodbias,
+ &level0, &level1, &levelBlend, &imgFilter);
+
+ assert(sampler->normalized_coords);
+
+ width = texture->width[level0];
+ height = texture->height[level0];
+
+ assert(width > 0);
+
+ switch (imgFilter) {
+ case PIPE_TEX_FILTER_NEAREST:
+ {
+ int x[4], y[4];
+ nearest_texcoord_4(sampler->wrap_s, s, width, x);
+ nearest_texcoord_4(sampler->wrap_t, t, height, y);
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ get_texel(tgsi_sampler, faces[j], level0, x[j], y[j], 0, rgba, j);
+ if (sampler->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) {
+ shadow_compare(sampler, rgba, p, j);
+ }
+
+ if (level0 != level1) {
+ /* get texels from second mipmap level and blend */
+ float rgba2[4][4];
+ unsigned c;
+ x[j] /= 2;
+ y[j] /= 2;
+ get_texel(tgsi_sampler, faces[j], level1, x[j], y[j], 0,
+ rgba2, j);
+ if (sampler->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE){
+ shadow_compare(sampler, rgba2, p, j);
+ }
+
+ for (c = 0; c < NUM_CHANNELS; c++) {
+ rgba[c][j] = lerp(levelBlend, rgba[c][j], rgba2[c][j]);
+ }
+ }
+ }
+ }
+ break;
+ case PIPE_TEX_FILTER_LINEAR:
+ case PIPE_TEX_FILTER_ANISO:
+ {
+ int x0[4], y0[4], x1[4], y1[4];
+ float xw[4], yw[4]; /* weights */
+
+ linear_texcoord_4(sampler->wrap_s, s, width, x0, x1, xw);
+ linear_texcoord_4(sampler->wrap_t, t, height, y0, y1, yw);
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ float tx[4][4]; /* texels */
+ int c;
+ get_texel(tgsi_sampler, faces[j], level0, x0[j], y0[j], 0, tx, 0);
+ get_texel(tgsi_sampler, faces[j], level0, x1[j], y0[j], 0, tx, 1);
+ get_texel(tgsi_sampler, faces[j], level0, x0[j], y1[j], 0, tx, 2);
+ get_texel(tgsi_sampler, faces[j], level0, x1[j], y1[j], 0, tx, 3);
+ if (sampler->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) {
+ shadow_compare4(sampler, tx, p);
+ }
+
+ /* interpolate R, G, B, A */
+ for (c = 0; c < 4; c++) {
+ rgba[c][j] = lerp_2d(xw[j], yw[j],
+ tx[c][0], tx[c][1],
+ tx[c][2], tx[c][3]);
+ }
+
+ if (level0 != level1) {
+ /* get texels from second mipmap level and blend */
+ float rgba2[4][4];
+ x0[j] /= 2;
+ y0[j] /= 2;
+ x1[j] /= 2;
+ y1[j] /= 2;
+ get_texel(tgsi_sampler, faces[j], level1, x0[j], y0[j], 0, tx, 0);
+ get_texel(tgsi_sampler, faces[j], level1, x1[j], y0[j], 0, tx, 1);
+ get_texel(tgsi_sampler, faces[j], level1, x0[j], y1[j], 0, tx, 2);
+ get_texel(tgsi_sampler, faces[j], level1, x1[j], y1[j], 0, tx, 3);
+ if (sampler->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE){
+ shadow_compare4(sampler, tx, p);
+ }
+
+ /* interpolate R, G, B, A */
+ for (c = 0; c < 4; c++) {
+ rgba2[c][j] = lerp_2d(xw[j], yw[j],
+ tx[c][0], tx[c][1], tx[c][2], tx[c][3]);
+ }
+
+ for (c = 0; c < NUM_CHANNELS; c++) {
+ rgba[c][j] = lerp(levelBlend, rgba[c][j], rgba2[c][j]);
+ }
+ }
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+
+static INLINE void
+sp_get_samples_1d(const struct tgsi_sampler *sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ boolean computeLambda,
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ static const unsigned faces[4] = {0, 0, 0, 0};
+ static const float tzero[4] = {0, 0, 0, 0};
+ sp_get_samples_2d_common(sampler, s, tzero, NULL,
+ computeLambda, lodbias, rgba, faces);
+}
+
+
+static INLINE void
+sp_get_samples_2d(const struct tgsi_sampler *sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ boolean computeLambda,
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ static const unsigned faces[4] = {0, 0, 0, 0};
+ sp_get_samples_2d_common(sampler, s, t, p,
+ computeLambda, lodbias, rgba, faces);
+}
+
+
+static INLINE void
+sp_get_samples_3d(const struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ boolean computeLambda,
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ const struct sp_shader_sampler *samp = sp_shader_sampler(tgsi_sampler);
+ const struct softpipe_context *sp = samp->sp;
+ const uint unit = samp->unit;
+ const struct pipe_texture *texture = sp->texture[unit];
+ const struct pipe_sampler_state *sampler = sp->sampler[unit];
+ /* get/map pipe_surfaces corresponding to 3D tex slices */
+ unsigned level0, level1, j, imgFilter;
+ int width, height, depth;
+ float levelBlend;
+ const uint face = 0;
+
+ choose_mipmap_levels(texture, sampler, s, t, p, computeLambda, lodbias,
+ &level0, &level1, &levelBlend, &imgFilter);
+
+ assert(sampler->normalized_coords);
+
+ width = texture->width[level0];
+ height = texture->height[level0];
+ depth = texture->depth[level0];
+
+ assert(width > 0);
+ assert(height > 0);
+ assert(depth > 0);
+
+ switch (imgFilter) {
+ case PIPE_TEX_FILTER_NEAREST:
+ {
+ int x[4], y[4], z[4];
+ nearest_texcoord_4(sampler->wrap_s, s, width, x);
+ nearest_texcoord_4(sampler->wrap_t, t, height, y);
+ nearest_texcoord_4(sampler->wrap_r, p, depth, z);
+ for (j = 0; j < QUAD_SIZE; j++) {
+ get_texel(tgsi_sampler, face, level0, x[j], y[j], z[j], rgba, j);
+ if (level0 != level1) {
+ /* get texels from second mipmap level and blend */
+ float rgba2[4][4];
+ unsigned c;
+ x[j] /= 2;
+ y[j] /= 2;
+ z[j] /= 2;
+ get_texel(tgsi_sampler, face, level1, x[j], y[j], z[j], rgba2, j);
+ for (c = 0; c < NUM_CHANNELS; c++) {
+ rgba[c][j] = lerp(levelBlend, rgba2[c][j], rgba[c][j]);
+ }
+ }
+ }
+ }
+ break;
+ case PIPE_TEX_FILTER_LINEAR:
+ case PIPE_TEX_FILTER_ANISO:
+ {
+ int x0[4], x1[4], y0[4], y1[4], z0[4], z1[4];
+ float xw[4], yw[4], zw[4]; /* interpolation weights */
+ linear_texcoord_4(sampler->wrap_s, s, width, x0, x1, xw);
+ linear_texcoord_4(sampler->wrap_t, t, height, y0, y1, yw);
+ linear_texcoord_4(sampler->wrap_r, p, depth, z0, z1, zw);
+
+ for (j = 0; j < QUAD_SIZE; j++) {
+ int c;
+ float tx0[4][4], tx1[4][4];
+ get_texel(tgsi_sampler, face, level0, x0[j], y0[j], z0[j], tx0, 0);
+ get_texel(tgsi_sampler, face, level0, x1[j], y0[j], z0[j], tx0, 1);
+ get_texel(tgsi_sampler, face, level0, x0[j], y1[j], z0[j], tx0, 2);
+ get_texel(tgsi_sampler, face, level0, x1[j], y1[j], z0[j], tx0, 3);
+ get_texel(tgsi_sampler, face, level0, x0[j], y0[j], z1[j], tx1, 0);
+ get_texel(tgsi_sampler, face, level0, x1[j], y0[j], z1[j], tx1, 1);
+ get_texel(tgsi_sampler, face, level0, x0[j], y1[j], z1[j], tx1, 2);
+ get_texel(tgsi_sampler, face, level0, x1[j], y1[j], z1[j], tx1, 3);
+
+ /* interpolate R, G, B, A */
+ for (c = 0; c < 4; c++) {
+ rgba[c][j] = lerp_3d(xw[j], yw[j], zw[j],
+ tx0[c][0], tx0[c][1],
+ tx0[c][2], tx0[c][3],
+ tx1[c][0], tx1[c][1],
+ tx1[c][2], tx1[c][3]);
+ }
+
+ if (level0 != level1) {
+ /* get texels from second mipmap level and blend */
+ float rgba2[4][4];
+ x0[j] /= 2;
+ y0[j] /= 2;
+ z0[j] /= 2;
+ x1[j] /= 2;
+ y1[j] /= 2;
+ z1[j] /= 2;
+ get_texel(tgsi_sampler, face, level1, x0[j], y0[j], z0[j], tx0, 0);
+ get_texel(tgsi_sampler, face, level1, x1[j], y0[j], z0[j], tx0, 1);
+ get_texel(tgsi_sampler, face, level1, x0[j], y1[j], z0[j], tx0, 2);
+ get_texel(tgsi_sampler, face, level1, x1[j], y1[j], z0[j], tx0, 3);
+ get_texel(tgsi_sampler, face, level1, x0[j], y0[j], z1[j], tx1, 0);
+ get_texel(tgsi_sampler, face, level1, x1[j], y0[j], z1[j], tx1, 1);
+ get_texel(tgsi_sampler, face, level1, x0[j], y1[j], z1[j], tx1, 2);
+ get_texel(tgsi_sampler, face, level1, x1[j], y1[j], z1[j], tx1, 3);
+
+ /* interpolate R, G, B, A */
+ for (c = 0; c < 4; c++) {
+ rgba2[c][j] = lerp_3d(xw[j], yw[j], zw[j],
+ tx0[c][0], tx0[c][1],
+ tx0[c][2], tx0[c][3],
+ tx1[c][0], tx1[c][1],
+ tx1[c][2], tx1[c][3]);
+ }
+
+ /* blend mipmap levels */
+ for (c = 0; c < NUM_CHANNELS; c++) {
+ rgba[c][j] = lerp(levelBlend, rgba[c][j], rgba2[c][j]);
+ }
+ }
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+
+static void
+sp_get_samples_cube(const struct tgsi_sampler *sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ boolean computeLambda,
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ unsigned faces[QUAD_SIZE], j;
+ float ssss[4], tttt[4];
+ for (j = 0; j < QUAD_SIZE; j++) {
+ faces[j] = choose_cube_face(s[j], t[j], p[j], ssss + j, tttt + j);
+ }
+ sp_get_samples_2d_common(sampler, ssss, tttt, NULL,
+ computeLambda, lodbias, rgba, faces);
+}
+
+
+static void
+sp_get_samples_rect(const struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ boolean computeLambda,
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ const struct sp_shader_sampler *samp = sp_shader_sampler(tgsi_sampler);
+ const struct softpipe_context *sp = samp->sp;
+ const uint unit = samp->unit;
+ const struct pipe_texture *texture = sp->texture[unit];
+ const struct pipe_sampler_state *sampler = sp->sampler[unit];
+ const uint face = 0;
+ unsigned level0, level1, j, imgFilter;
+ int width, height;
+ float levelBlend;
+
+ choose_mipmap_levels(texture, sampler, s, t, p, computeLambda, lodbias,
+ &level0, &level1, &levelBlend, &imgFilter);
+
+ /* RECT textures cannot be mipmapped */
+ assert(level0 == level1);
+
+ width = texture->width[level0];
+ height = texture->height[level0];
+
+ assert(width > 0);
+
+ switch (imgFilter) {
+ case PIPE_TEX_FILTER_NEAREST:
+ {
+ int x[4], y[4];
+ nearest_texcoord_unnorm_4(sampler->wrap_s, s, width, x);
+ nearest_texcoord_unnorm_4(sampler->wrap_t, t, height, y);
+ for (j = 0; j < QUAD_SIZE; j++) {
+ get_texel(tgsi_sampler, face, level0, x[j], y[j], 0, rgba, j);
+ if (sampler->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) {
+ shadow_compare(sampler, rgba, p, j);
+ }
+ }
+ }
+ break;
+ case PIPE_TEX_FILTER_LINEAR:
+ case PIPE_TEX_FILTER_ANISO:
+ {
+ int x0[4], y0[4], x1[4], y1[4];
+ float xw[4], yw[4]; /* weights */
+ linear_texcoord_unnorm_4(sampler->wrap_s, s, width, x0, x1, xw);
+ linear_texcoord_unnorm_4(sampler->wrap_t, t, height, y0, y1, yw);
+ for (j = 0; j < QUAD_SIZE; j++) {
+ float tx[4][4]; /* texels */
+ int c;
+ get_texel(tgsi_sampler, face, level0, x0[j], y0[j], 0, tx, 0);
+ get_texel(tgsi_sampler, face, level0, x1[j], y0[j], 0, tx, 1);
+ get_texel(tgsi_sampler, face, level0, x0[j], y1[j], 0, tx, 2);
+ get_texel(tgsi_sampler, face, level0, x1[j], y1[j], 0, tx, 3);
+ if (sampler->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) {
+ shadow_compare4(sampler, tx, p);
+ }
+ for (c = 0; c < 4; c++) {
+ rgba[c][j] = lerp_2d(xw[j], yw[j],
+ tx[c][0], tx[c][1], tx[c][2], tx[c][3]);
+ }
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+
+/**
+ * Common code for vertex/fragment program texture sampling.
+ */
+static INLINE void
+sp_get_samples(struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ boolean computeLambda,
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ const struct sp_shader_sampler *samp = sp_shader_sampler(tgsi_sampler);
+ const struct softpipe_context *sp = samp->sp;
+ const uint unit = samp->unit;
+ const struct pipe_texture *texture = sp->texture[unit];
+ const struct pipe_sampler_state *sampler = sp->sampler[unit];
+
+ if (!texture)
+ return;
+
+ switch (texture->target) {
+ case PIPE_TEXTURE_1D:
+ assert(sampler->normalized_coords);
+ sp_get_samples_1d(tgsi_sampler, s, t, p, computeLambda, lodbias, rgba);
+ break;
+ case PIPE_TEXTURE_2D:
+ if (sampler->normalized_coords)
+ sp_get_samples_2d(tgsi_sampler, s, t, p, computeLambda, lodbias, rgba);
+ else
+ sp_get_samples_rect(tgsi_sampler, s, t, p, computeLambda, lodbias, rgba);
+ break;
+ case PIPE_TEXTURE_3D:
+ assert(sampler->normalized_coords);
+ sp_get_samples_3d(tgsi_sampler, s, t, p, computeLambda, lodbias, rgba);
+ break;
+ case PIPE_TEXTURE_CUBE:
+ assert(sampler->normalized_coords);
+ sp_get_samples_cube(tgsi_sampler, s, t, p, computeLambda, lodbias, rgba);
+ break;
+ default:
+ assert(0);
+ }
+
+#if 0 /* DEBUG */
+ {
+ int i;
+ printf("Sampled at %f, %f, %f:\n", s[0], t[0], p[0]);
+ for (i = 0; i < 4; i++) {
+ printf("Frag %d: %f %f %f %f\n", i,
+ rgba[0][i],
+ rgba[1][i],
+ rgba[2][i],
+ rgba[3][i]);
+ }
+ }
+#endif
+}
+
+
+/**
+ * Called via tgsi_sampler::get_samples() when running a fragment shader.
+ * Get four filtered RGBA values from the sampler's texture.
+ */
+void
+sp_get_samples_fragment(struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ sp_get_samples(tgsi_sampler, s, t, p, TRUE, lodbias, rgba);
+}
+
+
+/**
+ * Called via tgsi_sampler::get_samples() when running a vertex shader.
+ * Get four filtered RGBA values from the sampler's texture.
+ */
+void
+sp_get_samples_vertex(struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE])
+{
+ sp_get_samples(tgsi_sampler, s, t, p, FALSE, lodbias, rgba);
+}
diff --git a/src/gallium/drivers/softpipe/sp_tex_sample.h b/src/gallium/drivers/softpipe/sp_tex_sample.h
new file mode 100644
index 0000000000..40d8eb2c2a
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_tex_sample.h
@@ -0,0 +1,73 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef SP_TEX_SAMPLE_H
+#define SP_TEX_SAMPLE_H
+
+
+#include "tgsi/tgsi_exec.h"
+
+
+/**
+ * Subclass of tgsi_sampler
+ */
+struct sp_shader_sampler
+{
+ struct tgsi_sampler base; /**< base class */
+
+ uint unit;
+ struct softpipe_context *sp;
+ struct softpipe_tile_cache *cache;
+};
+
+
+
+static INLINE const struct sp_shader_sampler *
+sp_shader_sampler(const struct tgsi_sampler *sampler)
+{
+ return (const struct sp_shader_sampler *) sampler;
+}
+
+
+extern void
+sp_get_samples_fragment(struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE]);
+
+extern void
+sp_get_samples_vertex(struct tgsi_sampler *tgsi_sampler,
+ const float s[QUAD_SIZE],
+ const float t[QUAD_SIZE],
+ const float p[QUAD_SIZE],
+ float lodbias,
+ float rgba[NUM_CHANNELS][QUAD_SIZE]);
+
+
+#endif /* SP_TEX_SAMPLE_H */
diff --git a/src/gallium/drivers/softpipe/sp_texture.c b/src/gallium/drivers/softpipe/sp_texture.c
new file mode 100644
index 0000000000..548e40c4a8
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_texture.c
@@ -0,0 +1,407 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Michel Dänzer <michel@tungstengraphics.com>
+ */
+
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+#include "sp_context.h"
+#include "sp_state.h"
+#include "sp_texture.h"
+#include "sp_screen.h"
+#include "sp_winsys.h"
+
+
+/**
+ * Conventional allocation path for non-display textures:
+ * Use a simple, maximally packed layout.
+ */
+static boolean
+softpipe_texture_layout(struct pipe_screen *screen,
+ struct softpipe_texture * spt)
+{
+ struct pipe_texture *pt = &spt->base;
+ unsigned level;
+ unsigned width = pt->width[0];
+ unsigned height = pt->height[0];
+ unsigned depth = pt->depth[0];
+
+ unsigned buffer_size = 0;
+
+ for (level = 0; level <= pt->last_level; level++) {
+ pt->width[level] = width;
+ pt->height[level] = height;
+ pt->depth[level] = depth;
+ pt->nblocksx[level] = pf_get_nblocksx(&pt->block, width);
+ pt->nblocksy[level] = pf_get_nblocksy(&pt->block, height);
+ spt->stride[level] = pt->nblocksx[level]*pt->block.size;
+
+ spt->level_offset[level] = buffer_size;
+
+ buffer_size += (pt->nblocksy[level] *
+ ((pt->target == PIPE_TEXTURE_CUBE) ? 6 : depth) *
+ spt->stride[level]);
+
+ width = minify(width);
+ height = minify(height);
+ depth = minify(depth);
+ }
+
+ spt->buffer = screen->buffer_create(screen, 32,
+ PIPE_BUFFER_USAGE_PIXEL,
+ buffer_size);
+
+ return spt->buffer != NULL;
+}
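+/*
+ * A quick worked example, assuming an uncompressed 4-byte-per-pixel format
+ * (block 1x1, block.size 4): a 2D 4x4 texture with last_level = 1 gets
+ * level 0 at offset 0 with stride 16 (4 blocks * 4 bytes) occupying
+ * 4 * 16 = 64 bytes, and level 1 (2x2) at offset 64 with stride 8 occupying
+ * 2 * 8 = 16 bytes, for a total buffer_size of 80 bytes.
+ */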
+
+
+/**
+ * Texture layout for simple color buffers.
+ */
+static boolean
+softpipe_displaytarget_layout(struct pipe_screen *screen,
+ struct softpipe_texture * spt)
+{
+ unsigned usage = (PIPE_BUFFER_USAGE_CPU_READ_WRITE |
+ PIPE_BUFFER_USAGE_GPU_READ_WRITE);
+ unsigned tex_usage = spt->base.tex_usage;
+
+ spt->base.nblocksx[0] = pf_get_nblocksx(&spt->base.block, spt->base.width[0]);
+ spt->base.nblocksy[0] = pf_get_nblocksy(&spt->base.block, spt->base.height[0]);
+
+ spt->buffer = screen->surface_buffer_create( screen,
+ spt->base.width[0],
+ spt->base.height[0],
+ spt->base.format,
+ usage,
+ tex_usage,
+ &spt->stride[0]);
+
+ return spt->buffer != NULL;
+}
+
+
+static struct pipe_texture *
+softpipe_texture_create(struct pipe_screen *screen,
+ const struct pipe_texture *templat)
+{
+ struct softpipe_texture *spt = CALLOC_STRUCT(softpipe_texture);
+ if (!spt)
+ return NULL;
+
+ spt->base = *templat;
+ pipe_reference_init(&spt->base.reference, 1);
+ spt->base.screen = screen;
+
+ if (spt->base.tex_usage & (PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
+ PIPE_TEXTURE_USAGE_PRIMARY)) {
+ if (!softpipe_displaytarget_layout(screen, spt))
+ goto fail;
+ }
+ else {
+ if (!softpipe_texture_layout(screen, spt))
+ goto fail;
+ }
+
+ return &spt->base;
+
+ fail:
+ FREE(spt);
+ return NULL;
+}
+
+
+static struct pipe_texture *
+softpipe_texture_blanket(struct pipe_screen * screen,
+ const struct pipe_texture *base,
+ const unsigned *stride,
+ struct pipe_buffer *buffer)
+{
+ struct softpipe_texture *spt;
+ assert(screen);
+
+ /* Only supports one type */
+ if (base->target != PIPE_TEXTURE_2D ||
+ base->last_level != 0 ||
+ base->depth[0] != 1) {
+ return NULL;
+ }
+
+ spt = CALLOC_STRUCT(softpipe_texture);
+ if (!spt)
+ return NULL;
+
+ spt->base = *base;
+ pipe_reference_init(&spt->base.reference, 1);
+ spt->base.screen = screen;
+ spt->base.nblocksx[0] = pf_get_nblocksx(&spt->base.block, spt->base.width[0]);
+ spt->base.nblocksy[0] = pf_get_nblocksy(&spt->base.block, spt->base.height[0]);
+ spt->stride[0] = stride[0];
+
+ pipe_buffer_reference(&spt->buffer, buffer);
+
+ return &spt->base;
+}
+
+
+static void
+softpipe_texture_destroy(struct pipe_texture *pt)
+{
+ struct softpipe_texture *spt = softpipe_texture(pt);
+
+ pipe_buffer_reference(&spt->buffer, NULL);
+ FREE(spt);
+}
+
+
+static struct pipe_surface *
+softpipe_get_tex_surface(struct pipe_screen *screen,
+ struct pipe_texture *pt,
+ unsigned face, unsigned level, unsigned zslice,
+ unsigned usage)
+{
+ struct softpipe_texture *spt = softpipe_texture(pt);
+ struct pipe_surface *ps;
+
+ assert(level <= pt->last_level);
+
+ ps = CALLOC_STRUCT(pipe_surface);
+ if (ps) {
+ pipe_reference_init(&ps->reference, 1);
+ pipe_texture_reference(&ps->texture, pt);
+ ps->format = pt->format;
+ ps->width = pt->width[level];
+ ps->height = pt->height[level];
+ ps->offset = spt->level_offset[level];
+ ps->usage = usage;
+
+ /* Because we are softpipe, anything that the state tracker
+ * thought was going to be done with the GPU will actually get
+ * done with the CPU. Let's adjust the flags to take that into
+ * account.
+ */
+ if (ps->usage & PIPE_BUFFER_USAGE_GPU_WRITE) {
+ /* GPU_WRITE means "render" and that can involve reads (blending) */
+ ps->usage |= PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_CPU_READ;
+ }
+
+ if (ps->usage & PIPE_BUFFER_USAGE_GPU_READ)
+ ps->usage |= PIPE_BUFFER_USAGE_CPU_READ;
+
+ ps->face = face;
+ ps->level = level;
+ ps->zslice = zslice;
+
+ if (pt->target == PIPE_TEXTURE_CUBE) {
+ ps->offset += face * pt->nblocksy[level] * spt->stride[level];
+ }
+ else if (pt->target == PIPE_TEXTURE_3D) {
+ ps->offset += zslice * pt->nblocksy[level] * spt->stride[level];
+ }
+ else {
+ assert(face == 0);
+ assert(zslice == 0);
+ }
+ }
+ return ps;
+}
+
+
+static void
+softpipe_tex_surface_destroy(struct pipe_surface *surf)
+{
+ /* Effectively do the texture_update work here - if texture images
+ * needed post-processing to put them into hardware layout, this is
+ * where it would happen. For softpipe, nothing to do.
+ */
+ assert(surf->texture);
+ pipe_texture_reference(&surf->texture, NULL);
+ FREE(surf);
+}
+
+
+static struct pipe_transfer *
+softpipe_get_tex_transfer(struct pipe_screen *screen,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level, unsigned zslice,
+ enum pipe_transfer_usage usage,
+ unsigned x, unsigned y, unsigned w, unsigned h)
+{
+ struct softpipe_texture *sptex = softpipe_texture(texture);
+ struct softpipe_transfer *spt;
+
+ assert(texture);
+ assert(level <= texture->last_level);
+
+ spt = CALLOC_STRUCT(softpipe_transfer);
+ if (spt) {
+ struct pipe_transfer *pt = &spt->base;
+ pipe_texture_reference(&pt->texture, texture);
+ pt->format = texture->format;
+ pt->block = texture->block;
+ pt->x = x;
+ pt->y = y;
+ pt->width = w;
+ pt->height = h;
+ pt->nblocksx = texture->nblocksx[level];
+ pt->nblocksy = texture->nblocksy[level];
+ pt->stride = sptex->stride[level];
+ pt->usage = usage;
+ pt->face = face;
+ pt->level = level;
+ pt->zslice = zslice;
+
+ spt->offset = sptex->level_offset[level];
+
+ if (texture->target == PIPE_TEXTURE_CUBE) {
+ spt->offset += face * pt->nblocksy * pt->stride;
+ }
+ else if (texture->target == PIPE_TEXTURE_3D) {
+ spt->offset += zslice * pt->nblocksy * pt->stride;
+ }
+ else {
+ assert(face == 0);
+ assert(zslice == 0);
+ }
+ return pt;
+ }
+ return NULL;
+}
+
+
+static void
+softpipe_tex_transfer_destroy(struct pipe_transfer *transfer)
+{
+ /* Effectively do the texture_update work here - if texture images
+ * needed post-processing to put them into hardware layout, this is
+ * where it would happen. For softpipe, nothing to do.
+ */
+ assert (transfer->texture);
+ pipe_texture_reference(&transfer->texture, NULL);
+ FREE(transfer);
+}
+
+
+static void *
+softpipe_transfer_map( struct pipe_screen *screen,
+ struct pipe_transfer *transfer )
+{
+ ubyte *map, *xfer_map;
+ struct softpipe_texture *spt;
+ unsigned flags = 0;
+
+ assert(transfer->texture);
+ spt = softpipe_texture(transfer->texture);
+
+ if (transfer->usage != PIPE_TRANSFER_READ) {
+ flags |= PIPE_BUFFER_USAGE_CPU_WRITE;
+ }
+
+ if (transfer->usage != PIPE_TRANSFER_WRITE) {
+ flags |= PIPE_BUFFER_USAGE_CPU_READ;
+ }
+
+ map = pipe_buffer_map(screen, spt->buffer, flags);
+ if (map == NULL)
+ return NULL;
+
+ /* May want to do different things here depending on the read/write nature
+ * of the map:
+ */
+ if (transfer->texture && transfer->usage != PIPE_TRANSFER_READ) {
+ /* Do something to notify sharing contexts of a texture change.
+ * In softpipe, that would mean flushing the texture cache.
+ */
+ softpipe_screen(screen)->timestamp++;
+ }
+
+ xfer_map = map + softpipe_transfer(transfer)->offset +
+ transfer->y / transfer->block.height * transfer->stride +
+ transfer->x / transfer->block.width * transfer->block.size;
+ /*printf("map = %p xfer map = %p\n", map, xfer_map);*/
+ return xfer_map;
+}
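+/*
+ * A quick worked example of the pointer math above, assuming block 1x1 with
+ * block.size 4 and stride 256 (a 64-texel-wide level): mapping a transfer at
+ * x = 8, y = 2 with a level offset of 0 yields
+ * xfer_map = map + 0 + 2 * 256 + 8 * 4 = map + 544.
+ */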
+
+
+static void
+softpipe_transfer_unmap(struct pipe_screen *screen,
+ struct pipe_transfer *transfer)
+{
+ struct softpipe_texture *spt;
+
+ assert(transfer->texture);
+ spt = softpipe_texture(transfer->texture);
+
+ pipe_buffer_unmap( screen, spt->buffer );
+
+ if (transfer->usage != PIPE_TRANSFER_READ) {
+ /* Mark the texture as dirty to expire the tile caches. */
+ spt->modified = TRUE;
+ }
+}
+
+
+void
+softpipe_init_screen_texture_funcs(struct pipe_screen *screen)
+{
+ screen->texture_create = softpipe_texture_create;
+ screen->texture_blanket = softpipe_texture_blanket;
+ screen->texture_destroy = softpipe_texture_destroy;
+
+ screen->get_tex_surface = softpipe_get_tex_surface;
+ screen->tex_surface_destroy = softpipe_tex_surface_destroy;
+
+ screen->get_tex_transfer = softpipe_get_tex_transfer;
+ screen->tex_transfer_destroy = softpipe_tex_transfer_destroy;
+ screen->transfer_map = softpipe_transfer_map;
+ screen->transfer_unmap = softpipe_transfer_unmap;
+}
+
+
+boolean
+softpipe_get_texture_buffer( struct pipe_texture *texture,
+ struct pipe_buffer **buf,
+ unsigned *stride )
+{
+ struct softpipe_texture *tex = (struct softpipe_texture *) texture;
+
+ if (!tex)
+ return FALSE;
+
+ pipe_buffer_reference(buf, tex->buffer);
+
+ if (stride)
+ *stride = tex->stride[0];
+
+ return TRUE;
+}
diff --git a/src/gallium/drivers/softpipe/sp_texture.h b/src/gallium/drivers/softpipe/sp_texture.h
new file mode 100644
index 0000000000..b1b6130b22
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_texture.h
@@ -0,0 +1,80 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef SP_TEXTURE_H
+#define SP_TEXTURE_H
+
+
+#include "pipe/p_state.h"
+
+
+struct pipe_context;
+struct pipe_screen;
+struct softpipe_context;
+
+
+struct softpipe_texture
+{
+ struct pipe_texture base;
+
+ unsigned long level_offset[PIPE_MAX_TEXTURE_LEVELS];
+ unsigned stride[PIPE_MAX_TEXTURE_LEVELS];
+
+ /* The data is held here:
+ */
+ struct pipe_buffer *buffer;
+
+ boolean modified;
+};
+
+struct softpipe_transfer
+{
+ struct pipe_transfer base;
+
+ unsigned long offset;
+};
+
+
+/** cast wrappers */
+static INLINE struct softpipe_texture *
+softpipe_texture(struct pipe_texture *pt)
+{
+ return (struct softpipe_texture *) pt;
+}
+
+static INLINE struct softpipe_transfer *
+softpipe_transfer(struct pipe_transfer *pt)
+{
+ return (struct softpipe_transfer *) pt;
+}
+
+
+extern void
+softpipe_init_screen_texture_funcs(struct pipe_screen *screen);
+
+
+#endif /* SP_TEXTURE_H */
diff --git a/src/gallium/drivers/softpipe/sp_tile_cache.c b/src/gallium/drivers/softpipe/sp_tile_cache.c
new file mode 100644
index 0000000000..1f9b8f1f4f
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_tile_cache.c
@@ -0,0 +1,626 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Texture tile caching.
+ *
+ * Author:
+ * Brian Paul
+ */
+
+#include "pipe/p_inlines.h"
+#include "util/u_memory.h"
+#include "util/u_tile.h"
+#include "sp_context.h"
+#include "sp_surface.h"
+#include "sp_texture.h"
+#include "sp_tile_cache.h"
+
+#define NUM_ENTRIES 50
+
+
+/** XXX move these */
+#define MAX_WIDTH 2048
+#define MAX_HEIGHT 2048
+
+
+struct softpipe_tile_cache
+{
+ struct pipe_screen *screen;
+ struct pipe_surface *surface; /**< the surface we're caching */
+ struct pipe_transfer *transfer;
+ void *transfer_map;
+ struct pipe_texture *texture; /**< if caching a texture */
+ struct softpipe_cached_tile entries[NUM_ENTRIES];
+ uint clear_flags[(MAX_WIDTH / TILE_SIZE) * (MAX_HEIGHT / TILE_SIZE) / 32];
+ float clear_color[4]; /**< for color bufs */
+ uint clear_val; /**< for z+stencil, or packed color clear value */
+ boolean depth_stencil; /**< Is the surface a depth/stencil format? */
+
+ struct pipe_transfer *tex_trans;
+ void *tex_trans_map;
+ int tex_face, tex_level, tex_z;
+
+ struct softpipe_cached_tile tile; /**< scratch tile for clears */
+};
+
+
+/**
+ * Return the position in the cache for the tile that contains win pos (x,y).
+ * We currently use a direct-mapped cache, so this is effectively a hash key.
+ * At some point we should investigate something more sophisticated, like
+ * an LRU replacement policy.
+ */
+#define CACHE_POS(x, y) \
+ (((x) / TILE_SIZE + ((y) / TILE_SIZE) * 5) % NUM_ENTRIES)
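+
+/*
+ * Worked example (added for clarity, not part of the original code):
+ * with TILE_SIZE = 64 and NUM_ENTRIES = 50, the pixel (x,y) = (200,130)
+ * lies in tile column 200/64 = 3 and tile row 130/64 = 2, so
+ * CACHE_POS(200,130) = (3 + 2*5) % 50 = 13.  Tiles whose (col + row*5)
+ * values collide modulo NUM_ENTRIES evict each other.
+ */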
+
+
+
+/**
+ * Is the tile at (x,y) in cleared state?
+ */
+static INLINE uint
+is_clear_flag_set(const uint *bitvec, int x, int y)
+{
+ int pos, bit;
+ x /= TILE_SIZE;
+ y /= TILE_SIZE;
+ pos = y * (MAX_WIDTH / TILE_SIZE) + x;
+ assert(pos / 32 < (MAX_WIDTH / TILE_SIZE) * (MAX_HEIGHT / TILE_SIZE) / 32);
+ bit = bitvec[pos / 32] & (1 << (pos & 31));
+ return bit;
+}
+
+
+/**
+ * Mark the tile at (x,y) as not cleared.
+ */
+static INLINE void
+clear_clear_flag(uint *bitvec, int x, int y)
+{
+ int pos;
+ x /= TILE_SIZE;
+ y /= TILE_SIZE;
+ pos = y * (MAX_WIDTH / TILE_SIZE) + x;
+ assert(pos / 32 < (MAX_WIDTH / TILE_SIZE) * (MAX_HEIGHT / TILE_SIZE) / 32);
+ bitvec[pos / 32] &= ~(1 << (pos & 31));
+}
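+
+/*
+ * Note (added for clarity): clear_flags holds one bit per tile, laid out in
+ * row-major order with MAX_WIDTH / TILE_SIZE = 2048 / 64 = 32 tiles per row.
+ * For example, the tile containing (x,y) = (200,130) has
+ * pos = 2 * 32 + 3 = 67, i.e. bit 67 & 31 = 3 of word 67 / 32 = 2.
+ */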
+
+
+struct softpipe_tile_cache *
+sp_create_tile_cache( struct pipe_screen *screen )
+{
+ struct softpipe_tile_cache *tc;
+ uint pos;
+
+ tc = CALLOC_STRUCT( softpipe_tile_cache );
+ if (tc) {
+ tc->screen = screen;
+ for (pos = 0; pos < NUM_ENTRIES; pos++) {
+ tc->entries[pos].x =
+ tc->entries[pos].y = -1;
+ }
+ }
+ return tc;
+}
+
+
+void
+sp_destroy_tile_cache(struct softpipe_tile_cache *tc)
+{
+ struct pipe_screen *screen;
+ uint pos;
+
+ for (pos = 0; pos < NUM_ENTRIES; pos++) {
+ /*assert(tc->entries[pos].x < 0);*/
+ }
+ if (tc->transfer) {
+ screen = tc->transfer->texture->screen;
+ screen->tex_transfer_destroy(tc->transfer);
+ }
+ if (tc->tex_trans) {
+ screen = tc->tex_trans->texture->screen;
+ screen->tex_transfer_destroy(tc->tex_trans);
+ }
+
+ FREE( tc );
+}
+
+
+/**
+ * Specify the surface to cache.
+ */
+void
+sp_tile_cache_set_surface(struct softpipe_tile_cache *tc,
+ struct pipe_surface *ps)
+{
+ assert(!tc->texture);
+
+ if (tc->transfer) {
+ struct pipe_screen *screen = tc->transfer->texture->screen;
+
+ if (ps == tc->surface)
+ return;
+
+ if (tc->transfer_map) {
+ screen->transfer_unmap(screen, tc->transfer);
+ tc->transfer_map = NULL;
+ }
+
+ screen->tex_transfer_destroy(tc->transfer);
+ tc->transfer = NULL;
+ }
+
+ tc->surface = ps;
+
+ if (ps) {
+ struct pipe_screen *screen = ps->texture->screen;
+
+ tc->transfer = screen->get_tex_transfer(screen, ps->texture, ps->face,
+ ps->level, ps->zslice,
+ PIPE_TRANSFER_READ_WRITE,
+ 0, 0, ps->width, ps->height);
+
+ tc->depth_stencil = (ps->format == PIPE_FORMAT_S8Z24_UNORM ||
+ ps->format == PIPE_FORMAT_X8Z24_UNORM ||
+ ps->format == PIPE_FORMAT_Z24S8_UNORM ||
+ ps->format == PIPE_FORMAT_Z24X8_UNORM ||
+ ps->format == PIPE_FORMAT_Z16_UNORM ||
+ ps->format == PIPE_FORMAT_Z32_UNORM ||
+ ps->format == PIPE_FORMAT_S8_UNORM);
+ }
+}
+
+
+/**
+ * Return the surface being cached.
+ */
+struct pipe_surface *
+sp_tile_cache_get_surface(struct softpipe_tile_cache *tc)
+{
+ return tc->surface;
+}
+
+
+void
+sp_tile_cache_map_transfers(struct softpipe_tile_cache *tc)
+{
+ if (tc->transfer && !tc->transfer_map)
+ tc->transfer_map = tc->screen->transfer_map(tc->screen, tc->transfer);
+
+ if (tc->tex_trans && !tc->tex_trans_map)
+ tc->tex_trans_map = tc->screen->transfer_map(tc->screen, tc->tex_trans);
+}
+
+
+void
+sp_tile_cache_unmap_transfers(struct softpipe_tile_cache *tc)
+{
+ if (tc->transfer_map) {
+ tc->screen->transfer_unmap(tc->screen, tc->transfer);
+ tc->transfer_map = NULL;
+ }
+
+ if (tc->tex_trans_map) {
+ tc->screen->transfer_unmap(tc->screen, tc->tex_trans);
+ tc->tex_trans_map = NULL;
+ }
+}
+
+
+/**
+ * Specify the texture to cache.
+ */
+void
+sp_tile_cache_set_texture(struct pipe_context *pipe,
+ struct softpipe_tile_cache *tc,
+ struct pipe_texture *texture)
+{
+ uint i;
+
+ assert(!tc->transfer);
+
+ pipe_texture_reference(&tc->texture, texture);
+
+ if (tc->tex_trans) {
+ struct pipe_screen *screen = tc->tex_trans->texture->screen;
+
+ if (tc->tex_trans_map) {
+ screen->transfer_unmap(screen, tc->tex_trans);
+ tc->tex_trans_map = NULL;
+ }
+
+ screen->tex_transfer_destroy(tc->tex_trans);
+ tc->tex_trans = NULL;
+ }
+
+ /* mark all entries as invalid/empty */
+ /* XXX we should try to avoid this when the teximage hasn't changed */
+ for (i = 0; i < NUM_ENTRIES; i++) {
+ tc->entries[i].x = -1;
+ }
+
+ tc->tex_face = -1; /* any invalid value here */
+}
+
+
+/**
+ * Set pixels in a tile to the given clear color/value, float.
+ */
+static void
+clear_tile_rgba(struct softpipe_cached_tile *tile,
+ enum pipe_format format,
+ const float clear_value[4])
+{
+ if (clear_value[0] == 0.0 &&
+ clear_value[1] == 0.0 &&
+ clear_value[2] == 0.0 &&
+ clear_value[3] == 0.0) {
+ memset(tile->data.color, 0, sizeof(tile->data.color));
+ }
+ else {
+ uint i, j;
+ for (i = 0; i < TILE_SIZE; i++) {
+ for (j = 0; j < TILE_SIZE; j++) {
+ tile->data.color[i][j][0] = clear_value[0];
+ tile->data.color[i][j][1] = clear_value[1];
+ tile->data.color[i][j][2] = clear_value[2];
+ tile->data.color[i][j][3] = clear_value[3];
+ }
+ }
+ }
+}
+
+
+/**
+ * Set a tile to a solid value/color.
+ */
+static void
+clear_tile(struct softpipe_cached_tile *tile,
+ enum pipe_format format,
+ uint clear_value)
+{
+ uint i, j;
+
+ switch (pf_get_size(format)) {
+ case 1:
+ memset(tile->data.any, clear_value, TILE_SIZE * TILE_SIZE);
+ break;
+ case 2:
+ if (clear_value == 0) {
+ memset(tile->data.any, 0, 2 * TILE_SIZE * TILE_SIZE);
+ }
+ else {
+ for (i = 0; i < TILE_SIZE; i++) {
+ for (j = 0; j < TILE_SIZE; j++) {
+ tile->data.depth16[i][j] = (ushort) clear_value;
+ }
+ }
+ }
+ break;
+ case 4:
+ if (clear_value == 0) {
+ memset(tile->data.any, 0, 4 * TILE_SIZE * TILE_SIZE);
+ }
+ else {
+ for (i = 0; i < TILE_SIZE; i++) {
+ for (j = 0; j < TILE_SIZE; j++) {
+ tile->data.color32[i][j] = clear_value;
+ }
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+
+/**
+ * Actually clear the tiles which were flagged as being in a clear state.
+ */
+static void
+sp_tile_cache_flush_clear(struct pipe_context *pipe,
+ struct softpipe_tile_cache *tc)
+{
+ struct pipe_transfer *pt = tc->transfer;
+ const uint w = tc->transfer->width;
+ const uint h = tc->transfer->height;
+ uint x, y;
+ uint numCleared = 0;
+
+ /* clear the scratch tile to the clear value */
+ clear_tile(&tc->tile, pt->format, tc->clear_val);
+
+ /* push the tile to all positions marked as clear */
+ for (y = 0; y < h; y += TILE_SIZE) {
+ for (x = 0; x < w; x += TILE_SIZE) {
+ if (is_clear_flag_set(tc->clear_flags, x, y)) {
+ pipe_put_tile_raw(pt,
+ x, y, TILE_SIZE, TILE_SIZE,
+ tc->tile.data.color32, 0/*STRIDE*/);
+
+ /* do this? */
+ clear_clear_flag(tc->clear_flags, x, y);
+
+ numCleared++;
+ }
+ }
+ }
+#if 0
+ debug_printf("num cleared: %u\n", numCleared);
+#endif
+}
+
+
+/**
+ * Flush the tile cache: write all dirty tiles back to the transfer.
+ * Any tiles flagged as cleared will be really cleared at this point.
+ */
+void
+sp_flush_tile_cache(struct softpipe_context *softpipe,
+ struct softpipe_tile_cache *tc)
+{
+ struct pipe_transfer *pt = tc->transfer;
+ int inuse = 0, pos;
+
+ if (pt) {
+ /* caching a drawing transfer */
+ for (pos = 0; pos < NUM_ENTRIES; pos++) {
+ struct softpipe_cached_tile *tile = tc->entries + pos;
+ if (tile->x >= 0) {
+ if (tc->depth_stencil) {
+ pipe_put_tile_raw(pt,
+ tile->x, tile->y, TILE_SIZE, TILE_SIZE,
+ tile->data.depth32, 0/*STRIDE*/);
+ }
+ else {
+ pipe_put_tile_rgba(pt,
+ tile->x, tile->y, TILE_SIZE, TILE_SIZE,
+ (float *) tile->data.color);
+ }
+ tile->x = tile->y = -1; /* mark as empty */
+ inuse++;
+ }
+ }
+
+#if TILE_CLEAR_OPTIMIZATION
+ sp_tile_cache_flush_clear(&softpipe->pipe, tc);
+#endif
+ }
+ else if (tc->texture) {
+ /* caching a texture, mark all entries as empty */
+ for (pos = 0; pos < NUM_ENTRIES; pos++) {
+ tc->entries[pos].x = -1;
+ }
+ tc->tex_face = -1;
+ }
+
+#if 0
+ debug_printf("flushed tiles in use: %d\n", inuse);
+#endif
+}
+
+
+/**
+ * Get a tile from the cache.
+ * \param x, y position of tile, in pixels
+ */
+struct softpipe_cached_tile *
+sp_get_cached_tile(struct softpipe_context *softpipe,
+ struct softpipe_tile_cache *tc, int x, int y)
+{
+ struct pipe_transfer *pt = tc->transfer;
+
+ /* tile pos in framebuffer: */
+ const int tile_x = x & ~(TILE_SIZE - 1);
+ const int tile_y = y & ~(TILE_SIZE - 1);
+
+ /* cache pos/entry: */
+ const int pos = CACHE_POS(x, y);
+ struct softpipe_cached_tile *tile = tc->entries + pos;
+
+ if (tile_x != tile->x ||
+ tile_y != tile->y) {
+
+ if (tile->x != -1) {
+ /* put dirty tile back in framebuffer */
+ if (tc->depth_stencil) {
+ pipe_put_tile_raw(pt,
+ tile->x, tile->y, TILE_SIZE, TILE_SIZE,
+ tile->data.depth32, 0/*STRIDE*/);
+ }
+ else {
+ pipe_put_tile_rgba(pt,
+ tile->x, tile->y, TILE_SIZE, TILE_SIZE,
+ (float *) tile->data.color);
+ }
+ }
+
+ tile->x = tile_x;
+ tile->y = tile_y;
+
+ if (is_clear_flag_set(tc->clear_flags, x, y)) {
+ /* don't get tile from framebuffer, just clear it */
+ if (tc->depth_stencil) {
+ clear_tile(tile, pt->format, tc->clear_val);
+ }
+ else {
+ clear_tile_rgba(tile, pt->format, tc->clear_color);
+ }
+ clear_clear_flag(tc->clear_flags, x, y);
+ }
+ else {
+ /* get new tile data from transfer */
+ if (tc->depth_stencil) {
+ pipe_get_tile_raw(pt,
+ tile->x, tile->y, TILE_SIZE, TILE_SIZE,
+ tile->data.depth32, 0/*STRIDE*/);
+ }
+ else {
+ pipe_get_tile_rgba(pt,
+ tile->x, tile->y, TILE_SIZE, TILE_SIZE,
+ (float *) tile->data.color);
+ }
+ }
+ }
+
+ return tile;
+}
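+
+/*
+ * Hypothetical usage sketch (added for illustration; the real callers live
+ * elsewhere in softpipe), presuming the [row][column] layout of data.color:
+ *
+ *    struct softpipe_cached_tile *t = sp_get_cached_tile(softpipe, tc, x, y);
+ *    t->data.color[y % TILE_SIZE][x % TILE_SIZE][0] = red;
+ *
+ * i.e. the caller indexes the returned tile with the pixel position modulo
+ * TILE_SIZE.  The tile is written back lazily, either when this cache slot
+ * is reused or when sp_flush_tile_cache() is called.
+ */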
+
+
+/**
+ * Given the texture face, level, zslice, x and y values, compute
+ * the cache entry position/index where we'd hope to find the
+ * cached texture tile.
+ * This is basically a direct-mapped cache.
+ * XXX There are probably lots of ways in which we can improve this.
+ */
+static INLINE uint
+tex_cache_pos(int x, int y, int z, int face, int level)
+{
+ uint entry = x + y * 9 + z * 3 + face + level * 7;
+ return entry % NUM_ENTRIES;
+}
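+
+/*
+ * Worked example (added for clarity): for a 2D texture (z = 0, face = 0)
+ * at level 1, the tile at tile coordinates (x,y) = (2,3) hashes to
+ * entry = 2 + 3*9 + 0*3 + 0 + 1*7 = 36, i.e. slot 36 % NUM_ENTRIES = 36.
+ * Note the x and y passed in by sp_get_cached_tile_tex() are already
+ * divided by TILE_SIZE.
+ */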
+
+
+/**
+ * Similar to sp_get_cached_tile() but for textures.
+ * Tiles are read-only and indexed with more params.
+ */
+const struct softpipe_cached_tile *
+sp_get_cached_tile_tex(struct softpipe_context *sp,
+ struct softpipe_tile_cache *tc, int x, int y, int z,
+ int face, int level)
+{
+ struct pipe_screen *screen = sp->pipe.screen;
+ /* tile pos in texture image: */
+ const int tile_x = x & ~(TILE_SIZE - 1);
+ const int tile_y = y & ~(TILE_SIZE - 1);
+ /* cache pos/entry: */
+ const uint pos = tex_cache_pos(x / TILE_SIZE, y / TILE_SIZE, z,
+ face, level);
+ struct softpipe_cached_tile *tile = tc->entries + pos;
+
+ if (tc->texture) {
+ struct softpipe_texture *spt = softpipe_texture(tc->texture);
+ if (spt->modified) {
+ /* texture was modified, invalidate all cached tiles */
+ uint p;
+ for (p = 0; p < NUM_ENTRIES; p++) {
+ tile = tc->entries + p;
+ tile->x = -1;
+ }
+ spt->modified = FALSE;
+ }
+ }
+
+ if (tile_x != tile->x ||
+ tile_y != tile->y ||
+ z != tile->z ||
+ face != tile->face ||
+ level != tile->level) {
+ /* cache miss */
+
+#if 0
+ printf("miss at %u x=%d y=%d z=%d face=%d level=%d\n", pos,
+ x/TILE_SIZE, y/TILE_SIZE, z, face, level);
+#endif
+ /* check if we need to get a new transfer */
+ if (!tc->tex_trans ||
+ tc->tex_face != face ||
+ tc->tex_level != level ||
+ tc->tex_z != z) {
+ /* get new transfer (view into texture) */
+
+ if (tc->tex_trans) {
+ if (tc->tex_trans_map) {
+ tc->screen->transfer_unmap(tc->screen, tc->tex_trans);
+ tc->tex_trans_map = NULL;
+ }
+
+ screen->tex_transfer_destroy(tc->tex_trans);
+ tc->tex_trans = NULL;
+ }
+
+ tc->tex_trans = screen->get_tex_transfer(screen, tc->texture, face, level, z,
+ PIPE_TRANSFER_READ, 0, 0,
+ tc->texture->width[level],
+ tc->texture->height[level]);
+ tc->tex_trans_map = screen->transfer_map(screen, tc->tex_trans);
+
+ tc->tex_face = face;
+ tc->tex_level = level;
+ tc->tex_z = z;
+ }
+
+ /* get tile from the transfer (view into texture) */
+ pipe_get_tile_rgba(tc->tex_trans,
+ tile_x, tile_y, TILE_SIZE, TILE_SIZE,
+ (float *) tile->data.color);
+ tile->x = tile_x;
+ tile->y = tile_y;
+ tile->z = z;
+ tile->face = face;
+ tile->level = level;
+ }
+
+ return tile;
+}
+
+
+/**
+ * When a whole surface is being cleared to a value we can avoid fetching
+ * its tiles from the transfer (see sp_get_cached_tile() above).
+ * Save the color/value and set a 'clear flag' for each tile of the surface.
+ */
+void
+sp_tile_cache_clear(struct softpipe_tile_cache *tc, const float *rgba,
+ uint clearValue)
+{
+ uint pos;
+
+ tc->clear_color[0] = rgba[0];
+ tc->clear_color[1] = rgba[1];
+ tc->clear_color[2] = rgba[2];
+ tc->clear_color[3] = rgba[3];
+
+ tc->clear_val = clearValue;
+
+#if TILE_CLEAR_OPTIMIZATION
+ /* set flags to indicate all the tiles are cleared */
+ memset(tc->clear_flags, 255, sizeof(tc->clear_flags));
+#else
+ /* disable the optimization */
+ memset(tc->clear_flags, 0, sizeof(tc->clear_flags));
+#endif
+
+ for (pos = 0; pos < NUM_ENTRIES; pos++) {
+ struct softpipe_cached_tile *tile = tc->entries + pos;
+ tile->x = tile->y = -1;
+ }
+}
diff --git a/src/gallium/drivers/softpipe/sp_tile_cache.h b/src/gallium/drivers/softpipe/sp_tile_cache.h
new file mode 100644
index 0000000000..8f247d0e58
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_tile_cache.h
@@ -0,0 +1,106 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef SP_TILE_CACHE_H
+#define SP_TILE_CACHE_H
+
+#define TILE_CLEAR_OPTIMIZATION 1
+
+
+#include "pipe/p_compiler.h"
+
+
+struct softpipe_context;
+struct softpipe_tile_cache;
+
+
+/**
+ * Cache tile size (width and height). This needs to be a power of two.
+ */
+#define TILE_SIZE 64
+
+
+
+struct softpipe_cached_tile
+{
+ int x, y; /**< pos of tile in window coords */
+ int z, face, level; /**< Extra texture indexes */
+ union {
+ float color[TILE_SIZE][TILE_SIZE][4];
+ uint color32[TILE_SIZE][TILE_SIZE];
+ uint depth32[TILE_SIZE][TILE_SIZE];
+ ushort depth16[TILE_SIZE][TILE_SIZE];
+ ubyte stencil8[TILE_SIZE][TILE_SIZE];
+ ubyte any[1];
+ } data;
+};
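+
+/*
+ * Size note (added for clarity): the union is dominated by the float color
+ * plane, so each cached tile occupies TILE_SIZE * TILE_SIZE * 4 floats
+ * = 64 * 64 * 4 * 4 bytes = 64 KiB.  A cache with 50 entries (see
+ * NUM_ENTRIES in sp_tile_cache.c) therefore holds a little over 3 MiB of
+ * tile data.
+ */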
+
+
+extern struct softpipe_tile_cache *
+sp_create_tile_cache( struct pipe_screen *screen );
+
+extern void
+sp_destroy_tile_cache(struct softpipe_tile_cache *tc);
+
+extern void
+sp_tile_cache_set_surface(struct softpipe_tile_cache *tc,
+ struct pipe_surface *sps);
+
+extern struct pipe_surface *
+sp_tile_cache_get_surface(struct softpipe_tile_cache *tc);
+
+extern void
+sp_tile_cache_map_transfers(struct softpipe_tile_cache *tc);
+
+extern void
+sp_tile_cache_unmap_transfers(struct softpipe_tile_cache *tc);
+
+extern void
+sp_tile_cache_set_texture(struct pipe_context *pipe,
+ struct softpipe_tile_cache *tc,
+ struct pipe_texture *texture);
+
+extern void
+sp_flush_tile_cache(struct softpipe_context *softpipe,
+ struct softpipe_tile_cache *tc);
+
+extern void
+sp_tile_cache_clear(struct softpipe_tile_cache *tc, const float *rgba,
+ uint clearValue);
+
+extern struct softpipe_cached_tile *
+sp_get_cached_tile(struct softpipe_context *softpipe,
+ struct softpipe_tile_cache *tc, int x, int y);
+
+extern const struct softpipe_cached_tile *
+sp_get_cached_tile_tex(struct softpipe_context *softpipe,
+ struct softpipe_tile_cache *tc, int x, int y, int z,
+ int face, int level);
+
+
+#endif /* SP_TILE_CACHE_H */
+
diff --git a/src/gallium/drivers/softpipe/sp_winsys.h b/src/gallium/drivers/softpipe/sp_winsys.h
new file mode 100644
index 0000000000..9e571862b7
--- /dev/null
+++ b/src/gallium/drivers/softpipe/sp_winsys.h
@@ -0,0 +1,65 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* This is the interface that softpipe requires any window system
+ * hosting it to implement. This is the only include file in softpipe
+ * which is public.
+ */
+
+
+#ifndef SP_WINSYS_H
+#define SP_WINSYS_H
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct pipe_screen;
+struct pipe_winsys;
+struct pipe_context;
+
+
+struct pipe_context *softpipe_create( struct pipe_screen * );
+
+
+struct pipe_screen *
+softpipe_create_screen(struct pipe_winsys *);
+
+
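+/* Presumably called by the window system to obtain the pipe_buffer and row
+ * stride backing a texture (compare the buffer/stride fields of
+ * softpipe_texture in sp_texture.h), e.g. so its contents can be displayed.
+ * (Comment added for clarity; not part of the original header.)
+ */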
+boolean
+softpipe_get_texture_buffer( struct pipe_texture *texture,
+ struct pipe_buffer **buf,
+ unsigned *stride );
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SP_WINSYS_H */
diff --git a/src/gallium/drivers/trace/Makefile b/src/gallium/drivers/trace/Makefile
new file mode 100644
index 0000000000..dd6831c70a
--- /dev/null
+++ b/src/gallium/drivers/trace/Makefile
@@ -0,0 +1,17 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = trace
+
+C_SOURCES = \
+ tr_buffer.c \
+ tr_context.c \
+ tr_dump.c \
+ tr_dump_state.c \
+ tr_screen.c \
+ tr_state.c \
+ tr_rbug.c \
+ tr_drm.c \
+ tr_texture.c
+
+include ../../Makefile.template
diff --git a/src/gallium/drivers/trace/README b/src/gallium/drivers/trace/README
new file mode 100644
index 0000000000..1000c31e49
--- /dev/null
+++ b/src/gallium/drivers/trace/README
@@ -0,0 +1,78 @@
+ TRACE PIPE DRIVER
+
+
+= About =
+
+This directory contains a Gallium3D debugger pipe driver.
+It can trace all incoming calls and/or provide remote debugging functionality.
+
+
+= Build Instructions =
+
+To build, invoke scons in the top directory as
+
+ scons dri=no statetrackers=mesa drivers=softpipe,i965simple,trace winsys=xlib
+
+
+= Usage =
+
+To use it, do
+
+ export LD_LIBRARY_PATH=$PWD/build/linux-x86-debug/lib
+
+and ensure the right libGL.so is being picked up by doing
+
+ ldd progs/trivial/tri
+
+== Tracing ==
+
+For tracing, do
+
+ export XMESA_TRACE=y
+ GALLIUM_TRACE=tri.trace progs/trivial/tri
+
+which should create a tri.trace file, which is an XML file. You can view it by
+copying trace.xsl to the same directory and opening it with an XSLT-capable
+browser such as Firefox or Internet Explorer.
+
+== Remote debugging ==
+
+For remote debugging, do
+
+ export XMESA_TRACE=y
+ GALLIUM_RBUG=true progs/trivial/tri
+
+which should open a Gallium remote debugging session. While the program is running
+you can launch the small remote debugging application from progs/rbug. More
+information is in that directory.
+
+= Integrating =
+
+You can integrate the trace pipe driver either inside the state tracker or the
+winsys. The procedure in both cases is the same. Let's assume you have a
+pipe_screen and a pipe_context pair obtained by the usual means (variable and
+function names are just for illustration purposes):
+
+ real_screen = real_screen_create(...);
+
+ real_context = real_context_create(...);
+
+The trace screen and context are then created by doing
+
+ trace_screen = trace_screen_create(real_screen);
+
+ trace_context = trace_context_create(trace_screen, real_context);
+
+You can then simply use trace_screen and trace_context instead of real_screen
+and real_context.
+
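+As a concrete sketch, here is how this could look with softpipe as the real
+driver (names taken from sp_winsys.h; the winsys variable and all error
+handling are left out for brevity):
+
+ struct pipe_screen *real_screen = softpipe_create_screen(winsys);
+ struct pipe_context *real_context = softpipe_create(real_screen);
+
+ struct pipe_screen *screen = trace_screen_create(real_screen);
+ struct pipe_context *context = trace_context_create(screen, real_context);
+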
+Do not call trace_winsys_create. Simply pass trace_screen->winsys or
+trace_context->winsys in places where you would pass the winsys.
+
+You can create as many contexts as you wish. Just ensure that you don't mistake
+trace_screen for real_screen when creating them.
+
+
+--
+Jose Fonseca <jrfonseca@tungstengraphics.com>
+Jakob Bornecrantz <jakob@vmware.com>
diff --git a/src/gallium/drivers/trace/SConscript b/src/gallium/drivers/trace/SConscript
new file mode 100644
index 0000000000..c1675d1c16
--- /dev/null
+++ b/src/gallium/drivers/trace/SConscript
@@ -0,0 +1,19 @@
+Import('*')
+
+env = env.Clone()
+
+trace = env.ConvenienceLibrary(
+ target = 'trace',
+ source = [
+ 'tr_buffer.c',
+ 'tr_context.c',
+ 'tr_drm.c',
+ 'tr_dump.c',
+ 'tr_dump_state.c',
+ 'tr_screen.c',
+ 'tr_state.c',
+ 'tr_rbug.c',
+ 'tr_texture.c',
+ ])
+
+Export('trace')
diff --git a/src/gallium/drivers/trace/tr_buffer.c b/src/gallium/drivers/trace/tr_buffer.c
new file mode 100644
index 0000000000..4f0eff6a5a
--- /dev/null
+++ b/src/gallium/drivers/trace/tr_buffer.c
@@ -0,0 +1,75 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "util/u_memory.h"
+#include "util/u_simple_list.h"
+
+#include "tr_buffer.h"
+
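+/*
+ * Wrap a real pipe_buffer in a trace_buffer.  The wrapper copies the real
+ * buffer's fields into its base, takes its own reference count and points
+ * base.screen at the trace screen; callers hold &tr_buf->base while the
+ * underlying buffer stays reachable through tr_buf->buffer.
+ * (Descriptive comment added; not part of the original file.)
+ */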
+struct pipe_buffer *
+trace_buffer_create(struct trace_screen *tr_scr,
+ struct pipe_buffer *buffer)
+{
+ struct trace_buffer *tr_buf;
+
+ if(!buffer)
+ goto error;
+
+ assert(buffer->screen == tr_scr->screen);
+
+ tr_buf = CALLOC_STRUCT(trace_buffer);
+ if(!tr_buf)
+ goto error;
+
+ memcpy(&tr_buf->base, buffer, sizeof(struct pipe_buffer));
+
+ pipe_reference_init(&tr_buf->base.reference, 1);
+ tr_buf->base.screen = &tr_scr->base;
+ tr_buf->buffer = buffer;
+
+ trace_screen_add_to_list(tr_scr, buffers, tr_buf);
+
+ return &tr_buf->base;
+
+error:
+ pipe_buffer_reference(&buffer, NULL);
+ return NULL;
+}
+
+
+void
+trace_buffer_destroy(struct trace_screen *tr_scr,
+ struct pipe_buffer *buffer)
+{
+ struct trace_buffer *tr_buf = trace_buffer(buffer);
+
+ trace_screen_remove_from_list(tr_scr, buffers, tr_buf);
+
+ pipe_buffer_reference(&tr_buf->buffer, NULL);
+ FREE(tr_buf);
+}
diff --git a/src/gallium/drivers/trace/tr_buffer.h b/src/gallium/drivers/trace/tr_buffer.h
new file mode 100644
index 0000000000..1a2d0b9aea
--- /dev/null
+++ b/src/gallium/drivers/trace/tr_buffer.h
@@ -0,0 +1,70 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TR_BUFFER_H_
+#define TR_BUFFER_H_
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_state.h"
+
+#include "tr_screen.h"
+
+
+struct trace_buffer
+{
+ struct pipe_buffer base;
+
+ struct pipe_buffer *buffer;
+
+ struct tr_list list;
+
+ void *map;
+ boolean range_flushed;
+};
+
+
+static INLINE struct trace_buffer *
+trace_buffer(struct pipe_buffer *buffer)
+{
+ if(!buffer)
+ return NULL;
+ (void)trace_screen(buffer->screen);
+ return (struct trace_buffer *)buffer;
+}
+
+
+struct pipe_buffer *
+trace_buffer_create(struct trace_screen *tr_scr,
+ struct pipe_buffer *buffer);
+
+void
+trace_buffer_destroy(struct trace_screen *tr_scr,
+ struct pipe_buffer *buffer);
+
+
+#endif
diff --git a/src/gallium/drivers/trace/tr_context.c b/src/gallium/drivers/trace/tr_context.c
new file mode 100644
index 0000000000..ae0af4d055
--- /dev/null
+++ b/src/gallium/drivers/trace/tr_context.c
@@ -0,0 +1,1302 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_memory.h"
+#include "util/u_simple_list.h"
+
+#include "pipe/p_screen.h"
+
+#include "tr_dump.h"
+#include "tr_dump_state.h"
+#include "tr_state.h"
+#include "tr_buffer.h"
+#include "tr_screen.h"
+#include "tr_texture.h"
+
+
+static INLINE struct pipe_buffer *
+trace_buffer_unwrap(struct trace_context *tr_ctx,
+ struct pipe_buffer *buffer)
+{
+ struct trace_screen *tr_scr = trace_screen(tr_ctx->base.screen);
+ struct trace_buffer *tr_buf;
+
+ if(!buffer)
+ return NULL;
+
+ tr_buf = trace_buffer(buffer);
+
+ assert(tr_buf->buffer);
+ assert(tr_buf->buffer->screen == tr_scr->screen);
+ return tr_buf->buffer;
+}
+
+
+static INLINE struct pipe_texture *
+trace_texture_unwrap(struct trace_context *tr_ctx,
+ struct pipe_texture *texture)
+{
+ struct trace_texture *tr_tex;
+
+ if(!texture)
+ return NULL;
+
+ tr_tex = trace_texture(texture);
+
+ assert(tr_tex->texture);
+ return tr_tex->texture;
+}
+
+
+static INLINE struct pipe_surface *
+trace_surface_unwrap(struct trace_context *tr_ctx,
+ struct pipe_surface *surface)
+{
+ struct trace_screen *tr_scr = trace_screen(tr_ctx->base.screen);
+ struct trace_surface *tr_surf;
+
+ if(!surface)
+ return NULL;
+
+ assert(surface->texture);
+ if(!surface->texture)
+ return surface;
+
+ tr_surf = trace_surface(surface);
+
+ assert(tr_surf->surface);
+ assert(tr_surf->surface->texture->screen == tr_scr->screen);
+ return tr_surf->surface;
+}
+
+
+static INLINE void
+trace_context_set_edgeflags(struct pipe_context *_pipe,
+ const unsigned *bitfield)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "set_edgeflags");
+
+ trace_dump_arg(ptr, pipe);
+ /* FIXME: we don't know how big this array is */
+ trace_dump_arg(ptr, bitfield);
+
+ pipe->set_edgeflags(pipe, bitfield);
+
+ trace_dump_call_end();
+}
+
+
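+/*
+ * Block the calling thread around a draw call when rbug asks for it.
+ * Judging from rbug_blocker_flags below, flag 1 means "block before the
+ * draw", flag 2 "block after the draw", and bit 4 is set once one of the
+ * shader/surface/texture rules in draw_rule matches; the blocked bits are
+ * cleared again by the rbug thread (see tr_rbug.c).
+ * (Summary comment added for readability.)
+ */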
+static INLINE void
+trace_context_draw_block(struct trace_context *tr_ctx, int flag)
+{
+ int k;
+
+ pipe_mutex_lock(tr_ctx->draw_mutex);
+
+ if (tr_ctx->draw_blocker & flag) {
+ tr_ctx->draw_blocked |= flag;
+ } else if ((tr_ctx->draw_rule.blocker & flag) &&
+ (tr_ctx->draw_blocker & 4)) {
+ boolean block = FALSE;
+ debug_printf("%s (%lu %lu) (%lu %lu) (%lu %u) (%lu %u)\n", __FUNCTION__,
+ tr_ctx->draw_rule.fs, tr_ctx->curr.fs,
+ tr_ctx->draw_rule.vs, tr_ctx->curr.vs,
+ tr_ctx->draw_rule.surf, 0,
+ tr_ctx->draw_rule.tex, 0);
+ if (tr_ctx->draw_rule.fs &&
+ tr_ctx->draw_rule.fs == tr_ctx->curr.fs)
+ block = TRUE;
+ if (tr_ctx->draw_rule.vs &&
+ tr_ctx->draw_rule.vs == tr_ctx->curr.vs)
+ block = TRUE;
+ if (tr_ctx->draw_rule.surf &&
+ tr_ctx->draw_rule.surf == tr_ctx->curr.zsbuf)
+ block = TRUE;
+ if (tr_ctx->draw_rule.surf)
+ for (k = 0; k < tr_ctx->curr.nr_cbufs; k++)
+ if (tr_ctx->draw_rule.surf == tr_ctx->curr.cbufs[k])
+ block = TRUE;
+ if (tr_ctx->draw_rule.tex)
+ for (k = 0; k < tr_ctx->curr.num_texs; k++)
+ if (tr_ctx->draw_rule.tex == tr_ctx->curr.tex[k])
+ block = TRUE;
+
+ if (block)
+ tr_ctx->draw_blocked |= (flag | 4);
+ }
+
+ if (tr_ctx->draw_blocked)
+ trace_rbug_notify_draw_blocked(tr_ctx);
+
+ /* wait for rbug to clear the blocked flag */
+ while (tr_ctx->draw_blocked & flag) {
+ tr_ctx->draw_blocked |= flag;
+#ifdef PIPE_THREAD_HAVE_CONDVAR
+ pipe_condvar_wait(tr_ctx->draw_cond, tr_ctx->draw_mutex);
+#else
+ pipe_mutex_unlock(tr_ctx->draw_mutex);
+#ifdef PIPE_SUBSYSTEM_WINDOWS_USER
+ Sleep(1);
+#endif
+ pipe_mutex_lock(tr_ctx->draw_mutex);
+#endif
+ }
+
+ pipe_mutex_unlock(tr_ctx->draw_mutex);
+}
+
+static INLINE boolean
+trace_context_draw_arrays(struct pipe_context *_pipe,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ boolean result;
+
+ if (tr_ctx->curr.fs->disabled || tr_ctx->curr.vs->disabled)
+ return 0;
+
+ trace_context_draw_block(tr_ctx, 1);
+
+ trace_dump_call_begin("pipe_context", "draw_arrays");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(uint, mode);
+ trace_dump_arg(uint, start);
+ trace_dump_arg(uint, count);
+
+ result = pipe->draw_arrays(pipe, mode, start, count);
+
+ trace_dump_ret(bool, result);
+
+ trace_dump_call_end();
+
+ trace_context_draw_block(tr_ctx, 2);
+
+ return result;
+}
+
+
+static INLINE boolean
+trace_context_draw_elements(struct pipe_context *_pipe,
+ struct pipe_buffer *_indexBuffer,
+ unsigned indexSize,
+ unsigned mode, unsigned start, unsigned count)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct trace_buffer *tr_buf = trace_buffer(_indexBuffer);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ struct pipe_buffer *indexBuffer = tr_buf->buffer;
+ boolean result;
+
+ if (tr_ctx->curr.fs->disabled || tr_ctx->curr.vs->disabled)
+ return 0;
+
+ trace_context_draw_block(tr_ctx, 1);
+
+ trace_screen_user_buffer_update(_pipe->screen, indexBuffer);
+
+ trace_dump_call_begin("pipe_context", "draw_elements");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, indexBuffer);
+ trace_dump_arg(uint, indexSize);
+ trace_dump_arg(uint, mode);
+ trace_dump_arg(uint, start);
+ trace_dump_arg(uint, count);
+
+ result = pipe->draw_elements(pipe, indexBuffer, indexSize, mode, start, count);
+
+ trace_dump_ret(bool, result);
+
+ trace_dump_call_end();
+
+ trace_context_draw_block(tr_ctx, 2);
+
+ return result;
+}
+
+
+static INLINE boolean
+trace_context_draw_range_elements(struct pipe_context *_pipe,
+ struct pipe_buffer *_indexBuffer,
+ unsigned indexSize,
+ unsigned minIndex,
+ unsigned maxIndex,
+ unsigned mode,
+ unsigned start,
+ unsigned count)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct trace_buffer *tr_buf = trace_buffer(_indexBuffer);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ struct pipe_buffer *indexBuffer = tr_buf->buffer;
+ boolean result;
+
+ if (tr_ctx->curr.fs->disabled || tr_ctx->curr.vs->disabled)
+ return 0;
+
+ trace_context_draw_block(tr_ctx, 1);
+
+ trace_screen_user_buffer_update(_pipe->screen, indexBuffer);
+
+ trace_dump_call_begin("pipe_context", "draw_range_elements");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, indexBuffer);
+ trace_dump_arg(uint, indexSize);
+ trace_dump_arg(uint, minIndex);
+ trace_dump_arg(uint, maxIndex);
+ trace_dump_arg(uint, mode);
+ trace_dump_arg(uint, start);
+ trace_dump_arg(uint, count);
+
+ result = pipe->draw_range_elements(pipe,
+ indexBuffer,
+ indexSize, minIndex, maxIndex,
+ mode, start, count);
+
+ trace_dump_ret(bool, result);
+
+ trace_dump_call_end();
+
+ trace_context_draw_block(tr_ctx, 2);
+
+ return result;
+}
+
+
+static INLINE struct pipe_query *
+trace_context_create_query(struct pipe_context *_pipe,
+ unsigned query_type)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ struct pipe_query *result;
+
+ trace_dump_call_begin("pipe_context", "create_query");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(uint, query_type);
+
+ result = pipe->create_query(pipe, query_type);
+
+ trace_dump_ret(ptr, result);
+
+ trace_dump_call_end();
+
+ return result;
+}
+
+
+static INLINE void
+trace_context_destroy_query(struct pipe_context *_pipe,
+ struct pipe_query *query)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "destroy_query");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, query);
+
+ pipe->destroy_query(pipe, query);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_begin_query(struct pipe_context *_pipe,
+ struct pipe_query *query)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "begin_query");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, query);
+
+ pipe->begin_query(pipe, query);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_end_query(struct pipe_context *_pipe,
+ struct pipe_query *query)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "end_query");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, query);
+
+ pipe->end_query(pipe, query);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE boolean
+trace_context_get_query_result(struct pipe_context *_pipe,
+ struct pipe_query *query,
+ boolean wait,
+ uint64_t *presult)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ uint64_t result;
+ boolean _result;
+
+ trace_dump_call_begin("pipe_context", "get_query_result");
+
+ trace_dump_arg(ptr, pipe);
+
+ _result = pipe->get_query_result(pipe, query, wait, presult);
+ result = *presult;
+
+ trace_dump_arg(uint, result);
+ trace_dump_ret(bool, _result);
+
+ trace_dump_call_end();
+
+ return _result;
+}
+
+
+static INLINE void *
+trace_context_create_blend_state(struct pipe_context *_pipe,
+ const struct pipe_blend_state *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ void * result;
+
+ trace_dump_call_begin("pipe_context", "create_blend_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(blend_state, state);
+
+ result = pipe->create_blend_state(pipe, state);
+
+ trace_dump_ret(ptr, result);
+
+ trace_dump_call_end();
+
+ return result;
+}
+
+
+static INLINE void
+trace_context_bind_blend_state(struct pipe_context *_pipe,
+ void *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "bind_blend_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, state);
+
+ pipe->bind_blend_state(pipe, state);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_delete_blend_state(struct pipe_context *_pipe,
+ void *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "delete_blend_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, state);
+
+ pipe->delete_blend_state(pipe, state);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void *
+trace_context_create_sampler_state(struct pipe_context *_pipe,
+ const struct pipe_sampler_state *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ void * result;
+
+ trace_dump_call_begin("pipe_context", "create_sampler_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(sampler_state, state);
+
+ result = pipe->create_sampler_state(pipe, state);
+
+ trace_dump_ret(ptr, result);
+
+ trace_dump_call_end();
+
+ return result;
+}
+
+
+static INLINE void
+trace_context_bind_sampler_states(struct pipe_context *_pipe,
+ unsigned num_states, void **states)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "bind_sampler_states");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(uint, num_states);
+ trace_dump_arg_array(ptr, states, num_states);
+
+ pipe->bind_sampler_states(pipe, num_states, states);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_delete_sampler_state(struct pipe_context *_pipe,
+ void *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "delete_sampler_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, state);
+
+ pipe->delete_sampler_state(pipe, state);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void *
+trace_context_create_rasterizer_state(struct pipe_context *_pipe,
+ const struct pipe_rasterizer_state *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ void * result;
+
+ trace_dump_call_begin("pipe_context", "create_rasterizer_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(rasterizer_state, state);
+
+ result = pipe->create_rasterizer_state(pipe, state);
+
+ trace_dump_ret(ptr, result);
+
+ trace_dump_call_end();
+
+ return result;
+}
+
+
+static INLINE void
+trace_context_bind_rasterizer_state(struct pipe_context *_pipe,
+ void *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "bind_rasterizer_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, state);
+
+ pipe->bind_rasterizer_state(pipe, state);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_delete_rasterizer_state(struct pipe_context *_pipe,
+ void *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "delete_rasterizer_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, state);
+
+ pipe->delete_rasterizer_state(pipe, state);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void *
+trace_context_create_depth_stencil_alpha_state(struct pipe_context *_pipe,
+ const struct pipe_depth_stencil_alpha_state *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ void * result;
+
+ trace_dump_call_begin("pipe_context", "create_depth_stencil_alpha_state");
+
+ result = pipe->create_depth_stencil_alpha_state(pipe, state);
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(depth_stencil_alpha_state, state);
+
+ trace_dump_ret(ptr, result);
+
+ trace_dump_call_end();
+
+ return result;
+}
+
+
+static INLINE void
+trace_context_bind_depth_stencil_alpha_state(struct pipe_context *_pipe,
+ void *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "bind_depth_stencil_alpha_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, state);
+
+ pipe->bind_depth_stencil_alpha_state(pipe, state);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_delete_depth_stencil_alpha_state(struct pipe_context *_pipe,
+ void *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "delete_depth_stencil_alpha_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, state);
+
+ pipe->delete_depth_stencil_alpha_state(pipe, state);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void *
+trace_context_create_fs_state(struct pipe_context *_pipe,
+ const struct pipe_shader_state *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ void * result;
+
+ trace_dump_call_begin("pipe_context", "create_fs_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(shader_state, state);
+
+ result = pipe->create_fs_state(pipe, state);
+
+ trace_dump_ret(ptr, result);
+
+ trace_dump_call_end();
+
+ result = trace_shader_create(tr_ctx, state, result, TRACE_SHADER_FRAGMENT);
+
+ return result;
+}
+
+
+static INLINE void
+trace_context_bind_fs_state(struct pipe_context *_pipe,
+ void *_state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct trace_shader *tr_shdr = trace_shader(_state);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ void *state = tr_shdr ? tr_shdr->state : NULL;
+
+ trace_dump_call_begin("pipe_context", "bind_fs_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, state);
+
+ tr_ctx->curr.fs = tr_shdr;
+
+ if (tr_shdr && tr_shdr->replaced)
+ state = tr_shdr->replaced;
+
+ pipe->bind_fs_state(pipe, state);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_delete_fs_state(struct pipe_context *_pipe,
+ void *_state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct trace_shader *tr_shdr = trace_shader(_state);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ void *state = tr_shdr->state;
+
+ trace_dump_call_begin("pipe_context", "delete_fs_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, state);
+
+ pipe->delete_fs_state(pipe, state);
+
+ trace_dump_call_end();
+
+ trace_shader_destroy(tr_ctx, tr_shdr);
+}
+
+
+static INLINE void *
+trace_context_create_vs_state(struct pipe_context *_pipe,
+ const struct pipe_shader_state *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ void * result;
+
+ trace_dump_call_begin("pipe_context", "create_vs_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(shader_state, state);
+
+ result = pipe->create_vs_state(pipe, state);
+
+ trace_dump_ret(ptr, result);
+
+ trace_dump_call_end();
+
+ result = trace_shader_create(tr_ctx, state, result, TRACE_SHADER_VERTEX);
+
+ return result;
+}
+
+
+static INLINE void
+trace_context_bind_vs_state(struct pipe_context *_pipe,
+ void *_state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct trace_shader *tr_shdr = trace_shader(_state);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ void *state = tr_shdr ? tr_shdr->state : NULL;
+
+ trace_dump_call_begin("pipe_context", "bind_vs_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, state);
+
+ tr_ctx->curr.vs = tr_shdr;
+
+ if (tr_shdr && tr_shdr->replaced)
+ state = tr_shdr->replaced;
+
+ pipe->bind_vs_state(pipe, state);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_delete_vs_state(struct pipe_context *_pipe,
+ void *_state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct trace_shader *tr_shdr = trace_shader(_state);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ void *state = tr_shdr->state;
+
+ trace_dump_call_begin("pipe_context", "delete_vs_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, state);
+
+ pipe->delete_vs_state(pipe, state);
+
+ trace_dump_call_end();
+
+ trace_shader_destroy(tr_ctx, tr_shdr);
+}
+
+
+static INLINE void
+trace_context_set_blend_color(struct pipe_context *_pipe,
+ const struct pipe_blend_color *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "set_blend_color");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(blend_color, state);
+
+ pipe->set_blend_color(pipe, state);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_set_clip_state(struct pipe_context *_pipe,
+ const struct pipe_clip_state *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "set_clip_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(clip_state, state);
+
+ pipe->set_clip_state(pipe, state);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_set_constant_buffer(struct pipe_context *_pipe,
+ uint shader, uint index,
+ const struct pipe_constant_buffer *buffer)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ if (buffer)
+ trace_screen_user_buffer_update(_pipe->screen, buffer->buffer);
+
+ trace_dump_call_begin("pipe_context", "set_constant_buffer");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(uint, shader);
+ trace_dump_arg(uint, index);
+ trace_dump_arg(constant_buffer, buffer);
+
+ if (buffer) {
+ struct pipe_constant_buffer _buffer;
+ _buffer.buffer = trace_buffer_unwrap(tr_ctx, buffer->buffer);
+ pipe->set_constant_buffer(pipe, shader, index, &_buffer);
+ } else {
+ pipe->set_constant_buffer(pipe, shader, index, buffer);
+ }
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_set_framebuffer_state(struct pipe_context *_pipe,
+ const struct pipe_framebuffer_state *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ struct pipe_framebuffer_state unwrapped_state;
+ unsigned i;
+
+ {
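+ /* Record which textures are currently bound as color/depth targets;
+ trace_context_draw_block() compares these against draw_rule.surf.
+ (Comment added for clarity.) */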
+ tr_ctx->curr.nr_cbufs = state->nr_cbufs;
+ for (i = 0; i < state->nr_cbufs; i++)
+ if (state->cbufs[i])
+ tr_ctx->curr.cbufs[i] = trace_texture(state->cbufs[i]->texture);
+ else
+ tr_ctx->curr.cbufs[i] = NULL;
+ if (state->zsbuf)
+ tr_ctx->curr.zsbuf = trace_texture(state->zsbuf->texture);
+ else
+ tr_ctx->curr.zsbuf = NULL;
+ }
+
+ /* Unwrap the input state */
+ memcpy(&unwrapped_state, state, sizeof(unwrapped_state));
+ for(i = 0; i < state->nr_cbufs; ++i)
+ unwrapped_state.cbufs[i] = trace_surface_unwrap(tr_ctx, state->cbufs[i]);
+ for(i = state->nr_cbufs; i < PIPE_MAX_COLOR_BUFS; ++i)
+ unwrapped_state.cbufs[i] = NULL;
+ unwrapped_state.zsbuf = trace_surface_unwrap(tr_ctx, state->zsbuf);
+ state = &unwrapped_state;
+
+ trace_dump_call_begin("pipe_context", "set_framebuffer_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(framebuffer_state, state);
+
+ pipe->set_framebuffer_state(pipe, state);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_set_polygon_stipple(struct pipe_context *_pipe,
+ const struct pipe_poly_stipple *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "set_polygon_stipple");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(poly_stipple, state);
+
+ pipe->set_polygon_stipple(pipe, state);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_set_scissor_state(struct pipe_context *_pipe,
+ const struct pipe_scissor_state *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "set_scissor_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(scissor_state, state);
+
+ pipe->set_scissor_state(pipe, state);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_set_viewport_state(struct pipe_context *_pipe,
+ const struct pipe_viewport_state *state)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "set_viewport_state");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(viewport_state, state);
+
+ pipe->set_viewport_state(pipe, state);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_set_sampler_textures(struct pipe_context *_pipe,
+ unsigned num_textures,
+ struct pipe_texture **textures)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct trace_texture *tr_tex;
+ struct pipe_context *pipe = tr_ctx->pipe;
+ struct pipe_texture *unwrapped_textures[PIPE_MAX_SAMPLERS];
+ unsigned i;
+
+ tr_ctx->curr.num_texs = num_textures;
+ for(i = 0; i < num_textures; ++i) {
+ tr_tex = trace_texture(textures[i]);
+ tr_ctx->curr.tex[i] = tr_tex;
+ unwrapped_textures[i] = tr_tex ? tr_tex->texture : NULL;
+ }
+ textures = unwrapped_textures;
+
+ trace_dump_call_begin("pipe_context", "set_sampler_textures");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(uint, num_textures);
+ trace_dump_arg_array(ptr, textures, num_textures);
+
+ pipe->set_sampler_textures(pipe, num_textures, textures);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_set_vertex_buffers(struct pipe_context *_pipe,
+ unsigned num_buffers,
+ const struct pipe_vertex_buffer *buffers)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ unsigned i;
+
+ for(i = 0; i < num_buffers; ++i)
+ trace_screen_user_buffer_update(_pipe->screen, buffers[i].buffer);
+
+ trace_dump_call_begin("pipe_context", "set_vertex_buffers");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(uint, num_buffers);
+
+ trace_dump_arg_begin("buffers");
+ trace_dump_struct_array(vertex_buffer, buffers, num_buffers);
+ trace_dump_arg_end();
+
+ if (num_buffers) {
+ struct pipe_vertex_buffer *_buffers = malloc(num_buffers * sizeof(*_buffers));
+ memcpy(_buffers, buffers, num_buffers * sizeof(*_buffers));
+ for (i = 0; i < num_buffers; i++)
+ _buffers[i].buffer = trace_buffer_unwrap(tr_ctx, buffers[i].buffer);
+ pipe->set_vertex_buffers(pipe, num_buffers, _buffers);
+ free(_buffers);
+ } else {
+ pipe->set_vertex_buffers(pipe, num_buffers, NULL);
+ }
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_set_vertex_elements(struct pipe_context *_pipe,
+ unsigned num_elements,
+ const struct pipe_vertex_element *elements)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "set_vertex_elements");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(uint, num_elements);
+
+ trace_dump_arg_begin("elements");
+ trace_dump_struct_array(vertex_element, elements, num_elements);
+ trace_dump_arg_end();
+
+ pipe->set_vertex_elements(pipe, num_elements, elements);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_surface_copy(struct pipe_context *_pipe,
+ struct pipe_surface *dest,
+ unsigned destx, unsigned desty,
+ struct pipe_surface *src,
+ unsigned srcx, unsigned srcy,
+ unsigned width, unsigned height)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ dest = trace_surface_unwrap(tr_ctx, dest);
+ src = trace_surface_unwrap(tr_ctx, src);
+
+ trace_dump_call_begin("pipe_context", "surface_copy");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, dest);
+ trace_dump_arg(uint, destx);
+ trace_dump_arg(uint, desty);
+ trace_dump_arg(ptr, src);
+ trace_dump_arg(uint, srcx);
+ trace_dump_arg(uint, srcy);
+ trace_dump_arg(uint, width);
+ trace_dump_arg(uint, height);
+
+ pipe->surface_copy(pipe,
+ dest, destx, desty,
+ src, srcx, srcy, width, height);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_surface_fill(struct pipe_context *_pipe,
+ struct pipe_surface *dst,
+ unsigned dstx, unsigned dsty,
+ unsigned width, unsigned height,
+ unsigned value)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ dst = trace_surface_unwrap(tr_ctx, dst);
+
+ trace_dump_call_begin("pipe_context", "surface_fill");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, dst);
+ trace_dump_arg(uint, dstx);
+ trace_dump_arg(uint, dsty);
+ trace_dump_arg(uint, width);
+ trace_dump_arg(uint, height);
+ trace_dump_arg(uint, value);
+
+ pipe->surface_fill(pipe, dst, dstx, dsty, width, height, value);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_clear(struct pipe_context *_pipe,
+ unsigned buffers,
+ const float *rgba,
+ double depth,
+ unsigned stencil)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "clear");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(uint, buffers);
+ trace_dump_arg_array(float, rgba, 4);
+ trace_dump_arg(float, depth);
+ trace_dump_arg(uint, stencil);
+
+ pipe->clear(pipe, buffers, rgba, depth, stencil);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_flush(struct pipe_context *_pipe,
+ unsigned flags,
+ struct pipe_fence_handle **fence)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "flush");
+
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(uint, flags);
+
+ pipe->flush(pipe, flags, fence);
+
+ if(fence)
+ trace_dump_ret(ptr, *fence);
+
+ trace_dump_call_end();
+}
+
+
+static INLINE void
+trace_context_destroy(struct pipe_context *_pipe)
+{
+ struct trace_screen *tr_scr = trace_screen(_pipe->screen);
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct pipe_context *pipe = tr_ctx->pipe;
+
+ trace_dump_call_begin("pipe_context", "destroy");
+ trace_dump_arg(ptr, pipe);
+ trace_dump_call_end();
+
+ trace_screen_remove_from_list(tr_scr, contexts, tr_ctx);
+
+ pipe->destroy(pipe);
+
+ FREE(tr_ctx);
+}
+
+static unsigned int
+trace_is_texture_referenced( struct pipe_context *_pipe,
+ struct pipe_texture *_texture,
+ unsigned face, unsigned level)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct trace_texture *tr_tex = trace_texture(_texture);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ struct pipe_texture *texture = tr_tex->texture;
+ unsigned int referenced;
+
+ trace_dump_call_begin("pipe_context", "is_texture_referenced");
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, texture);
+ trace_dump_arg(uint, face);
+ trace_dump_arg(uint, level);
+
+ referenced = pipe->is_texture_referenced(pipe, texture, face, level);
+
+ trace_dump_ret(uint, referenced);
+ trace_dump_call_end();
+
+ return referenced;
+}
+
+static unsigned int
+trace_is_buffer_referenced( struct pipe_context *_pipe,
+ struct pipe_buffer *_buf)
+{
+ struct trace_context *tr_ctx = trace_context(_pipe);
+ struct trace_buffer *tr_buf = trace_buffer(_buf);
+ struct pipe_context *pipe = tr_ctx->pipe;
+ struct pipe_buffer *buf = tr_buf->buffer;
+ unsigned int referenced;
+
+ trace_dump_call_begin("pipe_context", "is_buffer_referenced");
+ trace_dump_arg(ptr, pipe);
+ trace_dump_arg(ptr, buf);
+
+ referenced = pipe->is_buffer_referenced(pipe, buf);
+
+ trace_dump_ret(uint, referenced);
+ trace_dump_call_end();
+
+ return referenced;
+}
+
+static const struct debug_named_value rbug_blocker_flags[] = {
+ {"before", 1},
+ {"after", 2},
+ {NULL, 0},
+};
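+
+/*
+ * Illustrative usage of these flags (the application name below is
+ * hypothetical): running with
+ *
+ *    RBUG_BLOCK=before ./some_gallium_app
+ *
+ * blocks draw calls before they reach the wrapped pipe; the names above are
+ * mapped to flag values by debug_get_flags_option() in
+ * trace_context_create() below.
+ */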
+
+struct pipe_context *
+trace_context_create(struct pipe_screen *_screen,
+ struct pipe_context *pipe)
+{
+ struct trace_screen *tr_scr;
+ struct trace_context *tr_ctx;
+ struct pipe_screen *screen;
+
+ if(!pipe)
+ goto error1;
+
+ if(!trace_enabled())
+ goto error1;
+
+ tr_scr = trace_screen(_screen);
+ screen = tr_scr->screen;
+
+ tr_ctx = CALLOC_STRUCT(trace_context);
+ if(!tr_ctx)
+ goto error1;
+
+ tr_ctx->draw_blocker = debug_get_flags_option("RBUG_BLOCK",
+ rbug_blocker_flags,
+ 0);
+ pipe_mutex_init(tr_ctx->draw_mutex);
+ pipe_condvar_init(tr_ctx->draw_cond);
+ pipe_mutex_init(tr_ctx->list_mutex);
+ make_empty_list(&tr_ctx->shaders);
+
+ tr_ctx->base.winsys = _screen->winsys;
+ tr_ctx->base.screen = _screen;
+ tr_ctx->base.destroy = trace_context_destroy;
+ tr_ctx->base.set_edgeflags = trace_context_set_edgeflags;
+ tr_ctx->base.draw_arrays = trace_context_draw_arrays;
+ tr_ctx->base.draw_elements = trace_context_draw_elements;
+ tr_ctx->base.draw_range_elements = trace_context_draw_range_elements;
+ tr_ctx->base.create_query = trace_context_create_query;
+ tr_ctx->base.destroy_query = trace_context_destroy_query;
+ tr_ctx->base.begin_query = trace_context_begin_query;
+ tr_ctx->base.end_query = trace_context_end_query;
+ tr_ctx->base.get_query_result = trace_context_get_query_result;
+ tr_ctx->base.create_blend_state = trace_context_create_blend_state;
+ tr_ctx->base.bind_blend_state = trace_context_bind_blend_state;
+ tr_ctx->base.delete_blend_state = trace_context_delete_blend_state;
+ tr_ctx->base.create_sampler_state = trace_context_create_sampler_state;
+ tr_ctx->base.bind_sampler_states = trace_context_bind_sampler_states;
+ tr_ctx->base.delete_sampler_state = trace_context_delete_sampler_state;
+ tr_ctx->base.create_rasterizer_state = trace_context_create_rasterizer_state;
+ tr_ctx->base.bind_rasterizer_state = trace_context_bind_rasterizer_state;
+ tr_ctx->base.delete_rasterizer_state = trace_context_delete_rasterizer_state;
+ tr_ctx->base.create_depth_stencil_alpha_state = trace_context_create_depth_stencil_alpha_state;
+ tr_ctx->base.bind_depth_stencil_alpha_state = trace_context_bind_depth_stencil_alpha_state;
+ tr_ctx->base.delete_depth_stencil_alpha_state = trace_context_delete_depth_stencil_alpha_state;
+ tr_ctx->base.create_fs_state = trace_context_create_fs_state;
+ tr_ctx->base.bind_fs_state = trace_context_bind_fs_state;
+ tr_ctx->base.delete_fs_state = trace_context_delete_fs_state;
+ tr_ctx->base.create_vs_state = trace_context_create_vs_state;
+ tr_ctx->base.bind_vs_state = trace_context_bind_vs_state;
+ tr_ctx->base.delete_vs_state = trace_context_delete_vs_state;
+ tr_ctx->base.set_blend_color = trace_context_set_blend_color;
+ tr_ctx->base.set_clip_state = trace_context_set_clip_state;
+ tr_ctx->base.set_constant_buffer = trace_context_set_constant_buffer;
+ tr_ctx->base.set_framebuffer_state = trace_context_set_framebuffer_state;
+ tr_ctx->base.set_polygon_stipple = trace_context_set_polygon_stipple;
+ tr_ctx->base.set_scissor_state = trace_context_set_scissor_state;
+ tr_ctx->base.set_viewport_state = trace_context_set_viewport_state;
+ tr_ctx->base.set_sampler_textures = trace_context_set_sampler_textures;
+ tr_ctx->base.set_vertex_buffers = trace_context_set_vertex_buffers;
+ tr_ctx->base.set_vertex_elements = trace_context_set_vertex_elements;
+ if (pipe->surface_copy)
+ tr_ctx->base.surface_copy = trace_context_surface_copy;
+ if (pipe->surface_fill)
+ tr_ctx->base.surface_fill = trace_context_surface_fill;
+ tr_ctx->base.clear = trace_context_clear;
+ tr_ctx->base.flush = trace_context_flush;
+ tr_ctx->base.is_texture_referenced = trace_is_texture_referenced;
+ tr_ctx->base.is_buffer_referenced = trace_is_buffer_referenced;
+
+ tr_ctx->pipe = pipe;
+
+ trace_dump_call_begin("", "pipe_context_create");
+ trace_dump_arg(ptr, screen);
+ trace_dump_ret(ptr, pipe);
+ trace_dump_call_end();
+
+ trace_screen_add_to_list(tr_scr, contexts, tr_ctx);
+
+ return &tr_ctx->base;
+
+error1:
+ return pipe;
+}
diff --git a/src/gallium/drivers/trace/tr_context.h b/src/gallium/drivers/trace/tr_context.h
new file mode 100644
index 0000000000..6febe4b411
--- /dev/null
+++ b/src/gallium/drivers/trace/tr_context.h
@@ -0,0 +1,108 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TR_CONTEXT_H_
+#define TR_CONTEXT_H_
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+#include "pipe/p_context.h"
+
+#include "tr_screen.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct trace_context
+{
+ struct pipe_context base;
+
+ struct pipe_context *pipe;
+
+ /* current state */
+ struct {
+ struct trace_shader *fs;
+ struct trace_shader *vs;
+
+ struct trace_texture *tex[PIPE_MAX_SAMPLERS];
+ unsigned num_texs;
+
+ unsigned nr_cbufs;
+ struct trace_texture *cbufs[PIPE_MAX_COLOR_BUFS];
+ struct trace_texture *zsbuf;
+ } curr;
+
+ struct {
+ struct trace_shader *fs;
+ struct trace_shader *vs;
+
+ struct trace_texture *tex;
+ struct trace_texture *surf;
+
+ int blocker;
+ } draw_rule;
+ unsigned draw_num_rules;
+ pipe_condvar draw_cond;
+ pipe_mutex draw_mutex;
+ int draw_blocker;
+ int draw_blocked;
+
+ /* for list on screen */
+ struct tr_list list;
+
+ /* list of state objects */
+ pipe_mutex list_mutex;
+ unsigned num_shaders;
+ struct tr_list shaders;
+};
+
+
+static INLINE struct trace_context *
+trace_context(struct pipe_context *pipe)
+{
+ assert(pipe);
+ return (struct trace_context *)pipe;
+}
+
+
+
+struct pipe_context *
+trace_context_create(struct pipe_screen *screen,
+ struct pipe_context *pipe);
+
+void
+trace_rbug_notify_draw_blocked(struct trace_context *tr_ctx);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* TR_CONTEXT_H_ */
diff --git a/src/gallium/drivers/trace/tr_drm.c b/src/gallium/drivers/trace/tr_drm.c
new file mode 100644
index 0000000000..781ca5d3bc
--- /dev/null
+++ b/src/gallium/drivers/trace/tr_drm.c
@@ -0,0 +1,186 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "state_tracker/drm_api.h"
+
+#include "util/u_memory.h"
+#include "trace/tr_drm.h"
+#include "trace/tr_screen.h"
+#include "trace/tr_context.h"
+#include "trace/tr_buffer.h"
+#include "trace/tr_texture.h"
+
+struct trace_drm_api
+{
+ struct drm_api base;
+
+ struct drm_api *api;
+};
+
+static INLINE struct trace_drm_api *
+trace_drm_api(struct drm_api *_api)
+{
+ return (struct trace_drm_api *)_api;
+}
+
+static struct pipe_screen *
+trace_drm_create_screen(struct drm_api *_api, int fd,
+ struct drm_create_screen_arg *arg)
+{
+ struct trace_drm_api *tr_api = trace_drm_api(_api);
+ struct drm_api *api = tr_api->api;
+ struct pipe_screen *screen;
+
+ /* TODO trace call */
+
+ if (arg && arg->mode != DRM_CREATE_NORMAL)
+ return NULL;
+
+ screen = api->create_screen(api, fd, arg);
+
+ return trace_screen_create(screen);
+}
+
+static struct pipe_context *
+trace_drm_create_context(struct drm_api *_api,
+ struct pipe_screen *_screen)
+{
+ struct trace_screen *tr_screen = trace_screen(_screen);
+ struct trace_drm_api *tr_api = trace_drm_api(_api);
+ struct pipe_screen *screen = tr_screen->screen;
+ struct drm_api *api = tr_api->api;
+ struct pipe_context *pipe;
+
+ /* TODO trace call */
+
+ pipe = api->create_context(api, screen);
+
+ pipe = trace_context_create(_screen, pipe);
+
+ return pipe;
+}
+
+static struct pipe_texture *
+trace_drm_texture_from_shared_handle(struct drm_api *_api,
+ struct pipe_screen *_screen,
+ struct pipe_texture *templ,
+ const char *name,
+ unsigned stride,
+ unsigned handle)
+{
+ struct trace_screen *tr_screen = trace_screen(_screen);
+ struct trace_drm_api *tr_api = trace_drm_api(_api);
+ struct pipe_screen *screen = tr_screen->screen;
+ struct drm_api *api = tr_api->api;
+ struct pipe_texture *result;
+
+ /* TODO trace call */
+
+ result = api->texture_from_shared_handle(api, screen, templ, name, stride, handle);
+
+ result = trace_texture_create(trace_screen(_screen), result);
+
+ return result;
+}
+
+static boolean
+trace_drm_shared_handle_from_texture(struct drm_api *_api,
+ struct pipe_screen *_screen,
+ struct pipe_texture *_texture,
+ unsigned *stride,
+ unsigned *handle)
+{
+ struct trace_screen *tr_screen = trace_screen(_screen);
+ struct trace_texture *tr_texture = trace_texture(_texture);
+ struct trace_drm_api *tr_api = trace_drm_api(_api);
+ struct pipe_screen *screen = tr_screen->screen;
+ struct pipe_texture *texture = tr_texture->texture;
+ struct drm_api *api = tr_api->api;
+
+ /* TODO trace call */
+
+ return api->shared_handle_from_texture(api, screen, texture, stride, handle);
+}
+
+static boolean
+trace_drm_local_handle_from_texture(struct drm_api *_api,
+ struct pipe_screen *_screen,
+ struct pipe_texture *_texture,
+ unsigned *stride,
+ unsigned *handle)
+{
+ struct trace_screen *tr_screen = trace_screen(_screen);
+ struct trace_texture *tr_texture = trace_texture(_texture);
+ struct trace_drm_api *tr_api = trace_drm_api(_api);
+ struct pipe_screen *screen = tr_screen->screen;
+ struct pipe_texture *texture = tr_texture->texture;
+ struct drm_api *api = tr_api->api;
+
+ /* TODO trace call */
+
+ return api->local_handle_from_texture(api, screen, texture, stride, handle);
+}
+
+static void
+trace_drm_destroy(struct drm_api *_api)
+{
+ struct trace_drm_api *tr_api = trace_drm_api(_api);
+ struct drm_api *api = tr_api->api;
+ api->destroy(api);
+
+ free(tr_api);
+}
+
+struct drm_api *
+trace_drm_create(struct drm_api *api)
+{
+ struct trace_drm_api *tr_api;
+
+ if (!api)
+ goto error;
+
+ if (!trace_enabled())
+ goto error;
+
+ tr_api = CALLOC_STRUCT(trace_drm_api);
+
+ if (!tr_api)
+ goto error;
+
+ tr_api->base.create_screen = trace_drm_create_screen;
+ tr_api->base.create_context = trace_drm_create_context;
+ tr_api->base.texture_from_shared_handle = trace_drm_texture_from_shared_handle;
+ tr_api->base.shared_handle_from_texture = trace_drm_shared_handle_from_texture;
+ tr_api->base.local_handle_from_texture = trace_drm_local_handle_from_texture;
+ tr_api->base.destroy = trace_drm_destroy;
+ tr_api->api = api;
+
+ return &tr_api->base;
+
+error:
+ return api;
+}
diff --git a/src/gallium/drivers/trace/tr_drm.h b/src/gallium/drivers/trace/tr_drm.h
new file mode 100644
index 0000000000..845c66a32a
--- /dev/null
+++ b/src/gallium/drivers/trace/tr_drm.h
@@ -0,0 +1,35 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TR_DRM_H
+#define TR_DRM_H
+
+struct drm_api;
+
+struct drm_api* trace_drm_create(struct drm_api *api);
+
+#endif /* TR_DRM_H */
diff --git a/src/gallium/drivers/trace/tr_dump.c b/src/gallium/drivers/trace/tr_dump.c
new file mode 100644
index 0000000000..7e2ccbcfdc
--- /dev/null
+++ b/src/gallium/drivers/trace/tr_dump.c
@@ -0,0 +1,627 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * @file
+ * Trace dumping functions.
+ *
+ * For now we just use standard XML for dumping the trace calls, as it is
+ * simple to write, parse, and visually inspect. The actual representation is
+ * kept behind this file's interface, so that we can switch to a binary
+ * representation if/when that becomes justified.
+ *
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
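+
+/*
+ * Rough sketch of the output these functions produce for a single traced
+ * call (indentation and values abbreviated; see trace_dump_call_begin_locked()
+ * and the trace_dump_* value dumpers below for the exact tags):
+ *
+ *    <call no='1' class='pipe_context' method='clear'>
+ *       <arg name='buffers'><uint>1</uint></arg>
+ *       <arg name='depth'><float>1</float></arg>
+ *    </call>
+ */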
+
+#include "pipe/p_config.h"
+
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
+#include <stdlib.h>
+#endif
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_thread.h"
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+#include "util/u_string.h"
+#include "util/u_stream.h"
+
+#include "tr_dump.h"
+#include "tr_screen.h"
+#include "tr_texture.h"
+#include "tr_buffer.h"
+
+
+static struct util_stream *stream = NULL;
+static unsigned refcount = 0;
+static pipe_mutex call_mutex;
+static long unsigned call_no = 0;
+static boolean dumping = FALSE;
+static boolean initialized = FALSE;
+
+
+static INLINE void
+trace_dump_write(const char *buf, size_t size)
+{
+ if(stream)
+ util_stream_write(stream, buf, size);
+}
+
+
+static INLINE void
+trace_dump_writes(const char *s)
+{
+ trace_dump_write(s, strlen(s));
+}
+
+
+static INLINE void
+trace_dump_writef(const char *format, ...)
+{
+ static char buf[1024];
+ unsigned len;
+ va_list ap;
+ va_start(ap, format);
+ len = util_vsnprintf(buf, sizeof(buf), format, ap);
+ va_end(ap);
+ trace_dump_write(buf, len);
+}
+
+
+static INLINE void
+trace_dump_escape(const char *str)
+{
+ const unsigned char *p = (const unsigned char *)str;
+ unsigned char c;
+ while((c = *p++) != 0) {
+ if(c == '<')
+ trace_dump_writes("&lt;");
+ else if(c == '>')
+ trace_dump_writes("&gt;");
+ else if(c == '&')
+ trace_dump_writes("&amp;");
+ else if(c == '\'')
+ trace_dump_writes("&apos;");
+ else if(c == '\"')
+ trace_dump_writes("&quot;");
+ else if(c >= 0x20 && c <= 0x7e)
+ trace_dump_writef("%c", c);
+ else
+ trace_dump_writef("&#%u;", c);
+ }
+}
+
+
+static INLINE void
+trace_dump_indent(unsigned level)
+{
+ unsigned i;
+ for(i = 0; i < level; ++i)
+ trace_dump_writes("\t");
+}
+
+
+static INLINE void
+trace_dump_newline(void)
+{
+ trace_dump_writes("\n");
+}
+
+
+static INLINE void
+trace_dump_tag(const char *name)
+{
+ trace_dump_writes("<");
+ trace_dump_writes(name);
+ trace_dump_writes("/>");
+}
+
+
+static INLINE void
+trace_dump_tag_begin(const char *name)
+{
+ trace_dump_writes("<");
+ trace_dump_writes(name);
+ trace_dump_writes(">");
+}
+
+static INLINE void
+trace_dump_tag_begin1(const char *name,
+ const char *attr1, const char *value1)
+{
+ trace_dump_writes("<");
+ trace_dump_writes(name);
+ trace_dump_writes(" ");
+ trace_dump_writes(attr1);
+ trace_dump_writes("='");
+ trace_dump_escape(value1);
+ trace_dump_writes("'>");
+}
+
+
+static INLINE void
+trace_dump_tag_begin2(const char *name,
+ const char *attr1, const char *value1,
+ const char *attr2, const char *value2)
+{
+ trace_dump_writes("<");
+ trace_dump_writes(name);
+ trace_dump_writes(" ");
+ trace_dump_writes(attr1);
+ trace_dump_writes("=\'");
+ trace_dump_escape(value1);
+ trace_dump_writes("\' ");
+ trace_dump_writes(attr2);
+ trace_dump_writes("=\'");
+ trace_dump_escape(value2);
+ trace_dump_writes("\'>");
+}
+
+
+static INLINE void
+trace_dump_tag_begin3(const char *name,
+ const char *attr1, const char *value1,
+ const char *attr2, const char *value2,
+ const char *attr3, const char *value3)
+{
+ trace_dump_writes("<");
+ trace_dump_writes(name);
+ trace_dump_writes(" ");
+ trace_dump_writes(attr1);
+ trace_dump_writes("=\'");
+ trace_dump_escape(value1);
+ trace_dump_writes("\' ");
+ trace_dump_writes(attr2);
+ trace_dump_writes("=\'");
+ trace_dump_escape(value2);
+ trace_dump_writes("\' ");
+ trace_dump_writes(attr3);
+ trace_dump_writes("=\'");
+ trace_dump_escape(value3);
+ trace_dump_writes("\'>");
+}
+
+
+static INLINE void
+trace_dump_tag_end(const char *name)
+{
+ trace_dump_writes("</");
+ trace_dump_writes(name);
+ trace_dump_writes(">");
+}
+
+static void
+trace_dump_trace_close(void)
+{
+ if(stream) {
+ trace_dump_writes("</trace>\n");
+ util_stream_close(stream);
+ stream = NULL;
+ refcount = 0;
+ call_no = 0;
+ pipe_mutex_destroy(call_mutex);
+ }
+}
+
+void trace_dump_init(void)
+{
+ if (initialized)
+ return;
+
+ pipe_mutex_init(call_mutex);
+ dumping = FALSE;
+ initialized = TRUE;
+}
+
+boolean trace_dump_trace_begin(void)
+{
+ const char *filename;
+
+ assert(initialized);
+
+ filename = debug_get_option("GALLIUM_TRACE", NULL);
+ if(!filename)
+ return FALSE;
+
+ if(!stream) {
+
+ stream = util_stream_create(filename, 0);
+ if(!stream)
+ return FALSE;
+
+ trace_dump_writes("<?xml version='1.0' encoding='UTF-8'?>\n");
+ trace_dump_writes("<?xml-stylesheet type='text/xsl' href='trace.xsl'?>\n");
+ trace_dump_writes("<trace version='0.1'>\n");
+
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
+ /* Linux applications rarely clean up GL / Gallium resources, so catch
+ * application exit here */
+ atexit(trace_dump_trace_close);
+#endif
+ }
+
+ ++refcount;
+
+ return TRUE;
+}
+
+boolean trace_dump_trace_enabled(void)
+{
+ return stream ? TRUE : FALSE;
+}
+
+void trace_dump_trace_end(void)
+{
+ if(stream)
+ if(!--refcount)
+ trace_dump_trace_close();
+}
+
+/*
+ * Call lock
+ */
+
+void trace_dump_call_lock(void)
+{
+ pipe_mutex_lock(call_mutex);
+}
+
+void trace_dump_call_unlock(void)
+{
+ pipe_mutex_unlock(call_mutex);
+}
+
+/*
+ * Dumping control
+ */
+
+void trace_dumping_start_locked(void)
+{
+ dumping = TRUE;
+}
+
+void trace_dumping_stop_locked(void)
+{
+ dumping = FALSE;
+}
+
+boolean trace_dumping_enabled_locked(void)
+{
+ return dumping;
+}
+
+void trace_dumping_start(void)
+{
+ pipe_mutex_lock(call_mutex);
+ trace_dumping_start_locked();
+ pipe_mutex_unlock(call_mutex);
+}
+
+void trace_dumping_stop(void)
+{
+ pipe_mutex_lock(call_mutex);
+ trace_dumping_stop_locked();
+ pipe_mutex_unlock(call_mutex);
+}
+
+boolean trace_dumping_enabled(void)
+{
+ boolean ret;
+ pipe_mutex_lock(call_mutex);
+ ret = trace_dumping_enabled_locked();
+ pipe_mutex_unlock(call_mutex);
+ return ret;
+}
+
+/*
+ * Dump functions
+ */
+
+void trace_dump_call_begin_locked(const char *klass, const char *method)
+{
+ if (!dumping)
+ return;
+
+ ++call_no;
+ trace_dump_indent(1);
+ trace_dump_writes("<call no=\'");
+ trace_dump_writef("%lu", call_no);
+ trace_dump_writes("\' class=\'");
+ trace_dump_escape(klass);
+ trace_dump_writes("\' method=\'");
+ trace_dump_escape(method);
+ trace_dump_writes("\'>");
+ trace_dump_newline();
+}
+
+void trace_dump_call_end_locked(void)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_indent(1);
+ trace_dump_tag_end("call");
+ trace_dump_newline();
+ util_stream_flush(stream);
+}
+
+void trace_dump_call_begin(const char *klass, const char *method)
+{
+ pipe_mutex_lock(call_mutex);
+ trace_dump_call_begin_locked(klass, method);
+}
+
+void trace_dump_call_end(void)
+{
+ trace_dump_call_end_locked();
+ pipe_mutex_unlock(call_mutex);
+}
+
+void trace_dump_arg_begin(const char *name)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_indent(2);
+ trace_dump_tag_begin1("arg", "name", name);
+}
+
+void trace_dump_arg_end(void)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_tag_end("arg");
+ trace_dump_newline();
+}
+
+void trace_dump_ret_begin(void)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_indent(2);
+ trace_dump_tag_begin("ret");
+}
+
+void trace_dump_ret_end(void)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_tag_end("ret");
+ trace_dump_newline();
+}
+
+void trace_dump_bool(int value)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_writef("<bool>%c</bool>", value ? '1' : '0');
+}
+
+void trace_dump_int(long long int value)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_writef("<int>%lli</int>", value);
+}
+
+void trace_dump_uint(long long unsigned value)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_writef("<uint>%llu</uint>", value);
+}
+
+void trace_dump_float(double value)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_writef("<float>%g</float>", value);
+}
+
+void trace_dump_bytes(const void *data,
+ size_t size)
+{
+ static const char hex_table[16] = "0123456789ABCDEF";
+ const uint8_t *p = data;
+ size_t i;
+
+ if (!dumping)
+ return;
+
+ trace_dump_writes("<bytes>");
+ for(i = 0; i < size; ++i) {
+ uint8_t byte = *p++;
+ char hex[2];
+ hex[0] = hex_table[byte >> 4];
+ hex[1] = hex_table[byte & 0xf];
+ trace_dump_write(hex, 2);
+ }
+ trace_dump_writes("</bytes>");
+}
+
+void trace_dump_string(const char *str)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_writes("<string>");
+ trace_dump_escape(str);
+ trace_dump_writes("</string>");
+}
+
+void trace_dump_enum(const char *value)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_writes("<enum>");
+ trace_dump_escape(value);
+ trace_dump_writes("</enum>");
+}
+
+void trace_dump_array_begin(void)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_writes("<array>");
+}
+
+void trace_dump_array_end(void)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_writes("</array>");
+}
+
+void trace_dump_elem_begin(void)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_writes("<elem>");
+}
+
+void trace_dump_elem_end(void)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_writes("</elem>");
+}
+
+void trace_dump_struct_begin(const char *name)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_writef("<struct name='%s'>", name);
+}
+
+void trace_dump_struct_end(void)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_writes("</struct>");
+}
+
+void trace_dump_member_begin(const char *name)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_writef("<member name='%s'>", name);
+}
+
+void trace_dump_member_end(void)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_writes("</member>");
+}
+
+void trace_dump_null(void)
+{
+ if (!dumping)
+ return;
+
+ trace_dump_writes("<null/>");
+}
+
+void trace_dump_ptr(const void *value)
+{
+ if (!dumping)
+ return;
+
+ if(value)
+ trace_dump_writef("<ptr>0x%08lx</ptr>", (unsigned long)(uintptr_t)value);
+ else
+ trace_dump_null();
+}
+
+void trace_dump_buffer_ptr(struct pipe_buffer *_buffer)
+{
+ if (!dumping)
+ return;
+
+ if (_buffer) {
+ struct trace_buffer *tr_buf = trace_buffer(_buffer);
+ trace_dump_ptr(tr_buf->buffer);
+ } else {
+ trace_dump_null();
+ }
+}
+
+void trace_dump_texture_ptr(struct pipe_texture *_texture)
+{
+ if (!dumping)
+ return;
+
+ if (_texture) {
+ struct trace_texture *tr_tex = trace_texture(_texture);
+ trace_dump_ptr(tr_tex->texture);
+ } else {
+ trace_dump_null();
+ }
+}
+
+void trace_dump_surface_ptr(struct pipe_surface *_surface)
+{
+ if (!dumping)
+ return;
+
+ if (_surface) {
+ struct trace_surface *tr_surf = trace_surface(_surface);
+ trace_dump_ptr(tr_surf->surface);
+ } else {
+ trace_dump_null();
+ }
+}
+
+void trace_dump_transfer_ptr(struct pipe_transfer *_transfer)
+{
+ if (!dumping)
+ return;
+
+ if (_transfer) {
+ struct trace_transfer *tr_tran = trace_transfer(_transfer);
+ trace_dump_ptr(tr_tran->transfer);
+ } else {
+ trace_dump_null();
+ }
+}
diff --git a/src/gallium/drivers/trace/tr_dump.h b/src/gallium/drivers/trace/tr_dump.h
new file mode 100644
index 0000000000..32592bab12
--- /dev/null
+++ b/src/gallium/drivers/trace/tr_dump.h
@@ -0,0 +1,177 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Trace data dumping primitives.
+ */
+
+#ifndef TR_DUMP_H
+#define TR_DUMP_H
+
+
+#include "pipe/p_compiler.h"
+
+
+struct pipe_buffer;
+struct pipe_texture;
+struct pipe_surface;
+struct pipe_transfer;
+
+/*
+ * Call before use.
+ */
+void trace_dump_init(void);
+
+/*
+ * Low level dumping controls.
+ *
+ * These open the trace file, report whether it is open, and close it again.
+ */
+boolean trace_dump_trace_begin(void);
+boolean trace_dump_trace_enabled(void);
+void trace_dump_trace_end(void);
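+
+/*
+ * Minimal usage sketch (the application name is hypothetical): tracing is
+ * enabled by pointing GALLIUM_TRACE at the output file before starting the
+ * application, e.g.
+ *
+ *    GALLIUM_TRACE=app.trace ./some_gallium_app
+ *
+ * trace_dump_trace_begin() reads this option and opens the XML stream.
+ */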
+
+/*
+ * Lock and unlock the call mutex.
+ *
+ * The mutex is used by the non-locked versions of the dumping control
+ * and begin/end call dump functions.
+ *
+ * trace_dump_call_begin() takes the lock and trace_dump_call_end()
+ * releases it. Use the _locked variants to avoid the locking/unlocking.
+ */
+void trace_dump_call_lock(void);
+void trace_dump_call_unlock(void);
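+
+/*
+ * Sketch of using the _locked variants under an explicitly held lock
+ * (hypothetical caller; the rbug thread takes the same lock to protect
+ * shared context state):
+ *
+ *    trace_dump_call_lock();
+ *    trace_dump_call_begin_locked("pipe_context", "clear");
+ *    ... dump args ...
+ *    trace_dump_call_end_locked();
+ *    trace_dump_call_unlock();
+ */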
+
+/*
+ * High level dumping control.
+ */
+void trace_dumping_start_locked(void);
+void trace_dumping_stop_locked(void);
+boolean trace_dumping_enabled_locked(void);
+void trace_dumping_start(void);
+void trace_dumping_stop(void);
+boolean trace_dumping_enabled(void);
+
+void trace_dump_call_begin_locked(const char *klass, const char *method);
+void trace_dump_call_end_locked(void);
+void trace_dump_call_begin(const char *klass, const char *method);
+void trace_dump_call_end(void);
+
+void trace_dump_arg_begin(const char *name);
+void trace_dump_arg_end(void);
+void trace_dump_ret_begin(void);
+void trace_dump_ret_end(void);
+void trace_dump_bool(int value);
+void trace_dump_int(long long int value);
+void trace_dump_uint(long long unsigned value);
+void trace_dump_float(double value);
+void trace_dump_bytes(const void *data, size_t size);
+void trace_dump_string(const char *str);
+void trace_dump_enum(const char *value);
+void trace_dump_array_begin(void);
+void trace_dump_array_end(void);
+void trace_dump_elem_begin(void);
+void trace_dump_elem_end(void);
+void trace_dump_struct_begin(const char *name);
+void trace_dump_struct_end(void);
+void trace_dump_member_begin(const char *name);
+void trace_dump_member_end(void);
+void trace_dump_null(void);
+void trace_dump_ptr(const void *value);
+/* These unwrap a trace-wrapped object and dump the pointer of the real one. */
+void trace_dump_buffer_ptr(struct pipe_buffer *_buffer);
+void trace_dump_texture_ptr(struct pipe_texture *_texture);
+void trace_dump_surface_ptr(struct pipe_surface *_surface);
+void trace_dump_transfer_ptr(struct pipe_transfer *_transfer);
+
+/*
+ * Code saving macros.
+ */
+
+#define trace_dump_arg(_type, _arg) \
+ do { \
+ trace_dump_arg_begin(#_arg); \
+ trace_dump_##_type(_arg); \
+ trace_dump_arg_end(); \
+ } while(0)
+
+#define trace_dump_ret(_type, _arg) \
+ do { \
+ trace_dump_ret_begin(); \
+ trace_dump_##_type(_arg); \
+ trace_dump_ret_end(); \
+ } while(0)
+
+#define trace_dump_array(_type, _obj, _size) \
+ do { \
+ size_t idx; \
+ trace_dump_array_begin(); \
+ for(idx = 0; idx < (_size); ++idx) { \
+ trace_dump_elem_begin(); \
+ trace_dump_##_type((_obj)[idx]); \
+ trace_dump_elem_end(); \
+ } \
+ trace_dump_array_end(); \
+ } while(0)
+
+#define trace_dump_struct_array(_type, _obj, _size) \
+ do { \
+ size_t idx; \
+ trace_dump_array_begin(); \
+ for(idx = 0; idx < (_size); ++idx) { \
+ trace_dump_elem_begin(); \
+ trace_dump_##_type(&(_obj)[idx]); \
+ trace_dump_elem_end(); \
+ } \
+ trace_dump_array_end(); \
+ } while(0)
+
+#define trace_dump_member(_type, _obj, _member) \
+ do { \
+ trace_dump_member_begin(#_member); \
+ trace_dump_##_type((_obj)->_member); \
+ trace_dump_member_end(); \
+ } while(0)
+
+#define trace_dump_arg_array(_type, _arg, _size) \
+ do { \
+ trace_dump_arg_begin(#_arg); \
+ trace_dump_array(_type, _arg, _size); \
+ trace_dump_arg_end(); \
+ } while(0)
+
+#define trace_dump_member_array(_type, _obj, _member) \
+ do { \
+ trace_dump_member_begin(#_member); \
+ trace_dump_array(_type, (_obj)->_member, sizeof((_obj)->_member)/sizeof((_obj)->_member[0])); \
+ trace_dump_member_end(); \
+ } while(0)
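+
+/*
+ * Example of what the macros above save (mirroring the wrappers in
+ * tr_context.c): inside a call dump,
+ *
+ *    trace_dump_arg(uint, width);
+ *
+ * expands (within a do { } while(0)) to
+ *
+ *    trace_dump_arg_begin("width");
+ *    trace_dump_uint(width);
+ *    trace_dump_arg_end();
+ */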
+
+
+#endif /* TR_DUMP_H */
diff --git a/src/gallium/drivers/trace/tr_dump_state.c b/src/gallium/drivers/trace/tr_dump_state.c
new file mode 100644
index 0000000000..bcf6751af4
--- /dev/null
+++ b/src/gallium/drivers/trace/tr_dump_state.c
@@ -0,0 +1,549 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_memory.h"
+#include "tgsi/tgsi_dump.h"
+
+#include "tr_dump.h"
+#include "tr_dump_state.h"
+
+
+void trace_dump_format(enum pipe_format format)
+{
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ trace_dump_enum(pf_name(format));
+}
+
+
+void trace_dump_block(const struct pipe_format_block *block)
+{
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ trace_dump_struct_begin("pipe_format_block");
+ trace_dump_member(uint, block, size);
+ trace_dump_member(uint, block, width);
+ trace_dump_member(uint, block, height);
+ trace_dump_struct_end();
+}
+
+
+static void trace_dump_reference(const struct pipe_reference *reference)
+{
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ trace_dump_struct_begin("pipe_reference");
+ trace_dump_member(int, &reference->count, count);
+ trace_dump_struct_end();
+}
+
+
+void trace_dump_template(const struct pipe_texture *templat)
+{
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ if(!templat) {
+ trace_dump_null();
+ return;
+ }
+
+ trace_dump_struct_begin("pipe_texture");
+
+ trace_dump_member(int, templat, target);
+ trace_dump_member(format, templat, format);
+
+ trace_dump_member_begin("width");
+ trace_dump_array(uint, templat->width, 1);
+ trace_dump_member_end();
+
+ trace_dump_member_begin("height");
+ trace_dump_array(uint, templat->height, 1);
+ trace_dump_member_end();
+
+ trace_dump_member_begin("depth");
+ trace_dump_array(uint, templat->depth, 1);
+ trace_dump_member_end();
+
+ trace_dump_member_begin("block");
+ trace_dump_block(&templat->block);
+ trace_dump_member_end();
+
+ trace_dump_member(uint, templat, last_level);
+ trace_dump_member(uint, templat, tex_usage);
+
+ trace_dump_struct_end();
+}
+
+
+void trace_dump_rasterizer_state(const struct pipe_rasterizer_state *state)
+{
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ if(!state) {
+ trace_dump_null();
+ return;
+ }
+
+ trace_dump_struct_begin("pipe_rasterizer_state");
+
+ trace_dump_member(bool, state, flatshade);
+ trace_dump_member(bool, state, light_twoside);
+ trace_dump_member(uint, state, front_winding);
+ trace_dump_member(uint, state, cull_mode);
+ trace_dump_member(uint, state, fill_cw);
+ trace_dump_member(uint, state, fill_ccw);
+ trace_dump_member(bool, state, offset_cw);
+ trace_dump_member(bool, state, offset_ccw);
+ trace_dump_member(bool, state, scissor);
+ trace_dump_member(bool, state, poly_smooth);
+ trace_dump_member(bool, state, poly_stipple_enable);
+ trace_dump_member(bool, state, point_smooth);
+ trace_dump_member(bool, state, point_sprite);
+ trace_dump_member(bool, state, point_size_per_vertex);
+ trace_dump_member(bool, state, multisample);
+ trace_dump_member(bool, state, line_smooth);
+ trace_dump_member(bool, state, line_stipple_enable);
+ trace_dump_member(uint, state, line_stipple_factor);
+ trace_dump_member(uint, state, line_stipple_pattern);
+ trace_dump_member(bool, state, line_last_pixel);
+ trace_dump_member(bool, state, bypass_vs_clip_and_viewport);
+ trace_dump_member(bool, state, flatshade_first);
+ trace_dump_member(bool, state, gl_rasterization_rules);
+
+ trace_dump_member(float, state, line_width);
+ trace_dump_member(float, state, point_size);
+ trace_dump_member(float, state, point_size_min);
+ trace_dump_member(float, state, point_size_max);
+ trace_dump_member(float, state, offset_units);
+ trace_dump_member(float, state, offset_scale);
+
+ trace_dump_member_array(uint, state, sprite_coord_mode);
+
+ trace_dump_struct_end();
+}
+
+
+void trace_dump_poly_stipple(const struct pipe_poly_stipple *state)
+{
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ if(!state) {
+ trace_dump_null();
+ return;
+ }
+
+ trace_dump_struct_begin("pipe_poly_stipple");
+
+ trace_dump_member_begin("stipple");
+ trace_dump_array(uint,
+ state->stipple,
+ Elements(state->stipple));
+ trace_dump_member_end();
+
+ trace_dump_struct_end();
+}
+
+
+void trace_dump_viewport_state(const struct pipe_viewport_state *state)
+{
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ if(!state) {
+ trace_dump_null();
+ return;
+ }
+
+ trace_dump_struct_begin("pipe_viewport_state");
+
+ trace_dump_member_array(float, state, scale);
+ trace_dump_member_array(float, state, translate);
+
+ trace_dump_struct_end();
+}
+
+
+void trace_dump_scissor_state(const struct pipe_scissor_state *state)
+{
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ if(!state) {
+ trace_dump_null();
+ return;
+ }
+
+ trace_dump_struct_begin("pipe_scissor_state");
+
+ trace_dump_member(uint, state, minx);
+ trace_dump_member(uint, state, miny);
+ trace_dump_member(uint, state, maxx);
+ trace_dump_member(uint, state, maxy);
+
+ trace_dump_struct_end();
+}
+
+
+void trace_dump_clip_state(const struct pipe_clip_state *state)
+{
+ unsigned i;
+
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ if(!state) {
+ trace_dump_null();
+ return;
+ }
+
+ trace_dump_struct_begin("pipe_clip_state");
+
+ trace_dump_member_begin("ucp");
+ trace_dump_array_begin();
+ for(i = 0; i < PIPE_MAX_CLIP_PLANES; ++i) {
+ trace_dump_elem_begin();
+ trace_dump_array(float, state->ucp[i], 4);
+ trace_dump_elem_end();
+ }
+ trace_dump_array_end();
+ trace_dump_member_end();
+
+ trace_dump_member(uint, state, nr);
+
+ trace_dump_struct_end();
+}
+
+
+void trace_dump_constant_buffer(const struct pipe_constant_buffer *state)
+{
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ if(!state) {
+ trace_dump_null();
+ return;
+ }
+
+ trace_dump_struct_begin("pipe_constant_buffer");
+
+ trace_dump_member(buffer_ptr, state, buffer);
+
+ trace_dump_struct_end();
+}
+
+
+void trace_dump_shader_state(const struct pipe_shader_state *state)
+{
+ static char str[8192];
+
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ if(!state) {
+ trace_dump_null();
+ return;
+ }
+
+ tgsi_dump_str(state->tokens, 0, str, sizeof(str));
+
+ trace_dump_struct_begin("pipe_shader_state");
+
+ trace_dump_member_begin("tokens");
+ trace_dump_string(str);
+ trace_dump_member_end();
+
+ trace_dump_struct_end();
+}
+
+
+void trace_dump_depth_stencil_alpha_state(const struct pipe_depth_stencil_alpha_state *state)
+{
+ unsigned i;
+
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ if(!state) {
+ trace_dump_null();
+ return;
+ }
+
+ trace_dump_struct_begin("pipe_depth_stencil_alpha_state");
+
+ trace_dump_member_begin("depth");
+ trace_dump_struct_begin("pipe_depth_state");
+ trace_dump_member(bool, &state->depth, enabled);
+ trace_dump_member(bool, &state->depth, writemask);
+ trace_dump_member(uint, &state->depth, func);
+ trace_dump_struct_end();
+ trace_dump_member_end();
+
+ trace_dump_member_begin("stencil");
+ trace_dump_array_begin();
+ for(i = 0; i < Elements(state->stencil); ++i) {
+ trace_dump_elem_begin();
+ trace_dump_struct_begin("pipe_stencil_state");
+ trace_dump_member(bool, &state->stencil[i], enabled);
+ trace_dump_member(uint, &state->stencil[i], func);
+ trace_dump_member(uint, &state->stencil[i], fail_op);
+ trace_dump_member(uint, &state->stencil[i], zpass_op);
+ trace_dump_member(uint, &state->stencil[i], zfail_op);
+ trace_dump_member(uint, &state->stencil[i], ref_value);
+ trace_dump_member(uint, &state->stencil[i], valuemask);
+ trace_dump_member(uint, &state->stencil[i], writemask);
+ trace_dump_struct_end();
+ trace_dump_elem_end();
+ }
+ trace_dump_array_end();
+ trace_dump_member_end();
+
+ trace_dump_member_begin("alpha");
+ trace_dump_struct_begin("pipe_alpha_state");
+ trace_dump_member(bool, &state->alpha, enabled);
+ trace_dump_member(uint, &state->alpha, func);
+ trace_dump_member(float, &state->alpha, ref_value);
+ trace_dump_struct_end();
+ trace_dump_member_end();
+
+ trace_dump_struct_end();
+}
+
+
+void trace_dump_blend_state(const struct pipe_blend_state *state)
+{
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ if(!state) {
+ trace_dump_null();
+ return;
+ }
+
+ trace_dump_struct_begin("pipe_blend_state");
+
+ trace_dump_member(bool, state, blend_enable);
+
+ trace_dump_member(uint, state, rgb_func);
+ trace_dump_member(uint, state, rgb_src_factor);
+ trace_dump_member(uint, state, rgb_dst_factor);
+
+ trace_dump_member(uint, state, alpha_func);
+ trace_dump_member(uint, state, alpha_src_factor);
+ trace_dump_member(uint, state, alpha_dst_factor);
+
+ trace_dump_member(bool, state, logicop_enable);
+ trace_dump_member(uint, state, logicop_func);
+
+ trace_dump_member(uint, state, colormask);
+ trace_dump_member(bool, state, dither);
+
+ trace_dump_struct_end();
+}
+
+
+void trace_dump_blend_color(const struct pipe_blend_color *state)
+{
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ if(!state) {
+ trace_dump_null();
+ return;
+ }
+
+ trace_dump_struct_begin("pipe_blend_color");
+
+ trace_dump_member_array(float, state, color);
+
+ trace_dump_struct_end();
+}
+
+
+void trace_dump_framebuffer_state(const struct pipe_framebuffer_state *state)
+{
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ trace_dump_struct_begin("pipe_framebuffer_state");
+
+ trace_dump_member(uint, state, width);
+ trace_dump_member(uint, state, height);
+ trace_dump_member(uint, state, nr_cbufs);
+ trace_dump_member_array(ptr, state, cbufs);
+ trace_dump_member(ptr, state, zsbuf);
+
+ trace_dump_struct_end();
+}
+
+
+void trace_dump_sampler_state(const struct pipe_sampler_state *state)
+{
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ if(!state) {
+ trace_dump_null();
+ return;
+ }
+
+ trace_dump_struct_begin("pipe_sampler_state");
+
+ trace_dump_member(uint, state, wrap_s);
+ trace_dump_member(uint, state, wrap_t);
+ trace_dump_member(uint, state, wrap_r);
+ trace_dump_member(uint, state, min_img_filter);
+ trace_dump_member(uint, state, min_mip_filter);
+ trace_dump_member(uint, state, mag_img_filter);
+ trace_dump_member(bool, state, compare_mode);
+ trace_dump_member(uint, state, compare_func);
+ trace_dump_member(bool, state, normalized_coords);
+ trace_dump_member(uint, state, prefilter);
+ trace_dump_member(float, state, lod_bias);
+ trace_dump_member(float, state, min_lod);
+ trace_dump_member(float, state, max_lod);
+ trace_dump_member_array(float, state, border_color);
+ trace_dump_member(float, state, max_anisotropy);
+
+ trace_dump_struct_end();
+}
+
+
+void trace_dump_surface(const struct pipe_surface *state)
+{
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ if(!state) {
+ trace_dump_null();
+ return;
+ }
+
+ trace_dump_struct_begin("pipe_surface");
+
+ trace_dump_reference(&state->reference);
+
+ trace_dump_member(format, state, format);
+ trace_dump_member(uint, state, width);
+ trace_dump_member(uint, state, height);
+
+ trace_dump_member(uint, state, layout);
+ trace_dump_member(uint, state, offset);
+ trace_dump_member(uint, state, usage);
+
+ trace_dump_member(ptr, state, texture);
+ trace_dump_member(uint, state, face);
+ trace_dump_member(uint, state, level);
+ trace_dump_member(uint, state, zslice);
+
+ trace_dump_struct_end();
+}
+
+
+void trace_dump_transfer(const struct pipe_transfer *state)
+{
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ if(!state) {
+ trace_dump_null();
+ return;
+ }
+
+ trace_dump_struct_begin("pipe_transfer");
+
+ trace_dump_member(format, state, format);
+ trace_dump_member(uint, state, width);
+ trace_dump_member(uint, state, height);
+
+ trace_dump_member_begin("block");
+ trace_dump_block(&state->block);
+ trace_dump_member_end();
+
+ trace_dump_member(uint, state, nblocksx);
+ trace_dump_member(uint, state, nblocksy);
+ trace_dump_member(uint, state, stride);
+ trace_dump_member(uint, state, usage);
+
+ trace_dump_member(ptr, state, texture);
+ trace_dump_member(uint, state, face);
+ trace_dump_member(uint, state, level);
+ trace_dump_member(uint, state, zslice);
+
+ trace_dump_struct_end();
+}
+
+
+void trace_dump_vertex_buffer(const struct pipe_vertex_buffer *state)
+{
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ if(!state) {
+ trace_dump_null();
+ return;
+ }
+
+ trace_dump_struct_begin("pipe_vertex_buffer");
+
+ trace_dump_member(uint, state, stride);
+ trace_dump_member(uint, state, max_index);
+ trace_dump_member(uint, state, buffer_offset);
+ trace_dump_member(buffer_ptr, state, buffer);
+
+ trace_dump_struct_end();
+}
+
+
+void trace_dump_vertex_element(const struct pipe_vertex_element *state)
+{
+ if (!trace_dumping_enabled_locked())
+ return;
+
+ if(!state) {
+ trace_dump_null();
+ return;
+ }
+
+ trace_dump_struct_begin("pipe_vertex_element");
+
+ trace_dump_member(uint, state, src_offset);
+
+ trace_dump_member(uint, state, vertex_buffer_index);
+ trace_dump_member(uint, state, nr_components);
+
+ trace_dump_member(format, state, src_format);
+
+ trace_dump_struct_end();
+}
diff --git a/src/gallium/drivers/trace/tr_dump_state.h b/src/gallium/drivers/trace/tr_dump_state.h
new file mode 100644
index 0000000000..05b821adb6
--- /dev/null
+++ b/src/gallium/drivers/trace/tr_dump_state.h
@@ -0,0 +1,78 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TR_DUMP_STATE_H_
+#define TR_DUMP_STATE_H_
+
+#include "pipe/p_format.h"
+#include "pipe/p_state.h"
+#include "pipe/p_shader_tokens.h"
+
+
+void trace_dump_format(enum pipe_format format);
+
+void trace_dump_block(const struct pipe_format_block *block);
+
+void trace_dump_template(const struct pipe_texture *templat);
+
+
+void trace_dump_rasterizer_state(const struct pipe_rasterizer_state *state);
+
+void trace_dump_poly_stipple(const struct pipe_poly_stipple *state);
+
+void trace_dump_viewport_state(const struct pipe_viewport_state *state);
+
+void trace_dump_scissor_state(const struct pipe_scissor_state *state);
+
+void trace_dump_clip_state(const struct pipe_clip_state *state);
+
+void trace_dump_constant_buffer(const struct pipe_constant_buffer *state);
+
+void trace_dump_token(const struct tgsi_token *token);
+
+void trace_dump_shader_state(const struct pipe_shader_state *state);
+
+void trace_dump_depth_stencil_alpha_state(const struct pipe_depth_stencil_alpha_state *state);
+
+void trace_dump_blend_state(const struct pipe_blend_state *state);
+
+void trace_dump_blend_color(const struct pipe_blend_color *state);
+
+void trace_dump_framebuffer_state(const struct pipe_framebuffer_state *state);
+
+void trace_dump_sampler_state(const struct pipe_sampler_state *state);
+
+void trace_dump_surface(const struct pipe_surface *state);
+
+void trace_dump_transfer(const struct pipe_transfer *state);
+
+void trace_dump_vertex_buffer(const struct pipe_vertex_buffer *state);
+
+void trace_dump_vertex_element(const struct pipe_vertex_element *state);
+
+
+#endif /* TR_DUMP_STATE_H_ */
diff --git a/src/gallium/drivers/trace/tr_rbug.c b/src/gallium/drivers/trace/tr_rbug.c
new file mode 100644
index 0000000000..e85ac15edc
--- /dev/null
+++ b/src/gallium/drivers/trace/tr_rbug.c
@@ -0,0 +1,864 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "util/u_string.h"
+#include "util/u_memory.h"
+#include "util/u_simple_list.h"
+#include "util/u_network.h"
+
+#include "tgsi/tgsi_parse.h"
+
+#include "tr_dump.h"
+#include "tr_state.h"
+#include "tr_buffer.h"
+#include "tr_texture.h"
+
+#include "rbug/rbug.h"
+
+#include <errno.h>
+
+#if defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+# define sleep Sleep
+#elif defined(PIPE_OS_LINUX)
+void usleep(int);
+# define sleep usleep
+#else
+# warning "No socket implementation"
+#endif
+
+#define U642VOID(x) ((void *)(unsigned long)(x))
+#define VOID2U64(x) ((uint64_t)(unsigned long)(x))
+
+struct trace_rbug
+{
+ struct trace_screen *tr_scr;
+ struct rbug_connection *con;
+ pipe_thread thread;
+ boolean running;
+};
+
+PIPE_THREAD_ROUTINE(trace_rbug_thread, void_tr_rbug);
+
+
+/**********************************************************
+ * Helper functions
+ */
+
+
+static struct trace_context *
+trace_rbug_get_context_locked(struct trace_screen *tr_scr, rbug_context_t ctx)
+{
+ struct trace_context *tr_ctx = NULL;
+ struct tr_list *ptr;
+
+ foreach(ptr, &tr_scr->contexts) {
+ tr_ctx = (struct trace_context *)((char*)ptr - offsetof(struct trace_context, list));
+ if (ctx == VOID2U64(tr_ctx))
+ break;
+ tr_ctx = NULL;
+ }
+
+ return tr_ctx;
+}
+
+static struct trace_shader *
+trace_rbug_get_shader_locked(struct trace_context *tr_ctx, rbug_shader_t shdr)
+{
+ struct trace_shader *tr_shdr = NULL;
+ struct tr_list *ptr;
+
+ foreach(ptr, &tr_ctx->shaders) {
+ tr_shdr = (struct trace_shader *)((char*)ptr - offsetof(struct trace_shader, list));
+ if (shdr == VOID2U64(tr_shdr))
+ break;
+ tr_shdr = NULL;
+ }
+
+ return tr_shdr;
+}
+
+static void *
+trace_shader_create_locked(struct pipe_context *pipe,
+ struct trace_shader *tr_shdr,
+ struct tgsi_token *tokens)
+{
+ void *state = NULL;
+ struct pipe_shader_state pss = { 0 };
+ pss.tokens = tokens;
+
+ if (tr_shdr->type == TRACE_SHADER_FRAGMENT) {
+ state = pipe->create_fs_state(pipe, &pss);
+ } else if (tr_shdr->type == TRACE_SHADER_VERTEX) {
+ state = pipe->create_vs_state(pipe, &pss);
+ } else
+ assert(0);
+
+ return state;
+}
+
+static void
+trace_shader_bind_locked(struct pipe_context *pipe,
+ struct trace_shader *tr_shdr,
+ void *state)
+{
+ if (tr_shdr->type == TRACE_SHADER_FRAGMENT) {
+ pipe->bind_fs_state(pipe, state);
+ } else if (tr_shdr->type == TRACE_SHADER_VERTEX) {
+ pipe->bind_vs_state(pipe, state);
+ } else
+ assert(0);
+}
+
+static void
+trace_shader_delete_locked(struct pipe_context *pipe,
+ struct trace_shader *tr_shdr,
+ void *state)
+{
+ if (tr_shdr->type == TRACE_SHADER_FRAGMENT) {
+ pipe->delete_fs_state(pipe, state);
+ } else if (tr_shdr->type == TRACE_SHADER_VERTEX) {
+ pipe->delete_vs_state(pipe, state);
+ } else
+ assert(0);
+}
+
+/************************************************
+ * Request handler functions
+ */
+
+
+static int
+trace_rbug_texture_list(struct trace_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
+{
+ struct trace_screen *tr_scr = tr_rbug->tr_scr;
+ struct trace_texture *tr_tex = NULL;
+ struct tr_list *ptr;
+ rbug_texture_t *texs;
+ int i = 0;
+
+ pipe_mutex_lock(tr_scr->list_mutex);
+ texs = MALLOC(tr_scr->num_textures * sizeof(rbug_texture_t));
+ foreach(ptr, &tr_scr->textures) {
+ tr_tex = (struct trace_texture *)((char*)ptr - offsetof(struct trace_texture, list));
+ texs[i++] = VOID2U64(tr_tex);
+ }
+ pipe_mutex_unlock(tr_scr->list_mutex);
+
+ rbug_send_texture_list_reply(tr_rbug->con, serial, texs, i, NULL);
+ FREE(texs);
+
+ return 0;
+}
+
+static int
+trace_rbug_texture_info(struct trace_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
+{
+ struct trace_screen *tr_scr = tr_rbug->tr_scr;
+ struct trace_texture *tr_tex;
+ struct rbug_proto_texture_info *gpti = (struct rbug_proto_texture_info *)header;
+ struct tr_list *ptr;
+ struct pipe_texture *t;
+
+ pipe_mutex_lock(tr_scr->list_mutex);
+ foreach(ptr, &tr_scr->textures) {
+ tr_tex = (struct trace_texture *)((char*)ptr - offsetof(struct trace_texture, list));
+ if (gpti->texture == VOID2U64(tr_tex))
+ break;
+ tr_tex = NULL;
+ }
+
+ if (!tr_tex) {
+ pipe_mutex_unlock(tr_scr->list_mutex);
+ return -ESRCH;
+ }
+
+ t = tr_tex->texture;
+ rbug_send_texture_info_reply(tr_rbug->con, serial,
+ t->target, t->format,
+ t->width, t->last_level + 1,
+ t->height, t->last_level + 1,
+ t->depth, t->last_level + 1,
+ t->block.width, t->block.height, t->block.size,
+ t->last_level,
+ t->nr_samples,
+ t->tex_usage,
+ NULL);
+
+ pipe_mutex_unlock(tr_scr->list_mutex);
+
+ return 0;
+}
+
+static int
+trace_rbug_texture_read(struct trace_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
+{
+ struct rbug_proto_texture_read *gptr = (struct rbug_proto_texture_read *)header;
+
+ struct trace_screen *tr_scr = tr_rbug->tr_scr;
+ struct trace_texture *tr_tex;
+ struct tr_list *ptr;
+
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_texture *tex;
+ struct pipe_transfer *t;
+
+ void *map;
+
+ pipe_mutex_lock(tr_scr->list_mutex);
+ foreach(ptr, &tr_scr->textures) {
+ tr_tex = (struct trace_texture *)((char*)ptr - offsetof(struct trace_texture, list));
+ if (gptr->texture == VOID2U64(tr_tex))
+ break;
+ tr_tex = NULL;
+ }
+
+ if (!tr_tex) {
+ pipe_mutex_unlock(tr_scr->list_mutex);
+ return -ESRCH;
+ }
+
+ tex = tr_tex->texture;
+ t = screen->get_tex_transfer(tr_scr->screen, tex,
+ gptr->face, gptr->level, gptr->zslice,
+ PIPE_TRANSFER_READ,
+ gptr->x, gptr->y, gptr->w, gptr->h);
+
+ map = screen->transfer_map(screen, t);
+
+ rbug_send_texture_read_reply(tr_rbug->con, serial,
+ t->format,
+ t->block.width, t->block.height, t->block.size,
+ (uint8_t*)map, t->stride * t->nblocksy,
+ t->stride,
+ NULL);
+
+ screen->transfer_unmap(screen, t);
+ screen->tex_transfer_destroy(t);
+
+ pipe_mutex_unlock(tr_scr->list_mutex);
+
+ return 0;
+}
+
+static int
+trace_rbug_context_list(struct trace_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
+{
+ struct trace_screen *tr_scr = tr_rbug->tr_scr;
+ struct tr_list *ptr;
+ struct trace_context *tr_ctx = NULL;
+ rbug_context_t *ctxs;
+ int i = 0;
+
+ pipe_mutex_lock(tr_scr->list_mutex);
+ ctxs = MALLOC(tr_scr->num_contexts * sizeof(rbug_context_t));
+ foreach(ptr, &tr_scr->contexts) {
+ tr_ctx = (struct trace_context *)((char*)ptr - offsetof(struct trace_context, list));
+ ctxs[i++] = VOID2U64(tr_ctx);
+ }
+ pipe_mutex_unlock(tr_scr->list_mutex);
+
+ rbug_send_context_list_reply(tr_rbug->con, serial, ctxs, i, NULL);
+ FREE(ctxs);
+
+ return 0;
+}
+
+static int
+trace_rbug_context_info(struct trace_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
+{
+ struct rbug_proto_context_info *info = (struct rbug_proto_context_info *)header;
+
+ struct trace_screen *tr_scr = tr_rbug->tr_scr;
+ struct trace_context *tr_ctx = NULL;
+ rbug_texture_t cbufs[PIPE_MAX_COLOR_BUFS];
+ rbug_texture_t texs[PIPE_MAX_SAMPLERS];
+ int i;
+
+ pipe_mutex_lock(tr_scr->list_mutex);
+ tr_ctx = trace_rbug_get_context_locked(tr_scr, info->context);
+
+ if (!tr_ctx) {
+ pipe_mutex_unlock(tr_scr->list_mutex);
+ return -ESRCH;
+ }
+
+ /* protect the pipe context */
+ pipe_mutex_lock(tr_ctx->draw_mutex);
+ trace_dump_call_lock();
+
+ for (i = 0; i < tr_ctx->curr.nr_cbufs; i++)
+ cbufs[i] = VOID2U64(tr_ctx->curr.cbufs[i]);
+
+ for (i = 0; i < tr_ctx->curr.num_texs; i++)
+ texs[i] = VOID2U64(tr_ctx->curr.tex[i]);
+
+ rbug_send_context_info_reply(tr_rbug->con, serial,
+ VOID2U64(tr_ctx->curr.vs), VOID2U64(tr_ctx->curr.fs),
+ texs, tr_ctx->curr.num_texs,
+ cbufs, tr_ctx->curr.nr_cbufs,
+ VOID2U64(tr_ctx->curr.zsbuf),
+ tr_ctx->draw_blocker, tr_ctx->draw_blocked, NULL);
+
+ trace_dump_call_unlock();
+ pipe_mutex_unlock(tr_ctx->draw_mutex);
+
+ pipe_mutex_unlock(tr_scr->list_mutex);
+
+ return 0;
+}
+
+static int
+trace_rbug_context_draw_block(struct trace_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
+{
+ struct rbug_proto_context_draw_block *block = (struct rbug_proto_context_draw_block *)header;
+
+ struct trace_screen *tr_scr = tr_rbug->tr_scr;
+ struct trace_context *tr_ctx = NULL;
+
+ pipe_mutex_lock(tr_scr->list_mutex);
+ tr_ctx = trace_rbug_get_context_locked(tr_scr, block->context);
+
+ if (!tr_ctx) {
+ pipe_mutex_unlock(tr_scr->list_mutex);
+ return -ESRCH;
+ }
+
+ pipe_mutex_lock(tr_ctx->draw_mutex);
+ tr_ctx->draw_blocker |= block->block;
+ pipe_mutex_unlock(tr_ctx->draw_mutex);
+
+ pipe_mutex_unlock(tr_scr->list_mutex);
+
+ return 0;
+}
+
+static int
+trace_rbug_context_draw_step(struct trace_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
+{
+ struct rbug_proto_context_draw_step *step = (struct rbug_proto_context_draw_step *)header;
+
+ struct trace_screen *tr_scr = tr_rbug->tr_scr;
+ struct trace_context *tr_ctx = NULL;
+
+ pipe_mutex_lock(tr_scr->list_mutex);
+ tr_ctx = trace_rbug_get_context_locked(tr_scr, step->context);
+
+ if (!tr_ctx) {
+ pipe_mutex_unlock(tr_scr->list_mutex);
+ return -ESRCH;
+ }
+
+ pipe_mutex_lock(tr_ctx->draw_mutex);
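+ /* If the context stopped because of a draw rule, a step of RBUG_BLOCK_RULE
+ * releases all block bits at once; otherwise only the requested bits are
+ * cleared.
+ */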
+ if (tr_ctx->draw_blocked & RBUG_BLOCK_RULE) {
+ if (step->step & RBUG_BLOCK_RULE)
+ tr_ctx->draw_blocked &= ~RBUG_BLOCK_MASK;
+ } else {
+ tr_ctx->draw_blocked &= ~step->step;
+ }
+ pipe_mutex_unlock(tr_ctx->draw_mutex);
+
+#ifdef PIPE_THREAD_HAVE_CONDVAR
+ pipe_condvar_broadcast(tr_ctx->draw_cond);
+#endif
+
+ pipe_mutex_unlock(tr_scr->list_mutex);
+
+ return 0;
+}
+
+static int
+trace_rbug_context_draw_unblock(struct trace_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
+{
+ struct rbug_proto_context_draw_unblock *unblock = (struct rbug_proto_context_draw_unblock *)header;
+
+ struct trace_screen *tr_scr = tr_rbug->tr_scr;
+ struct trace_context *tr_ctx = NULL;
+
+ pipe_mutex_lock(tr_scr->list_mutex);
+ tr_ctx = trace_rbug_get_context_locked(tr_scr, unblock->context);
+
+ if (!tr_ctx) {
+ pipe_mutex_unlock(tr_scr->list_mutex);
+ return -ESRCH;
+ }
+
+ pipe_mutex_lock(tr_ctx->draw_mutex);
+ if (tr_ctx->draw_blocked & RBUG_BLOCK_RULE) {
+ if (unblock->unblock & RBUG_BLOCK_RULE)
+ tr_ctx->draw_blocked &= ~RBUG_BLOCK_MASK;
+ } else {
+ tr_ctx->draw_blocked &= ~unblock->unblock;
+ }
+ tr_ctx->draw_blocker &= ~unblock->unblock;
+ pipe_mutex_unlock(tr_ctx->draw_mutex);
+
+#ifdef PIPE_THREAD_HAVE_CONDVAR
+ pipe_condvar_broadcast(tr_ctx->draw_cond);
+#endif
+
+ pipe_mutex_unlock(tr_scr->list_mutex);
+
+ return 0;
+}
+
+static int
+trace_rbug_context_draw_rule(struct trace_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
+{
+ struct rbug_proto_context_draw_rule *rule = (struct rbug_proto_context_draw_rule *)header;
+
+ struct trace_screen *tr_scr = tr_rbug->tr_scr;
+ struct trace_context *tr_ctx = NULL;
+
+ pipe_mutex_lock(tr_scr->list_mutex);
+ tr_ctx = trace_rbug_get_context_locked(tr_scr, rule->context);
+
+ if (!tr_ctx) {
+ pipe_mutex_unlock(tr_scr->list_mutex);
+ return -ESRCH;
+ }
+
+ pipe_mutex_lock(tr_ctx->draw_mutex);
+ tr_ctx->draw_rule.vs = U642VOID(rule->vertex);
+ tr_ctx->draw_rule.fs = U642VOID(rule->fragment);
+ tr_ctx->draw_rule.tex = U642VOID(rule->texture);
+ tr_ctx->draw_rule.surf = U642VOID(rule->surface);
+ tr_ctx->draw_rule.blocker = rule->block;
+ tr_ctx->draw_blocker |= RBUG_BLOCK_RULE;
+ pipe_mutex_unlock(tr_ctx->draw_mutex);
+
+#ifdef PIPE_THREAD_HAVE_CONDVAR
+ pipe_condvar_broadcast(tr_ctx->draw_cond);
+#endif
+
+ pipe_mutex_unlock(tr_scr->list_mutex);
+
+ return 0;
+}
+
+static int
+trace_rbug_context_flush(struct trace_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
+{
+ struct rbug_proto_context_flush *flush = (struct rbug_proto_context_flush *)header;
+
+ struct trace_screen *tr_scr = tr_rbug->tr_scr;
+ struct trace_context *tr_ctx = NULL;
+
+ pipe_mutex_lock(tr_scr->list_mutex);
+ tr_ctx = trace_rbug_get_context_locked(tr_scr, flush->context);
+
+ if (!tr_ctx) {
+ pipe_mutex_unlock(tr_scr->list_mutex);
+ return -ESRCH;
+ }
+
+ /* protect the pipe context */
+ trace_dump_call_lock();
+
+ tr_ctx->pipe->flush(tr_ctx->pipe, flush->flags, NULL);
+
+ trace_dump_call_unlock();
+ pipe_mutex_unlock(tr_scr->list_mutex);
+
+ return 0;
+}
+
+static int
+trace_rbug_shader_list(struct trace_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
+{
+ struct rbug_proto_shader_list *list = (struct rbug_proto_shader_list *)header;
+
+ struct trace_screen *tr_scr = tr_rbug->tr_scr;
+ struct trace_context *tr_ctx = NULL;
+ struct trace_shader *tr_shdr = NULL;
+ struct tr_list *ptr;
+ rbug_shader_t *shdrs;
+ int i = 0;
+
+ pipe_mutex_lock(tr_scr->list_mutex);
+ tr_ctx = trace_rbug_get_context_locked(tr_scr, list->context);
+
+ if (!tr_ctx) {
+ pipe_mutex_unlock(tr_scr->list_mutex);
+ return -ESRCH;
+ }
+
+ pipe_mutex_lock(tr_ctx->list_mutex);
+ shdrs = MALLOC(tr_ctx->num_shaders * sizeof(rbug_shader_t));
+ foreach(ptr, &tr_ctx->shaders) {
+ tr_shdr = (struct trace_shader *)((char*)ptr - offsetof(struct trace_shader, list));
+ shdrs[i++] = VOID2U64(tr_shdr);
+ }
+
+ pipe_mutex_unlock(tr_ctx->list_mutex);
+ pipe_mutex_unlock(tr_scr->list_mutex);
+
+ rbug_send_shader_list_reply(tr_rbug->con, serial, shdrs, i, NULL);
+ FREE(shdrs);
+
+ return 0;
+}
+
+static int
+trace_rbug_shader_info(struct trace_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
+{
+ struct rbug_proto_shader_info *info = (struct rbug_proto_shader_info *)header;
+
+ struct trace_screen *tr_scr = tr_rbug->tr_scr;
+ struct trace_context *tr_ctx = NULL;
+ struct trace_shader *tr_shdr = NULL;
+ unsigned original_len;
+ unsigned replaced_len;
+
+ pipe_mutex_lock(tr_scr->list_mutex);
+ tr_ctx = trace_rbug_get_context_locked(tr_scr, info->context);
+
+ if (!tr_ctx) {
+ pipe_mutex_unlock(tr_scr->list_mutex);
+ return -ESRCH;
+ }
+
+ pipe_mutex_lock(tr_ctx->list_mutex);
+
+ tr_shdr = trace_rbug_get_shader_locked(tr_ctx, info->shader);
+
+ if (!tr_shdr) {
+ pipe_mutex_unlock(tr_ctx->list_mutex);
+ pipe_mutex_unlock(tr_scr->list_mutex);
+ return -ESRCH;
+ }
+
+ /* sanity check: the tokens are sent as raw 32-bit words below */
+ assert(sizeof(struct tgsi_token) == 4);
+
+ original_len = tgsi_num_tokens(tr_shdr->tokens);
+ if (tr_shdr->replaced_tokens)
+ replaced_len = tgsi_num_tokens(tr_shdr->replaced_tokens);
+ else
+ replaced_len = 0;
+
+ rbug_send_shader_info_reply(tr_rbug->con, serial,
+ (uint32_t*)tr_shdr->tokens, original_len,
+ (uint32_t*)tr_shdr->replaced_tokens, replaced_len,
+ tr_shdr->disabled,
+ NULL);
+
+ pipe_mutex_unlock(tr_ctx->list_mutex);
+ pipe_mutex_unlock(tr_scr->list_mutex);
+
+ return 0;
+}
+
+static int
+trace_rbug_shader_disable(struct trace_rbug *tr_rbug, struct rbug_header *header)
+{
+ struct rbug_proto_shader_disable *dis = (struct rbug_proto_shader_disable *)header;
+
+ struct trace_screen *tr_scr = tr_rbug->tr_scr;
+ struct trace_context *tr_ctx = NULL;
+ struct trace_shader *tr_shdr = NULL;
+
+ pipe_mutex_lock(tr_scr->list_mutex);
+ tr_ctx = trace_rbug_get_context_locked(tr_scr, dis->context);
+
+ if (!tr_ctx) {
+ pipe_mutex_unlock(tr_scr->list_mutex);
+ return -ESRCH;
+ }
+
+ pipe_mutex_lock(tr_ctx->list_mutex);
+
+ tr_shdr = trace_rbug_get_shader_locked(tr_ctx, dis->shader);
+
+ if (!tr_shdr) {
+ pipe_mutex_unlock(tr_ctx->list_mutex);
+ pipe_mutex_unlock(tr_scr->list_mutex);
+ return -ESRCH;
+ }
+
+ tr_shdr->disabled = dis->disable;
+
+ pipe_mutex_unlock(tr_ctx->list_mutex);
+ pipe_mutex_unlock(tr_scr->list_mutex);
+
+ return 0;
+}
+
+static int
+trace_rbug_shader_replace(struct trace_rbug *tr_rbug, struct rbug_header *header)
+{
+ struct rbug_proto_shader_replace *rep = (struct rbug_proto_shader_replace *)header;
+
+ struct trace_screen *tr_scr = tr_rbug->tr_scr;
+ struct trace_context *tr_ctx = NULL;
+ struct trace_shader *tr_shdr = NULL;
+ struct pipe_context *pipe = NULL;
+ void *state;
+
+ pipe_mutex_lock(tr_scr->list_mutex);
+ tr_ctx = trace_rbug_get_context_locked(tr_scr, rep->context);
+
+ if (!tr_ctx) {
+ pipe_mutex_unlock(tr_scr->list_mutex);
+ return -ESRCH;
+ }
+
+ pipe_mutex_lock(tr_ctx->list_mutex);
+
+ tr_shdr = trace_rbug_get_shader_locked(tr_ctx, rep->shader);
+
+ if (!tr_shdr) {
+ pipe_mutex_unlock(tr_ctx->list_mutex);
+ pipe_mutex_unlock(tr_scr->list_mutex);
+ return -ESRCH;
+ }
+
+ /* protect the pipe context */
+ trace_dump_call_lock();
+
+ pipe = tr_ctx->pipe;
+
+ /* remove any previously installed replacement shader */
+ if (tr_shdr->replaced) {
+ if (tr_ctx->curr.fs == tr_shdr || tr_ctx->curr.vs == tr_shdr)
+ trace_shader_bind_locked(pipe, tr_shdr, tr_shdr->state);
+
+ FREE(tr_shdr->replaced_tokens);
+ trace_shader_delete_locked(pipe, tr_shdr, tr_shdr->replaced);
+ tr_shdr->replaced = NULL;
+ tr_shdr->replaced_tokens = NULL;
+ }
+
+ /* an empty input means restore the original shader, which we did above */
+ if (rep->tokens_len == 0)
+ goto out;
+
+ tr_shdr->replaced_tokens = tgsi_dup_tokens((struct tgsi_token *)rep->tokens);
+ if (!tr_shdr->replaced_tokens)
+ goto err;
+
+ state = trace_shader_create_locked(pipe, tr_shdr, tr_shdr->replaced_tokens);
+ if (!state)
+ goto err;
+
+ /* bind the new shader if this shader is currently bound */
+ if (tr_ctx->curr.fs == tr_shdr || tr_ctx->curr.vs == tr_shdr)
+ trace_shader_bind_locked(pipe, tr_shdr, state);
+
+ /* save state */
+ tr_shdr->replaced = state;
+
+out:
+ trace_dump_call_unlock();
+ pipe_mutex_unlock(tr_ctx->list_mutex);
+ pipe_mutex_unlock(tr_scr->list_mutex);
+
+ return 0;
+
+err:
+ FREE(tr_shdr->replaced_tokens);
+ tr_shdr->replaced = NULL;
+ tr_shdr->replaced_tokens = NULL;
+
+ trace_dump_call_unlock();
+ pipe_mutex_unlock(tr_ctx->list_mutex);
+ pipe_mutex_unlock(tr_scr->list_mutex);
+ return -EINVAL;
+}
+
+static boolean
+trace_rbug_header(struct trace_rbug *tr_rbug, struct rbug_header *header, uint32_t serial)
+{
+ int ret = 0;
+
+ switch(header->opcode) {
+ case RBUG_OP_PING:
+ rbug_send_ping_reply(tr_rbug->con, serial, NULL);
+ break;
+ case RBUG_OP_TEXTURE_LIST:
+ ret = trace_rbug_texture_list(tr_rbug, header, serial);
+ break;
+ case RBUG_OP_TEXTURE_INFO:
+ ret = trace_rbug_texture_info(tr_rbug, header, serial);
+ break;
+ case RBUG_OP_TEXTURE_READ:
+ ret = trace_rbug_texture_read(tr_rbug, header, serial);
+ break;
+ case RBUG_OP_CONTEXT_LIST:
+ ret = trace_rbug_context_list(tr_rbug, header, serial);
+ break;
+ case RBUG_OP_CONTEXT_INFO:
+ ret = trace_rbug_context_info(tr_rbug, header, serial);
+ break;
+ case RBUG_OP_CONTEXT_DRAW_BLOCK:
+ ret = trace_rbug_context_draw_block(tr_rbug, header, serial);
+ break;
+ case RBUG_OP_CONTEXT_DRAW_STEP:
+ ret = trace_rbug_context_draw_step(tr_rbug, header, serial);
+ break;
+ case RBUG_OP_CONTEXT_DRAW_UNBLOCK:
+ ret = trace_rbug_context_draw_unblock(tr_rbug, header, serial);
+ break;
+ case RBUG_OP_CONTEXT_DRAW_RULE:
+ ret = trace_rbug_context_draw_rule(tr_rbug, header, serial);
+ break;
+ case RBUG_OP_CONTEXT_FLUSH:
+ ret = trace_rbug_context_flush(tr_rbug, header, serial);
+ break;
+ case RBUG_OP_SHADER_LIST:
+ ret = trace_rbug_shader_list(tr_rbug, header, serial);
+ break;
+ case RBUG_OP_SHADER_INFO:
+ ret = trace_rbug_shader_info(tr_rbug, header, serial);
+ break;
+ case RBUG_OP_SHADER_DISABLE:
+ ret = trace_rbug_shader_disable(tr_rbug, header);
+ break;
+ case RBUG_OP_SHADER_REPLACE:
+ ret = trace_rbug_shader_replace(tr_rbug, header);
+ break;
+ default:
+ debug_printf("%s - unsupported opcode %u\n", __FUNCTION__, header->opcode);
+ ret = -ENOSYS;
+ break;
+ }
+ rbug_free_header(header);
+
+ if (ret)
+ rbug_send_error_reply(tr_rbug->con, serial, ret, NULL);
+
+ return TRUE;
+}
+
+static void
+trace_rbug_con(struct trace_rbug *tr_rbug)
+{
+ struct rbug_header *header;
+ uint32_t serial;
+
+ debug_printf("%s - connection received\n", __FUNCTION__);
+
+ while(tr_rbug->running) {
+ header = rbug_get_message(tr_rbug->con, &serial);
+ if (!header)
+ break;
+
+ if (!trace_rbug_header(tr_rbug, header, serial))
+ break;
+ }
+
+ debug_printf("%s - connection closed\n", __FUNCTION__);
+
+ rbug_disconnect(tr_rbug->con);
+ tr_rbug->con = NULL;
+}
+
+PIPE_THREAD_ROUTINE(trace_rbug_thread, void_tr_rbug)
+{
+ struct trace_rbug *tr_rbug = void_tr_rbug;
+ uint16_t port = 13370;
+ int s = -1;
+ int c;
+
+ u_socket_init();
+
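+ /* Try the fixed port range 13370-13379 and listen on the first free port;
+ * the loop leaves 'port' one past the bound port, hence the --port when
+ * printing it below.
+ */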
+ for (;port <= 13379 && s < 0; port++)
+ s = u_socket_listen_on_port(port);
+
+ if (s < 0) {
+ debug_printf("trace_rbug - failed to listen\n");
+ return NULL;
+ }
+
+ u_socket_block(s, false);
+
+ debug_printf("trace_rbug - remote debugging listening on port %u\n", --port);
+
+ while(tr_rbug->running) {
+ sleep(1);
+
+ c = u_socket_accept(s);
+ if (c < 0)
+ continue;
+
+ u_socket_block(c, true);
+ tr_rbug->con = rbug_from_socket(c);
+
+ trace_rbug_con(tr_rbug);
+
+ u_socket_close(c);
+ }
+
+ u_socket_close(s);
+
+ u_socket_stop();
+
+ return NULL;
+}
+
+/**********************************************************
+ *
+ */
+
+struct trace_rbug *
+trace_rbug_start(struct trace_screen *tr_scr)
+{
+ struct trace_rbug *tr_rbug = CALLOC_STRUCT(trace_rbug);
+ if (!tr_rbug)
+ return NULL;
+
+ tr_rbug->tr_scr = tr_scr;
+ tr_rbug->running = TRUE;
+ tr_rbug->thread = pipe_thread_create(trace_rbug_thread, tr_rbug);
+
+ return tr_rbug;
+}
+
+void
+trace_rbug_stop(struct trace_rbug *tr_rbug)
+{
+ if (!tr_rbug)
+ return;
+
+ tr_rbug->running = FALSE;
+ pipe_thread_wait(tr_rbug->thread);
+
+ FREE(tr_rbug);
+}
+
+void
+trace_rbug_notify_draw_blocked(struct trace_context *tr_ctx)
+{
+ struct trace_screen *tr_scr = trace_screen(tr_ctx->base.screen);
+ struct trace_rbug *tr_rbug = tr_scr->rbug;
+
+ if (tr_rbug && tr_rbug->con)
+ rbug_send_context_draw_blocked(tr_rbug->con,
+ VOID2U64(tr_ctx), tr_ctx->draw_blocked, NULL);
+}
diff --git a/src/gallium/drivers/trace/tr_screen.c b/src/gallium/drivers/trace/tr_screen.c
new file mode 100644
index 0000000000..26f1c04594
--- /dev/null
+++ b/src/gallium/drivers/trace/tr_screen.c
@@ -0,0 +1,958 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_memory.h"
+#include "util/u_simple_list.h"
+
+#include "tr_buffer.h"
+#include "tr_dump.h"
+#include "tr_dump_state.h"
+#include "tr_texture.h"
+#include "tr_screen.h"
+
+#include "pipe/p_inlines.h"
+
+
+static boolean trace = FALSE;
+static boolean rbug = FALSE;
+
+static const char *
+trace_screen_get_name(struct pipe_screen *_screen)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct pipe_screen *screen = tr_scr->screen;
+ const char *result;
+
+ trace_dump_call_begin("pipe_screen", "get_name");
+
+ trace_dump_arg(ptr, screen);
+
+ result = screen->get_name(screen);
+
+ trace_dump_ret(string, result);
+
+ trace_dump_call_end();
+
+ return result;
+}
+
+
+static const char *
+trace_screen_get_vendor(struct pipe_screen *_screen)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct pipe_screen *screen = tr_scr->screen;
+ const char *result;
+
+ trace_dump_call_begin("pipe_screen", "get_vendor");
+
+ trace_dump_arg(ptr, screen);
+
+ result = screen->get_vendor(screen);
+
+ trace_dump_ret(string, result);
+
+ trace_dump_call_end();
+
+ return result;
+}
+
+
+static int
+trace_screen_get_param(struct pipe_screen *_screen,
+ int param)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct pipe_screen *screen = tr_scr->screen;
+ int result;
+
+ trace_dump_call_begin("pipe_screen", "get_param");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg(int, param);
+
+ result = screen->get_param(screen, param);
+
+ trace_dump_ret(int, result);
+
+ trace_dump_call_end();
+
+ return result;
+}
+
+
+static float
+trace_screen_get_paramf(struct pipe_screen *_screen,
+ int param)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct pipe_screen *screen = tr_scr->screen;
+ float result;
+
+ trace_dump_call_begin("pipe_screen", "get_paramf");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg(int, param);
+
+ result = screen->get_paramf(screen, param);
+
+ trace_dump_ret(float, result);
+
+ trace_dump_call_end();
+
+ return result;
+}
+
+
+static boolean
+trace_screen_is_format_supported(struct pipe_screen *_screen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned tex_usage,
+ unsigned geom_flags)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct pipe_screen *screen = tr_scr->screen;
+ boolean result;
+
+ trace_dump_call_begin("pipe_screen", "is_format_supported");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg(format, format);
+ trace_dump_arg(int, target);
+ trace_dump_arg(uint, tex_usage);
+ trace_dump_arg(uint, geom_flags);
+
+ result = screen->is_format_supported(screen, format, target, tex_usage, geom_flags);
+
+ trace_dump_ret(bool, result);
+
+ trace_dump_call_end();
+
+ return result;
+}
+
+
+static void
+trace_screen_flush_frontbuffer(struct pipe_screen *_screen,
+ struct pipe_surface *_surface,
+ void *context_private)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct trace_surface *tr_surf = trace_surface(_surface);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_surface *surface = tr_surf->surface;
+
+ trace_dump_call_begin("pipe_screen", "flush_frontbuffer");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg(ptr, surface);
+ /* XXX: hide, as there is nothing we can do with this
+ trace_dump_arg(ptr, context_private);
+ */
+
+ screen->flush_frontbuffer(screen, surface, context_private);
+
+ trace_dump_call_end();
+}
+
+
+/********************************************************************
+ * texture
+ */
+
+
+static struct pipe_texture *
+trace_screen_texture_create(struct pipe_screen *_screen,
+ const struct pipe_texture *templat)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_texture *result;
+
+ trace_dump_call_begin("pipe_screen", "texture_create");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg(template, templat);
+
+ result = screen->texture_create(screen, templat);
+
+ trace_dump_ret(ptr, result);
+
+ trace_dump_call_end();
+
+ result = trace_texture_create(tr_scr, result);
+
+ return result;
+}
+
+
+static struct pipe_texture *
+trace_screen_texture_blanket(struct pipe_screen *_screen,
+ const struct pipe_texture *templat,
+ const unsigned *ppitch,
+ struct pipe_buffer *_buffer)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct trace_buffer *tr_buf = trace_buffer(_buffer);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_buffer *buffer = tr_buf->buffer;
+ unsigned pitch = *ppitch;
+ struct pipe_texture *result;
+
+ trace_dump_call_begin("pipe_screen", "texture_blanket");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg(template, templat);
+ trace_dump_arg(uint, pitch);
+ trace_dump_arg(ptr, buffer);
+
+ result = screen->texture_blanket(screen, templat, ppitch, buffer);
+
+ trace_dump_ret(ptr, result);
+
+ trace_dump_call_end();
+
+ result = trace_texture_create(tr_scr, result);
+
+ return result;
+}
+
+
+static void
+trace_screen_texture_destroy(struct pipe_texture *_texture)
+{
+ struct trace_screen *tr_scr = trace_screen(_texture->screen);
+ struct trace_texture *tr_tex = trace_texture(_texture);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_texture *texture = tr_tex->texture;
+
+ assert(texture->screen == screen);
+
+ trace_dump_call_begin("pipe_screen", "texture_destroy");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg(ptr, texture);
+
+ trace_dump_call_end();
+
+ trace_texture_destroy(tr_tex);
+}
+
+
+/********************************************************************
+ * surface
+ */
+
+
+static struct pipe_surface *
+trace_screen_get_tex_surface(struct pipe_screen *_screen,
+ struct pipe_texture *_texture,
+ unsigned face, unsigned level,
+ unsigned zslice,
+ unsigned usage)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct trace_texture *tr_tex = trace_texture(_texture);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_texture *texture = tr_tex->texture;
+ struct pipe_surface *result = NULL;
+
+ assert(texture->screen == screen);
+
+ trace_dump_call_begin("pipe_screen", "get_tex_surface");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg(ptr, texture);
+ trace_dump_arg(uint, face);
+ trace_dump_arg(uint, level);
+ trace_dump_arg(uint, zslice);
+ trace_dump_arg(uint, usage);
+
+ result = screen->get_tex_surface(screen, texture, face, level, zslice, usage);
+
+ trace_dump_ret(ptr, result);
+
+ trace_dump_call_end();
+
+ result = trace_surface_create(tr_tex, result);
+
+ return result;
+}
+
+
+static void
+trace_screen_tex_surface_destroy(struct pipe_surface *_surface)
+{
+ struct trace_screen *tr_scr = trace_screen(_surface->texture->screen);
+ struct trace_surface *tr_surf = trace_surface(_surface);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_surface *surface = tr_surf->surface;
+
+ trace_dump_call_begin("pipe_screen", "tex_surface_destroy");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg(ptr, surface);
+
+ trace_dump_call_end();
+
+ trace_surface_destroy(tr_surf);
+}
+
+
+/********************************************************************
+ * transfer
+ */
+
+
+static struct pipe_transfer *
+trace_screen_get_tex_transfer(struct pipe_screen *_screen,
+ struct pipe_texture *_texture,
+ unsigned face, unsigned level,
+ unsigned zslice,
+ enum pipe_transfer_usage usage,
+ unsigned x, unsigned y, unsigned w, unsigned h)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct trace_texture *tr_tex = trace_texture(_texture);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_texture *texture = tr_tex->texture;
+ struct pipe_transfer *result = NULL;
+
+ assert(texture->screen == screen);
+
+ trace_dump_call_begin("pipe_screen", "get_tex_transfer");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg(ptr, texture);
+ trace_dump_arg(uint, face);
+ trace_dump_arg(uint, level);
+ trace_dump_arg(uint, zslice);
+ trace_dump_arg(uint, usage);
+
+ trace_dump_arg(uint, x);
+ trace_dump_arg(uint, y);
+ trace_dump_arg(uint, w);
+ trace_dump_arg(uint, h);
+
+ result = screen->get_tex_transfer(screen, texture, face, level, zslice, usage,
+ x, y, w, h);
+
+ trace_dump_ret(ptr, result);
+
+ trace_dump_call_end();
+
+ result = trace_transfer_create(tr_tex, result);
+
+ return result;
+}
+
+
+static void
+trace_screen_tex_transfer_destroy(struct pipe_transfer *_transfer)
+{
+ struct trace_screen *tr_scr = trace_screen(_transfer->texture->screen);
+ struct trace_transfer *tr_trans = trace_transfer(_transfer);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_transfer *transfer = tr_trans->transfer;
+
+ trace_dump_call_begin("pipe_screen", "tex_transfer_destroy");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg(ptr, transfer);
+
+ trace_dump_call_end();
+
+ trace_transfer_destroy(tr_trans);
+}
+
+
+static void *
+trace_screen_transfer_map(struct pipe_screen *_screen,
+ struct pipe_transfer *_transfer)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct trace_transfer *tr_trans = trace_transfer(_transfer);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_transfer *transfer = tr_trans->transfer;
+ void *map;
+
+ map = screen->transfer_map(screen, transfer);
+ if(map) {
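+ /* Remember writable mappings so the written data can be dumped as a
+ * transfer_write call when the transfer is unmapped below.
+ */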
+ if(transfer->usage != PIPE_TRANSFER_READ) {
+ assert(!tr_trans->map);
+ tr_trans->map = map;
+ }
+ }
+
+ return map;
+}
+
+
+static void
+trace_screen_transfer_unmap(struct pipe_screen *_screen,
+ struct pipe_transfer *_transfer)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct trace_transfer *tr_trans = trace_transfer(_transfer);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_transfer *transfer = tr_trans->transfer;
+
+ if(tr_trans->map) {
+ size_t size = transfer->nblocksy * transfer->stride;
+
+ trace_dump_call_begin("pipe_screen", "transfer_write");
+
+ trace_dump_arg(ptr, screen);
+
+ trace_dump_arg(ptr, transfer);
+
+ trace_dump_arg_begin("stride");
+ trace_dump_uint(transfer->stride);
+ trace_dump_arg_end();
+
+ trace_dump_arg_begin("data");
+ trace_dump_bytes(tr_trans->map, size);
+ trace_dump_arg_end();
+
+ trace_dump_arg_begin("size");
+ trace_dump_uint(size);
+ trace_dump_arg_end();
+
+ trace_dump_call_end();
+
+ tr_trans->map = NULL;
+ }
+
+ screen->transfer_unmap(screen, transfer);
+}
+
+
+/********************************************************************
+ * buffer
+ */
+
+
+static struct pipe_buffer *
+trace_screen_surface_buffer_create(struct pipe_screen *_screen,
+ unsigned width, unsigned height,
+ enum pipe_format format,
+ unsigned usage,
+ unsigned tex_usage,
+ unsigned *pstride)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct pipe_screen *screen = tr_scr->screen;
+ unsigned stride;
+ struct pipe_buffer *result;
+
+ trace_dump_call_begin("pipe_screen", "surface_buffer_create");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg(uint, width);
+ trace_dump_arg(uint, height);
+ trace_dump_arg(format, format);
+ trace_dump_arg(uint, usage);
+ trace_dump_arg(uint, tex_usage);
+
+ result = screen->surface_buffer_create(screen,
+ width, height,
+ format,
+ usage,
+ tex_usage,
+ pstride);
+
+ stride = *pstride;
+
+ trace_dump_arg(uint, stride);
+
+ trace_dump_ret(ptr, result);
+
+ trace_dump_call_end();
+
+ return trace_buffer_create(tr_scr, result);
+}
+
+
+static struct pipe_buffer *
+trace_screen_buffer_create(struct pipe_screen *_screen,
+ unsigned alignment,
+ unsigned usage,
+ unsigned size)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_buffer *result;
+
+ trace_dump_call_begin("pipe_screen", "buffer_create");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg(uint, alignment);
+ trace_dump_arg(uint, usage);
+ trace_dump_arg(uint, size);
+
+ result = screen->buffer_create(screen, alignment, usage, size);
+
+ trace_dump_ret(ptr, result);
+
+ trace_dump_call_end();
+
+ /* Zero the buffer to avoid dumping uninitialized memory */
+ if(result && (result->usage & PIPE_BUFFER_USAGE_CPU_WRITE)) {
+ void *map;
+ map = pipe_buffer_map(screen, result, PIPE_BUFFER_USAGE_CPU_WRITE);
+ if(map) {
+ memset(map, 0, result->size);
+ screen->buffer_unmap(screen, result);
+ }
+ }
+
+ return trace_buffer_create(tr_scr, result);
+}
+
+
+static struct pipe_buffer *
+trace_screen_user_buffer_create(struct pipe_screen *_screen,
+ void *data,
+ unsigned size)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_buffer *result;
+
+ trace_dump_call_begin("pipe_screen", "user_buffer_create");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg_begin("data");
+ trace_dump_bytes(data, size);
+ trace_dump_arg_end();
+ trace_dump_arg(uint, size);
+
+ result = screen->user_buffer_create(screen, data, size);
+
+ trace_dump_ret(ptr, result);
+
+ trace_dump_call_end();
+
+ if(result) {
+ assert(!(result->usage & TRACE_BUFFER_USAGE_USER));
+ result->usage |= TRACE_BUFFER_USAGE_USER;
+ }
+
+ return trace_buffer_create(tr_scr, result);
+}
+
+
+/**
+ * This function is used to track if data has been changed on a user buffer
+ * without map/unmap being called.
+ */
+void
+trace_screen_user_buffer_update(struct pipe_screen *_screen,
+ struct pipe_buffer *_buffer)
+{
+#if 0
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct pipe_screen *screen = tr_scr->screen;
+ const void *map;
+
+ if(buffer && buffer->usage & TRACE_BUFFER_USAGE_USER) {
+ map = screen->buffer_map(screen, buffer, PIPE_BUFFER_USAGE_CPU_READ);
+ if(map) {
+ trace_dump_call_begin("pipe_winsys", "buffer_write");
+
+ trace_dump_arg(ptr, screen);
+
+ trace_dump_arg(ptr, buffer);
+
+ trace_dump_arg_begin("data");
+ trace_dump_bytes(map, buffer->size);
+ trace_dump_arg_end();
+
+ trace_dump_arg_begin("size");
+ trace_dump_uint(buffer->size);
+ trace_dump_arg_end();
+
+ trace_dump_call_end();
+
+ screen->buffer_unmap(screen, buffer);
+ }
+ }
+#endif
+}
+
+
+static void *
+trace_screen_buffer_map(struct pipe_screen *_screen,
+ struct pipe_buffer *_buffer,
+ unsigned usage)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct trace_buffer *tr_buf = trace_buffer(_buffer);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_buffer *buffer = tr_buf->buffer;
+ void *map;
+
+ assert(screen->buffer_map);
+ map = screen->buffer_map(screen, buffer, usage);
+ if(map) {
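+ /* Remember writable mappings so their final contents can be dumped as a
+ * buffer_write call from buffer_unmap or buffer_flush_mapped_range.
+ */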
+ if(usage & PIPE_BUFFER_USAGE_CPU_WRITE) {
+ tr_buf->map = map;
+ }
+ }
+
+ return map;
+}
+
+
+static void *
+trace_screen_buffer_map_range(struct pipe_screen *_screen,
+ struct pipe_buffer *_buffer,
+ unsigned offset,
+ unsigned length,
+ unsigned usage)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct trace_buffer *tr_buf = trace_buffer(_buffer);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_buffer *buffer = tr_buf->buffer;
+ void *map;
+
+ assert(screen->buffer_map_range);
+ map = screen->buffer_map_range(screen, buffer, offset, length, usage);
+ if(map) {
+ if(usage & PIPE_BUFFER_USAGE_CPU_WRITE) {
+ tr_buf->map = map;
+ }
+ }
+
+ return map;
+}
+
+
+static void
+buffer_write(struct pipe_screen *screen,
+ struct pipe_buffer *buffer,
+ unsigned offset,
+ const char *map,
+ unsigned size)
+{
+ assert(map);
+
+ trace_dump_call_begin("pipe_screen", "buffer_write");
+
+ trace_dump_arg(ptr, screen);
+
+ trace_dump_arg(ptr, buffer);
+
+ trace_dump_arg(uint, offset);
+
+ trace_dump_arg_begin("data");
+ trace_dump_bytes(map + offset, size);
+ trace_dump_arg_end();
+
+ trace_dump_arg(uint, size);
+
+ trace_dump_call_end();
+
+}
+
+
+static void
+trace_screen_buffer_flush_mapped_range(struct pipe_screen *_screen,
+ struct pipe_buffer *_buffer,
+ unsigned offset,
+ unsigned length)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct trace_buffer *tr_buf = trace_buffer(_buffer);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_buffer *buffer = tr_buf->buffer;
+
+ assert(tr_buf->map);
+ buffer_write(screen, buffer, offset, tr_buf->map, length);
+ tr_buf->range_flushed = TRUE;
+ screen->buffer_flush_mapped_range(screen, buffer, offset, length);
+}
+
+
+static void
+trace_screen_buffer_unmap(struct pipe_screen *_screen,
+ struct pipe_buffer *_buffer)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct trace_buffer *tr_buf = trace_buffer(_buffer);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_buffer *buffer = tr_buf->buffer;
+
+ if (tr_buf->map && !tr_buf->range_flushed)
+ buffer_write(screen, buffer, 0, tr_buf->map, buffer->size);
+ tr_buf->map = NULL;
+ tr_buf->range_flushed = FALSE;
+ screen->buffer_unmap(screen, buffer);
+}
+
+
+static void
+trace_screen_buffer_destroy(struct pipe_buffer *_buffer)
+{
+ struct trace_screen *tr_scr = trace_screen(_buffer->screen);
+ struct trace_buffer *tr_buf = trace_buffer(_buffer);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_buffer *buffer = tr_buf->buffer;
+
+ trace_dump_call_begin("pipe_screen", "buffer_destroy");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg(ptr, buffer);
+
+ trace_dump_call_end();
+
+ trace_buffer_destroy(tr_scr, _buffer);
+}
+
+
+/********************************************************************
+ * fence
+ */
+
+
+static void
+trace_screen_fence_reference(struct pipe_screen *_screen,
+ struct pipe_fence_handle **pdst,
+ struct pipe_fence_handle *src)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_fence_handle *dst;
+
+ assert(pdst);
+ dst = *pdst;
+
+ trace_dump_call_begin("pipe_screen", "fence_reference");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg(ptr, dst);
+ trace_dump_arg(ptr, src);
+
+ screen->fence_reference(screen, pdst, src);
+
+ trace_dump_call_end();
+}
+
+
+static int
+trace_screen_fence_signalled(struct pipe_screen *_screen,
+ struct pipe_fence_handle *fence,
+ unsigned flags)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct pipe_screen *screen = tr_scr->screen;
+ int result;
+
+ trace_dump_call_begin("pipe_screen", "fence_signalled");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg(ptr, fence);
+ trace_dump_arg(uint, flags);
+
+ result = screen->fence_signalled(screen, fence, flags);
+
+ trace_dump_ret(int, result);
+
+ trace_dump_call_end();
+
+ return result;
+}
+
+
+static int
+trace_screen_fence_finish(struct pipe_screen *_screen,
+ struct pipe_fence_handle *fence,
+ unsigned flags)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct pipe_screen *screen = tr_scr->screen;
+ int result;
+
+ trace_dump_call_begin("pipe_screen", "fence_finish");
+
+ trace_dump_arg(ptr, screen);
+ trace_dump_arg(ptr, fence);
+ trace_dump_arg(uint, flags);
+
+ result = screen->fence_finish(screen, fence, flags);
+
+ trace_dump_ret(int, result);
+
+ trace_dump_call_end();
+
+ return result;
+}
+
+
+/********************************************************************
+ * screen
+ */
+
+static void
+trace_screen_destroy(struct pipe_screen *_screen)
+{
+ struct trace_screen *tr_scr = trace_screen(_screen);
+ struct pipe_screen *screen = tr_scr->screen;
+
+ trace_dump_call_begin("pipe_screen", "destroy");
+ trace_dump_arg(ptr, screen);
+ trace_dump_call_end();
+ trace_dump_trace_end();
+
+ if (tr_scr->rbug)
+ trace_rbug_stop(tr_scr->rbug);
+
+ screen->destroy(screen);
+
+ FREE(tr_scr);
+}
+
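+/*
+ * Decide, once per process, whether the trace layer should be active: tracing
+ * is enabled when trace_dump_trace_begin() succeeds, and GALLIUM_RBUG=true in
+ * the environment additionally requests the remote debugging thread (started
+ * from trace_screen_create).
+ */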
+boolean
+trace_enabled(void)
+{
+ static boolean firstrun = TRUE;
+
+ if (!firstrun)
+ return trace;
+ firstrun = FALSE;
+
+ trace_dump_init();
+
+ if(trace_dump_trace_begin()) {
+ trace_dumping_start();
+ trace = TRUE;
+ }
+
+ if (debug_get_bool_option("GALLIUM_RBUG", FALSE)) {
+ trace = TRUE;
+ rbug = TRUE;
+ }
+
+ return trace;
+}
+
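+/*
+ * Wrap a real screen with the tracing screen. A minimal usage sketch (not
+ * part of this commit), assuming some hypothetical real_screen_create():
+ *
+ *    struct pipe_screen *screen = real_screen_create(winsys);
+ *    return trace_screen_create(screen);
+ *
+ * When tracing is not enabled the original screen is returned unchanged, so
+ * the wrapping is harmless.
+ */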
+struct pipe_screen *
+trace_screen_create(struct pipe_screen *screen)
+{
+ struct trace_screen *tr_scr;
+ struct pipe_winsys *winsys;
+
+ if(!screen)
+ goto error1;
+
+ if (!trace_enabled())
+ goto error1;
+
+ trace_dump_call_begin("", "pipe_screen_create");
+
+ tr_scr = CALLOC_STRUCT(trace_screen);
+ if(!tr_scr)
+ goto error2;
+
+#if 0
+ winsys = trace_winsys_create(screen->winsys);
+ if(!winsys)
+ goto error3;
+#else
+ winsys = screen->winsys;
+#endif
+ pipe_mutex_init(tr_scr->list_mutex);
+ make_empty_list(&tr_scr->buffers);
+ make_empty_list(&tr_scr->contexts);
+ make_empty_list(&tr_scr->textures);
+ make_empty_list(&tr_scr->surfaces);
+ make_empty_list(&tr_scr->transfers);
+
+ tr_scr->base.winsys = winsys;
+ tr_scr->base.destroy = trace_screen_destroy;
+ tr_scr->base.get_name = trace_screen_get_name;
+ tr_scr->base.get_vendor = trace_screen_get_vendor;
+ tr_scr->base.get_param = trace_screen_get_param;
+ tr_scr->base.get_paramf = trace_screen_get_paramf;
+ tr_scr->base.is_format_supported = trace_screen_is_format_supported;
+ tr_scr->base.texture_create = trace_screen_texture_create;
+ tr_scr->base.texture_blanket = trace_screen_texture_blanket;
+ tr_scr->base.texture_destroy = trace_screen_texture_destroy;
+ tr_scr->base.get_tex_surface = trace_screen_get_tex_surface;
+ tr_scr->base.tex_surface_destroy = trace_screen_tex_surface_destroy;
+ tr_scr->base.get_tex_transfer = trace_screen_get_tex_transfer;
+ tr_scr->base.tex_transfer_destroy = trace_screen_tex_transfer_destroy;
+ tr_scr->base.transfer_map = trace_screen_transfer_map;
+ tr_scr->base.transfer_unmap = trace_screen_transfer_unmap;
+ tr_scr->base.buffer_create = trace_screen_buffer_create;
+ tr_scr->base.user_buffer_create = trace_screen_user_buffer_create;
+ tr_scr->base.surface_buffer_create = trace_screen_surface_buffer_create;
+ if (screen->buffer_map)
+ tr_scr->base.buffer_map = trace_screen_buffer_map;
+ if (screen->buffer_map_range)
+ tr_scr->base.buffer_map_range = trace_screen_buffer_map_range;
+ if (screen->buffer_flush_mapped_range)
+ tr_scr->base.buffer_flush_mapped_range = trace_screen_buffer_flush_mapped_range;
+ if (screen->buffer_unmap)
+ tr_scr->base.buffer_unmap = trace_screen_buffer_unmap;
+ tr_scr->base.buffer_destroy = trace_screen_buffer_destroy;
+ tr_scr->base.fence_reference = trace_screen_fence_reference;
+ tr_scr->base.fence_signalled = trace_screen_fence_signalled;
+ tr_scr->base.fence_finish = trace_screen_fence_finish;
+ tr_scr->base.flush_frontbuffer = trace_screen_flush_frontbuffer;
+ tr_scr->screen = screen;
+
+ trace_dump_ret(ptr, screen);
+ trace_dump_call_end();
+
+ if (rbug)
+ tr_scr->rbug = trace_rbug_start(tr_scr);
+
+ return &tr_scr->base;
+
+#if 0
+error3:
+ FREE(tr_scr);
+#endif
+error2:
+ trace_dump_ret(ptr, screen);
+ trace_dump_call_end();
+ trace_dump_trace_end();
+error1:
+ return screen;
+}
+
+
+struct trace_screen *
+trace_screen(struct pipe_screen *screen)
+{
+ assert(screen);
+ assert(screen->destroy == trace_screen_destroy);
+ return (struct trace_screen *)screen;
+}
diff --git a/src/gallium/drivers/trace/tr_screen.h b/src/gallium/drivers/trace/tr_screen.h
new file mode 100644
index 0000000000..dba8cd7c65
--- /dev/null
+++ b/src/gallium/drivers/trace/tr_screen.h
@@ -0,0 +1,130 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TR_SCREEN_H_
+#define TR_SCREEN_H_
+
+
+#include "pipe/p_screen.h"
+#include "pipe/p_thread.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct tr_list {
+ struct tr_list *next;
+ struct tr_list *prev;
+};
+
+/**
+ * It often happens that new data is written directly to the user buffers
+ * without mapping/unmapping. This flag marks user buffers (it is set by
+ * trace_screen_user_buffer_create()) so that their contents can be dumped
+ * before they are used by the pipe context.
+ */
+#define TRACE_BUFFER_USAGE_USER (1 << 31)
+
+
+struct trace_screen
+{
+ struct pipe_screen base;
+
+ struct pipe_screen *screen;
+
+ /* remote debugger */
+ struct trace_rbug *rbug;
+
+ pipe_mutex list_mutex;
+ int num_buffers;
+ int num_contexts;
+ int num_textures;
+ int num_surfaces;
+ int num_transfers;
+ struct tr_list buffers;
+ struct tr_list contexts;
+ struct tr_list textures;
+ struct tr_list surfaces;
+ struct tr_list transfers;
+};
+
+
+/*
+ * tr_rbug.c
+ */
+
+
+struct trace_rbug;
+
+struct trace_rbug *
+trace_rbug_start(struct trace_screen *tr_scr);
+
+void
+trace_rbug_stop(struct trace_rbug *tr_rbug);
+
+
+/*
+ * tr_screen.c
+ */
+
+
+boolean
+trace_enabled(void);
+
+struct trace_screen *
+trace_screen(struct pipe_screen *screen);
+
+struct pipe_screen *
+trace_screen_create(struct pipe_screen *screen);
+
+void
+trace_screen_user_buffer_update(struct pipe_screen *screen,
+ struct pipe_buffer *buffer);
+
+#define trace_screen_add_to_list(tr_scr, name, obj) \
+ do { \
+ pipe_mutex_lock(tr_scr->list_mutex); \
+ insert_at_head(&tr_scr->name, &obj->list); \
+ tr_scr->num_##name++; \
+ pipe_mutex_unlock(tr_scr->list_mutex); \
+ } while (0)
+
+#define trace_screen_remove_from_list(tr_scr, name, obj) \
+ do { \
+ pipe_mutex_lock(tr_scr->list_mutex); \
+ remove_from_list(&obj->list); \
+ tr_scr->num_##name--; \
+ pipe_mutex_unlock(tr_scr->list_mutex); \
+ } while (0)
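+
+/*
+ * Typical usage, as in tr_texture.c: the wrapped object embeds a
+ * 'struct tr_list list' member and is tracked with
+ *
+ *    trace_screen_add_to_list(tr_scr, textures, tr_tex);
+ *    ...
+ *    trace_screen_remove_from_list(tr_scr, textures, tr_tex);
+ */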
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* TR_SCREEN_H_ */
diff --git a/src/gallium/drivers/trace/tr_state.c b/src/gallium/drivers/trace/tr_state.c
new file mode 100644
index 0000000000..d8c11640bf
--- /dev/null
+++ b/src/gallium/drivers/trace/tr_state.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "tr_state.h"
+
+#include "util/u_memory.h"
+#include "util/u_simple_list.h"
+
+#include "tgsi/tgsi_parse.h"
+
+struct trace_shader * trace_shader_create(struct trace_context *tr_ctx,
+ const struct pipe_shader_state *state,
+ void *result,
+ enum trace_shader_type type)
+{
+ struct trace_shader *tr_shdr = CALLOC_STRUCT(trace_shader);
+
+ tr_shdr->state = result;
+ tr_shdr->type = type;
+ tr_shdr->tokens = tgsi_dup_tokens(state->tokens);
+
+ /* trace_screen_add_to_list() works on the context as well, since the
+ * context has matching list_mutex, num_shaders and shaders fields */
+ trace_screen_add_to_list(tr_ctx, shaders, tr_shdr);
+
+ return tr_shdr;
+}
+
+void trace_shader_destroy(struct trace_context *tr_ctx,
+ struct trace_shader *tr_shdr)
+{
+ trace_screen_remove_from_list(tr_ctx, shaders, tr_shdr);
+
+ if (tr_shdr->replaced) {
+ if (tr_shdr->type == TRACE_SHADER_FRAGMENT)
+ tr_ctx->pipe->delete_fs_state(tr_ctx->pipe, tr_shdr->replaced);
+ else if (tr_shdr->type == TRACE_SHADER_VERTEX)
+ tr_ctx->pipe->delete_vs_state(tr_ctx->pipe, tr_shdr->replaced);
+ else
+ assert(0);
+ }
+
+ FREE(tr_shdr->replaced_tokens);
+ FREE(tr_shdr->tokens);
+ FREE(tr_shdr);
+}
diff --git a/src/gallium/drivers/trace/tr_state.h b/src/gallium/drivers/trace/tr_state.h
new file mode 100644
index 0000000000..1c16042ee5
--- /dev/null
+++ b/src/gallium/drivers/trace/tr_state.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef TR_STATE_H_
+#define TR_STATE_H_
+
+#include "tr_context.h"
+
+struct tgsi_token;
+
+enum trace_shader_type {
+ TRACE_SHADER_FRAGMENT = 0,
+ TRACE_SHADER_VERTEX = 1,
+ TRACE_SHADER_GEOMETRY = 2,
+};
+
+struct trace_shader
+{
+ struct tr_list list;
+
+ enum trace_shader_type type;
+
+ void *state;
+ void *replaced;
+
+ struct tgsi_token *tokens;
+ struct tgsi_token *replaced_tokens;
+
+ boolean disabled;
+};
+
+
+static INLINE struct trace_shader *
+trace_shader(void *state)
+{
+ return (struct trace_shader *)state;
+}
+
+struct trace_shader * trace_shader_create(struct trace_context *tr_ctx,
+ const struct pipe_shader_state *state,
+ void *result,
+ enum trace_shader_type type);
+
+void trace_shader_destroy(struct trace_context *tr_ctx,
+ struct trace_shader *tr_shdr);
+
+#endif
diff --git a/src/gallium/drivers/trace/tr_texture.c b/src/gallium/drivers/trace/tr_texture.c
new file mode 100644
index 0000000000..1f25fe38d4
--- /dev/null
+++ b/src/gallium/drivers/trace/tr_texture.c
@@ -0,0 +1,170 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "util/u_hash_table.h"
+#include "util/u_memory.h"
+#include "util/u_simple_list.h"
+
+#include "tr_screen.h"
+#include "tr_texture.h"
+
+
+struct pipe_texture *
+trace_texture_create(struct trace_screen *tr_scr,
+ struct pipe_texture *texture)
+{
+ struct trace_texture *tr_tex;
+
+ if(!texture)
+ goto error;
+
+ assert(texture->screen == tr_scr->screen);
+
+ tr_tex = CALLOC_STRUCT(trace_texture);
+ if(!tr_tex)
+ goto error;
+
+ memcpy(&tr_tex->base, texture, sizeof(struct pipe_texture));
+
+ pipe_reference_init(&tr_tex->base.reference, 1);
+ tr_tex->base.screen = &tr_scr->base;
+ tr_tex->texture = texture;
+
+ trace_screen_add_to_list(tr_scr, textures, tr_tex);
+
+ return &tr_tex->base;
+
+error:
+ pipe_texture_reference(&texture, NULL);
+ return NULL;
+}
+
+
+void
+trace_texture_destroy(struct trace_texture *tr_tex)
+{
+ struct trace_screen *tr_scr = trace_screen(tr_tex->base.screen);
+
+ trace_screen_remove_from_list(tr_scr, textures, tr_tex);
+
+ pipe_texture_reference(&tr_tex->texture, NULL);
+ FREE(tr_tex);
+}
+
+
+struct pipe_surface *
+trace_surface_create(struct trace_texture *tr_tex,
+ struct pipe_surface *surface)
+{
+ struct trace_screen *tr_scr = trace_screen(tr_tex->base.screen);
+ struct trace_surface *tr_surf;
+
+ if(!surface)
+ goto error;
+
+ assert(surface->texture == tr_tex->texture);
+
+ tr_surf = CALLOC_STRUCT(trace_surface);
+ if(!tr_surf)
+ goto error;
+
+ memcpy(&tr_surf->base, surface, sizeof(struct pipe_surface));
+
+ pipe_reference_init(&tr_surf->base.reference, 1);
+ tr_surf->base.texture = NULL;
+ pipe_texture_reference(&tr_surf->base.texture, &tr_tex->base);
+ tr_surf->surface = surface;
+
+ trace_screen_add_to_list(tr_scr, surfaces, tr_surf);
+
+ return &tr_surf->base;
+
+error:
+ pipe_surface_reference(&surface, NULL);
+ return NULL;
+}
+
+
+void
+trace_surface_destroy(struct trace_surface *tr_surf)
+{
+ struct trace_screen *tr_scr = trace_screen(tr_surf->base.texture->screen);
+
+ trace_screen_remove_from_list(tr_scr, surfaces, tr_surf);
+
+ pipe_texture_reference(&tr_surf->base.texture, NULL);
+ pipe_surface_reference(&tr_surf->surface, NULL);
+ FREE(tr_surf);
+}
+
+
+struct pipe_transfer *
+trace_transfer_create(struct trace_texture *tr_tex,
+ struct pipe_transfer *transfer)
+{
+ struct trace_screen *tr_scr = trace_screen(tr_tex->base.screen);
+ struct trace_transfer *tr_trans;
+
+ if(!transfer)
+ goto error;
+
+ assert(transfer->texture == tr_tex->texture);
+
+ tr_trans = CALLOC_STRUCT(trace_transfer);
+ if(!tr_trans)
+ goto error;
+
+ memcpy(&tr_trans->base, transfer, sizeof(struct pipe_transfer));
+
+ tr_trans->base.texture = NULL;
+ pipe_texture_reference(&tr_trans->base.texture, &tr_tex->base);
+ tr_trans->transfer = transfer;
+ assert(tr_trans->base.texture == &tr_tex->base);
+
+ trace_screen_add_to_list(tr_scr, transfers, tr_trans);
+
+ return &tr_trans->base;
+
+error:
+ if (transfer)
+    transfer->texture->screen->tex_transfer_destroy(transfer);
+ return NULL;
+}
+
+
+void
+trace_transfer_destroy(struct trace_transfer *tr_trans)
+{
+ struct trace_screen *tr_scr = trace_screen(tr_trans->base.texture->screen);
+ struct pipe_screen *screen = tr_trans->transfer->texture->screen;
+
+ trace_screen_remove_from_list(tr_scr, transfers, tr_trans);
+
+ pipe_texture_reference(&tr_trans->base.texture, NULL);
+ screen->tex_transfer_destroy(tr_trans->transfer);
+ FREE(tr_trans);
+}
+
diff --git a/src/gallium/drivers/trace/tr_texture.h b/src/gallium/drivers/trace/tr_texture.h
new file mode 100644
index 0000000000..395e523e73
--- /dev/null
+++ b/src/gallium/drivers/trace/tr_texture.h
@@ -0,0 +1,122 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TR_TEXTURE_H_
+#define TR_TEXTURE_H_
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_state.h"
+
+#include "tr_screen.h"
+
+
+struct trace_texture
+{
+ struct pipe_texture base;
+
+ struct pipe_texture *texture;
+
+ struct tr_list list;
+};
+
+
+struct trace_surface
+{
+ struct pipe_surface base;
+
+ struct pipe_surface *surface;
+
+ struct tr_list list;
+};
+
+
+struct trace_transfer
+{
+ struct pipe_transfer base;
+
+ struct pipe_transfer *transfer;
+
+ struct tr_list list;
+
+ void *map;
+};
+
+
+static INLINE struct trace_texture *
+trace_texture(struct pipe_texture *texture)
+{
+ if(!texture)
+ return NULL;
+ (void)trace_screen(texture->screen);
+ return (struct trace_texture *)texture;
+}
+
+
+static INLINE struct trace_surface *
+trace_surface(struct pipe_surface *surface)
+{
+ if(!surface)
+ return NULL;
+ (void)trace_texture(surface->texture);
+ return (struct trace_surface *)surface;
+}
+
+
+static INLINE struct trace_transfer *
+trace_transfer(struct pipe_transfer *transfer)
+{
+ if(!transfer)
+ return NULL;
+ (void)trace_texture(transfer->texture);
+ return (struct trace_transfer *)transfer;
+}
+
+
+struct pipe_texture *
+trace_texture_create(struct trace_screen *tr_scr,
+ struct pipe_texture *texture);
+
+void
+trace_texture_destroy(struct trace_texture *tr_tex);
+
+struct pipe_surface *
+trace_surface_create(struct trace_texture *tr_tex,
+ struct pipe_surface *surface);
+
+void
+trace_surface_destroy(struct trace_surface *tr_surf);
+
+struct pipe_transfer *
+trace_transfer_create(struct trace_texture *tr_tex,
+ struct pipe_transfer *transfer);
+
+void
+trace_transfer_destroy(struct trace_transfer *tr_trans);
+
+
+#endif /* TR_TEXTURE_H_ */
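
The structs and inline casts above implement the trace driver's wrap/unwrap pattern: every pipe_texture, pipe_surface and pipe_transfer handed out by the trace screen is really a trace_* wrapper around the underlying driver's object. As a minimal sketch (not part of the patch itself; the helper name is illustrative), unwrapping looks like this:

    static INLINE struct pipe_texture *
    trace_texture_unwrap_example(struct pipe_texture *texture)
    {
       /* trace_texture() returns NULL for NULL and otherwise just sanity
        * checks the owning screen and casts to the wrapper type. */
       struct trace_texture *tr_tex = trace_texture(texture);

       return tr_tex ? tr_tex->texture : NULL;
    }
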
diff --git a/src/gallium/drivers/trace/trace.xsl b/src/gallium/drivers/trace/trace.xsl
new file mode 100644
index 0000000000..7be95e0e75
--- /dev/null
+++ b/src/gallium/drivers/trace/trace.xsl
@@ -0,0 +1,188 @@
+<?xml version="1.0"?>
+
+<!--
+
+Copyright 2008 Tungsten Graphics, Inc.
+
+This program is free software: you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+!-->
+
+<xsl:transform version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+
+ <xsl:output method="html" />
+
+ <xsl:strip-space elements="*" />
+
+ <xsl:template match="/trace">
+ <html>
+ <head>
+ <title>Gallium Trace</title>
+ </head>
+ <style>
+ body {
+ font-family: verdana, sans-serif;
+ font-size: 11px;
+ font-weight: normal;
+ text-align : left;
+ }
+
+ .fun {
+ font-weight: bold;
+ }
+
+ .var {
+ font-style: italic;
+ }
+
+ .typ {
+ display: none;
+ }
+
+ .lit {
+ color: #0000ff;
+ }
+
+ .ptr {
+ color: #008000;
+ }
+ </style>
+ <body>
+ <ol class="calls">
+ <xsl:apply-templates/>
+ </ol>
+ </body>
+ </html>
+ </xsl:template>
+
+ <xsl:template match="call">
+ <li>
+ <xsl:attribute name="value">
+ <xsl:apply-templates select="@no"/>
+ </xsl:attribute>
+ <span class="fun">
+ <xsl:value-of select="@class"/>
+ <xsl:text>::</xsl:text>
+ <xsl:value-of select="@method"/>
+ </span>
+ <xsl:text>(</xsl:text>
+ <xsl:apply-templates select="arg"/>
+ <xsl:text>)</xsl:text>
+ <xsl:apply-templates select="ret"/>
+ </li>
+ </xsl:template>
+
+ <xsl:template match="arg|member">
+ <xsl:apply-templates select="@name"/>
+ <xsl:text> = </xsl:text>
+ <xsl:apply-templates />
+ <xsl:if test="position() != last()">
+ <xsl:text>, </xsl:text>
+ </xsl:if>
+ </xsl:template>
+
+ <xsl:template match="ret">
+ <xsl:text> = </xsl:text>
+ <xsl:apply-templates />
+ </xsl:template>
+
+ <xsl:template match="bool|int|uint|float|enum">
+ <span class="lit">
+ <xsl:value-of select="text()"/>
+ </span>
+ </xsl:template>
+
+ <xsl:template match="bytes">
+ <span class="lit">
+ <xsl:text>...</xsl:text>
+ </span>
+ </xsl:template>
+
+ <xsl:template match="string">
+ <span class="lit">
+ <xsl:text>"</xsl:text>
+ <xsl:call-template name="break">
+ <xsl:with-param name="text" select="text()"/>
+ </xsl:call-template>
+ <xsl:text>"</xsl:text>
+ </span>
+ </xsl:template>
+
+ <xsl:template match="array|struct">
+ <xsl:text>{</xsl:text>
+ <xsl:apply-templates />
+ <xsl:text>}</xsl:text>
+ </xsl:template>
+
+ <xsl:template match="elem">
+ <xsl:apply-templates />
+ <xsl:if test="position() != last()">
+ <xsl:text>, </xsl:text>
+ </xsl:if>
+ </xsl:template>
+
+ <xsl:template match="null">
+ <span class="ptr">
+ <xsl:text>NULL</xsl:text>
+ </span>
+ </xsl:template>
+
+ <xsl:template match="ptr">
+ <span class="ptr">
+ <xsl:value-of select="text()"/>
+ </span>
+ </xsl:template>
+
+ <xsl:template match="@name">
+ <span class="var">
+ <xsl:value-of select="."/>
+ </span>
+ </xsl:template>
+
+ <xsl:template name="break">
+ <xsl:param name="text" select="."/>
+ <xsl:choose>
+ <xsl:when test="contains($text, '&#xa;')">
+ <xsl:value-of select="substring-before($text, '&#xa;')"/>
+ <br/>
+ <xsl:call-template name="break">
+ <xsl:with-param name="text" select="substring-after($text, '&#xa;')"/>
+ </xsl:call-template>
+ </xsl:when>
+ <xsl:otherwise>
+ <xsl:value-of select="$text"/>
+ </xsl:otherwise>
+ </xsl:choose>
+ </xsl:template>
+
+ <xsl:template name="replace">
+ <xsl:param name="text"/>
+ <xsl:param name="from"/>
+ <xsl:param name="to"/>
+ <xsl:choose>
+ <xsl:when test="contains($text,$from)">
+ <xsl:value-of select="concat(substring-before($text,$from),$to)"/>
+ <xsl:call-template name="replace">
+ <xsl:with-param name="text" select="substring-after($text,$from)"/>
+ <xsl:with-param name="from" select="$from"/>
+ <xsl:with-param name="to" select="$to"/>
+ </xsl:call-template>
+ </xsl:when>
+ <xsl:otherwise>
+ <xsl:value-of select="$text"/>
+ </xsl:otherwise>
+ </xsl:choose>
+ </xsl:template>
+
+</xsl:transform>
diff --git a/src/gallium/include/pipe/internal/p_winsys_screen.h b/src/gallium/include/pipe/internal/p_winsys_screen.h
new file mode 100644
index 0000000000..a1542dada7
--- /dev/null
+++ b/src/gallium/include/pipe/internal/p_winsys_screen.h
@@ -0,0 +1,190 @@
+ /**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * This is the interface that Gallium3D requires any window system
+ * hosting it to implement. This is the only include file in Gallium3D
+ * which is public.
+ */
+
+#ifndef P_WINSYS_H
+#define P_WINSYS_H
+
+
+#include "pipe/p_format.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/** Opaque type */
+struct pipe_fence_handle;
+
+struct pipe_surface;
+
+
+/**
+ * Gallium3D drivers are (meant to be!) independent of both GL and the
+ * window system. The window system provides a buffer manager and a
+ * set of additional hooks for things like command buffer submission,
+ * etc.
+ *
+ * There clearly has to be some agreement between the window system
+ * driver and the hardware driver about the format of command buffers,
+ * etc.
+ */
+struct pipe_winsys
+{
+ void (*destroy)( struct pipe_winsys *ws );
+
+ /** Returns name of this winsys interface */
+ const char *(*get_name)( struct pipe_winsys *ws );
+
+ /**
+ * Do any special operations to ensure buffer size is correct
+ */
+ void (*update_buffer)( struct pipe_winsys *ws,
+ void *context_private );
+ /**
+ * Do any special operations to ensure frontbuffer contents are
+ * displayed, eg copy fake frontbuffer.
+ */
+ void (*flush_frontbuffer)( struct pipe_winsys *ws,
+ struct pipe_surface *surf,
+ void *context_private );
+
+
+ /**
+ * Buffer management. Buffer attributes are mostly fixed over the buffer's lifetime.
+ *
+ * Remember that gallium gets to choose the interface it needs, and the
+ * window systems must then implement that interface (rather than the
+ * other way around...).
+ *
+ * usage is a bitmask of PIPE_BUFFER_USAGE_PIXEL/VERTEX/INDEX/CONSTANT. This
+ * usage argument is only an optimization hint, not a guarantee, therefore
+ * proper behavior must be observed in all circumstances.
+ *
+ * alignment indicates the client's alignment requirements, eg for
+ * SSE instructions.
+ */
+ struct pipe_buffer *(*buffer_create)( struct pipe_winsys *ws,
+ unsigned alignment,
+ unsigned usage,
+ unsigned size );
+
+ /**
+ * Create a buffer that wraps user-space data.
+ *
+ * Effectively this schedules a delayed call to buffer_create
+ * followed by an upload of the data at *some point in the future*,
+ * or perhaps never. Basically the allocate/upload is delayed
+ * until the buffer is actually passed to hardware.
+ *
+ * The intention is to provide a quick way to turn regular data
+ * into a buffer, and secondly to avoid a copy operation if that
+ * data subsequently turns out to be only accessed by the CPU.
+ *
+ * Common example is OpenGL vertex buffers that are subsequently
+ * processed either by software TNL in the driver or by passing to
+ * hardware.
+ *
+ * XXX: What happens if the delayed call to buffer_create() fails?
+ *
+ * Note that ptr may be accessed at any time up to the time when the
+ * buffer is destroyed, so the data must not be freed before then.
+ */
+ struct pipe_buffer *(*user_buffer_create)(struct pipe_winsys *ws,
+ void *ptr,
+ unsigned bytes);
+
+ /**
+ * Allocate storage for a display target surface.
+ *
+ * Often surfaces which are meant to be blitted to the front screen (i.e.,
+ * display targets) must be allocated with special characteristics, memory
+ * pools, or obtained directly from the windowing system.
+ *
+ * This callback is invoked by the pipe_screen when creating a texture marked
+ * with the PIPE_TEXTURE_USAGE_DISPLAY_TARGET flag to get the underlying
+ * buffer storage.
+ */
+ struct pipe_buffer *(*surface_buffer_create)(struct pipe_winsys *ws,
+ unsigned width, unsigned height,
+ enum pipe_format format,
+ unsigned usage,
+ unsigned tex_usage,
+ unsigned *stride);
+
+
+ /**
+ * Map the entire data store of a buffer object into the client's address space.
+ * flags is bitmask of PIPE_BUFFER_USAGE_CPU_READ/WRITE flags.
+ */
+ void *(*buffer_map)( struct pipe_winsys *ws,
+ struct pipe_buffer *buf,
+ unsigned usage );
+
+ void (*buffer_unmap)( struct pipe_winsys *ws,
+ struct pipe_buffer *buf );
+
+ void (*buffer_destroy)( struct pipe_buffer *buf );
+
+
+ /** Set ptr = fence, with reference counting */
+ void (*fence_reference)( struct pipe_winsys *ws,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence );
+
+ /**
+ * Checks whether the fence has been signalled.
+ * \param flag driver-specific meaning
+ * \return zero if the fence has been signalled.
+ */
+ int (*fence_signalled)( struct pipe_winsys *ws,
+ struct pipe_fence_handle *fence,
+ unsigned flag );
+
+ /**
+ * Wait for the fence to finish.
+ * \param flag driver-specific meaning
+ * \return zero on success.
+ */
+ int (*fence_finish)( struct pipe_winsys *ws,
+ struct pipe_fence_handle *fence,
+ unsigned flag );
+
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* P_WINSYS_H */
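
To make the buffer-management contract above concrete, here is a minimal sketch (not part of the patch) of how a state tracker might create, fill and release a vertex buffer through these callbacks. It assumes the PIPE_BUFFER_USAGE_* flags from p_defines.h, added later in this patch; the helper name is illustrative, and real code would normally release buffers via reference counting rather than calling buffer_destroy directly.

    static void
    example_upload_vertices(struct pipe_winsys *ws,
                            const void *verts, unsigned bytes)
    {
       /* usage is only a hint; an alignment of 16 suits SSE access */
       struct pipe_buffer *buf =
          ws->buffer_create(ws, 16, PIPE_BUFFER_USAGE_VERTEX, bytes);
       void *map;

       if (!buf)
          return;

       map = ws->buffer_map(ws, buf, PIPE_BUFFER_USAGE_CPU_WRITE);
       if (map) {
          memcpy(map, verts, bytes);   /* <string.h> comes in via p_compiler.h */
          ws->buffer_unmap(ws, buf);
       }

       /* ... hand the buffer to the driver here ... */

       ws->buffer_destroy(buf);
    }
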
diff --git a/src/gallium/include/pipe/p_atomic.h b/src/gallium/include/pipe/p_atomic.h
new file mode 100644
index 0000000000..0c3fbae428
--- /dev/null
+++ b/src/gallium/include/pipe/p_atomic.h
@@ -0,0 +1,353 @@
+/**
+ * Many similar implementations exist. See for example libwsbm
+ * or the linux kernel include/atomic.h
+ *
+ * No copyright claimed on this file.
+ *
+ */
+
+#ifndef P_ATOMIC_H
+#define P_ATOMIC_H
+
+#include "p_compiler.h"
+#include "p_defines.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Favor OS-provided implementations.
+ *
+ * Where no OS-provided implementation is available, fall back to
+ * locally coded assembly, compiler intrinsic or ultimately a
+ * mutex-based implementation.
+ */
+#if (defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY) || \
+ defined(PIPE_SUBSYSTEM_WINDOWS_MINIPORT))
+#define PIPE_ATOMIC_OS_UNLOCKED
+#elif (defined(PIPE_CC_MSVC) && defined(PIPE_SUBSYSTEM_WINDOWS_USER))
+#define PIPE_ATOMIC_OS_MS_INTERLOCK
+#elif (defined(PIPE_CC_MSVC) && defined(PIPE_ARCH_X86))
+#define PIPE_ATOMIC_ASM_MSVC_X86
+#elif (defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86))
+#define PIPE_ATOMIC_ASM_GCC_X86
+#elif defined(PIPE_CC_GCC)
+#define PIPE_ATOMIC_GCC_INTRINSIC
+#else
+#define PIPE_ATOMIC_MUTEX
+#endif
+
+
+
+#if defined(PIPE_ATOMIC_ASM_GCC_X86)
+
+#define PIPE_ATOMIC "GCC x86 assembly"
+
+struct pipe_atomic {
+ int32_t count;
+};
+
+#define p_atomic_set(_v, _i) ((_v)->count = (_i))
+#define p_atomic_read(_v) ((_v)->count)
+
+
+static INLINE boolean
+p_atomic_dec_zero(struct pipe_atomic *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__("lock; decl %0; sete %1":"+m"(v->count), "=qm"(c)
+ ::"memory");
+
+ return c != 0;
+}
+
+static INLINE void
+p_atomic_inc(struct pipe_atomic *v)
+{
+ __asm__ __volatile__("lock; incl %0":"+m"(v->count));
+}
+
+static INLINE void
+p_atomic_dec(struct pipe_atomic *v)
+{
+ __asm__ __volatile__("lock; decl %0":"+m"(v->count));
+}
+
+static INLINE int32_t
+p_atomic_cmpxchg(struct pipe_atomic *v, int32_t old, int32_t _new)
+{
+ return __sync_val_compare_and_swap(&v->count, old, _new);
+}
+#endif
+
+
+
+/* Implementation using GCC-provided synchronization intrinsics
+ */
+#if defined(PIPE_ATOMIC_GCC_INTRINSIC)
+
+#define PIPE_ATOMIC "GCC Sync Intrinsics"
+
+struct pipe_atomic {
+ int32_t count;
+};
+
+#define p_atomic_set(_v, _i) ((_v)->count = (_i))
+#define p_atomic_read(_v) ((_v)->count)
+
+
+static INLINE boolean
+p_atomic_dec_zero(struct pipe_atomic *v)
+{
+ return (__sync_sub_and_fetch(&v->count, 1) == 0);
+}
+
+static INLINE void
+p_atomic_inc(struct pipe_atomic *v)
+{
+ (void) __sync_add_and_fetch(&v->count, 1);
+}
+
+static INLINE void
+p_atomic_dec(struct pipe_atomic *v)
+{
+ (void) __sync_sub_and_fetch(&v->count, 1);
+}
+
+static INLINE int32_t
+p_atomic_cmpxchg(struct pipe_atomic *v, int32_t old, int32_t _new)
+{
+ return __sync_val_compare_and_swap(&v->count, old, _new);
+}
+#endif
+
+
+
+/* Unlocked version for single threaded environments, such as some
+ * windows kernel modules.
+ */
+#if defined(PIPE_ATOMIC_OS_UNLOCKED)
+
+#define PIPE_ATOMIC "Unlocked"
+
+struct pipe_atomic
+{
+ int32_t count;
+};
+
+#define p_atomic_set(_v, _i) ((_v)->count = (_i))
+#define p_atomic_read(_v) ((_v)->count)
+#define p_atomic_dec_zero(_v) ((boolean) (--(_v)->count == 0))
+#define p_atomic_inc(_v) ((void) (_v)->count++)
+#define p_atomic_dec(_v) ((void) (_v)->count--)
+#define p_atomic_cmpxchg(_v, old, _new) ((_v)->count == old ? (_v)->count = (_new) : (_v)->count)
+
+#endif
+
+
+/* Locally coded assembly for MSVC on x86:
+ */
+#if defined(PIPE_ATOMIC_ASM_MSVC_X86)
+
+#define PIPE_ATOMIC "MSVC x86 assembly"
+
+struct pipe_atomic
+{
+ int32_t count;
+};
+
+#define p_atomic_set(_v, _i) ((_v)->count = (_i))
+#define p_atomic_read(_v) ((_v)->count)
+
+static INLINE boolean
+p_atomic_dec_zero(struct pipe_atomic *v)
+{
+ int32_t *pcount = &v->count;
+ unsigned char c;
+
+ __asm {
+ mov eax, [pcount]
+ lock dec dword ptr [eax]
+ sete byte ptr [c]
+ }
+
+ return c != 0;
+}
+
+static INLINE void
+p_atomic_inc(struct pipe_atomic *v)
+{
+ int32_t *pcount = &v->count;
+
+ __asm {
+ mov eax, [pcount]
+ lock inc dword ptr [eax]
+ }
+}
+
+static INLINE void
+p_atomic_dec(struct pipe_atomic *v)
+{
+ int32_t *pcount = &v->count;
+
+ __asm {
+ mov eax, [pcount]
+ lock dec dword ptr [eax]
+ }
+}
+
+static INLINE int32_t
+p_atomic_cmpxchg(struct pipe_atomic *v, int32_t old, int32_t _new)
+{
+ int32_t *pcount = &v->count;
+ int32_t orig;
+
+ __asm {
+ mov ecx, [pcount]
+ mov eax, [old]
+ mov edx, [_new]
+ lock cmpxchg [ecx], edx
+ mov [orig], eax
+ }
+
+ return orig;
+}
+#endif
+
+
+#if defined(PIPE_ATOMIC_OS_MS_INTERLOCK)
+
+#define PIPE_ATOMIC "MS userspace interlocks"
+
+#include <windows.h>
+
+struct pipe_atomic
+{
+ volatile long count;
+};
+
+#define p_atomic_set(_v, _i) ((_v)->count = (_i))
+#define p_atomic_read(_v) ((_v)->count)
+
+static INLINE boolean
+p_atomic_dec_zero(struct pipe_atomic *v)
+{
+ return InterlockedDecrement(&v->count) == 0;
+}
+
+static INLINE void
+p_atomic_inc(struct pipe_atomic *v)
+{
+ InterlockedIncrement(&v->count);
+}
+
+static INLINE void
+p_atomic_dec(struct pipe_atomic *v)
+{
+ InterlockedDecrement(&v->count);
+}
+
+static INLINE int32_t
+p_atomic_cmpxchg(struct pipe_atomic *v, int32_t old, int32_t _new)
+{
+ return InterlockedCompareExchange(&v->count, _new, old);
+}
+
+#endif
+
+
+
+#if defined(PIPE_ATOMIC_MUTEX)
+
+#define PIPE_ATOMIC "mutex-based fallback"
+
+#include "pipe/p_thread.h"
+
+/**
+ * This implementation should really not be used;
+ * add an assembly port instead. It may abort and
+ * it does not destroy the mutexes it uses.
+ */
+
+struct pipe_atomic {
+ pipe_mutex mutex;
+ int32_t count;
+};
+
+static INLINE void
+p_atomic_set(struct pipe_atomic *v, int32_t i)
+{
+ pipe_mutex_init(v->mutex);
+ pipe_mutex_lock(v->mutex);
+ v->count = i;
+ pipe_mutex_unlock(v->mutex);
+}
+
+static INLINE int32_t
+p_atomic_read(struct pipe_atomic *v)
+{
+ int32_t ret;
+
+ pipe_mutex_lock(v->mutex);
+ ret = v->count;
+ pipe_mutex_unlock(v->mutex);
+ return ret;
+}
+
+static INLINE void
+p_atomic_inc(struct pipe_atomic *v)
+{
+ pipe_mutex_lock(v->mutex);
+ ++v->count;
+ pipe_mutex_unlock(v->mutex);
+}
+
+static INLINE void
+p_atomic_dec(struct pipe_atomic *v)
+{
+ pipe_mutex_lock(v->mutex);
+ --v->count;
+ pipe_mutex_unlock(v->mutex);
+}
+
+static INLINE boolean
+p_atomic_dec_zero(struct pipe_atomic *v)
+{
+ boolean ret;
+
+ pipe_mutex_lock(v->mutex);
+ ret = (--v->count == 0);
+ pipe_mutex_unlock(v->mutex);
+ return ret;
+}
+
+static INLINE int32_t
+p_atomic_cmpxchg(struct pipe_atomic *v, int32_t old, int32_t _new)
+{
+ int32_t ret;
+
+ pipe_mutex_lock(v->mutex);
+ ret = v->count;
+ if (ret == old)
+ v->count = _new;
+ pipe_mutex_unlock(v->mutex);
+
+ return ret;
+}
+
+#endif
+
+
+#ifndef PIPE_ATOMIC
+#error "No pipe_atomic implementation selected"
+#endif
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* P_ATOMIC_H */
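
All of the backends above expose the same small API (p_atomic_set/read/inc/dec/dec_zero/cmpxchg), which is aimed primarily at reference counting. A minimal sketch of that pattern (not part of the patch; the object type is hypothetical):

    struct example_object {
       struct pipe_atomic refcount;
    };

    static INLINE void
    example_object_init(struct example_object *obj)
    {
       p_atomic_set(&obj->refcount, 1);   /* creator holds the first reference */
    }

    static INLINE void
    example_object_reference(struct example_object *obj)
    {
       p_atomic_inc(&obj->refcount);
    }

    static INLINE void
    example_object_release(struct example_object *obj)
    {
       /* p_atomic_dec_zero() returns TRUE when the count drops to zero,
        * i.e. when the last reference goes away. */
       if (p_atomic_dec_zero(&obj->refcount))
          free(obj);                       /* or the project's FREE() wrapper */
    }
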
diff --git a/src/gallium/include/pipe/p_compiler.h b/src/gallium/include/pipe/p_compiler.h
new file mode 100644
index 0000000000..c13cffceb0
--- /dev/null
+++ b/src/gallium/include/pipe/p_compiler.h
@@ -0,0 +1,179 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef P_COMPILER_H
+#define P_COMPILER_H
+
+
+#include "p_config.h"
+
+#ifndef XFree86Server
+#include <stdlib.h>
+#include <string.h>
+#else
+#include "xf86_ansic.h"
+#include "xf86_libc.h"
+#endif
+
+
+#if defined(_WIN32) && !defined(__WIN32__)
+#define __WIN32__
+#endif
+
+#if defined(_MSC_VER)
+
+/* Avoid 'expression is always true' warning */
+#pragma warning(disable: 4296)
+
+#endif /* _MSC_VER */
+
+
+#if defined(_MSC_VER)
+
+typedef __int8 int8_t;
+typedef unsigned __int8 uint8_t;
+typedef __int16 int16_t;
+typedef unsigned __int16 uint16_t;
+#ifndef __eglplatform_h_
+typedef __int32 int32_t;
+#endif
+typedef unsigned __int32 uint32_t;
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+
+#if defined(_WIN64)
+typedef __int64 intptr_t;
+typedef unsigned __int64 uintptr_t;
+#else
+typedef __int32 intptr_t;
+typedef unsigned __int32 uintptr_t;
+#endif
+
+#define INT64_C(__val) __val##i64
+#define UINT64_C(__val) __val##ui64
+
+#ifndef __cplusplus
+#define false 0
+#define true 1
+#define bool _Bool
+typedef int _Bool;
+#define __bool_true_false_are_defined 1
+#endif /* !__cplusplus */
+
+#else
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS 1
+#endif
+#include <stdint.h>
+#include <stdbool.h>
+#endif
+
+
+#ifndef __HAIKU__
+typedef unsigned int uint;
+typedef unsigned short ushort;
+#endif
+typedef unsigned char ubyte;
+
+#if 0
+#define boolean bool
+#else
+typedef unsigned char boolean;
+#endif
+#ifndef TRUE
+#define TRUE true
+#endif
+#ifndef FALSE
+#define FALSE false
+#endif
+
+
+/* Function inlining */
+#ifndef INLINE
+# ifdef __cplusplus
+# define INLINE inline
+# elif defined(__GNUC__)
+# define INLINE __inline__
+# elif defined(_MSC_VER)
+# define INLINE __inline
+# elif defined(__ICL)
+# define INLINE __inline
+# elif defined(__INTEL_COMPILER)
+# define INLINE inline
+# elif defined(__WATCOMC__) && (__WATCOMC__ >= 1100)
+# define INLINE __inline
+# elif defined(__SUNPRO_C) && defined(__C99FEATURES__)
+# define INLINE inline
+# elif (__STDC_VERSION__ >= 199901L) /* C99 */
+# define INLINE inline
+# else
+# define INLINE
+# endif
+#endif
+
+/* The __FUNCTION__ gcc variable is generally only used for debugging.
+ * If we're not using gcc, define __FUNCTION__ as a cpp symbol here.
+ */
+#ifndef __FUNCTION__
+# if (!defined(__GNUC__) || (__GNUC__ < 2))
+# if (__STDC_VERSION__ >= 199901L) /* C99 */ || \
+ (defined(__SUNPRO_C) && defined(__C99FEATURES__))
+# define __FUNCTION__ __func__
+# else
+# define __FUNCTION__ "<unknown>"
+# endif
+# endif
+#endif
+
+
+
+/* This should match linux gcc cdecl semantics everywhere, so that we
+ * just codegen one calling convention on all platforms.
+ */
+#ifdef _MSC_VER
+#define PIPE_CDECL __cdecl
+#else
+#define PIPE_CDECL
+#endif
+
+
+
+#if defined(__GNUC__)
+#define ALIGN16_DECL(TYPE, NAME, SIZE) TYPE NAME##___aligned[SIZE] __attribute__(( aligned( 16 ) ))
+#define ALIGN16_ASSIGN(NAME) NAME##___aligned
+#define ALIGN16_ATTRIB __attribute__(( aligned( 16 ) ))
+#define ALIGN8_ATTRIB __attribute__(( aligned( 8 ) ))
+#else
+#define ALIGN16_DECL(TYPE, NAME, SIZE) TYPE NAME##___unaligned[SIZE + 1]
+#define ALIGN16_ASSIGN(NAME) align16(NAME##___unaligned)
+#define ALIGN16_ATTRIB
+#define ALIGN8_ATTRIB
+#endif
+
+
+
+#endif /* P_COMPILER_H */
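
A minimal sketch (not part of the patch) of how the portability helpers above are meant to be used: INLINE for functions defined in headers, boolean/TRUE/FALSE for truth values, and ALIGN16_ATTRIB for data that SSE code will load. The names are illustrative only.

    struct example_vec4 {
       float v[4] ALIGN16_ATTRIB;   /* 16-byte aligned for SIMD access */
    };

    static INLINE boolean
    example_is_pow_two(uint x)
    {
       return (boolean) (x != 0 && (x & (x - 1)) == 0);
    }
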
diff --git a/src/gallium/include/pipe/p_config.h b/src/gallium/include/pipe/p_config.h
new file mode 100644
index 0000000000..de99957d9d
--- /dev/null
+++ b/src/gallium/include/pipe/p_config.h
@@ -0,0 +1,176 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Gallium configuration defines.
+ *
+ * This header file sets several defines based on the compiler, processor
+ * architecture, and operating system being used. These defines should be used
+ * throughout the code to facilitate porting to new platforms. It is likely
+ * that this file will be auto-generated by an autoconf-like tool at some point,
+ * as some things cannot be determined from the pre-defined environment alone.
+ *
+ * See also:
+ * - http://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html
+ * - echo | gcc -dM -E - | sort
+ * - http://msdn.microsoft.com/en-us/library/b0084kay.aspx
+ *
+ * @author José Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef P_CONFIG_H_
+#define P_CONFIG_H_
+
+
+/*
+ * Compiler
+ */
+
+#if defined(__GNUC__)
+#define PIPE_CC_GCC
+#endif
+
+/*
+ * Meaning of _MSC_VER value:
+ * - 1400: Visual C++ 2005
+ * - 1310: Visual C++ .NET 2003
+ * - 1300: Visual C++ .NET 2002
+ *
+ * __MSC__ seems to be an old macro -- it is not pre-defined on recent MSVC
+ * versions.
+ */
+#if defined(_MSC_VER) || defined(__MSC__)
+#define PIPE_CC_MSVC
+#endif
+
+#if defined(__ICL)
+#define PIPE_CC_ICL
+#endif
+
+
+/*
+ * Processor architecture
+ */
+
+#if defined(__i386__) /* gcc */ || defined(_M_IX86) /* msvc */ || defined(_X86_) || defined(__386__) || defined(i386) || defined(__i386) /* Sun cc */
+#define PIPE_ARCH_X86
+#endif
+
+#if defined(__x86_64__) /* gcc */ || defined(_M_X64) /* msvc */ || defined(_M_AMD64) /* msvc */ || defined(__x86_64) /* Sun cc */
+#define PIPE_ARCH_X86_64
+#endif
+
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+#if defined(PIPE_CC_GCC) && !defined(__SSE2__)
+/* #warning SSE2 support requires -msse -msse2 compiler options */
+#else
+#define PIPE_ARCH_SSE
+#endif
+#endif
+
+#if defined(__PPC__)
+#define PIPE_ARCH_PPC
+#if defined(__PPC64__)
+#define PIPE_ARCH_PPC_64
+#endif
+#endif
+
+
+/*
+ * Endian detection.
+ */
+
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+#define PIPE_ARCH_LITTLE_ENDIAN
+#elif defined(PIPE_ARCH_PPC) || defined(PIPE_ARCH_PPC_64)
+#define PIPE_ARCH_BIG_ENDIAN
+#else
+#define PIPE_ARCH_UNKNOWN_ENDIAN
+#endif
+
+
+/*
+ * Operating system family.
+ *
+ * See subsystem below for a more fine-grained distinction.
+ */
+
+#if defined(__linux__)
+#define PIPE_OS_LINUX
+#endif
+
+#if defined(__FreeBSD__)
+#define PIPE_OS_BSD
+#endif
+
+#if defined(__sun)
+#define PIPE_OS_SOLARIS
+#endif
+
+#if defined(__APPLE__)
+#define PIPE_OS_APPLE
+#endif
+
+#if defined(_WIN32) || defined(WIN32)
+#define PIPE_OS_WINDOWS
+#endif
+
+#if defined(__HAIKU__)
+#define PIPE_OS_HAIKU
+#endif
+
+/*
+ * Subsystem.
+ *
+ * NOTE: There is no way to auto-detect most of these.
+ */
+
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
+#define PIPE_SUBSYSTEM_DRI
+#endif /* PIPE_OS_LINUX || PIPE_OS_BSD || PIPE_OS_SOLARIS */
+
+#if defined(PIPE_OS_WINDOWS)
+#if defined(PIPE_SUBSYSTEM_WINDOWS_DISPLAY)
+/* Windows 2000/XP Display Driver */
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_MINIPORT)
+/* Windows 2000/XP Miniport Driver */
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+/* Windows User-space Library */
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_CE)
+/* Windows CE 5.0/6.0 */
+#else
+#ifdef _WIN32_WCE
+#define PIPE_SUBSYSTEM_WINDOWS_CE
+#else /* !_WIN32_WCE */
+#error No PIPE_SUBSYSTEM_WINDOWS_xxx subsystem defined.
+#endif /* !_WIN32_WCE */
+#endif
+#endif /* PIPE_OS_WINDOWS */
+
+
+#endif /* P_CONFIG_H_ */
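
Code elsewhere in the tree is expected to test these PIPE_CC_xxx / PIPE_ARCH_xxx / PIPE_OS_xxx symbols rather than raw compiler macros. A minimal, purely illustrative sketch (not part of the patch):

    static INLINE const char *
    example_platform_name(void)
    {
    #if defined(PIPE_OS_WINDOWS)
       return "windows";
    #elif defined(PIPE_OS_LINUX)
       return "linux";
    #elif defined(PIPE_OS_APPLE)
       return "darwin";
    #else
       return "other";
    #endif
    }
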
diff --git a/src/gallium/include/pipe/p_context.h b/src/gallium/include/pipe/p_context.h
new file mode 100644
index 0000000000..39620a7198
--- /dev/null
+++ b/src/gallium/include/pipe/p_context.h
@@ -0,0 +1,271 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef PIPE_CONTEXT_H
+#define PIPE_CONTEXT_H
+
+#include "p_state.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct pipe_screen;
+struct pipe_fence_handle;
+struct pipe_state_cache;
+struct pipe_query;
+struct pipe_winsys;
+
+/**
+ * Gallium rendering context. Basically:
+ * - state setting functions
+ * - VBO drawing functions
+ * - surface functions
+ */
+struct pipe_context {
+ struct pipe_winsys *winsys;
+ struct pipe_screen *screen;
+
+ void *priv; /**< context private data (for DRI for example) */
+ void *draw; /**< private, for draw module (temporary?) */
+
+ void (*destroy)( struct pipe_context * );
+
+
+ /* Possible interface for setting edgeflags. These aren't really
+ * vertex elements, so they don't fit there.
+ */
+ void (*set_edgeflags)( struct pipe_context *,
+ const unsigned *bitfield );
+
+
+ /**
+ * VBO drawing (return false on fallbacks (temporary??))
+ */
+ /*@{*/
+ boolean (*draw_arrays)( struct pipe_context *pipe,
+ unsigned mode, unsigned start, unsigned count);
+
+ boolean (*draw_elements)( struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned mode, unsigned start, unsigned count);
+
+ /* XXX: this is (probably) a temporary entrypoint, as the range
+ * information should be available from the vertex_buffer state.
+ * Using this to quickly evaluate a specialized path in the draw
+ * module.
+ */
+ boolean (*draw_range_elements)( struct pipe_context *pipe,
+ struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned minIndex,
+ unsigned maxIndex,
+ unsigned mode,
+ unsigned start,
+ unsigned count);
+ /*@}*/
+
+
+ /**
+ * Query objects
+ */
+ /*@{*/
+ struct pipe_query *(*create_query)( struct pipe_context *pipe,
+ unsigned query_type );
+
+ void (*destroy_query)(struct pipe_context *pipe,
+ struct pipe_query *q);
+
+ void (*begin_query)(struct pipe_context *pipe, struct pipe_query *q);
+ void (*end_query)(struct pipe_context *pipe, struct pipe_query *q);
+
+ boolean (*get_query_result)(struct pipe_context *pipe,
+ struct pipe_query *q,
+ boolean wait,
+ uint64_t *result);
+ /*@}*/
+
+ /**
+ * State functions (create/bind/destroy state objects)
+ */
+ /*@{*/
+ void * (*create_blend_state)(struct pipe_context *,
+ const struct pipe_blend_state *);
+ void (*bind_blend_state)(struct pipe_context *, void *);
+ void (*delete_blend_state)(struct pipe_context *, void *);
+
+ void * (*create_sampler_state)(struct pipe_context *,
+ const struct pipe_sampler_state *);
+ void (*bind_sampler_states)(struct pipe_context *, unsigned num, void **);
+ void (*delete_sampler_state)(struct pipe_context *, void *);
+
+ void * (*create_rasterizer_state)(struct pipe_context *,
+ const struct pipe_rasterizer_state *);
+ void (*bind_rasterizer_state)(struct pipe_context *, void *);
+ void (*delete_rasterizer_state)(struct pipe_context *, void *);
+
+ void * (*create_depth_stencil_alpha_state)(struct pipe_context *,
+ const struct pipe_depth_stencil_alpha_state *);
+ void (*bind_depth_stencil_alpha_state)(struct pipe_context *, void *);
+ void (*delete_depth_stencil_alpha_state)(struct pipe_context *, void *);
+
+ void * (*create_fs_state)(struct pipe_context *,
+ const struct pipe_shader_state *);
+ void (*bind_fs_state)(struct pipe_context *, void *);
+ void (*delete_fs_state)(struct pipe_context *, void *);
+
+ void * (*create_vs_state)(struct pipe_context *,
+ const struct pipe_shader_state *);
+ void (*bind_vs_state)(struct pipe_context *, void *);
+ void (*delete_vs_state)(struct pipe_context *, void *);
+ /*@}*/
+
+ /**
+ * Parameter-like state (or properties)
+ */
+ /*@{*/
+ void (*set_blend_color)( struct pipe_context *,
+ const struct pipe_blend_color * );
+
+ void (*set_clip_state)( struct pipe_context *,
+ const struct pipe_clip_state * );
+
+ void (*set_constant_buffer)( struct pipe_context *,
+ uint shader, uint index,
+ const struct pipe_constant_buffer *buf );
+
+ void (*set_framebuffer_state)( struct pipe_context *,
+ const struct pipe_framebuffer_state * );
+
+ void (*set_polygon_stipple)( struct pipe_context *,
+ const struct pipe_poly_stipple * );
+
+ void (*set_scissor_state)( struct pipe_context *,
+ const struct pipe_scissor_state * );
+
+ void (*set_viewport_state)( struct pipe_context *,
+ const struct pipe_viewport_state * );
+
+ void (*set_sampler_textures)( struct pipe_context *,
+ unsigned num_textures,
+ struct pipe_texture ** );
+
+ void (*set_vertex_buffers)( struct pipe_context *,
+ unsigned num_buffers,
+ const struct pipe_vertex_buffer * );
+
+ void (*set_vertex_elements)( struct pipe_context *,
+ unsigned num_elements,
+ const struct pipe_vertex_element * );
+ /*@}*/
+
+
+ /**
+ * Surface functions
+ */
+ /*@{*/
+
+ /**
+ * Copy a block of pixels from one surface to another.
+ * The surfaces must be of the same format.
+ */
+ void (*surface_copy)(struct pipe_context *pipe,
+ struct pipe_surface *dest,
+ unsigned destx, unsigned desty,
+ struct pipe_surface *src,
+ unsigned srcx, unsigned srcy,
+ unsigned width, unsigned height);
+
+ /**
+ * Fill a region of a surface with a constant value.
+ */
+ void (*surface_fill)(struct pipe_context *pipe,
+ struct pipe_surface *dst,
+ unsigned dstx, unsigned dsty,
+ unsigned width, unsigned height,
+ unsigned value);
+ /*@}*/
+
+ /**
+ * Clear the specified set of currently bound buffers to specified values.
+ * The entire buffers are cleared (no scissor, no colormask, etc).
+ *
+ * \param buffers bitfield of PIPE_CLEAR_* values.
+ * \param rgba pointer to an array of one float for each of r, g, b, a.
+ * \param depth depth clear value in [0,1].
+ * \param stencil stencil clear value
+ */
+ void (*clear)(struct pipe_context *pipe,
+ unsigned buffers,
+ const float *rgba,
+ double depth,
+ unsigned stencil);
+
+ /** Flush rendering
+ * \param flags bitmask of PIPE_FLUSH_x tokens
+ */
+ void (*flush)( struct pipe_context *pipe,
+ unsigned flags,
+ struct pipe_fence_handle **fence );
+
+ /**
+ * Check whether a texture is referenced by an unflushed hw command.
+ * The state-tracker uses this function to optimize away unnecessary
+ * flushes. It is safe (but wasteful) to always return
+ * PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE.
+ * \param pipe The pipe context whose unflushed hw commands will be
+ * checked.
+ * \param level mipmap level.
+ * \param texture texture to check.
+ * \param face cubemap face. Use 0 for non-cubemap texture.
+ */
+ unsigned int (*is_texture_referenced) (struct pipe_context *pipe,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level);
+
+ /**
+ * Check whether a buffer is referenced by an unflushed hw command.
+ * The state-tracker uses this function to optimize away unnecessary
+ * flushes. It is safe (but wasteful) to always return
+ * PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE.
+ * \param pipe The pipe context whose unflushed hw commands will be
+ * checked.
+ * \param buf Buffer to check.
+ */
+ unsigned int (*is_buffer_referenced) (struct pipe_context *pipe,
+ struct pipe_buffer *buf);
+};
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PIPE_CONTEXT_H */
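
The state-setting entrypoints above follow a create/bind/delete pattern: state objects are created once from an immutable template, bound when needed, and deleted by the same context. A minimal sketch under that assumption (not part of the patch; the function name is illustrative, and PIPE_PRIM_TRIANGLES / PIPE_FLUSH_RENDER_CACHE come from p_defines.h):

    static void
    example_draw_with_blend(struct pipe_context *pipe,
                            const struct pipe_blend_state *blend_templ)
    {
       void *blend = pipe->create_blend_state(pipe, blend_templ);

       pipe->bind_blend_state(pipe, blend);
       pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, 0, 3);
       pipe->flush(pipe, PIPE_FLUSH_RENDER_CACHE, NULL);

       /* real code would bind some other blend state before deleting this one */
       pipe->delete_blend_state(pipe, blend);
    }
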
diff --git a/src/gallium/include/pipe/p_defines.h b/src/gallium/include/pipe/p_defines.h
new file mode 100644
index 0000000000..b01ab6d137
--- /dev/null
+++ b/src/gallium/include/pipe/p_defines.h
@@ -0,0 +1,322 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef PIPE_DEFINES_H
+#define PIPE_DEFINES_H
+
+#include "p_format.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define PIPE_BLENDFACTOR_ONE 0x1
+#define PIPE_BLENDFACTOR_SRC_COLOR 0x2
+#define PIPE_BLENDFACTOR_SRC_ALPHA 0x3
+#define PIPE_BLENDFACTOR_DST_ALPHA 0x4
+#define PIPE_BLENDFACTOR_DST_COLOR 0x5
+#define PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE 0x6
+#define PIPE_BLENDFACTOR_CONST_COLOR 0x7
+#define PIPE_BLENDFACTOR_CONST_ALPHA 0x8
+#define PIPE_BLENDFACTOR_SRC1_COLOR 0x9
+#define PIPE_BLENDFACTOR_SRC1_ALPHA 0x0A
+#define PIPE_BLENDFACTOR_ZERO 0x11
+#define PIPE_BLENDFACTOR_INV_SRC_COLOR 0x12
+#define PIPE_BLENDFACTOR_INV_SRC_ALPHA 0x13
+#define PIPE_BLENDFACTOR_INV_DST_ALPHA 0x14
+#define PIPE_BLENDFACTOR_INV_DST_COLOR 0x15
+#define PIPE_BLENDFACTOR_INV_CONST_COLOR 0x17
+#define PIPE_BLENDFACTOR_INV_CONST_ALPHA 0x18
+#define PIPE_BLENDFACTOR_INV_SRC1_COLOR 0x19
+#define PIPE_BLENDFACTOR_INV_SRC1_ALPHA 0x1A
+
+#define PIPE_BLEND_ADD 0
+#define PIPE_BLEND_SUBTRACT 1
+#define PIPE_BLEND_REVERSE_SUBTRACT 2
+#define PIPE_BLEND_MIN 3
+#define PIPE_BLEND_MAX 4
+
+#define PIPE_LOGICOP_CLEAR 0
+#define PIPE_LOGICOP_NOR 1
+#define PIPE_LOGICOP_AND_INVERTED 2
+#define PIPE_LOGICOP_COPY_INVERTED 3
+#define PIPE_LOGICOP_AND_REVERSE 4
+#define PIPE_LOGICOP_INVERT 5
+#define PIPE_LOGICOP_XOR 6
+#define PIPE_LOGICOP_NAND 7
+#define PIPE_LOGICOP_AND 8
+#define PIPE_LOGICOP_EQUIV 9
+#define PIPE_LOGICOP_NOOP 10
+#define PIPE_LOGICOP_OR_INVERTED 11
+#define PIPE_LOGICOP_COPY 12
+#define PIPE_LOGICOP_OR_REVERSE 13
+#define PIPE_LOGICOP_OR 14
+#define PIPE_LOGICOP_SET 15
+
+#define PIPE_MASK_R 0x1
+#define PIPE_MASK_G 0x2
+#define PIPE_MASK_B 0x4
+#define PIPE_MASK_A 0x8
+#define PIPE_MASK_RGBA 0xf
+
+
+/**
+ * Inequality functions. Used for depth test, stencil compare, alpha
+ * test, shadow compare, etc.
+ */
+#define PIPE_FUNC_NEVER 0
+#define PIPE_FUNC_LESS 1
+#define PIPE_FUNC_EQUAL 2
+#define PIPE_FUNC_LEQUAL 3
+#define PIPE_FUNC_GREATER 4
+#define PIPE_FUNC_NOTEQUAL 5
+#define PIPE_FUNC_GEQUAL 6
+#define PIPE_FUNC_ALWAYS 7
+
+/** Polygon fill mode */
+#define PIPE_POLYGON_MODE_FILL 0
+#define PIPE_POLYGON_MODE_LINE 1
+#define PIPE_POLYGON_MODE_POINT 2
+
+/** Polygon front/back window, also for culling */
+#define PIPE_WINDING_NONE 0
+#define PIPE_WINDING_CW 1
+#define PIPE_WINDING_CCW 2
+#define PIPE_WINDING_BOTH (PIPE_WINDING_CW | PIPE_WINDING_CCW)
+
+/** Stencil ops */
+#define PIPE_STENCIL_OP_KEEP 0
+#define PIPE_STENCIL_OP_ZERO 1
+#define PIPE_STENCIL_OP_REPLACE 2
+#define PIPE_STENCIL_OP_INCR 3
+#define PIPE_STENCIL_OP_DECR 4
+#define PIPE_STENCIL_OP_INCR_WRAP 5
+#define PIPE_STENCIL_OP_DECR_WRAP 6
+#define PIPE_STENCIL_OP_INVERT 7
+
+/** Texture types */
+enum pipe_texture_target {
+ PIPE_TEXTURE_1D = 0,
+ PIPE_TEXTURE_2D = 1,
+ PIPE_TEXTURE_3D = 2,
+ PIPE_TEXTURE_CUBE = 3
+};
+
+#define PIPE_TEX_FACE_POS_X 0
+#define PIPE_TEX_FACE_NEG_X 1
+#define PIPE_TEX_FACE_POS_Y 2
+#define PIPE_TEX_FACE_NEG_Y 3
+#define PIPE_TEX_FACE_POS_Z 4
+#define PIPE_TEX_FACE_NEG_Z 5
+#define PIPE_TEX_FACE_MAX 6
+
+#define PIPE_TEX_WRAP_REPEAT 0
+#define PIPE_TEX_WRAP_CLAMP 1
+#define PIPE_TEX_WRAP_CLAMP_TO_EDGE 2
+#define PIPE_TEX_WRAP_CLAMP_TO_BORDER 3
+#define PIPE_TEX_WRAP_MIRROR_REPEAT 4
+#define PIPE_TEX_WRAP_MIRROR_CLAMP 5
+#define PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE 6
+#define PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER 7
+
+/* Between mipmaps, ie mipfilter
+ */
+#define PIPE_TEX_MIPFILTER_NEAREST 0
+#define PIPE_TEX_MIPFILTER_LINEAR 1
+#define PIPE_TEX_MIPFILTER_NONE 2
+
+/* Within a mipmap, ie min/mag filter
+ */
+#define PIPE_TEX_FILTER_NEAREST 0
+#define PIPE_TEX_FILTER_LINEAR 1
+#define PIPE_TEX_FILTER_ANISO 2
+
+
+#define PIPE_TEX_COMPARE_NONE 0
+#define PIPE_TEX_COMPARE_R_TO_TEXTURE 1
+
+#define PIPE_TEXTURE_USAGE_RENDER_TARGET 0x1
+#define PIPE_TEXTURE_USAGE_DISPLAY_TARGET 0x2 /* ie a backbuffer */
+#define PIPE_TEXTURE_USAGE_PRIMARY 0x4 /* ie a frontbuffer */
+#define PIPE_TEXTURE_USAGE_DEPTH_STENCIL 0x8
+#define PIPE_TEXTURE_USAGE_SAMPLER 0x10
+#define PIPE_TEXTURE_USAGE_DYNAMIC 0x20
+/** Pipe driver custom usage flags should be greater or equal to this value */
+#define PIPE_TEXTURE_USAGE_CUSTOM (1 << 16)
+
+#define PIPE_TEXTURE_GEOM_NON_SQUARE 0x1
+#define PIPE_TEXTURE_GEOM_NON_POWER_OF_TWO 0x2
+
+
+/**
+ * Surface layout
+ */
+#define PIPE_SURFACE_LAYOUT_LINEAR 0
+
+
+/**
+ * Clear buffer bits
+ */
+/** All color buffers currently bound */
+#define PIPE_CLEAR_COLOR (1 << 0)
+/** Depth/stencil combined */
+#define PIPE_CLEAR_DEPTHSTENCIL (1 << 1)
+
+
+/**
+ * Transfer object usage flags
+ */
+enum pipe_transfer_usage {
+ PIPE_TRANSFER_READ = (1 << 0),
+ PIPE_TRANSFER_WRITE = (1 << 1),
+ PIPE_TRANSFER_READ_WRITE = PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE /**< Read/modify/write */
+};
+
+
+/**
+ * Buffer usage flags
+ */
+#define PIPE_BUFFER_USAGE_CPU_READ (1 << 0)
+#define PIPE_BUFFER_USAGE_CPU_WRITE (1 << 1)
+#define PIPE_BUFFER_USAGE_GPU_READ (1 << 2)
+#define PIPE_BUFFER_USAGE_GPU_WRITE (1 << 3)
+#define PIPE_BUFFER_USAGE_PIXEL (1 << 4)
+#define PIPE_BUFFER_USAGE_VERTEX (1 << 5)
+#define PIPE_BUFFER_USAGE_INDEX (1 << 6)
+#define PIPE_BUFFER_USAGE_CONSTANT (1 << 7)
+#define PIPE_BUFFER_USAGE_DISCARD (1 << 8)
+#define PIPE_BUFFER_USAGE_DONTBLOCK (1 << 9)
+#define PIPE_BUFFER_USAGE_FLUSH_EXPLICIT (1 << 10) /**< See pipe_screen::buffer_flush_mapped_range */
+/** Pipe driver custom usage flags should be greater or equal to this value */
+#define PIPE_BUFFER_USAGE_CUSTOM (1 << 16)
+
+/* Convenient shortcuts */
+#define PIPE_BUFFER_USAGE_CPU_READ_WRITE \
+ ( PIPE_BUFFER_USAGE_CPU_READ | PIPE_BUFFER_USAGE_CPU_WRITE )
+#define PIPE_BUFFER_USAGE_GPU_READ_WRITE \
+ ( PIPE_BUFFER_USAGE_GPU_READ | PIPE_BUFFER_USAGE_GPU_WRITE )
+#define PIPE_BUFFER_USAGE_WRITE \
+ ( PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_GPU_WRITE )
+
+
+/**
+ * Flush types:
+ */
+#define PIPE_FLUSH_RENDER_CACHE 0x1
+#define PIPE_FLUSH_TEXTURE_CACHE 0x2
+#define PIPE_FLUSH_SWAPBUFFERS 0x4
+#define PIPE_FLUSH_FRAME 0x8 /**< Mark the end of a frame */
+
+
+/**
+ * Shaders
+ */
+#define PIPE_SHADER_VERTEX 0
+#define PIPE_SHADER_FRAGMENT 1
+#define PIPE_SHADER_TYPES 2
+
+
+/**
+ * Primitive types:
+ */
+#define PIPE_PRIM_POINTS 0
+#define PIPE_PRIM_LINES 1
+#define PIPE_PRIM_LINE_LOOP 2
+#define PIPE_PRIM_LINE_STRIP 3
+#define PIPE_PRIM_TRIANGLES 4
+#define PIPE_PRIM_TRIANGLE_STRIP 5
+#define PIPE_PRIM_TRIANGLE_FAN 6
+#define PIPE_PRIM_QUADS 7
+#define PIPE_PRIM_QUAD_STRIP 8
+#define PIPE_PRIM_POLYGON 9
+#define PIPE_PRIM_MAX 10
+
+
+/**
+ * Query object types
+ */
+#define PIPE_QUERY_OCCLUSION_COUNTER 0
+#define PIPE_QUERY_PRIMITIVES_GENERATED 1
+#define PIPE_QUERY_PRIMITIVES_EMITTED 2
+#define PIPE_QUERY_TYPES 3
+
+
+/**
+ * Point sprite coord modes
+ */
+#define PIPE_SPRITE_COORD_NONE 0
+#define PIPE_SPRITE_COORD_UPPER_LEFT 1
+#define PIPE_SPRITE_COORD_LOWER_LEFT 2
+
+
+/**
+ * Implementation capabilities/limits which are queried through
+ * pipe_screen::get_param() and pipe_screen::get_paramf().
+ */
+#define PIPE_CAP_MAX_TEXTURE_IMAGE_UNITS 1
+#define PIPE_CAP_NPOT_TEXTURES 2
+#define PIPE_CAP_TWO_SIDED_STENCIL 3
+#define PIPE_CAP_GLSL 4 /* XXX need something better */
+#define PIPE_CAP_S3TC 5
+#define PIPE_CAP_ANISOTROPIC_FILTER 6
+#define PIPE_CAP_POINT_SPRITE 7
+#define PIPE_CAP_MAX_RENDER_TARGETS 8
+#define PIPE_CAP_OCCLUSION_QUERY 9
+#define PIPE_CAP_TEXTURE_SHADOW_MAP 10
+#define PIPE_CAP_MAX_TEXTURE_2D_LEVELS 11
+#define PIPE_CAP_MAX_TEXTURE_3D_LEVELS 12
+#define PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS 13
+#define PIPE_CAP_MAX_LINE_WIDTH 14
+#define PIPE_CAP_MAX_LINE_WIDTH_AA 15
+#define PIPE_CAP_MAX_POINT_WIDTH 16
+#define PIPE_CAP_MAX_POINT_WIDTH_AA 17
+#define PIPE_CAP_MAX_TEXTURE_ANISOTROPY 18
+#define PIPE_CAP_MAX_TEXTURE_LOD_BIAS 19
+#define PIPE_CAP_GUARD_BAND_LEFT 20 /*< float */
+#define PIPE_CAP_GUARD_BAND_TOP 21 /*< float */
+#define PIPE_CAP_GUARD_BAND_RIGHT 22 /*< float */
+#define PIPE_CAP_GUARD_BAND_BOTTOM 23 /*< float */
+#define PIPE_CAP_TEXTURE_MIRROR_CLAMP 24
+#define PIPE_CAP_TEXTURE_MIRROR_REPEAT 25
+#define PIPE_CAP_MAX_VERTEX_TEXTURE_UNITS 26
+#define PIPE_CAP_TGSI_CONT_SUPPORTED 27
+#define PIPE_CAP_BLEND_EQUATION_SEPARATE 28
+
+
+/**
+ * Referenced query flags.
+ */
+
+#define PIPE_UNREFERENCED 0
+#define PIPE_REFERENCED_FOR_READ (1 << 0)
+#define PIPE_REFERENCED_FOR_WRITE (1 << 1)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
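
Most of the values above are either plain enumerations or single-bit flags meant to be OR'd together and tested against a mask. A minimal sketch (not part of the patch) using the referenced-query flags, as returned by pipe_context::is_texture_referenced():

    static INLINE boolean
    example_texture_needs_flush(unsigned referenced)
    {
       /* a pending GPU write means the data cannot be mapped safely yet */
       return (boolean) ((referenced & PIPE_REFERENCED_FOR_WRITE) != 0);
    }
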
diff --git a/src/gallium/include/pipe/p_error.h b/src/gallium/include/pipe/p_error.h
new file mode 100644
index 0000000000..b865b22635
--- /dev/null
+++ b/src/gallium/include/pipe/p_error.h
@@ -0,0 +1,65 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Gallium error codes.
+ *
+ * @author José Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+#ifndef P_ERROR_H_
+#define P_ERROR_H_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Gallium error codes.
+ *
+ * - A zero value always means success.
+ * - A negative value always means failure.
+ * - The meaning of a positive value is function dependent.
+ */
+enum pipe_error {
+ PIPE_OK = 0,
+ PIPE_ERROR = -1, /**< Generic error */
+ PIPE_ERROR_BAD_INPUT = -2,
+ PIPE_ERROR_OUT_OF_MEMORY = -3,
+ PIPE_ERROR_RETRY = -4
+ /* TODO */
+};
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* P_ERROR_H_ */
diff --git a/src/gallium/include/pipe/p_format.h b/src/gallium/include/pipe/p_format.h
new file mode 100644
index 0000000000..c4469d4a9e
--- /dev/null
+++ b/src/gallium/include/pipe/p_format.h
@@ -0,0 +1,620 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef PIPE_FORMAT_H
+#define PIPE_FORMAT_H
+
+#include "p_compiler.h"
+
+/* FIXME: remove these header dependencies */
+#include "util/u_debug.h"
+#include "util/u_string.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * The pipe_format enum is a 32-bit wide bitfield that encodes all the
+ * information needed to uniquely describe a pixel format.
+ */
+
+/**
+ * Possible format layouts are encoded in the first 2 bits.
+ * The interpretation of the remaining 30 bits depends on a particular
+ * format layout.
+ */
+#define PIPE_FORMAT_LAYOUT_RGBAZS 0
+#define PIPE_FORMAT_LAYOUT_YCBCR 1
+#define PIPE_FORMAT_LAYOUT_DXT 2 /**< XXX temporary? */
+#define PIPE_FORMAT_LAYOUT_MIXED 3
+
+static INLINE uint pf_layout(uint f) /**< PIPE_FORMAT_LAYOUT_ */
+{
+ return f & 0x3;
+}
+
+/**
+ * RGBAZS Format Layout.
+ */
+
+/**
+ * Format component selectors for RGBAZS & MIXED layout.
+ */
+#define PIPE_FORMAT_COMP_R 0
+#define PIPE_FORMAT_COMP_G 1
+#define PIPE_FORMAT_COMP_B 2
+#define PIPE_FORMAT_COMP_A 3
+#define PIPE_FORMAT_COMP_0 4
+#define PIPE_FORMAT_COMP_1 5
+#define PIPE_FORMAT_COMP_Z 6
+#define PIPE_FORMAT_COMP_S 7
+
+/**
+ * Format types for RGBAZS layout.
+ */
+#define PIPE_FORMAT_TYPE_UNKNOWN 0
+#define PIPE_FORMAT_TYPE_FLOAT 1 /**< 16/32/64-bit/channel formats */
+#define PIPE_FORMAT_TYPE_UNORM 2 /**< uints, normalized to [0,1] */
+#define PIPE_FORMAT_TYPE_SNORM 3 /**< ints, normalized to [-1,1] */
+#define PIPE_FORMAT_TYPE_USCALED 4 /**< uints, not normalized */
+#define PIPE_FORMAT_TYPE_SSCALED 5 /**< ints, not normalized */
+#define PIPE_FORMAT_TYPE_SRGB 6 /**< sRGB colorspace */
+#define PIPE_FORMAT_TYPE_FIXED 7 /**< 16.16 fixed point */
+
+
+/**
+ * Because the destination vector is assumed to be RGBA FLOAT, we
+ * need to know how to swizzle and expand components from the source
+ * vector.
+ * Let's take U_A1_R5_G5_B5 as an example. The X swizzle is A, the X size
+ * is 1 bit and the type is UNORM. So we take the most significant bit
+ * from the source vector, convert 0 to 0.0 and 1 to 1.0, and save it
+ * in the last component of the destination RGBA vector.
+ * Next, the Y swizzle is R, the Y size is 5 and the type is UNORM. We
+ * normalize those 5 bits into the [0.0, 1.0] range and put the result
+ * into the second component of the destination vector. Rinse and repeat
+ * for components Z and W.
+ * If any of the size fields is zero, the source format contains fewer
+ * than four components.
+ * If any swizzle is 0 or 1, the corresponding destination component
+ * should be filled with 0.0 or 1.0, respectively.
+ */
+typedef uint pipe_format_rgbazs_t;
+
+static INLINE uint pf_get(pipe_format_rgbazs_t f, uint shift, uint mask)
+{
+ return (f >> shift) & mask;
+}
+
+#define pf_swizzle_x(f) pf_get(f, 2, 0x7) /**< PIPE_FORMAT_COMP_ */
+#define pf_swizzle_y(f) pf_get(f, 5, 0x7) /**< PIPE_FORMAT_COMP_ */
+#define pf_swizzle_z(f) pf_get(f, 8, 0x7) /**< PIPE_FORMAT_COMP_ */
+#define pf_swizzle_w(f) pf_get(f, 11, 0x7) /**< PIPE_FORMAT_COMP_ */
+#define pf_swizzle_xyzw(f,i) pf_get(f, 2+((i)*3), 0x7)
+#define pf_size_x(f) pf_get(f, 14, 0x7) /**< Size of X */
+#define pf_size_y(f) pf_get(f, 17, 0x7) /**< Size of Y */
+#define pf_size_z(f) pf_get(f, 20, 0x7) /**< Size of Z */
+#define pf_size_w(f) pf_get(f, 23, 0x7) /**< Size of W */
+#define pf_size_xyzw(f,i) pf_get(f, 14+((i)*3), 0x7)
+#define pf_exp2(f) pf_get(f, 26, 0x7) /**< Scale size by 2 ^ exp2 */
+#define pf_type(f) pf_get(f, 29, 0x7) /**< PIPE_FORMAT_TYPE_ */
+
+/**
+ * Helper macro to encode the above structure into a 32-bit value.
+ */
+#define _PIPE_FORMAT_RGBAZS( SWZ, SIZEX, SIZEY, SIZEZ, SIZEW, EXP2, TYPE ) (\
+ (PIPE_FORMAT_LAYOUT_RGBAZS << 0) |\
+ ((SWZ) << 2) |\
+ ((SIZEX) << 14) |\
+ ((SIZEY) << 17) |\
+ ((SIZEZ) << 20) |\
+ ((SIZEW) << 23) |\
+ ((EXP2) << 26) |\
+ ((TYPE) << 29) )
+
+/**
+ * Helper macro to encode the swizzle part of the structure above.
+ */
+#define _PIPE_FORMAT_SWZ( SWZX, SWZY, SWZZ, SWZW ) (((SWZX) << 0) | ((SWZY) << 3) | ((SWZZ) << 6) | ((SWZW) << 9))
+
+/**
+ * Shorthand macro for RGBAZS layout with component sizes in 1-bit units.
+ */
+#define _PIPE_FORMAT_RGBAZS_1( SWZ, SIZEX, SIZEY, SIZEZ, SIZEW, TYPE )\
+ _PIPE_FORMAT_RGBAZS( SWZ, SIZEX, SIZEY, SIZEZ, SIZEW, 0, TYPE )
+
+/**
+ * Shorthand macro for RGBAZS layout with component sizes in 2-bit units.
+ */
+#define _PIPE_FORMAT_RGBAZS_2( SWZ, SIZEX, SIZEY, SIZEZ, SIZEW, TYPE )\
+ _PIPE_FORMAT_RGBAZS( SWZ, SIZEX, SIZEY, SIZEZ, SIZEW, 1, TYPE )
+
+/**
+ * Shorthand macro for RGBAZS layout with component sizes in 8-bit units.
+ */
+#define _PIPE_FORMAT_RGBAZS_8( SWZ, SIZEX, SIZEY, SIZEZ, SIZEW, TYPE )\
+ _PIPE_FORMAT_RGBAZS( SWZ, SIZEX, SIZEY, SIZEZ, SIZEW, 3, TYPE )
+
+/**
+ * Shorthand macro for RGBAZS layout with component sizes in 64-bit units.
+ */
+#define _PIPE_FORMAT_RGBAZS_64( SWZ, SIZEX, SIZEY, SIZEZ, SIZEW, TYPE )\
+ _PIPE_FORMAT_RGBAZS( SWZ, SIZEX, SIZEY, SIZEZ, SIZEW, 6, TYPE )
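
As a worked example of the encoding described above (not part of the patch): each of the four channels stores a 3-bit swizzle and a 3-bit size, the sizes are scaled by 2^exp2, and a single type applies to the whole format. Under that assumption, the total bit width of an RGBAZS-encoded format can be recovered with the accessors alone (the helper name is illustrative):

    static INLINE uint
    example_rgbazs_bits(pipe_format_rgbazs_t f)
    {
       uint bits = 0;
       uint i;

       assert(pf_layout(f) == PIPE_FORMAT_LAYOUT_RGBAZS);

       for (i = 0; i < 4; i++) {
          /* each channel size is stored in units of 2^exp2 bits */
          bits += pf_size_xyzw(f, i) << pf_exp2(f);
       }
       return bits;
    }
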
+
+typedef uint pipe_format_mixed_t;
+
+/* NOTE: Use pf_swizzle_* and pf_size_* macros for swizzles and sizes.
+ */
+
+#define pf_mixed_sign_x(f) pf_get( f, 26, 0x1 ) /*< Sign of X */
+#define pf_mixed_sign_y(f) pf_get( f, 27, 0x1 ) /*< Sign of Y */
+#define pf_mixed_sign_z(f) pf_get( f, 28, 0x1 ) /*< Sign of Z */
+#define pf_mixed_sign_w(f) pf_get( f, 29, 0x1 ) /*< Sign of W */
+#define pf_mixed_sign_xyzw(f, i) pf_get( f, 26 + (i), 0x1 )
+#define pf_mixed_normalized(f) pf_get( f, 30, 0x1 ) /*< Type is NORM (1) or SCALED (0) */
+#define pf_mixed_scale8(f) pf_get( f, 31, 0x1 ) /*< Scale size by either one (0) or eight (1) */
+
+/**
+ * Helper macro to encode the above structure into a 32-bit value.
+ */
+#define _PIPE_FORMAT_MIXED( SWZ, SIZEX, SIZEY, SIZEZ, SIZEW, SIGNX, SIGNY, SIGNZ, SIGNW, NORMALIZED, SCALE8 ) (\
+ (PIPE_FORMAT_LAYOUT_MIXED << 0) |\
+ ((SWZ) << 2) |\
+ ((SIZEX) << 14) |\
+ ((SIZEY) << 17) |\
+ ((SIZEZ) << 20) |\
+ ((SIZEW) << 23) |\
+ ((SIGNX) << 26) |\
+ ((SIGNY) << 27) |\
+ ((SIGNZ) << 28) |\
+ ((SIGNW) << 29) |\
+ ((NORMALIZED) << 30) |\
+ ((SCALE8) << 31) )
+
+/**
+ * Shorthand macro for common format swizzles.
+ */
+#define _PIPE_FORMAT_R001 _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_R, PIPE_FORMAT_COMP_0, PIPE_FORMAT_COMP_0, PIPE_FORMAT_COMP_1 )
+#define _PIPE_FORMAT_RG01 _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_R, PIPE_FORMAT_COMP_G, PIPE_FORMAT_COMP_0, PIPE_FORMAT_COMP_1 )
+#define _PIPE_FORMAT_RGB1 _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_R, PIPE_FORMAT_COMP_G, PIPE_FORMAT_COMP_B, PIPE_FORMAT_COMP_1 )
+#define _PIPE_FORMAT_RGBA _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_R, PIPE_FORMAT_COMP_G, PIPE_FORMAT_COMP_B, PIPE_FORMAT_COMP_A )
+#define _PIPE_FORMAT_ARGB _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_A, PIPE_FORMAT_COMP_R, PIPE_FORMAT_COMP_G, PIPE_FORMAT_COMP_B )
+#define _PIPE_FORMAT_ABGR _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_A, PIPE_FORMAT_COMP_B, PIPE_FORMAT_COMP_G, PIPE_FORMAT_COMP_R )
+#define _PIPE_FORMAT_BGRA _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_B, PIPE_FORMAT_COMP_G, PIPE_FORMAT_COMP_R, PIPE_FORMAT_COMP_A )
+#define _PIPE_FORMAT_1RGB _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_1, PIPE_FORMAT_COMP_R, PIPE_FORMAT_COMP_G, PIPE_FORMAT_COMP_B )
+#define _PIPE_FORMAT_1BGR _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_1, PIPE_FORMAT_COMP_B, PIPE_FORMAT_COMP_G, PIPE_FORMAT_COMP_R )
+#define _PIPE_FORMAT_BGR1 _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_B, PIPE_FORMAT_COMP_G, PIPE_FORMAT_COMP_R, PIPE_FORMAT_COMP_1 )
+#define _PIPE_FORMAT_0000 _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_0, PIPE_FORMAT_COMP_0, PIPE_FORMAT_COMP_0, PIPE_FORMAT_COMP_0 )
+#define _PIPE_FORMAT_000R _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_0, PIPE_FORMAT_COMP_0, PIPE_FORMAT_COMP_0, PIPE_FORMAT_COMP_R )
+#define _PIPE_FORMAT_RRR1 _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_R, PIPE_FORMAT_COMP_R, PIPE_FORMAT_COMP_R, PIPE_FORMAT_COMP_1 )
+#define _PIPE_FORMAT_RRRR _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_R, PIPE_FORMAT_COMP_R, PIPE_FORMAT_COMP_R, PIPE_FORMAT_COMP_R )
+#define _PIPE_FORMAT_RRRG _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_R, PIPE_FORMAT_COMP_R, PIPE_FORMAT_COMP_R, PIPE_FORMAT_COMP_G )
+#define _PIPE_FORMAT_Z000 _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_Z, PIPE_FORMAT_COMP_0, PIPE_FORMAT_COMP_0, PIPE_FORMAT_COMP_0 )
+#define _PIPE_FORMAT_0Z00 _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_0, PIPE_FORMAT_COMP_Z, PIPE_FORMAT_COMP_0, PIPE_FORMAT_COMP_0 )
+#define _PIPE_FORMAT_SZ00 _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_S, PIPE_FORMAT_COMP_Z, PIPE_FORMAT_COMP_0, PIPE_FORMAT_COMP_0 )
+#define _PIPE_FORMAT_ZS00 _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_Z, PIPE_FORMAT_COMP_S, PIPE_FORMAT_COMP_0, PIPE_FORMAT_COMP_0 )
+#define _PIPE_FORMAT_S000 _PIPE_FORMAT_SWZ( PIPE_FORMAT_COMP_S, PIPE_FORMAT_COMP_0, PIPE_FORMAT_COMP_0, PIPE_FORMAT_COMP_0 )
+
+/**
+ * YCBCR Format Layout.
+ */
+
+/**
+ * This only contains a flag that indicates whether the format is reversed or
+ * not.
+ */
+typedef uint pipe_format_ycbcr_t;
+
+/**
+ * Helper macro to encode the above structure into a 32-bit value.
+ */
+#define _PIPE_FORMAT_YCBCR( REV ) (\
+ (PIPE_FORMAT_LAYOUT_YCBCR << 0) |\
+ ((REV) << 2) )
+
+static INLINE uint pf_rev(pipe_format_ycbcr_t f)
+{
+ return (f >> 2) & 0x1;
+}
+
+
+/**
+ * Compressed format layouts (this will probably change)
+ */
+#define _PIPE_FORMAT_DXT( LEVEL, RSIZE, GSIZE, BSIZE, ASIZE, TYPE ) \
+ ((PIPE_FORMAT_LAYOUT_DXT << 0) | \
+ ((LEVEL) << 2) | \
+ ((RSIZE) << 5) | \
+ ((GSIZE) << 8) | \
+ ((BSIZE) << 11) | \
+ ((ASIZE) << 14) | \
+ ((TYPE) << 29))
+
+
+
+/**
+ * Texture/surface image formats (preliminary)
+ */
+
+/* KW: Added lots of surface formats to support vertex element layout
+ * definitions, and eventually render-to-vertex-buffer. Could
+ * consider making float/int/uint/scaled/normalized a separate
+ * parameter, but on the other hand there are special cases like
+ * z24s8, compressed textures, ycbcr, etc that won't fit that model.
+ */
+
+enum pipe_format {
+ PIPE_FORMAT_NONE = _PIPE_FORMAT_RGBAZS_1 ( _PIPE_FORMAT_0000, 0, 0, 0, 0, PIPE_FORMAT_TYPE_UNKNOWN ),
+ PIPE_FORMAT_A8R8G8B8_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_ARGB, 1, 1, 1, 1, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_X8R8G8B8_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_1RGB, 1, 1, 1, 1, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_B8G8R8A8_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_BGRA, 1, 1, 1, 1, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_B8G8R8X8_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_BGR1, 1, 1, 1, 1, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_A1R5G5B5_UNORM = _PIPE_FORMAT_RGBAZS_1 ( _PIPE_FORMAT_ARGB, 1, 5, 5, 5, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_A4R4G4B4_UNORM = _PIPE_FORMAT_RGBAZS_1 ( _PIPE_FORMAT_ARGB, 4, 4, 4, 4, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_R5G6B5_UNORM = _PIPE_FORMAT_RGBAZS_1 ( _PIPE_FORMAT_RGB1, 5, 6, 5, 0, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_A2B10G10R10_UNORM = _PIPE_FORMAT_RGBAZS_2 ( _PIPE_FORMAT_ABGR, 1, 5, 5, 5, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_L8_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RRR1, 1, 1, 1, 0, PIPE_FORMAT_TYPE_UNORM ), /**< ubyte luminance */
+ PIPE_FORMAT_A8_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_000R, 0, 0, 0, 1, PIPE_FORMAT_TYPE_UNORM ), /**< ubyte alpha */
+ PIPE_FORMAT_I8_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RRRR, 1, 1, 1, 1, PIPE_FORMAT_TYPE_UNORM ), /**< ubyte intensity */
+ PIPE_FORMAT_A8L8_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RRRG, 1, 1, 1, 1, PIPE_FORMAT_TYPE_UNORM ), /**< ubyte alpha, luminance */
+ PIPE_FORMAT_L16_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RRR1, 2, 2, 2, 0, PIPE_FORMAT_TYPE_UNORM ), /**< ushort luminance */
+ PIPE_FORMAT_YCBCR = _PIPE_FORMAT_YCBCR( 0 ),
+ PIPE_FORMAT_YCBCR_REV = _PIPE_FORMAT_YCBCR( 1 ),
+ PIPE_FORMAT_Z16_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_Z000, 2, 0, 0, 0, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_Z32_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_Z000, 4, 0, 0, 0, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_Z32_FLOAT = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_Z000, 4, 0, 0, 0, PIPE_FORMAT_TYPE_FLOAT ),
+ PIPE_FORMAT_S8Z24_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_SZ00, 1, 3, 0, 0, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_Z24S8_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_ZS00, 3, 1, 0, 0, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_X8Z24_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_0Z00, 1, 3, 0, 0, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_Z24X8_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_Z000, 3, 1, 0, 0, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_S8_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_S000, 1, 0, 0, 0, PIPE_FORMAT_TYPE_UNORM ), /**< ubyte stencil */
+ PIPE_FORMAT_R64_FLOAT = _PIPE_FORMAT_RGBAZS_64( _PIPE_FORMAT_R001, 1, 0, 0, 0, PIPE_FORMAT_TYPE_FLOAT ),
+ PIPE_FORMAT_R64G64_FLOAT = _PIPE_FORMAT_RGBAZS_64( _PIPE_FORMAT_RG01, 1, 1, 0, 0, PIPE_FORMAT_TYPE_FLOAT ),
+ PIPE_FORMAT_R64G64B64_FLOAT = _PIPE_FORMAT_RGBAZS_64( _PIPE_FORMAT_RGB1, 1, 1, 1, 0, PIPE_FORMAT_TYPE_FLOAT ),
+ PIPE_FORMAT_R64G64B64A64_FLOAT = _PIPE_FORMAT_RGBAZS_64( _PIPE_FORMAT_RGBA, 1, 1, 1, 1, PIPE_FORMAT_TYPE_FLOAT ),
+ PIPE_FORMAT_R32_FLOAT = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_R001, 4, 0, 0, 0, PIPE_FORMAT_TYPE_FLOAT ),
+ PIPE_FORMAT_R32G32_FLOAT = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RG01, 4, 4, 0, 0, PIPE_FORMAT_TYPE_FLOAT ),
+ PIPE_FORMAT_R32G32B32_FLOAT = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 4, 4, 4, 0, PIPE_FORMAT_TYPE_FLOAT ),
+ PIPE_FORMAT_R32G32B32A32_FLOAT = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGBA, 4, 4, 4, 4, PIPE_FORMAT_TYPE_FLOAT ),
+ PIPE_FORMAT_R32_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_R001, 4, 0, 0, 0, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_R32G32_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RG01, 4, 4, 0, 0, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_R32G32B32_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 4, 4, 4, 0, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_R32G32B32A32_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGBA, 4, 4, 4, 4, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_R32_USCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_R001, 4, 0, 0, 0, PIPE_FORMAT_TYPE_USCALED ),
+ PIPE_FORMAT_R32G32_USCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RG01, 4, 4, 0, 0, PIPE_FORMAT_TYPE_USCALED ),
+ PIPE_FORMAT_R32G32B32_USCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 4, 4, 4, 0, PIPE_FORMAT_TYPE_USCALED ),
+ PIPE_FORMAT_R32G32B32A32_USCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGBA, 4, 4, 4, 4, PIPE_FORMAT_TYPE_USCALED ),
+ PIPE_FORMAT_R32_SNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_R001, 4, 0, 0, 0, PIPE_FORMAT_TYPE_SNORM ),
+ PIPE_FORMAT_R32G32_SNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RG01, 4, 4, 0, 0, PIPE_FORMAT_TYPE_SNORM ),
+ PIPE_FORMAT_R32G32B32_SNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 4, 4, 4, 0, PIPE_FORMAT_TYPE_SNORM ),
+ PIPE_FORMAT_R32G32B32A32_SNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGBA, 4, 4, 4, 4, PIPE_FORMAT_TYPE_SNORM ),
+ PIPE_FORMAT_R32_SSCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_R001, 4, 0, 0, 0, PIPE_FORMAT_TYPE_SSCALED ),
+ PIPE_FORMAT_R32G32_SSCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RG01, 4, 4, 0, 0, PIPE_FORMAT_TYPE_SSCALED ),
+ PIPE_FORMAT_R32G32B32_SSCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 4, 4, 4, 0, PIPE_FORMAT_TYPE_SSCALED ),
+ PIPE_FORMAT_R32G32B32A32_SSCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGBA, 4, 4, 4, 4, PIPE_FORMAT_TYPE_SSCALED ),
+ PIPE_FORMAT_R16_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_R001, 2, 0, 0, 0, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_R16G16_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RG01, 2, 2, 0, 0, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_R16G16B16_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 2, 2, 2, 0, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_R16G16B16A16_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGBA, 2, 2, 2, 2, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_R16_USCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_R001, 2, 0, 0, 0, PIPE_FORMAT_TYPE_USCALED ),
+ PIPE_FORMAT_R16G16_USCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RG01, 2, 2, 0, 0, PIPE_FORMAT_TYPE_USCALED ),
+ PIPE_FORMAT_R16G16B16_USCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 2, 2, 2, 0, PIPE_FORMAT_TYPE_USCALED ),
+ PIPE_FORMAT_R16G16B16A16_USCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGBA, 2, 2, 2, 2, PIPE_FORMAT_TYPE_USCALED ),
+ PIPE_FORMAT_R16_SNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_R001, 2, 0, 0, 0, PIPE_FORMAT_TYPE_SNORM ),
+ PIPE_FORMAT_R16G16_SNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RG01, 2, 2, 0, 0, PIPE_FORMAT_TYPE_SNORM ),
+ PIPE_FORMAT_R16G16B16_SNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 2, 2, 2, 0, PIPE_FORMAT_TYPE_SNORM ),
+ PIPE_FORMAT_R16G16B16A16_SNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGBA, 2, 2, 2, 2, PIPE_FORMAT_TYPE_SNORM ),
+ PIPE_FORMAT_R16_SSCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_R001, 2, 0, 0, 0, PIPE_FORMAT_TYPE_SSCALED ),
+ PIPE_FORMAT_R16G16_SSCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RG01, 2, 2, 0, 0, PIPE_FORMAT_TYPE_SSCALED ),
+ PIPE_FORMAT_R16G16B16_SSCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 2, 2, 2, 0, PIPE_FORMAT_TYPE_SSCALED ),
+ PIPE_FORMAT_R16G16B16A16_SSCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGBA, 2, 2, 2, 2, PIPE_FORMAT_TYPE_SSCALED ),
+ PIPE_FORMAT_R8_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_R001, 1, 0, 0, 0, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_R8G8_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RG01, 1, 1, 0, 0, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_R8G8B8_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 1, 1, 1, 0, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_R8G8B8A8_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGBA, 1, 1, 1, 1, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_R8G8B8X8_UNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 1, 1, 1, 1, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_R8_USCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_R001, 1, 0, 0, 0, PIPE_FORMAT_TYPE_USCALED ),
+ PIPE_FORMAT_R8G8_USCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RG01, 1, 1, 0, 0, PIPE_FORMAT_TYPE_USCALED ),
+ PIPE_FORMAT_R8G8B8_USCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 1, 1, 1, 0, PIPE_FORMAT_TYPE_USCALED ),
+ PIPE_FORMAT_R8G8B8A8_USCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGBA, 1, 1, 1, 1, PIPE_FORMAT_TYPE_USCALED ),
+ PIPE_FORMAT_R8G8B8X8_USCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 1, 1, 1, 1, PIPE_FORMAT_TYPE_USCALED ),
+ PIPE_FORMAT_R8_SNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_R001, 1, 0, 0, 0, PIPE_FORMAT_TYPE_SNORM ),
+ PIPE_FORMAT_R8G8_SNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RG01, 1, 1, 0, 0, PIPE_FORMAT_TYPE_SNORM ),
+ PIPE_FORMAT_R8G8B8_SNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 1, 1, 1, 0, PIPE_FORMAT_TYPE_SNORM ),
+ PIPE_FORMAT_R8G8B8A8_SNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGBA, 1, 1, 1, 1, PIPE_FORMAT_TYPE_SNORM ),
+ PIPE_FORMAT_R8G8B8X8_SNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 1, 1, 1, 1, PIPE_FORMAT_TYPE_SNORM ),
+ PIPE_FORMAT_B6G5R5_SNORM = _PIPE_FORMAT_RGBAZS_1 ( _PIPE_FORMAT_BGR1, 6, 5, 5, 0, PIPE_FORMAT_TYPE_SNORM ),
+ PIPE_FORMAT_A8B8G8R8_SNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_BGRA, 1, 1, 1, 1, PIPE_FORMAT_TYPE_SNORM ),
+ PIPE_FORMAT_X8B8G8R8_SNORM = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_BGR1, 1, 1, 1, 1, PIPE_FORMAT_TYPE_SNORM ),
+ PIPE_FORMAT_R8_SSCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_R001, 1, 0, 0, 0, PIPE_FORMAT_TYPE_SSCALED ),
+ PIPE_FORMAT_R8G8_SSCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RG01, 1, 1, 0, 0, PIPE_FORMAT_TYPE_SSCALED ),
+ PIPE_FORMAT_R8G8B8_SSCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 1, 1, 1, 0, PIPE_FORMAT_TYPE_SSCALED ),
+ PIPE_FORMAT_R8G8B8A8_SSCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGBA, 1, 1, 1, 1, PIPE_FORMAT_TYPE_SSCALED ),
+ PIPE_FORMAT_R8G8B8X8_SSCALED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 1, 1, 1, 1, PIPE_FORMAT_TYPE_SSCALED ),
+ PIPE_FORMAT_R32_FIXED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_R001, 4, 0, 0, 0, PIPE_FORMAT_TYPE_FIXED ),
+ PIPE_FORMAT_R32G32_FIXED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RG01, 4, 4, 0, 0, PIPE_FORMAT_TYPE_FIXED ),
+ PIPE_FORMAT_R32G32B32_FIXED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 4, 4, 4, 0, PIPE_FORMAT_TYPE_FIXED ),
+ PIPE_FORMAT_R32G32B32A32_FIXED = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGBA, 4, 4, 4, 4, PIPE_FORMAT_TYPE_FIXED ),
+ /* sRGB formats */
+ PIPE_FORMAT_L8_SRGB = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RRR1, 1, 1, 1, 0, PIPE_FORMAT_TYPE_SRGB ),
+ PIPE_FORMAT_A8L8_SRGB = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RRRG, 1, 1, 1, 1, PIPE_FORMAT_TYPE_SRGB ),
+ PIPE_FORMAT_R8G8B8_SRGB = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 1, 1, 1, 0, PIPE_FORMAT_TYPE_SRGB ),
+ PIPE_FORMAT_R8G8B8A8_SRGB = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGBA, 1, 1, 1, 1, PIPE_FORMAT_TYPE_SRGB ),
+ PIPE_FORMAT_R8G8B8X8_SRGB = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_RGB1, 1, 1, 1, 1, PIPE_FORMAT_TYPE_SRGB ),
+ PIPE_FORMAT_A8R8G8B8_SRGB = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_ARGB, 1, 1, 1, 1, PIPE_FORMAT_TYPE_SRGB ),
+ PIPE_FORMAT_X8R8G8B8_SRGB = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_1RGB, 1, 1, 1, 1, PIPE_FORMAT_TYPE_SRGB ),
+ PIPE_FORMAT_B8G8R8A8_SRGB = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_BGRA, 1, 1, 1, 1, PIPE_FORMAT_TYPE_SRGB ),
+ PIPE_FORMAT_B8G8R8X8_SRGB = _PIPE_FORMAT_RGBAZS_8 ( _PIPE_FORMAT_BGR1, 1, 1, 1, 1, PIPE_FORMAT_TYPE_SRGB ),
+
+ /* mixed formats */
+ PIPE_FORMAT_X8UB8UG8SR8S_NORM = _PIPE_FORMAT_MIXED( _PIPE_FORMAT_1BGR, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1 ),
+ PIPE_FORMAT_B6UG5SR5S_NORM = _PIPE_FORMAT_MIXED( _PIPE_FORMAT_BGR1, 6, 5, 5, 0, 0, 1, 1, 0, 1, 0 ),
+
+ /* compressed formats */
+ PIPE_FORMAT_DXT1_RGB = _PIPE_FORMAT_DXT( 1, 8, 8, 8, 0, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_DXT1_RGBA = _PIPE_FORMAT_DXT( 1, 8, 8, 8, 8, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_DXT3_RGBA = _PIPE_FORMAT_DXT( 3, 8, 8, 8, 8, PIPE_FORMAT_TYPE_UNORM ),
+ PIPE_FORMAT_DXT5_RGBA = _PIPE_FORMAT_DXT( 5, 8, 8, 8, 8, PIPE_FORMAT_TYPE_UNORM ),
+
+ /* sRGB, compressed */
+ PIPE_FORMAT_DXT1_SRGB = _PIPE_FORMAT_DXT( 1, 8, 8, 8, 0, PIPE_FORMAT_TYPE_SRGB ),
+ PIPE_FORMAT_DXT1_SRGBA = _PIPE_FORMAT_DXT( 1, 8, 8, 8, 8, PIPE_FORMAT_TYPE_SRGB ),
+ PIPE_FORMAT_DXT3_SRGBA = _PIPE_FORMAT_DXT( 3, 8, 8, 8, 8, PIPE_FORMAT_TYPE_SRGB ),
+ PIPE_FORMAT_DXT5_SRGBA = _PIPE_FORMAT_DXT( 5, 8, 8, 8, 8, PIPE_FORMAT_TYPE_SRGB )
+};
+
+/**
+ * Builds pipe format name from format token.
+ */
+extern const char *pf_name( enum pipe_format format );
+
+/**
+ * Return bits for a particular component.
+ * \param comp component index, starting at 0
+ */
+static INLINE uint pf_get_component_bits( enum pipe_format format, uint comp )
+{
+ uint size;
+
+ if (pf_swizzle_x(format) == comp) {
+ size = pf_size_x(format);
+ }
+ else if (pf_swizzle_y(format) == comp) {
+ size = pf_size_y(format);
+ }
+ else if (pf_swizzle_z(format) == comp) {
+ size = pf_size_z(format);
+ }
+ else if (pf_swizzle_w(format) == comp) {
+ size = pf_size_w(format);
+ }
+ else {
+ size = 0;
+ }
+ if (pf_layout( format ) == PIPE_FORMAT_LAYOUT_RGBAZS)
+ return size << pf_exp2( format );
+ return size << (pf_mixed_scale8( format ) * 3);
+}
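
A quick worked decode of the accessor above, purely illustrative (it assumes this header and the format/PIPE_FORMAT_COMP_ tokens it defines; the function name is made up):

static INLINE uint
example_red_bits(void)
{
   /* PIPE_FORMAT_A8R8G8B8_UNORM is encoded with the ARGB swizzle,
    * per-component size 1 and exp2 == 3 (8-bit units), so the red component
    * matches the Y slot and pf_get_component_bits() returns 1 << 3 == 8.
    */
   return pf_get_component_bits(PIPE_FORMAT_A8R8G8B8_UNORM,
                                PIPE_FORMAT_COMP_R);
}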
+
+/**
+ * Return total bits needed for the pixel format.
+ */
+static INLINE uint pf_get_bits( enum pipe_format format )
+{
+ switch (pf_layout(format)) {
+ case PIPE_FORMAT_LAYOUT_RGBAZS:
+ case PIPE_FORMAT_LAYOUT_MIXED:
+ return
+ pf_get_component_bits( format, PIPE_FORMAT_COMP_0 ) +
+ pf_get_component_bits( format, PIPE_FORMAT_COMP_1 ) +
+ pf_get_component_bits( format, PIPE_FORMAT_COMP_R ) +
+ pf_get_component_bits( format, PIPE_FORMAT_COMP_G ) +
+ pf_get_component_bits( format, PIPE_FORMAT_COMP_B ) +
+ pf_get_component_bits( format, PIPE_FORMAT_COMP_A ) +
+ pf_get_component_bits( format, PIPE_FORMAT_COMP_Z ) +
+ pf_get_component_bits( format, PIPE_FORMAT_COMP_S );
+ case PIPE_FORMAT_LAYOUT_YCBCR:
+ assert( format == PIPE_FORMAT_YCBCR || format == PIPE_FORMAT_YCBCR_REV );
+ /* return effective bits per pixel */
+ return 16;
+ default:
+ assert( 0 );
+ return 0;
+ }
+}
+
+/**
+ * Return bytes per pixel for the given format.
+ */
+static INLINE uint pf_get_size( enum pipe_format format )
+{
+ assert(pf_get_bits(format) % 8 == 0);
+ return pf_get_bits(format) / 8;
+}
+
+/**
+ * Describe accurately the pixel format.
+ *
+ * The chars-per-pixel concept falls apart with compressed and YUV images,
+ * where more than one pixel is coded in a single data block. This structure
+ * describes that block.
+ *
+ * Simple pixel formats are effectively a 1x1xcpp block.
+ */
+struct pipe_format_block
+{
+ /** Block size in bytes */
+ unsigned size;
+
+ /** Block width in pixels */
+ unsigned width;
+
+ /** Block height in pixels */
+ unsigned height;
+};
+
+/**
+ * Describe pixel format's block.
+ *
+ * @sa http://msdn2.microsoft.com/en-us/library/ms796147.aspx
+ */
+static INLINE void
+pf_get_block(enum pipe_format format, struct pipe_format_block *block)
+{
+ switch(format) {
+ case PIPE_FORMAT_DXT1_RGBA:
+ case PIPE_FORMAT_DXT1_RGB:
+ case PIPE_FORMAT_DXT1_SRGBA:
+ case PIPE_FORMAT_DXT1_SRGB:
+ block->size = 8;
+ block->width = 4;
+ block->height = 4;
+ break;
+ case PIPE_FORMAT_DXT3_RGBA:
+ case PIPE_FORMAT_DXT5_RGBA:
+ case PIPE_FORMAT_DXT3_SRGBA:
+ case PIPE_FORMAT_DXT5_SRGBA:
+ block->size = 16;
+ block->width = 4;
+ block->height = 4;
+ break;
+ case PIPE_FORMAT_YCBCR:
+ case PIPE_FORMAT_YCBCR_REV:
+ block->size = 4; /* 2*cpp */
+ block->width = 2;
+ block->height = 1;
+ break;
+ default:
+ block->size = pf_get_size(format);
+ block->width = 1;
+ block->height = 1;
+ break;
+ }
+}
+
+static INLINE unsigned
+pf_get_nblocksx(const struct pipe_format_block *block, unsigned x)
+{
+ return (x + block->width - 1)/block->width;
+}
+
+static INLINE unsigned
+pf_get_nblocksy(const struct pipe_format_block *block, unsigned y)
+{
+ return (y + block->height - 1)/block->height;
+}
+
+static INLINE unsigned
+pf_get_nblocks(const struct pipe_format_block *block, unsigned width, unsigned height)
+{
+ return pf_get_nblocksx(block, width)*pf_get_nblocksy(block, height);
+}
+
+static INLINE size_t
+pf_get_stride(const struct pipe_format_block *block, unsigned width)
+{
+ return pf_get_nblocksx(block, width)*block->size;
+}
+
+static INLINE size_t
+pf_get_2d_size(const struct pipe_format_block *block, size_t stride, unsigned height)
+{
+ return pf_get_nblocksy(block, height)*stride;
+}
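
For orientation, a small sketch of how the block helpers above combine when sizing a 2D image (illustrative only; the function name is made up and only declarations from this header are assumed):

static INLINE size_t
example_image_bytes(enum pipe_format format, unsigned width, unsigned height)
{
   struct pipe_format_block block;
   size_t stride;

   pf_get_block(format, &block);            /* e.g. 4x4 blocks of 8 bytes for DXT1 */
   stride = pf_get_stride(&block, width);   /* bytes per row of blocks */
   return pf_get_2d_size(&block, stride, height);
}

/* example_image_bytes(PIPE_FORMAT_A8R8G8B8_UNORM, 64, 64) == 64 * 64 * 4,
 * while example_image_bytes(PIPE_FORMAT_DXT1_RGB, 64, 64) == 16 * 16 * 8.
 */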
+
+static INLINE boolean
+pf_is_depth_or_stencil( enum pipe_format format )
+{
+ return (pf_get_component_bits( format, PIPE_FORMAT_COMP_Z ) +
+ pf_get_component_bits( format, PIPE_FORMAT_COMP_S )) != 0;
+}
+
+static INLINE boolean
+pf_is_depth_and_stencil( enum pipe_format format )
+{
+ return (pf_get_component_bits( format, PIPE_FORMAT_COMP_Z ) != 0 &&
+ pf_get_component_bits( format, PIPE_FORMAT_COMP_S ) != 0);
+}
+
+/** DEPRECATED: For backwards compatibility */
+static INLINE boolean
+pf_is_depth_stencil( enum pipe_format format )
+{
+ return pf_is_depth_or_stencil( format );
+}
+
+static INLINE boolean
+pf_is_compressed( enum pipe_format format )
+{
+ return pf_layout(format) == PIPE_FORMAT_LAYOUT_DXT ? TRUE : FALSE;
+}
+
+static INLINE boolean
+pf_is_ycbcr( enum pipe_format format )
+{
+ return pf_layout(format) == PIPE_FORMAT_LAYOUT_YCBCR ? TRUE : FALSE;
+}
+
+static INLINE boolean
+pf_has_alpha( enum pipe_format format )
+{
+ switch (pf_layout(format)) {
+ case PIPE_FORMAT_LAYOUT_RGBAZS:
+ case PIPE_FORMAT_LAYOUT_MIXED:
+ /* FIXME: pf_get_component_bits( PIPE_FORMAT_A8L8_UNORM, PIPE_FORMAT_COMP_A ) should not return 0 right? */
+ if(format == PIPE_FORMAT_A8_UNORM ||
+ format == PIPE_FORMAT_A8L8_UNORM ||
+ format == PIPE_FORMAT_A8L8_SRGB)
+ return TRUE;
+ return pf_get_component_bits( format, PIPE_FORMAT_COMP_A ) ? TRUE : FALSE;
+ case PIPE_FORMAT_LAYOUT_YCBCR:
+ return FALSE;
+ case PIPE_FORMAT_LAYOUT_DXT:
+ switch (format) {
+ case PIPE_FORMAT_DXT1_RGBA:
+ case PIPE_FORMAT_DXT3_RGBA:
+ case PIPE_FORMAT_DXT5_RGBA:
+ case PIPE_FORMAT_DXT1_SRGBA:
+ case PIPE_FORMAT_DXT3_SRGBA:
+ case PIPE_FORMAT_DXT5_SRGBA:
+ return TRUE;
+ default:
+ return FALSE;
+ }
+ default:
+ assert( 0 );
+ return FALSE;
+ }
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/gallium/include/pipe/p_inlines.h b/src/gallium/include/pipe/p_inlines.h
new file mode 100644
index 0000000000..a5c1e8270a
--- /dev/null
+++ b/src/gallium/include/pipe/p_inlines.h
@@ -0,0 +1,163 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef P_INLINES_H
+#define P_INLINES_H
+
+#include "p_context.h"
+#include "p_defines.h"
+#include "p_screen.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Convenience wrappers for screen buffer functions.
+ */
+
+static INLINE struct pipe_buffer *
+pipe_buffer_create( struct pipe_screen *screen,
+ unsigned alignment, unsigned usage, unsigned size )
+{
+ return screen->buffer_create(screen, alignment, usage, size);
+}
+
+static INLINE struct pipe_buffer *
+pipe_user_buffer_create( struct pipe_screen *screen, void *ptr, unsigned size )
+{
+ return screen->user_buffer_create(screen, ptr, size);
+}
+
+static INLINE void *
+pipe_buffer_map(struct pipe_screen *screen,
+ struct pipe_buffer *buf,
+ unsigned usage)
+{
+ if(screen->buffer_map_range) {
+ unsigned offset = 0;
+ unsigned length = buf->size;
+
+ /* XXX: Actually we should be using/detecting DISCARD
+ * instead of assuming that WRITE implies discard */
+ if((usage & PIPE_BUFFER_USAGE_CPU_WRITE) &&
+ !(usage & PIPE_BUFFER_USAGE_DISCARD))
+ usage |= PIPE_BUFFER_USAGE_CPU_READ;
+
+ return screen->buffer_map_range(screen, buf, offset, length, usage);
+ }
+ else
+ return screen->buffer_map(screen, buf, usage);
+}
+
+static INLINE void
+pipe_buffer_unmap(struct pipe_screen *screen,
+ struct pipe_buffer *buf)
+{
+ screen->buffer_unmap(screen, buf);
+}
+
+static INLINE void *
+pipe_buffer_map_range(struct pipe_screen *screen,
+ struct pipe_buffer *buf,
+ unsigned offset,
+ unsigned length,
+ unsigned usage)
+{
+ assert(offset < buf->size);
+ assert(offset + length <= buf->size);
+ assert(length);
+ if(screen->buffer_map_range)
+ return screen->buffer_map_range(screen, buf, offset, length, usage);
+ else
+ return screen->buffer_map(screen, buf, usage);
+}
+
+static INLINE void
+pipe_buffer_flush_mapped_range(struct pipe_screen *screen,
+ struct pipe_buffer *buf,
+ unsigned offset,
+ unsigned length)
+{
+ assert(offset < buf->size);
+ assert(offset + length <= buf->size);
+ assert(length);
+ if(screen->buffer_flush_mapped_range)
+ screen->buffer_flush_mapped_range(screen, buf, offset, length);
+}
+
+static INLINE void
+pipe_buffer_write(struct pipe_screen *screen,
+ struct pipe_buffer *buf,
+ unsigned offset, unsigned size,
+ const void *data)
+{
+ uint8_t *map;
+
+ assert(offset < buf->size);
+ assert(offset + size <= buf->size);
+ assert(size);
+
+ map = pipe_buffer_map_range(screen, buf, offset, size,
+ PIPE_BUFFER_USAGE_CPU_WRITE |
+ PIPE_BUFFER_USAGE_FLUSH_EXPLICIT);
+ assert(map);
+ if(map) {
+ memcpy(map + offset, data, size);
+ pipe_buffer_flush_mapped_range(screen, buf, offset, size);
+ pipe_buffer_unmap(screen, buf);
+ }
+}
+
+static INLINE void
+pipe_buffer_read(struct pipe_screen *screen,
+ struct pipe_buffer *buf,
+ unsigned offset, unsigned size,
+ void *data)
+{
+ uint8_t *map;
+
+ assert(offset < buf->size);
+ assert(offset + size <= buf->size);
+ assert(size);
+
+ map = pipe_buffer_map_range(screen, buf, offset, size, PIPE_BUFFER_USAGE_CPU_READ);
+ assert(map);
+ if(map) {
+ memcpy(data, map + offset, size);
+ pipe_buffer_unmap(screen, buf);
+ }
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* P_INLINES_H */
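
A rough usage sketch of the wrappers above; the function name is made up, the usage flag is assumed to come from p_defines.h, and 'screen' would be obtained from the winsys/driver:

#include "pipe/p_defines.h"
#include "pipe/p_inlines.h"

static struct pipe_buffer *
example_upload(struct pipe_screen *screen, const float *data, unsigned count)
{
   unsigned size = count * sizeof(float);
   struct pipe_buffer *buf =
      pipe_buffer_create(screen, 32, PIPE_BUFFER_USAGE_VERTEX, size);

   if (buf) {
      /* pipe_buffer_write() maps the range, copies the data, flushes the
       * written range explicitly and unmaps again. */
      pipe_buffer_write(screen, buf, 0, size, data);
   }
   return buf;
}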
diff --git a/src/gallium/include/pipe/p_refcnt.h b/src/gallium/include/pipe/p_refcnt.h
new file mode 100644
index 0000000000..1f9088b3e9
--- /dev/null
+++ b/src/gallium/include/pipe/p_refcnt.h
@@ -0,0 +1,96 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef P_REFCNT_H
+#define P_REFCNT_H
+
+
+#include "p_defines.h"
+#include "p_atomic.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct pipe_reference
+{
+ struct pipe_atomic count;
+};
+
+
+static INLINE void
+pipe_reference_init(struct pipe_reference *reference, unsigned count)
+{
+ p_atomic_set(&reference->count, count);
+}
+
+
+static INLINE bool
+pipe_is_referenced(struct pipe_reference *reference)
+{
+ return p_atomic_read(&reference->count) != 0;
+}
+
+
+/**
+ * Set 'ptr' to point to 'reference' and update reference counting.
+ * The old thing pointed to, if any, will be unreferenced first.
+ * 'reference' may be NULL.
+ */
+static INLINE bool
+pipe_reference(struct pipe_reference **ptr, struct pipe_reference *reference)
+{
+ bool destroy = FALSE;
+
+ if(*ptr != reference) {
+ /* bump the reference.count first */
+ if (reference) {
+ assert(pipe_is_referenced(reference));
+ p_atomic_inc(&reference->count);
+ }
+
+ if (*ptr) {
+ assert(pipe_is_referenced(*ptr));
+ if (p_atomic_dec_zero(&(*ptr)->count)) {
+ destroy = TRUE;
+ }
+ }
+
+ *ptr = reference;
+ }
+
+ return destroy;
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* P_REFCNT_H */
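
The intent is that each refcounted object wraps pipe_reference() in a typed helper; below is a minimal sketch with a hypothetical object type (the names are made up, and the pipe_reference member must be the first field for the cast to be valid):

#include "pipe/p_refcnt.h"

struct my_object {
   struct pipe_reference reference;   /* first member, see cast below */
   /* ...object data... */
};

/* Defined elsewhere by the owner of my_object. */
void my_object_destroy(struct my_object *obj);

static INLINE void
my_object_reference(struct my_object **ptr, struct my_object *obj)
{
   struct my_object *old = *ptr;

   /* pipe_reference() returns TRUE when the old object's count reaches zero. */
   if (pipe_reference((struct pipe_reference **)ptr,
                      obj ? &obj->reference : NULL))
      my_object_destroy(old);
}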
diff --git a/src/gallium/include/pipe/p_screen.h b/src/gallium/include/pipe/p_screen.h
new file mode 100644
index 0000000000..3f30c52a16
--- /dev/null
+++ b/src/gallium/include/pipe/p_screen.h
@@ -0,0 +1,296 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ *
+ * Screen, Adapter or GPU
+ *
+ * These are driver functions/facilities that are context independent.
+ */
+
+
+#ifndef P_SCREEN_H
+#define P_SCREEN_H
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_format.h"
+#include "pipe/p_defines.h"
+
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/** Opaque types */
+struct pipe_fence_handle;
+struct pipe_winsys;
+struct pipe_buffer;
+
+
+
+/**
+ * Gallium screen/adapter context. Basically everything
+ * hardware-specific that doesn't actually require a rendering
+ * context.
+ */
+struct pipe_screen {
+ struct pipe_winsys *winsys;
+
+ void (*destroy)( struct pipe_screen * );
+
+
+ const char *(*get_name)( struct pipe_screen * );
+
+ const char *(*get_vendor)( struct pipe_screen * );
+
+ /**
+ * Query an integer-valued capability/parameter/limit
+ * \param param one of PIPE_CAP_x
+ */
+ int (*get_param)( struct pipe_screen *, int param );
+
+ /**
+ * Query a float-valued capability/parameter/limit
+ * \param param one of PIPE_CAP_x
+ */
+ float (*get_paramf)( struct pipe_screen *, int param );
+
+ /**
+ * Check if the given pipe_format is supported as a texture or
+ * drawing surface.
+ * \param tex_usage bitmask of PIPE_TEXTURE_USAGE_*
+ * \param geom_flags bitmask of PIPE_TEXTURE_GEOM_*
+ */
+ boolean (*is_format_supported)( struct pipe_screen *,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned tex_usage,
+ unsigned geom_flags );
+
+ /**
+ * Create a new texture object, using the given template info.
+ */
+ struct pipe_texture * (*texture_create)(struct pipe_screen *,
+ const struct pipe_texture *templat);
+
+ /**
+ * Create a new texture object, using the given template info, but on top of
+ * existing memory.
+ *
+ * It is assumed that the buffer data is laid out as expected by the
+ * hardware. NULL will be returned if any inconsistency is found.
+ */
+ struct pipe_texture * (*texture_blanket)(struct pipe_screen *,
+ const struct pipe_texture *templat,
+ const unsigned *stride,
+ struct pipe_buffer *buffer);
+
+ void (*texture_destroy)(struct pipe_texture *pt);
+
+ /** Get a surface which is a "view" into a texture */
+ struct pipe_surface *(*get_tex_surface)(struct pipe_screen *,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level,
+ unsigned zslice,
+ unsigned usage );
+
+ void (*tex_surface_destroy)(struct pipe_surface *);
+
+
+ /** Get a transfer object for transferring data to/from a texture */
+ struct pipe_transfer *(*get_tex_transfer)(struct pipe_screen *,
+ struct pipe_texture *texture,
+ unsigned face, unsigned level,
+ unsigned zslice,
+ enum pipe_transfer_usage usage,
+ unsigned x, unsigned y,
+ unsigned w, unsigned h);
+
+ void (*tex_transfer_destroy)(struct pipe_transfer *);
+
+ void *(*transfer_map)( struct pipe_screen *,
+ struct pipe_transfer *transfer );
+
+ void (*transfer_unmap)( struct pipe_screen *,
+ struct pipe_transfer *transfer );
+
+
+ /**
+ * Create a new buffer.
+ * \param alignment buffer start address alignment in bytes
+ * \param usage bitmask of PIPE_BUFFER_USAGE_x
+ * \param size size in bytes
+ */
+ struct pipe_buffer *(*buffer_create)( struct pipe_screen *screen,
+ unsigned alignment,
+ unsigned usage,
+ unsigned size );
+
+ /**
+ * Create a buffer that wraps user-space data.
+ *
+ * Effectively this schedules a delayed call to buffer_create
+ * followed by an upload of the data at *some point in the future*,
+ * or perhaps never. Basically the allocate/upload is delayed
+ * until the buffer is actually passed to hardware.
+ *
+ * The intention is, first, to provide a quick way to turn regular data
+ * into a buffer, and second, to avoid a copy operation if that data
+ * subsequently turns out to be accessed only by the CPU.
+ *
+ * A common example is OpenGL vertex buffers that are subsequently
+ * processed either by software TNL in the driver or by being passed to
+ * hardware.
+ *
+ * XXX: What happens if the delayed call to buffer_create() fails?
+ *
+ * Note that ptr may be accessed at any time up to the time when the
+ * buffer is destroyed, so the data must not be freed before then.
+ */
+ struct pipe_buffer *(*user_buffer_create)(struct pipe_screen *screen,
+ void *ptr,
+ unsigned bytes);
+
+ /**
+ * Allocate storage for a display target surface.
+ *
+ * Surfaces which are meant to be blitted to the front screen (i.e.,
+ * display targets) often must be allocated with special characteristics,
+ * come from special memory pools, or be obtained directly from the
+ * windowing system.
+ *
+ * This callback is invoked by the pipe_screen when creating a texture marked
+ * with the PIPE_TEXTURE_USAGE_DISPLAY_TARGET flag to get the underlying
+ * buffer storage.
+ */
+ struct pipe_buffer *(*surface_buffer_create)(struct pipe_screen *screen,
+ unsigned width, unsigned height,
+ enum pipe_format format,
+ unsigned usage,
+ unsigned tex_usage,
+ unsigned *stride);
+
+
+ /**
+ * Map the entire data store of a buffer object into the client's address
+ * space. usage is a bitmask of PIPE_BUFFER_USAGE_CPU_READ/WRITE flags.
+ */
+ void *(*buffer_map)( struct pipe_screen *screen,
+ struct pipe_buffer *buf,
+ unsigned usage );
+ /**
+ * Map a subrange of the buffer data store into the client's address space.
+ *
+ * The returned pointer is always relative to buffer start, regardless of
+ * the specified range. This is different from the ARB_map_buffer_range
+ * semantics because we don't forbid multiple mappings of the same buffer
+ * (yet).
+ */
+ void *(*buffer_map_range)( struct pipe_screen *screen,
+ struct pipe_buffer *buf,
+ unsigned offset,
+ unsigned length,
+ unsigned usage);
+
+ /**
+ * Notify the driver about a range that was actually written to.
+ *
+ * Can only be used if the buffer was mapped with the
+ * PIPE_BUFFER_USAGE_CPU_WRITE and PIPE_BUFFER_USAGE_FLUSH_EXPLICIT flags
+ * set.
+ *
+ * The range is relative to the buffer start, regardless of the range
+ * specified to buffer_map_range. This is different from the
+ * ARB_map_buffer_range semantics because we don't forbid multiple mappings
+ * of the same buffer (yet).
+ *
+ */
+ void (*buffer_flush_mapped_range)( struct pipe_screen *screen,
+ struct pipe_buffer *buf,
+ unsigned offset,
+ unsigned length);
+
+ /**
+ * Unmap buffer.
+ *
+ * If the buffer was mapped with PIPE_BUFFER_USAGE_CPU_WRITE flag but not
+ * PIPE_BUFFER_USAGE_FLUSH_EXPLICIT then the pipe driver will
+ * assume that the whole buffer was written. This is mostly for backward
+ * compatibility purposes and may affect performance -- the state tracker
+ * should always specify exactly what got written while the buffer was
+ * mapped.
+ */
+ void (*buffer_unmap)( struct pipe_screen *screen,
+ struct pipe_buffer *buf );
+
+ void (*buffer_destroy)( struct pipe_buffer *buf );
+
+
+ /**
+ * Do any special operations to ensure frontbuffer contents are
+ * displayed, e.g. copy the fake frontbuffer.
+ */
+ void (*flush_frontbuffer)( struct pipe_screen *screen,
+ struct pipe_surface *surf,
+ void *context_private );
+
+
+
+ /** Set ptr = fence, with reference counting */
+ void (*fence_reference)( struct pipe_screen *screen,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence );
+
+ /**
+ * Checks whether the fence has been signalled.
+ * \param flags driver-specific meaning
+ * \return zero on success.
+ */
+ int (*fence_signalled)( struct pipe_screen *screen,
+ struct pipe_fence_handle *fence,
+ unsigned flags );
+
+ /**
+ * Wait for the fence to finish.
+ * \param flags driver-specific meaning
+ * \return zero on success.
+ */
+ int (*fence_finish)( struct pipe_screen *screen,
+ struct pipe_fence_handle *fence,
+ unsigned flags );
+
+};
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* P_SCREEN_H */
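
A sketch of how a state tracker might use the context-independent interface above; the PIPE_CAP_ and PIPE_TEXTURE_ tokens are assumed to come from p_defines.h of this era, and the function name is made up:

#include "pipe/p_defines.h"
#include "pipe/p_screen.h"

static boolean
example_probe(struct pipe_screen *screen)
{
   int levels = screen->get_param(screen, PIPE_CAP_MAX_TEXTURE_2D_LEVELS);

   boolean ok = screen->is_format_supported(screen,
                                            PIPE_FORMAT_A8R8G8B8_UNORM,
                                            PIPE_TEXTURE_2D,
                                            PIPE_TEXTURE_USAGE_RENDER_TARGET,
                                            0 /* geom_flags */);

   /* 12 mipmap levels means a maximum size of 2048x2048. */
   return ok && levels >= 12;
}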
diff --git a/src/gallium/include/pipe/p_shader_tokens.h b/src/gallium/include/pipe/p_shader_tokens.h
new file mode 100644
index 0000000000..5fa6c9af30
--- /dev/null
+++ b/src/gallium/include/pipe/p_shader_tokens.h
@@ -0,0 +1,671 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TGSI_TOKEN_H
+#define TGSI_TOKEN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "p_compiler.h"
+
+struct tgsi_version
+{
+ unsigned MajorVersion : 8;
+ unsigned MinorVersion : 8;
+ unsigned Padding : 16;
+};
+
+struct tgsi_header
+{
+ unsigned HeaderSize : 8;
+ unsigned BodySize : 24;
+};
+
+#define TGSI_PROCESSOR_FRAGMENT 0
+#define TGSI_PROCESSOR_VERTEX 1
+#define TGSI_PROCESSOR_GEOMETRY 2
+
+struct tgsi_processor
+{
+ unsigned Processor : 4; /* TGSI_PROCESSOR_ */
+ unsigned Padding : 28;
+};
+
+#define TGSI_TOKEN_TYPE_DECLARATION 0
+#define TGSI_TOKEN_TYPE_IMMEDIATE 1
+#define TGSI_TOKEN_TYPE_INSTRUCTION 2
+
+struct tgsi_token
+{
+ unsigned Type : 4; /**< TGSI_TOKEN_TYPE_x */
+ unsigned NrTokens : 8; /**< UINT */
+ unsigned Padding : 19;
+ unsigned Extended : 1; /**< BOOL */
+};
+
+enum tgsi_file_type {
+ TGSI_FILE_NULL =0,
+ TGSI_FILE_CONSTANT =1,
+ TGSI_FILE_INPUT =2,
+ TGSI_FILE_OUTPUT =3,
+ TGSI_FILE_TEMPORARY =4,
+ TGSI_FILE_SAMPLER =5,
+ TGSI_FILE_ADDRESS =6,
+ TGSI_FILE_IMMEDIATE =7,
+ TGSI_FILE_LOOP =8,
+ TGSI_FILE_COUNT /**< how many TGSI_FILE_ types */
+};
+
+
+#define TGSI_WRITEMASK_NONE 0x00
+#define TGSI_WRITEMASK_X 0x01
+#define TGSI_WRITEMASK_Y 0x02
+#define TGSI_WRITEMASK_XY 0x03
+#define TGSI_WRITEMASK_Z 0x04
+#define TGSI_WRITEMASK_XZ 0x05
+#define TGSI_WRITEMASK_YZ 0x06
+#define TGSI_WRITEMASK_XYZ 0x07
+#define TGSI_WRITEMASK_W 0x08
+#define TGSI_WRITEMASK_XW 0x09
+#define TGSI_WRITEMASK_YW 0x0A
+#define TGSI_WRITEMASK_XYW 0x0B
+#define TGSI_WRITEMASK_ZW 0x0C
+#define TGSI_WRITEMASK_XZW 0x0D
+#define TGSI_WRITEMASK_YZW 0x0E
+#define TGSI_WRITEMASK_XYZW 0x0F
+
+#define TGSI_INTERPOLATE_CONSTANT 0
+#define TGSI_INTERPOLATE_LINEAR 1
+#define TGSI_INTERPOLATE_PERSPECTIVE 2
+#define TGSI_INTERPOLATE_COUNT 3
+
+struct tgsi_declaration
+{
+ unsigned Type : 4; /**< TGSI_TOKEN_TYPE_DECLARATION */
+ unsigned NrTokens : 8; /**< UINT */
+ unsigned File : 4; /**< one of TGSI_FILE_x */
+ unsigned UsageMask : 4; /**< bitmask of TGSI_WRITEMASK_x flags */
+ unsigned Interpolate : 4; /**< one of TGSI_INTERPOLATE_x */
+ unsigned Semantic : 1; /**< BOOL, any semantic info? */
+ unsigned Centroid : 1; /**< centroid sampling? */
+ unsigned Invariant : 1; /**< invariant optimization? */
+ unsigned Padding : 4;
+ unsigned Extended : 1; /**< BOOL */
+};
+
+struct tgsi_declaration_range
+{
+ unsigned First : 16; /**< UINT */
+ unsigned Last : 16; /**< UINT */
+};
+
+#define TGSI_SEMANTIC_POSITION 0
+#define TGSI_SEMANTIC_COLOR 1
+#define TGSI_SEMANTIC_BCOLOR 2 /**< back-face color */
+#define TGSI_SEMANTIC_FOG 3
+#define TGSI_SEMANTIC_PSIZE 4
+#define TGSI_SEMANTIC_GENERIC 5
+#define TGSI_SEMANTIC_NORMAL 6
+#define TGSI_SEMANTIC_FACE 7
+#define TGSI_SEMANTIC_COUNT 8 /**< number of semantic values */
+
+struct tgsi_declaration_semantic
+{
+ unsigned SemanticName : 8; /**< one of TGSI_SEMANTIC_x */
+ unsigned SemanticIndex : 16; /**< UINT */
+ unsigned Padding : 8;
+};
+
+#define TGSI_IMM_FLOAT32 0
+
+struct tgsi_immediate
+{
+ unsigned Type : 4; /**< TGSI_TOKEN_TYPE_IMMEDIATE */
+ unsigned NrTokens : 8; /**< UINT */
+ unsigned DataType : 4; /**< one of TGSI_IMM_x */
+ unsigned Padding : 15;
+ unsigned Extended : 1; /**< BOOL */
+};
+
+union tgsi_immediate_data
+{
+ float Float;
+};
+
+/* TGSI opcodes.
+ *
+ * For more information on semantics of opcodes and
+ * which APIs are known to use which opcodes, see
+ * auxiliary/tgsi/tgsi-instruction-set.txt
+ */
+#define TGSI_OPCODE_ARL 0
+#define TGSI_OPCODE_MOV 1
+#define TGSI_OPCODE_LIT 2
+#define TGSI_OPCODE_RCP 3
+#define TGSI_OPCODE_RSQ 4
+#define TGSI_OPCODE_EXP 5
+#define TGSI_OPCODE_LOG 6
+#define TGSI_OPCODE_MUL 7
+#define TGSI_OPCODE_ADD 8
+#define TGSI_OPCODE_DP3 9
+#define TGSI_OPCODE_DP4 10
+#define TGSI_OPCODE_DST 11
+#define TGSI_OPCODE_MIN 12
+#define TGSI_OPCODE_MAX 13
+#define TGSI_OPCODE_SLT 14
+#define TGSI_OPCODE_SGE 15
+#define TGSI_OPCODE_MAD 16
+#define TGSI_OPCODE_SUB 17
+#define TGSI_OPCODE_LRP 18
+#define TGSI_OPCODE_CND 19
+ /* gap */
+#define TGSI_OPCODE_DP2A 21
+ /* gap */
+#define TGSI_OPCODE_FRC 24
+#define TGSI_OPCODE_CLAMP 25
+#define TGSI_OPCODE_FLR 26
+#define TGSI_OPCODE_ROUND 27
+#define TGSI_OPCODE_EX2 28
+#define TGSI_OPCODE_LG2 29
+#define TGSI_OPCODE_POW 30
+#define TGSI_OPCODE_XPD 31
+ /* gap */
+#define TGSI_OPCODE_ABS 33
+#define TGSI_OPCODE_RCC 34
+#define TGSI_OPCODE_DPH 35
+#define TGSI_OPCODE_COS 36
+#define TGSI_OPCODE_DDX 37
+#define TGSI_OPCODE_DDY 38
+#define TGSI_OPCODE_KILP 39 /* predicated kill */
+#define TGSI_OPCODE_PK2H 40
+#define TGSI_OPCODE_PK2US 41
+#define TGSI_OPCODE_PK4B 42
+#define TGSI_OPCODE_PK4UB 43
+#define TGSI_OPCODE_RFL 44
+#define TGSI_OPCODE_SEQ 45
+#define TGSI_OPCODE_SFL 46
+#define TGSI_OPCODE_SGT 47
+#define TGSI_OPCODE_SIN 48
+#define TGSI_OPCODE_SLE 49
+#define TGSI_OPCODE_SNE 50
+#define TGSI_OPCODE_STR 51
+#define TGSI_OPCODE_TEX 52
+#define TGSI_OPCODE_TXD 53
+#define TGSI_OPCODE_TXP 54
+#define TGSI_OPCODE_UP2H 55
+#define TGSI_OPCODE_UP2US 56
+#define TGSI_OPCODE_UP4B 57
+#define TGSI_OPCODE_UP4UB 58
+#define TGSI_OPCODE_X2D 59
+#define TGSI_OPCODE_ARA 60
+#define TGSI_OPCODE_ARR 61
+#define TGSI_OPCODE_BRA 62
+#define TGSI_OPCODE_CAL 63
+#define TGSI_OPCODE_RET 64
+#define TGSI_OPCODE_SSG 65 /* SGN */
+#define TGSI_OPCODE_CMP 66
+#define TGSI_OPCODE_SCS 67
+#define TGSI_OPCODE_TXB 68
+#define TGSI_OPCODE_NRM 69
+#define TGSI_OPCODE_DIV 70
+#define TGSI_OPCODE_DP2 71
+#define TGSI_OPCODE_TXL 72
+#define TGSI_OPCODE_BRK 73
+#define TGSI_OPCODE_IF 74
+#define TGSI_OPCODE_BGNFOR 75
+#define TGSI_OPCODE_REP 76
+#define TGSI_OPCODE_ELSE 77
+#define TGSI_OPCODE_ENDIF 78
+#define TGSI_OPCODE_ENDFOR 79
+#define TGSI_OPCODE_ENDREP 80
+#define TGSI_OPCODE_PUSHA 81
+#define TGSI_OPCODE_POPA 82
+#define TGSI_OPCODE_CEIL 83
+#define TGSI_OPCODE_I2F 84
+#define TGSI_OPCODE_NOT 85
+#define TGSI_OPCODE_TRUNC 86
+#define TGSI_OPCODE_SHL 87
+#define TGSI_OPCODE_SHR 88
+#define TGSI_OPCODE_AND 89
+#define TGSI_OPCODE_OR 90
+#define TGSI_OPCODE_MOD 91
+#define TGSI_OPCODE_XOR 92
+#define TGSI_OPCODE_SAD 93
+#define TGSI_OPCODE_TXF 94
+#define TGSI_OPCODE_TXQ 95
+#define TGSI_OPCODE_CONT 96
+#define TGSI_OPCODE_EMIT 97
+#define TGSI_OPCODE_ENDPRIM 98
+#define TGSI_OPCODE_BGNLOOP 99
+#define TGSI_OPCODE_BGNSUB 100
+#define TGSI_OPCODE_ENDLOOP 101
+#define TGSI_OPCODE_ENDSUB 102
+#define TGSI_OPCODE_NOISE1 103
+#define TGSI_OPCODE_NOISE2 104
+#define TGSI_OPCODE_NOISE3 105
+#define TGSI_OPCODE_NOISE4 106
+#define TGSI_OPCODE_NOP 107
+ /* gap */
+#define TGSI_OPCODE_NRM4 112
+#define TGSI_OPCODE_CALLNZ 113
+#define TGSI_OPCODE_IFC 114
+#define TGSI_OPCODE_BREAKC 115
+#define TGSI_OPCODE_KIL 116 /* conditional kill */
+#define TGSI_OPCODE_END 117 /* aka HALT */
+#define TGSI_OPCODE_SWZ 118
+#define TGSI_OPCODE_LAST 119
+
+#define TGSI_SAT_NONE 0 /* do not saturate */
+#define TGSI_SAT_ZERO_ONE 1 /* clamp to [0,1] */
+#define TGSI_SAT_MINUS_PLUS_ONE 2 /* clamp to [-1,1] */
+
+/**
+ * Opcode is the operation code to execute. A given operation defines the
+ * semantics of how the source registers (if any) are interpreted and what is
+ * written to the destination registers (if any) as a result of execution.
+ *
+ * NumDstRegs and NumSrcRegs are the number of destination and source registers,
+ * respectively. For a given operation code, those numbers are fixed and are
+ * present here only for convenience.
+ *
+ * If Extended is TRUE, a tgsi_instruction_ext token follows this instruction.
+ *
+ * Saturate controls how the final results in the destination registers are
+ * modified.
+ */
+
+struct tgsi_instruction
+{
+ unsigned Type : 4; /* TGSI_TOKEN_TYPE_INSTRUCTION */
+ unsigned NrTokens : 8; /* UINT */
+ unsigned Opcode : 8; /* TGSI_OPCODE_ */
+ unsigned Saturate : 2; /* TGSI_SAT_ */
+ unsigned NumDstRegs : 2; /* UINT */
+ unsigned NumSrcRegs : 4; /* UINT */
+ unsigned Padding : 3;
+ unsigned Extended : 1; /* BOOL */
+};
+
+/*
+ * If tgsi_instruction::Extended is TRUE, tgsi_instruction_ext follows.
+ *
+ * Then, tgsi_instruction::NumDstRegs of tgsi_dst_register follow.
+ *
+ * Then, tgsi_instruction::NumSrcRegs of tgsi_src_register follow.
+ *
+ * tgsi_instruction::NrTokens contains the total number of words that make the
+ * instruction, including the instruction word.
+ */
+
+#define TGSI_INSTRUCTION_EXT_TYPE_NV 0
+#define TGSI_INSTRUCTION_EXT_TYPE_LABEL 1
+#define TGSI_INSTRUCTION_EXT_TYPE_TEXTURE 2
+#define TGSI_INSTRUCTION_EXT_TYPE_PREDICATE 3
+
+struct tgsi_instruction_ext
+{
+ unsigned Type : 4; /* TGSI_INSTRUCTION_EXT_TYPE_ */
+ unsigned Padding : 27;
+ unsigned Extended : 1; /* BOOL */
+};
+
+/*
+ * If tgsi_instruction_ext::Type is TGSI_INSTRUCTION_EXT_TYPE_NV, it should
+ * be cast to tgsi_instruction_ext_nv.
+ *
+ * If tgsi_instruction_ext::Type is TGSI_INSTRUCTION_EXT_TYPE_LABEL, it
+ * should be cast to tgsi_instruction_ext_label.
+ *
+ * If tgsi_instruction_ext::Type is TGSI_INSTRUCTION_EXT_TYPE_TEXTURE, it
+ * should be cast to tgsi_instruction_ext_texture.
+ *
+ * If tgsi_instruction_ext::Type is TGSI_INSTRUCTION_EXT_TYPE_PREDICATE, it
+ * should be cast to tgsi_instruction_ext_predicate.
+ *
+ * If tgsi_instruction_ext::Extended is TRUE, another tgsi_instruction_ext
+ * follows.
+ */
+
+#define TGSI_PRECISION_DEFAULT 0
+#define TGSI_PRECISION_FLOAT32 1
+#define TGSI_PRECISION_FLOAT16 2
+#define TGSI_PRECISION_FIXED12 3
+
+#define TGSI_CC_GT 0
+#define TGSI_CC_EQ 1
+#define TGSI_CC_LT 2
+#define TGSI_CC_GE 3
+#define TGSI_CC_LE 4
+#define TGSI_CC_NE 5
+#define TGSI_CC_TR 6
+#define TGSI_CC_FL 7
+
+#define TGSI_SWIZZLE_X 0
+#define TGSI_SWIZZLE_Y 1
+#define TGSI_SWIZZLE_Z 2
+#define TGSI_SWIZZLE_W 3
+
+/**
+ * Precision controls the precision at which the operation should be executed.
+ *
+ * CondDstUpdate enables condition code register writes. When this field is
+ * TRUE, CondDstIndex specifies the index of the condition code register to
+ * update.
+ *
+ * CondFlowEnable enables conditional execution of the operation. When this
+ * field is TRUE, CondFlowIndex specifies the index of the condition code
+ * register to test against CondMask with component swizzle controlled by
+ * CondSwizzleX, CondSwizzleY, CondSwizzleZ and CondSwizzleW. If the test fails,
+ * the operation is not executed.
+ */
+
+struct tgsi_instruction_ext_nv
+{
+ unsigned Type : 4; /* TGSI_INSTRUCTION_EXT_TYPE_NV */
+ unsigned Precision : 4; /* TGSI_PRECISION_ */
+ unsigned CondDstIndex : 4; /* UINT */
+ unsigned CondFlowIndex : 4; /* UINT */
+ unsigned CondMask : 4; /* TGSI_CC_ */
+ unsigned CondSwizzleX : 2; /* TGSI_SWIZZLE_ */
+ unsigned CondSwizzleY : 2; /* TGSI_SWIZZLE_ */
+ unsigned CondSwizzleZ : 2; /* TGSI_SWIZZLE_ */
+ unsigned CondSwizzleW : 2; /* TGSI_SWIZZLE_ */
+ unsigned CondDstUpdate : 1; /* BOOL */
+ unsigned CondFlowEnable : 1; /* BOOL */
+ unsigned Padding : 1;
+ unsigned Extended : 1; /* BOOL */
+};
+
+struct tgsi_instruction_ext_label
+{
+ unsigned Type : 4; /* TGSI_INSTRUCTION_EXT_TYPE_LABEL */
+ unsigned Label : 24; /* UINT */
+ unsigned Padding : 3;
+ unsigned Extended : 1; /* BOOL */
+};
+
+#define TGSI_TEXTURE_UNKNOWN 0
+#define TGSI_TEXTURE_1D 1
+#define TGSI_TEXTURE_2D 2
+#define TGSI_TEXTURE_3D 3
+#define TGSI_TEXTURE_CUBE 4
+#define TGSI_TEXTURE_RECT 5
+#define TGSI_TEXTURE_SHADOW1D 6
+#define TGSI_TEXTURE_SHADOW2D 7
+#define TGSI_TEXTURE_SHADOWRECT 8
+#define TGSI_TEXTURE_COUNT 9
+
+struct tgsi_instruction_ext_texture
+{
+ unsigned Type : 4; /* TGSI_INSTRUCTION_EXT_TYPE_TEXTURE */
+ unsigned Texture : 8; /* TGSI_TEXTURE_ */
+ unsigned Padding : 19;
+ unsigned Extended : 1; /* BOOL */
+};
+
+struct tgsi_instruction_ext_predicate
+{
+ unsigned Type : 4; /* TGSI_INSTRUCTION_EXT_TYPE_PREDICATE */
+ unsigned PredDstIndex : 4; /* UINT */
+ unsigned PredWriteMask : 4; /* TGSI_WRITEMASK_ */
+ unsigned Padding : 19;
+ unsigned Extended : 1; /* BOOL */
+};
+
+/**
+ * File specifies the register array to access.
+ *
+ * Index specifies the element number of a register in the register file.
+ *
+ * If Indirect is TRUE, Index should be offset by the X component of a source
+ * register that follows. The register can now be fetched into local storage
+ * for further processing.
+ *
+ * If Negate is TRUE, all components of the fetched register are negated.
+ *
+ * The fetched register components are swizzled according to SwizzleX, SwizzleY,
+ * SwizzleZ and SwizzleW.
+ *
+ * If Extended is TRUE, any further modifications to the source register are
+ * made to this temporary storage.
+ */
+
+struct tgsi_src_register
+{
+ unsigned File : 4; /* TGSI_FILE_ */
+ unsigned SwizzleX : 2; /* TGSI_SWIZZLE_ */
+ unsigned SwizzleY : 2; /* TGSI_SWIZZLE_ */
+ unsigned SwizzleZ : 2; /* TGSI_SWIZZLE_ */
+ unsigned SwizzleW : 2; /* TGSI_SWIZZLE_ */
+ unsigned Negate : 1; /* BOOL */
+ unsigned Indirect : 1; /* BOOL */
+ unsigned Dimension : 1; /* BOOL */
+ int Index : 16; /* SINT */
+ unsigned Extended : 1; /* BOOL */
+};
+
+/**
+ * If tgsi_src_register::Extended is TRUE, tgsi_src_register_ext follows.
+ *
+ * Then, if tgsi_src_register::Indirect is TRUE, another tgsi_src_register
+ * follows.
+ *
+ * Then, if tgsi_src_register::Dimension is TRUE, tgsi_dimension follows.
+ */
+
+#define TGSI_SRC_REGISTER_EXT_TYPE_SWZ 0
+#define TGSI_SRC_REGISTER_EXT_TYPE_MOD 1
+
+struct tgsi_src_register_ext
+{
+ unsigned Type : 4; /* TGSI_SRC_REGISTER_EXT_TYPE_ */
+ unsigned Padding : 27;
+ unsigned Extended : 1; /* BOOL */
+};
+
+/**
+ * If tgsi_src_register_ext::Type is TGSI_SRC_REGISTER_EXT_TYPE_SWZ,
+ * it should be cast to tgsi_src_register_ext_swz.
+ *
+ * If tgsi_src_register_ext::Type is TGSI_SRC_REGISTER_EXT_TYPE_MOD,
+ * it should be cast to tgsi_src_register_ext_mod.
+ *
+ * If tgsi_src_register_ext::Extended is TRUE, another tgsi_src_register_ext
+ * follows.
+ */
+
+#define TGSI_EXTSWIZZLE_X TGSI_SWIZZLE_X
+#define TGSI_EXTSWIZZLE_Y TGSI_SWIZZLE_Y
+#define TGSI_EXTSWIZZLE_Z TGSI_SWIZZLE_Z
+#define TGSI_EXTSWIZZLE_W TGSI_SWIZZLE_W
+#define TGSI_EXTSWIZZLE_ZERO 4
+#define TGSI_EXTSWIZZLE_ONE 5
+
+/**
+ * ExtSwizzleX, ExtSwizzleY, ExtSwizzleZ and ExtSwizzleW swizzle the source
+ * register in an extended manner.
+ *
+ * NegateX, NegateY, NegateZ and NegateW negate individual components of the
+ * source register.
+ *
+ * NOTE: To simplify matters, if this token is present, the corresponding Swizzle
+ * and Negate fields in tgsi_src_register should be set to X,Y,Z,W
+ * and FALSE, respectively.
+ */
+
+struct tgsi_src_register_ext_swz
+{
+ unsigned Type : 4; /* TGSI_SRC_REGISTER_EXT_TYPE_SWZ */
+ unsigned ExtSwizzleX : 4; /* TGSI_EXTSWIZZLE_ */
+ unsigned ExtSwizzleY : 4; /* TGSI_EXTSWIZZLE_ */
+ unsigned ExtSwizzleZ : 4; /* TGSI_EXTSWIZZLE_ */
+ unsigned ExtSwizzleW : 4; /* TGSI_EXTSWIZZLE_ */
+ unsigned NegateX : 1; /* BOOL */
+ unsigned NegateY : 1; /* BOOL */
+ unsigned NegateZ : 1; /* BOOL */
+ unsigned NegateW : 1; /* BOOL */
+ unsigned Padding : 7;
+ unsigned Extended : 1; /* BOOL */
+};
+
+/**
+ * Extra src register modifiers
+ *
+ * If Complement is TRUE, the source register is modified by subtracting it
+ * from 1.0.
+ *
+ * If Bias is TRUE, the source register is modified by subtracting 0.5 from it.
+ *
+ * If Scale2X is TRUE, the source register is modified by multiplying it by 2.0.
+ *
+ * If Absolute is TRUE, the source register is modified by removing the sign.
+ *
+ * If Negate is TRUE, the source register is modified by negating it.
+ */
+
+struct tgsi_src_register_ext_mod
+{
+ unsigned Type : 4; /* TGSI_SRC_REGISTER_EXT_TYPE_MOD */
+ unsigned Complement : 1; /* BOOL */
+ unsigned Bias : 1; /* BOOL */
+ unsigned Scale2X : 1; /* BOOL */
+ unsigned Absolute : 1; /* BOOL */
+ unsigned Negate : 1; /* BOOL */
+ unsigned Padding : 22;
+ unsigned Extended : 1; /* BOOL */
+};
+
+struct tgsi_dimension
+{
+ unsigned Indirect : 1; /* BOOL */
+ unsigned Dimension : 1; /* BOOL */
+ unsigned Padding : 13;
+ int Index : 16; /* SINT */
+ unsigned Extended : 1; /* BOOL */
+};
+
+struct tgsi_dst_register
+{
+ unsigned File : 4; /* TGSI_FILE_ */
+ unsigned WriteMask : 4; /* TGSI_WRITEMASK_ */
+ unsigned Indirect : 1; /* BOOL */
+ unsigned Dimension : 1; /* BOOL */
+ int Index : 16; /* SINT */
+ unsigned Padding : 5;
+ unsigned Extended : 1; /* BOOL */
+};
+
+/*
+ * If tgsi_dst_register::Extended is TRUE, tgsi_dst_register_ext follows.
+ *
+ * Then, if tgsi_dst_register::Indirect is TRUE, tgsi_src_register follows.
+ */
+
+#define TGSI_DST_REGISTER_EXT_TYPE_CONDCODE 0
+#define TGSI_DST_REGISTER_EXT_TYPE_MODULATE 1
+#define TGSI_DST_REGISTER_EXT_TYPE_PREDICATE 2
+
+struct tgsi_dst_register_ext
+{
+ unsigned Type : 4; /* TGSI_DST_REGISTER_EXT_TYPE_ */
+ unsigned Padding : 27;
+ unsigned Extended : 1; /* BOOL */
+};
+
+/**
+ * Extra destination register modifiers
+ *
+ * If tgsi_dst_register_ext::Type is TGSI_DST_REGISTER_EXT_TYPE_CONDCODE,
+ * it should be cast to tgsi_dst_register_ext_condcode.
+ *
+ * If tgsi_dst_register_ext::Type is TGSI_DST_REGISTER_EXT_TYPE_MODULATE,
+ * it should be cast to tgsi_dst_register_ext_modulate.
+ *
+ * If tgsi_dst_register_ext::Type is TGSI_DST_REGISTER_EXT_TYPE_PREDICATE,
+ * it should be cast to tgsi_dst_register_ext_predicate.
+ *
+ * If tgsi_dst_register_ext::Extended is TRUE, another tgsi_dst_register_ext
+ * follows.
+ */
+struct tgsi_dst_register_ext_concode
+{
+ unsigned Type : 4; /* TGSI_DST_REGISTER_EXT_TYPE_CONDCODE */
+ unsigned CondMask : 4; /* TGSI_CC_ */
+ unsigned CondSwizzleX : 2; /* TGSI_SWIZZLE_ */
+ unsigned CondSwizzleY : 2; /* TGSI_SWIZZLE_ */
+ unsigned CondSwizzleZ : 2; /* TGSI_SWIZZLE_ */
+ unsigned CondSwizzleW : 2; /* TGSI_SWIZZLE_ */
+ unsigned CondSrcIndex : 4; /* UINT */
+ unsigned Padding : 11;
+ unsigned Extended : 1; /* BOOL */
+};
+
+#define TGSI_MODULATE_1X 0
+#define TGSI_MODULATE_2X 1
+#define TGSI_MODULATE_4X 2
+#define TGSI_MODULATE_8X 3
+#define TGSI_MODULATE_HALF 4
+#define TGSI_MODULATE_QUARTER 5
+#define TGSI_MODULATE_EIGHTH 6
+#define TGSI_MODULATE_COUNT 7
+
+struct tgsi_dst_register_ext_modulate
+{
+ unsigned Type : 4; /* TGSI_DST_REGISTER_EXT_TYPE_MODULATE */
+ unsigned Modulate : 4; /* TGSI_MODULATE_ */
+ unsigned Padding : 23;
+ unsigned Extended : 1; /* BOOL */
+};
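
The scale factor that each TGSI_MODULATE_ value denotes, inferred from the enum names (a hedged sketch; the header does not state the mapping explicitly):

static float
modulate_factor(unsigned modulate)
{
   switch (modulate) {
   case TGSI_MODULATE_2X:      return 2.0f;
   case TGSI_MODULATE_4X:      return 4.0f;
   case TGSI_MODULATE_8X:      return 8.0f;
   case TGSI_MODULATE_HALF:    return 0.5f;
   case TGSI_MODULATE_QUARTER: return 0.25f;
   case TGSI_MODULATE_EIGHTH:  return 0.125f;
   case TGSI_MODULATE_1X:
   default:                    return 1.0f;
   }
}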
+
+/*
+ * Currently, the following constraints apply.
+ *
+ * - PredSwizzleXYZW is either set to identity or replicate.
+ * - PredSrcIndex is 0.
+ */
+
+struct tgsi_dst_register_ext_predicate
+{
+ unsigned Type : 4; /* TGSI_DST_REGISTER_EXT_TYPE_PREDICATE */
+ unsigned PredSwizzleX : 2; /* TGSI_SWIZZLE_ */
+ unsigned PredSwizzleY : 2; /* TGSI_SWIZZLE_ */
+ unsigned PredSwizzleZ : 2; /* TGSI_SWIZZLE_ */
+ unsigned PredSwizzleW : 2; /* TGSI_SWIZZLE_ */
+ unsigned PredSrcIndex : 4; /* UINT */
+ unsigned Negate : 1; /* BOOL */
+ unsigned Padding : 14;
+ unsigned Extended : 1; /* BOOL */
+};
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* TGSI_TOKEN_H */
+
diff --git a/src/gallium/include/pipe/p_state.h b/src/gallium/include/pipe/p_state.h
new file mode 100644
index 0000000000..626bedb35a
--- /dev/null
+++ b/src/gallium/include/pipe/p_state.h
@@ -0,0 +1,412 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * @file
+ *
+ * Abstract graphics pipe state objects.
+ *
+ * Basic notes:
+ * 1. Want compact representations, so we use bitfields.
+ * 2. Put bitfields before other (GLfloat) fields.
+ */
+
+
+#ifndef PIPE_STATE_H
+#define PIPE_STATE_H
+
+#include "p_compiler.h"
+#include "p_defines.h"
+#include "p_format.h"
+#include "p_refcnt.h"
+#include "p_screen.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Implementation limits
+ */
+#define PIPE_MAX_ATTRIBS 32
+#define PIPE_MAX_CLIP_PLANES 6
+#define PIPE_MAX_COLOR_BUFS 8
+#define PIPE_MAX_CONSTANT 32
+#define PIPE_MAX_SAMPLERS 16
+#define PIPE_MAX_SHADER_INPUTS 16
+#define PIPE_MAX_SHADER_OUTPUTS 16
+#define PIPE_MAX_TEXTURE_LEVELS 16
+
+
+/* fwd decls */
+struct pipe_surface;
+
+
+/**
+ * The driver will certainly subclass this to include actual memory
+ * management information.
+ */
+struct pipe_buffer
+{
+ struct pipe_reference reference;
+ unsigned size;
+ struct pipe_screen *screen;
+ unsigned alignment;
+ unsigned usage;
+};
+
+
+/**
+ * Primitive (point/line/tri) rasterization info
+ */
+struct pipe_rasterizer_state
+{
+ unsigned flatshade:1;
+ unsigned light_twoside:1;
+ unsigned front_winding:2; /**< PIPE_WINDING_x */
+ unsigned cull_mode:2; /**< PIPE_WINDING_x */
+ unsigned fill_cw:2; /**< PIPE_POLYGON_MODE_x */
+ unsigned fill_ccw:2; /**< PIPE_POLYGON_MODE_x */
+ unsigned offset_cw:1;
+ unsigned offset_ccw:1;
+ unsigned scissor:1;
+ unsigned poly_smooth:1;
+ unsigned poly_stipple_enable:1;
+ unsigned point_smooth:1;
+ unsigned point_sprite:1;
+ unsigned point_size_per_vertex:1; /**< size computed in vertex shader */
+ unsigned multisample:1; /* XXX maybe more ms state in future */
+ unsigned line_smooth:1;
+ unsigned line_stipple_enable:1;
+ unsigned line_stipple_factor:8; /**< [1..256] actually */
+ unsigned line_stipple_pattern:16;
+ unsigned line_last_pixel:1;
+
+ /**
+ * Vertex coordinates are pre-transformed to screen space. Skip
+ * the vertex shader, clipping and viewport processing. Note that
+ * a vertex shader is still needed though, to indicate the mapping
+ * from vertex elements to fragment shader input semantics.
+ */
+ unsigned bypass_vs_clip_and_viewport:1;
+
+ unsigned flatshade_first:1; /**< take color attribute from the first vertex of a primitive */
+ unsigned gl_rasterization_rules:1; /**< enable tweaks for GL rasterization? */
+
+ float line_width;
+ float point_size; /**< used when no per-vertex size */
+ float point_size_min; /* XXX - temporary, will go away */
+ float point_size_max; /* XXX - temporary, will go away */
+ float offset_units;
+ float offset_scale;
+ ubyte sprite_coord_mode[PIPE_MAX_SHADER_OUTPUTS]; /**< PIPE_SPRITE_COORD_ */
+};
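
A hedged sketch of filling this struct for ordinary filled, back-face-culled GL-style rendering; the PIPE_WINDING_* and PIPE_POLYGON_MODE_* constants are assumed to come from p_defines.h:

#include <string.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"

static void
init_rasterizer(struct pipe_rasterizer_state *rast)
{
   memset(rast, 0, sizeof *rast);
   rast->front_winding = PIPE_WINDING_CCW;   /* CCW faces are front-facing */
   rast->cull_mode = PIPE_WINDING_CW;        /* cull back (CW) faces */
   rast->fill_cw = PIPE_POLYGON_MODE_FILL;
   rast->fill_ccw = PIPE_POLYGON_MODE_FILL;
   rast->gl_rasterization_rules = 1;
   rast->line_width = 1.0f;
   rast->point_size = 1.0f;
}

The filled-in struct would typically be handed to the context's create/bind rasterizer-state hooks (or the cso_context helpers) rather than used directly.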
+
+
+struct pipe_poly_stipple
+{
+ unsigned stipple[32];
+};
+
+
+struct pipe_viewport_state
+{
+ float scale[4];
+ float translate[4];
+};
+
+
+struct pipe_scissor_state
+{
+ unsigned minx:16;
+ unsigned miny:16;
+ unsigned maxx:16;
+ unsigned maxy:16;
+};
+
+
+struct pipe_clip_state
+{
+ float ucp[PIPE_MAX_CLIP_PLANES][4];
+ unsigned nr;
+};
+
+
+/**
+ * Constants for vertex/fragment shaders
+ */
+struct pipe_constant_buffer
+{
+ struct pipe_buffer *buffer;
+};
+
+
+struct pipe_shader_state
+{
+ const struct tgsi_token *tokens;
+};
+
+
+struct pipe_depth_state
+{
+ unsigned enabled:1; /**< depth test enabled? */
+ unsigned writemask:1; /**< allow depth buffer writes? */
+ unsigned func:3; /**< depth test func (PIPE_FUNC_x) */
+ unsigned occlusion_count:1; /**< do occlusion counting? */
+};
+
+
+struct pipe_stencil_state
+{
+ unsigned enabled:1; /**< stencil[0]: stencil enabled, stencil[1]: two-side enabled */
+ unsigned func:3; /**< PIPE_FUNC_x */
+ unsigned fail_op:3; /**< PIPE_STENCIL_OP_x */
+ unsigned zpass_op:3; /**< PIPE_STENCIL_OP_x */
+ unsigned zfail_op:3; /**< PIPE_STENCIL_OP_x */
+ ubyte ref_value;
+ ubyte valuemask;
+ ubyte writemask;
+};
+
+
+struct pipe_alpha_state
+{
+ unsigned enabled:1;
+ unsigned func:3; /**< PIPE_FUNC_x */
+ float ref_value; /**< reference value */
+};
+
+
+struct pipe_depth_stencil_alpha_state
+{
+ struct pipe_depth_state depth;
+ struct pipe_stencil_state stencil[2]; /**< [0] = front, [1] = back */
+ struct pipe_alpha_state alpha;
+};
+
+
+struct pipe_blend_state
+{
+ unsigned blend_enable:1;
+
+ unsigned rgb_func:3; /**< PIPE_BLEND_x */
+ unsigned rgb_src_factor:5; /**< PIPE_BLENDFACTOR_x */
+ unsigned rgb_dst_factor:5; /**< PIPE_BLENDFACTOR_x */
+
+ unsigned alpha_func:3; /**< PIPE_BLEND_x */
+ unsigned alpha_src_factor:5; /**< PIPE_BLENDFACTOR_x */
+ unsigned alpha_dst_factor:5; /**< PIPE_BLENDFACTOR_x */
+
+ unsigned logicop_enable:1;
+ unsigned logicop_func:4; /**< PIPE_LOGICOP_x */
+
+ unsigned colormask:4; /**< bitmask of PIPE_MASK_R/G/B/A */
+ unsigned dither:1;
+};
+
+
+struct pipe_blend_color
+{
+ float color[4];
+};
+
+
+struct pipe_framebuffer_state
+{
+ unsigned width, height;
+
+ /** multiple colorbuffers for multiple render targets */
+ unsigned nr_cbufs;
+ struct pipe_surface *cbufs[PIPE_MAX_COLOR_BUFS];
+
+ struct pipe_surface *zsbuf; /**< Z/stencil buffer */
+};
+
+
+/**
+ * Texture sampler state.
+ */
+struct pipe_sampler_state
+{
+ unsigned wrap_s:3; /**< PIPE_TEX_WRAP_x */
+ unsigned wrap_t:3; /**< PIPE_TEX_WRAP_x */
+ unsigned wrap_r:3; /**< PIPE_TEX_WRAP_x */
+ unsigned min_img_filter:2; /**< PIPE_TEX_FILTER_x */
+ unsigned min_mip_filter:2; /**< PIPE_TEX_MIPFILTER_x */
+ unsigned mag_img_filter:2; /**< PIPE_TEX_FILTER_x */
+ unsigned compare_mode:1; /**< PIPE_TEX_COMPARE_x */
+ unsigned compare_func:3; /**< PIPE_FUNC_x */
+ unsigned normalized_coords:1; /**< Are coords normalized to [0,1]? */
+   unsigned prefilter:4;      /**< Weird sampling state exposed by some APIs */
+ float lod_bias; /**< LOD/lambda bias */
+ float min_lod, max_lod; /**< LOD clamp range, after bias */
+ float border_color[4];
+ float max_anisotropy;
+};
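
A hedged sketch of a trilinear, repeat-wrapped sampler; the PIPE_TEX_* constants are assumed to come from p_defines.h:

#include <string.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"

static void
init_sampler(struct pipe_sampler_state *sampler)
{
   memset(sampler, 0, sizeof *sampler);
   sampler->wrap_s = PIPE_TEX_WRAP_REPEAT;
   sampler->wrap_t = PIPE_TEX_WRAP_REPEAT;
   sampler->wrap_r = PIPE_TEX_WRAP_REPEAT;
   sampler->min_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler->mag_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler->min_mip_filter = PIPE_TEX_MIPFILTER_LINEAR;  /* trilinear */
   sampler->normalized_coords = 1;
   sampler->min_lod = 0.0f;
   sampler->max_lod = 1000.0f;
   sampler->max_anisotropy = 1.0f;
}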
+
+
+/**
+ * 2D surface. This is basically a view into a memory buffer.
+ * May be a renderbuffer, texture mipmap level, etc.
+ */
+struct pipe_surface
+{
+ struct pipe_reference reference;
+ enum pipe_format format; /**< PIPE_FORMAT_x */
+ unsigned width; /**< logical width in pixels */
+ unsigned height; /**< logical height in pixels */
+ unsigned layout; /**< PIPE_SURFACE_LAYOUT_x */
+ unsigned offset; /**< offset from start of buffer, in bytes */
+ unsigned usage; /**< PIPE_BUFFER_USAGE_* */
+
+ unsigned zslice;
+ struct pipe_texture *texture; /**< texture into which this is a view */
+ unsigned face;
+ unsigned level;
+};
+
+
+/**
+ * Transfer object. For data transfer to/from a texture.
+ */
+struct pipe_transfer
+{
+ enum pipe_format format; /**< PIPE_FORMAT_x */
+ unsigned x; /**< x offset from start of texture image */
+ unsigned y; /**< y offset from start of texture image */
+ unsigned width; /**< logical width in pixels */
+ unsigned height; /**< logical height in pixels */
+ struct pipe_format_block block;
+ unsigned nblocksx; /**< allocated width in blocks */
+ unsigned nblocksy; /**< allocated height in blocks */
+ unsigned stride; /**< stride in bytes between rows of blocks */
+ unsigned usage; /**< PIPE_TRANSFER_* */
+
+ struct pipe_texture *texture; /**< texture to transfer to/from */
+ unsigned face;
+ unsigned level;
+ unsigned zslice;
+};
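
A sketch of addressing a texel inside a mapped transfer using the block/stride fields above (illustrative only; it assumes pipe_format_block carries size in bytes plus width/height in pixels, as defined in p_format.h):

static void *
transfer_texel_ptr(const struct pipe_transfer *xfer, void *map,
                   unsigned x, unsigned y)
{
   unsigned bx = x / xfer->block.width;    /* block column */
   unsigned by = y / xfer->block.height;   /* block row */

   return (char *) map + by * xfer->stride + bx * xfer->block.size;
}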
+
+
+/**
+ * Texture object.
+ */
+struct pipe_texture
+{
+ struct pipe_reference reference;
+
+ enum pipe_texture_target target; /**< PIPE_TEXTURE_x */
+ enum pipe_format format; /**< PIPE_FORMAT_x */
+
+ unsigned width[PIPE_MAX_TEXTURE_LEVELS];
+ unsigned height[PIPE_MAX_TEXTURE_LEVELS];
+ unsigned depth[PIPE_MAX_TEXTURE_LEVELS];
+
+ struct pipe_format_block block;
+ unsigned nblocksx[PIPE_MAX_TEXTURE_LEVELS]; /**< allocated width in blocks */
+ unsigned nblocksy[PIPE_MAX_TEXTURE_LEVELS]; /**< allocated height in blocks */
+
+ unsigned last_level:8; /**< Index of last mipmap level present/defined */
+
+ unsigned nr_samples:8; /**< for multisampled surfaces, nr of samples */
+
+ unsigned tex_usage; /* PIPE_TEXTURE_USAGE_* */
+
+ struct pipe_screen *screen; /**< screen that this texture belongs to */
+};
+
+
+/**
+ * A vertex buffer. Typically, all the vertex data/attributes for
+ * drawing something will be in one buffer. But it's also possible, for
+ * example, to put colors in one buffer and texcoords in another.
+ */
+struct pipe_vertex_buffer
+{
+ unsigned stride; /**< stride to same attrib in next vertex, in bytes */
+ unsigned max_index; /**< number of vertices in this buffer */
+ unsigned buffer_offset; /**< offset to start of data in buffer, in bytes */
+ struct pipe_buffer *buffer; /**< the actual buffer */
+};
+
+
+/**
+ * Information to describe a vertex attribute (position, color, etc)
+ */
+struct pipe_vertex_element
+{
+ /** Offset of this attribute, in bytes, from the start of the vertex */
+ unsigned src_offset;
+
+ /** Which vertex_buffer (as given to pipe->set_vertex_buffer()) does
+ * this attribute live in?
+ */
+ unsigned vertex_buffer_index:8;
+ unsigned nr_components:8;
+
+ enum pipe_format src_format; /**< PIPE_FORMAT_* */
+};
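
A hedged sketch of describing an interleaved position+color layout with one vertex buffer and two vertex elements. The set_vertex_buffers()/set_vertex_elements() entry points and PIPE_FORMAT_R32G32B32A32_FLOAT live in p_context.h/p_format.h and are assumed here:

#include <stddef.h>   /* offsetof */
#include <string.h>
#include "pipe/p_context.h"
#include "pipe/p_state.h"

struct my_vertex {
   float position[4];
   float color[4];
};

static void
setup_vertex_data(struct pipe_context *pipe, struct pipe_buffer *vbuf,
                  unsigned num_verts)
{
   struct pipe_vertex_buffer vb;
   struct pipe_vertex_element ve[2];

   memset(&vb, 0, sizeof vb);
   vb.stride = sizeof(struct my_vertex);  /* interleaved: one struct per vertex */
   vb.max_index = num_verts - 1;          /* highest addressable vertex */
   vb.buffer_offset = 0;
   vb.buffer = vbuf;

   memset(ve, 0, sizeof ve);
   ve[0].src_offset = offsetof(struct my_vertex, position);
   ve[0].vertex_buffer_index = 0;
   ve[0].nr_components = 4;
   ve[0].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;

   ve[1].src_offset = offsetof(struct my_vertex, color);
   ve[1].vertex_buffer_index = 0;
   ve[1].nr_components = 4;
   ve[1].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;

   pipe->set_vertex_buffers(pipe, 1, &vb);
   pipe->set_vertex_elements(pipe, 2, ve);
}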
+
+
+/* Reference counting helper functions */
+static INLINE void
+pipe_buffer_reference(struct pipe_buffer **ptr, struct pipe_buffer *buf)
+{
+ struct pipe_buffer *old_buf = *ptr;
+
+ if (pipe_reference((struct pipe_reference **)ptr, &buf->reference))
+ old_buf->screen->buffer_destroy(old_buf);
+}
+
+static INLINE void
+pipe_surface_reference(struct pipe_surface **ptr, struct pipe_surface *surf)
+{
+ struct pipe_surface *old_surf = *ptr;
+
+ if (pipe_reference((struct pipe_reference **)ptr, &surf->reference))
+ old_surf->texture->screen->tex_surface_destroy(old_surf);
+}
+
+static INLINE void
+pipe_texture_reference(struct pipe_texture **ptr, struct pipe_texture *tex)
+{
+ struct pipe_texture *old_tex = *ptr;
+
+ if (pipe_reference((struct pipe_reference **)ptr, &tex->reference))
+ old_tex->screen->texture_destroy(old_tex);
+}
+
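A short usage sketch for the helpers above (screen->texture_create and the template argument are outside this header and only assumed here): passing NULL as the new object releases a reference, and dropping the last reference destroys the object.

static void
texture_refcount_example(struct pipe_screen *screen,
                         const struct pipe_texture *templ)
{
   struct pipe_texture *tex = screen->texture_create(screen, templ);
   struct pipe_texture *extra = NULL;

   pipe_texture_reference(&extra, tex);   /* two references held */
   pipe_texture_reference(&tex, NULL);    /* drop the creation reference */
   pipe_texture_reference(&extra, NULL);  /* last reference: texture destroyed */
}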
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/gallium/include/pipe/p_thread.h b/src/gallium/include/pipe/p_thread.h
new file mode 100644
index 0000000000..b1606dc652
--- /dev/null
+++ b/src/gallium/include/pipe/p_thread.h
@@ -0,0 +1,279 @@
+/**************************************************************************
+ *
+ * Copyright 1999-2006 Brian Paul
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * @file
+ *
+ * Thread, mutex, condition var and thread-specific data functions.
+ */
+
+
+#ifndef _P_THREAD2_H_
+#define _P_THREAD2_H_
+
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h" /* for assert */
+
+
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) || defined(PIPE_OS_HAIKU)
+
+#include <pthread.h> /* POSIX threads headers */
+#include <stdio.h> /* for perror() */
+
+#define PIPE_THREAD_HAVE_CONDVAR
+
+typedef pthread_t pipe_thread;
+
+#define PIPE_THREAD_ROUTINE( name, param ) \
+ void *name( void *param )
+
+static INLINE pipe_thread pipe_thread_create( void *(* routine)( void *), void *param )
+{
+ pipe_thread thread;
+ if (pthread_create( &thread, NULL, routine, param ))
+ return 0;
+ return thread;
+}
+
+static INLINE int pipe_thread_wait( pipe_thread thread )
+{
+ return pthread_join( thread, NULL );
+}
+
+static INLINE int pipe_thread_destroy( pipe_thread thread )
+{
+ return pthread_detach( thread );
+}
+
+typedef pthread_mutex_t pipe_mutex;
+typedef pthread_cond_t pipe_condvar;
+
+#define pipe_static_mutex(mutex) \
+ static pipe_mutex mutex = PTHREAD_MUTEX_INITIALIZER
+
+#define pipe_mutex_init(mutex) \
+ (void) pthread_mutex_init(&(mutex), NULL)
+
+#define pipe_mutex_destroy(mutex) \
+ pthread_mutex_destroy(&(mutex))
+
+#define pipe_mutex_lock(mutex) \
+ (void) pthread_mutex_lock(&(mutex))
+
+#define pipe_mutex_unlock(mutex) \
+ (void) pthread_mutex_unlock(&(mutex))
+
+#define pipe_static_condvar(mutex) \
+ static pipe_condvar mutex = PTHREAD_COND_INITIALIZER
+
+#define pipe_condvar_init(cond) \
+ pthread_cond_init(&(cond), NULL)
+
+#define pipe_condvar_destroy(cond) \
+ pthread_cond_destroy(&(cond))
+
+#define pipe_condvar_wait(cond, mutex) \
+ pthread_cond_wait(&(cond), &(mutex))
+
+#define pipe_condvar_signal(cond) \
+ pthread_cond_signal(&(cond))
+
+#define pipe_condvar_broadcast(cond) \
+ pthread_cond_broadcast(&(cond))
+
+
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+
+#include <windows.h>
+
+typedef HANDLE pipe_thread;
+
+#define PIPE_THREAD_ROUTINE( name, param ) \
+ void * WINAPI name( void *param )
+
+static INLINE pipe_thread pipe_thread_create( void *(WINAPI * routine)( void *), void *param )
+{
+ DWORD id;
+ return CreateThread( NULL, 0, (LPTHREAD_START_ROUTINE) routine, param, 0, &id );
+}
+
+static INLINE int pipe_thread_wait( pipe_thread thread )
+{
+ if (WaitForSingleObject( thread, INFINITE ) == WAIT_OBJECT_0)
+ return 0;
+ return -1;
+}
+
+static INLINE int pipe_thread_destroy( pipe_thread thread )
+{
+ if (CloseHandle( thread ))
+ return 0;
+ return -1;
+}
+
+typedef CRITICAL_SECTION pipe_mutex;
+
+#define pipe_static_mutex(mutex) \
+ /*static*/ pipe_mutex mutex = {0,0,0,0,0,0}
+
+#define pipe_mutex_init(mutex) \
+ InitializeCriticalSection(&mutex)
+
+#define pipe_mutex_destroy(mutex) \
+ DeleteCriticalSection(&mutex)
+
+#define pipe_mutex_lock(mutex) \
+ EnterCriticalSection(&mutex)
+
+#define pipe_mutex_unlock(mutex) \
+ LeaveCriticalSection(&mutex)
+
+/* XXX: dummy definitions, make it compile */
+
+typedef unsigned pipe_condvar;
+
+#define pipe_condvar_init(condvar) \
+ (void) condvar
+
+#define pipe_condvar_broadcast(condvar) \
+ (void) condvar
+
+#else
+
+/** Dummy definitions */
+
+typedef unsigned pipe_thread;
+typedef unsigned pipe_mutex;
+typedef unsigned pipe_condvar;
+
+#define pipe_static_mutex(mutex) \
+ static pipe_mutex mutex = 0
+
+#define pipe_mutex_init(mutex) \
+ (void) mutex
+
+#define pipe_mutex_destroy(mutex) \
+ (void) mutex
+
+#define pipe_mutex_lock(mutex) \
+ (void) mutex
+
+#define pipe_mutex_unlock(mutex) \
+ (void) mutex
+
+#define pipe_static_condvar(condvar) \
+ static unsigned condvar = 0
+
+#define pipe_condvar_init(condvar) \
+ (void) condvar
+
+#define pipe_condvar_destroy(condvar) \
+ (void) condvar
+
+#define pipe_condvar_wait(condvar, mutex) \
+ (void) condvar
+
+#define pipe_condvar_signal(condvar) \
+ (void) condvar
+
+#define pipe_condvar_broadcast(condvar) \
+ (void) condvar
+
+
+#endif /* PIPE_OS_? */
+
+
+
+/*
+ * Thread-specific data.
+ */
+
+typedef struct {
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) || defined(PIPE_OS_HAIKU)
+ pthread_key_t key;
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+ DWORD key;
+#endif
+ int initMagic;
+} pipe_tsd;
+
+
+#define PIPE_TSD_INIT_MAGIC 0xff8adc98
+
+
+static INLINE void
+pipe_tsd_init(pipe_tsd *tsd)
+{
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) || defined(PIPE_OS_HAIKU)
+ if (pthread_key_create(&tsd->key, NULL/*free*/) != 0) {
+ perror("pthread_key_create(): failed to allocate key for thread specific data");
+ exit(-1);
+ }
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+ assert(0);
+#endif
+ tsd->initMagic = PIPE_TSD_INIT_MAGIC;
+}
+
+static INLINE void *
+pipe_tsd_get(pipe_tsd *tsd)
+{
+ if (tsd->initMagic != (int) PIPE_TSD_INIT_MAGIC) {
+ pipe_tsd_init(tsd);
+ }
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) || defined(PIPE_OS_HAIKU)
+ return pthread_getspecific(tsd->key);
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+ assert(0);
+ return NULL;
+#else
+ assert(0);
+ return NULL;
+#endif
+}
+
+static INLINE void
+pipe_tsd_set(pipe_tsd *tsd, void *value)
+{
+ if (tsd->initMagic != (int) PIPE_TSD_INIT_MAGIC) {
+ pipe_tsd_init(tsd);
+ }
+#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS) || defined(PIPE_OS_HAIKU)
+ if (pthread_setspecific(tsd->key, value) != 0) {
+      perror("pthread_setspecific() failed");
+ exit(-1);
+ }
+#elif defined(PIPE_SUBSYSTEM_WINDOWS_USER)
+ assert(0);
+#else
+ assert(0);
+#endif
+}
+
+
+
+#endif /* _P_THREAD2_H_ */
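
A usage sketch for the wrappers above, assuming a platform where PIPE_THREAD_HAVE_CONDVAR is defined (the worker/run_worker names are illustrative, not part of this patch): a worker thread signals completion through a mutex/condvar pair.

#include "pipe/p_thread.h"

static pipe_mutex work_mutex;
static pipe_condvar work_cond;
static int work_done = 0;

static PIPE_THREAD_ROUTINE(worker, param)
{
   (void) param;
   /* ... perform the actual work here ... */
   pipe_mutex_lock(work_mutex);
   work_done = 1;
   pipe_condvar_signal(work_cond);
   pipe_mutex_unlock(work_mutex);
   return NULL;
}

static void
run_worker(void)
{
   pipe_thread thread;

   pipe_mutex_init(work_mutex);
   pipe_condvar_init(work_cond);

   thread = pipe_thread_create(worker, NULL);

   pipe_mutex_lock(work_mutex);
   while (!work_done)
      pipe_condvar_wait(work_cond, work_mutex);
   pipe_mutex_unlock(work_mutex);

   pipe_thread_wait(thread);

   pipe_condvar_destroy(work_cond);
   pipe_mutex_destroy(work_mutex);
}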
diff --git a/src/gallium/include/state_tracker/dri1_api.h b/src/gallium/include/state_tracker/dri1_api.h
new file mode 100644
index 0000000000..b173ba3683
--- /dev/null
+++ b/src/gallium/include/state_tracker/dri1_api.h
@@ -0,0 +1,82 @@
+#ifndef _DRI1_API_H_
+#define _DRI1_API_H_
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_screen.h"
+#include "pipe/p_format.h"
+
+#include "state_tracker/drm_api.h"
+
+#include <drm.h>
+
+struct pipe_screen;
+struct pipe_winsys;
+struct pipe_buffer;
+struct pipe_context;
+struct pipe_texture;
+
+struct dri1_api_version
+{
+ int major;
+ int minor;
+ int patch_level;
+};
+
+/**
+ * This callback struct is intended for drivers that need to take
+ * the hardware lock on command submission.
+ */
+
+struct dri1_api_lock_funcs
+{
+ void (*lock) (struct pipe_context * pipe);
+ void (*unlock) (struct pipe_context * locked_pipe);
+ boolean(*is_locked) (struct pipe_context * locked_pipe);
+ boolean(*is_lock_lost) (struct pipe_context * locked_pipe);
+ void (*clear_lost_lock) (struct pipe_context * locked_pipe);
+};
+
+struct dri1_api
+{
+ /**
+    * For flushing to the front buffer. A driver should implement one and
+    * only one of the functions below. The present_locked function allows a
+    * dri1 driver to pageflip.
+ */
+
+ /*@{ */
+
+ struct pipe_surface *(*front_srf_locked) (struct pipe_context *
+ locked_pipe);
+
+ void (*present_locked) (struct pipe_context * locked_pipe,
+ struct pipe_surface * surf,
+ const struct drm_clip_rect * rect,
+ unsigned int num_clip,
+ int x_draw, int y_draw,
+ const struct drm_clip_rect * src_bbox,
+ struct pipe_fence_handle ** fence);
+ /*@} */
+};
+
+struct dri1_create_screen_arg
+{
+ struct drm_create_screen_arg base;
+
+ struct dri1_api_lock_funcs *lf;
+ void *ddx_info;
+ int ddx_info_size;
+ void *sarea;
+
+ struct dri1_api_version ddx_version;
+ struct dri1_api_version dri_version;
+ struct dri1_api_version drm_version;
+
+ /*
+ * out parameters;
+ */
+
+ struct dri1_api *api;
+};
+
+#endif
diff --git a/src/gallium/include/state_tracker/drm_api.h b/src/gallium/include/state_tracker/drm_api.h
new file mode 100644
index 0000000000..4d1259e1ee
--- /dev/null
+++ b/src/gallium/include/state_tracker/drm_api.h
@@ -0,0 +1,69 @@
+
+#ifndef _DRM_API_H_
+#define _DRM_API_H_
+
+#include "pipe/p_compiler.h"
+
+struct pipe_screen;
+struct pipe_winsys;
+struct pipe_buffer;
+struct pipe_context;
+struct pipe_texture;
+
+enum drm_create_screen_mode {
+ DRM_CREATE_NORMAL = 0,
+ DRM_CREATE_DRI1,
+ DRM_CREATE_DRIVER = 1024,
+ DRM_CREATE_MAX
+};
+
+/**
+ * Create-screen arguments for modes other than DRM_CREATE_NORMAL derive
+ * from this struct.
+ */
+/*@{*/
+struct drm_create_screen_arg {
+ enum drm_create_screen_mode mode;
+};
+/*@}*/
+
+struct drm_api
+{
+ /**
+	 * Screen and context creation functions
+ */
+ /*@{*/
+ struct pipe_screen* (*create_screen)(struct drm_api *api, int drm_fd,
+ struct drm_create_screen_arg *arg);
+ struct pipe_context* (*create_context)(struct drm_api *api,
+ struct pipe_screen *screen);
+ /*@}*/
+
+ /**
+ * Special buffer functions
+ */
+ /*@{*/
+ struct pipe_texture*
+ (*texture_from_shared_handle)(struct drm_api *api,
+ struct pipe_screen *screen,
+ struct pipe_texture *templ,
+ const char *name,
+ unsigned stride,
+ unsigned handle);
+ boolean (*shared_handle_from_texture)(struct drm_api *api,
+ struct pipe_screen *screen,
+ struct pipe_texture *texture,
+ unsigned *stride,
+ unsigned *handle);
+ boolean (*local_handle_from_texture)(struct drm_api *api,
+ struct pipe_screen *screen,
+ struct pipe_texture *texture,
+ unsigned *stride,
+ unsigned *handle);
+ /*@}*/
+
+ void (*destroy)(struct drm_api *api);
+};
+
+extern struct drm_api * drm_api_create(void);
+
+#endif
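
A hedged sketch of how a winsys might provide the drm_api_create() entry point declared above. The mydrv_* functions are hypothetical placeholders, and CALLOC_STRUCT/FREE are assumed from util/u_memory.h (used elsewhere in this patch):

#include "state_tracker/drm_api.h"
#include "util/u_memory.h"

/* Hypothetical driver entry points, assumed to be implemented elsewhere. */
struct pipe_screen *mydrv_create_screen(struct drm_api *api, int drm_fd,
                                        struct drm_create_screen_arg *arg);
struct pipe_context *mydrv_create_context(struct drm_api *api,
                                          struct pipe_screen *screen);

static void
mydrv_api_destroy(struct drm_api *api)
{
   FREE(api);
}

struct drm_api *
drm_api_create(void)
{
   struct drm_api *api = CALLOC_STRUCT(drm_api);

   if (!api)
      return NULL;

   api->create_screen = mydrv_create_screen;
   api->create_context = mydrv_create_context;
   /* the texture/handle hooks would be wired up the same way */
   api->destroy = mydrv_api_destroy;

   return api;
}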
diff --git a/src/gallium/state_trackers/Makefile b/src/gallium/state_trackers/Makefile
new file mode 100644
index 0000000000..265ca468c2
--- /dev/null
+++ b/src/gallium/state_trackers/Makefile
@@ -0,0 +1,25 @@
+TOP = ../../..
+include $(TOP)/configs/current
+
+
+SUBDIRS = $(GALLIUM_STATE_TRACKERS_DIRS)
+
+
+default: subdirs
+
+
+subdirs:
+ @for dir in $(SUBDIRS) ; do \
+ if [ -d $$dir ] ; then \
+ (cd $$dir && $(MAKE)) || exit 1 ; \
+ fi \
+ done
+
+
+clean:
+ rm -f `find . -name \*.[oa]`
+ rm -f `find . -name depend`
+
+
+# Dummy install target
+install:
diff --git a/src/gallium/state_trackers/README b/src/gallium/state_trackers/README
new file mode 100644
index 0000000000..28dd27bbd5
--- /dev/null
+++ b/src/gallium/state_trackers/README
@@ -0,0 +1,2 @@
+This directory is a placeholder for incubating state-trackers. Mesa's
+state-tracker is in src/mesa.
diff --git a/src/gallium/state_trackers/dri/Makefile b/src/gallium/state_trackers/dri/Makefile
new file mode 100644
index 0000000000..ef8f19709a
--- /dev/null
+++ b/src/gallium/state_trackers/dri/Makefile
@@ -0,0 +1,28 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = dridrm
+
+LIBRARY_INCLUDES = \
+ -I$(TOP)/include \
+ -I$(TOP)/src/mesa \
+ -I$(TOP)/src/mesa/drivers/dri/common \
+ -I$(TOP)/src/mesa/main \
+ $(shell pkg-config --cflags-only-I libdrm)
+
+
+C_SOURCES = \
+ dri_context.c \
+ dri_screen.c \
+ dri_drawable.c \
+ dri_extensions.c
+
+# $(TOP)/src/mesa/drivers/dri/common/utils.c \
+ $(TOP)/src/mesa/drivers/dri/common/vblank.c \
+ $(TOP)/src/mesa/drivers/dri/common/dri_util.c \
+ $(TOP)/src/mesa/drivers/dri/common/xmlconfig.c \
+ $(TOP)/src/mesa/drivers/common/driverfuncs.c \
+ $(TOP)/src/mesa/drivers/dri/common/texmem.c \
+ $(TOP)/src/mesa/drivers/dri/common/drirenderbuffer.c
+
+include ../../Makefile.template
diff --git a/src/gallium/state_trackers/dri/SConscript b/src/gallium/state_trackers/dri/SConscript
new file mode 100644
index 0000000000..ce2c273597
--- /dev/null
+++ b/src/gallium/state_trackers/dri/SConscript
@@ -0,0 +1,23 @@
+#######################################################################
+# SConscript for dri state_tracker
+
+Import('*')
+
+if env['dri']:
+
+ env = env.Clone()
+
+ env.Append(CPPPATH = [
+ '#/src/mesa',
+ '#/src/mesa/drivers/dri/common',
+ ])
+
+ st_dri = env.ConvenienceLibrary(
+ target = 'st_dri',
+ source = [ 'dri_context.c',
+ 'dri_drawable.c',
+ 'dri_extensions.c',
+ 'dri_screen.c',
+ ]
+ )
+ Export('st_dri')
diff --git a/src/gallium/state_trackers/dri/dri_context.c b/src/gallium/state_trackers/dri/dri_context.c
new file mode 100644
index 0000000000..8819936fca
--- /dev/null
+++ b/src/gallium/state_trackers/dri/dri_context.c
@@ -0,0 +1,215 @@
+/**************************************************************************
+ *
+ * Copyright 2009, VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Author: Keith Whitwell <keithw@vmware.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ */
+
+#include "dri_screen.h"
+
+#include "dri_drawable.h"
+#include "state_tracker/drm_api.h"
+#include "state_tracker/dri1_api.h"
+#include "state_tracker/st_public.h"
+#include "state_tracker/st_context.h"
+#include "pipe/p_context.h"
+
+#include "dri_context.h"
+
+#include "util/u_memory.h"
+
+GLboolean
+dri_create_context(const __GLcontextModes * visual,
+ __DRIcontextPrivate * cPriv, void *sharedContextPrivate)
+{
+ __DRIscreenPrivate *sPriv = cPriv->driScreenPriv;
+ struct dri_screen *screen = dri_screen(sPriv);
+ struct dri_context *ctx = NULL;
+ struct st_context *st_share = NULL;
+
+ if (sharedContextPrivate) {
+ st_share = ((struct dri_context *)sharedContextPrivate)->st;
+ }
+
+ ctx = CALLOC_STRUCT(dri_context);
+ if (ctx == NULL)
+ goto fail;
+
+ cPriv->driverPrivate = ctx;
+ ctx->cPriv = cPriv;
+ ctx->sPriv = sPriv;
+ ctx->lock = screen->drmLock;
+ ctx->d_stamp = -1;
+ ctx->r_stamp = -1;
+
+ driParseConfigFiles(&ctx->optionCache,
+ &screen->optionCache, sPriv->myNum, "dri");
+
+ ctx->pipe = screen->api->create_context(screen->api, screen->pipe_screen);
+
+ if (ctx->pipe == NULL)
+ goto fail;
+
+ /* used in dri_flush_frontbuffer */
+ ctx->pipe->priv = ctx;
+
+ ctx->st = st_create_context(ctx->pipe, visual, st_share);
+ if (ctx->st == NULL)
+ goto fail;
+
+ dri_init_extensions(ctx);
+
+ return GL_TRUE;
+
+ fail:
+ if (ctx && ctx->st)
+ st_destroy_context(ctx->st);
+
+ if (ctx && ctx->pipe)
+ ctx->pipe->destroy(ctx->pipe);
+
+ FREE(ctx);
+ return FALSE;
+}
+
+void
+dri_destroy_context(__DRIcontextPrivate * cPriv)
+{
+ struct dri_context *ctx = dri_context(cPriv);
+
+ /* No particular reason to wait for command completion before
+ * destroying a context, but it is probably worthwhile flushing it
+ * to avoid having to add code elsewhere to cope with flushing a
+ * partially destroyed context.
+ */
+ st_flush(ctx->st, 0, NULL);
+
+ /* Also frees ctx->pipe?
+ */
+ st_destroy_context(ctx->st);
+
+ FREE(ctx);
+}
+
+GLboolean
+dri_unbind_context(__DRIcontextPrivate * cPriv)
+{
+ if (cPriv) {
+ struct dri_context *ctx = dri_context(cPriv);
+
+ if (--ctx->bind_count == 0) {
+ if (ctx->st && ctx->st == st_get_current()) {
+ st_flush(ctx->st, PIPE_FLUSH_RENDER_CACHE, NULL);
+ st_make_current(NULL, NULL, NULL);
+ }
+ }
+ }
+
+ return GL_TRUE;
+}
+
+GLboolean
+dri_make_current(__DRIcontextPrivate * cPriv,
+ __DRIdrawablePrivate * driDrawPriv,
+ __DRIdrawablePrivate * driReadPriv)
+{
+ if (cPriv) {
+ struct dri_context *ctx = dri_context(cPriv);
+ struct dri_drawable *draw = dri_drawable(driDrawPriv);
+ struct dri_drawable *read = dri_drawable(driReadPriv);
+ struct st_context *old_st = st_get_current();
+
+ if (old_st && old_st != ctx->st)
+ st_flush(old_st, PIPE_FLUSH_RENDER_CACHE, NULL);
+
+ ++ctx->bind_count;
+
+ if (ctx->dPriv != driDrawPriv) {
+ ctx->dPriv = driDrawPriv;
+ ctx->d_stamp = driDrawPriv->lastStamp - 1;
+ }
+ if (ctx->rPriv != driReadPriv) {
+ ctx->rPriv = driReadPriv;
+ ctx->r_stamp = driReadPriv->lastStamp - 1;
+ }
+
+ st_make_current(ctx->st, draw->stfb, read->stfb);
+
+ if (__dri1_api_hooks) {
+ dri1_update_drawables(ctx, draw, read);
+ } else {
+ if (driDrawPriv)
+ dri_get_buffers(driDrawPriv);
+ if (driDrawPriv != driReadPriv && driReadPriv)
+ dri_get_buffers(driReadPriv);
+ }
+ } else {
+ st_make_current(NULL, NULL, NULL);
+ }
+
+ return GL_TRUE;
+}
+
+static void
+st_dri_lock(struct pipe_context *pipe)
+{
+ dri_lock((struct dri_context *)pipe->priv);
+}
+
+static void
+st_dri_unlock(struct pipe_context *pipe)
+{
+ dri_unlock((struct dri_context *)pipe->priv);
+}
+
+static boolean
+st_dri_is_locked(struct pipe_context *pipe)
+{
+ return ((struct dri_context *)pipe->priv)->isLocked;
+}
+
+static boolean
+st_dri_lost_lock(struct pipe_context *pipe)
+{
+ return ((struct dri_context *)pipe->priv)->wsLostLock;
+}
+
+static void
+st_dri_clear_lost_lock(struct pipe_context *pipe)
+{
+ ((struct dri_context *)pipe->priv)->wsLostLock = FALSE;
+}
+
+struct dri1_api_lock_funcs dri1_lf = {
+ .lock = st_dri_lock,
+ .unlock = st_dri_unlock,
+ .is_locked = st_dri_is_locked,
+ .is_lock_lost = st_dri_lost_lock,
+ .clear_lost_lock = st_dri_clear_lost_lock
+};
+
+/* vim: set sw=3 ts=8 sts=3 expandtab: */
diff --git a/src/gallium/state_trackers/dri/dri_context.h b/src/gallium/state_trackers/dri/dri_context.h
new file mode 100644
index 0000000000..4650178734
--- /dev/null
+++ b/src/gallium/state_trackers/dri/dri_context.h
@@ -0,0 +1,123 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Author: Keith Whitwell <keithw@vmware.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ */
+
+#ifndef DRI_CONTEXT_H
+#define DRI_CONTEXT_H
+
+#include "pipe/p_compiler.h"
+#include "drm.h"
+#include "dri_util.h"
+
+struct pipe_context;
+struct pipe_fence;
+struct st_context;
+struct dri_drawable;
+
+struct dri_context
+{
+ /* dri */
+ __DRIscreenPrivate *sPriv;
+ __DRIcontextPrivate *cPriv;
+ __DRIdrawablePrivate *dPriv;
+ __DRIdrawablePrivate *rPriv;
+
+ driOptionCache optionCache;
+
+ unsigned int d_stamp;
+ unsigned int r_stamp;
+
+ drmLock *lock;
+ boolean isLocked;
+ boolean stLostLock;
+ boolean wsLostLock;
+
+ unsigned int bind_count;
+
+ /* gallium */
+ struct st_context *st;
+ struct pipe_context *pipe;
+};
+
+static INLINE struct dri_context *
+dri_context(__DRIcontextPrivate * driContextPriv)
+{
+ return (struct dri_context *)driContextPriv->driverPrivate;
+}
+
+static INLINE void
+dri_lock(struct dri_context *ctx)
+{
+ drm_context_t hw_context = ctx->cPriv->hHWContext;
+ char ret = 0;
+
+ DRM_CAS(ctx->lock, hw_context, DRM_LOCK_HELD | hw_context, ret);
+ if (ret) {
+ drmGetLock(ctx->sPriv->fd, hw_context, 0);
+ ctx->stLostLock = TRUE;
+ ctx->wsLostLock = TRUE;
+ }
+ ctx->isLocked = TRUE;
+}
+
+static INLINE void
+dri_unlock(struct dri_context *ctx)
+{
+ ctx->isLocked = FALSE;
+ DRM_UNLOCK(ctx->sPriv->fd, ctx->lock, ctx->cPriv->hHWContext);
+}
+
+/***********************************************************************
+ * dri_context.c
+ */
+extern struct dri1_api_lock_funcs dri1_lf;
+
+void dri_destroy_context(__DRIcontextPrivate * driContextPriv);
+
+boolean dri_unbind_context(__DRIcontextPrivate * driContextPriv);
+
+boolean
+dri_make_current(__DRIcontextPrivate * driContextPriv,
+ __DRIdrawablePrivate * driDrawPriv,
+ __DRIdrawablePrivate * driReadPriv);
+
+boolean
+dri_create_context(const __GLcontextModes * visual,
+ __DRIcontextPrivate * driContextPriv,
+ void *sharedContextPrivate);
+
+/***********************************************************************
+ * dri_extensions.c
+ */
+void dri_init_extensions(struct dri_context *ctx);
+
+#endif
+
+/* vim: set sw=3 ts=8 sts=3 expandtab: */
diff --git a/src/gallium/state_trackers/dri/dri_drawable.c b/src/gallium/state_trackers/dri/dri_drawable.c
new file mode 100644
index 0000000000..5cec9e329d
--- /dev/null
+++ b/src/gallium/state_trackers/dri/dri_drawable.c
@@ -0,0 +1,687 @@
+/**************************************************************************
+ *
+ * Copyright 2009, VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Author: Keith Whitwell <keithw@vmware.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ */
+
+#include "dri_screen.h"
+#include "dri_context.h"
+#include "dri_drawable.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_screen.h"
+#include "pipe/p_inlines.h"
+#include "main/mtypes.h"
+#include "main/renderbuffer.h"
+#include "state_tracker/drm_api.h"
+#include "state_tracker/dri1_api.h"
+#include "state_tracker/st_public.h"
+#include "state_tracker/st_context.h"
+#include "state_tracker/st_cb_fbo.h"
+
+#include "util/u_memory.h"
+
+static struct pipe_surface *
+dri_surface_from_handle(struct drm_api *api,
+ struct pipe_screen *screen,
+ unsigned handle,
+ enum pipe_format format,
+ unsigned width, unsigned height, unsigned pitch)
+{
+ struct pipe_surface *surface = NULL;
+ struct pipe_texture *texture = NULL;
+ struct pipe_texture templat;
+
+ memset(&templat, 0, sizeof(templat));
+ templat.tex_usage |= PIPE_TEXTURE_USAGE_RENDER_TARGET;
+ templat.target = PIPE_TEXTURE_2D;
+ templat.last_level = 0;
+ templat.depth[0] = 1;
+ templat.format = format;
+ templat.width[0] = width;
+ templat.height[0] = height;
+ pf_get_block(templat.format, &templat.block);
+
+ texture = api->texture_from_shared_handle(api, screen, &templat,
+ "dri2 buffer", pitch, handle);
+
+ if (!texture) {
+ debug_printf("%s: Failed to blanket the buffer with a texture\n", __func__);
+ return NULL;
+ }
+
+ surface = screen->get_tex_surface(screen, texture, 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_READ |
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /* we don't need the texture from this point on */
+ pipe_texture_reference(&texture, NULL);
+ return surface;
+}
+
+/**
+ * Pixmaps will have the same buffer name for the fake front and the front.
+ */
+static boolean
+dri2_check_if_pixmap(__DRIbuffer *buffers, int count)
+{
+ boolean found = FALSE;
+ boolean is_pixmap = FALSE;
+ unsigned name;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ switch (buffers[i].attachment) {
+ case __DRI_BUFFER_FRONT_LEFT:
+ case __DRI_BUFFER_FAKE_FRONT_LEFT:
+ if (found) {
+ is_pixmap = buffers[i].name == name;
+ } else {
+ name = buffers[i].name;
+ found = TRUE;
+ }
+ default:
+ continue;
+ }
+ }
+
+ return is_pixmap;
+}
+
+/**
+ * This will be called when a drawable is known to have been resized.
+ */
+void
+dri_get_buffers(__DRIdrawablePrivate * dPriv)
+{
+
+ struct dri_drawable *drawable = dri_drawable(dPriv);
+ struct pipe_surface *surface = NULL;
+ struct pipe_screen *screen = dri_screen(drawable->sPriv)->pipe_screen;
+ __DRIbuffer *buffers = NULL;
+ __DRIscreen *dri_screen = drawable->sPriv;
+ __DRIdrawable *dri_drawable = drawable->dPriv;
+ struct drm_api *api = ((struct dri_screen*)(dri_screen->private))->api;
+ boolean have_depth = FALSE;
+ int i, count;
+
+   buffers = (*dri_screen->dri2.loader->getBuffers) (dri_drawable,
+                                                     &dri_drawable->w,
+                                                     &dri_drawable->h,
+                                                     drawable->attachments,
+                                                     drawable->num_attachments,
+                                                     &count,
+                                                     dri_drawable->loaderPrivate);
+
+ if (buffers == NULL) {
+ return;
+ }
+
+ /* set one cliprect to cover the whole dri_drawable */
+ dri_drawable->x = 0;
+ dri_drawable->y = 0;
+ dri_drawable->backX = 0;
+ dri_drawable->backY = 0;
+ dri_drawable->numClipRects = 1;
+ dri_drawable->pClipRects[0].x1 = 0;
+ dri_drawable->pClipRects[0].y1 = 0;
+ dri_drawable->pClipRects[0].x2 = dri_drawable->w;
+ dri_drawable->pClipRects[0].y2 = dri_drawable->h;
+ dri_drawable->numBackClipRects = 1;
+ dri_drawable->pBackClipRects[0].x1 = 0;
+ dri_drawable->pBackClipRects[0].y1 = 0;
+ dri_drawable->pBackClipRects[0].x2 = dri_drawable->w;
+ dri_drawable->pBackClipRects[0].y2 = dri_drawable->h;
+
+ if (drawable->old_num == count &&
+ drawable->old_w == dri_drawable->w &&
+ drawable->old_h == dri_drawable->h &&
+ memcmp(drawable->old, buffers, sizeof(__DRIbuffer) * count) == 0) {
+ return;
+ } else {
+ drawable->old_num = count;
+ drawable->old_w = dri_drawable->w;
+ drawable->old_h = dri_drawable->h;
+ memcpy(drawable->old, buffers, sizeof(__DRIbuffer) * count);
+ }
+
+ drawable->is_pixmap = dri2_check_if_pixmap(buffers, count);
+
+ for (i = 0; i < count; i++) {
+ enum pipe_format format = 0;
+ int index = 0;
+
+ switch (buffers[i].attachment) {
+ case __DRI_BUFFER_FRONT_LEFT:
+ continue;
+ case __DRI_BUFFER_FAKE_FRONT_LEFT:
+ index = ST_SURFACE_FRONT_LEFT;
+ format = drawable->color_format;
+ break;
+ case __DRI_BUFFER_BACK_LEFT:
+ index = ST_SURFACE_BACK_LEFT;
+ format = drawable->color_format;
+ break;
+ case __DRI_BUFFER_DEPTH:
+ case __DRI_BUFFER_DEPTH_STENCIL:
+ case __DRI_BUFFER_STENCIL:
+ index = ST_SURFACE_DEPTH;
+ format = drawable->depth_stencil_format;
+ break;
+ case __DRI_BUFFER_ACCUM:
+ default:
+ assert(0);
+ }
+
+ if (index == ST_SURFACE_DEPTH) {
+ if (have_depth)
+ continue;
+ else
+ have_depth = TRUE;
+ }
+
+ surface = dri_surface_from_handle(api,
+ screen,
+ buffers[i].name,
+ format,
+ dri_drawable->w,
+ dri_drawable->h, buffers[i].pitch);
+
+ switch (buffers[i].attachment) {
+ case __DRI_BUFFER_FAKE_FRONT_LEFT:
+ case __DRI_BUFFER_BACK_LEFT:
+ drawable->color_format = surface->format;
+ break;
+ case __DRI_BUFFER_DEPTH:
+ case __DRI_BUFFER_DEPTH_STENCIL:
+ case __DRI_BUFFER_STENCIL:
+ drawable->depth_stencil_format = surface->format;
+ break;
+ }
+
+ st_set_framebuffer_surface(drawable->stfb, index, surface);
+ pipe_surface_reference(&surface, NULL);
+ }
+   /* this is needed, or else the state tracker fails to pick up the new buffers */
+ st_resize_framebuffer(drawable->stfb, dri_drawable->w, dri_drawable->h);
+}
+
+/**
+ * These are used for GLX_EXT_texture_from_pixmap
+ */
+void dri2_set_tex_buffer2(__DRIcontext *pDRICtx, GLint target,
+ GLint format, __DRIdrawable *dPriv)
+{
+ struct dri_drawable *drawable = dri_drawable(dPriv);
+ struct pipe_surface *ps;
+
+ if (!drawable->stfb->Base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer) {
+ struct gl_renderbuffer *rb =
+ st_new_renderbuffer_fb(drawable->color_format, 0 /*XXX*/, FALSE);
+ _mesa_add_renderbuffer(&drawable->stfb->Base, BUFFER_FRONT_LEFT, rb);
+ }
+
+ dri_get_buffers(drawable->dPriv);
+ st_get_framebuffer_surface(drawable->stfb, ST_SURFACE_FRONT_LEFT, &ps);
+
+ st_bind_texture_surface(ps, target == GL_TEXTURE_2D ? ST_TEXTURE_2D :
+ ST_TEXTURE_RECT, 0, drawable->color_format);
+}
+
+void dri2_set_tex_buffer(__DRIcontext *pDRICtx, GLint target,
+ __DRIdrawable *dPriv)
+{
+ dri2_set_tex_buffer2(pDRICtx, target, GLX_TEXTURE_FORMAT_RGBA_EXT, dPriv);
+}
+
+void
+dri_flush_frontbuffer(struct pipe_screen *screen,
+ struct pipe_surface *surf, void *context_private)
+{
+ struct dri_context *ctx = (struct dri_context *)context_private;
+ struct dri_drawable *drawable = dri_drawable(ctx->dPriv);
+ __DRIdrawable *dri_drawable = ctx->dPriv;
+ __DRIscreen *dri_screen = ctx->sPriv;
+
+ /* XXX Does this function get called with DRI1? */
+
+ if (ctx->dPriv == NULL) {
+ debug_printf("%s: no drawable bound to context\n", __func__);
+ return;
+ }
+
+#if 0
+   /* TODO: if rendering to pixmaps is slow, enable this code. */
+ if (drawable->is_pixmap)
+ return;
+#else
+ (void)drawable;
+#endif
+
+ (*dri_screen->dri2.loader->flushFrontBuffer)(dri_drawable,
+ dri_drawable->loaderPrivate);
+}
+
+/**
+ * This is called when we need to set up GL rendering to a new X window.
+ */
+boolean
+dri_create_buffer(__DRIscreenPrivate * sPriv,
+ __DRIdrawablePrivate * dPriv,
+ const __GLcontextModes * visual, boolean isPixmap)
+{
+ struct dri_screen *screen = sPriv->private;
+ struct dri_drawable *drawable = NULL;
+ int i;
+
+ if (isPixmap)
+ goto fail; /* not implemented */
+
+ drawable = CALLOC_STRUCT(dri_drawable);
+ if (drawable == NULL)
+ goto fail;
+
+ if (visual->redBits == 8) {
+ if (visual->alphaBits == 8)
+ drawable->color_format = PIPE_FORMAT_A8R8G8B8_UNORM;
+ else
+ drawable->color_format = PIPE_FORMAT_X8R8G8B8_UNORM;
+ } else {
+ drawable->color_format = PIPE_FORMAT_R5G6B5_UNORM;
+ }
+
+ switch(visual->depthBits) {
+ default:
+ case 0:
+ drawable->depth_stencil_format = PIPE_FORMAT_NONE;
+ break;
+ case 16:
+ drawable->depth_stencil_format = PIPE_FORMAT_Z16_UNORM;
+ break;
+ case 24:
+ if (visual->stencilBits == 0) {
+ drawable->depth_stencil_format = (screen->d_depth_bits_last) ?
+ PIPE_FORMAT_X8Z24_UNORM:
+ PIPE_FORMAT_Z24X8_UNORM;
+ } else {
+ drawable->depth_stencil_format = (screen->sd_depth_bits_last) ?
+ PIPE_FORMAT_S8Z24_UNORM:
+ PIPE_FORMAT_Z24S8_UNORM;
+ }
+ break;
+ case 32:
+ drawable->depth_stencil_format = PIPE_FORMAT_Z32_UNORM;
+ break;
+ }
+
+ drawable->stfb = st_create_framebuffer(visual,
+ drawable->color_format,
+ drawable->depth_stencil_format,
+ drawable->depth_stencil_format,
+ dPriv->w,
+ dPriv->h, (void *)drawable);
+ if (drawable->stfb == NULL)
+ goto fail;
+
+ drawable->sPriv = sPriv;
+ drawable->dPriv = dPriv;
+ dPriv->driverPrivate = (void *)drawable;
+
+ /* setup dri2 buffers information */
+   /* TODO: in case of a double-buffered visual, delay fake front creation */
+ i = 0;
+ drawable->attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
+
+ if (visual->doubleBufferMode)
+ drawable->attachments[i++] = __DRI_BUFFER_BACK_LEFT;
+ else
+ drawable->attachments[i++] = __DRI_BUFFER_FAKE_FRONT_LEFT;
+ if (visual->depthBits && visual->stencilBits)
+ drawable->attachments[i++] = __DRI_BUFFER_DEPTH_STENCIL;
+ else if (visual->depthBits)
+ drawable->attachments[i++] = __DRI_BUFFER_DEPTH;
+ else if (visual->stencilBits)
+ drawable->attachments[i++] = __DRI_BUFFER_STENCIL;
+ drawable->num_attachments = i;
+
+ drawable->desired_fences = 2;
+
+ return GL_TRUE;
+ fail:
+ FREE(drawable);
+ return GL_FALSE;
+}
+
+static struct pipe_fence_handle *
+dri_swap_fences_pop_front(struct dri_drawable *draw)
+{
+ struct pipe_screen *screen = dri_screen(draw->sPriv)->pipe_screen;
+ struct pipe_fence_handle *fence = NULL;
+
+ if (draw->cur_fences >= draw->desired_fences) {
+ screen->fence_reference(screen, &fence, draw->swap_fences[draw->tail]);
+ screen->fence_reference(screen, &draw->swap_fences[draw->tail++], NULL);
+ --draw->cur_fences;
+ draw->tail &= DRI_SWAP_FENCES_MASK;
+ }
+ return fence;
+}
+
+static void
+dri_swap_fences_push_back(struct dri_drawable *draw,
+ struct pipe_fence_handle *fence)
+{
+ struct pipe_screen *screen = dri_screen(draw->sPriv)->pipe_screen;
+
+ if (!fence)
+ return;
+
+ if (draw->cur_fences < DRI_SWAP_FENCES_MAX) {
+ draw->cur_fences++;
+ screen->fence_reference(screen, &draw->swap_fences[draw->head++],
+ fence);
+ draw->head &= DRI_SWAP_FENCES_MASK;
+ }
+}
+
+void
+dri_destroy_buffer(__DRIdrawablePrivate * dPriv)
+{
+ struct dri_drawable *drawable = dri_drawable(dPriv);
+ struct pipe_fence_handle *fence;
+ struct pipe_screen *screen = dri_screen(drawable->sPriv)->pipe_screen;
+
+ st_unreference_framebuffer(drawable->stfb);
+ drawable->desired_fences = 0;
+ while (drawable->cur_fences) {
+ fence = dri_swap_fences_pop_front(drawable);
+ screen->fence_reference(screen, &fence, NULL);
+ }
+
+ FREE(drawable);
+}
+
+static void
+dri1_update_drawables_locked(struct dri_context *ctx,
+ __DRIdrawablePrivate * driDrawPriv,
+ __DRIdrawablePrivate * driReadPriv)
+{
+ if (ctx->stLostLock) {
+ ctx->stLostLock = FALSE;
+ if (driDrawPriv == driReadPriv)
+ DRI_VALIDATE_DRAWABLE_INFO(ctx->sPriv, driDrawPriv);
+ else
+ DRI_VALIDATE_TWO_DRAWABLES_INFO(ctx->sPriv, driDrawPriv,
+ driReadPriv);
+ }
+}
+
+/**
+ * This ensures all contexts which bind to a drawable pick up the
+ * drawable change and signal new buffer state.
+ * Calling st_resize_framebuffer for each context may seem like overkill,
+ * but no new buffers will actually be allocated if the dimensions don't
+ * change.
+ */
+
+static void
+dri1_propagate_drawable_change(struct dri_context *ctx)
+{
+ __DRIdrawablePrivate *dPriv = ctx->dPriv;
+ __DRIdrawablePrivate *rPriv = ctx->rPriv;
+ boolean flushed = FALSE;
+
+ if (dPriv && ctx->d_stamp != dPriv->lastStamp) {
+
+ st_flush(ctx->st, PIPE_FLUSH_RENDER_CACHE, NULL);
+ flushed = TRUE;
+ ctx->d_stamp = dPriv->lastStamp;
+ st_resize_framebuffer(dri_drawable(dPriv)->stfb, dPriv->w, dPriv->h);
+
+ }
+
+ if (rPriv && dPriv != rPriv && ctx->r_stamp != rPriv->lastStamp) {
+
+ if (!flushed)
+ st_flush(ctx->st, PIPE_FLUSH_RENDER_CACHE, NULL);
+ ctx->r_stamp = rPriv->lastStamp;
+ st_resize_framebuffer(dri_drawable(rPriv)->stfb, rPriv->w, rPriv->h);
+
+ } else if (rPriv && dPriv == rPriv) {
+
+ ctx->r_stamp = ctx->d_stamp;
+
+ }
+}
+
+void
+dri1_update_drawables(struct dri_context *ctx,
+ struct dri_drawable *draw, struct dri_drawable *read)
+{
+ dri_lock(ctx);
+ dri1_update_drawables_locked(ctx, draw->dPriv, read->dPriv);
+ dri_unlock(ctx);
+
+ dri1_propagate_drawable_change(ctx);
+}
+
+static INLINE boolean
+dri1_intersect_src_bbox(struct drm_clip_rect *dst,
+ int dst_x,
+ int dst_y,
+ const struct drm_clip_rect *src,
+ const struct drm_clip_rect *bbox)
+{
+ int xy1;
+ int xy2;
+
+ xy1 = ((int)src->x1 > (int)bbox->x1 + dst_x) ? src->x1 :
+ (int)bbox->x1 + dst_x;
+ xy2 = ((int)src->x2 < (int)bbox->x2 + dst_x) ? src->x2 :
+ (int)bbox->x2 + dst_x;
+ if (xy1 >= xy2 || xy1 < 0)
+ return FALSE;
+
+ dst->x1 = xy1;
+ dst->x2 = xy2;
+
+ xy1 = ((int)src->y1 > (int)bbox->y1 + dst_y) ? src->y1 :
+ (int)bbox->y1 + dst_y;
+ xy2 = ((int)src->y2 < (int)bbox->y2 + dst_y) ? src->y2 :
+ (int)bbox->y2 + dst_y;
+ if (xy1 >= xy2 || xy1 < 0)
+ return FALSE;
+
+ dst->y1 = xy1;
+ dst->y2 = xy2;
+ return TRUE;
+}
+
+static void
+dri1_swap_copy(struct dri_context *ctx,
+ struct pipe_surface *dst,
+ struct pipe_surface *src,
+ __DRIdrawablePrivate * dPriv, const struct drm_clip_rect *bbox)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ struct drm_clip_rect clip;
+ struct drm_clip_rect *cur;
+ int i;
+
+ cur = dPriv->pClipRects;
+
+ for (i = 0; i < dPriv->numClipRects; ++i) {
+ if (dri1_intersect_src_bbox(&clip, dPriv->x, dPriv->y, cur++, bbox))
+ pipe->surface_copy(pipe, dst, clip.x1, clip.y1,
+ src,
+ (int)clip.x1 - dPriv->x,
+ (int)clip.y1 - dPriv->y,
+ clip.x2 - clip.x1, clip.y2 - clip.y1);
+ }
+}
+
+static void
+dri1_copy_to_front(struct dri_context *ctx,
+ struct pipe_surface *surf,
+ __DRIdrawablePrivate * dPriv,
+ const struct drm_clip_rect *sub_box,
+ struct pipe_fence_handle **fence)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ boolean save_lost_lock;
+ uint cur_w;
+ uint cur_h;
+ struct drm_clip_rect bbox;
+ boolean visible = TRUE;
+
+ *fence = NULL;
+
+ dri_lock(ctx);
+ save_lost_lock = ctx->stLostLock;
+ dri1_update_drawables_locked(ctx, dPriv, dPriv);
+ st_get_framebuffer_dimensions(dri_drawable(dPriv)->stfb, &cur_w, &cur_h);
+
+ bbox.x1 = 0;
+ bbox.x2 = cur_w;
+ bbox.y1 = 0;
+ bbox.y2 = cur_h;
+
+ if (sub_box)
+ visible = dri1_intersect_src_bbox(&bbox, 0, 0, &bbox, sub_box);
+
+ if (visible && __dri1_api_hooks->present_locked) {
+
+ __dri1_api_hooks->present_locked(pipe,
+ surf,
+ dPriv->pClipRects,
+ dPriv->numClipRects,
+ dPriv->x, dPriv->y, &bbox, fence);
+
+ } else if (visible && __dri1_api_hooks->front_srf_locked) {
+
+ struct pipe_surface *front = __dri1_api_hooks->front_srf_locked(pipe);
+
+ if (front)
+ dri1_swap_copy(ctx, front, surf, dPriv, &bbox);
+
+ st_flush(ctx->st, PIPE_FLUSH_RENDER_CACHE, fence);
+ }
+
+ ctx->stLostLock = save_lost_lock;
+
+ /**
+ * FIXME: Revisit this: Update drawables on copy_sub_buffer ?
+ */
+
+ if (!sub_box)
+ dri1_update_drawables_locked(ctx, ctx->dPriv, ctx->rPriv);
+
+ dri_unlock(ctx);
+ dri1_propagate_drawable_change(ctx);
+}
+
+void
+dri1_flush_frontbuffer(struct pipe_screen *screen,
+ struct pipe_surface *surf, void *context_private)
+{
+ struct dri_context *ctx = (struct dri_context *)context_private;
+ struct pipe_fence_handle *dummy_fence;
+
+ dri1_copy_to_front(ctx, surf, ctx->dPriv, NULL, &dummy_fence);
+ screen->fence_reference(screen, &dummy_fence, NULL);
+
+ /**
+ * FIXME: Do we need swap throttling here?
+ */
+}
+
+void
+dri_swap_buffers(__DRIdrawablePrivate * dPriv)
+{
+ struct dri_context *ctx;
+ struct pipe_surface *back_surf;
+ struct dri_drawable *draw = dri_drawable(dPriv);
+ struct pipe_screen *screen = dri_screen(draw->sPriv)->pipe_screen;
+ struct pipe_fence_handle *fence;
+ struct st_context *st = st_get_current();
+
+ assert(__dri1_api_hooks != NULL);
+
+ if (!st)
+ return; /* For now */
+
+ ctx = (struct dri_context *)st->pipe->priv;
+
+ st_get_framebuffer_surface(draw->stfb, ST_SURFACE_BACK_LEFT, &back_surf);
+ if (back_surf) {
+ st_notify_swapbuffers(draw->stfb);
+ st_flush(ctx->st, PIPE_FLUSH_RENDER_CACHE, NULL);
+ fence = dri_swap_fences_pop_front(draw);
+ if (fence) {
+ (void)screen->fence_finish(screen, fence, 0);
+ screen->fence_reference(screen, &fence, NULL);
+ }
+ dri1_copy_to_front(ctx, back_surf, dPriv, NULL, &fence);
+ dri_swap_fences_push_back(draw, fence);
+ screen->fence_reference(screen, &fence, NULL);
+ }
+}
+
+void
+dri_copy_sub_buffer(__DRIdrawablePrivate * dPriv, int x, int y, int w, int h)
+{
+ struct pipe_screen *screen = dri_screen(dPriv->driScreenPriv)->pipe_screen;
+ struct drm_clip_rect sub_bbox;
+ struct dri_context *ctx;
+ struct pipe_surface *back_surf;
+ struct dri_drawable *draw = dri_drawable(dPriv);
+ struct pipe_fence_handle *dummy_fence;
+ struct st_context *st = st_get_current();
+
+ assert(__dri1_api_hooks != NULL);
+
+ if (!st)
+ return;
+
+ ctx = (struct dri_context *)st->pipe->priv;
+
+ sub_bbox.x1 = x;
+ sub_bbox.x2 = x + w;
+ sub_bbox.y1 = y;
+ sub_bbox.y2 = y + h;
+
+ st_get_framebuffer_surface(draw->stfb, ST_SURFACE_BACK_LEFT, &back_surf);
+ if (back_surf) {
+ st_flush(ctx->st, PIPE_FLUSH_RENDER_CACHE, NULL);
+ dri1_copy_to_front(ctx, back_surf, dPriv, &sub_bbox, &dummy_fence);
+ screen->fence_reference(screen, &dummy_fence, NULL);
+ }
+}
+
+/* vim: set sw=3 ts=8 sts=3 expandtab: */
diff --git a/src/gallium/state_trackers/dri/dri_drawable.h b/src/gallium/state_trackers/dri/dri_drawable.h
new file mode 100644
index 0000000000..b910930db4
--- /dev/null
+++ b/src/gallium/state_trackers/dri/dri_drawable.h
@@ -0,0 +1,111 @@
+/**************************************************************************
+ *
+ * Copyright 2009, VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef DRI_DRAWABLE_H
+#define DRI_DRAWABLE_H
+
+#include "pipe/p_compiler.h"
+
+struct pipe_surface;
+struct pipe_fence_handle;
+struct st_framebuffer;
+struct dri_context;
+
+#define DRI_SWAP_FENCES_MAX 8
+#define DRI_SWAP_FENCES_MASK 7
+
+struct dri_drawable
+{
+ /* dri */
+ __DRIdrawablePrivate *dPriv;
+ __DRIscreenPrivate *sPriv;
+
+ unsigned attachments[8];
+ unsigned num_attachments;
+
+ boolean is_pixmap;
+
+ __DRIbuffer old[8];
+ unsigned old_num;
+ unsigned old_w;
+ unsigned old_h;
+
+ /* gallium */
+ struct st_framebuffer *stfb;
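+ /* Small ring of fences from recently queued swaps, indexed by
+ * head/tail; dri_swap_buffers() waits on the oldest entry to
+ * throttle how far ahead the client can render.
+ */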
+ struct pipe_fence_handle *swap_fences[DRI_SWAP_FENCES_MAX];
+ unsigned int head;
+ unsigned int tail;
+ unsigned int desired_fences;
+ unsigned int cur_fences;
+
+ enum pipe_format color_format;
+ enum pipe_format depth_stencil_format;
+};
+
+static INLINE struct dri_drawable *
+dri_drawable(__DRIdrawablePrivate * driDrawPriv)
+{
+ return (struct dri_drawable *)driDrawPriv->driverPrivate;
+}
+
+/***********************************************************************
+ * dri_drawable.c
+ */
+boolean
+dri_create_buffer(__DRIscreenPrivate * sPriv,
+ __DRIdrawablePrivate * dPriv,
+ const __GLcontextModes * visual, boolean isPixmap);
+
+void
+dri_flush_frontbuffer(struct pipe_screen *screen,
+ struct pipe_surface *surf, void *context_private);
+
+void dri_swap_buffers(__DRIdrawablePrivate * dPriv);
+
+void
+dri_copy_sub_buffer(__DRIdrawablePrivate * dPriv, int x, int y, int w, int h);
+
+void dri_get_buffers(__DRIdrawablePrivate * dPriv);
+
+void dri_destroy_buffer(__DRIdrawablePrivate * dPriv);
+
+void dri2_set_tex_buffer2(__DRIcontext *pDRICtx, GLint target,
+ GLint glx_texture_format, __DRIdrawable *dPriv);
+
+void dri2_set_tex_buffer(__DRIcontext *pDRICtx, GLint target,
+ __DRIdrawable *dPriv);
+
+void
+dri1_update_drawables(struct dri_context *ctx,
+ struct dri_drawable *draw, struct dri_drawable *read);
+
+void
+dri1_flush_frontbuffer(struct pipe_screen *screen,
+ struct pipe_surface *surf, void *context_private);
+#endif
+
+/* vim: set sw=3 ts=8 sts=3 expandtab: */
diff --git a/src/gallium/state_trackers/dri/dri_extensions.c b/src/gallium/state_trackers/dri/dri_extensions.c
new file mode 100644
index 0000000000..4349a4d1d2
--- /dev/null
+++ b/src/gallium/state_trackers/dri/dri_extensions.c
@@ -0,0 +1,128 @@
+/**************************************************************************
+ *
+ * Copyright 2009, VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Author: Keith Whitwell <keithw@vmware.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ */
+
+#include "dri_screen.h"
+#include "dri_context.h"
+#include "state_tracker/st_context.h"
+
+#define need_GL_ARB_map_buffer_range
+#define need_GL_ARB_multisample
+#define need_GL_ARB_occlusion_query
+#define need_GL_ARB_point_parameters
+#define need_GL_ARB_shader_objects
+#define need_GL_ARB_texture_compression
+#define need_GL_ARB_vertex_buffer_object
+#define need_GL_ARB_vertex_program
+#define need_GL_ARB_vertex_shader
+#define need_GL_ARB_window_pos
+#define need_GL_EXT_blend_color
+#define need_GL_EXT_blend_equation_separate
+#define need_GL_EXT_blend_func_separate
+#define need_GL_EXT_blend_minmax
+#define need_GL_EXT_cull_vertex
+#define need_GL_EXT_fog_coord
+#define need_GL_EXT_framebuffer_object
+#define need_GL_EXT_multi_draw_arrays
+#define need_GL_EXT_secondary_color
+#define need_GL_NV_vertex_program
+#define need_GL_VERSION_2_0
+#define need_GL_VERSION_2_1
+#include "extension_helper.h"
+
+/**
+ * Extension strings exported by the driver.
+ */
+const struct dri_extension card_extensions[] = {
+ {"GL_ARB_fragment_shader", NULL},
+ {"GL_ARB_map_buffer_range", GL_ARB_map_buffer_range_functions},
+ {"GL_ARB_multisample", GL_ARB_multisample_functions},
+ {"GL_ARB_multitexture", NULL},
+ {"GL_ARB_occlusion_query", GL_ARB_occlusion_query_functions},
+ {"GL_ARB_pixel_buffer_object", NULL},
+ {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions},
+ {"GL_ARB_shading_language_100", GL_VERSION_2_0_functions },
+ {"GL_ARB_shading_language_120", GL_VERSION_2_1_functions },
+ {"GL_ARB_shader_objects", GL_ARB_shader_objects_functions},
+ {"GL_ARB_texture_border_clamp", NULL},
+ {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions},
+ {"GL_ARB_texture_cube_map", NULL},
+ {"GL_ARB_texture_env_add", NULL},
+ {"GL_ARB_texture_env_combine", NULL},
+ {"GL_ARB_texture_env_dot3", NULL},
+ {"GL_ARB_texture_mirrored_repeat", NULL},
+ {"GL_ARB_texture_rectangle", NULL},
+ {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions},
+ {"GL_ARB_vertex_shader", GL_ARB_vertex_shader_functions},
+ {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions},
+ {"GL_ARB_window_pos", GL_ARB_window_pos_functions},
+ {"GL_EXT_blend_color", GL_EXT_blend_color_functions},
+ {"GL_EXT_blend_equation_separate", GL_EXT_blend_equation_separate_functions},
+ {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions},
+ {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions},
+ {"GL_EXT_blend_subtract", NULL},
+ {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions},
+ {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions},
+ {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions},
+ {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions},
+ {"GL_EXT_packed_depth_stencil", NULL},
+ {"GL_EXT_pixel_buffer_object", NULL},
+ {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions},
+ {"GL_EXT_stencil_wrap", NULL},
+ {"GL_EXT_texture_edge_clamp", NULL},
+ {"GL_EXT_texture_env_combine", NULL},
+ {"GL_EXT_texture_env_dot3", NULL},
+ {"GL_EXT_texture_filter_anisotropic", NULL},
+ {"GL_EXT_texture_lod_bias", NULL},
+ {"GL_3DFX_texture_compression_FXT1", NULL},
+ {"GL_APPLE_client_storage", NULL},
+ {"GL_MESA_pack_invert", NULL},
+ {"GL_MESA_ycbcr_texture", NULL},
+ {"GL_NV_blend_square", NULL},
+ {"GL_NV_vertex_program", GL_NV_vertex_program_functions},
+ {"GL_NV_vertex_program1_1", NULL},
+ {"GL_SGIS_generate_mipmap", NULL},
+ {NULL, NULL}
+};
+
+void
+dri_init_extensions(struct dri_context *ctx)
+{
+ /* The card_extensions list should be pruned according to the
+ * capabilities of the pipe_screen. This is actually something
+ * that can/should be done inside st_create_context().
+ */
+ if (ctx)
+ driInitExtensions(ctx->st->ctx, card_extensions, GL_TRUE);
+ else
+ driInitExtensions(NULL, card_extensions, GL_FALSE);
+}
+
+/* vim: set sw=3 ts=8 sts=3 expandtab: */
diff --git a/src/gallium/state_trackers/dri/dri_screen.c b/src/gallium/state_trackers/dri/dri_screen.c
new file mode 100644
index 0000000000..884b6d5011
--- /dev/null
+++ b/src/gallium/state_trackers/dri/dri_screen.c
@@ -0,0 +1,354 @@
+/**************************************************************************
+ *
+ * Copyright 2009, VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Author: Keith Whitwell <keithw@vmware.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ */
+
+#include "utils.h"
+#include "vblank.h"
+#include "xmlpool.h"
+
+#include "dri_screen.h"
+#include "dri_context.h"
+#include "dri_drawable.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_screen.h"
+#include "pipe/p_inlines.h"
+#include "pipe/p_format.h"
+#include "state_tracker/drm_api.h"
+#include "state_tracker/dri1_api.h"
+#include "state_tracker/st_public.h"
+#include "state_tracker/st_cb_fbo.h"
+
+PUBLIC const char __driConfigOptions[] =
+ DRI_CONF_BEGIN DRI_CONF_SECTION_PERFORMANCE
+ DRI_CONF_FTHROTTLE_MODE(DRI_CONF_FTHROTTLE_IRQS)
+ DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_DEF_INTERVAL_0)
+ DRI_CONF_SECTION_END DRI_CONF_SECTION_QUALITY
+ /*DRI_CONF_FORCE_S3TC_ENABLE(false) */
+ DRI_CONF_ALLOW_LARGE_TEXTURES(1)
+ DRI_CONF_SECTION_END DRI_CONF_END;
+
+ const uint __driNConfigOptions = 3;
+
+static const __DRItexBufferExtension dri2TexBufferExtension = {
+ { __DRI_TEX_BUFFER, __DRI_TEX_BUFFER_VERSION },
+ dri2_set_tex_buffer,
+ dri2_set_tex_buffer2,
+};
+
+ static const __DRIextension *dri_screen_extensions[] = {
+ &driReadDrawableExtension,
+ &driCopySubBufferExtension.base,
+ &driSwapControlExtension.base,
+ &driFrameTrackingExtension.base,
+ &driMediaStreamCounterExtension.base,
+ &dri2TexBufferExtension.base,
+ NULL
+ };
+
+struct dri1_api *__dri1_api_hooks = NULL;
+
+static const __DRIconfig **
+dri_fill_in_modes(struct dri_screen *screen,
+ unsigned pixel_bits)
+{
+ __DRIconfig **configs = NULL;
+ unsigned num_modes;
+ uint8_t depth_bits_array[5];
+ uint8_t stencil_bits_array[5];
+ uint8_t msaa_samples_array[1];
+ unsigned depth_buffer_factor;
+ unsigned back_buffer_factor;
+ unsigned msaa_samples_factor;
+ struct pipe_screen *p_screen = screen->pipe_screen;
+ boolean pf_r5g6b5, pf_a8r8g8b8, pf_x8r8g8b8;
+ boolean pf_z16, pf_x8z24, pf_z24x8, pf_s8z24, pf_z24s8, pf_z32;
+
+ static const GLenum back_buffer_modes[] = {
+ GLX_NONE, GLX_SWAP_UNDEFINED_OML, GLX_SWAP_COPY_OML
+ };
+
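+ /* Probe which depth/stencil and color formats the pipe screen
+ * supports; every supported combination adds an entry to the
+ * arrays handed to driCreateConfigs() below.
+ */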
+ depth_bits_array[0] = 0;
+ stencil_bits_array[0] = 0;
+ depth_buffer_factor = 1;
+
+ pf_z16 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_Z16_UNORM,
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_DEPTH_STENCIL, 0);
+ pf_z32 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_Z32_UNORM,
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_DEPTH_STENCIL, 0);
+ pf_x8z24 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_X8Z24_UNORM,
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_DEPTH_STENCIL, 0);
+ pf_z24x8 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_Z24X8_UNORM,
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_DEPTH_STENCIL, 0);
+ pf_s8z24 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_S8Z24_UNORM,
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_DEPTH_STENCIL, 0);
+ pf_z24s8 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_Z24S8_UNORM,
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_DEPTH_STENCIL, 0);
+ pf_r5g6b5 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_R5G6B5_UNORM,
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_RENDER_TARGET, 0);
+ pf_a8r8g8b8 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_A8R8G8B8_UNORM,
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_RENDER_TARGET, 0);
+ pf_x8r8g8b8 = p_screen->is_format_supported(p_screen, PIPE_FORMAT_X8R8G8B8_UNORM,
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_RENDER_TARGET, 0);
+
+ if (pf_z16) {
+ depth_bits_array[depth_buffer_factor] = 16;
+ stencil_bits_array[depth_buffer_factor++] = 0;
+ }
+ if (pf_x8z24 || pf_z24x8) {
+ depth_bits_array[depth_buffer_factor] = 24;
+ stencil_bits_array[depth_buffer_factor++] = 0;
+ screen->d_depth_bits_last = pf_x8z24;
+ }
+ if (pf_s8z24 || pf_z24s8) {
+ depth_bits_array[depth_buffer_factor] = 24;
+ stencil_bits_array[depth_buffer_factor++] = 8;
+ screen->sd_depth_bits_last = pf_s8z24;
+ }
+ if (pf_z32) {
+ depth_bits_array[depth_buffer_factor] = 32;
+ stencil_bits_array[depth_buffer_factor++] = 0;
+ }
+
+ msaa_samples_array[0] = 0;
+ back_buffer_factor = 3;
+ msaa_samples_factor = 1;
+
+ num_modes =
+ depth_buffer_factor * back_buffer_factor * msaa_samples_factor * 4;
+
+ if (pixel_bits == 16 && pf_r5g6b5) {
+ configs = driCreateConfigs(GL_RGB, GL_UNSIGNED_SHORT_5_6_5,
+ depth_bits_array, stencil_bits_array,
+ depth_buffer_factor, back_buffer_modes,
+ back_buffer_factor,
+ msaa_samples_array, 1);
+ } else {
+ __DRIconfig **configs_a8r8g8b8 = NULL;
+ __DRIconfig **configs_x8r8g8b8 = NULL;
+
+ if (pf_a8r8g8b8)
+ configs_a8r8g8b8 = driCreateConfigs(GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV,
+ depth_bits_array,
+ stencil_bits_array,
+ depth_buffer_factor,
+ back_buffer_modes,
+ back_buffer_factor,
+ msaa_samples_array, 1);
+ if (pf_x8r8g8b8)
+ configs_x8r8g8b8 = driCreateConfigs(GL_BGR, GL_UNSIGNED_INT_8_8_8_8_REV,
+ depth_bits_array,
+ stencil_bits_array,
+ depth_buffer_factor,
+ back_buffer_modes,
+ back_buffer_factor,
+ msaa_samples_array, 1);
+
+ if (configs_a8r8g8b8 && configs_x8r8g8b8)
+ configs = driConcatConfigs(configs_x8r8g8b8, configs_a8r8g8b8);
+ else if (configs_a8r8g8b8)
+ configs = configs_a8r8g8b8;
+ else if (configs_x8r8g8b8)
+ configs = configs_x8r8g8b8;
+ else
+ configs = NULL;
+ }
+
+ if (configs == NULL) {
+ debug_printf("%s: driCreateConfigs failed\n", __FUNCTION__);
+ return NULL;
+ }
+
+ return (const __DRIconfig **)configs;
+}
+
+/**
+ * Get information about previous buffer swaps.
+ */
+static int
+dri_get_swap_info(__DRIdrawablePrivate * dPriv, __DRIswapInfo * sInfo)
+{
+ if (dPriv == NULL || dPriv->driverPrivate == NULL || sInfo == NULL)
+ return -1;
+ else
+ return 0;
+}
+
+static INLINE void
+dri_copy_version(struct dri1_api_version *dst,
+ const struct __DRIversionRec *src)
+{
+ dst->major = src->major;
+ dst->minor = src->minor;
+ dst->patch_level = src->patch;
+}
+
+static const __DRIconfig **
+dri_init_screen(__DRIscreenPrivate * sPriv)
+{
+ struct dri_screen *screen;
+ const __DRIconfig **configs;
+ struct dri1_create_screen_arg arg;
+
+ dri_init_extensions(NULL);
+
+ screen = CALLOC_STRUCT(dri_screen);
+ if (!screen)
+ return NULL;
+
+ screen->api = drm_api_create();
+ screen->sPriv = sPriv;
+ screen->fd = sPriv->fd;
+ screen->drmLock = (drmLock *) & sPriv->pSAREA->lock;
+
+ sPriv->private = (void *)screen;
+ sPriv->extensions = dri_screen_extensions;
+
+ arg.base.mode = DRM_CREATE_DRI1;
+ arg.lf = &dri1_lf;
+ arg.ddx_info = sPriv->pDevPriv;
+ arg.ddx_info_size = sPriv->devPrivSize;
+ arg.sarea = sPriv->pSAREA;
+ dri_copy_version(&arg.ddx_version, &sPriv->ddx_version);
+ dri_copy_version(&arg.dri_version, &sPriv->dri_version);
+ dri_copy_version(&arg.drm_version, &sPriv->drm_version);
+ arg.api = NULL;
+
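+ /* The winsys is expected to fill in arg.api with its DRI1 hooks;
+ * a NULL api after create_screen() means DRI1 is not supported.
+ */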
+ screen->pipe_screen = screen->api->create_screen(screen->api, screen->fd, &arg.base);
+
+ if (!screen->pipe_screen || !arg.api) {
+ debug_printf("%s: failed to create dri1 screen\n", __FUNCTION__);
+ goto out_no_screen;
+ }
+
+ __dri1_api_hooks = arg.api;
+
+ screen->pipe_screen->flush_frontbuffer = dri1_flush_frontbuffer;
+ driParseOptionInfo(&screen->optionCache,
+ __driConfigOptions, __driNConfigOptions);
+
+ /**
+ * FIXME: If the driver supports format conversion swapbuffer blits, we might
+ * want to support other color bit depths than the server is currently
+ * using.
+ */
+
+ configs = dri_fill_in_modes(screen, sPriv->fbBPP);
+ if (!configs)
+ goto out_no_configs;
+
+ return configs;
+ out_no_configs:
+ screen->pipe_screen->destroy(screen->pipe_screen);
+ out_no_screen:
+ FREE(screen);
+ return NULL;
+}
+
+/**
+ * This is the driver specific part of the createNewScreen entry point.
+ *
+ * Returns the __GLcontextModes supported by this driver.
+ */
+static const __DRIconfig **
+dri_init_screen2(__DRIscreenPrivate * sPriv)
+{
+ struct dri_screen *screen;
+ struct drm_create_screen_arg arg;
+
+ /* Set up dispatch table to cope with all known extensions */
+ dri_init_extensions(NULL);
+
+ screen = CALLOC_STRUCT(dri_screen);
+ if (!screen)
+ goto fail;
+
+ screen->api = drm_api_create();
+ screen->sPriv = sPriv;
+ screen->fd = sPriv->fd;
+ sPriv->private = (void *)screen;
+ sPriv->extensions = dri_screen_extensions;
+ arg.mode = DRM_CREATE_NORMAL;
+
+ screen->pipe_screen = screen->api->create_screen(screen->api, screen->fd, &arg);
+ if (!screen->pipe_screen) {
+ debug_printf("%s: failed to create pipe_screen\n", __FUNCTION__);
+ goto fail;
+ }
+
+ /* We need to hook in here */
+ screen->pipe_screen->flush_frontbuffer = dri_flush_frontbuffer;
+
+ driParseOptionInfo(&screen->optionCache,
+ __driConfigOptions, __driNConfigOptions);
+
+ return dri_fill_in_modes(screen, 32);
+ fail:
+ return NULL;
+}
+
+static void
+dri_destroy_screen(__DRIscreenPrivate * sPriv)
+{
+ struct dri_screen *screen = dri_screen(sPriv);
+
+ screen->pipe_screen->destroy(screen->pipe_screen);
+ FREE(screen);
+ sPriv->private = NULL;
+}
+
+PUBLIC const struct __DriverAPIRec driDriverAPI = {
+ .InitScreen = dri_init_screen,
+ .DestroyScreen = dri_destroy_screen,
+ .CreateContext = dri_create_context,
+ .DestroyContext = dri_destroy_context,
+ .CreateBuffer = dri_create_buffer,
+ .DestroyBuffer = dri_destroy_buffer,
+ .SwapBuffers = dri_swap_buffers,
+ .MakeCurrent = dri_make_current,
+ .UnbindContext = dri_unbind_context,
+ .GetSwapInfo = dri_get_swap_info,
+ .GetDrawableMSC = driDrawableGetMSC32,
+ .WaitForMSC = driWaitForMSC32,
+ .CopySubBuffer = dri_copy_sub_buffer,
+ .InitScreen = dri_init_screen,
+ .InitScreen2 = dri_init_screen2,
+};
+
+/* vim: set sw=3 ts=8 sts=3 expandtab: */
diff --git a/src/gallium/state_trackers/dri/dri_screen.h b/src/gallium/state_trackers/dri/dri_screen.h
new file mode 100644
index 0000000000..f6c56d0f0c
--- /dev/null
+++ b/src/gallium/state_trackers/dri/dri_screen.h
@@ -0,0 +1,79 @@
+/**************************************************************************
+ *
+ * Copyright 2009, VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Author: Keith Whitwell <keithw@vmware.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ */
+
+#ifndef DRI_SCREEN_H
+#define DRI_SCREEN_H
+
+#include "dri_util.h"
+#include "xmlconfig.h"
+
+#include "pipe/p_compiler.h"
+
+#include "state_tracker/dri1_api.h"
+
+struct dri_screen
+{
+ /* dri */
+ __DRIscreenPrivate *sPriv;
+
+ /**
+ * Configuration cache with default values for all contexts
+ */
+ driOptionCache optionCache;
+
+ /* drm */
+ int fd;
+ drmLock *drmLock;
+
+ /* gallium */
+ struct drm_api *api;
+ struct pipe_winsys *pipe_winsys;
+ struct pipe_screen *pipe_screen;
+ boolean d_depth_bits_last;
+ boolean sd_depth_bits_last;
+};
+
+/** cast wrapper */
+static INLINE struct dri_screen *
+dri_screen(__DRIscreenPrivate * sPriv)
+{
+ return (struct dri_screen *)sPriv->private;
+}
+
+/***********************************************************************
+ * dri_screen.c
+ */
+
+extern struct dri1_api *__dri1_api_hooks;
+
+#endif
+
+/* vim: set sw=3 ts=8 sts=3 expandtab: */
diff --git a/src/gallium/state_trackers/egl/Makefile b/src/gallium/state_trackers/egl/Makefile
new file mode 100644
index 0000000000..e825aa718b
--- /dev/null
+++ b/src/gallium/state_trackers/egl/Makefile
@@ -0,0 +1,19 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = egldrm
+
+LIBRARY_INCLUDES = \
+ -I$(TOP)/src/gallium/include \
+ -I$(TOP)/src/gallium/auxiliary \
+ -I$(TOP)/src/mesa/drivers/dri/common \
+ -I$(TOP)/src/mesa \
+ -I$(TOP)/include \
+ -I$(TOP)/src/egl/main \
+ $(shell pkg-config --cflags-only-I libdrm)
+
+
+C_SOURCES = $(wildcard ./*.c)
+
+
+include ../../Makefile.template
diff --git a/src/gallium/state_trackers/egl/egl_context.c b/src/gallium/state_trackers/egl/egl_context.c
new file mode 100644
index 0000000000..c4f7361ca0
--- /dev/null
+++ b/src/gallium/state_trackers/egl/egl_context.c
@@ -0,0 +1,179 @@
+
+#include "utils.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "egl_tracker.h"
+
+#include "egllog.h"
+
+
+#include "pipe/p_context.h"
+#include "pipe/p_screen.h"
+
+#include "state_tracker/st_public.h"
+#include "state_tracker/drm_api.h"
+
+#include "GL/internal/glcore.h"
+
+#define need_GL_ARB_multisample
+#define need_GL_ARB_point_parameters
+#define need_GL_ARB_texture_compression
+#define need_GL_ARB_vertex_buffer_object
+#define need_GL_ARB_vertex_program
+#define need_GL_ARB_window_pos
+#define need_GL_EXT_blend_color
+#define need_GL_EXT_blend_equation_separate
+#define need_GL_EXT_blend_func_separate
+#define need_GL_EXT_blend_minmax
+#define need_GL_EXT_cull_vertex
+#define need_GL_EXT_fog_coord
+#define need_GL_EXT_framebuffer_object
+#define need_GL_EXT_multi_draw_arrays
+#define need_GL_EXT_secondary_color
+#define need_GL_NV_vertex_program
+#include "extension_helper.h"
+
+/**
+ * TODO HACK! FUGLY!
+ * Copied from the intel extensions.
+ */
+const struct dri_extension card_extensions[] = {
+ {"GL_ARB_multisample", GL_ARB_multisample_functions},
+ {"GL_ARB_multitexture", NULL},
+ {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions},
+ {"GL_ARB_texture_border_clamp", NULL},
+ {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions},
+ {"GL_ARB_texture_cube_map", NULL},
+ {"GL_ARB_texture_env_add", NULL},
+ {"GL_ARB_texture_env_combine", NULL},
+ {"GL_ARB_texture_env_dot3", NULL},
+ {"GL_ARB_texture_mirrored_repeat", NULL},
+ {"GL_ARB_texture_rectangle", NULL},
+ {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions},
+ {"GL_ARB_pixel_buffer_object", NULL},
+ {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions},
+ {"GL_ARB_window_pos", GL_ARB_window_pos_functions},
+ {"GL_EXT_blend_color", GL_EXT_blend_color_functions},
+ {"GL_EXT_blend_equation_separate", GL_EXT_blend_equation_separate_functions},
+ {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions},
+ {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions},
+ {"GL_EXT_blend_subtract", NULL},
+ {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions},
+ {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions},
+ {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions},
+ {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions},
+ {"GL_EXT_packed_depth_stencil", NULL},
+ {"GL_EXT_pixel_buffer_object", NULL},
+ {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions},
+ {"GL_EXT_stencil_wrap", NULL},
+ {"GL_EXT_texture_edge_clamp", NULL},
+ {"GL_EXT_texture_env_combine", NULL},
+ {"GL_EXT_texture_env_dot3", NULL},
+ {"GL_EXT_texture_filter_anisotropic", NULL},
+ {"GL_EXT_texture_lod_bias", NULL},
+ {"GL_3DFX_texture_compression_FXT1", NULL},
+ {"GL_APPLE_client_storage", NULL},
+ {"GL_MESA_pack_invert", NULL},
+ {"GL_MESA_ycbcr_texture", NULL},
+ {"GL_NV_blend_square", NULL},
+ {"GL_NV_vertex_program", GL_NV_vertex_program_functions},
+ {"GL_NV_vertex_program1_1", NULL},
+ {"GL_SGIS_generate_mipmap", NULL },
+ {NULL, NULL}
+};
+
+_EGLContext *
+drm_create_context(_EGLDriver *drv, _EGLDisplay *dpy, _EGLConfig *conf, _EGLContext *share_list, const EGLint *attrib_list)
+{
+ struct drm_device *dev = lookup_drm_device(dpy);
+ struct drm_context *ctx;
+ struct drm_context *share = NULL;
+ struct st_context *st_share = NULL;
+ int i;
+ __GLcontextModes *visual;
+
+ for (i = 0; attrib_list && attrib_list[i] != EGL_NONE; i++) {
+ switch (attrib_list[i]) {
+ /* no attribs defined for now */
+ default:
+ _eglError(EGL_BAD_ATTRIBUTE, "eglCreateContext");
+ return EGL_NO_CONTEXT;
+ }
+ }
+
+ ctx = (struct drm_context *) calloc(1, sizeof(struct drm_context));
+ if (!ctx)
+ goto err_c;
+
+ _eglInitContext(drv, &ctx->base, conf, attrib_list);
+
+ ctx->pipe = dev->api->create_context(dev->api, dev->screen);
+ if (!ctx->pipe)
+ goto err_pipe;
+
+ if (share)
+ st_share = share->st;
+
+ visual = drm_visual_from_config(conf);
+ ctx->st = st_create_context(ctx->pipe, visual, st_share);
+ drm_visual_modes_destroy(visual);
+
+ if (!ctx->st)
+ goto err_gl;
+
+ return &ctx->base;
+
+err_gl:
+ ctx->pipe->destroy(ctx->pipe);
+err_pipe:
+ free(ctx);
+err_c:
+ return NULL;
+}
+
+EGLBoolean
+drm_destroy_context(_EGLDriver *drv, _EGLDisplay *dpy, _EGLContext *context)
+{
+ struct drm_context *c = lookup_drm_context(context);
+ if (!_eglIsContextBound(&c->base)) {
+ st_destroy_context(c->st);
+ c->pipe->destroy(c->pipe);
+ free(c);
+ }
+ return EGL_TRUE;
+}
+
+EGLBoolean
+drm_make_current(_EGLDriver *drv, _EGLDisplay *dpy, _EGLSurface *draw, _EGLSurface *read, _EGLContext *context)
+{
+ struct drm_surface *readSurf = lookup_drm_surface(read);
+ struct drm_surface *drawSurf = lookup_drm_surface(draw);
+ struct drm_context *ctx = lookup_drm_context(context);
+ EGLBoolean b;
+
+ b = _eglMakeCurrent(drv, dpy, draw, read, context);
+ if (!b)
+ return EGL_FALSE;
+
+ if (ctx) {
+ if (!drawSurf || !readSurf)
+ return EGL_FALSE;
+
+ drawSurf->user = ctx;
+ readSurf->user = ctx;
+
+ st_make_current(ctx->st, drawSurf->stfb, readSurf->stfb);
+
+ /* st_resize_framebuffer needs a bound context to work */
+ st_resize_framebuffer(drawSurf->stfb, drawSurf->w, drawSurf->h);
+ st_resize_framebuffer(readSurf->stfb, readSurf->w, readSurf->h);
+ } else {
+ drawSurf->user = NULL;
+ readSurf->user = NULL;
+
+ st_make_current(NULL, NULL, NULL);
+ }
+
+ return EGL_TRUE;
+}
diff --git a/src/gallium/state_trackers/egl/egl_surface.c b/src/gallium/state_trackers/egl/egl_surface.c
new file mode 100644
index 0000000000..69e2d6b708
--- /dev/null
+++ b/src/gallium/state_trackers/egl/egl_surface.c
@@ -0,0 +1,387 @@
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "egl_tracker.h"
+
+#include "egllog.h"
+
+#include "pipe/p_inlines.h"
+#include "pipe/p_screen.h"
+#include "pipe/p_context.h"
+
+#include "state_tracker/drm_api.h"
+
+/*
+ * Util functions
+ */
+
+static drmModeModeInfoPtr
+drm_find_mode(drmModeConnectorPtr connector, _EGLMode *mode)
+{
+ int i;
+ drmModeModeInfoPtr m = NULL;
+
+ for (i = 0; i < connector->count_modes; i++) {
+ m = &connector->modes[i];
+ if (m->hdisplay == mode->Width && m->vdisplay == mode->Height && m->vrefresh == mode->RefreshRate)
+ break;
+ m = &connector->modes[0]; /* if we can't find one, return first */
+ }
+
+ return m;
+}
+
+static struct st_framebuffer *
+drm_create_framebuffer(const __GLcontextModes *visual,
+ unsigned width,
+ unsigned height,
+ void *priv)
+{
+ enum pipe_format colorFormat, depthFormat, stencilFormat;
+
+ if (visual->redBits == 5)
+ colorFormat = PIPE_FORMAT_R5G6B5_UNORM;
+ else
+ colorFormat = PIPE_FORMAT_A8R8G8B8_UNORM;
+
+ if (visual->depthBits == 16)
+ depthFormat = PIPE_FORMAT_Z16_UNORM;
+ else if (visual->depthBits == 24)
+ depthFormat = PIPE_FORMAT_S8Z24_UNORM;
+ else
+ depthFormat = PIPE_FORMAT_NONE;
+
+ if (visual->stencilBits == 8)
+ stencilFormat = PIPE_FORMAT_S8Z24_UNORM;
+ else
+ stencilFormat = PIPE_FORMAT_NONE;
+
+ return st_create_framebuffer(visual,
+ colorFormat,
+ depthFormat,
+ stencilFormat,
+ width,
+ height,
+ priv);
+}
+
+static void
+drm_create_texture(_EGLDisplay *dpy,
+ struct drm_screen *scrn,
+ unsigned w, unsigned h)
+{
+ struct drm_device *dev = lookup_drm_device(dpy);
+ struct pipe_screen *screen = dev->screen;
+ struct pipe_surface *surface;
+ struct pipe_texture *texture;
+ struct pipe_texture templat;
+ struct pipe_buffer *buf = NULL;
+ unsigned pitch = 0;
+
+ memset(&templat, 0, sizeof(templat));
+ templat.tex_usage = PIPE_TEXTURE_USAGE_RENDER_TARGET;
+ templat.tex_usage |= PIPE_TEXTURE_USAGE_PRIMARY;
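+ /* PRIMARY requests a scanout-capable allocation; the texture's
+ * handle is later passed to drmModeAddFB() as the front buffer.
+ */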
+ templat.target = PIPE_TEXTURE_2D;
+ templat.last_level = 0;
+ templat.depth[0] = 1;
+ templat.format = PIPE_FORMAT_A8R8G8B8_UNORM;
+ templat.width[0] = w;
+ templat.height[0] = h;
+ pf_get_block(templat.format, &templat.block);
+
+ texture = screen->texture_create(dev->screen,
+ &templat);
+
+ if (!texture)
+ goto err_tex;
+
+ surface = screen->get_tex_surface(screen,
+ texture,
+ 0,
+ 0,
+ 0,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ if (!surface)
+ goto err_surf;
+
+ scrn->tex = texture;
+ scrn->surface = surface;
+ scrn->front.width = w;
+ scrn->front.height = h;
+ scrn->front.pitch = pitch;
+ dev->api->local_handle_from_texture(dev->api, screen, texture,
+ &scrn->front.pitch, &scrn->front.handle);
+ if (0)
+ goto err_handle;
+
+ return;
+
+err_handle:
+ pipe_surface_reference(&surface, NULL);
+err_surf:
+ pipe_texture_reference(&texture, NULL);
+err_tex:
+ pipe_buffer_reference(&buf, NULL);
+ return;
+}
+
+/*
+ * Exported functions
+ */
+
+void
+drm_takedown_shown_screen(_EGLDisplay *dpy, struct drm_screen *screen)
+{
+ struct drm_device *dev = lookup_drm_device(dpy);
+
+ screen->surf = NULL;
+
+ drmModeSetCrtc(
+ dev->drmFD,
+ screen->crtcID,
+ 0, // FD
+ 0, 0,
+ NULL, 0, // List of output ids
+ NULL);
+
+ drmModeRmFB(dev->drmFD, screen->fbID);
+ drmModeFreeFB(screen->fb);
+ screen->fb = NULL;
+
+ pipe_surface_reference(&screen->surface, NULL);
+ pipe_texture_reference(&screen->tex, NULL);
+ pipe_buffer_reference(&screen->buffer, NULL);
+
+ screen->shown = 0;
+}
+
+_EGLSurface *
+drm_create_window_surface(_EGLDriver *drv, _EGLDisplay *dpy, _EGLConfig *conf, NativeWindowType window, const EGLint *attrib_list)
+{
+ return NULL;
+}
+
+
+_EGLSurface *
+drm_create_pixmap_surface(_EGLDriver *drv, _EGLDisplay *dpy, _EGLConfig *conf, NativePixmapType pixmap, const EGLint *attrib_list)
+{
+ return NULL;
+}
+
+
+_EGLSurface *
+drm_create_pbuffer_surface(_EGLDriver *drv, _EGLDisplay *dpy, _EGLConfig *conf,
+ const EGLint *attrib_list)
+{
+ int i;
+ int width = -1;
+ int height = -1;
+ struct drm_surface *surf = NULL;
+ __GLcontextModes *visual;
+
+ for (i = 0; attrib_list && attrib_list[i] != EGL_NONE; i++) {
+ switch (attrib_list[i]) {
+ case EGL_WIDTH:
+ width = attrib_list[++i];
+ break;
+ case EGL_HEIGHT:
+ height = attrib_list[++i];
+ break;
+ default:
+ _eglError(EGL_BAD_ATTRIBUTE, "eglCreatePbufferSurface");
+ return EGL_NO_SURFACE;
+ }
+ }
+
+ if (width < 1 || height < 1) {
+ _eglError(EGL_BAD_ATTRIBUTE, "eglCreatePbufferSurface");
+ return NULL;
+ }
+
+ surf = (struct drm_surface *) calloc(1, sizeof(struct drm_surface));
+ if (!surf)
+ goto err;
+
+ if (!_eglInitSurface(drv, &surf->base, EGL_PBUFFER_BIT, conf, attrib_list))
+ goto err_surf;
+
+ surf->w = width;
+ surf->h = height;
+
+ visual = drm_visual_from_config(conf);
+ surf->stfb = drm_create_framebuffer(visual,
+ width,
+ height,
+ (void*)surf);
+ drm_visual_modes_destroy(visual);
+
+ return &surf->base;
+
+err_surf:
+ free(surf);
+err:
+ return NULL;
+}
+
+_EGLSurface *
+drm_create_screen_surface_mesa(_EGLDriver *drv, _EGLDisplay *dpy, _EGLConfig *cfg,
+ const EGLint *attrib_list)
+{
+ EGLSurface surf = drm_create_pbuffer_surface(drv, dpy, cfg, attrib_list);
+
+ return surf;
+}
+
+EGLBoolean
+drm_show_screen_surface_mesa(_EGLDriver *drv, _EGLDisplay *dpy,
+ _EGLScreen *screen,
+ _EGLSurface *surface, _EGLMode *mode)
+{
+ struct drm_device *dev = lookup_drm_device(dpy);
+ struct drm_surface *surf = lookup_drm_surface(surface);
+ struct drm_screen *scrn = lookup_drm_screen(screen);
+ int ret;
+ unsigned int i, k;
+
+ if (scrn->shown)
+ drm_takedown_shown_screen(dpy, scrn);
+
+
+ drm_create_texture(dpy, scrn, mode->Width, mode->Height);
+ if (!scrn->buffer)
+ return EGL_FALSE;
+
+ ret = drmModeAddFB(dev->drmFD,
+ scrn->front.width, scrn->front.height,
+ 32, 32, scrn->front.pitch,
+ scrn->front.handle,
+ &scrn->fbID);
+
+ if (ret)
+ goto err_bo;
+
+ scrn->fb = drmModeGetFB(dev->drmFD, scrn->fbID);
+ if (!scrn->fb)
+ goto err_bo;
+
+ /* find a fitting crtc */
+ {
+ drmModeConnector *con = scrn->connector;
+
+ scrn->mode = drm_find_mode(con, mode);
+ if (!scrn->mode)
+ goto err_fb;
+
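+ /* Take the first CRTC that any of this connector's encoders can
+ * drive; possible_crtcs is a bitmask over the resources' CRTC list.
+ */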
+ for (k = 0; k < con->count_encoders; k++) {
+ drmModeEncoder *enc = drmModeGetEncoder(dev->drmFD, con->encoders[k]);
+ for (i = 0; i < dev->res->count_crtcs; i++) {
+ if (enc->possible_crtcs & (1<<i)) {
+ /* save the ID */
+ scrn->crtcID = dev->res->crtcs[i];
+
+ /* skip the rest */
+ i = dev->res->count_crtcs;
+ k = dev->res->count_encoders;
+ }
+ }
+ drmModeFreeEncoder(enc);
+ }
+ }
+
+ ret = drmModeSetCrtc(dev->drmFD,
+ scrn->crtcID,
+ scrn->fbID,
+ 0, 0,
+ &scrn->connectorID, 1,
+ scrn->mode);
+
+ if (ret)
+ goto err_crtc;
+
+
+ if (scrn->dpms)
+ drmModeConnectorSetProperty(dev->drmFD,
+ scrn->connectorID,
+ scrn->dpms->prop_id,
+ DRM_MODE_DPMS_ON);
+
+ surf->screen = scrn;
+
+ scrn->surf = surf;
+ scrn->shown = 1;
+
+ return EGL_TRUE;
+
+err_crtc:
+ scrn->crtcID = 0;
+
+err_fb:
+ drmModeRmFB(dev->drmFD, scrn->fbID);
+ drmModeFreeFB(scrn->fb);
+ scrn->fb = NULL;
+
+err_bo:
+ pipe_surface_reference(&scrn->surface, NULL);
+ pipe_texture_reference(&scrn->tex, NULL);
+ pipe_buffer_reference(&scrn->buffer, NULL);
+
+ return EGL_FALSE;
+}
+
+EGLBoolean
+drm_destroy_surface(_EGLDriver *drv, _EGLDisplay *dpy, _EGLSurface *surface)
+{
+ struct drm_surface *surf = lookup_drm_surface(surface);
+ if (!_eglIsSurfaceBound(&surf->base)) {
+ if (surf->screen)
+ drm_takedown_shown_screen(dpy, surf->screen);
+ st_unreference_framebuffer(surf->stfb);
+ free(surf);
+ }
+ return EGL_TRUE;
+}
+
+EGLBoolean
+drm_swap_buffers(_EGLDriver *drv, _EGLDisplay *dpy, _EGLSurface *draw)
+{
+ struct drm_device *dev = lookup_drm_device(dpy);
+ struct drm_surface *surf = lookup_drm_surface(draw);
+ struct pipe_surface *back_surf;
+
+ if (!surf)
+ return EGL_FALSE;
+
+ /* error checking */
+ if (!_eglSwapBuffers(drv, dpy, draw))
+ return EGL_FALSE;
+
+ st_get_framebuffer_surface(surf->stfb, ST_SURFACE_BACK_LEFT, &back_surf);
+
+ if (back_surf) {
+
+ st_notify_swapbuffers(surf->stfb);
+
+ if (surf->screen) {
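+ /* Present by copying the back buffer onto the screen's scanout
+ * surface, then flush the render and texture caches.
+ */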
+ surf->user->pipe->surface_copy(surf->user->pipe,
+ surf->screen->surface,
+ 0, 0,
+ back_surf,
+ 0, 0,
+ surf->w, surf->h);
+ surf->user->pipe->flush(surf->user->pipe, PIPE_FLUSH_RENDER_CACHE | PIPE_FLUSH_TEXTURE_CACHE, NULL);
+
+#ifdef DRM_MODE_FEATURE_DIRTYFB
+ /* TODO query connector property to see if this is needed */
+ drmModeDirtyFB(dev->drmFD, surf->screen->fbID, NULL, 0);
+#else
+ (void)dev;
+#endif
+
+ /* TODO more stuff here */
+ }
+ }
+
+ return EGL_TRUE;
+}
diff --git a/src/gallium/state_trackers/egl/egl_tracker.c b/src/gallium/state_trackers/egl/egl_tracker.c
new file mode 100644
index 0000000000..5140755001
--- /dev/null
+++ b/src/gallium/state_trackers/egl/egl_tracker.c
@@ -0,0 +1,271 @@
+
+#include "utils.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "egl_tracker.h"
+
+#include <fcntl.h>
+
+#include "egllog.h"
+#include "state_tracker/drm_api.h"
+
+#include "pipe/p_screen.h"
+#include "pipe/internal/p_winsys_screen.h"
+
+/** HACK */
+void* driDriverAPI;
+extern const struct dri_extension card_extensions[];
+
+
+/*
+ * Exported functions
+ */
+
+static void
+drm_unload(_EGLDriver *drv)
+{
+ free(drv);
+}
+
+/**
+ * The bootstrap function. Return a new drm_driver object and
+ * plug in API functions.
+ */
+_EGLDriver *
+_eglMain(const char *args)
+{
+ _EGLDriver *drv;
+
+ drv = (_EGLDriver *) calloc(1, sizeof(_EGLDriver));
+ if (!drv) {
+ return NULL;
+ }
+
+ /* First fill in the dispatch table with defaults */
+ _eglInitDriverFallbacks(drv);
+ /* then plug in our Drm-specific functions */
+ drv->API.Initialize = drm_initialize;
+ drv->API.Terminate = drm_terminate;
+ drv->API.CreateContext = drm_create_context;
+ drv->API.MakeCurrent = drm_make_current;
+ drv->API.CreateWindowSurface = drm_create_window_surface;
+ drv->API.CreatePixmapSurface = drm_create_pixmap_surface;
+ drv->API.CreatePbufferSurface = drm_create_pbuffer_surface;
+ drv->API.DestroySurface = drm_destroy_surface;
+ drv->API.DestroyContext = drm_destroy_context;
+ drv->API.CreateScreenSurfaceMESA = drm_create_screen_surface_mesa;
+ drv->API.ShowScreenSurfaceMESA = drm_show_screen_surface_mesa;
+ drv->API.SwapBuffers = drm_swap_buffers;
+
+ drv->Name = "DRM/Gallium/Win";
+ drv->Unload = drm_unload;
+
+ return drv;
+}
+
+static void
+drm_get_device_id(struct drm_device *device)
+{
+ char path[512];
+ FILE *file;
+ char *ret;
+
+ /* TODO get the real minor */
+ int minor = 0;
+
+ device->deviceID = 0;
+
+ snprintf(path, sizeof(path), "/sys/class/drm/card%d/device/device", minor);
+ file = fopen(path, "r");
+ if (!file) {
+ _eglLog(_EGL_WARNING, "Could not retrive device ID\n");
+ return;
+ }
+
+ ret = fgets(path, sizeof(path), file);
+ if (!ret) {
+ fclose(file);
+ return;
+ }
+
+ sscanf(path, "%x", &device->deviceID);
+ fclose(file);
+}
+
+static void
+drm_update_res(struct drm_device *dev)
+{
+ drmModeFreeResources(dev->res);
+ dev->res = drmModeGetResources(dev->drmFD);
+}
+
+static void
+drm_add_modes_from_connector(_EGLScreen *screen, drmModeConnectorPtr connector)
+{
+ drmModeModeInfoPtr m = NULL;
+ int i;
+
+ for (i = 0; i < connector->count_modes; i++) {
+ m = &connector->modes[i];
+ _eglAddNewMode(screen, m->hdisplay, m->vdisplay, m->vrefresh, m->name);
+ }
+}
+
+static void
+drm_find_dpms(struct drm_device *dev, struct drm_screen *screen)
+{
+ drmModeConnectorPtr c = screen->connector;
+ drmModePropertyPtr p;
+ int i;
+
+ for (i = 0; i < c->count_props; i++) {
+ p = drmModeGetProperty(dev->drmFD, c->props[i]);
+ if (!strcmp(p->name, "DPMS"))
+ break;
+
+ drmModeFreeProperty(p);
+ p = NULL;
+ }
+
+ screen->dpms = p;
+}
+
+static int drm_open_minor(int minor)
+{
+ char buf[64];
+
+ sprintf(buf, DRM_DEV_NAME, DRM_DIR_NAME, minor);
+ return open(buf, O_RDWR, 0);
+}
+
+EGLBoolean
+drm_initialize(_EGLDriver *drv, _EGLDisplay *disp, EGLint *major, EGLint *minor)
+{
+ struct drm_device *dev;
+ struct drm_screen *screen = NULL;
+ drmModeConnectorPtr connector = NULL;
+ drmModeResPtr res = NULL;
+ unsigned count_connectors = 0;
+ int num_screens = 0;
+ EGLint i;
+ int fd;
+
+ dev = (struct drm_device *) calloc(1, sizeof(struct drm_device));
+ if (!dev)
+ return EGL_FALSE;
+ dev->api = drm_api_create();
+
+ /* try the first node */
+ fd = drm_open_minor(0);
+ if (fd < 0)
+ goto err_fd;
+
+ dev->drmFD = fd;
+ drm_get_device_id(dev);
+
+ dev->screen = dev->api->create_screen(dev->api, dev->drmFD, NULL);
+ if (!dev->screen)
+ goto err_screen;
+ dev->winsys = dev->screen->winsys;
+
+ /* TODO HACK */
+ driInitExtensions(NULL, card_extensions, GL_FALSE);
+
+ drm_update_res(dev);
+ res = dev->res;
+ if (res)
+ count_connectors = res->count_connectors;
+ else
+ _eglLog(_EGL_WARNING, "Could not retrive kms information\n");
+
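+ /* Expose one EGL screen per connected connector, advertising the
+ * connector's KMS modes as EGL modes.
+ */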
+ for(i = 0; i < count_connectors && i < MAX_SCREENS; i++) {
+ connector = drmModeGetConnector(fd, res->connectors[i]);
+
+ if (!connector)
+ continue;
+
+ if (connector->connection != DRM_MODE_CONNECTED) {
+ drmModeFreeConnector(connector);
+ continue;
+ }
+
+ screen = calloc(1, sizeof(struct drm_screen));
+ if (!screen) {
+ drmModeFreeConnector(connector);
+ continue;
+ }
+ screen->connector = connector;
+ screen->connectorID = connector->connector_id;
+ _eglInitScreen(&screen->base);
+ _eglAddScreen(disp, &screen->base);
+ drm_add_modes_from_connector(&screen->base, connector);
+ drm_find_dpms(dev, screen);
+ dev->screens[num_screens++] = screen;
+ }
+ dev->count_screens = num_screens;
+
+ disp->DriverData = dev;
+
+ /* for now we only have one config */
+ _EGLConfig *config = calloc(1, sizeof(*config));
+ memset(config, 1, sizeof(*config));
+ _eglInitConfig(config, 1);
+ _eglSetConfigAttrib(config, EGL_RED_SIZE, 8);
+ _eglSetConfigAttrib(config, EGL_GREEN_SIZE, 8);
+ _eglSetConfigAttrib(config, EGL_BLUE_SIZE, 8);
+ _eglSetConfigAttrib(config, EGL_ALPHA_SIZE, 8);
+ _eglSetConfigAttrib(config, EGL_BUFFER_SIZE, 32);
+ _eglSetConfigAttrib(config, EGL_DEPTH_SIZE, 24);
+ _eglSetConfigAttrib(config, EGL_STENCIL_SIZE, 8);
+ _eglSetConfigAttrib(config, EGL_SURFACE_TYPE, EGL_PBUFFER_BIT);
+ _eglAddConfig(disp, config);
+
+ disp->ClientAPIsMask = EGL_OPENGL_BIT /*| EGL_OPENGL_ES_BIT*/;
+ /* enable supported extensions */
+ disp->Extensions.MESA_screen_surface = EGL_TRUE;
+ disp->Extensions.MESA_copy_context = EGL_TRUE;
+
+ *major = 1;
+ *minor = 4;
+
+ return EGL_TRUE;
+
+err_screen:
+ drmClose(fd);
+err_fd:
+ free(dev);
+ return EGL_FALSE;
+}
+
+EGLBoolean
+drm_terminate(_EGLDriver *drv, _EGLDisplay *dpy)
+{
+ struct drm_device *dev = lookup_drm_device(dpy);
+ struct drm_screen *screen;
+ int i = 0;
+
+ _eglReleaseDisplayResources(drv, dpy);
+ _eglCleanupDisplay(dpy);
+
+ drmFreeVersion(dev->version);
+
+ for (i = 0; i < dev->count_screens; i++) {
+ screen = dev->screens[i];
+
+ if (screen->shown)
+ drm_takedown_shown_screen(dpy, screen);
+
+ drmModeFreeProperty(screen->dpms);
+ drmModeFreeConnector(screen->connector);
+ _eglDestroyScreen(&screen->base);
+ dev->screens[i] = NULL;
+ }
+
+ dev->screen->destroy(dev->screen);
+ dev->winsys = NULL;
+
+ drmClose(dev->drmFD);
+
+ dev->api->destroy(dev->api);
+ free(dev);
+ dpy->DriverData = NULL;
+
+ return EGL_TRUE;
+}
diff --git a/src/gallium/state_trackers/egl/egl_tracker.h b/src/gallium/state_trackers/egl/egl_tracker.h
new file mode 100644
index 0000000000..dd4730f957
--- /dev/null
+++ b/src/gallium/state_trackers/egl/egl_tracker.h
@@ -0,0 +1,197 @@
+
+#ifndef _EGL_TRACKER_H_
+#define _EGL_TRACKER_H_
+
+#include <stdint.h>
+
+#include "eglconfig.h"
+#include "eglcontext.h"
+#include "egldisplay.h"
+#include "egldriver.h"
+#include "eglglobals.h"
+#include "eglmode.h"
+#include "eglscreen.h"
+#include "eglsurface.h"
+
+#include "xf86drm.h"
+#include "xf86drmMode.h"
+
+#include "pipe/p_compiler.h"
+
+#include "state_tracker/st_public.h"
+
+#define MAX_SCREENS 16
+
+struct pipe_winsys;
+struct pipe_screen;
+struct pipe_context;
+struct state_tracker;
+
+struct drm_screen;
+struct drm_context;
+
+struct drm_device
+{
+ /*
+ * pipe
+ */
+
+ struct drm_api *api;
+ struct pipe_winsys *winsys;
+ struct pipe_screen *screen;
+
+ /*
+ * drm
+ */
+
+ int drmFD;
+ drmVersionPtr version;
+ int deviceID;
+
+ drmModeResPtr res;
+
+ struct drm_screen *screens[MAX_SCREENS];
+ size_t count_screens;
+};
+
+struct drm_surface
+{
+ _EGLSurface base; /* base class/object */
+
+ /*
+ * pipe
+ */
+
+
+ struct st_framebuffer *stfb;
+
+ /*
+ * drm
+ */
+
+ struct drm_context *user;
+ struct drm_screen *screen;
+
+ int w;
+ int h;
+};
+
+struct drm_context
+{
+ _EGLContext base; /* base class/object */
+
+ /* pipe */
+
+ struct pipe_context *pipe;
+ struct st_context *st;
+};
+
+struct drm_screen
+{
+ _EGLScreen base;
+
+ /*
+ * pipe
+ */
+
+ struct pipe_buffer *buffer;
+ struct pipe_texture *tex;
+ struct pipe_surface *surface;
+
+ /*
+ * drm
+ */
+
+ struct {
+ unsigned height;
+ unsigned width;
+ unsigned pitch;
+ unsigned handle;
+ } front;
+
+ /* currently only one connector per screen is supported */
+ drmModeConnectorPtr connector;
+ uint32_t connectorID;
+
+ /* dpms property */
+ drmModePropertyPtr dpms;
+
+ /* Has this screen been shown */
+ int shown;
+
+ /* Surface that is currently attached to this screen */
+ struct drm_surface *surf;
+
+ /* framebuffer */
+ drmModeFBPtr fb;
+ uint32_t fbID;
+
+ /* crtc and mode used */
+ /*drmModeCrtcPtr crtc;*/
+ uint32_t crtcID;
+
+ drmModeModeInfoPtr mode;
+};
+
+
+static INLINE struct drm_device *
+lookup_drm_device(_EGLDisplay *d)
+{
+ return (struct drm_device *) d->DriverData;
+}
+
+
+static INLINE struct drm_context *
+lookup_drm_context(_EGLContext *c)
+{
+ return (struct drm_context *) c;
+}
+
+
+static INLINE struct drm_surface *
+lookup_drm_surface(_EGLSurface *s)
+{
+ return (struct drm_surface *) s;
+}
+
+static INLINE struct drm_screen *
+lookup_drm_screen(_EGLScreen *s)
+{
+ return (struct drm_screen *) s;
+}
+
+/**
+ * egl_visual.h
+ */
+/*@{*/
+void drm_visual_modes_destroy(__GLcontextModes *modes);
+__GLcontextModes* drm_visual_modes_create(unsigned count, size_t minimum_size);
+__GLcontextModes* drm_visual_from_config(_EGLConfig *conf);
+/*@}*/
+
+/**
+ * egl_surface.h
+ */
+/*@{*/
+void drm_takedown_shown_screen(_EGLDisplay *dpy, struct drm_screen *screen);
+/*@}*/
+
+/**
+ * All function exported to the egl side.
+ */
+/*@{*/
+EGLBoolean drm_initialize(_EGLDriver *drv, _EGLDisplay *dpy, EGLint *major, EGLint *minor);
+EGLBoolean drm_terminate(_EGLDriver *drv, _EGLDisplay *dpy);
+_EGLContext *drm_create_context(_EGLDriver *drv, _EGLDisplay *dpy, _EGLConfig *conf, _EGLContext *share_list, const EGLint *attrib_list);
+EGLBoolean drm_destroy_context(_EGLDriver *drv, _EGLDisplay *dpy, _EGLContext *context);
+_EGLSurface *drm_create_window_surface(_EGLDriver *drv, _EGLDisplay *dpy, _EGLConfig *conf, NativeWindowType window, const EGLint *attrib_list);
+_EGLSurface *drm_create_pixmap_surface(_EGLDriver *drv, _EGLDisplay *dpy, _EGLConfig *conf, NativePixmapType pixmap, const EGLint *attrib_list);
+_EGLSurface *drm_create_pbuffer_surface(_EGLDriver *drv, _EGLDisplay *dpy, _EGLConfig *conf, const EGLint *attrib_list);
+_EGLSurface *drm_create_screen_surface_mesa(_EGLDriver *drv, _EGLDisplay *dpy, _EGLConfig *conf, const EGLint *attrib_list);
+EGLBoolean drm_show_screen_surface_mesa(_EGLDriver *drv, _EGLDisplay *dpy, _EGLScreen *screen, _EGLSurface *surface, _EGLMode *mode);
+EGLBoolean drm_destroy_surface(_EGLDriver *drv, _EGLDisplay *dpy, _EGLSurface *surface);
+EGLBoolean drm_make_current(_EGLDriver *drv, _EGLDisplay *dpy, _EGLSurface *draw, _EGLSurface *read, _EGLContext *context);
+EGLBoolean drm_swap_buffers(_EGLDriver *drv, _EGLDisplay *dpy, _EGLSurface *draw);
+/*@}*/
+
+#endif
diff --git a/src/gallium/state_trackers/egl/egl_visual.c b/src/gallium/state_trackers/egl/egl_visual.c
new file mode 100644
index 0000000000..e59f893851
--- /dev/null
+++ b/src/gallium/state_trackers/egl/egl_visual.c
@@ -0,0 +1,85 @@
+
+#include "egl_tracker.h"
+
+#include "egllog.h"
+
+void
+drm_visual_modes_destroy(__GLcontextModes *modes)
+{
+ _eglLog(_EGL_DEBUG, "%s", __FUNCTION__);
+
+ while (modes) {
+ __GLcontextModes * const next = modes->next;
+ free(modes);
+ modes = next;
+ }
+}
+
+__GLcontextModes *
+drm_visual_modes_create(unsigned count, size_t minimum_size)
+{
+ /* This code was copied from libGLX and modified */
+ const size_t size = (minimum_size > sizeof(__GLcontextModes))
+ ? minimum_size : sizeof(__GLcontextModes);
+ __GLcontextModes * head = NULL;
+ __GLcontextModes ** next;
+ unsigned i;
+
+ _eglLog(_EGL_DEBUG, "%s %u %u", __FUNCTION__, count, (unsigned) minimum_size);
+
+ next = & head;
+ for (i = 0 ; i < count ; i++) {
+ *next = (__GLcontextModes *) calloc(1, size);
+ if (*next == NULL) {
+ drm_visual_modes_destroy(head);
+ head = NULL;
+ break;
+ }
+
+ (*next)->doubleBufferMode = 1;
+ (*next)->visualID = GLX_DONT_CARE;
+ (*next)->visualType = GLX_DONT_CARE;
+ (*next)->visualRating = GLX_NONE;
+ (*next)->transparentPixel = GLX_NONE;
+ (*next)->transparentRed = GLX_DONT_CARE;
+ (*next)->transparentGreen = GLX_DONT_CARE;
+ (*next)->transparentBlue = GLX_DONT_CARE;
+ (*next)->transparentAlpha = GLX_DONT_CARE;
+ (*next)->transparentIndex = GLX_DONT_CARE;
+ (*next)->xRenderable = GLX_DONT_CARE;
+ (*next)->fbconfigID = GLX_DONT_CARE;
+ (*next)->swapMethod = GLX_SWAP_UNDEFINED_OML;
+ (*next)->bindToTextureRgb = GLX_DONT_CARE;
+ (*next)->bindToTextureRgba = GLX_DONT_CARE;
+ (*next)->bindToMipmapTexture = GLX_DONT_CARE;
+ (*next)->bindToTextureTargets = 0;
+ (*next)->yInverted = GLX_DONT_CARE;
+
+ next = & ((*next)->next);
+ }
+
+ return head;
+}
+
+__GLcontextModes *
+drm_visual_from_config(_EGLConfig *conf)
+{
+ __GLcontextModes *visual;
+ (void)conf;
+
+ visual = drm_visual_modes_create(1, sizeof(*visual));
+ visual->redBits = 8;
+ visual->greenBits = 8;
+ visual->blueBits = 8;
+ visual->alphaBits = 8;
+
+ visual->rgbBits = 32;
+ visual->doubleBufferMode = 1;
+
+ visual->depthBits = 24;
+ visual->haveDepthBuffer = visual->depthBits > 0;
+ visual->stencilBits = 8;
+ visual->haveStencilBuffer = visual->stencilBits > 0;
+
+ return visual;
+}
diff --git a/src/gallium/state_trackers/es/Makefile b/src/gallium/state_trackers/es/Makefile
new file mode 100644
index 0000000000..1ffabf339a
--- /dev/null
+++ b/src/gallium/state_trackers/es/Makefile
@@ -0,0 +1,100 @@
+# src/gallium/state_trackers/es/Makefile
+
+# Build the ES 1/2 state tracker libraries
+# This consists of core Mesa ES, plus GL/gallium state tracker.
+
+TOP = ../../../..
+include $(TOP)/configs/current
+
+GLES_1_VERSION_MAJOR = 1
+GLES_1_VERSION_MINOR = 1
+GLES_1_VERSION_PATCH = 0
+
+GLES_2_VERSION_MAJOR = 2
+GLES_2_VERSION_MINOR = 0
+GLES_2_VERSION_PATCH = 0
+
+
+# Maybe move these into configs/default:
+GLES_1_LIB = GLESv1_CM
+GLES_1_LIB_NAME = lib$(GLES_1_LIB).so
+GLES_2_LIB = GLESv2
+GLES_2_LIB_NAME = lib$(GLES_2_LIB).so
+
+
+INCLUDE_DIRS = \
+ -I$(TOP)/src/mesa \
+ -I$(TOP)/include
+
+ES1_SOURCES = \
+ st_es1.c \
+
+ES1_OBJECTS = $(ES1_SOURCES:.c=.o)
+
+ES2_SOURCES = \
+ st_es2.c \
+
+ES2_OBJECTS = $(ES2_SOURCES:.c=.o)
+
+
+# we only need the gallium libs that the state trackers directly use:
+GALLIUM_LIBS = \
+ $(TOP)/src/gallium/auxiliary/cso_cache/libcso_cache.a \
+ $(TOP)/src/gallium/auxiliary/rtasm/librtasm.a \
+ $(TOP)/src/gallium/auxiliary/tgsi/libtgsi.a \
+ $(TOP)/src/gallium/auxiliary/util/libutil.a
+
+ES1_LIBS = \
+ $(TOP)/src/mesa/es/libes1gallium.a \
+ $(TOP)/src/mesa/es/libes1api.a
+
+ES2_LIBS = \
+ $(TOP)/src/mesa/es/libes2gallium.a \
+ $(TOP)/src/mesa/es/libes2api.a
+
+SYS_LIBS = -lm -pthread
+
+
+.c.o:
+ $(CC) -c $(INCLUDE_DIRS) $(CFLAGS) $< -o $@
+
+
+# Default: make both GL ES 1.1 and GL ES 2.0 libraries
+default: $(TOP)/$(LIB_DIR)/$(GLES_1_LIB_NAME) $(TOP)/$(LIB_DIR)/$(GLES_2_LIB_NAME)
+
+# Make the shared libs
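+# --whole-archive pulls every object from the ES/Mesa archives into the
+# shared library, and --start-group/--end-group lets the linker resolve
+# cross references among the gallium helper archives.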
+$(TOP)/$(LIB_DIR)/$(GLES_1_LIB_NAME): $(ES1_OBJECTS) $(ES1_LIBS)
+ $(TOP)/bin/mklib -o $(GLES_1_LIB) \
+ -major $(GLES_1_VERSION_MAJOR) \
+ -minor $(GLES_1_VERSION_MINOR) \
+ -patch $(GLES_1_VERSION_PATCH) \
+ -install $(TOP)/$(LIB_DIR) \
+ $(ES1_OBJECTS) \
+ -Wl,--whole-archive $(ES1_LIBS) -Wl,--no-whole-archive \
+ -Wl,--start-group $(GALLIUM_LIBS) -Wl,--end-group \
+ $(SYS_LIBS)
+
+$(TOP)/$(LIB_DIR)/$(GLES_2_LIB_NAME): $(ES2_OBJECTS) $(ES2_LIBS)
+ $(TOP)/bin/mklib -o $(GLES_2_LIB) \
+ -major $(GLES_2_VERSION_MAJOR) \
+ -minor $(GLES_2_VERSION_MINOR) \
+ -patch $(GLES_2_VERSION_PATCH) \
+ -install $(TOP)/$(LIB_DIR) \
+ $(ES2_OBJECTS) \
+ -Wl,--whole-archive $(ES2_LIBS) -Wl,--no-whole-archive \
+ -Wl,--start-group $(GALLIUM_LIBS) -Wl,--end-group \
+ $(SYS_LIBS)
+
+install: default
+ $(INSTALL) -d $(INSTALL_DIR)/include/GLES
+ $(INSTALL) -m 644 $(TOP)/include/GLES/*.h $(INSTALL_DIR)/include/GLES
+ $(INSTALL) -d $(INSTALL_DIR)/include/GLES2
+ $(INSTALL) -m 644 $(TOP)/include/GLES2/*.h $(INSTALL_DIR)/include/GLES2
+ $(INSTALL) -d $(INSTALL_DIR)/$(LIB_DIR)
+ $(INSTALL) $(TOP)/$(LIB_DIR)/libGLESv1* $(INSTALL_DIR)/$(LIB_DIR)
+ $(INSTALL) $(TOP)/$(LIB_DIR)/libGLESv2* $(INSTALL_DIR)/$(LIB_DIR)
+
+clean:
+ -rm -f *.o *~
+ -rm -f $(TOP)/$(LIB_DIR)/$(GLES_1_LIB_NAME)* $(TOP)/$(LIB_DIR)/$(GLES_2_LIB_NAME)*
+ -rm -f *.pyc
diff --git a/src/gallium/state_trackers/es/st_es1.c b/src/gallium/state_trackers/es/st_es1.c
new file mode 100644
index 0000000000..7f0c038957
--- /dev/null
+++ b/src/gallium/state_trackers/es/st_es1.c
@@ -0,0 +1 @@
+const int st_api_OpenGL_ES1 = 1;
diff --git a/src/gallium/state_trackers/es/st_es2.c b/src/gallium/state_trackers/es/st_es2.c
new file mode 100644
index 0000000000..78e3791fbe
--- /dev/null
+++ b/src/gallium/state_trackers/es/st_es2.c
@@ -0,0 +1 @@
+const int st_api_OpenGL_ES2 = 1;
diff --git a/src/gallium/state_trackers/g3dvl/Makefile b/src/gallium/state_trackers/g3dvl/Makefile
new file mode 100644
index 0000000000..f9f4d6be3c
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/Makefile
@@ -0,0 +1,21 @@
+TARGET = libg3dvl.a
+OBJECTS = vl_display.o vl_screen.o vl_context.o vl_surface.o vl_shader_build.o vl_util.o vl_basic_csc.o \
+ vl_r16snorm_mc_buf.o
+GALLIUMDIR = ../..
+
+CFLAGS += -g -Wall -Werror-implicit-function-declaration -fPIC \
+ -I${GALLIUMDIR}/include \
+ -I${GALLIUMDIR}/auxiliary \
+ -I${GALLIUMDIR}/winsys/g3dvl \
+
+#############################################
+
+.PHONY = all clean
+
+all: ${TARGET}
+
+${TARGET}: ${OBJECTS}
+ ar rcs $@ $^
+
+clean:
+ rm -rf ${OBJECTS} ${TARGET}
diff --git a/src/gallium/state_trackers/g3dvl/vl_basic_csc.c b/src/gallium/state_trackers/g3dvl/vl_basic_csc.c
new file mode 100644
index 0000000000..20d682de3f
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_basic_csc.c
@@ -0,0 +1,714 @@
+#define VL_INTERNAL
+#include "vl_basic_csc.h"
+#include <assert.h>
+#include <pipe/p_context.h>
+#include <pipe/p_state.h>
+#include <pipe/p_inlines.h>
+#include <tgsi/tgsi_parse.h>
+#include <tgsi/tgsi_build.h>
+#include <util/u_memory.h>
+#include "vl_csc.h"
+#include "vl_surface.h"
+#include "vl_shader_build.h"
+#include "vl_types.h"
+
+struct vlVertexShaderConsts
+{
+ struct vlVertex4f dst_scale;
+ struct vlVertex4f dst_trans;
+ struct vlVertex4f src_scale;
+ struct vlVertex4f src_trans;
+};
+
+struct vlFragmentShaderConsts
+{
+ struct vlVertex4f bias;
+ float matrix[16];
+};
+
+struct vlBasicCSC
+{
+ struct vlCSC base;
+
+ struct pipe_context *pipe;
+ struct pipe_viewport_state viewport;
+ struct pipe_framebuffer_state framebuffer;
+ struct pipe_texture *framebuffer_tex;
+ void *sampler;
+ void *vertex_shader, *fragment_shader;
+ struct pipe_vertex_buffer vertex_bufs[2];
+ struct pipe_vertex_element vertex_elems[2];
+ struct pipe_constant_buffer vs_const_buf, fs_const_buf;
+};
+
+static int vlResizeFrameBuffer
+(
+ struct vlCSC *csc,
+ unsigned int width,
+ unsigned int height
+)
+{
+ struct vlBasicCSC *basic_csc;
+ struct pipe_context *pipe;
+ struct pipe_texture template;
+
+ assert(csc);
+
+ basic_csc = (struct vlBasicCSC*)csc;
+ pipe = basic_csc->pipe;
+
+ if (basic_csc->framebuffer.width == width && basic_csc->framebuffer.height == height)
+ return 0;
+
+ basic_csc->viewport.scale[0] = width;
+ basic_csc->viewport.scale[1] = height;
+ basic_csc->viewport.scale[2] = 1;
+ basic_csc->viewport.scale[3] = 1;
+ basic_csc->viewport.translate[0] = 0;
+ basic_csc->viewport.translate[1] = 0;
+ basic_csc->viewport.translate[2] = 0;
+ basic_csc->viewport.translate[3] = 0;
+
+ if (basic_csc->framebuffer_tex)
+ {
+ pipe_surface_reference(&basic_csc->framebuffer.cbufs[0], NULL);
+ pipe_texture_reference(&basic_csc->framebuffer_tex, NULL);
+ }
+
+ memset(&template, 0, sizeof(struct pipe_texture));
+ template.target = PIPE_TEXTURE_2D;
+ template.format = PIPE_FORMAT_A8R8G8B8_UNORM;
+ template.last_level = 0;
+ template.width[0] = width;
+ template.height[0] = height;
+ template.depth[0] = 1;
+ pf_get_block(template.format, &template.block);
+ template.tex_usage = PIPE_TEXTURE_USAGE_DISPLAY_TARGET;
+
+ basic_csc->framebuffer_tex = pipe->screen->texture_create(pipe->screen, &template);
+
+ basic_csc->framebuffer.width = width;
+ basic_csc->framebuffer.height = height;
+ basic_csc->framebuffer.cbufs[0] = pipe->screen->get_tex_surface
+ (
+ pipe->screen,
+ basic_csc->framebuffer_tex,
+ 0, 0, 0, PIPE_BUFFER_USAGE_GPU_READ | PIPE_BUFFER_USAGE_GPU_WRITE
+ );
+
+ /* Clear to black, in case video doesn't fill the entire window */
+ pipe->set_framebuffer_state(pipe, &basic_csc->framebuffer);
+ pipe->clear(pipe, PIPE_CLEAR_COLOR, 0, 0.0f, 0);
+
+ return 0;
+}
+
+static int vlBegin
+(
+ struct vlCSC *csc
+)
+{
+ struct vlBasicCSC *basic_csc;
+ struct pipe_context *pipe;
+
+ assert(csc);
+
+ basic_csc = (struct vlBasicCSC*)csc;
+ pipe = basic_csc->pipe;
+
+ pipe->set_framebuffer_state(pipe, &basic_csc->framebuffer);
+ pipe->set_viewport_state(pipe, &basic_csc->viewport);
+ pipe->bind_sampler_states(pipe, 1, (void**)&basic_csc->sampler);
+ /* Source texture set in vlPutPictureCSC() */
+ pipe->bind_vs_state(pipe, basic_csc->vertex_shader);
+ pipe->bind_fs_state(pipe, basic_csc->fragment_shader);
+ pipe->set_vertex_buffers(pipe, 2, basic_csc->vertex_bufs);
+ pipe->set_vertex_elements(pipe, 2, basic_csc->vertex_elems);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, &basic_csc->vs_const_buf);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, &basic_csc->fs_const_buf);
+
+ return 0;
+}
+
+static int vlPutPictureCSC
+(
+ struct vlCSC *csc,
+ struct vlSurface *surface,
+ int srcx,
+ int srcy,
+ int srcw,
+ int srch,
+ int destx,
+ int desty,
+ int destw,
+ int desth,
+ enum vlPictureType picture_type
+)
+{
+ struct vlBasicCSC *basic_csc;
+ struct pipe_context *pipe;
+ struct vlVertexShaderConsts *vs_consts;
+
+ assert(csc);
+ assert(surface);
+
+ basic_csc = (struct vlBasicCSC*)csc;
+ pipe = basic_csc->pipe;
+
+ vs_consts = pipe_buffer_map
+ (
+ pipe->screen,
+ basic_csc->vs_const_buf.buffer,
+ PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
+ );
+
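+ /*
+  * The vertex shader computes pos = v * dst_scale + dst_trans and
+  * texcoord = t * src_scale + src_trans, so these constants map the unit
+  * quad to the destination rect in normalized frame buffer coords and to
+  * the source rect in normalized texture coords.
+  */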
+ vs_consts->dst_scale.x = destw / (float)basic_csc->framebuffer.cbufs[0]->width;
+ vs_consts->dst_scale.y = desth / (float)basic_csc->framebuffer.cbufs[0]->height;
+ vs_consts->dst_scale.z = 1;
+ vs_consts->dst_scale.w = 1;
+ vs_consts->dst_trans.x = destx / (float)basic_csc->framebuffer.cbufs[0]->width;
+ vs_consts->dst_trans.y = desty / (float)basic_csc->framebuffer.cbufs[0]->height;
+ vs_consts->dst_trans.z = 0;
+ vs_consts->dst_trans.w = 0;
+
+ vs_consts->src_scale.x = srcw / (float)surface->texture->width[0];
+ vs_consts->src_scale.y = srch / (float)surface->texture->height[0];
+ vs_consts->src_scale.z = 1;
+ vs_consts->src_scale.w = 1;
+ vs_consts->src_trans.x = srcx / (float)surface->texture->width[0];
+ vs_consts->src_trans.y = srcy / (float)surface->texture->height[0];
+ vs_consts->src_trans.z = 0;
+ vs_consts->src_trans.w = 0;
+
+ pipe_buffer_unmap(pipe->screen, basic_csc->vs_const_buf.buffer);
+
+ pipe->set_sampler_textures(pipe, 1, &surface->texture);
+ pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLE_STRIP, 0, 4);
+
+ return 0;
+}
+
+static int vlEnd
+(
+ struct vlCSC *csc
+)
+{
+ assert(csc);
+
+ return 0;
+}
+
+static struct pipe_surface* vlGetFrameBuffer
+(
+ struct vlCSC *csc
+)
+{
+ struct vlBasicCSC *basic_csc;
+
+ assert(csc);
+
+ basic_csc = (struct vlBasicCSC*)csc;
+
+ return basic_csc->framebuffer.cbufs[0];
+}
+
+static int vlDestroy
+(
+ struct vlCSC *csc
+)
+{
+ struct vlBasicCSC *basic_csc;
+ struct pipe_context *pipe;
+ unsigned int i;
+
+ assert(csc);
+
+ basic_csc = (struct vlBasicCSC*)csc;
+ pipe = basic_csc->pipe;
+
+ if (basic_csc->framebuffer_tex)
+ {
+ pipe_surface_reference(&basic_csc->framebuffer.cbufs[0], NULL);
+ pipe_texture_reference(&basic_csc->framebuffer_tex, NULL);
+ }
+
+ pipe->delete_sampler_state(pipe, basic_csc->sampler);
+ pipe->delete_vs_state(pipe, basic_csc->vertex_shader);
+ pipe->delete_fs_state(pipe, basic_csc->fragment_shader);
+
+ for (i = 0; i < 2; ++i)
+ pipe_buffer_reference(&basic_csc->vertex_bufs[i].buffer, NULL);
+
+ pipe_buffer_reference(&basic_csc->vs_const_buf.buffer, NULL);
+ pipe_buffer_reference(&basic_csc->fs_const_buf.buffer, NULL);
+
+ FREE(basic_csc);
+
+ return 0;
+}
+
+/*
+ * Represents 2 triangles in a strip in normalized coords.
+ * Used to render the surface onto the frame buffer.
+ */
+static const struct vlVertex2f surface_verts[4] =
+{
+ {0.0f, 0.0f},
+ {0.0f, 1.0f},
+ {1.0f, 0.0f},
+ {1.0f, 1.0f}
+};
+
+/*
+ * Represents texcoords for the above. We can use the position values directly.
+ * TODO: Duplicate these in the shader, no need to create a buffer.
+ */
+static const struct vlVertex2f *surface_texcoords = surface_verts;
+
+/*
+ * Identity color conversion constants, for debugging
+ */
+static const struct vlFragmentShaderConsts identity =
+{
+ {
+ 0.0f, 0.0f, 0.0f, 0.0f
+ },
+ {
+ 1.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 1.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 1.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+ }
+};
+
+/*
+ * Converts ITU-R BT.601 YCbCr pixels to RGB pixels where:
+ * Y is in [16,235], Cb and Cr are in [16,240]
+ * R, G, and B are in [16,235]
+ */
+static const struct vlFragmentShaderConsts bt_601 =
+{
+ {
+ 0.0f, 0.501960784f, 0.501960784f, 0.0f
+ },
+ {
+ 1.0f, 0.0f, 1.371f, 0.0f,
+ 1.0f, -0.336f, -0.698f, 0.0f,
+ 1.0f, 1.732f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+ }
+};
+
+/*
+ * Converts ITU-R BT.601 YCbCr pixels to RGB pixels where:
+ * Y is in [16,235], Cb and Cr are in [16,240]
+ * R, G, and B are in [0,255]
+ */
+static const struct vlFragmentShaderConsts bt_601_full =
+{
+ {
+ 0.062745098f, 0.501960784f, 0.501960784f, 0.0f
+ },
+ {
+ 1.164f, 0.0f, 1.596f, 0.0f,
+ 1.164f, -0.391f, -0.813f, 0.0f,
+ 1.164f, 2.018f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+ }
+};
+
+/*
+ * Converts ITU-R BT.709 YCbCr pixels to RGB pixels where:
+ * Y is in [16,235], Cb and Cr are in [16,240]
+ * R, G, and B are in [16,235]
+ */
+static const struct vlFragmentShaderConsts bt_709 =
+{
+ {
+ 0.0f, 0.501960784f, 0.501960784f, 0.0f
+ },
+ {
+ 1.0f, 0.0f, 1.540f, 0.0f,
+ 1.0f, -0.183f, -0.459f, 0.0f,
+ 1.0f, 1.816f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+ }
+};
+
+/*
+ * Converts ITU-R BT.709 YCbCr pixels to RGB pixels where:
+ * Y is in [16,235], Cb and Cr are in [16,240]
+ * R, G, and B are in [0,255]
+ */
+static const struct vlFragmentShaderConsts bt_709_full =
+{
+ {
+ 0.062745098f, 0.501960784f, 0.501960784f, 0.0f
+ },
+ {
+ 1.164f, 0.0f, 1.793f, 0.0f,
+ 1.164f, -0.213f, -0.534f, 0.0f,
+ 1.164f, 2.115f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+ }
+};
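+
+/*
+ * In the fragment shader the output color is matrix * (texel - bias). For
+ * bt_601_full, assuming the source texel is sampled as (Y, Cb, Cr), this
+ * expands to the familiar full-range equations:
+ *   R = 1.164 (Y - 16/255) + 1.596 (Cr - 128/255)
+ *   G = 1.164 (Y - 16/255) - 0.391 (Cb - 128/255) - 0.813 (Cr - 128/255)
+ *   B = 1.164 (Y - 16/255) + 2.018 (Cb - 128/255)
+ */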
+
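+/*
+ * The shader builders below emit TGSI directly: version, header and
+ * processor tokens first, then declarations and instructions appended at
+ * offset ti via tgsi_build_full_declaration()/tgsi_build_full_instruction().
+ */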
+static int vlCreateVertexShader
+(
+ struct vlBasicCSC *csc
+)
+{
+ const unsigned int max_tokens = 50;
+
+ struct pipe_context *pipe;
+ struct pipe_shader_state vs;
+ struct tgsi_token *tokens;
+ struct tgsi_header *header;
+
+ struct tgsi_full_declaration decl;
+ struct tgsi_full_instruction inst;
+
+ unsigned int ti;
+ unsigned int i;
+
+ assert(csc);
+
+ pipe = csc->pipe;
+ tokens = (struct tgsi_token*)MALLOC(max_tokens * sizeof(struct tgsi_token));
+
+ /* Version */
+ *(struct tgsi_version*)&tokens[0] = tgsi_build_version();
+ /* Header */
+ header = (struct tgsi_header*)&tokens[1];
+ *header = tgsi_build_header();
+ /* Processor */
+ *(struct tgsi_processor*)&tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_VERTEX, header);
+
+ ti = 3;
+
+ /*
+ * decl i0 ; Vertex pos
+ * decl i1 ; Vertex texcoords
+ */
+ for (i = 0; i < 2; i++)
+ {
+ decl = vl_decl_input(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * decl c0 ; Scaling vector to scale vertex pos rect to destination size
+ * decl c1 ; Translation vector to move vertex pos rect into position
+ * decl c2 ; Scaling vector to scale texcoord rect to source size
+ * decl c3 ; Translation vector to move texcoord rect into position
+ */
+ decl = vl_decl_constants(TGSI_SEMANTIC_GENERIC, 0, 0, 3);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /*
+ * decl o0 ; Vertex pos
+ * decl o1 ; Vertex texcoords
+ */
+ for (i = 0; i < 2; i++)
+ {
+ decl = vl_decl_output(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* decl t0, t1 */
+ decl = vl_decl_temps(0, 1);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /*
+ * madd o0, i0, c0, c1 ; Scale and translate unit output rect to destination size and pos
+ * madd o1, i1, c2, c3 ; Scale and translate unit texcoord rect to source size and pos
+ */
+ for (i = 0; i < 2; ++i)
+ {
+ inst = vl_inst4(TGSI_OPCODE_MADD, TGSI_FILE_OUTPUT, i, TGSI_FILE_INPUT, i, TGSI_FILE_CONSTANT, i * 2, TGSI_FILE_CONSTANT, i * 2 + 1);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* end */
+ inst = vl_end();
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ vs.tokens = tokens;
+ csc->vertex_shader = pipe->create_vs_state(pipe, &vs);
+ FREE(tokens);
+
+ return 0;
+}
+
+static int vlCreateFragmentShader
+(
+ struct vlBasicCSC *csc
+)
+{
+ const unsigned int max_tokens = 50;
+
+ struct pipe_context *pipe;
+ struct pipe_shader_state fs;
+ struct tgsi_token *tokens;
+ struct tgsi_header *header;
+
+ struct tgsi_full_declaration decl;
+ struct tgsi_full_instruction inst;
+
+ unsigned int ti;
+ unsigned int i;
+
+ assert(csc);
+
+ pipe = csc->pipe;
+ tokens = (struct tgsi_token*)MALLOC(max_tokens * sizeof(struct tgsi_token));
+
+ /* Version */
+ *(struct tgsi_version*)&tokens[0] = tgsi_build_version();
+ /* Header */
+ header = (struct tgsi_header*)&tokens[1];
+ *header = tgsi_build_header();
+ /* Processor */
+ *(struct tgsi_processor*)&tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_FRAGMENT, header);
+
+ ti = 3;
+
+ /* decl i0 ; Texcoords for s0 */
+ decl = vl_decl_interpolated_input(TGSI_SEMANTIC_GENERIC, 1, 0, 0, TGSI_INTERPOLATE_LINEAR);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /*
+ * decl c0 ; Bias vector for CSC
+ * decl c1-c4 ; CSC matrix c1-c4
+ */
+ decl = vl_decl_constants(TGSI_SEMANTIC_GENERIC, 0, 0, 4);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /* decl o0 ; Fragment color */
+ decl = vl_decl_output(TGSI_SEMANTIC_COLOR, 0, 0, 0);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /* decl t0 */
+ decl = vl_decl_temps(0, 0);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /* decl s0 ; Sampler for tex containing picture to display */
+ decl = vl_decl_samplers(0, 0);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /* tex2d t0, i0, s0 ; Read src pixel */
+ inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_INPUT, 0, TGSI_FILE_SAMPLER, 0);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* sub t0, t0, c0 ; Subtract bias vector from pixel */
+ inst = vl_inst3(TGSI_OPCODE_SUB, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_CONSTANT, 0);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /*
+ * dp4 o0.x, t0, c1 ; Multiply pixel by the color conversion matrix
+ * dp4 o0.y, t0, c2
+ * dp4 o0.z, t0, c3
+ */
+ for (i = 0; i < 3; ++i)
+ {
+ inst = vl_inst3(TGSI_OPCODE_DP4, TGSI_FILE_OUTPUT, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_CONSTANT, i + 1);
+ inst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_X << i;
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* end */
+ inst = vl_end();
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ fs.tokens = tokens;
+ csc->fragment_shader = pipe->create_fs_state(pipe, &fs);
+ FREE(tokens);
+
+ return 0;
+}
+
+static int vlCreateDataBufs
+(
+ struct vlBasicCSC *csc
+)
+{
+ struct pipe_context *pipe;
+
+ assert(csc);
+
+ pipe = csc->pipe;
+
+ /*
+ * Create our vertex buffer and vertex buffer element
+ * VB contains 4 vertices that render a quad covering the entire window
+ * to display a rendered surface
+ * Quad is rendered as a tri strip
+ */
+ csc->vertex_bufs[0].stride = sizeof(struct vlVertex2f);
+ csc->vertex_bufs[0].max_index = 3;
+ csc->vertex_bufs[0].buffer_offset = 0;
+ csc->vertex_bufs[0].buffer = pipe_buffer_create
+ (
+ pipe->screen,
+ 1,
+ PIPE_BUFFER_USAGE_VERTEX,
+ sizeof(struct vlVertex2f) * 4
+ );
+
+ memcpy
+ (
+ pipe_buffer_map(pipe->screen, csc->vertex_bufs[0].buffer, PIPE_BUFFER_USAGE_CPU_WRITE),
+ surface_verts,
+ sizeof(struct vlVertex2f) * 4
+ );
+
+ pipe_buffer_unmap(pipe->screen, csc->vertex_bufs[0].buffer);
+
+ csc->vertex_elems[0].src_offset = 0;
+ csc->vertex_elems[0].vertex_buffer_index = 0;
+ csc->vertex_elems[0].nr_components = 2;
+ csc->vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ /*
+ * Create our texcoord buffer and texcoord buffer element
+ * Texcoord buffer contains the TCs for mapping the rendered surface to the 4 vertices
+ */
+ csc->vertex_bufs[1].stride = sizeof(struct vlVertex2f);
+ csc->vertex_bufs[1].max_index = 3;
+ csc->vertex_bufs[1].buffer_offset = 0;
+ csc->vertex_bufs[1].buffer = pipe_buffer_create
+ (
+ pipe->screen,
+ 1,
+ PIPE_BUFFER_USAGE_VERTEX,
+ sizeof(struct vlVertex2f) * 4
+ );
+
+ memcpy
+ (
+ pipe_buffer_map(pipe->screen, csc->vertex_bufs[1].buffer, PIPE_BUFFER_USAGE_CPU_WRITE),
+ surface_texcoords,
+ sizeof(struct vlVertex2f) * 4
+ );
+
+ pipe_buffer_unmap(pipe->screen, csc->vertex_bufs[1].buffer);
+
+ csc->vertex_elems[1].src_offset = 0;
+ csc->vertex_elems[1].vertex_buffer_index = 1;
+ csc->vertex_elems[1].nr_components = 2;
+ csc->vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ /*
+ * Create our vertex shader's constant buffer
+ * Const buffer contains scaling and translation vectors
+ */
+ csc->vs_const_buf.buffer = pipe_buffer_create
+ (
+ pipe->screen,
+ 1,
+ PIPE_BUFFER_USAGE_CONSTANT | PIPE_BUFFER_USAGE_DISCARD,
+ sizeof(struct vlVertexShaderConsts)
+ );
+
+ /*
+ * Create our fragment shader's constant buffer
+ * Const buffer contains the color conversion matrix and bias vectors
+ */
+ csc->fs_const_buf.buffer = pipe_buffer_create
+ (
+ pipe->screen,
+ 1,
+ PIPE_BUFFER_USAGE_CONSTANT,
+ sizeof(struct vlFragmentShaderConsts)
+ );
+
+ /*
+ * TODO: Refactor this into a separate function,
+ * allow changing the CSC matrix at runtime to switch between regular & full versions
+ */
+ memcpy
+ (
+ pipe_buffer_map(pipe->screen, csc->fs_const_buf.buffer, PIPE_BUFFER_USAGE_CPU_WRITE),
+ &bt_601_full,
+ sizeof(struct vlFragmentShaderConsts)
+ );
+
+ pipe_buffer_unmap(pipe->screen, csc->fs_const_buf.buffer);
+
+ return 0;
+}
+
+static int vlInit
+(
+ struct vlBasicCSC *csc
+)
+{
+ struct pipe_context *pipe;
+ struct pipe_sampler_state sampler;
+
+ assert(csc);
+
+ pipe = csc->pipe;
+
+ /* Delay creating the FB until vlPutPictureCSC() so we know window size */
+ csc->framebuffer_tex = NULL;
+ csc->framebuffer.width = 0;
+ csc->framebuffer.height = 0;
+ csc->framebuffer.nr_cbufs = 1;
+ csc->framebuffer.cbufs[0] = NULL;
+ csc->framebuffer.zsbuf = NULL;
+
+ /* Zero the sampler so the fields left commented out below have defined values */
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
+ sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
+ sampler.compare_func = PIPE_FUNC_ALWAYS;
+ sampler.normalized_coords = 1;
+ /*sampler.prefilter = ;*/
+ /*sampler.lod_bias = ;*/
+ /*sampler.min_lod = ;*/
+ /*sampler.max_lod = ;*/
+ /*sampler.border_color[i] = ;*/
+ /*sampler.max_anisotropy = ;*/
+ csc->sampler = pipe->create_sampler_state(pipe, &sampler);
+
+ vlCreateVertexShader(csc);
+ vlCreateFragmentShader(csc);
+ vlCreateDataBufs(csc);
+
+ return 0;
+}
+
+int vlCreateBasicCSC
+(
+ struct pipe_context *pipe,
+ struct vlCSC **csc
+)
+{
+ struct vlBasicCSC *basic_csc;
+
+ assert(pipe);
+ assert(csc);
+
+ basic_csc = CALLOC_STRUCT(vlBasicCSC);
+
+ if (!basic_csc)
+ return 1;
+
+ basic_csc->base.vlResizeFrameBuffer = &vlResizeFrameBuffer;
+ basic_csc->base.vlBegin = &vlBegin;
+ basic_csc->base.vlPutPicture = &vlPutPictureCSC;
+ basic_csc->base.vlEnd = &vlEnd;
+ basic_csc->base.vlGetFrameBuffer = &vlGetFrameBuffer;
+ basic_csc->base.vlDestroy = &vlDestroy;
+ basic_csc->pipe = pipe;
+
+ vlInit(basic_csc);
+
+ *csc = &basic_csc->base;
+
+ return 0;
+}
diff --git a/src/gallium/state_trackers/g3dvl/vl_basic_csc.h b/src/gallium/state_trackers/g3dvl/vl_basic_csc.h
new file mode 100644
index 0000000000..2e17f1d814
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_basic_csc.h
@@ -0,0 +1,13 @@
+#ifndef vl_basic_csc_h
+#define vl_basic_csc_h
+
+struct pipe_context;
+struct vlCSC;
+
+int vlCreateBasicCSC
+(
+ struct pipe_context *pipe,
+ struct vlCSC **csc
+);
+
+#endif
diff --git a/src/gallium/state_trackers/g3dvl/vl_context.c b/src/gallium/state_trackers/g3dvl/vl_context.c
new file mode 100644
index 0000000000..5cfd233c4c
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_context.c
@@ -0,0 +1,205 @@
+#define VL_INTERNAL
+#include "vl_context.h"
+#include <assert.h>
+#include <pipe/p_context.h>
+#include <pipe/p_state.h>
+#include <util/u_memory.h>
+#include "vl_render.h"
+#include "vl_r16snorm_mc_buf.h"
+#include "vl_csc.h"
+#include "vl_basic_csc.h"
+
+static int vlInitCommon(struct vlContext *context)
+{
+ struct pipe_context *pipe;
+ struct pipe_rasterizer_state rast;
+ struct pipe_blend_state blend;
+ struct pipe_depth_stencil_alpha_state dsa;
+ unsigned int i;
+
+ assert(context);
+
+ pipe = context->pipe;
+
+ /* Zero the CSO templates so fields not set explicitly below are well defined */
+ memset(&rast, 0, sizeof(rast));
+ memset(&blend, 0, sizeof(blend));
+ memset(&dsa, 0, sizeof(dsa));
+
+ rast.flatshade = 1;
+ rast.flatshade_first = 0;
+ rast.light_twoside = 0;
+ rast.front_winding = PIPE_WINDING_CCW;
+ rast.cull_mode = PIPE_WINDING_CW;
+ rast.fill_cw = PIPE_POLYGON_MODE_FILL;
+ rast.fill_ccw = PIPE_POLYGON_MODE_FILL;
+ rast.offset_cw = 0;
+ rast.offset_ccw = 0;
+ rast.scissor = 0;
+ rast.poly_smooth = 0;
+ rast.poly_stipple_enable = 0;
+ rast.point_sprite = 0;
+ rast.point_size_per_vertex = 0;
+ rast.multisample = 0;
+ rast.line_smooth = 0;
+ rast.line_stipple_enable = 0;
+ rast.line_stipple_factor = 0;
+ rast.line_stipple_pattern = 0;
+ rast.line_last_pixel = 0;
+ rast.bypass_vs_clip_and_viewport = 0;
+ rast.line_width = 1;
+ rast.point_smooth = 0;
+ rast.point_size = 1;
+ rast.offset_units = 1;
+ rast.offset_scale = 1;
+ /*rast.sprite_coord_mode[i] = ;*/
+ context->raster = pipe->create_rasterizer_state(pipe, &rast);
+ pipe->bind_rasterizer_state(pipe, context->raster);
+
+ blend.blend_enable = 0;
+ blend.rgb_func = PIPE_BLEND_ADD;
+ blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
+ blend.alpha_func = PIPE_BLEND_ADD;
+ blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
+ blend.logicop_enable = 0;
+ blend.logicop_func = PIPE_LOGICOP_CLEAR;
+ /* Needed to allow color writes to FB, even if blending disabled */
+ blend.colormask = PIPE_MASK_RGBA;
+ blend.dither = 0;
+ context->blend = pipe->create_blend_state(pipe, &blend);
+ pipe->bind_blend_state(pipe, context->blend);
+
+ dsa.depth.enabled = 0;
+ dsa.depth.writemask = 0;
+ dsa.depth.func = PIPE_FUNC_ALWAYS;
+ dsa.depth.occlusion_count = 0;
+ for (i = 0; i < 2; ++i)
+ {
+ dsa.stencil[i].enabled = 0;
+ dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
+ dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[i].ref_value = 0;
+ dsa.stencil[i].valuemask = 0;
+ dsa.stencil[i].writemask = 0;
+ }
+ dsa.alpha.enabled = 0;
+ dsa.alpha.func = PIPE_FUNC_ALWAYS;
+ dsa.alpha.ref_value = 0;
+ context->dsa = pipe->create_depth_stencil_alpha_state(pipe, &dsa);
+ pipe->bind_depth_stencil_alpha_state(pipe, context->dsa);
+
+ return 0;
+}
+
+int vlCreateContext
+(
+ struct vlScreen *screen,
+ struct pipe_context *pipe,
+ unsigned int picture_width,
+ unsigned int picture_height,
+ enum vlFormat picture_format,
+ enum vlProfile profile,
+ enum vlEntryPoint entry_point,
+ struct vlContext **context
+)
+{
+ struct vlContext *ctx;
+
+ assert(screen);
+ assert(context);
+ assert(pipe);
+
+ ctx = CALLOC_STRUCT(vlContext);
+
+ if (!ctx)
+ return 1;
+
+ ctx->screen = screen;
+ ctx->pipe = pipe;
+ ctx->picture_width = picture_width;
+ ctx->picture_height = picture_height;
+ ctx->picture_format = picture_format;
+ ctx->profile = profile;
+ ctx->entry_point = entry_point;
+
+ vlInitCommon(ctx);
+
+ vlCreateR16SNormBufferedMC(pipe, picture_width, picture_height, picture_format, &ctx->render);
+ vlCreateBasicCSC(pipe, &ctx->csc);
+
+ *context = ctx;
+
+ return 0;
+}
+
+int vlDestroyContext
+(
+ struct vlContext *context
+)
+{
+ assert(context);
+
+ /* XXX: Must unbind shaders before we can delete them for some reason */
+ context->pipe->bind_vs_state(context->pipe, NULL);
+ context->pipe->bind_fs_state(context->pipe, NULL);
+
+ context->render->vlDestroy(context->render);
+ context->csc->vlDestroy(context->csc);
+
+ context->pipe->delete_blend_state(context->pipe, context->blend);
+ context->pipe->delete_rasterizer_state(context->pipe, context->raster);
+ context->pipe->delete_depth_stencil_alpha_state(context->pipe, context->dsa);
+
+ FREE(context);
+
+ return 0;
+}
+
+struct vlScreen* vlContextGetScreen
+(
+ struct vlContext *context
+)
+{
+ assert(context);
+
+ return context->screen;
+}
+
+struct pipe_context* vlGetPipeContext
+(
+ struct vlContext *context
+)
+{
+ assert(context);
+
+ return context->pipe;
+}
+
+unsigned int vlGetPictureWidth
+(
+ struct vlContext *context
+)
+{
+ assert(context);
+
+ return context->picture_width;
+}
+
+unsigned int vlGetPictureHeight
+(
+ struct vlContext *context
+)
+{
+ assert(context);
+
+ return context->picture_height;
+}
+
+enum vlFormat vlGetPictureFormat
+(
+ struct vlContext *context
+)
+{
+ assert(context);
+
+ return context->picture_format;
+}
diff --git a/src/gallium/state_trackers/g3dvl/vl_context.h b/src/gallium/state_trackers/g3dvl/vl_context.h
new file mode 100644
index 0000000000..3d14634c44
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_context.h
@@ -0,0 +1,73 @@
+#ifndef vl_context_h
+#define vl_context_h
+
+#include "vl_types.h"
+
+struct pipe_context;
+
+#ifdef VL_INTERNAL
+struct vlRender;
+struct vlCSC;
+
+struct vlContext
+{
+ struct vlScreen *screen;
+ struct pipe_context *pipe;
+ unsigned int picture_width;
+ unsigned int picture_height;
+ enum vlFormat picture_format;
+ enum vlProfile profile;
+ enum vlEntryPoint entry_point;
+
+ void *raster;
+ void *dsa;
+ void *blend;
+
+ struct vlRender *render;
+ struct vlCSC *csc;
+};
+#endif
+
+int vlCreateContext
+(
+ struct vlScreen *screen,
+ struct pipe_context *pipe,
+ unsigned int picture_width,
+ unsigned int picture_height,
+ enum vlFormat picture_format,
+ enum vlProfile profile,
+ enum vlEntryPoint entry_point,
+ struct vlContext **context
+);
+
+int vlDestroyContext
+(
+ struct vlContext *context
+);
+
+struct vlScreen* vlContextGetScreen
+(
+ struct vlContext *context
+);
+
+struct pipe_context* vlGetPipeContext
+(
+ struct vlContext *context
+);
+
+unsigned int vlGetPictureWidth
+(
+ struct vlContext *context
+);
+
+unsigned int vlGetPictureHeight
+(
+ struct vlContext *context
+);
+
+enum vlFormat vlGetPictureFormat
+(
+ struct vlContext *context
+);
+
+#endif
diff --git a/src/gallium/state_trackers/g3dvl/vl_csc.h b/src/gallium/state_trackers/g3dvl/vl_csc.h
new file mode 100644
index 0000000000..36417a2792
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_csc.h
@@ -0,0 +1,53 @@
+#ifndef vl_csc_h
+#define vl_csc_h
+
+#include "vl_types.h"
+
+struct pipe_surface;
+
+struct vlCSC
+{
+ int (*vlResizeFrameBuffer)
+ (
+ struct vlCSC *csc,
+ unsigned int width,
+ unsigned int height
+ );
+
+ int (*vlBegin)
+ (
+ struct vlCSC *csc
+ );
+
+ int (*vlPutPicture)
+ (
+ struct vlCSC *csc,
+ struct vlSurface *surface,
+ int srcx,
+ int srcy,
+ int srcw,
+ int srch,
+ int destx,
+ int desty,
+ int destw,
+ int desth,
+ enum vlPictureType picture_type
+ );
+
+ int (*vlEnd)
+ (
+ struct vlCSC *csc
+ );
+
+ struct pipe_surface* (*vlGetFrameBuffer)
+ (
+ struct vlCSC *csc
+ );
+
+ int (*vlDestroy)
+ (
+ struct vlCSC *csc
+ );
+};
+
+#endif
diff --git a/src/gallium/state_trackers/g3dvl/vl_defs.h b/src/gallium/state_trackers/g3dvl/vl_defs.h
new file mode 100644
index 0000000000..d612d02502
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_defs.h
@@ -0,0 +1,11 @@
+#ifndef vl_defs_h
+#define vl_defs_h
+
+#define VL_BLOCK_WIDTH 8
+#define VL_BLOCK_HEIGHT 8
+#define VL_BLOCK_SIZE (VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT)
+#define VL_MACROBLOCK_WIDTH 16
+#define VL_MACROBLOCK_HEIGHT 16
+#define VL_MACROBLOCK_SIZE (VL_MACROBLOCK_WIDTH * VL_MACROBLOCK_HEIGHT)
+
+#endif
diff --git a/src/gallium/state_trackers/g3dvl/vl_display.c b/src/gallium/state_trackers/g3dvl/vl_display.c
new file mode 100644
index 0000000000..dce06de758
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_display.c
@@ -0,0 +1,48 @@
+#define VL_INTERNAL
+#include "vl_display.h"
+#include <assert.h>
+#include <util/u_memory.h>
+
+int vlCreateDisplay
+(
+ vlNativeDisplay native_display,
+ struct vlDisplay **display
+)
+{
+ struct vlDisplay *dpy;
+
+ assert(native_display);
+ assert(display);
+
+ dpy = CALLOC_STRUCT(vlDisplay);
+
+ if (!dpy)
+ return 1;
+
+ dpy->native = native_display;
+ *display = dpy;
+
+ return 0;
+}
+
+int vlDestroyDisplay
+(
+ struct vlDisplay *display
+)
+{
+ assert(display);
+
+ FREE(display);
+
+ return 0;
+}
+
+vlNativeDisplay vlGetNativeDisplay
+(
+ struct vlDisplay *display
+)
+{
+ assert(display);
+
+ return display->native;
+}
diff --git a/src/gallium/state_trackers/g3dvl/vl_display.h b/src/gallium/state_trackers/g3dvl/vl_display.h
new file mode 100644
index 0000000000..e11fd40799
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_display.h
@@ -0,0 +1,29 @@
+#ifndef vl_display_h
+#define vl_display_h
+
+#include "vl_types.h"
+
+#ifdef VL_INTERNAL
+struct vlDisplay
+{
+ vlNativeDisplay native;
+};
+#endif
+
+int vlCreateDisplay
+(
+ vlNativeDisplay native_display,
+ struct vlDisplay **display
+);
+
+int vlDestroyDisplay
+(
+ struct vlDisplay *display
+);
+
+vlNativeDisplay vlGetNativeDisplay
+(
+ struct vlDisplay *display
+);
+
+#endif
diff --git a/src/gallium/state_trackers/g3dvl/vl_r16snorm_mc_buf.c b/src/gallium/state_trackers/g3dvl/vl_r16snorm_mc_buf.c
new file mode 100644
index 0000000000..23631adb69
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_r16snorm_mc_buf.c
@@ -0,0 +1,1155 @@
+#define VL_INTERNAL
+#include "vl_r16snorm_mc_buf.h"
+#include <assert.h>
+#include <pipe/p_context.h>
+#include <pipe/p_screen.h>
+#include <pipe/p_state.h>
+#include <pipe/p_inlines.h>
+#include <tgsi/tgsi_parse.h>
+#include <tgsi/tgsi_build.h>
+#include <util/u_math.h>
+#include <util/u_memory.h>
+#include "vl_render.h"
+#include "vl_shader_build.h"
+#include "vl_surface.h"
+#include "vl_util.h"
+#include "vl_types.h"
+#include "vl_defs.h"
+
+const unsigned int DEFAULT_BUF_ALIGNMENT = 1;
+
+enum vlMacroBlockTypeEx
+{
+ vlMacroBlockExTypeIntra,
+ vlMacroBlockExTypeFwdPredictedFrame,
+ vlMacroBlockExTypeFwdPredictedField,
+ vlMacroBlockExTypeBkwdPredictedFrame,
+ vlMacroBlockExTypeBkwdPredictedField,
+ vlMacroBlockExTypeBiPredictedFrame,
+ vlMacroBlockExTypeBiPredictedField,
+
+ vlNumMacroBlockExTypes
+};
+
+struct vlVertexShaderConsts
+{
+ struct vlVertex4f denorm;
+};
+
+struct vlFragmentShaderConsts
+{
+ struct vlVertex4f multiplier;
+ struct vlVertex4f div;
+};
+
+struct vlMacroBlockVertexStream0
+{
+ struct vlVertex2f pos;
+ struct vlVertex2f luma_tc;
+ struct vlVertex2f cb_tc;
+ struct vlVertex2f cr_tc;
+};
+
+struct vlR16SnormBufferedMC
+{
+ struct vlRender base;
+
+ unsigned int picture_width;
+ unsigned int picture_height;
+ enum vlFormat picture_format;
+ unsigned int macroblocks_per_picture;
+
+ struct vlSurface *buffered_surface;
+ struct vlSurface *past_surface;
+ struct vlSurface *future_surface;
+ struct vlVertex2f surface_tex_inv_size;
+ struct vlVertex2f zero_block[3];
+ unsigned int num_macroblocks;
+ struct vlMpeg2MacroBlock *macroblocks;
+ struct pipe_transfer *tex_transfer[3];
+ short *texels[3];
+
+ struct pipe_context *pipe;
+ struct pipe_viewport_state viewport;
+ struct pipe_framebuffer_state render_target;
+
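+ /*
+  * Each union below aliases an array view (all[]) with named members, so
+  * the whole set can be bound in one call (e.g. bind_sampler_states(pipe,
+  * 5, mc->samplers.all)) while individual planes and reference slots stay
+  * addressable by name.
+  */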
+ union
+ {
+ void *all[5];
+ struct
+ {
+ void *y;
+ void *cb;
+ void *cr;
+ void *ref[2];
+ };
+ } samplers;
+
+ union
+ {
+ struct pipe_texture *all[5];
+ struct
+ {
+ struct pipe_texture *y;
+ struct pipe_texture *cb;
+ struct pipe_texture *cr;
+ struct pipe_texture *ref[2];
+ };
+ } textures;
+
+ union
+ {
+ struct pipe_vertex_buffer all[3];
+ struct
+ {
+ struct pipe_vertex_buffer ycbcr;
+ struct pipe_vertex_buffer ref[2];
+ };
+ } vertex_bufs;
+
+ void *i_vs, *p_vs[2], *b_vs[2];
+ void *i_fs, *p_fs[2], *b_fs[2];
+ struct pipe_vertex_element vertex_elems[8];
+ struct pipe_constant_buffer vs_const_buf;
+ struct pipe_constant_buffer fs_const_buf;
+};
+
+static inline int vlBegin
+(
+ struct vlRender *render
+)
+{
+ assert(render);
+
+ return 0;
+}
+
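+/*
+ * The helpers below copy one 8x8 block of 16-bit coefficients into a mapped
+ * texture. dst_pitch is in texels; VL_BLOCK_WIDTH * 2 is the row size in
+ * bytes. Frame-coded blocks fill consecutive rows, field-coded blocks fill
+ * every other row, and vlGrabNoBlock zero-fills the block.
+ */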
+static inline int vlGrabFrameCodedBlock(short *src, short *dst, unsigned int dst_pitch)
+{
+ unsigned int y;
+
+ for (y = 0; y < VL_BLOCK_HEIGHT; ++y)
+ memcpy
+ (
+ dst + y * dst_pitch,
+ src + y * VL_BLOCK_WIDTH,
+ VL_BLOCK_WIDTH * 2
+ );
+
+ return 0;
+}
+
+static inline int vlGrabFieldCodedBlock(short *src, short *dst, unsigned int dst_pitch)
+{
+ unsigned int y;
+
+ for (y = 0; y < VL_BLOCK_HEIGHT; ++y)
+ memcpy
+ (
+ dst + y * dst_pitch * 2,
+ src + y * VL_BLOCK_WIDTH,
+ VL_BLOCK_WIDTH * 2
+ );
+
+ return 0;
+}
+
+static inline int vlGrabNoBlock(short *dst, unsigned int dst_pitch)
+{
+ unsigned int y;
+
+ for (y = 0; y < VL_BLOCK_HEIGHT; ++y)
+ memset
+ (
+ dst + y * dst_pitch,
+ 0,
+ VL_BLOCK_WIDTH * 2
+ );
+
+ return 0;
+}
+
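+/*
+ * Copies a macroblock's coded blocks into the Y/Cb/Cr textures according to
+ * the coded block pattern. The first uncoded block seen for a channel is
+ * zero-filled and its texcoords cached in zero_block[], so every later
+ * uncoded block can simply sample that shared zero region.
+ */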
+static inline int vlGrabBlocks
+(
+ struct vlR16SnormBufferedMC *mc,
+ unsigned int mbx,
+ unsigned int mby,
+ enum vlDCTType dct_type,
+ unsigned int coded_block_pattern,
+ short *blocks
+)
+{
+ short *texels;
+ unsigned int tex_pitch;
+ unsigned int x, y, tb = 0, sb = 0;
+ unsigned int mbpx = mbx * VL_MACROBLOCK_WIDTH, mbpy = mby * VL_MACROBLOCK_HEIGHT;
+
+ assert(mc);
+ assert(blocks);
+
+ tex_pitch = mc->tex_transfer[0]->stride / mc->tex_transfer[0]->block.size;
+ texels = mc->texels[0] + mbpy * tex_pitch + mbpx;
+
+ for (y = 0; y < 2; ++y)
+ {
+ for (x = 0; x < 2; ++x, ++tb)
+ {
+ if ((coded_block_pattern >> (5 - tb)) & 1)
+ {
+ short *cur_block = blocks + sb * VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;
+
+ if (dct_type == vlDCTTypeFrameCoded)
+ {
+ vlGrabFrameCodedBlock
+ (
+ cur_block,
+ texels + y * tex_pitch * VL_BLOCK_HEIGHT + x * VL_BLOCK_WIDTH,
+ tex_pitch
+ );
+ }
+ else
+ {
+ vlGrabFieldCodedBlock
+ (
+ cur_block,
+ texels + y * tex_pitch + x * VL_BLOCK_WIDTH,
+ tex_pitch
+ );
+ }
+
+ ++sb;
+ }
+ else if (mc->zero_block[0].x < 0.0f)
+ {
+ vlGrabNoBlock(texels + y * tex_pitch * VL_BLOCK_HEIGHT + x * VL_BLOCK_WIDTH, tex_pitch);
+
+ mc->zero_block[0].x = (mbpx + x * 8) * mc->surface_tex_inv_size.x;
+ mc->zero_block[0].y = (mbpy + y * 8) * mc->surface_tex_inv_size.y;
+ }
+ }
+ }
+
+ /* TODO: Implement 422, 444 */
+ mbpx >>= 1;
+ mbpy >>= 1;
+
+ for (tb = 0; tb < 2; ++tb)
+ {
+ tex_pitch = mc->tex_transfer[tb + 1]->stride / mc->tex_transfer[tb + 1]->block.size;
+ texels = mc->texels[tb + 1] + mbpy * tex_pitch + mbpx;
+
+ if ((coded_block_pattern >> (1 - tb)) & 1)
+ {
+ short *cur_block = blocks + sb * VL_BLOCK_WIDTH * VL_BLOCK_HEIGHT;
+
+ vlGrabFrameCodedBlock
+ (
+ cur_block,
+ texels,
+ tex_pitch
+ );
+
+ ++sb;
+ }
+ else if (mc->zero_block[tb + 1].x < 0.0f)
+ {
+ vlGrabNoBlock(texels, tex_pitch);
+
+ mc->zero_block[tb + 1].x = (mbpx << 1) * mc->surface_tex_inv_size.x;
+ mc->zero_block[tb + 1].y = (mbpy << 1) * mc->surface_tex_inv_size.y;
+ }
+ }
+
+ return 0;
+}
+
+static inline enum vlMacroBlockTypeEx vlGetMacroBlockTypeEx(struct vlMpeg2MacroBlock *mb)
+{
+ assert(mb);
+
+ switch (mb->mb_type)
+ {
+ case vlMacroBlockTypeIntra:
+ return vlMacroBlockExTypeIntra;
+ case vlMacroBlockTypeFwdPredicted:
+ return mb->mo_type == vlMotionTypeFrame ?
+ vlMacroBlockExTypeFwdPredictedFrame : vlMacroBlockExTypeFwdPredictedField;
+ case vlMacroBlockTypeBkwdPredicted:
+ return mb->mo_type == vlMotionTypeFrame ?
+ vlMacroBlockExTypeBkwdPredictedFrame : vlMacroBlockExTypeBkwdPredictedField;
+ case vlMacroBlockTypeBiPredicted:
+ return mb->mo_type == vlMotionTypeFrame ?
+ vlMacroBlockExTypeBiPredictedFrame : vlMacroBlockExTypeBiPredictedField;
+ default:
+ assert(0);
+ }
+
+ /* Unreachable */
+ return -1;
+}
+
+static inline int vlGrabMacroBlock
+(
+ struct vlR16SnormBufferedMC *mc,
+ struct vlMpeg2MacroBlock *macroblock
+)
+{
+ assert(mc);
+ assert(macroblock);
+ assert(mc->num_macroblocks < mc->macroblocks_per_picture);
+
+ mc->macroblocks[mc->num_macroblocks].mbx = macroblock->mbx;
+ mc->macroblocks[mc->num_macroblocks].mby = macroblock->mby;
+ mc->macroblocks[mc->num_macroblocks].mb_type = macroblock->mb_type;
+ mc->macroblocks[mc->num_macroblocks].mo_type = macroblock->mo_type;
+ mc->macroblocks[mc->num_macroblocks].dct_type = macroblock->dct_type;
+ mc->macroblocks[mc->num_macroblocks].PMV[0][0][0] = macroblock->PMV[0][0][0];
+ mc->macroblocks[mc->num_macroblocks].PMV[0][0][1] = macroblock->PMV[0][0][1];
+ mc->macroblocks[mc->num_macroblocks].PMV[0][1][0] = macroblock->PMV[0][1][0];
+ mc->macroblocks[mc->num_macroblocks].PMV[0][1][1] = macroblock->PMV[0][1][1];
+ mc->macroblocks[mc->num_macroblocks].PMV[1][0][0] = macroblock->PMV[1][0][0];
+ mc->macroblocks[mc->num_macroblocks].PMV[1][0][1] = macroblock->PMV[1][0][1];
+ mc->macroblocks[mc->num_macroblocks].PMV[1][1][0] = macroblock->PMV[1][1][0];
+ mc->macroblocks[mc->num_macroblocks].PMV[1][1][1] = macroblock->PMV[1][1][1];
+ mc->macroblocks[mc->num_macroblocks].cbp = macroblock->cbp;
+ mc->macroblocks[mc->num_macroblocks].blocks = macroblock->blocks;
+
+ vlGrabBlocks
+ (
+ mc,
+ macroblock->mbx,
+ macroblock->mby,
+ macroblock->dct_type,
+ macroblock->cbp,
+ macroblock->blocks
+ );
+
+ mc->num_macroblocks++;
+
+ return 0;
+}
+
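+/*
+ * Emits the six vertices (two triangles) covering one 8x8 block of a
+ * macroblock. Positions always cover the block; the luma/Cb/Cr texcoords
+ * either map the same region of the coefficient textures (when the matching
+ * coded block pattern bit is set) or point at the cached zero block.
+ */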
+#define SET_BLOCK(vb, cbp, mbx, mby, unitx, unity, ofsx, ofsy, hx, hy, lm, cbm, crm, zb) \
+ do { \
+ (vb)[0].pos.x = (mbx) * (unitx) + (ofsx); (vb)[0].pos.y = (mby) * (unity) + (ofsy); \
+ (vb)[1].pos.x = (mbx) * (unitx) + (ofsx); (vb)[1].pos.y = (mby) * (unity) + (ofsy) + (hy); \
+ (vb)[2].pos.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[2].pos.y = (mby) * (unity) + (ofsy); \
+ (vb)[3].pos.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[3].pos.y = (mby) * (unity) + (ofsy); \
+ (vb)[4].pos.x = (mbx) * (unitx) + (ofsx); (vb)[4].pos.y = (mby) * (unity) + (ofsy) + (hy); \
+ (vb)[5].pos.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[5].pos.y = (mby) * (unity) + (ofsy) + (hy); \
+ \
+ if ((cbp) & (lm)) \
+ { \
+ (vb)[0].luma_tc.x = (mbx) * (unitx) + (ofsx); (vb)[0].luma_tc.y = (mby) * (unity) + (ofsy); \
+ (vb)[1].luma_tc.x = (mbx) * (unitx) + (ofsx); (vb)[1].luma_tc.y = (mby) * (unity) + (ofsy) + (hy); \
+ (vb)[2].luma_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[2].luma_tc.y = (mby) * (unity) + (ofsy); \
+ (vb)[3].luma_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[3].luma_tc.y = (mby) * (unity) + (ofsy); \
+ (vb)[4].luma_tc.x = (mbx) * (unitx) + (ofsx); (vb)[4].luma_tc.y = (mby) * (unity) + (ofsy) + (hy); \
+ (vb)[5].luma_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[5].luma_tc.y = (mby) * (unity) + (ofsy) + (hy); \
+ } \
+ else \
+ { \
+ (vb)[0].luma_tc.x = (zb)[0].x; (vb)[0].luma_tc.y = (zb)[0].y; \
+ (vb)[1].luma_tc.x = (zb)[0].x; (vb)[1].luma_tc.y = (zb)[0].y + (hy); \
+ (vb)[2].luma_tc.x = (zb)[0].x + (hx); (vb)[2].luma_tc.y = (zb)[0].y; \
+ (vb)[3].luma_tc.x = (zb)[0].x + (hx); (vb)[3].luma_tc.y = (zb)[0].y; \
+ (vb)[4].luma_tc.x = (zb)[0].x; (vb)[4].luma_tc.y = (zb)[0].y + (hy); \
+ (vb)[5].luma_tc.x = (zb)[0].x + (hx); (vb)[5].luma_tc.y = (zb)[0].y + (hy); \
+ } \
+ \
+ if ((cbp) & (cbm)) \
+ { \
+ (vb)[0].cb_tc.x = (mbx) * (unitx) + (ofsx); (vb)[0].cb_tc.y = (mby) * (unity) + (ofsy); \
+ (vb)[1].cb_tc.x = (mbx) * (unitx) + (ofsx); (vb)[1].cb_tc.y = (mby) * (unity) + (ofsy) + (hy); \
+ (vb)[2].cb_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[2].cb_tc.y = (mby) * (unity) + (ofsy); \
+ (vb)[3].cb_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[3].cb_tc.y = (mby) * (unity) + (ofsy); \
+ (vb)[4].cb_tc.x = (mbx) * (unitx) + (ofsx); (vb)[4].cb_tc.y = (mby) * (unity) + (ofsy) + (hy); \
+ (vb)[5].cb_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[5].cb_tc.y = (mby) * (unity) + (ofsy) + (hy); \
+ } \
+ else \
+ { \
+ (vb)[0].cb_tc.x = (zb)[1].x; (vb)[0].cb_tc.y = (zb)[1].y; \
+ (vb)[1].cb_tc.x = (zb)[1].x; (vb)[1].cb_tc.y = (zb)[1].y + (hy); \
+ (vb)[2].cb_tc.x = (zb)[1].x + (hx); (vb)[2].cb_tc.y = (zb)[1].y; \
+ (vb)[3].cb_tc.x = (zb)[1].x + (hx); (vb)[3].cb_tc.y = (zb)[1].y; \
+ (vb)[4].cb_tc.x = (zb)[1].x; (vb)[4].cb_tc.y = (zb)[1].y + (hy); \
+ (vb)[5].cb_tc.x = (zb)[1].x + (hx); (vb)[5].cb_tc.y = (zb)[1].y + (hy); \
+ } \
+ \
+ if ((cbp) & (crm)) \
+ { \
+ (vb)[0].cr_tc.x = (mbx) * (unitx) + (ofsx); (vb)[0].cr_tc.y = (mby) * (unity) + (ofsy); \
+ (vb)[1].cr_tc.x = (mbx) * (unitx) + (ofsx); (vb)[1].cr_tc.y = (mby) * (unity) + (ofsy) + (hy); \
+ (vb)[2].cr_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[2].cr_tc.y = (mby) * (unity) + (ofsy); \
+ (vb)[3].cr_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[3].cr_tc.y = (mby) * (unity) + (ofsy); \
+ (vb)[4].cr_tc.x = (mbx) * (unitx) + (ofsx); (vb)[4].cr_tc.y = (mby) * (unity) + (ofsy) + (hy); \
+ (vb)[5].cr_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[5].cr_tc.y = (mby) * (unity) + (ofsy) + (hy); \
+ } \
+ else \
+ { \
+ (vb)[0].cr_tc.x = (zb)[2].x; (vb)[0].cr_tc.y = (zb)[2].y; \
+ (vb)[1].cr_tc.x = (zb)[2].x; (vb)[1].cr_tc.y = (zb)[2].y + (hy); \
+ (vb)[2].cr_tc.x = (zb)[2].x + (hx); (vb)[2].cr_tc.y = (zb)[2].y; \
+ (vb)[3].cr_tc.x = (zb)[2].x + (hx); (vb)[3].cr_tc.y = (zb)[2].y; \
+ (vb)[4].cr_tc.x = (zb)[2].x; (vb)[4].cr_tc.y = (zb)[2].y + (hy); \
+ (vb)[5].cr_tc.x = (zb)[2].x + (hx); (vb)[5].cr_tc.y = (zb)[2].y + (hy); \
+ } \
+ } while (0)
+
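+/*
+ * Fills the vertex streams for one macroblock at slot 'pos'. The switch
+ * falls through on purpose: bi-predicted blocks first write the second
+ * reference stream, predicted blocks then write the first reference stream,
+ * and every type finally writes the position/texcoord stream via SET_BLOCK.
+ */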
+static inline int vlGenMacroblockVerts
+(
+ struct vlR16SnormBufferedMC *mc,
+ struct vlMpeg2MacroBlock *macroblock,
+ unsigned int pos,
+ struct vlMacroBlockVertexStream0 *ycbcr_vb,
+ struct vlVertex2f **ref_vb
+)
+{
+ struct vlVertex2f mo_vec[2];
+ unsigned int i;
+
+ assert(mc);
+ assert(macroblock);
+ assert(ycbcr_vb);
+ assert(pos < mc->macroblocks_per_picture);
+
+ switch (macroblock->mb_type)
+ {
+ case vlMacroBlockTypeBiPredicted:
+ {
+ struct vlVertex2f *vb;
+
+ assert(ref_vb && ref_vb[1]);
+
+ vb = ref_vb[1] + pos * 2 * 24;
+
+ mo_vec[0].x = macroblock->PMV[0][1][0] * 0.5f * mc->surface_tex_inv_size.x;
+ mo_vec[0].y = macroblock->PMV[0][1][1] * 0.5f * mc->surface_tex_inv_size.y;
+
+ if (macroblock->mo_type == vlMotionTypeFrame)
+ {
+ for (i = 0; i < 24 * 2; i += 2)
+ {
+ vb[i].x = mo_vec[0].x;
+ vb[i].y = mo_vec[0].y;
+ }
+ }
+ else
+ {
+ mo_vec[1].x = macroblock->PMV[1][1][0] * 0.5f * mc->surface_tex_inv_size.x;
+ mo_vec[1].y = macroblock->PMV[1][1][1] * 0.5f * mc->surface_tex_inv_size.y;
+
+ for (i = 0; i < 24 * 2; i += 2)
+ {
+ vb[i].x = mo_vec[0].x;
+ vb[i].y = mo_vec[0].y;
+ vb[i + 1].x = mo_vec[1].x;
+ vb[i + 1].y = mo_vec[1].y;
+ }
+ }
+
+ /* fall-through */
+ }
+ case vlMacroBlockTypeFwdPredicted:
+ case vlMacroBlockTypeBkwdPredicted:
+ {
+ struct vlVertex2f *vb;
+
+ assert(ref_vb && ref_vb[0]);
+
+ vb = ref_vb[0] + pos * 2 * 24;
+
+ if (macroblock->mb_type == vlMacroBlockTypeBkwdPredicted)
+ {
+ mo_vec[0].x = macroblock->PMV[0][1][0] * 0.5f * mc->surface_tex_inv_size.x;
+ mo_vec[0].y = macroblock->PMV[0][1][1] * 0.5f * mc->surface_tex_inv_size.y;
+
+ if (macroblock->mo_type == vlMotionTypeField)
+ {
+ mo_vec[1].x = macroblock->PMV[1][1][0] * 0.5f * mc->surface_tex_inv_size.x;
+ mo_vec[1].y = macroblock->PMV[1][1][1] * 0.5f * mc->surface_tex_inv_size.y;
+ }
+ }
+ else
+ {
+ mo_vec[0].x = macroblock->PMV[0][0][0] * 0.5f * mc->surface_tex_inv_size.x;
+ mo_vec[0].y = macroblock->PMV[0][0][1] * 0.5f * mc->surface_tex_inv_size.y;
+
+ if (macroblock->mo_type == vlMotionTypeField)
+ {
+ mo_vec[1].x = macroblock->PMV[1][0][0] * 0.5f * mc->surface_tex_inv_size.x;
+ mo_vec[1].y = macroblock->PMV[1][0][1] * 0.5f * mc->surface_tex_inv_size.y;
+ }
+ }
+
+ if (macroblock->mo_type == vlMotionTypeFrame)
+ {
+ for (i = 0; i < 24 * 2; i += 2)
+ {
+ vb[i].x = mo_vec[0].x;
+ vb[i].y = mo_vec[0].y;
+ }
+ }
+ else
+ {
+ for (i = 0; i < 24 * 2; i += 2)
+ {
+ vb[i].x = mo_vec[0].x;
+ vb[i].y = mo_vec[0].y;
+ vb[i + 1].x = mo_vec[1].x;
+ vb[i + 1].y = mo_vec[1].y;
+ }
+ }
+
+ /* fall-through */
+ }
+ case vlMacroBlockTypeIntra:
+ {
+ const struct vlVertex2f unit =
+ {
+ mc->surface_tex_inv_size.x * VL_MACROBLOCK_WIDTH,
+ mc->surface_tex_inv_size.y * VL_MACROBLOCK_HEIGHT
+ };
+ const struct vlVertex2f half =
+ {
+ mc->surface_tex_inv_size.x * (VL_MACROBLOCK_WIDTH / 2),
+ mc->surface_tex_inv_size.y * (VL_MACROBLOCK_HEIGHT / 2)
+ };
+
+ struct vlMacroBlockVertexStream0 *vb;
+
+ vb = ycbcr_vb + pos * 24;
+
+ SET_BLOCK
+ (
+ vb,
+ macroblock->cbp, macroblock->mbx, macroblock->mby,
+ unit.x, unit.y, 0, 0, half.x, half.y,
+ 32, 2, 1, mc->zero_block
+ );
+
+ SET_BLOCK
+ (
+ vb + 6,
+ macroblock->cbp, macroblock->mbx, macroblock->mby,
+ unit.x, unit.y, half.x, 0, half.x, half.y,
+ 16, 2, 1, mc->zero_block
+ );
+
+ SET_BLOCK
+ (
+ vb + 12,
+ macroblock->cbp, macroblock->mbx, macroblock->mby,
+ unit.x, unit.y, 0, half.y, half.x, half.y,
+ 8, 2, 1, mc->zero_block
+ );
+
+ SET_BLOCK
+ (
+ vb + 18,
+ macroblock->cbp, macroblock->mbx, macroblock->mby,
+ unit.x, unit.y, half.x, half.y, half.x, half.y,
+ 4, 2, 1, mc->zero_block
+ );
+
+ break;
+ }
+ default:
+ assert(0);
+ }
+
+ return 0;
+}
+
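+/*
+ * Renders all buffered macroblocks of the current surface. Macroblocks are
+ * binned by extended type (intra, P frame/field, B frame/field) into
+ * contiguous vertex buffer ranges, so each shader pair is bound once and
+ * each type is drawn with a single draw_arrays call.
+ */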
+static int vlFlush
+(
+ struct vlRender *render
+)
+{
+ struct vlR16SnormBufferedMC *mc;
+ struct pipe_context *pipe;
+ struct vlVertexShaderConsts *vs_consts;
+ unsigned int num_macroblocks[vlNumMacroBlockExTypes] = {0};
+ unsigned int offset[vlNumMacroBlockExTypes];
+ unsigned int vb_start = 0;
+ unsigned int i;
+
+ assert(render);
+
+ mc = (struct vlR16SnormBufferedMC*)render;
+
+ if (!mc->buffered_surface)
+ return 0;
+
+ if (mc->num_macroblocks < mc->macroblocks_per_picture)
+ return 0;
+
+ assert(mc->num_macroblocks <= mc->macroblocks_per_picture);
+
+ pipe = mc->pipe;
+
+ for (i = 0; i < mc->num_macroblocks; ++i)
+ {
+ enum vlMacroBlockTypeEx mb_type_ex = vlGetMacroBlockTypeEx(&mc->macroblocks[i]);
+
+ num_macroblocks[mb_type_ex]++;
+ }
+
+ offset[0] = 0;
+
+ for (i = 1; i < vlNumMacroBlockExTypes; ++i)
+ offset[i] = offset[i - 1] + num_macroblocks[i - 1];
+
+ {
+ struct vlMacroBlockVertexStream0 *ycbcr_vb;
+ struct vlVertex2f *ref_vb[2];
+
+ ycbcr_vb = (struct vlMacroBlockVertexStream0*)pipe_buffer_map
+ (
+ pipe->screen,
+ mc->vertex_bufs.ycbcr.buffer,
+ PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
+ );
+
+ for (i = 0; i < 2; ++i)
+ ref_vb[i] = (struct vlVertex2f*)pipe_buffer_map
+ (
+ pipe->screen,
+ mc->vertex_bufs.ref[i].buffer,
+ PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
+ );
+
+ for (i = 0; i < mc->num_macroblocks; ++i)
+ {
+ enum vlMacroBlockTypeEx mb_type_ex = vlGetMacroBlockTypeEx(&mc->macroblocks[i]);
+
+ vlGenMacroblockVerts(mc, &mc->macroblocks[i], offset[mb_type_ex], ycbcr_vb, ref_vb);
+
+ offset[mb_type_ex]++;
+ }
+
+ pipe_buffer_unmap(pipe->screen, mc->vertex_bufs.ycbcr.buffer);
+ for (i = 0; i < 2; ++i)
+ pipe_buffer_unmap(pipe->screen, mc->vertex_bufs.ref[i].buffer);
+ }
+
+ for (i = 0; i < 3; ++i)
+ {
+ pipe->screen->transfer_unmap(pipe->screen, mc->tex_transfer[i]);
+ pipe->screen->tex_transfer_destroy(mc->tex_transfer[i]);
+ }
+
+ mc->render_target.cbufs[0] = pipe->screen->get_tex_surface
+ (
+ pipe->screen,
+ mc->buffered_surface->texture,
+ 0, 0, 0, PIPE_BUFFER_USAGE_GPU_READ | PIPE_BUFFER_USAGE_GPU_WRITE
+ );
+
+ pipe->set_framebuffer_state(pipe, &mc->render_target);
+ pipe->set_viewport_state(pipe, &mc->viewport);
+ vs_consts = pipe_buffer_map
+ (
+ pipe->screen,
+ mc->vs_const_buf.buffer,
+ PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD
+ );
+
+ vs_consts->denorm.x = mc->buffered_surface->texture->width[0];
+ vs_consts->denorm.y = mc->buffered_surface->texture->height[0];
+
+ pipe_buffer_unmap(pipe->screen, mc->vs_const_buf.buffer);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, &mc->vs_const_buf);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, &mc->fs_const_buf);
+
+ if (num_macroblocks[vlMacroBlockExTypeIntra] > 0)
+ {
+ pipe->set_vertex_buffers(pipe, 1, mc->vertex_bufs.all);
+ pipe->set_vertex_elements(pipe, 4, mc->vertex_elems);
+ pipe->set_sampler_textures(pipe, 3, mc->textures.all);
+ pipe->bind_sampler_states(pipe, 3, mc->samplers.all);
+ pipe->bind_vs_state(pipe, mc->i_vs);
+ pipe->bind_fs_state(pipe, mc->i_fs);
+
+ pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeIntra] * 24);
+ vb_start += num_macroblocks[vlMacroBlockExTypeIntra] * 24;
+ }
+
+ if (num_macroblocks[vlMacroBlockExTypeFwdPredictedFrame] > 0)
+ {
+ pipe->set_vertex_buffers(pipe, 2, mc->vertex_bufs.all);
+ pipe->set_vertex_elements(pipe, 6, mc->vertex_elems);
+ mc->textures.ref[0] = mc->past_surface->texture;
+ pipe->set_sampler_textures(pipe, 4, mc->textures.all);
+ pipe->bind_sampler_states(pipe, 4, mc->samplers.all);
+ pipe->bind_vs_state(pipe, mc->p_vs[0]);
+ pipe->bind_fs_state(pipe, mc->p_fs[0]);
+
+ pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeFwdPredictedFrame] * 24);
+ vb_start += num_macroblocks[vlMacroBlockExTypeFwdPredictedFrame] * 24;
+ }
+
+ if (num_macroblocks[vlMacroBlockExTypeFwdPredictedField] > 0)
+ {
+ pipe->set_vertex_buffers(pipe, 2, mc->vertex_bufs.all);
+ pipe->set_vertex_elements(pipe, 6, mc->vertex_elems);
+ mc->textures.ref[0] = mc->past_surface->texture;
+ pipe->set_sampler_textures(pipe, 4, mc->textures.all);
+ pipe->bind_sampler_states(pipe, 4, mc->samplers.all);
+ pipe->bind_vs_state(pipe, mc->p_vs[1]);
+ pipe->bind_fs_state(pipe, mc->p_fs[1]);
+
+ pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeFwdPredictedField] * 24);
+ vb_start += num_macroblocks[vlMacroBlockExTypeFwdPredictedField] * 24;
+ }
+
+ if (num_macroblocks[vlMacroBlockExTypeBkwdPredictedFrame] > 0)
+ {
+ pipe->set_vertex_buffers(pipe, 2, mc->vertex_bufs.all);
+ pipe->set_vertex_elements(pipe, 6, mc->vertex_elems);
+ mc->textures.ref[0] = mc->future_surface->texture;
+ pipe->set_sampler_textures(pipe, 4, mc->textures.all);
+ pipe->bind_sampler_states(pipe, 4, mc->samplers.all);
+ pipe->bind_vs_state(pipe, mc->p_vs[0]);
+ pipe->bind_fs_state(pipe, mc->p_fs[0]);
+
+ pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeBkwdPredictedFrame] * 24);
+ vb_start += num_macroblocks[vlMacroBlockExTypeBkwdPredictedFrame] * 24;
+ }
+
+ if (num_macroblocks[vlMacroBlockExTypeBkwdPredictedField] > 0)
+ {
+ pipe->set_vertex_buffers(pipe, 2, mc->vertex_bufs.all);
+ pipe->set_vertex_elements(pipe, 6, mc->vertex_elems);
+ mc->textures.ref[0] = mc->future_surface->texture;
+ pipe->set_sampler_textures(pipe, 4, mc->textures.all);
+ pipe->bind_sampler_states(pipe, 4, mc->samplers.all);
+ pipe->bind_vs_state(pipe, mc->p_vs[1]);
+ pipe->bind_fs_state(pipe, mc->p_fs[1]);
+
+ pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeBkwdPredictedField] * 24);
+ vb_start += num_macroblocks[vlMacroBlockExTypeBkwdPredictedField] * 24;
+ }
+
+ if (num_macroblocks[vlMacroBlockExTypeBiPredictedFrame] > 0)
+ {
+ pipe->set_vertex_buffers(pipe, 3, mc->vertex_bufs.all);
+ pipe->set_vertex_elements(pipe, 8, mc->vertex_elems);
+ mc->textures.ref[0] = mc->past_surface->texture;
+ mc->textures.ref[1] = mc->future_surface->texture;
+ pipe->set_sampler_textures(pipe, 5, mc->textures.all);
+ pipe->bind_sampler_states(pipe, 5, mc->samplers.all);
+ pipe->bind_vs_state(pipe, mc->b_vs[0]);
+ pipe->bind_fs_state(pipe, mc->b_fs[0]);
+
+ pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeBiPredictedFrame] * 24);
+ vb_start += num_macroblocks[vlMacroBlockExTypeBiPredictedFrame] * 24;
+ }
+
+ if (num_macroblocks[vlMacroBlockExTypeBiPredictedField] > 0)
+ {
+ pipe->set_vertex_buffers(pipe, 3, mc->vertex_bufs.all);
+ pipe->set_vertex_elements(pipe, 8, mc->vertex_elems);
+ mc->textures.ref[0] = mc->past_surface->texture;
+ mc->textures.ref[1] = mc->future_surface->texture;
+ pipe->set_sampler_textures(pipe, 5, mc->textures.all);
+ pipe->bind_sampler_states(pipe, 5, mc->samplers.all);
+ pipe->bind_vs_state(pipe, mc->b_vs[1]);
+ pipe->bind_fs_state(pipe, mc->b_fs[1]);
+
+ pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLES, vb_start, num_macroblocks[vlMacroBlockExTypeBiPredictedField] * 24);
+ vb_start += num_macroblocks[vlMacroBlockExTypeBiPredictedField] * 24;
+ }
+
+ pipe->flush(pipe, PIPE_FLUSH_RENDER_CACHE, &mc->buffered_surface->render_fence);
+ pipe_surface_reference(&mc->render_target.cbufs[0], NULL);
+
+ for (i = 0; i < 3; ++i)
+ mc->zero_block[i].x = -1.0f;
+
+ mc->buffered_surface = NULL;
+ mc->num_macroblocks = 0;
+
+ return 0;
+}
+
+static int vlRenderMacroBlocksMpeg2R16SnormBuffered
+(
+ struct vlRender *render,
+ struct vlMpeg2MacroBlockBatch *batch,
+ struct vlSurface *surface
+)
+{
+ struct vlR16SnormBufferedMC *mc;
+ bool new_surface = false;
+ unsigned int i;
+
+ assert(render);
+
+ mc = (struct vlR16SnormBufferedMC*)render;
+
+ if (mc->buffered_surface)
+ {
+ if (mc->buffered_surface != surface)
+ {
+ vlFlush(&mc->base);
+ new_surface = true;
+ }
+ }
+ else
+ new_surface = true;
+
+ if (new_surface)
+ {
+ mc->buffered_surface = surface;
+ mc->past_surface = batch->past_surface;
+ mc->future_surface = batch->future_surface;
+ mc->surface_tex_inv_size.x = 1.0f / surface->texture->width[0];
+ mc->surface_tex_inv_size.y = 1.0f / surface->texture->height[0];
+
+ for (i = 0; i < 3; ++i)
+ {
+ mc->tex_transfer[i] = mc->pipe->screen->get_tex_transfer
+ (
+ mc->pipe->screen,
+ mc->textures.all[i],
+ 0, 0, 0, PIPE_TRANSFER_WRITE, 0, 0,
+ surface->texture->width[0],
+ surface->texture->height[0]
+ );
+
+ mc->texels[i] = mc->pipe->screen->transfer_map(mc->pipe->screen, mc->tex_transfer[i]);
+ }
+ }
+
+ for (i = 0; i < batch->num_macroblocks; ++i)
+ vlGrabMacroBlock(mc, &batch->macroblocks[i]);
+
+ return 0;
+}
+
+static inline int vlEnd
+(
+ struct vlRender *render
+)
+{
+ assert(render);
+
+ return 0;
+}
+
+static int vlDestroy
+(
+ struct vlRender *render
+)
+{
+ struct vlR16SnormBufferedMC *mc;
+ struct pipe_context *pipe;
+ unsigned int i;
+
+ assert(render);
+
+ mc = (struct vlR16SnormBufferedMC*)render;
+ pipe = mc->pipe;
+
+ for (i = 0; i < 5; ++i)
+ pipe->delete_sampler_state(pipe, mc->samplers.all[i]);
+
+ for (i = 0; i < 3; ++i)
+ pipe_buffer_reference(&mc->vertex_bufs.all[i].buffer, NULL);
+
+ /* Textures 3 & 4 are not created directly, no need to release them here */
+ for (i = 0; i < 3; ++i)
+ pipe_texture_reference(&mc->textures.all[i], NULL);
+
+ pipe->delete_vs_state(pipe, mc->i_vs);
+ pipe->delete_fs_state(pipe, mc->i_fs);
+
+ for (i = 0; i < 2; ++i)
+ {
+ pipe->delete_vs_state(pipe, mc->p_vs[i]);
+ pipe->delete_fs_state(pipe, mc->p_fs[i]);
+ pipe->delete_vs_state(pipe, mc->b_vs[i]);
+ pipe->delete_fs_state(pipe, mc->b_fs[i]);
+ }
+
+ pipe_buffer_reference(&mc->vs_const_buf.buffer, NULL);
+ pipe_buffer_reference(&mc->fs_const_buf.buffer, NULL);
+
+ FREE(mc->macroblocks);
+ FREE(mc);
+
+ return 0;
+}
+
+/*
+ * Multiplier renormalizes block samples from 16 bits to 12 bits.
+ * Divider is used when calculating Y % 2 for choosing top or bottom
+ * field for P or B macroblocks.
+ * TODO: Use immediates.
+ */
+static const struct vlFragmentShaderConsts fs_consts =
+{
+ {32767.0f / 255.0f, 32767.0f / 255.0f, 32767.0f / 255.0f, 0.0f},
+ {0.5f, 2.0f, 0.0f, 0.0f}
+};
+
+#include "vl_r16snorm_mc_buf_shaders.inc"
+
+static int vlCreateDataBufs
+(
+ struct vlR16SnormBufferedMC *mc
+)
+{
+ const unsigned int mbw = align(mc->picture_width, VL_MACROBLOCK_WIDTH) / VL_MACROBLOCK_WIDTH;
+ const unsigned int mbh = align(mc->picture_height, VL_MACROBLOCK_HEIGHT) / VL_MACROBLOCK_HEIGHT;
+
+ struct pipe_context *pipe;
+ unsigned int i;
+
+ assert(mc);
+
+ pipe = mc->pipe;
+ mc->macroblocks_per_picture = mbw * mbh;
+
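+ /*
+  * Stream 0 interleaves position plus luma/Cb/Cr texcoords per vertex
+  * (struct vlMacroBlockVertexStream0); streams 1 and 2 each hold two
+  * vlVertex2f per vertex: the top and bottom field motion vectors for the
+  * first and second reference surface, in texture coordinates.
+  */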
+ /* Create our vertex buffers */
+ mc->vertex_bufs.ycbcr.stride = sizeof(struct vlVertex2f) * 4;
+ mc->vertex_bufs.ycbcr.max_index = 24 * mc->macroblocks_per_picture - 1;
+ mc->vertex_bufs.ycbcr.buffer_offset = 0;
+ mc->vertex_bufs.ycbcr.buffer = pipe_buffer_create
+ (
+ pipe->screen,
+ DEFAULT_BUF_ALIGNMENT,
+ PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_DISCARD,
+ sizeof(struct vlVertex2f) * 4 * 24 * mc->macroblocks_per_picture
+ );
+
+ for (i = 1; i < 3; ++i)
+ {
+ mc->vertex_bufs.all[i].stride = sizeof(struct vlVertex2f) * 2;
+ mc->vertex_bufs.all[i].max_index = 24 * mc->macroblocks_per_picture - 1;
+ mc->vertex_bufs.all[i].buffer_offset = 0;
+ mc->vertex_bufs.all[i].buffer = pipe_buffer_create
+ (
+ pipe->screen,
+ DEFAULT_BUF_ALIGNMENT,
+ PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_DISCARD,
+ sizeof(struct vlVertex2f) * 2 * 24 * mc->macroblocks_per_picture
+ );
+ }
+
+ /* Position element */
+ mc->vertex_elems[0].src_offset = 0;
+ mc->vertex_elems[0].vertex_buffer_index = 0;
+ mc->vertex_elems[0].nr_components = 2;
+ mc->vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ /* Luma, texcoord element */
+ mc->vertex_elems[1].src_offset = sizeof(struct vlVertex2f);
+ mc->vertex_elems[1].vertex_buffer_index = 0;
+ mc->vertex_elems[1].nr_components = 2;
+ mc->vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ /* Chroma Cr texcoord element */
+ mc->vertex_elems[2].src_offset = sizeof(struct vlVertex2f) * 2;
+ mc->vertex_elems[2].vertex_buffer_index = 0;
+ mc->vertex_elems[2].nr_components = 2;
+ mc->vertex_elems[2].src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ /* Chroma Cb texcoord element */
+ mc->vertex_elems[3].src_offset = sizeof(struct vlVertex2f) * 3;
+ mc->vertex_elems[3].vertex_buffer_index = 0;
+ mc->vertex_elems[3].nr_components = 2;
+ mc->vertex_elems[3].src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ /* First ref surface top field texcoord element */
+ mc->vertex_elems[4].src_offset = 0;
+ mc->vertex_elems[4].vertex_buffer_index = 1;
+ mc->vertex_elems[4].nr_components = 2;
+ mc->vertex_elems[4].src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ /* First ref surface bottom field texcoord element */
+ mc->vertex_elems[5].src_offset = sizeof(struct vlVertex2f);
+ mc->vertex_elems[5].vertex_buffer_index = 1;
+ mc->vertex_elems[5].nr_components = 2;
+ mc->vertex_elems[5].src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ /* Second ref surface top field texcoord element */
+ mc->vertex_elems[6].src_offset = 0;
+ mc->vertex_elems[6].vertex_buffer_index = 2;
+ mc->vertex_elems[6].nr_components = 2;
+ mc->vertex_elems[6].src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ /* Second ref surface bottom field texcoord element */
+ mc->vertex_elems[7].src_offset = sizeof(struct vlVertex2f);
+ mc->vertex_elems[7].vertex_buffer_index = 2;
+ mc->vertex_elems[7].nr_components = 2;
+ mc->vertex_elems[7].src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ /* Create our constant buffer */
+ mc->vs_const_buf.buffer = pipe_buffer_create
+ (
+ pipe->screen,
+ DEFAULT_BUF_ALIGNMENT,
+ PIPE_BUFFER_USAGE_CONSTANT | PIPE_BUFFER_USAGE_DISCARD,
+ sizeof(struct vlVertexShaderConsts)
+ );
+
+ mc->fs_const_buf.buffer = pipe_buffer_create
+ (
+ pipe->screen,
+ DEFAULT_BUF_ALIGNMENT,
+ PIPE_BUFFER_USAGE_CONSTANT,
+ sizeof(struct vlFragmentShaderConsts)
+ );
+
+ memcpy
+ (
+ pipe_buffer_map(pipe->screen, mc->fs_const_buf.buffer, PIPE_BUFFER_USAGE_CPU_WRITE),
+ &fs_consts,
+ sizeof(struct vlFragmentShaderConsts)
+ );
+
+ pipe_buffer_unmap(pipe->screen, mc->fs_const_buf.buffer);
+
+ mc->macroblocks = MALLOC(sizeof(struct vlMpeg2MacroBlock) * mc->macroblocks_per_picture);
+
+ return 0;
+}
+
+static int vlInit
+(
+ struct vlR16SnormBufferedMC *mc
+)
+{
+ struct pipe_context *pipe;
+ struct pipe_sampler_state sampler;
+ struct pipe_texture template;
+ unsigned int filters[5];
+ unsigned int i;
+
+ assert(mc);
+
+ pipe = mc->pipe;
+
+ mc->buffered_surface = NULL;
+ mc->past_surface = NULL;
+ mc->future_surface = NULL;
+ for (i = 0; i < 3; ++i)
+ mc->zero_block[i].x = -1.0f;
+ mc->num_macroblocks = 0;
+
+ /* For MC we render to textures, which are rounded up to nearest POT */
+ mc->viewport.scale[0] = vlRoundUpPOT(mc->picture_width);
+ mc->viewport.scale[1] = vlRoundUpPOT(mc->picture_height);
+ mc->viewport.scale[2] = 1;
+ mc->viewport.scale[3] = 1;
+ mc->viewport.translate[0] = 0;
+ mc->viewport.translate[1] = 0;
+ mc->viewport.translate[2] = 0;
+ mc->viewport.translate[3] = 0;
+
+ mc->render_target.width = vlRoundUpPOT(mc->picture_width);
+ mc->render_target.height = vlRoundUpPOT(mc->picture_height);
+ mc->render_target.nr_cbufs = 1;
+ /* FB for MC stage is a vlSurface created by the user, set at render time */
+ mc->render_target.zsbuf = NULL;
+
+ filters[0] = PIPE_TEX_FILTER_NEAREST;
+ /* FIXME: Linear causes discoloration around block edges */
+ filters[1] = /*mc->picture_format == vlFormatYCbCr444 ?*/ PIPE_TEX_FILTER_NEAREST /*: PIPE_TEX_FILTER_LINEAR*/;
+ filters[2] = /*mc->picture_format == vlFormatYCbCr444 ?*/ PIPE_TEX_FILTER_NEAREST /*: PIPE_TEX_FILTER_LINEAR*/;
+ filters[3] = PIPE_TEX_FILTER_LINEAR;
+ filters[4] = PIPE_TEX_FILTER_LINEAR;
+
+ for (i = 0; i < 5; ++i)
+ {
+ /* Zero the sampler so the fields left commented out below have defined values */
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.min_img_filter = filters[i];
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ sampler.mag_img_filter = filters[i];
+ sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
+ sampler.compare_func = PIPE_FUNC_ALWAYS;
+ sampler.normalized_coords = 1;
+ /*sampler.prefilter = ;*/
+ /*sampler.lod_bias = ;*/
+ sampler.min_lod = 0;
+ /*sampler.max_lod = ;*/
+ /*sampler.border_color[i] = ;*/
+ /*sampler.max_anisotropy = ;*/
+ mc->samplers.all[i] = pipe->create_sampler_state(pipe, &sampler);
+ }
+
+ memset(&template, 0, sizeof(struct pipe_texture));
+ template.target = PIPE_TEXTURE_2D;
+ template.format = PIPE_FORMAT_R16_SNORM;
+ template.last_level = 0;
+ template.width[0] = vlRoundUpPOT(mc->picture_width);
+ template.height[0] = vlRoundUpPOT(mc->picture_height);
+ template.depth[0] = 1;
+ pf_get_block(template.format, &template.block);
+ template.tex_usage = PIPE_TEXTURE_USAGE_SAMPLER | PIPE_TEXTURE_USAGE_DYNAMIC;
+
+ mc->textures.y = pipe->screen->texture_create(pipe->screen, &template);
+
+ if (mc->picture_format == vlFormatYCbCr420)
+ {
+ template.width[0] = vlRoundUpPOT(mc->picture_width / 2);
+ template.height[0] = vlRoundUpPOT(mc->picture_height / 2);
+ }
+ else if (mc->picture_format == vlFormatYCbCr422)
+ template.height[0] = vlRoundUpPOT(mc->picture_height / 2);
+
+ mc->textures.cb = pipe->screen->texture_create(pipe->screen, &template);
+ mc->textures.cr = pipe->screen->texture_create(pipe->screen, &template);
+
+ /* textures.all[3] & textures.all[4] are assigned from vlSurfaces for P and B macroblocks at render time */
+
+ vlCreateVertexShaderIMB(mc);
+ vlCreateFragmentShaderIMB(mc);
+ vlCreateVertexShaderFramePMB(mc);
+ vlCreateVertexShaderFieldPMB(mc);
+ vlCreateFragmentShaderFramePMB(mc);
+ vlCreateFragmentShaderFieldPMB(mc);
+ vlCreateVertexShaderFrameBMB(mc);
+ vlCreateVertexShaderFieldBMB(mc);
+ vlCreateFragmentShaderFrameBMB(mc);
+ vlCreateFragmentShaderFieldBMB(mc);
+ vlCreateDataBufs(mc);
+
+ return 0;
+}
+
+int vlCreateR16SNormBufferedMC
+(
+ struct pipe_context *pipe,
+ unsigned int picture_width,
+ unsigned int picture_height,
+ enum vlFormat picture_format,
+ struct vlRender **render
+)
+{
+ struct vlR16SnormBufferedMC *mc;
+
+ assert(pipe);
+ assert(render);
+
+ mc = CALLOC_STRUCT(vlR16SnormBufferedMC);
+
+ if (!mc)
+ return 1;
+
+ mc->base.vlBegin = &vlBegin;
+ mc->base.vlRenderMacroBlocksMpeg2 = &vlRenderMacroBlocksMpeg2R16SnormBuffered;
+ mc->base.vlEnd = &vlEnd;
+ mc->base.vlFlush = &vlFlush;
+ mc->base.vlDestroy = &vlDestroy;
+ mc->pipe = pipe;
+ mc->picture_width = picture_width;
+ mc->picture_height = picture_height;
+
+ vlInit(mc);
+
+ *render = &mc->base;
+
+ return 0;
+}
diff --git a/src/gallium/state_trackers/g3dvl/vl_r16snorm_mc_buf.h b/src/gallium/state_trackers/g3dvl/vl_r16snorm_mc_buf.h
new file mode 100644
index 0000000000..27177d64ca
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_r16snorm_mc_buf.h
@@ -0,0 +1,18 @@
+#ifndef vl_r16snorm_mc_buf_h
+#define vl_r16snorm_mc_buf_h
+
+#include "vl_types.h"
+
+struct pipe_context;
+struct vlRender;
+
+int vlCreateR16SNormBufferedMC
+(
+ struct pipe_context *pipe,
+ unsigned int picture_width,
+ unsigned int picture_height,
+ enum vlFormat picture_format,
+ struct vlRender **render
+);
+
+#endif
diff --git a/src/gallium/state_trackers/g3dvl/vl_r16snorm_mc_buf_shaders.inc b/src/gallium/state_trackers/g3dvl/vl_r16snorm_mc_buf_shaders.inc
new file mode 100644
index 0000000000..ef4a4b2add
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_r16snorm_mc_buf_shaders.inc
@@ -0,0 +1,1185 @@
+static int vlCreateVertexShaderIMB
+(
+ struct vlR16SnormBufferedMC *mc
+)
+{
+ const unsigned int max_tokens = 50;
+
+ struct pipe_context *pipe;
+ struct pipe_shader_state vs;
+ struct tgsi_token *tokens;
+ struct tgsi_header *header;
+
+ struct tgsi_full_declaration decl;
+ struct tgsi_full_instruction inst;
+
+ unsigned int ti;
+ unsigned int i;
+
+ assert(mc);
+
+ pipe = mc->pipe;
+ tokens = (struct tgsi_token*)malloc(max_tokens * sizeof(struct tgsi_token));
+
+ /* Version */
+ *(struct tgsi_version*)&tokens[0] = tgsi_build_version();
+ /* Header */
+ header = (struct tgsi_header*)&tokens[1];
+ *header = tgsi_build_header();
+ /* Processor */
+ *(struct tgsi_processor*)&tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_VERTEX, header);
+
+ ti = 3;
+
+ /*
+ * decl i0 ; Vertex pos
+ * decl i1 ; Luma texcoords
+ * decl i2 ; Chroma Cb texcoords
+ * decl i3 ; Chroma Cr texcoords
+ */
+ for (i = 0; i < 4; i++)
+ {
+ decl = vl_decl_input(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * decl o0 ; Vertex pos
+ * decl o1 ; Luma texcoords
+ * decl o2 ; Chroma Cb texcoords
+ * decl o3 ; Chroma Cr texcoords
+ */
+ for (i = 0; i < 4; i++)
+ {
+ decl = vl_decl_output(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * mov o0, i0 ; Move input vertex pos to output
+ * mov o1, i1 ; Move input luma texcoords to output
+ * mov o2, i2 ; Move input chroma Cb texcoords to output
+ * mov o3, i3 ; Move input chroma Cr texcoords to output
+ */
+ for (i = 0; i < 4; ++i)
+ {
+ inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_OUTPUT, i, TGSI_FILE_INPUT, i);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* end */
+ inst = vl_end();
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ vs.tokens = tokens;
+ mc->i_vs = pipe->create_vs_state(pipe, &vs);
+ free(tokens);
+
+ return 0;
+}
+
+static int vlCreateFragmentShaderIMB
+(
+ struct vlR16SnormBufferedMC *mc
+)
+{
+ const unsigned int max_tokens = 100;
+
+ struct pipe_context *pipe;
+ struct pipe_shader_state fs;
+ struct tgsi_token *tokens;
+ struct tgsi_header *header;
+
+ struct tgsi_full_declaration decl;
+ struct tgsi_full_instruction inst;
+
+ unsigned int ti;
+ unsigned int i;
+
+ assert(mc);
+
+ pipe = mc->pipe;
+ tokens = (struct tgsi_token*)malloc(max_tokens * sizeof(struct tgsi_token));
+
+ /* Version */
+ *(struct tgsi_version*)&tokens[0] = tgsi_build_version();
+ /* Header */
+ header = (struct tgsi_header*)&tokens[1];
+ *header = tgsi_build_header();
+ /* Processor */
+ *(struct tgsi_processor*)&tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_FRAGMENT, header);
+
+ ti = 3;
+
+ /*
+ * decl i0 ; Luma texcoords
+ * decl i1 ; Chroma Cb texcoords
+ * decl i2 ; Chroma Cr texcoords
+ */
+ for (i = 0; i < 3; ++i)
+ {
+ decl = vl_decl_interpolated_input(TGSI_SEMANTIC_GENERIC, i + 1, i, i, TGSI_INTERPOLATE_LINEAR);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* decl c0 ; Scaling factor, rescales 16-bit snorm to 9-bit snorm */
+ decl = vl_decl_constants(TGSI_SEMANTIC_GENERIC, 0, 0, 0);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /* decl o0 ; Fragment color */
+ decl = vl_decl_output(TGSI_SEMANTIC_COLOR, 0, 0, 0);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /* decl t0, t1 */
+ decl = vl_decl_temps(0, 1);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /*
+ * decl s0 ; Sampler for luma texture
+ * decl s1 ; Sampler for chroma Cb texture
+ * decl s2 ; Sampler for chroma Cr texture
+ */
+ for (i = 0; i < 3; ++i)
+ {
+ decl = vl_decl_samplers(i, i);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * tex2d t1, i0, s0 ; Read texel from luma texture
+ * mov t0.x, t1.x ; Move luma sample into .x component
+ * tex2d t1, i1, s1 ; Read texel from chroma Cb texture
+ * mov t0.y, t1.x ; Move Cb sample into .y component
+ * tex2d t1, i2, s2 ; Read texel from chroma Cr texture
+ * mov t0.z, t1.x ; Move Cr sample into .z component
+ */
+ for (i = 0; i < 3; ++i)
+ {
+ inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_INPUT, i, TGSI_FILE_SAMPLER, i);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 1);
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleX = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleZ = TGSI_SWIZZLE_X;
+ inst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_X << i;
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* mul o0, t0, c0 ; Rescale texel to correct range */
+ inst = vl_inst3(TGSI_OPCODE_MUL, TGSI_FILE_OUTPUT, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_CONSTANT, 0);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* end */
+ inst = vl_end();
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ fs.tokens = tokens;
+ mc->i_fs = pipe->create_fs_state(pipe, &fs);
+ free(tokens);
+
+ return 0;
+}
+
+static int vlCreateVertexShaderFramePMB
+(
+ struct vlR16SnormBufferedMC *mc
+)
+{
+ const unsigned int max_tokens = 100;
+
+ struct pipe_context *pipe;
+ struct pipe_shader_state vs;
+ struct tgsi_token *tokens;
+ struct tgsi_header *header;
+
+ struct tgsi_full_declaration decl;
+ struct tgsi_full_instruction inst;
+
+ unsigned int ti;
+ unsigned int i;
+
+ assert(mc);
+
+ pipe = mc->pipe;
+ tokens = (struct tgsi_token*)malloc(max_tokens * sizeof(struct tgsi_token));
+
+ /* Version */
+ *(struct tgsi_version*)&tokens[0] = tgsi_build_version();
+ /* Header */
+ header = (struct tgsi_header*)&tokens[1];
+ *header = tgsi_build_header();
+ /* Processor */
+ *(struct tgsi_processor*)&tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_VERTEX, header);
+
+ ti = 3;
+
+ /*
+ * decl i0 ; Vertex pos
+ * decl i1 ; Luma texcoords
+ * decl i2 ; Chroma Cb texcoords
+ * decl i3 ; Chroma Cr texcoords
+ * decl i4 ; Ref surface top field texcoords
+ * decl i5 ; Ref surface bottom field texcoords (unused, packed in the same stream)
+ */
+ for (i = 0; i < 6; i++)
+ {
+ decl = vl_decl_input(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * decl o0 ; Vertex pos
+ * decl o1 ; Luma texcoords
+ * decl o2 ; Chroma Cb texcoords
+ * decl o3 ; Chroma Cr texcoords
+ * decl o4 ; Ref macroblock texcoords
+ */
+ for (i = 0; i < 5; i++)
+ {
+ decl = vl_decl_output(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * mov o0, i0 ; Move input vertex pos to output
+ * mov o1, i1 ; Move input luma texcoords to output
+ * mov o2, i2 ; Move input chroma Cb texcoords to output
+ * mov o3, i3 ; Move input chroma Cr texcoords to output
+ */
+ for (i = 0; i < 4; ++i)
+ {
+ inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_OUTPUT, i, TGSI_FILE_INPUT, i);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* add o4, i0, i4 ; Translate vertex pos by motion vec to form ref macroblock texcoords */
+ inst = vl_inst3(TGSI_OPCODE_ADD, TGSI_FILE_OUTPUT, 4, TGSI_FILE_INPUT, 0, TGSI_FILE_INPUT, 4);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* end */
+ inst = vl_end();
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ vs.tokens = tokens;
+ mc->p_vs[0] = pipe->create_vs_state(pipe, &vs);
+ free(tokens);
+
+ return 0;
+}
+
+static int vlCreateVertexShaderFieldPMB
+(
+ struct vlR16SnormBufferedMC *mc
+)
+{
+ const unsigned int max_tokens = 100;
+
+ struct pipe_context *pipe;
+ struct pipe_shader_state vs;
+ struct tgsi_token *tokens;
+ struct tgsi_header *header;
+
+ struct tgsi_full_declaration decl;
+ struct tgsi_full_instruction inst;
+
+ unsigned int ti;
+ unsigned int i;
+
+ assert(mc);
+
+ pipe = mc->pipe;
+ tokens = (struct tgsi_token*)malloc(max_tokens * sizeof(struct tgsi_token));
+
+ /* Version */
+ *(struct tgsi_version*)&tokens[0] = tgsi_build_version();
+ /* Header */
+ header = (struct tgsi_header*)&tokens[1];
+ *header = tgsi_build_header();
+ /* Processor */
+ *(struct tgsi_processor*)&tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_VERTEX, header);
+
+ ti = 3;
+
+ /*
+ * decl i0 ; Vertex pos
+ * decl i1 ; Luma texcoords
+ * decl i2 ; Chroma Cb texcoords
+ * decl i3 ; Chroma Cr texcoords
+ * decl i4 ; Ref macroblock top field texcoords
+ * decl i5 ; Ref macroblock bottom field texcoords
+ */
+ for (i = 0; i < 6; i++)
+ {
+ decl = vl_decl_input(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* decl c0 ; Render target dimensions */
+ decl = vl_decl_constants(TGSI_SEMANTIC_GENERIC, 0, 0, 0);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /*
+ * decl o0 ; Vertex pos
+ * decl o1 ; Luma texcoords
+ * decl o2 ; Chroma Cb texcoords
+ * decl o3 ; Chroma Cr texcoords
+ * decl o4 ; Ref macroblock top field texcoords
+ * decl o5 ; Ref macroblock bottom field texcoords
+ * decl o6 ; Denormalized vertex pos
+ */
+ for (i = 0; i < 7; i++)
+ {
+ decl = vl_decl_output(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * mov o0, i0 ; Move input vertex pos to output
+ * mov o1, i1 ; Move input luma texcoords to output
+ * mov o2, i2 ; Move input chroma Cb texcoords to output
+ * mov o3, i3 ; Move input chroma Cr texcoords to output
+ */
+ for (i = 0; i < 4; ++i)
+ {
+ inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_OUTPUT, i, TGSI_FILE_INPUT, i);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * add o4, i0, i4 ; Translate vertex pos by motion vec to form top field macroblock texcoords
+ * add o5, i0, i5 ; Translate vertex pos by motion vec to form bottom field macroblock texcoords
+ */
+ for (i = 0; i < 2; ++i)
+ {
+ inst = vl_inst3(TGSI_OPCODE_ADD, TGSI_FILE_OUTPUT, i + 4, TGSI_FILE_INPUT, 0, TGSI_FILE_INPUT, i + 4);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* mul o6, i0, c0 ; Denorm vertex pos */
+ inst = vl_inst3(TGSI_OPCODE_MUL, TGSI_FILE_OUTPUT, 6, TGSI_FILE_INPUT, 0, TGSI_FILE_CONSTANT, 0);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* end */
+ inst = vl_end();
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ vs.tokens = tokens;
+ mc->p_vs[1] = pipe->create_vs_state(pipe, &vs);
+ free(tokens);
+
+ return 0;
+}
+
+static int vlCreateFragmentShaderFramePMB
+(
+ struct vlR16SnormBufferedMC *mc
+)
+{
+ const unsigned int max_tokens = 100;
+
+ struct pipe_context *pipe;
+ struct pipe_shader_state fs;
+ struct tgsi_token *tokens;
+ struct tgsi_header *header;
+
+ struct tgsi_full_declaration decl;
+ struct tgsi_full_instruction inst;
+
+ unsigned int ti;
+ unsigned int i;
+
+ assert(mc);
+
+ pipe = mc->pipe;
+ tokens = (struct tgsi_token*)malloc(max_tokens * sizeof(struct tgsi_token));
+
+ /* Version */
+ *(struct tgsi_version*)&tokens[0] = tgsi_build_version();
+ /* Header */
+ header = (struct tgsi_header*)&tokens[1];
+ *header = tgsi_build_header();
+ /* Processor */
+ *(struct tgsi_processor*)&tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_FRAGMENT, header);
+
+ ti = 3;
+
+ /*
+ * decl i0 ; Luma texcoords
+ * decl i1 ; Chroma Cb texcoords
+ * decl i2 ; Chroma Cr texcoords
+ * decl i3 ; Ref macroblock texcoords
+ */
+ for (i = 0; i < 4; ++i)
+ {
+ decl = vl_decl_interpolated_input(TGSI_SEMANTIC_GENERIC, i + 1, i, i, TGSI_INTERPOLATE_LINEAR);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* decl c0 ; Scaling factor, rescales 16-bit snorm to 9-bit snorm */
+ decl = vl_decl_constants(TGSI_SEMANTIC_GENERIC, 0, 0, 0);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /* decl o0 ; Fragment color */
+ decl = vl_decl_output(TGSI_SEMANTIC_COLOR, 0, 0, 0);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /* decl t0, t1 */
+ decl = vl_decl_temps(0, 1);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /*
+ * decl s0 ; Sampler for luma texture
+ * decl s1 ; Sampler for chroma Cb texture
+ * decl s2 ; Sampler for chroma Cr texture
+ * decl s3 ; Sampler for ref surface texture
+ */
+ for (i = 0; i < 4; ++i)
+ {
+ decl = vl_decl_samplers(i, i);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * tex2d t1, i0, s0 ; Read texel from luma texture
+ * mov t0.x, t1.x ; Move luma sample into .x component
+ * tex2d t1, i1, s1 ; Read texel from chroma Cb texture
+ * mov t0.y, t1.x ; Move Cb sample into .y component
+ * tex2d t1, i2, s2 ; Read texel from chroma Cr texture
+ * mov t0.z, t1.x ; Move Cr sample into .z component
+ */
+ for (i = 0; i < 3; ++i)
+ {
+ inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_INPUT, i, TGSI_FILE_SAMPLER, i);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 1);
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleX = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleZ = TGSI_SWIZZLE_X;
+ inst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_X << i;
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* mul t0, t0, c0 ; Rescale texel to correct range */
+ inst = vl_inst3(TGSI_OPCODE_MUL, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_CONSTANT, 0);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* tex2d t1, i3, s3 ; Read texel from ref macroblock */
+ inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_INPUT, 3, TGSI_FILE_SAMPLER, 3);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* add o0, t0, t1 ; Add ref and differential to form final output */
+ inst = vl_inst3(TGSI_OPCODE_ADD, TGSI_FILE_OUTPUT, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 1);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* end */
+ inst = vl_end();
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ fs.tokens = tokens;
+ mc->p_fs[0] = pipe->create_fs_state(pipe, &fs);
+ free(tokens);
+
+ return 0;
+}
+
+static int vlCreateFragmentShaderFieldPMB
+(
+ struct vlR16SnormBufferedMC *mc
+)
+{
+ const unsigned int max_tokens = 200;
+
+ struct pipe_context *pipe;
+ struct pipe_shader_state fs;
+ struct tgsi_token *tokens;
+ struct tgsi_header *header;
+
+ struct tgsi_full_declaration decl;
+ struct tgsi_full_instruction inst;
+
+ unsigned int ti;
+ unsigned int i;
+
+ assert(mc);
+
+ pipe = mc->pipe;
+ tokens = (struct tgsi_token*)malloc(max_tokens * sizeof(struct tgsi_token));
+
+ /* Version */
+ *(struct tgsi_version*)&tokens[0] = tgsi_build_version();
+ /* Header */
+ header = (struct tgsi_header*)&tokens[1];
+ *header = tgsi_build_header();
+ /* Processor */
+ *(struct tgsi_processor*)&tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_FRAGMENT, header);
+
+ ti = 3;
+
+ /*
+ * decl i0 ; Luma texcoords
+ * decl i1 ; Chroma Cb texcoords
+ * decl i2 ; Chroma Cr texcoords
+ * decl i3 ; Ref macroblock top field texcoords
+ * decl i4 ; Ref macroblock bottom field texcoords
+ * decl i5 ; Denormalized vertex pos
+ */
+ for (i = 0; i < 6; ++i)
+ {
+ decl = vl_decl_interpolated_input(TGSI_SEMANTIC_GENERIC, i + 1, i, i, TGSI_INTERPOLATE_LINEAR);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * decl c0 ; Scaling factor, rescales 16-bit snorm to 9-bit snorm
+ * decl c1 ; Constants 1/2 & 2 in .x, .y channels for Y-mod-2 top/bottom field selection
+ */
+ decl = vl_decl_constants(TGSI_SEMANTIC_GENERIC, 0, 0, 1);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /* decl o0 ; Fragment color */
+ decl = vl_decl_output(TGSI_SEMANTIC_COLOR, 0, 0, 0);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /* decl t0-t4 */
+ decl = vl_decl_temps(0, 4);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /*
+ * decl s0 ; Sampler for luma texture
+ * decl s1 ; Sampler for chroma Cb texture
+ * decl s2 ; Sampler for chroma Cr texture
+ * decl s3 ; Sampler for ref surface texture
+ */
+ for (i = 0; i < 4; ++i)
+ {
+ decl = vl_decl_samplers(i, i);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * tex2d t1, i0, s0 ; Read texel from luma texture
+ * mov t0.x, t1.x ; Move luma sample into .x component
+ * tex2d t1, i1, s1 ; Read texel from chroma Cb texture
+ * mov t0.y, t1.x ; Move Cb sample into .y component
+ * tex2d t1, i2, s2 ; Read texel from chroma Cr texture
+ * mov t0.z, t1.x ; Move Cr sample into .z component
+ */
+ for (i = 0; i < 3; ++i)
+ {
+ inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_INPUT, i, TGSI_FILE_SAMPLER, i);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 1);
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleX = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleZ = TGSI_SWIZZLE_X;
+ inst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_X << i;
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* mul t0, t0, c0 ; Rescale texel to correct range */
+ inst = vl_inst3(TGSI_OPCODE_MUL, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_CONSTANT, 0);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /*
+ * tex2d t1, i3, s3 ; Read texel from ref macroblock top field
+ * tex2d t2, i4, s3 ; Read texel from ref macroblock bottom field
+ */
+ for (i = 0; i < 2; ++i)
+ {
+ inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, i + 1, TGSI_FILE_INPUT, i + 3, TGSI_FILE_SAMPLER, 3);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* XXX: Pos values off by 0.5? */
+ /* sub t4, i5.y, c1.x ; Sub 0.5 from denormalized pos */
+ inst = vl_inst3(TGSI_OPCODE_SUB, TGSI_FILE_TEMPORARY, 4, TGSI_FILE_INPUT, 5, TGSI_FILE_CONSTANT, 1);
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleX = TGSI_SWIZZLE_Y;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_Y;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleZ = TGSI_SWIZZLE_Y;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleW = TGSI_SWIZZLE_Y;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleX = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleY = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleZ = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleW = TGSI_SWIZZLE_X;
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* mul t3, t4, c1.x ; Multiply pos Y-coord by 1/2 */
+ inst = vl_inst3(TGSI_OPCODE_MUL, TGSI_FILE_TEMPORARY, 3, TGSI_FILE_TEMPORARY, 4, TGSI_FILE_CONSTANT, 1);
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleX = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleY = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleZ = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleW = TGSI_SWIZZLE_X;
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* floor t3, t3 ; Get rid of fractional part */
+ inst = vl_inst2(TGSI_OPCODE_FLOOR, TGSI_FILE_TEMPORARY, 3, TGSI_FILE_TEMPORARY, 3);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* mul t3, t3, c1.y ; Multiply by 2 */
+ inst = vl_inst3(TGSI_OPCODE_MUL, TGSI_FILE_TEMPORARY, 3, TGSI_FILE_TEMPORARY, 3, TGSI_FILE_CONSTANT, 1);
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleX = TGSI_SWIZZLE_Y;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleY = TGSI_SWIZZLE_Y;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleZ = TGSI_SWIZZLE_Y;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleW = TGSI_SWIZZLE_Y;
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* sub t3, t4, t3 ; Subtract from original Y to get Y % 2 */
+ inst = vl_inst3(TGSI_OPCODE_SUB, TGSI_FILE_TEMPORARY, 3, TGSI_FILE_TEMPORARY, 4, TGSI_FILE_TEMPORARY, 3);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
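+ /* t3 now holds Y mod 2: ~0 on top-field rows, ~1 on bottom-field rows,
+ and is used below as the lerp weight that picks the matching field texel. */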
+
+ /* TODO: Move to conditional tex fetch on t3 instead of lerp */
+ /* lerp t1, t3, t1, t2 ; Choose between top and bottom fields based on Y % 2 */
+ inst = vl_inst4(TGSI_OPCODE_LERP, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_TEMPORARY, 3, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_TEMPORARY, 2);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* add o0, t0, t1 ; Add ref and differential to form final output */
+ inst = vl_inst3(TGSI_OPCODE_ADD, TGSI_FILE_OUTPUT, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 1);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* end */
+ inst = vl_end();
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ fs.tokens = tokens;
+ mc->p_fs[1] = pipe->create_fs_state(pipe, &fs);
+ free(tokens);
+
+ return 0;
+}
+
+static int vlCreateVertexShaderFrameBMB
+(
+ struct vlR16SnormBufferedMC *mc
+)
+{
+ const unsigned int max_tokens = 100;
+
+ struct pipe_context *pipe;
+ struct pipe_shader_state vs;
+ struct tgsi_token *tokens;
+ struct tgsi_header *header;
+
+ struct tgsi_full_declaration decl;
+ struct tgsi_full_instruction inst;
+
+ unsigned int ti;
+ unsigned int i;
+
+ assert(mc);
+
+ pipe = mc->pipe;
+ tokens = (struct tgsi_token*)malloc(max_tokens * sizeof(struct tgsi_token));
+
+ /* Version */
+ *(struct tgsi_version*)&tokens[0] = tgsi_build_version();
+ /* Header */
+ header = (struct tgsi_header*)&tokens[1];
+ *header = tgsi_build_header();
+ /* Processor */
+ *(struct tgsi_processor*)&tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_VERTEX, header);
+
+ ti = 3;
+
+ /*
+ * decl i0 ; Vertex pos
+ * decl i1 ; Luma texcoords
+ * decl i2 ; Chroma Cb texcoords
+ * decl i3 ; Chroma Cr texcoords
+ * decl i4 ; First ref macroblock top field texcoords
+ * decl i5 ; First ref macroblock bottom field texcoords (unused, packed in the same stream)
+ * decl i6 ; Second ref macroblock top field texcoords
+ * decl i7 ; Second ref macroblock bottom field texcoords (unused, packed in the same stream)
+ */
+ for (i = 0; i < 8; i++)
+ {
+ decl = vl_decl_input(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * decl o0 ; Vertex pos
+ * decl o1 ; Luma texcoords
+ * decl o2 ; Chroma Cb texcoords
+ * decl o3 ; Chroma Cr texcoords
+ * decl o4 ; First ref macroblock texcoords
+ * decl o5 ; Second ref macroblock texcoords
+ */
+ for (i = 0; i < 6; i++)
+ {
+ decl = vl_decl_output(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * mov o0, i0 ; Move input vertex pos to output
+ * mov o1, i1 ; Move input luma texcoords to output
+ * mov o2, i2 ; Move input chroma Cb texcoords to output
+ * mov o3, i3 ; Move input chroma Cr texcoords to output
+ */
+ for (i = 0; i < 4; ++i)
+ {
+ inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_OUTPUT, i, TGSI_FILE_INPUT, i);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * add o4, i0, i4 ; Translate vertex pos by motion vec to form first ref macroblock texcoords
+ * add o5, i0, i6 ; Translate vertex pos by motion vec to form second ref macroblock texcoords
+ */
+ for (i = 0; i < 2; ++i)
+ {
+ inst = vl_inst3(TGSI_OPCODE_ADD, TGSI_FILE_OUTPUT, i + 4, TGSI_FILE_INPUT, 0, TGSI_FILE_INPUT, (i + 2) * 2);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* end */
+ inst = vl_end();
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ vs.tokens = tokens;
+ mc->b_vs[0] = pipe->create_vs_state(pipe, &vs);
+ free(tokens);
+
+ return 0;
+}
+
+static int vlCreateVertexShaderFieldBMB
+(
+ struct vlR16SnormBufferedMC *mc
+)
+{
+ const unsigned int max_tokens = 100;
+
+ struct pipe_context *pipe;
+ struct pipe_shader_state vs;
+ struct tgsi_token *tokens;
+ struct tgsi_header *header;
+
+ struct tgsi_full_declaration decl;
+ struct tgsi_full_instruction inst;
+
+ unsigned int ti;
+ unsigned int i;
+
+ assert(mc);
+
+ pipe = mc->pipe;
+ tokens = (struct tgsi_token*)malloc(max_tokens * sizeof(struct tgsi_token));
+
+ /* Version */
+ *(struct tgsi_version*)&tokens[0] = tgsi_build_version();
+ /* Header */
+ header = (struct tgsi_header*)&tokens[1];
+ *header = tgsi_build_header();
+ /* Processor */
+ *(struct tgsi_processor*)&tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_VERTEX, header);
+
+ ti = 3;
+
+ /*
+ * decl i0 ; Vertex pos
+ * decl i1 ; Luma texcoords
+ * decl i2 ; Chroma Cb texcoords
+ * decl i3 ; Chroma Cr texcoords
+ * decl i4 ; First ref macroblock top field texcoords
+ * decl i5 ; First ref macroblock bottom field texcoords
+ * decl i6 ; Second ref macroblock top field texcoords
+ * decl i7 ; Second ref macroblock bottom field texcoords
+ */
+ for (i = 0; i < 8; i++)
+ {
+ decl = vl_decl_input(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* decl c0 ; Render target dimensions */
+ decl = vl_decl_constants(TGSI_SEMANTIC_GENERIC, 0, 0, 0);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /*
+ * decl o0 ; Vertex pos
+ * decl o1 ; Luma texcoords
+ * decl o2 ; Chroma Cb texcoords
+ * decl o3 ; Chroma Cr texcoords
+ * decl o4 ; First ref macroblock top field texcoords
+ * decl o5 ; First ref macroblock bottom field texcoords
+ * decl o6 ; Second ref macroblock top field texcoords
+ * decl o7 ; Second ref macroblock bottom field texcoords
+ * decl o8 ; Denormalized vertex pos
+ */
+ for (i = 0; i < 9; i++)
+ {
+ decl = vl_decl_output(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* decl t0, t1 */
+ decl = vl_decl_temps(0, 1);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /*
+ * mov o0, i0 ; Move input vertex pos to output
+ * mov o1, i1 ; Move input luma texcoords to output
+ * mov o2, i2 ; Move input chroma Cb texcoords to output
+ * mov o3, i3 ; Move input chroma Cr texcoords to output
+ */
+ for (i = 0; i < 4; ++i)
+ {
+ inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_OUTPUT, i, TGSI_FILE_INPUT, i);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * add o4, i0, i4 ; Translate vertex pos by motion vec to form first top field macroblock texcoords
+ * add o5, i0, i5 ; Translate vertex pos by motion vec to form first bottom field macroblock texcoords
+ * add o6, i0, i6 ; Translate vertex pos by motion vec to form second top field macroblock texcoords
+ * add o7, i0, i7 ; Translate vertex pos by motion vec to form second bottom field macroblock texcoords
+ */
+ for (i = 0; i < 4; ++i)
+ {
+ inst = vl_inst3(TGSI_OPCODE_ADD, TGSI_FILE_OUTPUT, i + 4, TGSI_FILE_INPUT, 0, TGSI_FILE_INPUT, i + 4);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* mul o8, i0, c0 ; Denorm vertex pos */
+ inst = vl_inst3(TGSI_OPCODE_MUL, TGSI_FILE_OUTPUT, 8, TGSI_FILE_INPUT, 0, TGSI_FILE_CONSTANT, 0);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* end */
+ inst = vl_end();
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ vs.tokens = tokens;
+ mc->b_vs[1] = pipe->create_vs_state(pipe, &vs);
+ free(tokens);
+
+ return 0;
+}
+
+static int vlCreateFragmentShaderFrameBMB
+(
+ struct vlR16SnormBufferedMC *mc
+)
+{
+ const unsigned int max_tokens = 100;
+
+ struct pipe_context *pipe;
+ struct pipe_shader_state fs;
+ struct tgsi_token *tokens;
+ struct tgsi_header *header;
+
+ struct tgsi_full_declaration decl;
+ struct tgsi_full_instruction inst;
+
+ unsigned int ti;
+ unsigned int i;
+
+ assert(mc);
+
+ pipe = mc->pipe;
+ tokens = (struct tgsi_token*)malloc(max_tokens * sizeof(struct tgsi_token));
+
+ /* Version */
+ *(struct tgsi_version*)&tokens[0] = tgsi_build_version();
+ /* Header */
+ header = (struct tgsi_header*)&tokens[1];
+ *header = tgsi_build_header();
+ /* Processor */
+ *(struct tgsi_processor*)&tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_FRAGMENT, header);
+
+ ti = 3;
+
+ /*
+ * decl i0 ; Luma texcoords
+ * decl i1 ; Chroma Cb texcoords
+ * decl i2 ; Chroma Cr texcoords
+ * decl i3 ; First ref macroblock texcoords
+ * decl i4 ; Second ref macroblock texcoords
+ */
+ for (i = 0; i < 5; ++i)
+ {
+ decl = vl_decl_interpolated_input(TGSI_SEMANTIC_GENERIC, i + 1, i, i, TGSI_INTERPOLATE_LINEAR);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * decl c0 ; Scaling factor, rescales 16-bit snorm to 9-bit snorm
+ * decl c1 ; Constant 1/2 in .x channel to use as weight to blend past and future texels
+ */
+ decl = vl_decl_constants(TGSI_SEMANTIC_GENERIC, 0, 0, 1);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /* decl o0 ; Fragment color */
+ decl = vl_decl_output(TGSI_SEMANTIC_COLOR, 0, 0, 0);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /* decl t0-t2 */
+ decl = vl_decl_temps(0, 2);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /*
+ * decl s0 ; Sampler for luma texture
+ * decl s1 ; Sampler for chroma Cb texture
+ * decl s2 ; Sampler for chroma Cr texture
+ * decl s3 ; Sampler for first ref surface texture
+ * decl s4 ; Sampler for second ref surface texture
+ */
+ for (i = 0; i < 5; ++i)
+ {
+ decl = vl_decl_samplers(i, i);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * tex2d t1, i0, s0 ; Read texel from luma texture
+ * mov t0.x, t1.x ; Move luma sample into .x component
+ * tex2d t1, i1, s1 ; Read texel from chroma Cb texture
+ * mov t0.y, t1.x ; Move Cb sample into .y component
+ * tex2d t1, i2, s2 ; Read texel from chroma Cr texture
+ * mov t0.z, t1.x ; Move Cr sample into .z component
+ */
+ for (i = 0; i < 3; ++i)
+ {
+ inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_INPUT, i, TGSI_FILE_SAMPLER, i);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 1);
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleX = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleZ = TGSI_SWIZZLE_X;
+ inst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_X << i;
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* mul t0, t0, c0 ; Rescale texel to correct range */
+ inst = vl_inst3(TGSI_OPCODE_MUL, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_CONSTANT, 0);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /*
+ * tex2d t1, i3, s3 ; Read texel from first ref macroblock
+ * tex2d t2, i4, s4 ; Read texel from second ref macroblock
+ */
+ for (i = 0; i < 2; ++i)
+ {
+ inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, i + 1, TGSI_FILE_INPUT, i + 3, TGSI_FILE_SAMPLER, i + 3);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* lerp t1, c1.x, t1, t2 ; Blend past and future texels */
+ inst = vl_inst4(TGSI_OPCODE_LERP, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_CONSTANT, 1, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_TEMPORARY, 2);
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleX = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleZ = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleW = TGSI_SWIZZLE_X;
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
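+ /* With c1.x = 0.5 this averages the past and future references, i.e. the
+ usual bi-directional prediction for B macroblocks. */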
+
+ /* add o0, t0, t1 ; Add past/future ref and differential to form final output */
+ inst = vl_inst3(TGSI_OPCODE_ADD, TGSI_FILE_OUTPUT, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 1);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* end */
+ inst = vl_end();
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ fs.tokens = tokens;
+ mc->b_fs[0] = pipe->create_fs_state(pipe, &fs);
+ free(tokens);
+
+ return 0;
+}
+
+static int vlCreateFragmentShaderFieldBMB
+(
+ struct vlR16SnormBufferedMC *mc
+)
+{
+ const unsigned int max_tokens = 200;
+
+ struct pipe_context *pipe;
+ struct pipe_shader_state fs;
+ struct tgsi_token *tokens;
+ struct tgsi_header *header;
+
+ struct tgsi_full_declaration decl;
+ struct tgsi_full_instruction inst;
+
+ unsigned int ti;
+ unsigned int i;
+
+ assert(mc);
+
+ pipe = mc->pipe;
+ tokens = (struct tgsi_token*)malloc(max_tokens * sizeof(struct tgsi_token));
+
+ /* Version */
+ *(struct tgsi_version*)&tokens[0] = tgsi_build_version();
+ /* Header */
+ header = (struct tgsi_header*)&tokens[1];
+ *header = tgsi_build_header();
+ /* Processor */
+ *(struct tgsi_processor*)&tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_FRAGMENT, header);
+
+ ti = 3;
+
+ /*
+ * decl i0 ; Luma texcoords
+ * decl i1 ; Chroma Cb texcoords
+ * decl i2 ; Chroma Cr texcoords
+ * decl i3 ; First ref macroblock top field texcoords
+ * decl i4 ; First ref macroblock bottom field texcoords
+ * decl i5 ; Second ref macroblock top field texcoords
+ * decl i6 ; Second ref macroblock bottom field texcoords
+ * decl i7 ; Denormalized vertex pos
+ */
+ for (i = 0; i < 8; ++i)
+ {
+ decl = vl_decl_interpolated_input(TGSI_SEMANTIC_GENERIC, i + 1, i, i, TGSI_INTERPOLATE_LINEAR);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * decl c0 ; Scaling factor, rescales 16-bit snorm to 9-bit snorm
+ * decl c1 ; Constants 1/2 & 2 in .x, .y channels to use as weight to blend past and future texels
+ * ; and for Y-mod-2 top/bottom field selection
+ */
+ decl = vl_decl_constants(TGSI_SEMANTIC_GENERIC, 0, 0, 1);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /* decl o0 ; Fragment color */
+ decl = vl_decl_output(TGSI_SEMANTIC_COLOR, 0, 0, 0);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /* decl t0-t5 */
+ decl = vl_decl_temps(0, 5);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+
+ /*
+ * decl s0 ; Sampler for luma texture
+ * decl s1 ; Sampler for chroma Cb texture
+ * decl s2 ; Sampler for chroma Cr texture
+ * decl s3 ; Sampler for first ref surface texture
+ * decl s4 ; Sampler for second ref surface texture
+ */
+ for (i = 0; i < 5; ++i)
+ {
+ decl = vl_decl_samplers(i, i);
+ ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /*
+ * tex2d t1, i0, s0 ; Read texel from luma texture
+ * mov t0.x, t1.x ; Move luma sample into .x component
+ * tex2d t1, i1, s1 ; Read texel from chroma Cb texture
+ * mov t0.y, t1.x ; Move Cb sample into .y component
+ * tex2d t1, i2, s2 ; Read texel from chroma Cr texture
+ * mov t0.z, t1.x ; Move Cr sample into .z component
+ */
+ for (i = 0; i < 3; ++i)
+ {
+ inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_INPUT, i, TGSI_FILE_SAMPLER, i);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 1);
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleX = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleZ = TGSI_SWIZZLE_X;
+ inst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_X << i;
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* mul t0, t0, c0 ; Rescale texel to correct range */
+ inst = vl_inst3(TGSI_OPCODE_MUL, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_CONSTANT, 0);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* XXX: Pos values off by 0.5? */
+ /* sub t4, i7.y, c1.x ; Sub 0.5 from denormalized pos */
+ inst = vl_inst3(TGSI_OPCODE_SUB, TGSI_FILE_TEMPORARY, 4, TGSI_FILE_INPUT, 7, TGSI_FILE_CONSTANT, 1);
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleX = TGSI_SWIZZLE_Y;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_Y;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleZ = TGSI_SWIZZLE_Y;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleW = TGSI_SWIZZLE_Y;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleX = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleY = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleZ = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleW = TGSI_SWIZZLE_X;
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* mul t3, t4, c1.x ; Multiply pos Y-coord by 1/2 */
+ inst = vl_inst3(TGSI_OPCODE_MUL, TGSI_FILE_TEMPORARY, 3, TGSI_FILE_TEMPORARY, 4, TGSI_FILE_CONSTANT, 1);
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleX = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleY = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleZ = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleW = TGSI_SWIZZLE_X;
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* floor t3, t3 ; Get rid of fractional part */
+ inst = vl_inst2(TGSI_OPCODE_FLOOR, TGSI_FILE_TEMPORARY, 3, TGSI_FILE_TEMPORARY, 3);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* mul t3, t3, c1.y ; Multiply by 2 */
+ inst = vl_inst3(TGSI_OPCODE_MUL, TGSI_FILE_TEMPORARY, 3, TGSI_FILE_TEMPORARY, 3, TGSI_FILE_CONSTANT, 1);
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleX = TGSI_SWIZZLE_Y;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleY = TGSI_SWIZZLE_Y;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleZ = TGSI_SWIZZLE_Y;
+ inst.FullSrcRegisters[1].SrcRegister.SwizzleW = TGSI_SWIZZLE_Y;
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* sub t3, t4, t3 ; Subtract from original Y to get Y % 2 */
+ inst = vl_inst3(TGSI_OPCODE_SUB, TGSI_FILE_TEMPORARY, 3, TGSI_FILE_TEMPORARY, 4, TGSI_FILE_TEMPORARY, 3);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
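+ /* As in the field P-MB shader above, t3 = Y mod 2 selects between the top (0)
+ and bottom (1) field texels fetched below. */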
+
+ /*
+ * tex2d t1, i3, s3 ; Read texel from past ref macroblock top field
+ * tex2d t2, i4, s3 ; Read texel from past ref macroblock bottom field
+ */
+ for (i = 0; i < 2; ++i)
+ {
+ inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, i + 1, TGSI_FILE_INPUT, i + 3, TGSI_FILE_SAMPLER, 3);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* TODO: Move to conditional tex fetch on t3 instead of lerp */
+ /* lerp t1, t3, t1, t2 ; Choose between top and bottom fields based on Y % 2 */
+ inst = vl_inst4(TGSI_OPCODE_LERP, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_TEMPORARY, 3, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_TEMPORARY, 2);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /*
+ * tex2d t4, i5, s4 ; Read texel from future ref macroblock top field
+ * tex2d t5, i6, s4 ; Read texel from future ref macroblock bottom field
+ */
+ for (i = 0; i < 2; ++i)
+ {
+ inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, i + 4, TGSI_FILE_INPUT, i + 5, TGSI_FILE_SAMPLER, 4);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+ }
+
+ /* TODO: Move to conditional tex fetch on t3 instead of lerp */
+ /* lerp t2, t3, t4, t5 ; Choose between top and bottom fields based on Y % 2 */
+ inst = vl_inst4(TGSI_OPCODE_LERP, TGSI_FILE_TEMPORARY, 2, TGSI_FILE_TEMPORARY, 3, TGSI_FILE_TEMPORARY, 4, TGSI_FILE_TEMPORARY, 5);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* lerp t1, c1.x, t1, t2 ; Blend past and future texels */
+ inst = vl_inst4(TGSI_OPCODE_LERP, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_CONSTANT, 1, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_TEMPORARY, 2);
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleX = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleZ = TGSI_SWIZZLE_X;
+ inst.FullSrcRegisters[0].SrcRegister.SwizzleW = TGSI_SWIZZLE_X;
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* add o0, t0, t1 ; Add past/future ref and differential to form final output */
+ inst = vl_inst3(TGSI_OPCODE_ADD, TGSI_FILE_OUTPUT, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 1);
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ /* end */
+ inst = vl_end();
+ ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti);
+
+ fs.tokens = tokens;
+ mc->b_fs[1] = pipe->create_fs_state(pipe, &fs);
+ free(tokens);
+
+ return 0;
+}
diff --git a/src/gallium/state_trackers/g3dvl/vl_render.h b/src/gallium/state_trackers/g3dvl/vl_render.h
new file mode 100644
index 0000000000..166030b498
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_render.h
@@ -0,0 +1,38 @@
+#ifndef vl_render_h
+#define vl_render_h
+
+#include "vl_types.h"
+
+struct pipe_surface;
+
+struct vlRender
+{
+ int (*vlBegin)
+ (
+ struct vlRender *render
+ );
+
+ int (*vlRenderMacroBlocksMpeg2)
+ (
+ struct vlRender *render,
+ struct vlMpeg2MacroBlockBatch *batch,
+ struct vlSurface *surface
+ );
+
+ int (*vlEnd)
+ (
+ struct vlRender *render
+ );
+
+ int (*vlFlush)
+ (
+ struct vlRender *render
+ );
+
+ int (*vlDestroy)
+ (
+ struct vlRender *render
+ );
+};
+
+#endif
diff --git a/src/gallium/state_trackers/g3dvl/vl_screen.c b/src/gallium/state_trackers/g3dvl/vl_screen.c
new file mode 100644
index 0000000000..ade8643a66
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_screen.c
@@ -0,0 +1,115 @@
+#define VL_INTERNAL
+#include "vl_screen.h"
+#include <assert.h>
+#include <util/u_memory.h>
+
+int vlCreateScreen
+(
+ struct vlDisplay *display,
+ int screen,
+ struct pipe_screen *pscreen,
+ struct vlScreen **vl_screen
+)
+{
+ struct vlScreen *scrn;
+
+ assert(display);
+ assert(pscreen);
+ assert(vl_screen);
+
+ scrn = CALLOC_STRUCT(vlScreen);
+
+ if (!scrn)
+ return 1;
+
+ scrn->display = display;
+ scrn->ordinal = screen;
+ scrn->pscreen = pscreen;
+ *vl_screen = scrn;
+
+ return 0;
+}
+
+int vlDestroyScreen
+(
+ struct vlScreen *screen
+)
+{
+ assert(screen);
+
+ FREE(screen);
+
+ return 0;
+}
+
+struct vlDisplay* vlGetDisplay
+(
+ struct vlScreen *screen
+)
+{
+ assert(screen);
+
+ return screen->display;
+}
+
+struct pipe_screen* vlGetPipeScreen
+(
+ struct vlScreen *screen
+)
+{
+ assert(screen);
+
+ return screen->pscreen;
+}
+
+unsigned int vlGetMaxProfiles
+(
+ struct vlScreen *screen
+)
+{
+ assert(screen);
+
+ return vlProfileCount;
+}
+
+int vlQueryProfiles
+(
+ struct vlScreen *screen,
+ enum vlProfile *profiles
+)
+{
+ assert(screen);
+ assert(profiles);
+
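+ /* Callers presumably size the array via vlGetMaxProfiles(); two profiles are reported. */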
+ profiles[0] = vlProfileMpeg2Simple;
+ profiles[1] = vlProfileMpeg2Main;
+
+ return 0;
+}
+
+unsigned int vlGetMaxEntryPoints
+(
+ struct vlScreen *screen
+)
+{
+ assert(screen);
+
+ return vlEntryPointCount;
+}
+
+int vlQueryEntryPoints
+(
+ struct vlScreen *screen,
+ enum vlProfile profile,
+ enum vlEntryPoint *entry_points
+)
+{
+ assert(screen);
+ assert(entry_points);
+
+ entry_points[0] = vlEntryPointIDCT;
+ entry_points[1] = vlEntryPointMC;
+ entry_points[2] = vlEntryPointCSC;
+
+ return 0;
+}
diff --git a/src/gallium/state_trackers/g3dvl/vl_screen.h b/src/gallium/state_trackers/g3dvl/vl_screen.h
new file mode 100644
index 0000000000..98f3d429b6
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_screen.h
@@ -0,0 +1,63 @@
+#ifndef vl_screen_h
+#define vl_screen_h
+
+#include "vl_types.h"
+
+struct pipe_screen;
+
+#ifdef VL_INTERNAL
+struct vlScreen
+{
+ struct vlDisplay *display;
+ unsigned int ordinal;
+ struct pipe_screen *pscreen;
+};
+#endif
+
+int vlCreateScreen
+(
+ struct vlDisplay *display,
+ int screen,
+ struct pipe_screen *pscreen,
+ struct vlScreen **vl_screen
+);
+
+int vlDestroyScreen
+(
+ struct vlScreen *screen
+);
+
+struct vlDisplay* vlGetDisplay
+(
+ struct vlScreen *screen
+);
+
+struct pipe_screen* vlGetPipeScreen
+(
+ struct vlScreen *screen
+);
+
+unsigned int vlGetMaxProfiles
+(
+ struct vlScreen *screen
+);
+
+int vlQueryProfiles
+(
+ struct vlScreen *screen,
+ enum vlProfile *profiles
+);
+
+unsigned int vlGetMaxEntryPoints
+(
+ struct vlScreen *screen
+);
+
+int vlQueryEntryPoints
+(
+ struct vlScreen *screen,
+ enum vlProfile profile,
+ enum vlEntryPoint *entry_points
+);
+
+#endif
diff --git a/src/gallium/state_trackers/g3dvl/vl_shader_build.c b/src/gallium/state_trackers/g3dvl/vl_shader_build.c
new file mode 100644
index 0000000000..51f1721a33
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_shader_build.c
@@ -0,0 +1,215 @@
+#include "vl_shader_build.h"
+#include <assert.h>
+#include <tgsi/tgsi_parse.h>
+#include <tgsi/tgsi_build.h>
+
+struct tgsi_full_declaration vl_decl_input(unsigned int name, unsigned int index, unsigned int first, unsigned int last)
+{
+ struct tgsi_full_declaration decl = tgsi_default_full_declaration();
+
+ decl.Declaration.File = TGSI_FILE_INPUT;
+ decl.Declaration.Semantic = 1;
+ decl.Semantic.SemanticName = name;
+ decl.Semantic.SemanticIndex = index;
+ decl.DeclarationRange.First = first;
+ decl.DeclarationRange.Last = last;
+
+ return decl;
+}
+
+struct tgsi_full_declaration vl_decl_interpolated_input
+(
+ unsigned int name,
+ unsigned int index,
+ unsigned int first,
+ unsigned int last,
+ int interpolation
+)
+{
+ struct tgsi_full_declaration decl = tgsi_default_full_declaration();
+
+ assert
+ (
+ interpolation == TGSI_INTERPOLATE_CONSTANT ||
+ interpolation == TGSI_INTERPOLATE_LINEAR ||
+ interpolation == TGSI_INTERPOLATE_PERSPECTIVE
+ );
+
+ decl.Declaration.File = TGSI_FILE_INPUT;
+ decl.Declaration.Semantic = 1;
+ decl.Semantic.SemanticName = name;
+ decl.Semantic.SemanticIndex = index;
+ decl.Declaration.Interpolate = interpolation;
+ decl.DeclarationRange.First = first;
+ decl.DeclarationRange.Last = last;
+
+ return decl;
+}
+
+struct tgsi_full_declaration vl_decl_constants(unsigned int name, unsigned int index, unsigned int first, unsigned int last)
+{
+ struct tgsi_full_declaration decl = tgsi_default_full_declaration();
+
+ decl.Declaration.File = TGSI_FILE_CONSTANT;
+ decl.Declaration.Semantic = 1;
+ decl.Semantic.SemanticName = name;
+ decl.Semantic.SemanticIndex = index;
+ decl.DeclarationRange.First = first;
+ decl.DeclarationRange.Last = last;
+
+ return decl;
+}
+
+struct tgsi_full_declaration vl_decl_output(unsigned int name, unsigned int index, unsigned int first, unsigned int last)
+{
+ struct tgsi_full_declaration decl = tgsi_default_full_declaration();
+
+ decl.Declaration.File = TGSI_FILE_OUTPUT;
+ decl.Declaration.Semantic = 1;
+ decl.Semantic.SemanticName = name;
+ decl.Semantic.SemanticIndex = index;
+ decl.DeclarationRange.First = first;
+ decl.DeclarationRange.Last = last;
+
+ return decl;
+}
+
+struct tgsi_full_declaration vl_decl_temps(unsigned int first, unsigned int last)
+{
+ struct tgsi_full_declaration decl = tgsi_default_full_declaration();
+
+ decl.Declaration.File = TGSI_FILE_TEMPORARY;
+ decl.DeclarationRange.First = first;
+ decl.DeclarationRange.Last = last;
+
+ return decl;
+}
+
+struct tgsi_full_declaration vl_decl_samplers(unsigned int first, unsigned int last)
+{
+ struct tgsi_full_declaration decl = tgsi_default_full_declaration();
+
+ decl.Declaration.File = TGSI_FILE_SAMPLER;
+ decl.DeclarationRange.First = first;
+ decl.DeclarationRange.Last = last;
+
+ return decl;
+}
+
+struct tgsi_full_instruction vl_inst2
+(
+ int opcode,
+ enum tgsi_file_type dst_file,
+ unsigned int dst_index,
+ enum tgsi_file_type src_file,
+ unsigned int src_index
+)
+{
+ struct tgsi_full_instruction inst = tgsi_default_full_instruction();
+
+ inst.Instruction.Opcode = opcode;
+ inst.Instruction.NumDstRegs = 1;
+ inst.FullDstRegisters[0].DstRegister.File = dst_file;
+ inst.FullDstRegisters[0].DstRegister.Index = dst_index;
+ inst.Instruction.NumSrcRegs = 1;
+ inst.FullSrcRegisters[0].SrcRegister.File = src_file;
+ inst.FullSrcRegisters[0].SrcRegister.Index = src_index;
+
+ return inst;
+}
+
+struct tgsi_full_instruction vl_inst3
+(
+ int opcode,
+ enum tgsi_file_type dst_file,
+ unsigned int dst_index,
+ enum tgsi_file_type src1_file,
+ unsigned int src1_index,
+ enum tgsi_file_type src2_file,
+ unsigned int src2_index
+)
+{
+ struct tgsi_full_instruction inst = tgsi_default_full_instruction();
+
+ inst.Instruction.Opcode = opcode;
+ inst.Instruction.NumDstRegs = 1;
+ inst.FullDstRegisters[0].DstRegister.File = dst_file;
+ inst.FullDstRegisters[0].DstRegister.Index = dst_index;
+ inst.Instruction.NumSrcRegs = 2;
+ inst.FullSrcRegisters[0].SrcRegister.File = src1_file;
+ inst.FullSrcRegisters[0].SrcRegister.Index = src1_index;
+ inst.FullSrcRegisters[1].SrcRegister.File = src2_file;
+ inst.FullSrcRegisters[1].SrcRegister.Index = src2_index;
+
+ return inst;
+}
+
+struct tgsi_full_instruction vl_tex
+(
+ int tex,
+ enum tgsi_file_type dst_file,
+ unsigned int dst_index,
+ enum tgsi_file_type src1_file,
+ unsigned int src1_index,
+ enum tgsi_file_type src2_file,
+ unsigned int src2_index
+)
+{
+ struct tgsi_full_instruction inst = tgsi_default_full_instruction();
+
+ inst.Instruction.Opcode = TGSI_OPCODE_TEX;
+ inst.Instruction.NumDstRegs = 1;
+ inst.FullDstRegisters[0].DstRegister.File = dst_file;
+ inst.FullDstRegisters[0].DstRegister.Index = dst_index;
+ inst.Instruction.NumSrcRegs = 2;
+ inst.InstructionExtTexture.Texture = tex;
+ inst.FullSrcRegisters[0].SrcRegister.File = src1_file;
+ inst.FullSrcRegisters[0].SrcRegister.Index = src1_index;
+ inst.FullSrcRegisters[1].SrcRegister.File = src2_file;
+ inst.FullSrcRegisters[1].SrcRegister.Index = src2_index;
+
+ return inst;
+}
+
+struct tgsi_full_instruction vl_inst4
+(
+ int opcode,
+ enum tgsi_file_type dst_file,
+ unsigned int dst_index,
+ enum tgsi_file_type src1_file,
+ unsigned int src1_index,
+ enum tgsi_file_type src2_file,
+ unsigned int src2_index,
+ enum tgsi_file_type src3_file,
+ unsigned int src3_index
+)
+{
+ struct tgsi_full_instruction inst = tgsi_default_full_instruction();
+
+ inst.Instruction.Opcode = opcode;
+ inst.Instruction.NumDstRegs = 1;
+ inst.FullDstRegisters[0].DstRegister.File = dst_file;
+ inst.FullDstRegisters[0].DstRegister.Index = dst_index;
+ inst.Instruction.NumSrcRegs = 3;
+ inst.FullSrcRegisters[0].SrcRegister.File = src1_file;
+ inst.FullSrcRegisters[0].SrcRegister.Index = src1_index;
+ inst.FullSrcRegisters[1].SrcRegister.File = src2_file;
+ inst.FullSrcRegisters[1].SrcRegister.Index = src2_index;
+ inst.FullSrcRegisters[2].SrcRegister.File = src3_file;
+ inst.FullSrcRegisters[2].SrcRegister.Index = src3_index;
+
+ return inst;
+}
+
+struct tgsi_full_instruction vl_end(void)
+{
+ struct tgsi_full_instruction inst = tgsi_default_full_instruction();
+
+ inst.Instruction.Opcode = TGSI_OPCODE_END;
+ inst.Instruction.NumDstRegs = 0;
+ inst.Instruction.NumSrcRegs = 0;
+
+ return inst;
+}
diff --git a/src/gallium/state_trackers/g3dvl/vl_shader_build.h b/src/gallium/state_trackers/g3dvl/vl_shader_build.h
new file mode 100644
index 0000000000..dc615cb156
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_shader_build.h
@@ -0,0 +1,61 @@
+#ifndef vl_shader_build_h
+#define vl_shader_build_h
+
+#include <pipe/p_shader_tokens.h>
+
+struct tgsi_full_declaration vl_decl_input(unsigned int name, unsigned int index, unsigned int first, unsigned int last);
+struct tgsi_full_declaration vl_decl_interpolated_input
+(
+ unsigned int name,
+ unsigned int index,
+ unsigned int first,
+ unsigned int last,
+ int interpolation
+);
+struct tgsi_full_declaration vl_decl_constants(unsigned int name, unsigned int index, unsigned int first, unsigned int last);
+struct tgsi_full_declaration vl_decl_output(unsigned int name, unsigned int index, unsigned int first, unsigned int last);
+struct tgsi_full_declaration vl_decl_temps(unsigned int first, unsigned int last);
+struct tgsi_full_declaration vl_decl_samplers(unsigned int first, unsigned int last);
+struct tgsi_full_instruction vl_inst2
+(
+ int opcode,
+ enum tgsi_file_type dst_file,
+ unsigned int dst_index,
+ enum tgsi_file_type src_file,
+ unsigned int src_index
+);
+struct tgsi_full_instruction vl_inst3
+(
+ int opcode,
+ enum tgsi_file_type dst_file,
+ unsigned int dst_index,
+ enum tgsi_file_type src1_file,
+ unsigned int src1_index,
+ enum tgsi_file_type src2_file,
+ unsigned int src2_index
+);
+struct tgsi_full_instruction vl_tex
+(
+ int tex,
+ enum tgsi_file_type dst_file,
+ unsigned int dst_index,
+ enum tgsi_file_type src1_file,
+ unsigned int src1_index,
+ enum tgsi_file_type src2_file,
+ unsigned int src2_index
+);
+struct tgsi_full_instruction vl_inst4
+(
+ int opcode,
+ enum tgsi_file_type dst_file,
+ unsigned int dst_index,
+ enum tgsi_file_type src1_file,
+ unsigned int src1_index,
+ enum tgsi_file_type src2_file,
+ unsigned int src2_index,
+ enum tgsi_file_type src3_file,
+ unsigned int src3_index
+);
+struct tgsi_full_instruction vl_end(void);
+
+#endif
diff --git a/src/gallium/state_trackers/g3dvl/vl_surface.c b/src/gallium/state_trackers/g3dvl/vl_surface.c
new file mode 100644
index 0000000000..7f60852cae
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_surface.c
@@ -0,0 +1,242 @@
+#define VL_INTERNAL
+#include "vl_surface.h"
+#include <assert.h>
+#include <string.h>
+#include <pipe/p_screen.h>
+#include <pipe/p_state.h>
+#include <pipe/p_inlines.h>
+#include <util/u_memory.h>
+#include <vl_winsys.h>
+#include "vl_screen.h"
+#include "vl_context.h"
+#include "vl_render.h"
+#include "vl_csc.h"
+#include "vl_util.h"
+
+int vlCreateSurface
+(
+ struct vlScreen *screen,
+ unsigned int width,
+ unsigned int height,
+ enum vlFormat format,
+ struct vlSurface **surface
+)
+{
+ struct vlSurface *sfc;
+ struct pipe_texture template;
+
+ assert(screen);
+ assert(surface);
+
+ sfc = CALLOC_STRUCT(vlSurface);
+
+ if (!sfc)
+ return 1;
+
+ sfc->screen = screen;
+ sfc->width = width;
+ sfc->height = height;
+ sfc->format = format;
+
+ memset(&template, 0, sizeof(struct pipe_texture));
+ template.target = PIPE_TEXTURE_2D;
+ template.format = PIPE_FORMAT_A8R8G8B8_UNORM;
+ template.last_level = 0;
+ template.width[0] = vlRoundUpPOT(sfc->width);
+ template.height[0] = vlRoundUpPOT(sfc->height);
+ template.depth[0] = 1;
+ pf_get_block(template.format, &template.block);
+ template.tex_usage = PIPE_TEXTURE_USAGE_SAMPLER | PIPE_TEXTURE_USAGE_RENDER_TARGET;
+
+ sfc->texture = vlGetPipeScreen(screen)->texture_create(vlGetPipeScreen(screen), &template);
+
+ if (!sfc->texture)
+ {
+ FREE(sfc);
+ return 1;
+ }
+
+ *surface = sfc;
+
+ return 0;
+}
+
+int vlDestroySurface
+(
+ struct vlSurface *surface
+)
+{
+ assert(surface);
+
+ pipe_texture_reference(&surface->texture, NULL);
+ FREE(surface);
+
+ return 0;
+}
+
+int vlRenderMacroBlocksMpeg2
+(
+ struct vlMpeg2MacroBlockBatch *batch,
+ struct vlSurface *surface
+)
+{
+ assert(batch);
+ assert(surface);
+ assert(surface->context);
+
+ surface->context->render->vlBegin(surface->context->render);
+
+ surface->context->render->vlRenderMacroBlocksMpeg2
+ (
+ surface->context->render,
+ batch,
+ surface
+ );
+
+ surface->context->render->vlEnd(surface->context->render);
+
+ return 0;
+}
+
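+/* Display path: flush any pending macroblock rendering into the surface,
+ * let the CSC stage scale/convert it into a frame buffer sized to the
+ * drawable, then push that frame buffer to the screen with
+ * flush_frontbuffer(). disp_fence records the display flush so
+ * vlSurfaceGetStatus() can report vlResourceStatusDisplaying.
+ */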
+int vlPutPicture
+(
+ struct vlSurface *surface,
+ vlNativeDrawable drawable,
+ int srcx,
+ int srcy,
+ int srcw,
+ int srch,
+ int destx,
+ int desty,
+ int destw,
+ int desth,
+ int drawable_w,
+ int drawable_h,
+ enum vlPictureType picture_type
+)
+{
+ struct vlCSC *csc;
+ struct pipe_context *pipe;
+
+ assert(surface);
+ assert(surface->context);
+
+ surface->context->render->vlFlush(surface->context->render);
+
+ csc = surface->context->csc;
+ pipe = surface->context->pipe;
+
+ csc->vlResizeFrameBuffer(csc, drawable_w, drawable_h);
+
+ csc->vlBegin(csc);
+
+ csc->vlPutPicture
+ (
+ csc,
+ surface,
+ srcx,
+ srcy,
+ srcw,
+ srch,
+ destx,
+ desty,
+ destw,
+ desth,
+ picture_type
+ );
+
+ csc->vlEnd(csc);
+
+ pipe->flush(pipe, PIPE_FLUSH_RENDER_CACHE, &surface->disp_fence);
+
+ bind_pipe_drawable(pipe, drawable);
+
+ pipe->screen->flush_frontbuffer
+ (
+ pipe->screen,
+ csc->vlGetFrameBuffer(csc),
+ pipe->priv
+ );
+
+ return 0;
+}
+
+int vlSurfaceGetStatus
+(
+ struct vlSurface *surface,
+ enum vlResourceStatus *status
+)
+{
+ assert(surface);
+ assert(surface->context);
+ assert(status);
+
+ if (surface->render_fence && !surface->context->pipe->screen->fence_signalled(surface->context->pipe->screen, surface->render_fence, 0))
+ {
+ *status = vlResourceStatusRendering;
+ return 0;
+ }
+
+ if (surface->disp_fence && !surface->context->pipe->screen->fence_signalled(surface->context->pipe->screen, surface->disp_fence, 0))
+ {
+ *status = vlResourceStatusDisplaying;
+ return 0;
+ }
+
+ *status = vlResourceStatusFree;
+
+ return 0;
+}
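+
+/* Illustrative sketch only (not part of the original change): a caller
+ * would typically poll vlSurfaceGetStatus() like this before reusing a
+ * surface; vlSurfaceSync() is the blocking alternative for the render fence.
+ */
+#if 0
+static void vl_example_wait_until_free(struct vlSurface *surface)
+{
+ enum vlResourceStatus status;
+
+ do
+ {
+ vlSurfaceGetStatus(surface, &status);
+ } while (status != vlResourceStatusFree);
+}
+#endif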
+
+int vlSurfaceFlush
+(
+ struct vlSurface *surface
+)
+{
+ assert(surface);
+ assert(surface->context);
+
+ surface->context->render->vlFlush(surface->context->render);
+
+ return 0;
+}
+
+int vlSurfaceSync
+(
+ struct vlSurface *surface
+)
+{
+ assert(surface);
+ assert(surface->context);
+ assert(surface->render_fence);
+
+ surface->context->pipe->screen->fence_finish(surface->context->pipe->screen, surface->render_fence, 0);
+
+ return 0;
+}
+
+struct vlScreen* vlSurfaceGetScreen
+(
+ struct vlSurface *surface
+)
+{
+ assert(surface);
+
+ return surface->screen;
+}
+
+struct vlContext* vlBindToContext
+(
+ struct vlSurface *surface,
+ struct vlContext *context
+)
+{
+ struct vlContext *old;
+
+ assert(surface);
+
+ old = surface->context;
+ surface->context = context;
+
+ return old;
+}
diff --git a/src/gallium/state_trackers/g3dvl/vl_surface.h b/src/gallium/state_trackers/g3dvl/vl_surface.h
new file mode 100644
index 0000000000..133e1515ef
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_surface.h
@@ -0,0 +1,86 @@
+#ifndef vl_surface_h
+#define vl_surface_h
+
+#include "vl_types.h"
+
+#ifdef VL_INTERNAL
+struct pipe_texture;
+
+struct vlSurface
+{
+ struct vlScreen *screen;
+ struct vlContext *context;
+ unsigned int width;
+ unsigned int height;
+ enum vlFormat format;
+ struct pipe_texture *texture;
+ struct pipe_fence_handle *render_fence;
+ struct pipe_fence_handle *disp_fence;
+};
+#endif
+
+int vlCreateSurface
+(
+ struct vlScreen *screen,
+ unsigned int width,
+ unsigned int height,
+ enum vlFormat format,
+ struct vlSurface **surface
+);
+
+int vlDestroySurface
+(
+ struct vlSurface *surface
+);
+
+int vlRenderMacroBlocksMpeg2
+(
+ struct vlMpeg2MacroBlockBatch *batch,
+ struct vlSurface *surface
+);
+
+int vlPutPicture
+(
+ struct vlSurface *surface,
+ vlNativeDrawable drawable,
+ int srcx,
+ int srcy,
+ int srcw,
+ int srch,
+ int destx,
+ int desty,
+ int destw,
+ int desth,
+ int drawable_w,
+ int drawable_h,
+ enum vlPictureType picture_type
+);
+
+int vlSurfaceGetStatus
+(
+ struct vlSurface *surface,
+ enum vlResourceStatus *status
+);
+
+int vlSurfaceFlush
+(
+ struct vlSurface *surface
+);
+
+int vlSurfaceSync
+(
+ struct vlSurface *surface
+);
+
+struct vlScreen* vlSurfaceGetScreen
+(
+ struct vlSurface *surface
+);
+
+struct vlContext* vlBindToContext
+(
+ struct vlSurface *surface,
+ struct vlContext *context
+);
+
+#endif
diff --git a/src/gallium/state_trackers/g3dvl/vl_types.h b/src/gallium/state_trackers/g3dvl/vl_types.h
new file mode 100644
index 0000000000..274e1f7437
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_types.h
@@ -0,0 +1,115 @@
+#ifndef vl_types_h
+#define vl_types_h
+
+#if 1 /*#ifdef X11*/
+#include <X11/Xlib.h>
+
+typedef Display* vlNativeDisplay;
+typedef Drawable vlNativeDrawable;
+#endif
+
+struct vlDisplay;
+struct vlScreen;
+struct vlContext;
+struct vlSurface;
+
+enum vlResourceStatus
+{
+ vlResourceStatusFree,
+ vlResourceStatusRendering,
+ vlResourceStatusDisplaying
+};
+
+enum vlProfile
+{
+ vlProfileMpeg2Simple,
+ vlProfileMpeg2Main,
+
+ vlProfileCount
+};
+
+enum vlEntryPoint
+{
+ vlEntryPointIDCT,
+ vlEntryPointMC,
+ vlEntryPointCSC,
+
+ vlEntryPointCount
+};
+
+enum vlFormat
+{
+ vlFormatYCbCr420,
+ vlFormatYCbCr422,
+ vlFormatYCbCr444
+};
+
+enum vlPictureType
+{
+ vlPictureTypeTopField,
+ vlPictureTypeBottomField,
+ vlPictureTypeFrame
+};
+
+enum vlMotionType
+{
+ vlMotionTypeField,
+ vlMotionTypeFrame,
+ vlMotionTypeDualPrime,
+ vlMotionType16x8
+};
+
+enum vlFieldOrder
+{
+ vlFieldOrderFirst,
+ vlFieldOrderSecond
+};
+
+enum vlDCTType
+{
+ vlDCTTypeFrameCoded,
+ vlDCTTypeFieldCoded
+};
+
+struct vlVertex2f
+{
+ float x, y;
+};
+
+struct vlVertex4f
+{
+ float x, y, z, w;
+};
+
+enum vlMacroBlockType
+{
+ vlMacroBlockTypeIntra,
+ vlMacroBlockTypeFwdPredicted,
+ vlMacroBlockTypeBkwdPredicted,
+ vlMacroBlockTypeBiPredicted,
+
+ vlNumMacroBlockTypes
+};
+
+struct vlMpeg2MacroBlock
+{
+ unsigned int mbx, mby;
+ enum vlMacroBlockType mb_type;
+ enum vlMotionType mo_type;
+ enum vlDCTType dct_type;
+ int PMV[2][2][2];
+ unsigned int cbp;
+ short *blocks;
+};
+
+struct vlMpeg2MacroBlockBatch
+{
+ struct vlSurface *past_surface;
+ struct vlSurface *future_surface;
+ enum vlPictureType picture_type;
+ enum vlFieldOrder field_order;
+ unsigned int num_macroblocks;
+ struct vlMpeg2MacroBlock *macroblocks;
+};
+
+#endif
diff --git a/src/gallium/state_trackers/g3dvl/vl_util.c b/src/gallium/state_trackers/g3dvl/vl_util.c
new file mode 100644
index 0000000000..50aa9af66f
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_util.c
@@ -0,0 +1,16 @@
+#include "vl_util.h"
+#include <assert.h>
+
+unsigned int vlRoundUpPOT(unsigned int x)
+{
+ unsigned int i;
+
+ assert(x > 0);
+
+ --x;
+
+ for (i = 1; i < sizeof(unsigned int) * 8; i <<= 1)
+ x |= x >> i;
+
+ return x + 1;
+}
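+
+/* Worked example of the bit-smearing loop above: for x = 720 (0x2D0) the
+ * decrement gives 0x2CF and the shift/or cascade fills in every bit below
+ * the highest set bit, yielding 0x3FF; adding 1 returns 0x400 (1024).
+ * Values that are already powers of two are returned unchanged.
+ */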
diff --git a/src/gallium/state_trackers/g3dvl/vl_util.h b/src/gallium/state_trackers/g3dvl/vl_util.h
new file mode 100644
index 0000000000..bc98e79df4
--- /dev/null
+++ b/src/gallium/state_trackers/g3dvl/vl_util.h
@@ -0,0 +1,6 @@
+#ifndef vl_util_h
+#define vl_util_h
+
+unsigned int vlRoundUpPOT(unsigned int x);
+
+#endif
diff --git a/src/gallium/state_trackers/glx/Makefile b/src/gallium/state_trackers/glx/Makefile
new file mode 100644
index 0000000000..f779035763
--- /dev/null
+++ b/src/gallium/state_trackers/glx/Makefile
@@ -0,0 +1,25 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+
+SUBDIRS = xlib
+
+
+default: subdirs
+
+
+subdirs:
+ @for dir in $(SUBDIRS) ; do \
+ if [ -d $$dir ] ; then \
+ (cd $$dir && $(MAKE)) || exit 1 ; \
+ fi \
+ done
+
+
+clean:
+ rm -f `find . -name \*.[oa]`
+ rm -f `find . -name depend`
+
+
+# Dummy install target
+install:
diff --git a/src/gallium/state_trackers/glx/xlib/Makefile b/src/gallium/state_trackers/glx/xlib/Makefile
new file mode 100644
index 0000000000..7b2adc62c3
--- /dev/null
+++ b/src/gallium/state_trackers/glx/xlib/Makefile
@@ -0,0 +1,16 @@
+TOP = ../../../../..
+include $(TOP)/configs/current
+
+LIBNAME = xlib
+
+LIBRARY_INCLUDES = \
+ -I$(TOP)/include \
+ -I$(TOP)/src/mesa
+
+C_SOURCES = \
+ glx_api.c \
+ glx_getproc.c \
+ glx_usefont.c \
+ xm_api.c
+
+include ../../../Makefile.template
diff --git a/src/gallium/state_trackers/glx/xlib/SConscript b/src/gallium/state_trackers/glx/xlib/SConscript
new file mode 100644
index 0000000000..fa96df357d
--- /dev/null
+++ b/src/gallium/state_trackers/glx/xlib/SConscript
@@ -0,0 +1,27 @@
+#######################################################################
+# SConscript for xlib state_tracker
+
+Import('*')
+
+if env['platform'] == 'linux' \
+ and 'mesa' in env['statetrackers']:
+
+ env = env.Clone()
+
+ env.Append(CPPPATH = [
+ '#/src/mesa',
+ '#/src/mesa/main',
+ ])
+
+ env.Append(CPPDEFINES = ['USE_XSHM'])
+
+ st_xlib = env.ConvenienceLibrary(
+ target = 'st_xlib',
+ source = [
+ 'glx_api.c',
+ 'glx_getproc.c',
+ 'glx_usefont.c',
+ 'xm_api.c',
+ ]
+ )
+ Export('st_xlib')
diff --git a/src/gallium/state_trackers/glx/xlib/glx_api.c b/src/gallium/state_trackers/glx/xlib/glx_api.c
new file mode 100644
index 0000000000..d1a98f8991
--- /dev/null
+++ b/src/gallium/state_trackers/glx/xlib/glx_api.c
@@ -0,0 +1,2629 @@
+/*
+ * Mesa 3-D graphics library
+ * Version: 7.6
+ *
+ * Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
+ * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/**
+ * "Fake" GLX API implemented in terms of the XMesa*() functions.
+ */
+
+
+
+#define GLX_GLXEXT_PROTOTYPES
+#include "GL/glx.h"
+
+#include "xm_api.h"
+#include "main/context.h"
+#include "main/config.h"
+#include "main/macros.h"
+#include "main/imports.h"
+#include "main/version.h"
+#include "state_tracker/st_context.h"
+#include "state_tracker/st_public.h"
+
+
+/* This indicates the client-side GLX API and GLX encoder version. */
+#define CLIENT_MAJOR_VERSION 1
+#define CLIENT_MINOR_VERSION 4 /* but don't have 1.3's pbuffers, etc yet */
+
+/* This indicates the server-side GLX decoder version.
+ * GLX 1.4 indicates OpenGL 1.3 support
+ */
+#define SERVER_MAJOR_VERSION 1
+#define SERVER_MINOR_VERSION 4
+
+/* This is appended onto the glXGetClient/ServerString version strings. */
+#define MESA_GLX_VERSION "Mesa " MESA_VERSION_STRING
+
+/* Who implemented this GLX? */
+#define VENDOR "Brian Paul"
+
+#define EXTENSIONS \
+ "GLX_MESA_copy_sub_buffer " \
+ "GLX_MESA_pixmap_colormap " \
+ "GLX_MESA_release_buffers " \
+ "GLX_ARB_get_proc_address " \
+ "GLX_EXT_texture_from_pixmap " \
+ "GLX_EXT_visual_info " \
+ "GLX_EXT_visual_rating " \
+ /*"GLX_SGI_video_sync "*/ \
+ "GLX_SGIX_fbconfig " \
+ "GLX_SGIX_pbuffer "
+
+#define DEFAULT_DIRECT GL_TRUE
+
+
+
+/**
+ * The GLXContext typedef is defined as a pointer to this structure.
+ */
+struct __GLXcontextRec
+{
+ Display *currentDpy;
+ GLboolean isDirect;
+ GLXDrawable currentDrawable;
+ GLXDrawable currentReadable;
+ XID xid;
+
+ XMesaContext xmesaContext;
+};
+
+
+
+static pipe_tsd ContextTSD;
+
+/** Set current context for calling thread */
+static void
+SetCurrentContext(GLXContext c)
+{
+ pipe_tsd_set(&ContextTSD, c);
+}
+
+/** Get current context for calling thread */
+static GLXContext
+GetCurrentContext(void)
+{
+ return pipe_tsd_get(&ContextTSD);
+}
+
+
+
+/**********************************************************************/
+/*** GLX Visual Code ***/
+/**********************************************************************/
+
+#define DONT_CARE -1
+
+
+static XMesaVisual *VisualTable = NULL;
+static int NumVisuals = 0;
+
+
+
+/* Macro to handle c_class vs class field name in XVisualInfo struct */
+#if defined(__cplusplus) || defined(c_plusplus)
+#define CLASS c_class
+#else
+#define CLASS class
+#endif
+
+
+
+/*
+ * Test if the given XVisualInfo is usable for Mesa rendering.
+ */
+static GLboolean
+is_usable_visual( XVisualInfo *vinfo )
+{
+ switch (vinfo->CLASS) {
+ case StaticGray:
+ case GrayScale:
+ /* Any StaticGray/GrayScale visual works in RGB or CI mode */
+ return GL_TRUE;
+ case StaticColor:
+ case PseudoColor:
+ /* Any StaticColor/PseudoColor visual of at least 4 bits */
+ if (vinfo->depth>=4) {
+ return GL_TRUE;
+ }
+ else {
+ return GL_FALSE;
+ }
+ case TrueColor:
+ case DirectColor:
+ /* Any depth of TrueColor or DirectColor works in RGB mode */
+ return GL_TRUE;
+ default:
+ /* This should never happen */
+ return GL_FALSE;
+ }
+}
+
+
+/*
+ * Given an XVisualInfo and RGB, Double, and Depth buffer flags, save the
+ * configuration in our list of GLX visuals.
+ */
+static XMesaVisual
+save_glx_visual( Display *dpy, XVisualInfo *vinfo,
+ GLboolean rgbFlag, GLboolean alphaFlag, GLboolean dbFlag,
+ GLboolean stereoFlag,
+ GLint depth_size, GLint stencil_size,
+ GLint accumRedSize, GLint accumGreenSize,
+ GLint accumBlueSize, GLint accumAlphaSize,
+ GLint level, GLint numAuxBuffers )
+{
+ GLboolean ximageFlag = GL_TRUE;
+ XMesaVisual xmvis;
+ GLint i;
+ GLboolean comparePointers;
+
+ if (dbFlag) {
+ /* Check if the MESA_BACK_BUFFER env var is set */
+ char *backbuffer = _mesa_getenv("MESA_BACK_BUFFER");
+ if (backbuffer) {
+ if (backbuffer[0]=='p' || backbuffer[0]=='P') {
+ ximageFlag = GL_FALSE;
+ }
+ else if (backbuffer[0]=='x' || backbuffer[0]=='X') {
+ ximageFlag = GL_TRUE;
+ }
+ else {
+ _mesa_warning(NULL, "Mesa: invalid value for MESA_BACK_BUFFER environment variable, using an XImage.");
+ }
+ }
+ }
+
+ if (stereoFlag) {
+ /* stereo not supported */
+ return NULL;
+ }
+
+ if (stencil_size > 0 && depth_size > 0)
+ depth_size = 24;
+
+ /* Comparing IDs uses less memory but sometimes fails. */
+ /* XXX revisit this after 3.0 is finished. */
+ if (_mesa_getenv("MESA_GLX_VISUAL_HACK"))
+ comparePointers = GL_TRUE;
+ else
+ comparePointers = GL_FALSE;
+
+ /* Force the visual to have an alpha channel */
+ if (rgbFlag && _mesa_getenv("MESA_GLX_FORCE_ALPHA"))
+ alphaFlag = GL_TRUE;
+
+ /* First check if a matching visual is already in the list */
+ for (i=0; i<NumVisuals; i++) {
+ XMesaVisual v = VisualTable[i];
+ if (v->display == dpy
+ && v->mesa_visual.level == level
+ && v->mesa_visual.numAuxBuffers == numAuxBuffers
+ && v->ximage_flag == ximageFlag
+ && v->mesa_visual.rgbMode == rgbFlag
+ && v->mesa_visual.doubleBufferMode == dbFlag
+ && v->mesa_visual.stereoMode == stereoFlag
+ && (v->mesa_visual.alphaBits > 0) == alphaFlag
+ && (v->mesa_visual.depthBits >= depth_size || depth_size == 0)
+ && (v->mesa_visual.stencilBits >= stencil_size || stencil_size == 0)
+ && (v->mesa_visual.accumRedBits >= accumRedSize || accumRedSize == 0)
+ && (v->mesa_visual.accumGreenBits >= accumGreenSize || accumGreenSize == 0)
+ && (v->mesa_visual.accumBlueBits >= accumBlueSize || accumBlueSize == 0)
+ && (v->mesa_visual.accumAlphaBits >= accumAlphaSize || accumAlphaSize == 0)) {
+ /* now either compare XVisualInfo pointers or visual IDs */
+ if ((!comparePointers && v->visinfo->visualid == vinfo->visualid)
+ || (comparePointers && v->vishandle == vinfo)) {
+ return v;
+ }
+ }
+ }
+
+ /* Create a new visual and add it to the list. */
+
+ xmvis = XMesaCreateVisual( dpy, vinfo, rgbFlag, alphaFlag, dbFlag,
+ stereoFlag, ximageFlag,
+ depth_size, stencil_size,
+ accumRedSize, accumGreenSize,
+ accumBlueSize, accumAlphaSize, 0, level,
+ GLX_NONE_EXT );
+ if (xmvis) {
+ /* Save a copy of the pointer now so we can find this visual again
+ * if we need to search for it in find_glx_visual().
+ */
+ xmvis->vishandle = vinfo;
+ /* Allocate more space for additional visual */
+ VisualTable = (XMesaVisual *) _mesa_realloc( VisualTable,
+ sizeof(XMesaVisual) * NumVisuals,
+ sizeof(XMesaVisual) * (NumVisuals + 1));
+ /* add xmvis to the list */
+ VisualTable[NumVisuals] = xmvis;
+ NumVisuals++;
+ /* XXX minor hack, because XMesaCreateVisual doesn't support an
+ * aux buffers parameter.
+ */
+ xmvis->mesa_visual.numAuxBuffers = numAuxBuffers;
+ }
+ return xmvis;
+}
+
+
+/**
+ * Return the default number of bits for the Z buffer.
+ * If defined, use the MESA_GLX_DEPTH_BITS env var value.
+ * Otherwise, use the DEFAULT_SOFTWARE_DEPTH_BITS constant.
+ * XXX probably do the same thing for stencil, accum, etc.
+ */
+static GLint
+default_depth_bits(void)
+{
+ int zBits;
+ const char *zEnv = _mesa_getenv("MESA_GLX_DEPTH_BITS");
+ if (zEnv)
+ zBits = _mesa_atoi(zEnv);
+ else
+ zBits = DEFAULT_SOFTWARE_DEPTH_BITS;
+ return zBits;
+}
+
+static GLint
+default_alpha_bits(void)
+{
+ int aBits;
+ const char *aEnv = _mesa_getenv("MESA_GLX_ALPHA_BITS");
+ if (aEnv)
+ aBits = _mesa_atoi(aEnv);
+ else
+ aBits = 0;
+ return aBits;
+}
+
+static GLint
+default_accum_bits(void)
+{
+ return 16;
+}
+
+
+
+/*
+ * Create a GLX visual from a regular XVisualInfo.
+ * This is called when Fake GLX is given an XVisualInfo which wasn't
+ * returned by glXChooseVisual. Since this is the first time we're
+ * considering this visual we'll take a guess at reasonable values
+ * for depth buffer size, stencil size, accum size, etc.
+ * This is the best we can do with a client-side emulation of GLX.
+ */
+static XMesaVisual
+create_glx_visual( Display *dpy, XVisualInfo *visinfo )
+{
+ GLint zBits = default_depth_bits();
+ GLint accBits = default_accum_bits();
+ GLboolean alphaFlag = default_alpha_bits() > 0;
+
+ if (is_usable_visual( visinfo )) {
+ /* Configure this visual as RGB, double-buffered, depth-buffered. */
+ /* This is surely wrong for some people's needs but what else */
+ /* can be done? They should use glXChooseVisual(). */
+ return save_glx_visual( dpy, visinfo,
+ GL_TRUE, /* rgb */
+ alphaFlag, /* alpha */
+ GL_TRUE, /* double */
+ GL_FALSE, /* stereo */
+ zBits,
+ STENCIL_BITS,
+ accBits, /* r */
+ accBits, /* g */
+ accBits, /* b */
+ accBits, /* a */
+ 0, /* level */
+ 0 /* numAux */
+ );
+ }
+ else {
+ _mesa_warning(NULL, "Mesa: error in glXCreateContext: bad visual\n");
+ return NULL;
+ }
+}
+
+
+
+/*
+ * Find the GLX visual associated with an XVisualInfo.
+ */
+static XMesaVisual
+find_glx_visual( Display *dpy, XVisualInfo *vinfo )
+{
+ int i;
+
+ /* try to match visual id */
+ for (i=0;i<NumVisuals;i++) {
+ if (VisualTable[i]->display==dpy
+ && VisualTable[i]->visinfo->visualid == vinfo->visualid) {
+ return VisualTable[i];
+ }
+ }
+
+ /* if that fails, try to match pointers */
+ for (i=0;i<NumVisuals;i++) {
+ if (VisualTable[i]->display==dpy && VisualTable[i]->vishandle==vinfo) {
+ return VisualTable[i];
+ }
+ }
+
+ return NULL;
+}
+
+
+/**
+ * Try to get an X visual which matches the given arguments.
+ */
+static XVisualInfo *
+get_visual( Display *dpy, int scr, unsigned int depth, int xclass )
+{
+ XVisualInfo temp, *vis;
+ long mask;
+ int n;
+ unsigned int default_depth;
+ int default_class;
+
+ mask = VisualScreenMask | VisualDepthMask | VisualClassMask;
+ temp.screen = scr;
+ temp.depth = depth;
+ temp.CLASS = xclass;
+
+ default_depth = DefaultDepth(dpy,scr);
+ default_class = DefaultVisual(dpy,scr)->CLASS;
+
+ if (depth==default_depth && xclass==default_class) {
+ /* try to get root window's visual */
+ temp.visualid = DefaultVisual(dpy,scr)->visualid;
+ mask |= VisualIDMask;
+ }
+
+ vis = XGetVisualInfo( dpy, mask, &temp, &n );
+
+ /* In case bits/pixel > 24, make sure color channels are still <=8 bits.
+ * An SGI Infinite Reality system, for example, can have 30bpp pixels:
+ * 10 bits per color channel. Mesa's limited to a max of 8 bits/channel.
+ */
+ if (vis && depth > 24 && (xclass==TrueColor || xclass==DirectColor)) {
+ if (_mesa_bitcount((GLuint) vis->red_mask ) <= 8 &&
+ _mesa_bitcount((GLuint) vis->green_mask) <= 8 &&
+ _mesa_bitcount((GLuint) vis->blue_mask ) <= 8) {
+ return vis;
+ }
+ else {
+ XFree((void *) vis);
+ return NULL;
+ }
+ }
+
+ return vis;
+}
+
+
+/*
+ * Retrieve the value of the given environment variable and find
+ * the X visual which matches it.
+ * Input: dpy - the display
+ * screen - the screen number
+ * varname - the name of the environment variable
+ * Return: an XVisualInfo pointer, or NULL on error.
+ */
+static XVisualInfo *
+get_env_visual(Display *dpy, int scr, const char *varname)
+{
+ char value[100], type[100];
+ int depth, xclass = -1;
+ XVisualInfo *vis;
+
+ if (!_mesa_getenv( varname )) {
+ return NULL;
+ }
+
+ _mesa_strncpy( value, _mesa_getenv(varname), 100 );
+ value[99] = 0;
+
+ sscanf( value, "%s %d", type, &depth );
+
+ if (_mesa_strcmp(type,"TrueColor")==0) xclass = TrueColor;
+ else if (_mesa_strcmp(type,"DirectColor")==0) xclass = DirectColor;
+ else if (_mesa_strcmp(type,"PseudoColor")==0) xclass = PseudoColor;
+ else if (_mesa_strcmp(type,"StaticColor")==0) xclass = StaticColor;
+ else if (_mesa_strcmp(type,"GrayScale")==0) xclass = GrayScale;
+ else if (_mesa_strcmp(type,"StaticGray")==0) xclass = StaticGray;
+
+ if (xclass>-1 && depth>0) {
+ vis = get_visual( dpy, scr, depth, xclass );
+ if (vis) {
+ return vis;
+ }
+ }
+
+ _mesa_warning(NULL, "GLX unable to find visual class=%s, depth=%d.",
+ type, depth);
+
+ return NULL;
+}
+
+
+
+/*
+ * Select an X visual which satisfies the RGBA flag and minimum depth.
+ * Input: dpy,
+ * screen - X display and screen number
+ * min_depth - minimum visual depth
+ * preferred_class - preferred GLX visual class or DONT_CARE
+ * Return: pointer to an XVisualInfo or NULL.
+ */
+static XVisualInfo *
+choose_x_visual( Display *dpy, int screen, int min_depth,
+ int preferred_class )
+{
+ XVisualInfo *vis;
+ int xclass, visclass = 0;
+ int depth;
+
+ /* First see if the MESA_RGB_VISUAL env var is defined */
+ vis = get_env_visual( dpy, screen, "MESA_RGB_VISUAL" );
+ if (vis) {
+ return vis;
+ }
+ /* Otherwise, search for a suitable visual */
+ if (preferred_class==DONT_CARE) {
+ for (xclass=0;xclass<6;xclass++) {
+ switch (xclass) {
+ case 0: visclass = TrueColor; break;
+ case 1: visclass = DirectColor; break;
+ case 2: visclass = PseudoColor; break;
+ case 3: visclass = StaticColor; break;
+ case 4: visclass = GrayScale; break;
+ case 5: visclass = StaticGray; break;
+ }
+ if (min_depth==0) {
+ /* start with shallowest */
+ for (depth=0;depth<=32;depth++) {
+ if (visclass==TrueColor && depth==8) {
+ /* Special case: try to get 8-bit PseudoColor before */
+ /* 8-bit TrueColor */
+ vis = get_visual( dpy, screen, 8, PseudoColor );
+ if (vis) {
+ return vis;
+ }
+ }
+ vis = get_visual( dpy, screen, depth, visclass );
+ if (vis) {
+ return vis;
+ }
+ }
+ }
+ else {
+ /* start with deepest */
+ for (depth=32;depth>=min_depth;depth--) {
+ if (visclass==TrueColor && depth==8) {
+ /* Special case: try to get 8-bit PseudoColor before */
+ /* 8-bit TrueColor */
+ vis = get_visual( dpy, screen, 8, PseudoColor );
+ if (vis) {
+ return vis;
+ }
+ }
+ vis = get_visual( dpy, screen, depth, visclass );
+ if (vis) {
+ return vis;
+ }
+ }
+ }
+ }
+ }
+ else {
+ /* search for a specific visual class */
+ switch (preferred_class) {
+ case GLX_TRUE_COLOR_EXT: visclass = TrueColor; break;
+ case GLX_DIRECT_COLOR_EXT: visclass = DirectColor; break;
+ case GLX_PSEUDO_COLOR_EXT: visclass = PseudoColor; break;
+ case GLX_STATIC_COLOR_EXT: visclass = StaticColor; break;
+ case GLX_GRAY_SCALE_EXT: visclass = GrayScale; break;
+ case GLX_STATIC_GRAY_EXT: visclass = StaticGray; break;
+ default: return NULL;
+ }
+ if (min_depth==0) {
+ /* start with shallowest */
+ for (depth=0;depth<=32;depth++) {
+ vis = get_visual( dpy, screen, depth, visclass );
+ if (vis) {
+ return vis;
+ }
+ }
+ }
+ else {
+ /* start with deepest */
+ for (depth=32;depth>=min_depth;depth--) {
+ vis = get_visual( dpy, screen, depth, visclass );
+ if (vis) {
+ return vis;
+ }
+ }
+ }
+ }
+
+ /* didn't find a visual */
+ return NULL;
+}
+
+
+
+
+/**********************************************************************/
+/*** Display-related functions ***/
+/**********************************************************************/
+
+
+/**
+ * Free all XMesaVisuals which are associated with the given display.
+ */
+static void
+destroy_visuals_on_display(Display *dpy)
+{
+ int i;
+ for (i = 0; i < NumVisuals; i++) {
+ if (VisualTable[i]->display == dpy) {
+ /* remove this visual */
+ int j;
+ free(VisualTable[i]);
+ for (j = i; j < NumVisuals - 1; j++)
+ VisualTable[j] = VisualTable[j + 1];
+ NumVisuals--;
+ }
+ }
+}
+
+
+/**
+ * Called from XCloseDisplay() to let us free our display-related data.
+ */
+static int
+close_display_callback(Display *dpy, XExtCodes *codes)
+{
+ destroy_visuals_on_display(dpy);
+ xmesa_destroy_buffers_on_display(dpy);
+ return 0;
+}
+
+
+/**
+ * Look for the named extension on given display and return a pointer
+ * to the _XExtension data, or NULL if extension not found.
+ */
+static _XExtension *
+lookup_extension(Display *dpy, const char *extName)
+{
+ _XExtension *ext;
+ for (ext = dpy->ext_procs; ext; ext = ext->next) {
+ if (ext->name && strcmp(ext->name, extName) == 0) {
+ return ext;
+ }
+ }
+ return NULL;
+}
+
+
+/**
+ * Whenever we're given a new Display pointer, call this function to
+ * register our close_display_callback function.
+ */
+static void
+register_with_display(Display *dpy)
+{
+ const char *extName = "MesaGLX";
+ _XExtension *ext;
+
+ ext = lookup_extension(dpy, extName);
+ if (!ext) {
+ XExtCodes *c = XAddExtension(dpy);
+ ext = dpy->ext_procs; /* new extension is at head of list */
+ assert(c->extension == ext->codes.extension);
+ ext->name = _mesa_strdup(extName);
+ ext->close_display = close_display_callback;
+ }
+}
+
+
+/**********************************************************************/
+/*** Begin Fake GLX API Functions ***/
+/**********************************************************************/
+
+
+/**
+ * Helper used by glXChooseVisual and glXChooseFBConfig.
+ * The fbConfig parameter must be GL_FALSE for the former and GL_TRUE for
+ * the latter.
+ * In either case, the attribute list is terminated with the value 'None'.
+ */
+static XMesaVisual
+choose_visual( Display *dpy, int screen, const int *list, GLboolean fbConfig )
+{
+ const GLboolean rgbModeDefault = fbConfig;
+ const int *parselist;
+ XVisualInfo *vis;
+ int min_ci = 0;
+ int min_red=0, min_green=0, min_blue=0;
+ GLboolean rgb_flag = rgbModeDefault;
+ GLboolean alpha_flag = GL_FALSE;
+ GLboolean double_flag = GL_FALSE;
+ GLboolean stereo_flag = GL_FALSE;
+ GLint depth_size = 0;
+ GLint stencil_size = 0;
+ GLint accumRedSize = 0;
+ GLint accumGreenSize = 0;
+ GLint accumBlueSize = 0;
+ GLint accumAlphaSize = 0;
+ int level = 0;
+ int visual_type = DONT_CARE;
+ int trans_type = DONT_CARE;
+ int trans_value = DONT_CARE;
+ GLint caveat = DONT_CARE;
+ XMesaVisual xmvis = NULL;
+ int desiredVisualID = -1;
+ int numAux = 0;
+
+ parselist = list;
+
+ while (*parselist) {
+
+ switch (*parselist) {
+ case GLX_USE_GL:
+ if (fbConfig) {
+ /* invalid token */
+ return NULL;
+ }
+ else {
+ /* skip */
+ parselist++;
+ }
+ break;
+ case GLX_BUFFER_SIZE:
+ parselist++;
+ min_ci = *parselist++;
+ break;
+ case GLX_LEVEL:
+ parselist++;
+ level = *parselist++;
+ break;
+ case GLX_RGBA:
+ if (fbConfig) {
+ /* invalid token */
+ return NULL;
+ }
+ else {
+ rgb_flag = GL_TRUE;
+ parselist++;
+ }
+ break;
+ case GLX_DOUBLEBUFFER:
+ parselist++;
+ if (fbConfig) {
+ double_flag = *parselist++;
+ }
+ else {
+ double_flag = GL_TRUE;
+ }
+ break;
+ case GLX_STEREO:
+ parselist++;
+ if (fbConfig) {
+ stereo_flag = *parselist++;
+ }
+ else {
+ stereo_flag = GL_TRUE;
+ }
+ break;
+ case GLX_AUX_BUFFERS:
+ parselist++;
+ numAux = *parselist++;
+ if (numAux > MAX_AUX_BUFFERS)
+ return NULL;
+ break;
+ case GLX_RED_SIZE:
+ parselist++;
+ min_red = *parselist++;
+ break;
+ case GLX_GREEN_SIZE:
+ parselist++;
+ min_green = *parselist++;
+ break;
+ case GLX_BLUE_SIZE:
+ parselist++;
+ min_blue = *parselist++;
+ break;
+ case GLX_ALPHA_SIZE:
+ parselist++;
+ {
+ GLint size = *parselist++;
+ alpha_flag = size ? GL_TRUE : GL_FALSE;
+ }
+ break;
+ case GLX_DEPTH_SIZE:
+ parselist++;
+ depth_size = *parselist++;
+ break;
+ case GLX_STENCIL_SIZE:
+ parselist++;
+ stencil_size = *parselist++;
+ break;
+ case GLX_ACCUM_RED_SIZE:
+ parselist++;
+ {
+ GLint size = *parselist++;
+ accumRedSize = MAX2( accumRedSize, size );
+ }
+ break;
+ case GLX_ACCUM_GREEN_SIZE:
+ parselist++;
+ {
+ GLint size = *parselist++;
+ accumGreenSize = MAX2( accumGreenSize, size );
+ }
+ break;
+ case GLX_ACCUM_BLUE_SIZE:
+ parselist++;
+ {
+ GLint size = *parselist++;
+ accumBlueSize = MAX2( accumBlueSize, size );
+ }
+ break;
+ case GLX_ACCUM_ALPHA_SIZE:
+ parselist++;
+ {
+ GLint size = *parselist++;
+ accumAlphaSize = MAX2( accumAlphaSize, size );
+ }
+ break;
+
+ /*
+ * GLX_EXT_visual_info extension
+ */
+ case GLX_X_VISUAL_TYPE_EXT:
+ parselist++;
+ visual_type = *parselist++;
+ break;
+ case GLX_TRANSPARENT_TYPE_EXT:
+ parselist++;
+ trans_type = *parselist++;
+ break;
+ case GLX_TRANSPARENT_INDEX_VALUE_EXT:
+ parselist++;
+ trans_value = *parselist++;
+ break;
+ case GLX_TRANSPARENT_RED_VALUE_EXT:
+ case GLX_TRANSPARENT_GREEN_VALUE_EXT:
+ case GLX_TRANSPARENT_BLUE_VALUE_EXT:
+ case GLX_TRANSPARENT_ALPHA_VALUE_EXT:
+ /* ignore */
+ parselist++;
+ parselist++;
+ break;
+
+ /*
+ * GLX_EXT_visual_rating extension
+ */
+ case GLX_VISUAL_CAVEAT_EXT:
+ parselist++;
+ caveat = *parselist++; /* ignored for now */
+ break;
+
+ /*
+ * GLX_ARB_multisample
+ */
+ case GLX_SAMPLE_BUFFERS_ARB:
+ /* ms not supported */
+ return NULL;
+ case GLX_SAMPLES_ARB:
+ /* ms not supported */
+ return NULL;
+
+ /*
+ * FBConfig attribs.
+ */
+ case GLX_RENDER_TYPE:
+ if (!fbConfig)
+ return NULL;
+ parselist++;
+ if (*parselist == GLX_RGBA_BIT) {
+ rgb_flag = GL_TRUE;
+ }
+ else if (*parselist == GLX_COLOR_INDEX_BIT) {
+ rgb_flag = GL_FALSE;
+ }
+ else if (*parselist == 0) {
+ rgb_flag = GL_TRUE;
+ }
+ parselist++;
+ break;
+ case GLX_DRAWABLE_TYPE:
+ if (!fbConfig)
+ return NULL;
+ parselist++;
+ if (*parselist & ~(GLX_WINDOW_BIT | GLX_PIXMAP_BIT | GLX_PBUFFER_BIT)) {
+ return NULL; /* bad bit */
+ }
+ parselist++;
+ break;
+ case GLX_FBCONFIG_ID:
+ if (!fbConfig)
+ return NULL;
+ parselist++;
+ desiredVisualID = *parselist++;
+ break;
+ case GLX_X_RENDERABLE:
+ if (!fbConfig)
+ return NULL;
+ parselist += 2;
+ /* ignore */
+ break;
+
+#ifdef GLX_EXT_texture_from_pixmap
+ case GLX_BIND_TO_TEXTURE_RGB_EXT:
+ parselist++; /*skip*/
+ break;
+ case GLX_BIND_TO_TEXTURE_RGBA_EXT:
+ parselist++; /*skip*/
+ break;
+ case GLX_BIND_TO_MIPMAP_TEXTURE_EXT:
+ parselist++; /*skip*/
+ break;
+ case GLX_BIND_TO_TEXTURE_TARGETS_EXT:
+ parselist++;
+ if (*parselist & ~(GLX_TEXTURE_1D_BIT_EXT |
+ GLX_TEXTURE_2D_BIT_EXT |
+ GLX_TEXTURE_RECTANGLE_BIT_EXT)) {
+ /* invalid bit */
+ return NULL;
+ }
+ break;
+ case GLX_Y_INVERTED_EXT:
+ parselist++; /*skip*/
+ break;
+#endif
+
+ case None:
+ /* end of list */
+ break;
+
+ default:
+ /* undefined attribute */
+ _mesa_warning(NULL, "unexpected attrib 0x%x in choose_visual()",
+ *parselist);
+ return NULL;
+ }
+ }
+
+ (void) caveat;
+
+
+ /*
+ * Since we're only simulating the GLX extension this function will never
+ * find any real GL visuals. Instead, all we can do is try to find an RGB
+ * or CI visual of appropriate depth. Other requested attributes such as
+ * double buffering, depth buffer, etc. will be associated with the X
+ * visual and stored in the VisualTable[].
+ */
+ if (desiredVisualID != -1) {
+ /* try to get a specific visual, by visualID */
+ XVisualInfo temp;
+ int n;
+ temp.visualid = desiredVisualID;
+ temp.screen = screen;
+ vis = XGetVisualInfo(dpy, VisualIDMask | VisualScreenMask, &temp, &n);
+ if (vis) {
+ /* give the visual some useful GLX attributes */
+ double_flag = GL_TRUE;
+ rgb_flag = GL_TRUE;
+ depth_size = default_depth_bits();
+ stencil_size = STENCIL_BITS;
+ /* XXX accum??? */
+ }
+ }
+ else if (level==0) {
+ /* normal color planes */
+ /* Get an RGB visual */
+ int min_rgb = min_red + min_green + min_blue;
+ if (min_rgb>1 && min_rgb<8) {
+ /* a special case to be sure we can get a monochrome visual */
+ min_rgb = 1;
+ }
+ vis = choose_x_visual( dpy, screen, min_rgb, visual_type );
+ }
+ else {
+ _mesa_warning(NULL, "overlay not supported");
+ return NULL;
+ }
+
+ if (vis) {
+ /* Note: we're not exactly obeying the glXChooseVisual rules here.
+ * When GLX_DEPTH_SIZE = 1 is specified we're supposed to choose the
+ * largest depth buffer size, which is 32bits/value. Instead, we
+ * return 16 to maintain performance with earlier versions of Mesa.
+ */
+ if (stencil_size > 0)
+ depth_size = 24; /* if Z and stencil, always use 24+8 format */
+ else if (depth_size > 24)
+ depth_size = 32;
+ else if (depth_size > 16)
+ depth_size = 24;
+ else if (depth_size > 0) {
+ depth_size = default_depth_bits();
+ }
+
+ if (!alpha_flag) {
+ alpha_flag = default_alpha_bits() > 0;
+ }
+
+ /* we only support one size of stencil and accum buffers. */
+ if (stencil_size > 0)
+ stencil_size = STENCIL_BITS;
+
+ if (accumRedSize > 0 ||
+ accumGreenSize > 0 ||
+ accumBlueSize > 0 ||
+ accumAlphaSize > 0) {
+
+ accumRedSize =
+ accumGreenSize =
+ accumBlueSize = default_accum_bits();
+
+ accumAlphaSize = alpha_flag ? accumRedSize : 0;
+ }
+
+ xmvis = save_glx_visual( dpy, vis, rgb_flag, alpha_flag, double_flag,
+ stereo_flag, depth_size, stencil_size,
+ accumRedSize, accumGreenSize,
+ accumBlueSize, accumAlphaSize, level, numAux );
+ }
+
+ return xmvis;
+}
+
+
+XVisualInfo *
+glXChooseVisual( Display *dpy, int screen, int *list )
+{
+ XMesaVisual xmvis;
+
+ /* register ourselves as an extension on this display */
+ register_with_display(dpy);
+
+ xmvis = choose_visual(dpy, screen, list, GL_FALSE);
+ if (xmvis) {
+ /* create a new vishandle - the cached one may be stale */
+ xmvis->vishandle = (XVisualInfo *) _mesa_malloc(sizeof(XVisualInfo));
+ if (xmvis->vishandle) {
+ _mesa_memcpy(xmvis->vishandle, xmvis->visinfo, sizeof(XVisualInfo));
+ }
+ return xmvis->vishandle;
+ }
+ else
+ return NULL;
+}
+
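+/* Illustrative sketch only (not part of the original change): a typical
+ * attribute list a client might pass to glXChooseVisual(). The list is
+ * parsed by choose_visual() above and must be terminated with None.
+ */
+#if 0
+static int example_glx_attribs[] = {
+ GLX_RGBA,
+ GLX_DOUBLEBUFFER,
+ GLX_DEPTH_SIZE, 16,
+ None
+};
+/* XVisualInfo *vi = glXChooseVisual(dpy, DefaultScreen(dpy), example_glx_attribs); */
+#endif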
+
+GLXContext
+glXCreateContext( Display *dpy, XVisualInfo *visinfo,
+ GLXContext share_list, Bool direct )
+{
+ XMesaVisual xmvis;
+ GLXContext glxCtx;
+ GLXContext shareCtx = share_list;
+
+ if (!dpy || !visinfo)
+ return 0;
+
+ glxCtx = CALLOC_STRUCT(__GLXcontextRec);
+ if (!glxCtx)
+ return 0;
+
+ /* deallocate unused windows/buffers */
+#if 0
+ XMesaGarbageCollect();
+#endif
+
+ xmvis = find_glx_visual( dpy, visinfo );
+ if (!xmvis) {
+ /* This visual wasn't found with glXChooseVisual() */
+ xmvis = create_glx_visual( dpy, visinfo );
+ if (!xmvis) {
+ /* unusable visual */
+ _mesa_free(glxCtx);
+ return NULL;
+ }
+ }
+
+ glxCtx->xmesaContext = XMesaCreateContext(xmvis,
+ shareCtx ? shareCtx->xmesaContext : NULL);
+ if (!glxCtx->xmesaContext) {
+ _mesa_free(glxCtx);
+ return NULL;
+ }
+
+ glxCtx->isDirect = DEFAULT_DIRECT;
+ glxCtx->currentDpy = dpy;
+ glxCtx->xid = (XID) glxCtx; /* self pointer */
+
+ return glxCtx;
+}
+
+
+/* XXX these may have to be removed due to thread-safety issues. */
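+/* They cache the arguments of the most recent MakeCurrent call so that a
+ * repeated call with the same context and drawables can skip the
+ * XMesaFindBuffer()/XMesaCreateWindowBuffer() lookups in
+ * glXMakeContextCurrent() below.
+ */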
+static GLXContext MakeCurrent_PrevContext = 0;
+static GLXDrawable MakeCurrent_PrevDrawable = 0;
+static GLXDrawable MakeCurrent_PrevReadable = 0;
+static XMesaBuffer MakeCurrent_PrevDrawBuffer = 0;
+static XMesaBuffer MakeCurrent_PrevReadBuffer = 0;
+
+
+/* GLX 1.3 and later */
+Bool
+glXMakeContextCurrent( Display *dpy, GLXDrawable draw,
+ GLXDrawable read, GLXContext ctx )
+{
+ GLXContext glxCtx = ctx;
+ static boolean firsttime = 1, no_rast = 0;
+
+ if (firsttime) {
+ no_rast = getenv("SP_NO_RAST") != NULL;
+ firsttime = 0;
+ }
+
+ if (ctx && draw && read) {
+ XMesaBuffer drawBuffer, readBuffer;
+ XMesaContext xmctx = glxCtx->xmesaContext;
+
+ /* Find the XMesaBuffer which corresponds to the GLXDrawable 'draw' */
+ if (ctx == MakeCurrent_PrevContext
+ && draw == MakeCurrent_PrevDrawable) {
+ drawBuffer = MakeCurrent_PrevDrawBuffer;
+ }
+ else {
+ drawBuffer = XMesaFindBuffer( dpy, draw );
+ }
+ if (!drawBuffer) {
+ /* drawable must be a new window! */
+ drawBuffer = XMesaCreateWindowBuffer( xmctx->xm_visual, draw );
+ if (!drawBuffer) {
+ /* Out of memory, or context/drawable depth mismatch */
+ return False;
+ }
+ }
+
+ /* Find the XMesaBuffer which corresponds to the GLXDrawable 'read' */
+ if (ctx == MakeCurrent_PrevContext
+ && read == MakeCurrent_PrevReadable) {
+ readBuffer = MakeCurrent_PrevReadBuffer;
+ }
+ else {
+ readBuffer = XMesaFindBuffer( dpy, read );
+ }
+ if (!readBuffer) {
+ /* drawable must be a new window! */
+ readBuffer = XMesaCreateWindowBuffer( xmctx->xm_visual, read );
+ if (!readBuffer) {
+ /* Out of memory, or context/drawable depth mismatch */
+ return False;
+ }
+ }
+
+ if (no_rast &&
+ MakeCurrent_PrevContext == ctx &&
+ MakeCurrent_PrevDrawable == draw &&
+ MakeCurrent_PrevReadable == read &&
+ MakeCurrent_PrevDrawBuffer == drawBuffer &&
+ MakeCurrent_PrevReadBuffer == readBuffer)
+ return True;
+
+ MakeCurrent_PrevContext = ctx;
+ MakeCurrent_PrevDrawable = draw;
+ MakeCurrent_PrevReadable = read;
+ MakeCurrent_PrevDrawBuffer = drawBuffer;
+ MakeCurrent_PrevReadBuffer = readBuffer;
+
+ /* Now make current! */
+ if (XMesaMakeCurrent2(xmctx, drawBuffer, readBuffer)) {
+ ctx->currentDpy = dpy;
+ ctx->currentDrawable = draw;
+ ctx->currentReadable = read;
+ SetCurrentContext(ctx);
+ return True;
+ }
+ else {
+ return False;
+ }
+ }
+ else if (!ctx && !draw && !read) {
+ /* release current context w/out assigning new one. */
+ XMesaMakeCurrent2( NULL, NULL, NULL );
+ MakeCurrent_PrevContext = 0;
+ MakeCurrent_PrevDrawable = 0;
+ MakeCurrent_PrevReadable = 0;
+ MakeCurrent_PrevDrawBuffer = 0;
+ MakeCurrent_PrevReadBuffer = 0;
+ SetCurrentContext(NULL);
+ return True;
+ }
+ else {
+ /* The args must either all be non-zero or all zero;
+ * a mixed set of arguments is an error.
+ */
+ return False;
+ }
+}
+
+
+Bool
+glXMakeCurrent( Display *dpy, GLXDrawable drawable, GLXContext ctx )
+{
+ return glXMakeContextCurrent( dpy, drawable, drawable, ctx );
+}
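+
+/* Illustrative sketch only (not part of the original change): the minimal
+ * client-side sequence these entry points serve. Error handling and X
+ * window creation are omitted; dpy, win and vi are assumed to exist.
+ */
+#if 0
+static void example_render_once(Display *dpy, Window win, XVisualInfo *vi)
+{
+ GLXContext ctx = glXCreateContext(dpy, vi, NULL, True);
+
+ glXMakeCurrent(dpy, win, ctx); /* read drawable == draw drawable */
+ /* ... GL rendering ... */
+ glXSwapBuffers(dpy, win);
+ glXMakeCurrent(dpy, None, NULL); /* release the context */
+ glXDestroyContext(dpy, ctx);
+}
+#endif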
+
+
+GLXContext
+glXGetCurrentContext(void)
+{
+ return GetCurrentContext();
+}
+
+
+Display *
+glXGetCurrentDisplay(void)
+{
+ GLXContext glxCtx = glXGetCurrentContext();
+
+ return glxCtx ? glxCtx->currentDpy : NULL;
+}
+
+
+Display *
+glXGetCurrentDisplayEXT(void)
+{
+ return glXGetCurrentDisplay();
+}
+
+
+GLXDrawable
+glXGetCurrentDrawable(void)
+{
+ GLXContext gc = glXGetCurrentContext();
+ return gc ? gc->currentDrawable : 0;
+}
+
+
+GLXDrawable
+glXGetCurrentReadDrawable(void)
+{
+ GLXContext gc = glXGetCurrentContext();
+ return gc ? gc->currentReadable : 0;
+}
+
+
+GLXDrawable
+glXGetCurrentReadDrawableSGI(void)
+{
+ return glXGetCurrentReadDrawable();
+}
+
+
+GLXPixmap
+glXCreateGLXPixmap( Display *dpy, XVisualInfo *visinfo, Pixmap pixmap )
+{
+ XMesaVisual v;
+ XMesaBuffer b;
+
+ v = find_glx_visual( dpy, visinfo );
+ if (!v) {
+ v = create_glx_visual( dpy, visinfo );
+ if (!v) {
+ /* unusable visual */
+ return 0;
+ }
+ }
+
+ b = XMesaCreatePixmapBuffer( v, pixmap, 0 );
+ if (!b) {
+ return 0;
+ }
+ return b->drawable;
+}
+
+
+/*** GLX_MESA_pixmap_colormap ***/
+
+GLXPixmap
+glXCreateGLXPixmapMESA( Display *dpy, XVisualInfo *visinfo,
+ Pixmap pixmap, Colormap cmap )
+{
+ XMesaVisual v;
+ XMesaBuffer b;
+
+ v = find_glx_visual( dpy, visinfo );
+ if (!v) {
+ v = create_glx_visual( dpy, visinfo );
+ if (!v) {
+ /* unusable visual */
+ return 0;
+ }
+ }
+
+ b = XMesaCreatePixmapBuffer( v, pixmap, cmap );
+ if (!b) {
+ return 0;
+ }
+ return b->drawable;
+}
+
+
+void
+glXDestroyGLXPixmap( Display *dpy, GLXPixmap pixmap )
+{
+ XMesaBuffer b = XMesaFindBuffer(dpy, pixmap);
+ if (b) {
+ XMesaDestroyBuffer(b);
+ }
+ else if (_mesa_getenv("MESA_DEBUG")) {
+ _mesa_warning(NULL, "Mesa: glXDestroyGLXPixmap: invalid pixmap\n");
+ }
+}
+
+
+void
+glXCopyContext( Display *dpy, GLXContext src, GLXContext dst,
+ unsigned long mask )
+{
+ XMesaContext xm_src = src->xmesaContext;
+ XMesaContext xm_dst = dst->xmesaContext;
+ (void) dpy;
+ if (MakeCurrent_PrevContext == src) {
+ _mesa_Flush();
+ }
+ st_copy_context_state( xm_src->st, xm_dst->st, (GLuint) mask );
+}
+
+
+Bool
+glXQueryExtension( Display *dpy, int *errorb, int *event )
+{
+ /* Mesa's GLX isn't really an X extension but we try to act like one. */
+ (void) dpy;
+ (void) errorb;
+ (void) event;
+ return True;
+}
+
+
+void
+glXDestroyContext( Display *dpy, GLXContext ctx )
+{
+ GLXContext glxCtx = ctx;
+ (void) dpy;
+ MakeCurrent_PrevContext = 0;
+ MakeCurrent_PrevDrawable = 0;
+ MakeCurrent_PrevReadable = 0;
+ MakeCurrent_PrevDrawBuffer = 0;
+ MakeCurrent_PrevReadBuffer = 0;
+ XMesaDestroyContext( glxCtx->xmesaContext );
+ XMesaGarbageCollect();
+ _mesa_free(glxCtx);
+}
+
+
+Bool
+glXIsDirect( Display *dpy, GLXContext ctx )
+{
+ GLXContext glxCtx = ctx;
+ (void) dpy;
+ return glxCtx->isDirect;
+}
+
+
+
+void
+glXSwapBuffers( Display *dpy, GLXDrawable drawable )
+{
+ XMesaBuffer buffer = XMesaFindBuffer( dpy, drawable );
+ static boolean firsttime = 1, no_rast = 0;
+
+ if (firsttime) {
+ no_rast = getenv("SP_NO_RAST") != NULL;
+ firsttime = 0;
+ }
+
+ if (no_rast)
+ return;
+
+ if (buffer) {
+ XMesaSwapBuffers(buffer);
+ }
+ else if (_mesa_getenv("MESA_DEBUG")) {
+ _mesa_warning(NULL, "glXSwapBuffers: invalid drawable 0x%x\n",
+ (int) drawable);
+ }
+}
+
+
+
+/*** GLX_MESA_copy_sub_buffer ***/
+
+void
+glXCopySubBufferMESA( Display *dpy, GLXDrawable drawable,
+ int x, int y, int width, int height )
+{
+ XMesaBuffer buffer = XMesaFindBuffer( dpy, drawable );
+ if (buffer) {
+ XMesaCopySubBuffer(buffer, x, y, width, height);
+ }
+ else if (_mesa_getenv("MESA_DEBUG")) {
+ _mesa_warning(NULL, "Mesa: glXCopySubBufferMESA: invalid drawable\n");
+ }
+}
+
+
+Bool
+glXQueryVersion( Display *dpy, int *maj, int *min )
+{
+ (void) dpy;
+ /* Return GLX version, not Mesa version */
+ assert(CLIENT_MAJOR_VERSION == SERVER_MAJOR_VERSION);
+ *maj = CLIENT_MAJOR_VERSION;
+ *min = MIN2( CLIENT_MINOR_VERSION, SERVER_MINOR_VERSION );
+ return True;
+}
+
+
+/*
+ * Query the GLX attributes of the given XVisualInfo.
+ */
+static int
+get_config( XMesaVisual xmvis, int attrib, int *value, GLboolean fbconfig )
+{
+ ASSERT(xmvis);
+ switch(attrib) {
+ case GLX_USE_GL:
+ if (fbconfig)
+ return GLX_BAD_ATTRIBUTE;
+ *value = (int) True;
+ return 0;
+ case GLX_BUFFER_SIZE:
+ *value = xmvis->visinfo->depth;
+ return 0;
+ case GLX_LEVEL:
+ *value = xmvis->mesa_visual.level;
+ return 0;
+ case GLX_RGBA:
+ if (fbconfig)
+ return GLX_BAD_ATTRIBUTE;
+ if (xmvis->mesa_visual.rgbMode) {
+ *value = True;
+ }
+ else {
+ *value = False;
+ }
+ return 0;
+ case GLX_DOUBLEBUFFER:
+ *value = (int) xmvis->mesa_visual.doubleBufferMode;
+ return 0;
+ case GLX_STEREO:
+ *value = (int) xmvis->mesa_visual.stereoMode;
+ return 0;
+ case GLX_AUX_BUFFERS:
+ *value = xmvis->mesa_visual.numAuxBuffers;
+ return 0;
+ case GLX_RED_SIZE:
+ *value = xmvis->mesa_visual.redBits;
+ return 0;
+ case GLX_GREEN_SIZE:
+ *value = xmvis->mesa_visual.greenBits;
+ return 0;
+ case GLX_BLUE_SIZE:
+ *value = xmvis->mesa_visual.blueBits;
+ return 0;
+ case GLX_ALPHA_SIZE:
+ *value = xmvis->mesa_visual.alphaBits;
+ return 0;
+ case GLX_DEPTH_SIZE:
+ *value = xmvis->mesa_visual.depthBits;
+ return 0;
+ case GLX_STENCIL_SIZE:
+ *value = xmvis->mesa_visual.stencilBits;
+ return 0;
+ case GLX_ACCUM_RED_SIZE:
+ *value = xmvis->mesa_visual.accumRedBits;
+ return 0;
+ case GLX_ACCUM_GREEN_SIZE:
+ *value = xmvis->mesa_visual.accumGreenBits;
+ return 0;
+ case GLX_ACCUM_BLUE_SIZE:
+ *value = xmvis->mesa_visual.accumBlueBits;
+ return 0;
+ case GLX_ACCUM_ALPHA_SIZE:
+ *value = xmvis->mesa_visual.accumAlphaBits;
+ return 0;
+
+ /*
+ * GLX_EXT_visual_info extension
+ */
+ case GLX_X_VISUAL_TYPE_EXT:
+ switch (xmvis->visinfo->CLASS) {
+ case StaticGray: *value = GLX_STATIC_GRAY_EXT; return 0;
+ case GrayScale: *value = GLX_GRAY_SCALE_EXT; return 0;
+ case StaticColor: *value = GLX_STATIC_COLOR_EXT; return 0;
+ case PseudoColor: *value = GLX_PSEUDO_COLOR_EXT; return 0;
+ case TrueColor: *value = GLX_TRUE_COLOR_EXT; return 0;
+ case DirectColor: *value = GLX_DIRECT_COLOR_EXT; return 0;
+ }
+ return 0;
+ case GLX_TRANSPARENT_TYPE_EXT:
+ /* normal planes */
+ *value = GLX_NONE_EXT;
+ return 0;
+ case GLX_TRANSPARENT_INDEX_VALUE_EXT:
+ /* undefined */
+ return 0;
+ case GLX_TRANSPARENT_RED_VALUE_EXT:
+ /* undefined */
+ return 0;
+ case GLX_TRANSPARENT_GREEN_VALUE_EXT:
+ /* undefined */
+ return 0;
+ case GLX_TRANSPARENT_BLUE_VALUE_EXT:
+ /* undefined */
+ return 0;
+ case GLX_TRANSPARENT_ALPHA_VALUE_EXT:
+ /* undefined */
+ return 0;
+
+ /*
+ * GLX_EXT_visual_rating extension
+ */
+ case GLX_VISUAL_CAVEAT_EXT:
+ /* test for zero, just in case */
+ if (xmvis->mesa_visual.visualRating > 0)
+ *value = xmvis->mesa_visual.visualRating;
+ else
+ *value = GLX_NONE_EXT;
+ return 0;
+
+ /*
+ * GLX_ARB_multisample
+ */
+ case GLX_SAMPLE_BUFFERS_ARB:
+ *value = 0;
+ return 0;
+ case GLX_SAMPLES_ARB:
+ *value = 0;
+ return 0;
+
+ /*
+ * For FBConfigs:
+ */
+ case GLX_SCREEN_EXT:
+ if (!fbconfig)
+ return GLX_BAD_ATTRIBUTE;
+ *value = xmvis->visinfo->screen;
+ break;
+ case GLX_DRAWABLE_TYPE: /*SGIX too */
+ if (!fbconfig)
+ return GLX_BAD_ATTRIBUTE;
+ *value = GLX_WINDOW_BIT | GLX_PIXMAP_BIT | GLX_PBUFFER_BIT;
+ break;
+ case GLX_RENDER_TYPE_SGIX:
+ if (!fbconfig)
+ return GLX_BAD_ATTRIBUTE;
+ if (xmvis->mesa_visual.rgbMode)
+ *value = GLX_RGBA_BIT;
+ else
+ *value = GLX_COLOR_INDEX_BIT;
+ break;
+ case GLX_X_RENDERABLE_SGIX:
+ if (!fbconfig)
+ return GLX_BAD_ATTRIBUTE;
+ *value = True; /* XXX really? */
+ break;
+ case GLX_FBCONFIG_ID_SGIX:
+ if (!fbconfig)
+ return GLX_BAD_ATTRIBUTE;
+ *value = xmvis->visinfo->visualid;
+ break;
+ case GLX_MAX_PBUFFER_WIDTH:
+ if (!fbconfig)
+ return GLX_BAD_ATTRIBUTE;
+ /* XXX or MAX_WIDTH? */
+ *value = DisplayWidth(xmvis->display, xmvis->visinfo->screen);
+ break;
+ case GLX_MAX_PBUFFER_HEIGHT:
+ if (!fbconfig)
+ return GLX_BAD_ATTRIBUTE;
+ *value = DisplayHeight(xmvis->display, xmvis->visinfo->screen);
+ break;
+ case GLX_MAX_PBUFFER_PIXELS:
+ if (!fbconfig)
+ return GLX_BAD_ATTRIBUTE;
+ *value = DisplayWidth(xmvis->display, xmvis->visinfo->screen) *
+ DisplayHeight(xmvis->display, xmvis->visinfo->screen);
+ break;
+ case GLX_VISUAL_ID:
+ if (!fbconfig)
+ return GLX_BAD_ATTRIBUTE;
+ *value = xmvis->visinfo->visualid;
+ break;
+
+#ifdef GLX_EXT_texture_from_pixmap
+ case GLX_BIND_TO_TEXTURE_RGB_EXT:
+ *value = True; /*XXX*/
+ break;
+ case GLX_BIND_TO_TEXTURE_RGBA_EXT:
+ /* XXX review */
+ *value = xmvis->mesa_visual.alphaBits > 0 ? True : False;
+ break;
+ case GLX_BIND_TO_MIPMAP_TEXTURE_EXT:
+ *value = True; /*XXX*/
+ break;
+ case GLX_BIND_TO_TEXTURE_TARGETS_EXT:
+ *value = (GLX_TEXTURE_1D_BIT_EXT |
+ GLX_TEXTURE_2D_BIT_EXT |
+ GLX_TEXTURE_RECTANGLE_BIT_EXT); /*XXX*/
+ break;
+ case GLX_Y_INVERTED_EXT:
+ *value = True; /*XXX*/
+ break;
+#endif
+
+ default:
+ return GLX_BAD_ATTRIBUTE;
+ }
+ return Success;
+}
+
+
+int
+glXGetConfig( Display *dpy, XVisualInfo *visinfo,
+ int attrib, int *value )
+{
+ XMesaVisual xmvis;
+ int k;
+ if (!dpy || !visinfo)
+ return GLX_BAD_ATTRIBUTE;
+
+ xmvis = find_glx_visual( dpy, visinfo );
+ if (!xmvis) {
+ /* this visual wasn't obtained with glXChooseVisual */
+ xmvis = create_glx_visual( dpy, visinfo );
+ if (!xmvis) {
+ /* this visual can't be used for GL rendering */
+ if (attrib==GLX_USE_GL) {
+ *value = (int) False;
+ return 0;
+ }
+ else {
+ return GLX_BAD_VISUAL;
+ }
+ }
+ }
+
+ k = get_config(xmvis, attrib, value, GL_FALSE);
+ return k;
+}
+
+
+void
+glXWaitGL( void )
+{
+ XMesaContext xmesa = XMesaGetCurrentContext();
+ XMesaFlush( xmesa );
+}
+
+
+
+void
+glXWaitX( void )
+{
+ XMesaContext xmesa = XMesaGetCurrentContext();
+ XMesaFlush( xmesa );
+}
+
+
+static const char *
+get_extensions( void )
+{
+ return EXTENSIONS;
+}
+
+
+
+/* GLX 1.1 and later */
+const char *
+glXQueryExtensionsString( Display *dpy, int screen )
+{
+ (void) dpy;
+ (void) screen;
+ return get_extensions();
+}
+
+
+
+/* GLX 1.1 and later */
+const char *
+glXQueryServerString( Display *dpy, int screen, int name )
+{
+ static char version[1000];
+ _mesa_sprintf(version, "%d.%d %s",
+ SERVER_MAJOR_VERSION, SERVER_MINOR_VERSION, MESA_GLX_VERSION);
+
+ (void) dpy;
+ (void) screen;
+
+ switch (name) {
+ case GLX_EXTENSIONS:
+ return get_extensions();
+ case GLX_VENDOR:
+ return VENDOR;
+ case GLX_VERSION:
+ return version;
+ default:
+ return NULL;
+ }
+}
+
+
+
+/* GLX 1.1 and later */
+const char *
+glXGetClientString( Display *dpy, int name )
+{
+ static char version[1000];
+ _mesa_sprintf(version, "%d.%d %s", CLIENT_MAJOR_VERSION,
+ CLIENT_MINOR_VERSION, MESA_GLX_VERSION);
+
+ (void) dpy;
+
+ switch (name) {
+ case GLX_EXTENSIONS:
+ return get_extensions();
+ case GLX_VENDOR:
+ return VENDOR;
+ case GLX_VERSION:
+ return version;
+ default:
+ return NULL;
+ }
+}
+
+
+
+/*
+ * GLX 1.3 and later
+ */
+
+
+int
+glXGetFBConfigAttrib( Display *dpy, GLXFBConfig config,
+ int attribute, int *value )
+{
+ XMesaVisual v = (XMesaVisual) config;
+ (void) dpy;
+ (void) config;
+
+ if (!dpy || !config || !value)
+ return -1;
+
+ return get_config(v, attribute, value, GL_TRUE);
+}
+
+
+GLXFBConfig *
+glXGetFBConfigs( Display *dpy, int screen, int *nelements )
+{
+ XVisualInfo *visuals, visTemplate;
+ const long visMask = VisualScreenMask;
+ int i;
+
+ /* Get list of all X visuals */
+ visTemplate.screen = screen;
+ visuals = XGetVisualInfo(dpy, visMask, &visTemplate, nelements);
+ if (*nelements > 0) {
+ XMesaVisual *results;
+ results = (XMesaVisual *) _mesa_malloc(*nelements * sizeof(XMesaVisual));
+ if (!results) {
+ *nelements = 0;
+ return NULL;
+ }
+ for (i = 0; i < *nelements; i++) {
+ results[i] = create_glx_visual(dpy, visuals + i);
+ }
+ return (GLXFBConfig *) results;
+ }
+ return NULL;
+}
+
+
+GLXFBConfig *
+glXChooseFBConfig( Display *dpy, int screen,
+ const int *attribList, int *nitems )
+{
+ XMesaVisual xmvis;
+
+ if (!attribList || !attribList[0]) {
+ /* return list of all configs (per GLX_SGIX_fbconfig spec) */
+ return glXGetFBConfigs(dpy, screen, nitems);
+ }
+
+ xmvis = choose_visual(dpy, screen, attribList, GL_TRUE);
+ if (xmvis) {
+ GLXFBConfig *config = (GLXFBConfig *) _mesa_malloc(sizeof(XMesaVisual));
+ if (!config) {
+ *nitems = 0;
+ return NULL;
+ }
+ *nitems = 1;
+ config[0] = (GLXFBConfig) xmvis;
+ return (GLXFBConfig *) config;
+ }
+ else {
+ *nitems = 0;
+ return NULL;
+ }
+}
+
+
+XVisualInfo *
+glXGetVisualFromFBConfig( Display *dpy, GLXFBConfig config )
+{
+ if (dpy && config) {
+ XMesaVisual xmvis = (XMesaVisual) config;
+#if 0
+ return xmvis->vishandle;
+#else
+ /* create a new vishandle - the cached one may be stale */
+ xmvis->vishandle = (XVisualInfo *) _mesa_malloc(sizeof(XVisualInfo));
+ if (xmvis->vishandle) {
+ _mesa_memcpy(xmvis->vishandle, xmvis->visinfo, sizeof(XVisualInfo));
+ }
+ return xmvis->vishandle;
+#endif
+ }
+ else {
+ return NULL;
+ }
+}
+
+
+GLXWindow
+glXCreateWindow( Display *dpy, GLXFBConfig config, Window win,
+ const int *attribList )
+{
+ XMesaVisual xmvis = (XMesaVisual) config;
+ XMesaBuffer xmbuf;
+ if (!xmvis)
+ return 0;
+
+ xmbuf = XMesaCreateWindowBuffer(xmvis, win);
+ if (!xmbuf)
+ return 0;
+
+ (void) dpy;
+ (void) attribList; /* Ignored in GLX 1.3 */
+
+ return win; /* A hack for now */
+}
+
+
+void
+glXDestroyWindow( Display *dpy, GLXWindow window )
+{
+ XMesaBuffer b = XMesaFindBuffer(dpy, (Drawable) window);
+ if (b)
+ XMesaDestroyBuffer(b);
+ /* don't destroy X window */
+}
+
+
+/* XXX untested */
+GLXPixmap
+glXCreatePixmap( Display *dpy, GLXFBConfig config, Pixmap pixmap,
+ const int *attribList )
+{
+ XMesaVisual v = (XMesaVisual) config;
+ XMesaBuffer b;
+ const int *attr;
+ int target = 0, format = 0, mipmap = 0;
+ int value;
+
+ if (!dpy || !config || !pixmap)
+ return 0;
+
+ for (attr = attribList; attr && *attr; attr++) {
+ switch (*attr) {
+ case GLX_TEXTURE_FORMAT_EXT:
+ attr++;
+ switch (*attr) {
+ case GLX_TEXTURE_FORMAT_NONE_EXT:
+ case GLX_TEXTURE_FORMAT_RGB_EXT:
+ case GLX_TEXTURE_FORMAT_RGBA_EXT:
+ format = *attr;
+ break;
+ default:
+ /* error */
+ return 0;
+ }
+ break;
+ case GLX_TEXTURE_TARGET_EXT:
+ attr++;
+ switch (*attr) {
+ case GLX_TEXTURE_1D_EXT:
+ case GLX_TEXTURE_2D_EXT:
+ case GLX_TEXTURE_RECTANGLE_EXT:
+ target = *attr;
+ break;
+ default:
+ /* error */
+ return 0;
+ }
+ break;
+ case GLX_MIPMAP_TEXTURE_EXT:
+ attr++;
+ if (*attr)
+ mipmap = 1;
+ break;
+ default:
+ /* error */
+ return 0;
+ }
+ }
+
+ if (format == GLX_TEXTURE_FORMAT_RGB_EXT) {
+ if (get_config(v, GLX_BIND_TO_TEXTURE_RGB_EXT,
+ &value, GL_TRUE) != Success
+ || !value) {
+ return 0; /* error! */
+ }
+ }
+ else if (format == GLX_TEXTURE_FORMAT_RGBA_EXT) {
+ if (get_config(v, GLX_BIND_TO_TEXTURE_RGBA_EXT,
+ &value, GL_TRUE) != Success
+ || !value) {
+ return 0; /* error! */
+ }
+ }
+ if (mipmap) {
+ if (get_config(v, GLX_BIND_TO_MIPMAP_TEXTURE_EXT,
+ &value, GL_TRUE) != Success
+ || !value) {
+ return 0; /* error! */
+ }
+ }
+ if (target == GLX_TEXTURE_1D_EXT) {
+ if (get_config(v, GLX_BIND_TO_TEXTURE_TARGETS_EXT,
+ &value, GL_TRUE) != Success
+ || (value & GLX_TEXTURE_1D_BIT_EXT) == 0) {
+ return 0; /* error! */
+ }
+ }
+ else if (target == GLX_TEXTURE_2D_EXT) {
+ if (get_config(v, GLX_BIND_TO_TEXTURE_TARGETS_EXT,
+ &value, GL_TRUE) != Success
+ || (value & GLX_TEXTURE_2D_BIT_EXT) == 0) {
+ return 0; /* error! */
+ }
+ }
+ if (target == GLX_TEXTURE_RECTANGLE_EXT) {
+ if (get_config(v, GLX_BIND_TO_TEXTURE_TARGETS_EXT,
+ &value, GL_TRUE) != Success
+ || (value & GLX_TEXTURE_RECTANGLE_BIT_EXT) == 0) {
+ return 0; /* error! */
+ }
+ }
+
+ if (format || target || mipmap) {
+ /* texture from pixmap */
+ b = XMesaCreatePixmapTextureBuffer(v, pixmap, 0, format, target, mipmap);
+ }
+ else {
+ b = XMesaCreatePixmapBuffer( v, pixmap, 0 );
+ }
+ if (!b) {
+ return 0;
+ }
+
+ return pixmap;
+}
+
+
+void
+glXDestroyPixmap( Display *dpy, GLXPixmap pixmap )
+{
+ XMesaBuffer b = XMesaFindBuffer(dpy, (Drawable)pixmap);
+ if (b)
+ XMesaDestroyBuffer(b);
+ /* don't destroy X pixmap */
+}
+
+
+GLXPbuffer
+glXCreatePbuffer( Display *dpy, GLXFBConfig config,
+ const int *attribList )
+{
+ XMesaVisual xmvis = (XMesaVisual) config;
+ XMesaBuffer xmbuf;
+ const int *attrib;
+ int width = 0, height = 0;
+ GLboolean useLargest = GL_FALSE, preserveContents = GL_FALSE;
+
+ (void) dpy;
+
+ for (attrib = attribList; *attrib; attrib++) {
+ switch (*attrib) {
+ case GLX_PBUFFER_WIDTH:
+ attrib++;
+ width = *attrib;
+ break;
+ case GLX_PBUFFER_HEIGHT:
+ attrib++;
+ height = *attrib;
+ break;
+ case GLX_PRESERVED_CONTENTS:
+ attrib++;
+ preserveContents = *attrib; /* ignored */
+ break;
+ case GLX_LARGEST_PBUFFER:
+ attrib++;
+ useLargest = *attrib; /* ignored */
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ /* not used at this time */
+ (void) useLargest;
+ (void) preserveContents;
+
+ if (width == 0 || height == 0)
+ return 0;
+
+ xmbuf = XMesaCreatePBuffer( xmvis, 0, width, height);
+ /* A GLXPbuffer handle must be an X Drawable because that's what
+ * glXMakeCurrent takes.
+ */
+ if (xmbuf)
+ return (GLXPbuffer) xmbuf->drawable;
+ else
+ return 0;
+}
+
+
+void
+glXDestroyPbuffer( Display *dpy, GLXPbuffer pbuf )
+{
+ XMesaBuffer b = XMesaFindBuffer(dpy, pbuf);
+ if (b) {
+ XMesaDestroyBuffer(b);
+ }
+}
+
+
+void
+glXQueryDrawable( Display *dpy, GLXDrawable draw, int attribute,
+ unsigned int *value )
+{
+ XMesaBuffer xmbuf = XMesaFindBuffer(dpy, draw);
+ if (!xmbuf)
+ return;
+
+ switch (attribute) {
+ case GLX_WIDTH:
+ *value = xmesa_buffer_width(xmbuf);
+ break;
+ case GLX_HEIGHT:
+      *value = xmesa_buffer_height(xmbuf);
+ break;
+ case GLX_PRESERVED_CONTENTS:
+ *value = True;
+ break;
+ case GLX_LARGEST_PBUFFER:
+ *value = xmesa_buffer_width(xmbuf) * xmesa_buffer_height(xmbuf);
+ break;
+ case GLX_FBCONFIG_ID:
+ *value = xmbuf->xm_visual->visinfo->visualid;
+ return;
+#ifdef GLX_EXT_texture_from_pixmap
+ case GLX_TEXTURE_FORMAT_EXT:
+ *value = xmbuf->TextureFormat;
+ break;
+ case GLX_TEXTURE_TARGET_EXT:
+ *value = xmbuf->TextureTarget;
+ break;
+ case GLX_MIPMAP_TEXTURE_EXT:
+ *value = xmbuf->TextureMipmap;
+ break;
+#endif
+
+ default:
+ return; /* raise BadValue error */
+ }
+}
+
+
+GLXContext
+glXCreateNewContext( Display *dpy, GLXFBConfig config,
+ int renderType, GLXContext shareList, Bool direct )
+{
+ GLXContext glxCtx;
+ GLXContext shareCtx = shareList;
+ XMesaVisual xmvis = (XMesaVisual) config;
+
+ if (!dpy || !config ||
+ (renderType != GLX_RGBA_TYPE && renderType != GLX_COLOR_INDEX_TYPE))
+ return 0;
+
+ glxCtx = CALLOC_STRUCT(__GLXcontextRec);
+ if (!glxCtx)
+ return 0;
+
+ /* deallocate unused windows/buffers */
+ XMesaGarbageCollect();
+
+ glxCtx->xmesaContext = XMesaCreateContext(xmvis,
+ shareCtx ? shareCtx->xmesaContext : NULL);
+ if (!glxCtx->xmesaContext) {
+ _mesa_free(glxCtx);
+ return NULL;
+ }
+
+ glxCtx->isDirect = DEFAULT_DIRECT;
+ glxCtx->currentDpy = dpy;
+ glxCtx->xid = (XID) glxCtx; /* self pointer */
+
+ return glxCtx;
+}
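+
+/*
+ * Illustrative sketch (not built): the usual GLX 1.3 pbuffer path through the
+ * entry points above -- pick an FBConfig, create a pbuffer and a context,
+ * then bind them.  The helper name and attribute choices are examples only.
+ */
+#if 0
+static GLXContext
+example_make_pbuffer_context(Display *dpy, int screen)
+{
+   const int cfg_attribs[] = { GLX_RENDER_TYPE, GLX_RGBA_BIT, None };
+   const int pb_attribs[] = { GLX_PBUFFER_WIDTH, 256,
+                              GLX_PBUFFER_HEIGHT, 256, None };
+   int n = 0;
+   GLXFBConfig *configs = glXChooseFBConfig(dpy, screen, cfg_attribs, &n);
+   GLXPbuffer pbuf;
+   GLXContext ctx;
+
+   if (!configs || n == 0)
+      return NULL;
+
+   pbuf = glXCreatePbuffer(dpy, configs[0], pb_attribs);
+   ctx = glXCreateNewContext(dpy, configs[0], GLX_RGBA_TYPE, NULL, True);
+   if (ctx && pbuf)
+      glXMakeContextCurrent(dpy, pbuf, pbuf, ctx);
+
+   XFree(configs);
+   return ctx;
+}
+#endif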
+
+
+int
+glXQueryContext( Display *dpy, GLXContext ctx, int attribute, int *value )
+{
+ GLXContext glxCtx = ctx;
+ XMesaContext xmctx = glxCtx->xmesaContext;
+
+ (void) dpy;
+ (void) ctx;
+
+ switch (attribute) {
+ case GLX_FBCONFIG_ID:
+ *value = xmctx->xm_visual->visinfo->visualid;
+ break;
+ case GLX_RENDER_TYPE:
+ if (xmctx->xm_visual->mesa_visual.rgbMode)
+ *value = GLX_RGBA_BIT;
+ else
+ *value = GLX_COLOR_INDEX_BIT;
+ break;
+ case GLX_SCREEN:
+ *value = 0;
+ return Success;
+ default:
+ return GLX_BAD_ATTRIBUTE;
+ }
+ return 0;
+}
+
+
+void
+glXSelectEvent( Display *dpy, GLXDrawable drawable, unsigned long mask )
+{
+ XMesaBuffer xmbuf = XMesaFindBuffer(dpy, drawable);
+ if (xmbuf)
+ xmbuf->selectedEvents = mask;
+}
+
+
+void
+glXGetSelectedEvent( Display *dpy, GLXDrawable drawable,
+ unsigned long *mask )
+{
+ XMesaBuffer xmbuf = XMesaFindBuffer(dpy, drawable);
+ if (xmbuf)
+ *mask = xmbuf->selectedEvents;
+ else
+ *mask = 0;
+}
+
+
+
+/*** GLX_SGI_swap_control ***/
+
+int
+glXSwapIntervalSGI(int interval)
+{
+ (void) interval;
+ return 0;
+}
+
+
+
+/*** GLX_SGI_video_sync ***/
+
+static unsigned int FrameCounter = 0;
+
+int
+glXGetVideoSyncSGI(unsigned int *count)
+{
+ /* this is a bogus implementation */
+ *count = FrameCounter++;
+ return 0;
+}
+
+int
+glXWaitVideoSyncSGI(int divisor, int remainder, unsigned int *count)
+{
+ if (divisor <= 0 || remainder < 0)
+ return GLX_BAD_VALUE;
+ /* this is a bogus implementation */
+ FrameCounter++;
+ while (FrameCounter % divisor != remainder)
+ FrameCounter++;
+ *count = FrameCounter;
+ return 0;
+}
+
+
+
+/*** GLX_SGI_make_current_read ***/
+
+Bool
+glXMakeCurrentReadSGI(Display *dpy, GLXDrawable draw, GLXDrawable read, GLXContext ctx)
+{
+ return glXMakeContextCurrent( dpy, draw, read, ctx );
+}
+
+/* not used
+static GLXDrawable
+glXGetCurrentReadDrawableSGI(void)
+{
+ return 0;
+}
+*/
+
+
+/*** GLX_SGIX_video_source ***/
+#if defined(_VL_H)
+
+GLXVideoSourceSGIX
+glXCreateGLXVideoSourceSGIX(Display *dpy, int screen, VLServer server, VLPath path, int nodeClass, VLNode drainNode)
+{
+ (void) dpy;
+ (void) screen;
+ (void) server;
+ (void) path;
+ (void) nodeClass;
+ (void) drainNode;
+ return 0;
+}
+
+void
+glXDestroyGLXVideoSourceSGIX(Display *dpy, GLXVideoSourceSGIX src)
+{
+ (void) dpy;
+ (void) src;
+}
+
+#endif
+
+
+/*** GLX_EXT_import_context ***/
+
+void
+glXFreeContextEXT(Display *dpy, GLXContext context)
+{
+ (void) dpy;
+ (void) context;
+}
+
+GLXContextID
+glXGetContextIDEXT(const GLXContext context)
+{
+ (void) context;
+ return 0;
+}
+
+GLXContext
+glXImportContextEXT(Display *dpy, GLXContextID contextID)
+{
+ (void) dpy;
+ (void) contextID;
+ return 0;
+}
+
+int
+glXQueryContextInfoEXT(Display *dpy, GLXContext context, int attribute, int *value)
+{
+ (void) dpy;
+ (void) context;
+ (void) attribute;
+ (void) value;
+ return 0;
+}
+
+
+
+/*** GLX_SGIX_fbconfig ***/
+
+int
+glXGetFBConfigAttribSGIX(Display *dpy, GLXFBConfigSGIX config, int attribute, int *value)
+{
+ return glXGetFBConfigAttrib(dpy, config, attribute, value);
+}
+
+GLXFBConfigSGIX *
+glXChooseFBConfigSGIX(Display *dpy, int screen, int *attrib_list, int *nelements)
+{
+ return (GLXFBConfig *) glXChooseFBConfig(dpy, screen, attrib_list, nelements);
+}
+
+
+GLXPixmap
+glXCreateGLXPixmapWithConfigSGIX(Display *dpy, GLXFBConfigSGIX config, Pixmap pixmap)
+{
+ XMesaVisual xmvis = (XMesaVisual) config;
+   XMesaBuffer xmbuf = XMesaCreatePixmapBuffer(xmvis, pixmap, 0);
+   if (!xmbuf)
+      return 0;
+   return xmbuf->drawable; /* need to return an X ID */
+}
+
+
+GLXContext
+glXCreateContextWithConfigSGIX(Display *dpy, GLXFBConfigSGIX config, int render_type, GLXContext share_list, Bool direct)
+{
+ XMesaVisual xmvis = (XMesaVisual) config;
+ GLXContext glxCtx;
+ GLXContext shareCtx = share_list;
+
+ glxCtx = CALLOC_STRUCT(__GLXcontextRec);
+ if (!glxCtx)
+ return 0;
+
+ /* deallocate unused windows/buffers */
+ XMesaGarbageCollect();
+
+ glxCtx->xmesaContext = XMesaCreateContext(xmvis,
+ shareCtx ? shareCtx->xmesaContext : NULL);
+ if (!glxCtx->xmesaContext) {
+ _mesa_free(glxCtx);
+ return NULL;
+ }
+
+ glxCtx->isDirect = DEFAULT_DIRECT;
+ glxCtx->currentDpy = dpy;
+ glxCtx->xid = (XID) glxCtx; /* self pointer */
+
+ return glxCtx;
+}
+
+
+XVisualInfo *
+glXGetVisualFromFBConfigSGIX(Display *dpy, GLXFBConfigSGIX config)
+{
+ return glXGetVisualFromFBConfig(dpy, config);
+}
+
+
+GLXFBConfigSGIX
+glXGetFBConfigFromVisualSGIX(Display *dpy, XVisualInfo *vis)
+{
+ XMesaVisual xmvis = find_glx_visual(dpy, vis);
+ if (!xmvis) {
+ /* This visual wasn't found with glXChooseVisual() */
+ xmvis = create_glx_visual(dpy, vis);
+ }
+
+ return (GLXFBConfigSGIX) xmvis;
+}
+
+
+
+/*** GLX_SGIX_pbuffer ***/
+
+GLXPbufferSGIX
+glXCreateGLXPbufferSGIX(Display *dpy, GLXFBConfigSGIX config,
+ unsigned int width, unsigned int height,
+ int *attribList)
+{
+ XMesaVisual xmvis = (XMesaVisual) config;
+ XMesaBuffer xmbuf;
+ const int *attrib;
+ GLboolean useLargest = GL_FALSE, preserveContents = GL_FALSE;
+
+ (void) dpy;
+
+ for (attrib = attribList; attrib && *attrib; attrib++) {
+ switch (*attrib) {
+ case GLX_PRESERVED_CONTENTS_SGIX:
+ attrib++;
+ preserveContents = *attrib; /* ignored */
+ break;
+ case GLX_LARGEST_PBUFFER_SGIX:
+ attrib++;
+ useLargest = *attrib; /* ignored */
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ /* not used at this time */
+ (void) useLargest;
+ (void) preserveContents;
+
+ xmbuf = XMesaCreatePBuffer( xmvis, 0, width, height);
+ /* A GLXPbuffer handle must be an X Drawable because that's what
+ * glXMakeCurrent takes.
+ */
+   if (!xmbuf)
+      return 0;
+   return (GLXPbuffer) xmbuf->drawable;
+}
+
+
+void
+glXDestroyGLXPbufferSGIX(Display *dpy, GLXPbufferSGIX pbuf)
+{
+ XMesaBuffer xmbuf = XMesaFindBuffer(dpy, pbuf);
+ if (xmbuf) {
+ XMesaDestroyBuffer(xmbuf);
+ }
+}
+
+
+int
+glXQueryGLXPbufferSGIX(Display *dpy, GLXPbufferSGIX pbuf, int attribute, unsigned int *value)
+{
+ const XMesaBuffer xmbuf = XMesaFindBuffer(dpy, pbuf);
+
+ if (!xmbuf) {
+ /* Generate GLXBadPbufferSGIX for bad pbuffer */
+ return 0;
+ }
+
+ switch (attribute) {
+ case GLX_PRESERVED_CONTENTS_SGIX:
+ *value = True;
+ break;
+ case GLX_LARGEST_PBUFFER_SGIX:
+ *value = xmesa_buffer_width(xmbuf) * xmesa_buffer_height(xmbuf);
+ break;
+ case GLX_WIDTH_SGIX:
+ *value = xmesa_buffer_width(xmbuf);
+ break;
+ case GLX_HEIGHT_SGIX:
+ *value = xmesa_buffer_height(xmbuf);
+ break;
+ case GLX_EVENT_MASK_SGIX:
+ *value = 0; /* XXX might be wrong */
+ break;
+ default:
+ *value = 0;
+ }
+ return 0;
+}
+
+
+void
+glXSelectEventSGIX(Display *dpy, GLXDrawable drawable, unsigned long mask)
+{
+ XMesaBuffer xmbuf = XMesaFindBuffer(dpy, drawable);
+ if (xmbuf) {
+ /* Note: we'll never generate clobber events */
+ xmbuf->selectedEvents = mask;
+ }
+}
+
+
+void
+glXGetSelectedEventSGIX(Display *dpy, GLXDrawable drawable, unsigned long *mask)
+{
+ XMesaBuffer xmbuf = XMesaFindBuffer(dpy, drawable);
+ if (xmbuf) {
+ *mask = xmbuf->selectedEvents;
+ }
+ else {
+ *mask = 0;
+ }
+}
+
+
+
+/*** GLX_SGI_cushion ***/
+
+void
+glXCushionSGI(Display *dpy, Window win, float cushion)
+{
+ (void) dpy;
+ (void) win;
+ (void) cushion;
+}
+
+
+
+/*** GLX_SGIX_video_resize ***/
+
+int
+glXBindChannelToWindowSGIX(Display *dpy, int screen, int channel , Window window)
+{
+ (void) dpy;
+ (void) screen;
+ (void) channel;
+ (void) window;
+ return 0;
+}
+
+int
+glXChannelRectSGIX(Display *dpy, int screen, int channel, int x, int y, int w, int h)
+{
+ (void) dpy;
+ (void) screen;
+ (void) channel;
+ (void) x;
+ (void) y;
+ (void) w;
+ (void) h;
+ return 0;
+}
+
+int
+glXQueryChannelRectSGIX(Display *dpy, int screen, int channel, int *x, int *y, int *w, int *h)
+{
+ (void) dpy;
+ (void) screen;
+ (void) channel;
+ (void) x;
+ (void) y;
+ (void) w;
+ (void) h;
+ return 0;
+}
+
+int
+glXQueryChannelDeltasSGIX(Display *dpy, int screen, int channel, int *dx, int *dy, int *dw, int *dh)
+{
+ (void) dpy;
+ (void) screen;
+ (void) channel;
+ (void) dx;
+ (void) dy;
+ (void) dw;
+ (void) dh;
+ return 0;
+}
+
+int
+glXChannelRectSyncSGIX(Display *dpy, int screen, int channel, GLenum synctype)
+{
+ (void) dpy;
+ (void) screen;
+ (void) channel;
+ (void) synctype;
+ return 0;
+}
+
+
+
+/*** GLX_SGIX_dmbuffer **/
+
+#if defined(_DM_BUFFER_H_)
+Bool
+glXAssociateDMPbufferSGIX(Display *dpy, GLXPbufferSGIX pbuffer, DMparams *params, DMbuffer dmbuffer)
+{
+ (void) dpy;
+ (void) pbuffer;
+ (void) params;
+ (void) dmbuffer;
+ return False;
+}
+#endif
+
+
+/*** GLX_SGIX_swap_group ***/
+
+void
+glXJoinSwapGroupSGIX(Display *dpy, GLXDrawable drawable, GLXDrawable member)
+{
+ (void) dpy;
+ (void) drawable;
+ (void) member;
+}
+
+
+
+/*** GLX_SGIX_swap_barrier ***/
+
+void
+glXBindSwapBarrierSGIX(Display *dpy, GLXDrawable drawable, int barrier)
+{
+ (void) dpy;
+ (void) drawable;
+ (void) barrier;
+}
+
+Bool
+glXQueryMaxSwapBarriersSGIX(Display *dpy, int screen, int *max)
+{
+ (void) dpy;
+ (void) screen;
+ (void) max;
+ return False;
+}
+
+
+
+/*** GLX_SUN_get_transparent_index ***/
+
+Status
+glXGetTransparentIndexSUN(Display *dpy, Window overlay, Window underlay, long *pTransparent)
+{
+ (void) dpy;
+ (void) overlay;
+ (void) underlay;
+ (void) pTransparent;
+ return 0;
+}
+
+
+
+/*** GLX_MESA_release_buffers ***/
+
+/*
+ * Release the depth, stencil, accum buffers attached to a GLXDrawable
+ * (a window or pixmap) prior to destroying the GLXDrawable.
+ */
+Bool
+glXReleaseBuffersMESA( Display *dpy, GLXDrawable d )
+{
+ XMesaBuffer b = XMesaFindBuffer(dpy, d);
+ if (b) {
+ XMesaDestroyBuffer(b);
+ return True;
+ }
+ return False;
+}
+
+/*** GLX_EXT_texture_from_pixmap ***/
+
+void
+glXBindTexImageEXT(Display *dpy, GLXDrawable drawable, int buffer,
+ const int *attrib_list)
+{
+ XMesaBuffer b = XMesaFindBuffer(dpy, drawable);
+ if (b)
+ XMesaBindTexImage(dpy, b, buffer, attrib_list);
+}
+
+void
+glXReleaseTexImageEXT(Display *dpy, GLXDrawable drawable, int buffer)
+{
+ XMesaBuffer b = XMesaFindBuffer(dpy, drawable);
+ if (b)
+ XMesaReleaseTexImage(dpy, b, buffer);
+}
diff --git a/src/gallium/state_trackers/glx/xlib/glx_getproc.c b/src/gallium/state_trackers/glx/xlib/glx_getproc.c
new file mode 100644
index 0000000000..ca7d88c922
--- /dev/null
+++ b/src/gallium/state_trackers/glx/xlib/glx_getproc.c
@@ -0,0 +1,214 @@
+/*
+ * Mesa 3-D graphics library
+ * Version: 7.6
+ *
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/**
+ * glXGetProcAddress()
+ */
+
+
+#define GLX_GLXEXT_PROTOTYPES
+
+#include <string.h>
+#include "GL/glx.h"
+#include "glapi/glapi.h"
+
+
+struct name_address_pair {
+ const char *Name;
+ __GLXextFuncPtr Address;
+};
+
+
+static struct name_address_pair GLX_functions[] = {
+ /*** GLX_VERSION_1_0 ***/
+ { "glXChooseVisual", (__GLXextFuncPtr) glXChooseVisual },
+ { "glXCopyContext", (__GLXextFuncPtr) glXCopyContext },
+ { "glXCreateContext", (__GLXextFuncPtr) glXCreateContext },
+ { "glXCreateGLXPixmap", (__GLXextFuncPtr) glXCreateGLXPixmap },
+ { "glXDestroyContext", (__GLXextFuncPtr) glXDestroyContext },
+ { "glXDestroyGLXPixmap", (__GLXextFuncPtr) glXDestroyGLXPixmap },
+ { "glXGetConfig", (__GLXextFuncPtr) glXGetConfig },
+ { "glXGetCurrentContext", (__GLXextFuncPtr) glXGetCurrentContext },
+ { "glXGetCurrentDrawable", (__GLXextFuncPtr) glXGetCurrentDrawable },
+ { "glXIsDirect", (__GLXextFuncPtr) glXIsDirect },
+ { "glXMakeCurrent", (__GLXextFuncPtr) glXMakeCurrent },
+ { "glXQueryExtension", (__GLXextFuncPtr) glXQueryExtension },
+ { "glXQueryVersion", (__GLXextFuncPtr) glXQueryVersion },
+ { "glXSwapBuffers", (__GLXextFuncPtr) glXSwapBuffers },
+ { "glXUseXFont", (__GLXextFuncPtr) glXUseXFont },
+ { "glXWaitGL", (__GLXextFuncPtr) glXWaitGL },
+ { "glXWaitX", (__GLXextFuncPtr) glXWaitX },
+
+ /*** GLX_VERSION_1_1 ***/
+ { "glXGetClientString", (__GLXextFuncPtr) glXGetClientString },
+ { "glXQueryExtensionsString", (__GLXextFuncPtr) glXQueryExtensionsString },
+ { "glXQueryServerString", (__GLXextFuncPtr) glXQueryServerString },
+
+ /*** GLX_VERSION_1_2 ***/
+ { "glXGetCurrentDisplay", (__GLXextFuncPtr) glXGetCurrentDisplay },
+
+ /*** GLX_VERSION_1_3 ***/
+ { "glXChooseFBConfig", (__GLXextFuncPtr) glXChooseFBConfig },
+ { "glXCreateNewContext", (__GLXextFuncPtr) glXCreateNewContext },
+ { "glXCreatePbuffer", (__GLXextFuncPtr) glXCreatePbuffer },
+ { "glXCreatePixmap", (__GLXextFuncPtr) glXCreatePixmap },
+ { "glXCreateWindow", (__GLXextFuncPtr) glXCreateWindow },
+ { "glXDestroyPbuffer", (__GLXextFuncPtr) glXDestroyPbuffer },
+ { "glXDestroyPixmap", (__GLXextFuncPtr) glXDestroyPixmap },
+ { "glXDestroyWindow", (__GLXextFuncPtr) glXDestroyWindow },
+ { "glXGetCurrentReadDrawable", (__GLXextFuncPtr) glXGetCurrentReadDrawable },
+ { "glXGetFBConfigAttrib", (__GLXextFuncPtr) glXGetFBConfigAttrib },
+ { "glXGetFBConfigs", (__GLXextFuncPtr) glXGetFBConfigs },
+ { "glXGetSelectedEvent", (__GLXextFuncPtr) glXGetSelectedEvent },
+ { "glXGetVisualFromFBConfig", (__GLXextFuncPtr) glXGetVisualFromFBConfig },
+ { "glXMakeContextCurrent", (__GLXextFuncPtr) glXMakeContextCurrent },
+ { "glXQueryContext", (__GLXextFuncPtr) glXQueryContext },
+ { "glXQueryDrawable", (__GLXextFuncPtr) glXQueryDrawable },
+ { "glXSelectEvent", (__GLXextFuncPtr) glXSelectEvent },
+
+ /*** GLX_VERSION_1_4 ***/
+ { "glXGetProcAddress", (__GLXextFuncPtr) glXGetProcAddress },
+
+ /*** GLX_SGI_swap_control ***/
+ { "glXSwapIntervalSGI", (__GLXextFuncPtr) glXSwapIntervalSGI },
+
+ /*** GLX_SGI_video_sync ***/
+ { "glXGetVideoSyncSGI", (__GLXextFuncPtr) glXGetVideoSyncSGI },
+ { "glXWaitVideoSyncSGI", (__GLXextFuncPtr) glXWaitVideoSyncSGI },
+
+ /*** GLX_SGI_make_current_read ***/
+ { "glXMakeCurrentReadSGI", (__GLXextFuncPtr) glXMakeCurrentReadSGI },
+ { "glXGetCurrentReadDrawableSGI", (__GLXextFuncPtr) glXGetCurrentReadDrawableSGI },
+
+ /*** GLX_SGIX_video_source ***/
+#if defined(_VL_H)
+ { "glXCreateGLXVideoSourceSGIX", (__GLXextFuncPtr) glXCreateGLXVideoSourceSGIX },
+ { "glXDestroyGLXVideoSourceSGIX", (__GLXextFuncPtr) glXDestroyGLXVideoSourceSGIX },
+#endif
+
+ /*** GLX_EXT_import_context ***/
+ { "glXFreeContextEXT", (__GLXextFuncPtr) glXFreeContextEXT },
+ { "glXGetContextIDEXT", (__GLXextFuncPtr) glXGetContextIDEXT },
+ { "glXGetCurrentDisplayEXT", (__GLXextFuncPtr) glXGetCurrentDisplayEXT },
+ { "glXImportContextEXT", (__GLXextFuncPtr) glXImportContextEXT },
+ { "glXQueryContextInfoEXT", (__GLXextFuncPtr) glXQueryContextInfoEXT },
+
+ /*** GLX_SGIX_fbconfig ***/
+ { "glXGetFBConfigAttribSGIX", (__GLXextFuncPtr) glXGetFBConfigAttribSGIX },
+ { "glXChooseFBConfigSGIX", (__GLXextFuncPtr) glXChooseFBConfigSGIX },
+ { "glXCreateGLXPixmapWithConfigSGIX", (__GLXextFuncPtr) glXCreateGLXPixmapWithConfigSGIX },
+ { "glXCreateContextWithConfigSGIX", (__GLXextFuncPtr) glXCreateContextWithConfigSGIX },
+ { "glXGetVisualFromFBConfigSGIX", (__GLXextFuncPtr) glXGetVisualFromFBConfigSGIX },
+ { "glXGetFBConfigFromVisualSGIX", (__GLXextFuncPtr) glXGetFBConfigFromVisualSGIX },
+
+ /*** GLX_SGIX_pbuffer ***/
+ { "glXCreateGLXPbufferSGIX", (__GLXextFuncPtr) glXCreateGLXPbufferSGIX },
+ { "glXDestroyGLXPbufferSGIX", (__GLXextFuncPtr) glXDestroyGLXPbufferSGIX },
+ { "glXQueryGLXPbufferSGIX", (__GLXextFuncPtr) glXQueryGLXPbufferSGIX },
+ { "glXSelectEventSGIX", (__GLXextFuncPtr) glXSelectEventSGIX },
+ { "glXGetSelectedEventSGIX", (__GLXextFuncPtr) glXGetSelectedEventSGIX },
+
+ /*** GLX_SGI_cushion ***/
+ { "glXCushionSGI", (__GLXextFuncPtr) glXCushionSGI },
+
+ /*** GLX_SGIX_video_resize ***/
+ { "glXBindChannelToWindowSGIX", (__GLXextFuncPtr) glXBindChannelToWindowSGIX },
+ { "glXChannelRectSGIX", (__GLXextFuncPtr) glXChannelRectSGIX },
+ { "glXQueryChannelRectSGIX", (__GLXextFuncPtr) glXQueryChannelRectSGIX },
+ { "glXQueryChannelDeltasSGIX", (__GLXextFuncPtr) glXQueryChannelDeltasSGIX },
+ { "glXChannelRectSyncSGIX", (__GLXextFuncPtr) glXChannelRectSyncSGIX },
+
+ /*** GLX_SGIX_dmbuffer **/
+#if defined(_DM_BUFFER_H_)
+ { "glXAssociateDMPbufferSGIX", (__GLXextFuncPtr) glXAssociateDMPbufferSGIX },
+#endif
+
+ /*** GLX_SGIX_swap_group ***/
+ { "glXJoinSwapGroupSGIX", (__GLXextFuncPtr) glXJoinSwapGroupSGIX },
+
+ /*** GLX_SGIX_swap_barrier ***/
+ { "glXBindSwapBarrierSGIX", (__GLXextFuncPtr) glXBindSwapBarrierSGIX },
+ { "glXQueryMaxSwapBarriersSGIX", (__GLXextFuncPtr) glXQueryMaxSwapBarriersSGIX },
+
+ /*** GLX_SUN_get_transparent_index ***/
+ { "glXGetTransparentIndexSUN", (__GLXextFuncPtr) glXGetTransparentIndexSUN },
+
+ /*** GLX_MESA_copy_sub_buffer ***/
+ { "glXCopySubBufferMESA", (__GLXextFuncPtr) glXCopySubBufferMESA },
+
+ /*** GLX_MESA_pixmap_colormap ***/
+ { "glXCreateGLXPixmapMESA", (__GLXextFuncPtr) glXCreateGLXPixmapMESA },
+
+ /*** GLX_MESA_release_buffers ***/
+ { "glXReleaseBuffersMESA", (__GLXextFuncPtr) glXReleaseBuffersMESA },
+
+ /*** GLX_ARB_get_proc_address ***/
+ { "glXGetProcAddressARB", (__GLXextFuncPtr) glXGetProcAddressARB },
+
+ /*** GLX_EXT_texture_from_pixmap ***/
+ { "glXBindTexImageEXT", (__GLXextFuncPtr) glXBindTexImageEXT },
+ { "glXReleaseTexImageEXT", (__GLXextFuncPtr) glXReleaseTexImageEXT },
+
+ { NULL, NULL } /* end of list */
+};
+
+
+
+/**
+ * Return address of named glX function, or NULL if not found.
+ */
+static __GLXextFuncPtr
+_glxapi_get_proc_address(const char *funcName)
+{
+ GLuint i;
+ for (i = 0; GLX_functions[i].Name; i++) {
+ if (strcmp(GLX_functions[i].Name, funcName) == 0)
+ return GLX_functions[i].Address;
+ }
+ return NULL;
+}
+
+
+__GLXextFuncPtr
+glXGetProcAddressARB(const GLubyte *procName)
+{
+ __GLXextFuncPtr f;
+
+ f = _glxapi_get_proc_address((const char *) procName);
+ if (f) {
+ return f;
+ }
+
+ f = (__GLXextFuncPtr) _glapi_get_proc_address((const char *) procName);
+ return f;
+}
+
+
+/* GLX 1.4 */
+void (*glXGetProcAddress(const GLubyte *procName))()
+{
+ return glXGetProcAddressARB(procName);
+}
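+
+
+/*
+ * Illustrative sketch (not built): how an application typically resolves an
+ * extension entry point through the lookup table above.  The helper name is
+ * an example only; the typedef comes from GL/glxext.h.
+ */
+#if 0
+static void
+example_resolve_swap_interval(void)
+{
+   PFNGLXSWAPINTERVALSGIPROC swap_interval = (PFNGLXSWAPINTERVALSGIPROC)
+      glXGetProcAddressARB((const GLubyte *) "glXSwapIntervalSGI");
+   if (swap_interval)
+      (void) swap_interval(1);   /* sync buffer swaps to vertical retrace */
+}
+#endif
+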
diff --git a/src/gallium/state_trackers/glx/xlib/glx_usefont.c b/src/gallium/state_trackers/glx/xlib/glx_usefont.c
new file mode 100644
index 0000000000..acc64df62b
--- /dev/null
+++ b/src/gallium/state_trackers/glx/xlib/glx_usefont.c
@@ -0,0 +1,375 @@
+/*
+ * Mesa 3-D graphics library
+ * Version: 7.6
+ *
+ * Copyright (C) 1995 Thorsten.Ohl @ Physik.TH-Darmstadt.de
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/**
+ * Fake implementation of glXUseXFont().
+ */
+
+
+#include "main/context.h"
+#include "main/imports.h"
+#include <GL/glx.h>
+
+
+/* Some debugging info. */
+
+#ifdef DEBUG
+#undef _R
+#undef _G
+#undef _B
+#include <ctype.h>
+
+int debug_xfonts = 0;
+
+static void
+dump_char_struct(XCharStruct * ch, char *prefix)
+{
+ printf("%slbearing = %d, rbearing = %d, width = %d\n",
+ prefix, ch->lbearing, ch->rbearing, ch->width);
+ printf("%sascent = %d, descent = %d, attributes = %u\n",
+ prefix, ch->ascent, ch->descent, (unsigned int) ch->attributes);
+}
+
+static void
+dump_font_struct(XFontStruct * font)
+{
+ printf("ascent = %d, descent = %d\n", font->ascent, font->descent);
+ printf("char_or_byte2 = (%u,%u)\n",
+ font->min_char_or_byte2, font->max_char_or_byte2);
+ printf("byte1 = (%u,%u)\n", font->min_byte1, font->max_byte1);
+ printf("all_chars_exist = %s\n", font->all_chars_exist ? "True" : "False");
+ printf("default_char = %c (\\%03o)\n",
+ (char) (isprint(font->default_char) ? font->default_char : ' '),
+ font->default_char);
+ dump_char_struct(&font->min_bounds, "min> ");
+ dump_char_struct(&font->max_bounds, "max> ");
+#if 0
+ for (c = font->min_char_or_byte2; c <= font->max_char_or_byte2; c++) {
+ char prefix[8];
+ sprintf(prefix, "%d> ", c);
+ dump_char_struct(&font->per_char[c], prefix);
+ }
+#endif
+}
+
+static void
+dump_bitmap(unsigned int width, unsigned int height, GLubyte * bitmap)
+{
+ unsigned int x, y;
+
+ printf(" ");
+ for (x = 0; x < 8 * width; x++)
+ printf("%o", 7 - (x % 8));
+ putchar('\n');
+ for (y = 0; y < height; y++) {
+ printf("%3o:", y);
+      for (x = 0; x < 8 * width; x++)
+         putchar((bitmap[width * (height - y - 1) + x / 8] &
+                  (1 << (7 - (x % 8)))) ? '*' : '.');
+ printf(" ");
+ for (x = 0; x < width; x++)
+ printf("0x%02x, ", bitmap[width * (height - y - 1) + x]);
+ putchar('\n');
+ }
+}
+#endif /* DEBUG */
+
+
+/* Implementation. */
+
+/* Fill a BITMAP with a character C from the current font
+ in the graphics context GC. WIDTH is the width in bytes
+ and HEIGHT is the height in bits.
+
+ Note that the generated bitmaps must be used with
+
+ glPixelStorei (GL_UNPACK_SWAP_BYTES, GL_FALSE);
+ glPixelStorei (GL_UNPACK_LSB_FIRST, GL_FALSE);
+ glPixelStorei (GL_UNPACK_ROW_LENGTH, 0);
+ glPixelStorei (GL_UNPACK_SKIP_ROWS, 0);
+ glPixelStorei (GL_UNPACK_SKIP_PIXELS, 0);
+ glPixelStorei (GL_UNPACK_ALIGNMENT, 1);
+
+ Possible optimizations:
+
+ * use only one reusable pixmap with the maximum dimensions.
+ * draw the entire font into a single pixmap (careful with
+ proportional fonts!).
+*/
+
+
+/*
+ * Generate OpenGL-compatible bitmap.
+ */
+static void
+fill_bitmap(Display * dpy, Window win, GC gc,
+ unsigned int width, unsigned int height,
+ int x0, int y0, unsigned int c, GLubyte * bitmap)
+{
+ XImage *image;
+ unsigned int x, y;
+ Pixmap pixmap;
+ XChar2b char2b;
+
+ pixmap = XCreatePixmap(dpy, win, 8 * width, height, 1);
+ XSetForeground(dpy, gc, 0);
+ XFillRectangle(dpy, pixmap, gc, 0, 0, 8 * width, height);
+ XSetForeground(dpy, gc, 1);
+
+ char2b.byte1 = (c >> 8) & 0xff;
+ char2b.byte2 = (c & 0xff);
+
+ XDrawString16(dpy, pixmap, gc, x0, y0, &char2b, 1);
+
+ image = XGetImage(dpy, pixmap, 0, 0, 8 * width, height, 1, XYPixmap);
+ if (image) {
+ /* Fill the bitmap (X11 and OpenGL are upside down wrt each other). */
+ for (y = 0; y < height; y++)
+ for (x = 0; x < 8 * width; x++)
+ if (XGetPixel(image, x, y))
+ bitmap[width * (height - y - 1) + x / 8] |=
+ (1 << (7 - (x % 8)));
+ XDestroyImage(image);
+ }
+
+ XFreePixmap(dpy, pixmap);
+}
+
+/*
+ * determine if a given glyph is valid and return the
+ * corresponding XCharStruct.
+ */
+static XCharStruct *
+isvalid(XFontStruct * fs, unsigned int which)
+{
+ unsigned int rows, pages;
+ unsigned int byte1 = 0, byte2 = 0;
+ int i, valid = 1;
+
+ rows = fs->max_byte1 - fs->min_byte1 + 1;
+ pages = fs->max_char_or_byte2 - fs->min_char_or_byte2 + 1;
+
+ if (rows == 1) {
+ /* "linear" fonts */
+ if ((fs->min_char_or_byte2 > which) || (fs->max_char_or_byte2 < which))
+ valid = 0;
+ }
+ else {
+ /* "matrix" fonts */
+ byte2 = which & 0xff;
+ byte1 = which >> 8;
+ if ((fs->min_char_or_byte2 > byte2) ||
+ (fs->max_char_or_byte2 < byte2) ||
+ (fs->min_byte1 > byte1) || (fs->max_byte1 < byte1))
+ valid = 0;
+ }
+
+ if (valid) {
+ if (fs->per_char) {
+ if (rows == 1) {
+ /* "linear" fonts */
+ return (fs->per_char + (which - fs->min_char_or_byte2));
+ }
+ else {
+ /* "matrix" fonts */
+ i = ((byte1 - fs->min_byte1) * pages) +
+ (byte2 - fs->min_char_or_byte2);
+ return (fs->per_char + i);
+ }
+ }
+ else {
+ return (&fs->min_bounds);
+ }
+ }
+ return (NULL);
+}
+
+
+void
+glXUseXFont(Font font, int first, int count, int listbase)
+{
+ Display *dpy;
+ Window win;
+ Pixmap pixmap;
+ GC gc;
+ XGCValues values;
+ unsigned long valuemask;
+ XFontStruct *fs;
+ GLint swapbytes, lsbfirst, rowlength;
+ GLint skiprows, skippixels, alignment;
+ unsigned int max_width, max_height, max_bm_width, max_bm_height;
+ GLubyte *bm;
+ int i;
+
+ dpy = glXGetCurrentDisplay();
+ if (!dpy)
+ return; /* I guess glXMakeCurrent wasn't called */
+ i = DefaultScreen(dpy);
+ win = RootWindow(dpy, i);
+
+ fs = XQueryFont(dpy, font);
+ if (!fs) {
+ _mesa_error(NULL, GL_INVALID_VALUE,
+ "Couldn't get font structure information");
+ return;
+ }
+
+ /* Allocate a bitmap that can fit all characters. */
+ max_width = fs->max_bounds.rbearing - fs->min_bounds.lbearing;
+ max_height = fs->max_bounds.ascent + fs->max_bounds.descent;
+ max_bm_width = (max_width + 7) / 8;
+ max_bm_height = max_height;
+
+ bm = (GLubyte *) MALLOC((max_bm_width * max_bm_height) * sizeof(GLubyte));
+ if (!bm) {
+ XFreeFontInfo(NULL, fs, 1);
+ _mesa_error(NULL, GL_OUT_OF_MEMORY,
+ "Couldn't allocate bitmap in glXUseXFont()");
+ return;
+ }
+
+#if 0
+   /* get the page info */
+   unsigned int firstchar, lastchar, pages, rows;
+   pages = fs->max_char_or_byte2 - fs->min_char_or_byte2 + 1;
+   firstchar = (fs->min_byte1 << 8) + fs->min_char_or_byte2;
+   lastchar = (fs->max_byte1 << 8) + fs->max_char_or_byte2;
+   rows = fs->max_byte1 - fs->min_byte1 + 1;
+#endif
+
+ /* Save the current packing mode for bitmaps. */
+ glGetIntegerv(GL_UNPACK_SWAP_BYTES, &swapbytes);
+ glGetIntegerv(GL_UNPACK_LSB_FIRST, &lsbfirst);
+ glGetIntegerv(GL_UNPACK_ROW_LENGTH, &rowlength);
+ glGetIntegerv(GL_UNPACK_SKIP_ROWS, &skiprows);
+ glGetIntegerv(GL_UNPACK_SKIP_PIXELS, &skippixels);
+ glGetIntegerv(GL_UNPACK_ALIGNMENT, &alignment);
+
+ /* Enforce a standard packing mode which is compatible with
+ fill_bitmap() from above. This is actually the default mode,
+ except for the (non)alignment. */
+ glPixelStorei(GL_UNPACK_SWAP_BYTES, GL_FALSE);
+ glPixelStorei(GL_UNPACK_LSB_FIRST, GL_FALSE);
+ glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+ glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
+ glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
+ glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
+
+ pixmap = XCreatePixmap(dpy, win, 10, 10, 1);
+ values.foreground = BlackPixel(dpy, DefaultScreen(dpy));
+ values.background = WhitePixel(dpy, DefaultScreen(dpy));
+ values.font = fs->fid;
+ valuemask = GCForeground | GCBackground | GCFont;
+ gc = XCreateGC(dpy, pixmap, valuemask, &values);
+ XFreePixmap(dpy, pixmap);
+
+#ifdef DEBUG
+ if (debug_xfonts)
+ dump_font_struct(fs);
+#endif
+
+ for (i = 0; i < count; i++) {
+ unsigned int width, height, bm_width, bm_height;
+ GLfloat x0, y0, dx, dy;
+ XCharStruct *ch;
+ int x, y;
+ unsigned int c = first + i;
+ int list = listbase + i;
+ int valid;
+
+ /* check on index validity and get the bounds */
+ ch = isvalid(fs, c);
+ if (!ch) {
+ ch = &fs->max_bounds;
+ valid = 0;
+ }
+ else {
+ valid = 1;
+ }
+
+#ifdef DEBUG
+ if (debug_xfonts) {
+ char s[7];
+ sprintf(s, isprint(c) ? "%c> " : "\\%03o> ", c);
+ dump_char_struct(ch, s);
+ }
+#endif
+
+      /* glBitmap()'s parameters:
+         straight from the glXUseXFont(3) manpage. */
+      width = ch->rbearing - ch->lbearing;
+      height = ch->ascent + ch->descent;
+      x0 = -ch->lbearing;
+      y0 = ch->descent - 0; /* XXX used to subtract 1 here */
+      /* but that caused a conformance failure */
+ dx = ch->width;
+ dy = 0;
+
+ /* X11's starting point. */
+ x = -ch->lbearing;
+ y = ch->ascent;
+
+ /* Round the width to a multiple of eight. We will use this also
+ for the pixmap for capturing the X11 font. This is slightly
+ inefficient, but it makes the OpenGL part real easy. */
+ bm_width = (width + 7) / 8;
+ bm_height = height;
+
+ glNewList(list, GL_COMPILE);
+ if (valid && (bm_width > 0) && (bm_height > 0)) {
+
+ MEMSET(bm, '\0', bm_width * bm_height);
+ fill_bitmap(dpy, win, gc, bm_width, bm_height, x, y, c, bm);
+
+ glBitmap(width, height, x0, y0, dx, dy, bm);
+#ifdef DEBUG
+ if (debug_xfonts) {
+ printf("width/height = %u/%u\n", width, height);
+ printf("bm_width/bm_height = %u/%u\n", bm_width, bm_height);
+ dump_bitmap(bm_width, bm_height, bm);
+ }
+#endif
+ }
+ else {
+ glBitmap(0, 0, 0.0, 0.0, dx, dy, NULL);
+ }
+ glEndList();
+ }
+
+ FREE(bm);
+ XFreeFontInfo(NULL, fs, 1);
+ XFreeGC(dpy, gc);
+
+ /* Restore saved packing modes. */
+ glPixelStorei(GL_UNPACK_SWAP_BYTES, swapbytes);
+ glPixelStorei(GL_UNPACK_LSB_FIRST, lsbfirst);
+ glPixelStorei(GL_UNPACK_ROW_LENGTH, rowlength);
+ glPixelStorei(GL_UNPACK_SKIP_ROWS, skiprows);
+ glPixelStorei(GL_UNPACK_SKIP_PIXELS, skippixels);
+ glPixelStorei(GL_UNPACK_ALIGNMENT, alignment);
+}
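+
+
+/*
+ * Illustrative sketch (not built): typical client-side use of the fake
+ * glXUseXFont() above -- build bitmap display lists for the full 8-bit
+ * character range, then draw a string with glCallLists().  The helper name
+ * and the "fixed" font name are examples only.
+ */
+#if 0
+static void
+example_draw_text(Display *dpy, const char *text)
+{
+   GLuint base = glGenLists(128);
+   Font font = XLoadFont(dpy, "fixed");
+
+   /* compile one bitmap display list per glyph for chars 0..127 */
+   glXUseXFont(font, 0, 128, base);
+   XUnloadFont(dpy, font);
+
+   /* draw at the current raster position */
+   glListBase(base);
+   glCallLists((GLsizei) strlen(text), GL_UNSIGNED_BYTE, text);
+}
+#endif
+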
diff --git a/src/gallium/state_trackers/glx/xlib/xm_api.c b/src/gallium/state_trackers/glx/xlib/xm_api.c
new file mode 100644
index 0000000000..957002ddd5
--- /dev/null
+++ b/src/gallium/state_trackers/glx/xlib/xm_api.c
@@ -0,0 +1,1220 @@
+/*
+ * Mesa 3-D graphics library
+ * Version: 7.1
+ *
+ * Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file xm_api.c
+ *
+ * All the XMesa* API functions.
+ *
+ *
+ * NOTES:
+ *
+ * The window coordinate system origin (0,0) is in the lower-left corner
+ * of the window. X11's window coordinate origin is in the upper-left
+ * corner of the window. Therefore, most drawing functions in this
+ * file have to flip Y coordinates.
+ *
+ * Define USE_XSHM in the Makefile with -DUSE_XSHM if you want to compile
+ * in support for the MIT Shared Memory extension. If enabled, when you
+ * use an Ximage for the back buffer in double buffered mode, the "swap"
+ * operation will be faster. You must also link with -lXext.
+ *
+ * Byte swapping: If the Mesa host and the X display use a different
+ * byte order then there's some trickiness to be aware of when using
+ * XImages. The byte ordering used for the XImage is that of the X
+ * display, not the Mesa host.
+ * The color-to-pixel encoding for True/DirectColor must be done
+ * according to the display's visual red_mask, green_mask, and blue_mask.
+ * If XPutPixel is used to put a pixel into an XImage then XPutPixel will
+ * do byte swapping if needed. If one wants to directly "poke" the pixel
+ * into the XImage's buffer then the pixel must be byte swapped first.
+ *
+ */
+
+#ifdef __CYGWIN__
+#undef WIN32
+#undef __WIN32__
+#endif
+
+#include "xm_api.h"
+#include "main/context.h"
+#include "main/framebuffer.h"
+
+#include "state_tracker/st_public.h"
+#include "state_tracker/st_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_screen.h"
+#include "pipe/p_context.h"
+
+#include "xm_winsys.h"
+#include <GL/glx.h>
+
+
+/* Driver interface routines, set up by xlib backend on library
+ * _init(). These are global in the same way that function names are
+ * global.
+ */
+static struct xm_driver driver;
+
+void xmesa_set_driver( const struct xm_driver *templ )
+{
+ driver = *templ;
+}
+
+/**
+ * Global X driver lock
+ */
+pipe_mutex _xmesa_lock;
+
+
+
+/**********************************************************************/
+/***** X Utility Functions *****/
+/**********************************************************************/
+
+
+/**
+ * Return the host's byte order as LSBFirst or MSBFirst ala X.
+ */
+static int host_byte_order( void )
+{
+ int i = 1;
+ char *cptr = (char *) &i;
+ return (*cptr==1) ? LSBFirst : MSBFirst;
+}
+
+
+/**
+ * Check if the X Shared Memory extension is available.
+ * Return: 0 = not available
+ * 1 = shared XImage support available
+ * 2 = shared Pixmap support available also
+ */
+int xmesa_check_for_xshm( Display *display )
+{
+#if defined(USE_XSHM)
+ int major, minor, ignore;
+ Bool pixmaps;
+
+ if (getenv("SP_NO_RAST"))
+ return 0;
+
+ if (getenv("MESA_NOSHM")) {
+ return 0;
+ }
+
+ if (XQueryExtension( display, "MIT-SHM", &ignore, &ignore, &ignore )) {
+ if (XShmQueryVersion( display, &major, &minor, &pixmaps )==True) {
+ return (pixmaps==True) ? 2 : 1;
+ }
+ else {
+ return 0;
+ }
+ }
+ else {
+ return 0;
+ }
+#else
+ /* No XSHM support */
+ return 0;
+#endif
+}
+
+
+/**
+ * Return the true number of bits per pixel for XImages.
+ * For example, if we request a 24-bit deep visual we may actually need/get
+ * 32bpp XImages. This function returns the appropriate bpp.
+ * Input: dpy - the X display
+ *         visinfo - describes the visual to be used for XImages
+ * Return: true number of bits per pixel for XImages
+ */
+static int
+bits_per_pixel( XMesaVisual xmv )
+{
+ Display *dpy = xmv->display;
+ XVisualInfo * visinfo = xmv->visinfo;
+ XImage *img;
+ int bitsPerPixel;
+ /* Create a temporary XImage */
+ img = XCreateImage( dpy, visinfo->visual, visinfo->depth,
+ ZPixmap, 0, /*format, offset*/
+ (char*) MALLOC(8), /*data*/
+ 1, 1, /*width, height*/
+ 32, /*bitmap_pad*/
+ 0 /*bytes_per_line*/
+ );
+ assert(img);
+ /* grab the bits/pixel value */
+ bitsPerPixel = img->bits_per_pixel;
+ /* free the XImage */
+ _mesa_free( img->data );
+ img->data = NULL;
+ XDestroyImage( img );
+ return bitsPerPixel;
+}
+
+
+
+/*
+ * Determine if a given X window ID is valid (window exists).
+ * Do this by calling XGetWindowAttributes() for the window and
+ * checking if we catch an X error.
+ * Input: dpy - the display
+ *        win - the window to check for existence
+ * Return: GL_TRUE - window exists
+ * GL_FALSE - window doesn't exist
+ */
+static GLboolean WindowExistsFlag;
+
+static int window_exists_err_handler( Display* dpy, XErrorEvent* xerr )
+{
+ (void) dpy;
+ if (xerr->error_code == BadWindow) {
+ WindowExistsFlag = GL_FALSE;
+ }
+ return 0;
+}
+
+static GLboolean window_exists( Display *dpy, Window win )
+{
+ XWindowAttributes wa;
+ int (*old_handler)( Display*, XErrorEvent* );
+ WindowExistsFlag = GL_TRUE;
+ old_handler = XSetErrorHandler(window_exists_err_handler);
+ XGetWindowAttributes( dpy, win, &wa ); /* dummy request */
+ XSetErrorHandler(old_handler);
+ return WindowExistsFlag;
+}
+
+static Status
+get_drawable_size( Display *dpy, Drawable d, uint *width, uint *height )
+{
+ Window root;
+ Status stat;
+ int xpos, ypos;
+ unsigned int w, h, bw, depth;
+ stat = XGetGeometry(dpy, d, &root, &xpos, &ypos, &w, &h, &bw, &depth);
+ *width = w;
+ *height = h;
+ return stat;
+}
+
+
+/**
+ * Return the size of the window (or pixmap) that corresponds to the
+ * given XMesaBuffer.
+ * \param width returns width in pixels
+ * \param height returns height in pixels
+ */
+static void
+xmesa_get_window_size(Display *dpy, XMesaBuffer b,
+ GLuint *width, GLuint *height)
+{
+ Status stat;
+
+ pipe_mutex_lock(_xmesa_lock);
+ XSync(b->xm_visual->display, 0); /* added for Chromium */
+ stat = get_drawable_size(dpy, b->drawable, width, height);
+ pipe_mutex_unlock(_xmesa_lock);
+
+ if (!stat) {
+ /* probably querying a window that's recently been destroyed */
+ _mesa_warning(NULL, "XGetGeometry failed!\n");
+ *width = *height = 1;
+ }
+}
+
+#define GET_REDMASK(__v) __v->mesa_visual.redMask
+#define GET_GREENMASK(__v) __v->mesa_visual.greenMask
+#define GET_BLUEMASK(__v) __v->mesa_visual.blueMask
+
+
+/**
+ * Choose the pixel format for the given visual.
+ * This will tell the gallium driver how to pack pixel data into
+ * drawing surfaces.
+ */
+static GLuint
+choose_pixel_format(XMesaVisual v)
+{
+ boolean native_byte_order = (host_byte_order() ==
+ ImageByteOrder(v->display));
+
+ if ( GET_REDMASK(v) == 0x0000ff
+ && GET_GREENMASK(v) == 0x00ff00
+ && GET_BLUEMASK(v) == 0xff0000
+ && v->BitsPerPixel == 32) {
+ if (native_byte_order) {
+ /* no byteswapping needed */
+ return 0 /* PIXEL_FORMAT_U_A8_B8_G8_R8 */;
+ }
+ else {
+ return PIPE_FORMAT_R8G8B8A8_UNORM;
+ }
+ }
+ else if ( GET_REDMASK(v) == 0xff0000
+ && GET_GREENMASK(v) == 0x00ff00
+ && GET_BLUEMASK(v) == 0x0000ff
+ && v->BitsPerPixel == 32) {
+ if (native_byte_order) {
+ /* no byteswapping needed */
+ return PIPE_FORMAT_A8R8G8B8_UNORM;
+ }
+ else {
+ return PIPE_FORMAT_B8G8R8A8_UNORM;
+ }
+ }
+ else if ( GET_REDMASK(v) == 0x0000ff00
+ && GET_GREENMASK(v) == 0x00ff0000
+ && GET_BLUEMASK(v) == 0xff000000
+ && v->BitsPerPixel == 32) {
+ if (native_byte_order) {
+ /* no byteswapping needed */
+ return PIPE_FORMAT_B8G8R8A8_UNORM;
+ }
+ else {
+ return PIPE_FORMAT_A8R8G8B8_UNORM;
+ }
+ }
+ else if ( GET_REDMASK(v) == 0xf800
+ && GET_GREENMASK(v) == 0x07e0
+ && GET_BLUEMASK(v) == 0x001f
+ && native_byte_order
+ && v->BitsPerPixel == 16) {
+ /* 5-6-5 RGB */
+ return PIPE_FORMAT_R5G6B5_UNORM;
+ }
+
+ assert(0);
+ return 0;
+}
+
+
+
+/**********************************************************************/
+/***** Linked list of XMesaBuffers *****/
+/**********************************************************************/
+
+XMesaBuffer XMesaBufferList = NULL;
+
+
+/**
+ * Allocate a new XMesaBuffer object which corresponds to the given drawable.
+ * Note that XMesaBuffer is derived from GLframebuffer.
+ * The new XMesaBuffer will not have any size (Width=Height=0).
+ *
+ * \param d the corresponding X drawable (window or pixmap)
+ * \param type either WINDOW, PIXMAP or PBUFFER, describing d
+ * \param vis the buffer's visual
+ * \param cmap the window's colormap, if known.
+ * \return new XMesaBuffer or NULL if any problem
+ */
+static XMesaBuffer
+create_xmesa_buffer(Drawable d, BufferType type,
+ XMesaVisual vis, Colormap cmap)
+{
+ XMesaBuffer b;
+ GLframebuffer *fb;
+ enum pipe_format colorFormat, depthFormat, stencilFormat;
+ uint width, height;
+
+ ASSERT(type == WINDOW || type == PIXMAP || type == PBUFFER);
+
+ b = (XMesaBuffer) CALLOC_STRUCT(xmesa_buffer);
+ if (!b)
+ return NULL;
+
+ b->drawable = d;
+
+ b->xm_visual = vis;
+ b->type = type;
+ b->cmap = cmap;
+
+ /* determine PIPE_FORMATs for buffers */
+ colorFormat = choose_pixel_format(vis);
+
+ if (vis->mesa_visual.depthBits == 0)
+ depthFormat = PIPE_FORMAT_NONE;
+#ifdef GALLIUM_CELL /* XXX temporary for Cell! */
+ else
+ depthFormat = PIPE_FORMAT_S8Z24_UNORM;
+#else
+ else if (vis->mesa_visual.depthBits <= 16)
+ depthFormat = PIPE_FORMAT_Z16_UNORM;
+ else if (vis->mesa_visual.depthBits <= 24)
+ depthFormat = PIPE_FORMAT_S8Z24_UNORM;
+ else
+ depthFormat = PIPE_FORMAT_Z32_UNORM;
+#endif
+
+ if (vis->mesa_visual.stencilBits == 8) {
+ if (depthFormat == PIPE_FORMAT_S8Z24_UNORM)
+ stencilFormat = depthFormat;
+ else
+ stencilFormat = PIPE_FORMAT_S8_UNORM;
+ }
+ else {
+ /* no stencil */
+ stencilFormat = PIPE_FORMAT_NONE;
+ if (depthFormat == PIPE_FORMAT_S8Z24_UNORM) {
+ /* use 24-bit Z, undefined stencil channel */
+ depthFormat = PIPE_FORMAT_X8Z24_UNORM;
+ }
+ }
+
+
+ get_drawable_size(vis->display, d, &width, &height);
+
+ /*
+ * Create framebuffer, but we'll plug in our own renderbuffers below.
+ */
+ b->stfb = st_create_framebuffer(&vis->mesa_visual,
+ colorFormat, depthFormat, stencilFormat,
+ width, height,
+ (void *) b);
+ fb = &b->stfb->Base;
+
+ /*
+ * Create scratch XImage for xmesa_display_surface()
+ */
+ b->tempImage = XCreateImage(vis->display,
+ vis->visinfo->visual,
+ vis->visinfo->depth,
+ ZPixmap, 0, /* format, offset */
+ NULL, /* data */
+ 0, 0, /* size */
+ 32, /* bitmap_pad */
+ 0); /* bytes_per_line */
+
+ /* GLX_EXT_texture_from_pixmap */
+ b->TextureTarget = 0;
+ b->TextureFormat = GLX_TEXTURE_FORMAT_NONE_EXT;
+ b->TextureMipmap = 0;
+
+ /* insert buffer into linked list */
+ b->Next = XMesaBufferList;
+ XMesaBufferList = b;
+
+ return b;
+}
+
+
+/**
+ * Find an XMesaBuffer by matching X display and colormap but NOT matching
+ * the notThis buffer.
+ */
+XMesaBuffer
+xmesa_find_buffer(Display *dpy, Colormap cmap, XMesaBuffer notThis)
+{
+ XMesaBuffer b;
+ for (b = XMesaBufferList; b; b = b->Next) {
+ if (b->xm_visual->display == dpy &&
+ b->cmap == cmap &&
+ b != notThis) {
+ return b;
+ }
+ }
+ return NULL;
+}
+
+
+/**
+ * Remove buffer from linked list, delete if no longer referenced.
+ */
+static void
+xmesa_free_buffer(XMesaBuffer buffer)
+{
+ XMesaBuffer prev = NULL, b;
+
+ for (b = XMesaBufferList; b; b = b->Next) {
+ if (b == buffer) {
+ struct gl_framebuffer *fb = &buffer->stfb->Base;
+
+ /* unlink buffer from list */
+ if (prev)
+ prev->Next = buffer->Next;
+ else
+ XMesaBufferList = buffer->Next;
+
+ /* mark as delete pending */
+ fb->DeletePending = GL_TRUE;
+
+ /* Since the X window for the XMesaBuffer is going away, we don't
+ * want to dereference this pointer in the future.
+ */
+ b->drawable = 0;
+
+ buffer->tempImage->data = NULL;
+ XDestroyImage(buffer->tempImage);
+
+ /* Unreference. If count = zero we'll really delete the buffer */
+ _mesa_reference_framebuffer(&fb, NULL);
+
+ XFreeGC(b->xm_visual->display, b->gc);
+
+ free(buffer);
+
+ return;
+ }
+ /* continue search */
+ prev = b;
+ }
+ /* buffer not found in XMesaBufferList */
+ _mesa_problem(NULL,"xmesa_free_buffer() - buffer not found\n");
+}
+
+
+
+/**********************************************************************/
+/***** Misc Private Functions *****/
+/**********************************************************************/
+
+
+/**
+ * When a context is bound for the first time, we can finally finish
+ * initializing the context's visual and buffer information.
+ * \param v the XMesaVisual to initialize
+ * \param b the XMesaBuffer to initialize (may be NULL)
+ * \param rgb_flag TRUE = RGBA mode, FALSE = color index mode
+ * \param window the window/pixmap we're rendering into
+ * \param cmap the colormap associated with the window/pixmap
+ * \return GL_TRUE=success, GL_FALSE=failure
+ */
+static GLboolean
+initialize_visual_and_buffer(XMesaVisual v, XMesaBuffer b,
+ GLboolean rgb_flag, Drawable window,
+ Colormap cmap)
+{
+ ASSERT(!b || b->xm_visual == v);
+
+ /* Save true bits/pixel */
+ v->BitsPerPixel = bits_per_pixel(v);
+ assert(v->BitsPerPixel > 0);
+
+ if (rgb_flag == GL_FALSE) {
+      /* COLOR-INDEXED WINDOW: not supported */
+ return GL_FALSE;
+ }
+ else {
+ /* RGB WINDOW:
+ * We support RGB rendering into almost any kind of visual.
+ */
+ const int xclass = v->mesa_visual.visualType;
+      if (xclass != GLX_TRUE_COLOR && xclass != GLX_DIRECT_COLOR) {
+ _mesa_warning(NULL,
+ "XMesa: RGB mode rendering not supported in given visual.\n");
+ return GL_FALSE;
+ }
+ v->mesa_visual.indexBits = 0;
+
+ if (v->BitsPerPixel == 32) {
+ /* We use XImages for all front/back buffers. If an X Window or
+ * X Pixmap is 32bpp, there's no guarantee that the alpha channel
+ * will be preserved. For XImages we're in luck.
+ */
+ v->mesa_visual.alphaBits = 8;
+ }
+ }
+
+ /*
+ * If MESA_INFO env var is set print out some debugging info
+ * which can help Brian figure out what's going on when a user
+ * reports bugs.
+ */
+ if (_mesa_getenv("MESA_INFO")) {
+ _mesa_printf("X/Mesa visual = %p\n", (void *) v);
+ _mesa_printf("X/Mesa level = %d\n", v->mesa_visual.level);
+ _mesa_printf("X/Mesa depth = %d\n", v->visinfo->depth);
+ _mesa_printf("X/Mesa bits per pixel = %d\n", v->BitsPerPixel);
+ }
+
+ if (b && window) {
+ /* these should have been set in create_xmesa_buffer */
+ ASSERT(b->drawable == window);
+
+ /* Setup for single/double buffering */
+ if (v->mesa_visual.doubleBufferMode) {
+ /* Double buffered */
+ b->shm = xmesa_check_for_xshm( v->display );
+ }
+
+ /* X11 graphics context */
+ b->gc = XCreateGC( v->display, window, 0, NULL );
+ XSetFunction( v->display, b->gc, GXcopy );
+ }
+
+ return GL_TRUE;
+}
+
+
+
+#define NUM_VISUAL_TYPES 6
+
+/**
+ * Convert an X visual type to a GLX visual type.
+ *
+ * \param visualType X visual type (i.e., \c TrueColor, \c StaticGray, etc.)
+ * to be converted.
+ * \return If \c visualType is a valid X visual type, a GLX visual type will
+ * be returned. Otherwise \c GLX_NONE will be returned.
+ *
+ * \note
+ * This code was lifted directly from lib/GL/glx/glcontextmodes.c in the
+ * DRI CVS tree.
+ */
+static GLint
+xmesa_convert_from_x_visual_type( int visualType )
+{
+ static const int glx_visual_types[ NUM_VISUAL_TYPES ] = {
+ GLX_STATIC_GRAY, GLX_GRAY_SCALE,
+ GLX_STATIC_COLOR, GLX_PSEUDO_COLOR,
+ GLX_TRUE_COLOR, GLX_DIRECT_COLOR
+ };
+
+ return ( (unsigned) visualType < NUM_VISUAL_TYPES )
+ ? glx_visual_types[ visualType ] : GLX_NONE;
+}
+
+
+/**********************************************************************/
+/***** Public Functions *****/
+/**********************************************************************/
+
+
+/*
+ * Create a new X/Mesa visual.
+ * Input: display - X11 display
+ * visinfo - an XVisualInfo pointer
+ * rgb_flag - GL_TRUE = RGB mode,
+ * GL_FALSE = color index mode
+ * alpha_flag - alpha buffer requested?
+ * db_flag - GL_TRUE = double-buffered,
+ * GL_FALSE = single buffered
+ * stereo_flag - stereo visual?
+ * ximage_flag - GL_TRUE = use an XImage for back buffer,
+ * GL_FALSE = use an off-screen pixmap for back buffer
+ * depth_size - requested bits/depth values, or zero
+ * stencil_size - requested bits/stencil values, or zero
+ * accum_red_size - requested bits/red accum values, or zero
+ * accum_green_size - requested bits/green accum values, or zero
+ * accum_blue_size - requested bits/blue accum values, or zero
+ * accum_alpha_size - requested bits/alpha accum values, or zero
+ * num_samples - number of samples/pixel if multisampling, or zero
+ * level - visual level, usually 0
+ * visualCaveat - ala the GLX extension, usually GLX_NONE
+ * Return; a new XMesaVisual or 0 if error.
+ */
+PUBLIC
+XMesaVisual XMesaCreateVisual( Display *display,
+ XVisualInfo * visinfo,
+ GLboolean rgb_flag,
+ GLboolean alpha_flag,
+ GLboolean db_flag,
+ GLboolean stereo_flag,
+ GLboolean ximage_flag,
+ GLint depth_size,
+ GLint stencil_size,
+ GLint accum_red_size,
+ GLint accum_green_size,
+ GLint accum_blue_size,
+ GLint accum_alpha_size,
+ GLint num_samples,
+ GLint level,
+ GLint visualCaveat )
+{
+ XMesaVisual v;
+ GLint red_bits, green_bits, blue_bits, alpha_bits;
+
+ /* For debugging only */
+ if (_mesa_getenv("MESA_XSYNC")) {
+ /* This makes debugging X easier.
+ * In your debugger, set a breakpoint on _XError to stop when an
+ * X protocol error is generated.
+ */
+ XSynchronize( display, 1 );
+ }
+
+ v = (XMesaVisual) CALLOC_STRUCT(xmesa_visual);
+ if (!v) {
+ return NULL;
+ }
+
+ v->display = display;
+
+   /* Save a copy of the XVisualInfo struct because the user may Xfree()
+ * the struct but we may need some of the information contained in it
+ * at a later time.
+ */
+ v->visinfo = (XVisualInfo *) MALLOC(sizeof(*visinfo));
+ if (!v->visinfo) {
+ _mesa_free(v);
+ return NULL;
+ }
+ MEMCPY(v->visinfo, visinfo, sizeof(*visinfo));
+
+ v->ximage_flag = ximage_flag;
+
+ v->mesa_visual.redMask = visinfo->red_mask;
+ v->mesa_visual.greenMask = visinfo->green_mask;
+ v->mesa_visual.blueMask = visinfo->blue_mask;
+ v->mesa_visual.visualID = visinfo->visualid;
+ v->mesa_visual.screen = visinfo->screen;
+
+#if !(defined(__cplusplus) || defined(c_plusplus))
+ v->mesa_visual.visualType = xmesa_convert_from_x_visual_type(visinfo->class);
+#else
+ v->mesa_visual.visualType = xmesa_convert_from_x_visual_type(visinfo->c_class);
+#endif
+
+ v->mesa_visual.visualRating = visualCaveat;
+
+ if (alpha_flag)
+ v->mesa_visual.alphaBits = 8;
+
+ (void) initialize_visual_and_buffer( v, NULL, rgb_flag, 0, 0 );
+
+ {
+ const int xclass = v->mesa_visual.visualType;
+ if (xclass == GLX_TRUE_COLOR || xclass == GLX_DIRECT_COLOR) {
+ red_bits = _mesa_bitcount(GET_REDMASK(v));
+ green_bits = _mesa_bitcount(GET_GREENMASK(v));
+ blue_bits = _mesa_bitcount(GET_BLUEMASK(v));
+ }
+ else {
+ /* this is an approximation */
+ int depth;
+ depth = v->visinfo->depth;
+ red_bits = depth / 3;
+ depth -= red_bits;
+ green_bits = depth / 2;
+ depth -= green_bits;
+ blue_bits = depth;
+ alpha_bits = 0;
+ assert( red_bits + green_bits + blue_bits == v->visinfo->depth );
+ }
+ alpha_bits = v->mesa_visual.alphaBits;
+ }
+
+ _mesa_initialize_visual( &v->mesa_visual,
+ rgb_flag, db_flag, stereo_flag,
+ red_bits, green_bits,
+ blue_bits, alpha_bits,
+ v->mesa_visual.indexBits,
+ depth_size,
+ stencil_size,
+ accum_red_size, accum_green_size,
+ accum_blue_size, accum_alpha_size,
+ 0 );
+
+ /* XXX minor hack */
+ v->mesa_visual.level = level;
+ return v;
+}
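+
+/*
+ * Illustrative sketch (not built): minimal use of the XMesa API above --
+ * find a 24-bit TrueColor visual, wrap it, and create a rendering context.
+ * The helper name, error handling and the depth/stencil sizes are examples
+ * only, not part of this file's API.
+ */
+#if 0
+static XMesaContext
+example_create_context(Display *dpy, int screen)
+{
+   XVisualInfo visinfo;
+   XMesaVisual xmvis;
+
+   if (!XMatchVisualInfo(dpy, screen, 24, TrueColor, &visinfo))
+      return NULL;
+
+   xmvis = XMesaCreateVisual(dpy, &visinfo,
+                             GL_TRUE,      /* rgb_flag */
+                             GL_FALSE,     /* alpha_flag */
+                             GL_TRUE,      /* db_flag */
+                             GL_FALSE,     /* stereo_flag */
+                             GL_TRUE,      /* ximage_flag */
+                             24,           /* depth_size */
+                             8,            /* stencil_size */
+                             0, 0, 0, 0,   /* accum sizes */
+                             0,            /* num_samples */
+                             0,            /* level */
+                             GLX_NONE);    /* visualCaveat */
+   if (!xmvis)
+      return NULL;
+
+   return XMesaCreateContext(xmvis, NULL);
+}
+#endif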
+
+
+PUBLIC
+void XMesaDestroyVisual( XMesaVisual v )
+{
+ _mesa_free(v->visinfo);
+ _mesa_free(v);
+}
+
+
+
+/**
+ * Create a new XMesaContext.
+ * \param v the XMesaVisual
+ * \param share_list another XMesaContext with which to share display
+ * lists or NULL if no sharing is wanted.
+ * \return an XMesaContext or NULL if error.
+ */
+PUBLIC
+XMesaContext XMesaCreateContext( XMesaVisual v, XMesaContext share_list )
+{
+ static GLboolean firstTime = GL_TRUE;
+ static struct pipe_screen *screen = NULL;
+ struct pipe_context *pipe = NULL;
+ XMesaContext c;
+ GLcontext *mesaCtx;
+ uint pf;
+
+ if (firstTime) {
+ pipe_mutex_init(_xmesa_lock);
+ screen = driver.create_pipe_screen();
+ firstTime = GL_FALSE;
+ }
+
+ /* Note: the XMesaContext contains a Mesa GLcontext struct (inheritance) */
+ c = (XMesaContext) CALLOC_STRUCT(xmesa_context);
+ if (!c)
+ return NULL;
+
+ pf = choose_pixel_format(v);
+ assert(pf);
+
+ c->xm_visual = v;
+ c->xm_buffer = NULL; /* set later by XMesaMakeCurrent */
+ c->xm_read_buffer = NULL;
+
+ if (screen == NULL)
+ goto fail;
+
+ pipe = driver.create_pipe_context(screen, (void *) c);
+ if (pipe == NULL)
+ goto fail;
+
+ c->st = st_create_context(pipe,
+ &v->mesa_visual,
+ share_list ? share_list->st : NULL);
+ if (c->st == NULL)
+ goto fail;
+
+ mesaCtx = c->st->ctx;
+ c->st->ctx->DriverCtx = c;
+
+ return c;
+
+fail:
+ if (c->st)
+ st_destroy_context(c->st);
+ else if (pipe)
+ pipe->destroy(pipe);
+
+ _mesa_free(c);
+ return NULL;
+}
+
+
+
+PUBLIC
+void XMesaDestroyContext( XMesaContext c )
+{
+ st_destroy_context(c->st);
+
+ /* FIXME: We should destroy the screen here, but if we do so, surfaces may
+ * outlive it, causing segfaults
+ struct pipe_screen *screen = c->st->pipe->screen;
+ screen->destroy(screen);
+ */
+
+ _mesa_free(c);
+}
+
+
+
+/**
+ * Private function for creating an XMesaBuffer which corresponds to an
+ * X window or pixmap.
+ * \param v the window's XMesaVisual
+ * \param w the window we're wrapping
+ * \return new XMesaBuffer or NULL if error
+ */
+PUBLIC XMesaBuffer
+XMesaCreateWindowBuffer(XMesaVisual v, Window w)
+{
+ XWindowAttributes attr;
+ XMesaBuffer b;
+ Colormap cmap;
+ int depth;
+
+ assert(v);
+ assert(w);
+
+ /* Check that window depth matches visual depth */
+ XGetWindowAttributes( v->display, w, &attr );
+ depth = attr.depth;
+ if (v->visinfo->depth != depth) {
+ _mesa_warning(NULL, "XMesaCreateWindowBuffer: depth mismatch between visual (%d) and window (%d)!\n",
+ v->visinfo->depth, depth);
+ return NULL;
+ }
+
+ /* Find colormap */
+ if (attr.colormap) {
+ cmap = attr.colormap;
+ }
+ else {
+ _mesa_warning(NULL, "Window %u has no colormap!\n", (unsigned int) w);
+ /* this is weird, a window w/out a colormap!? */
+ /* OK, let's just allocate a new one and hope for the best */
+ cmap = XCreateColormap(v->display, w, attr.visual, AllocNone);
+ }
+
+ b = create_xmesa_buffer((Drawable) w, WINDOW, v, cmap);
+ if (!b)
+ return NULL;
+
+ if (!initialize_visual_and_buffer( v, b, v->mesa_visual.rgbMode,
+ (Drawable) w, cmap )) {
+ xmesa_free_buffer(b);
+ return NULL;
+ }
+
+ return b;
+}
+
+
+
+/**
+ * Create a new XMesaBuffer from an X pixmap.
+ *
+ * \param v the XMesaVisual
+ * \param p the pixmap
+ * \param cmap the colormap, may be 0 if using a \c GLX_TRUE_COLOR or
+ * \c GLX_DIRECT_COLOR visual for the pixmap
+ * \returns new XMesaBuffer or NULL if error
+ */
+PUBLIC XMesaBuffer
+XMesaCreatePixmapBuffer(XMesaVisual v, Pixmap p, Colormap cmap)
+{
+ XMesaBuffer b;
+
+ assert(v);
+
+ b = create_xmesa_buffer((Drawable) p, PIXMAP, v, cmap);
+ if (!b)
+ return NULL;
+
+ if (!initialize_visual_and_buffer(v, b, v->mesa_visual.rgbMode,
+ (Drawable) p, cmap)) {
+ xmesa_free_buffer(b);
+ return NULL;
+ }
+
+ return b;
+}
+
+
+/**
+ * For GLX_EXT_texture_from_pixmap
+ */
+XMesaBuffer
+XMesaCreatePixmapTextureBuffer(XMesaVisual v, Pixmap p,
+ Colormap cmap,
+ int format, int target, int mipmap)
+{
+ GET_CURRENT_CONTEXT(ctx);
+ XMesaBuffer b;
+ GLuint width, height;
+
+ assert(v);
+
+ b = create_xmesa_buffer((Drawable) p, PIXMAP, v, cmap);
+ if (!b)
+ return NULL;
+
+ /* get pixmap size, update framebuffer/renderbuffer dims */
+ xmesa_get_window_size(v->display, b, &width, &height);
+ _mesa_resize_framebuffer(NULL, &(b->stfb->Base), width, height);
+
+ if (target == 0) {
+ /* examine dims */
+ if (ctx->Extensions.ARB_texture_non_power_of_two) {
+ target = GLX_TEXTURE_2D_EXT;
+ }
+ else if ( _mesa_bitcount(width) == 1
+ && _mesa_bitcount(height) == 1) {
+ /* power of two size */
+ if (height == 1) {
+ target = GLX_TEXTURE_1D_EXT;
+ }
+ else {
+ target = GLX_TEXTURE_2D_EXT;
+ }
+ }
+ else if (ctx->Extensions.NV_texture_rectangle) {
+ target = GLX_TEXTURE_RECTANGLE_EXT;
+ }
+ else {
+ /* non power of two textures not supported */
+ XMesaDestroyBuffer(b);
+ return 0;
+ }
+ }
+
+ b->TextureTarget = target;
+ b->TextureFormat = format;
+ b->TextureMipmap = mipmap;
+
+ if (!initialize_visual_and_buffer(v, b, v->mesa_visual.rgbMode,
+ (Drawable) p, cmap)) {
+ xmesa_free_buffer(b);
+ return NULL;
+ }
+
+ return b;
+}
+
+
+
+XMesaBuffer
+XMesaCreatePBuffer(XMesaVisual v, Colormap cmap,
+ unsigned int width, unsigned int height)
+{
+ Window root;
+ Drawable drawable; /* X Pixmap Drawable */
+ XMesaBuffer b;
+
+ /* allocate pixmap for front buffer */
+ root = RootWindow( v->display, v->visinfo->screen );
+ drawable = XCreatePixmap(v->display, root, width, height,
+ v->visinfo->depth);
+ if (!drawable)
+ return NULL;
+
+ b = create_xmesa_buffer(drawable, PBUFFER, v, cmap);
+ if (!b)
+ return NULL;
+
+ if (!initialize_visual_and_buffer(v, b, v->mesa_visual.rgbMode,
+ drawable, cmap)) {
+ xmesa_free_buffer(b);
+ return NULL;
+ }
+
+ return b;
+}
+
+
+
+/*
+ * Deallocate an XMesaBuffer structure and all related info.
+ */
+PUBLIC void
+XMesaDestroyBuffer(XMesaBuffer b)
+{
+ xmesa_free_buffer(b);
+}
+
+
+/**
+ * Query the current window size and update the corresponding GLframebuffer
+ * and all attached renderbuffers.
+ * Called when:
+ * 1. the first time a buffer is bound to a context.
+ * 2. SwapBuffers. XXX probably from xm_flush_frontbuffer() too...
+ * Note: it's possible (and legal) for xmctx to be NULL. That can happen
+ * when resizing a buffer when no rendering context is bound.
+ */
+void
+xmesa_check_and_update_buffer_size(XMesaContext xmctx, XMesaBuffer drawBuffer)
+{
+ GLuint width, height;
+ xmesa_get_window_size(drawBuffer->xm_visual->display, drawBuffer, &width, &height);
+ st_resize_framebuffer(drawBuffer->stfb, width, height);
+}
+
+
+
+
+/*
+ * Bind buffer b to context c and make c the current rendering context.
+ */
+PUBLIC
+GLboolean XMesaMakeCurrent2( XMesaContext c, XMesaBuffer drawBuffer,
+ XMesaBuffer readBuffer )
+{
+ XMesaContext old_ctx = XMesaGetCurrentContext();
+
+ if (old_ctx && old_ctx != c) {
+ XMesaFlush(old_ctx);
+ old_ctx->xm_buffer = NULL;
+ old_ctx->xm_read_buffer = NULL;
+ }
+
+ if (c) {
+ if (!drawBuffer || !readBuffer)
+ return GL_FALSE; /* must specify buffers! */
+
+ if (c == old_ctx &&
+ c->xm_buffer == drawBuffer &&
+ c->xm_read_buffer == readBuffer)
+ return GL_TRUE;
+
+ c->xm_buffer = drawBuffer;
+ c->xm_read_buffer = readBuffer;
+
+ st_make_current(c->st, drawBuffer->stfb, readBuffer->stfb);
+
+ xmesa_check_and_update_buffer_size(c, drawBuffer);
+ if (readBuffer != drawBuffer)
+ xmesa_check_and_update_buffer_size(c, readBuffer);
+
+ /* Solution to Stephane Rehel's problem with glXReleaseBuffersMESA(): */
+ drawBuffer->wasCurrent = GL_TRUE;
+ }
+ else {
+ /* Detach */
+ st_make_current( NULL, NULL, NULL );
+
+ }
+ return GL_TRUE;
+}
+
+
+/*
+ * Unbind the context c from its buffer.
+ */
+GLboolean XMesaUnbindContext( XMesaContext c )
+{
+ /* A no-op for XFree86 integration purposes */
+ return GL_TRUE;
+}
+
+
+XMesaContext XMesaGetCurrentContext( void )
+{
+ GET_CURRENT_CONTEXT(ctx);
+ if (ctx) {
+ XMesaContext xmesa = xmesa_context(ctx);
+ return xmesa;
+ }
+ else {
+ return 0;
+ }
+}
+
+
+
+/**
+ * Swap front and back color buffers and have winsys display front buffer.
+ * If there's no front color buffer no swap actually occurs.
+ */
+PUBLIC
+void XMesaSwapBuffers( XMesaBuffer b )
+{
+ struct pipe_surface *frontLeftSurf;
+
+ st_swapbuffers(b->stfb, &frontLeftSurf, NULL);
+
+ if (frontLeftSurf) {
+ driver.display_surface(b, frontLeftSurf);
+ }
+
+ xmesa_check_and_update_buffer_size(NULL, b);
+}
+
+
+
+/*
+ * Copy sub-region of back buffer to front buffer
+ */
+void XMesaCopySubBuffer( XMesaBuffer b, int x, int y, int width, int height )
+{
+ struct pipe_surface *surf_front;
+ struct pipe_surface *surf_back;
+ struct pipe_context *pipe = NULL; /* XXX fix */
+
+ st_get_framebuffer_surface(b->stfb, ST_SURFACE_FRONT_LEFT, &surf_front);
+ st_get_framebuffer_surface(b->stfb, ST_SURFACE_BACK_LEFT, &surf_back);
+
+   if (!surf_front || !surf_back)
+      return;
+
+   if (!pipe)
+      return;  /* XXX no pipe context is available here yet (see note above) */
+
+   pipe->surface_copy(pipe,
+ surf_front, x, y, /* dest */
+ surf_back, x, y, /* src */
+ width, height);
+}
+
+
+
+void XMesaFlush( XMesaContext c )
+{
+ if (c && c->xm_visual->display) {
+ st_finish(c->st);
+ XSync( c->xm_visual->display, False );
+ }
+}
+
+
+
+
+
+XMesaBuffer XMesaFindBuffer( Display *dpy, Drawable d )
+{
+ XMesaBuffer b;
+ for (b = XMesaBufferList; b; b = b->Next) {
+ if (b->drawable == d && b->xm_visual->display == dpy) {
+ return b;
+ }
+ }
+ return NULL;
+}
+
+
+/**
+ * Free/destroy all XMesaBuffers associated with given display.
+ */
+void xmesa_destroy_buffers_on_display(Display *dpy)
+{
+ XMesaBuffer b, next;
+ for (b = XMesaBufferList; b; b = next) {
+ next = b->Next;
+ if (b->xm_visual->display == dpy) {
+ xmesa_free_buffer(b);
+ }
+ }
+}
+
+
+/*
+ * Look for XMesaBuffers whose X window has been destroyed.
+ * Deallocate any such XMesaBuffers.
+ */
+void XMesaGarbageCollect( void )
+{
+ XMesaBuffer b, next;
+ for (b=XMesaBufferList; b; b=next) {
+ next = b->Next;
+ if (b->xm_visual &&
+ b->xm_visual->display &&
+ b->drawable &&
+ b->type == WINDOW) {
+ XSync(b->xm_visual->display, False);
+ if (!window_exists( b->xm_visual->display, b->drawable )) {
+ /* found a dead window, free the ancillary info */
+ XMesaDestroyBuffer( b );
+ }
+ }
+ }
+}
+
+
+
+
+PUBLIC void
+XMesaBindTexImage(Display *dpy, XMesaBuffer drawable, int buffer,
+ const int *attrib_list)
+{
+}
+
+
+
+PUBLIC void
+XMesaReleaseTexImage(Display *dpy, XMesaBuffer drawable, int buffer)
+{
+}
+
diff --git a/src/gallium/state_trackers/glx/xlib/xm_api.h b/src/gallium/state_trackers/glx/xlib/xm_api.h
new file mode 100644
index 0000000000..ce97a3ec76
--- /dev/null
+++ b/src/gallium/state_trackers/glx/xlib/xm_api.h
@@ -0,0 +1,394 @@
+/*
+ * Mesa 3-D graphics library
+ * Version: 7.1
+ *
+ * Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+
+/* Sample Usage:
+
+In addition to the usual X calls to select a visual, create a colormap
+and create a window, you must do the following to use the X/Mesa interface:
+
+1. Call XMesaCreateVisual() to make an XMesaVisual from an XVisualInfo.
+
+2. Call XMesaCreateContext() to create an X/Mesa rendering context, given
+ the XMesaVisual.
+
+3. Call XMesaCreateWindowBuffer() to create an XMesaBuffer from an X window
+ and XMesaVisual.
+
+4. Call XMesaMakeCurrent() to bind the XMesaBuffer to an XMesaContext and
+ to make the context the current one.
+
+5. Make gl* calls to render your graphics.
+
+6. Use XMesaSwapBuffers() when double buffering to swap front/back buffers.
+
+7. Before the X window is destroyed, call XMesaDestroyBuffer().
+
+8. Before exiting, call XMesaDestroyVisual() and XMesaDestroyContext().
+
+A minimal sketch of this sequence follows below.
+
+*/
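+
+/* A minimal sketch of the sequence above, assuming an already-open Display,
+ * a chosen XVisualInfo and an existing Window; the names dpy, visinfo and
+ * win are placeholders, the visual attributes are arbitrary, and error
+ * handling is omitted.  Step 4 uses XMesaMakeCurrent2(), the variant
+ * declared in this header.
+ */
+#if 0
+static void
+example_xmesa_usage(Display *dpy, XVisualInfo *visinfo, Window win)
+{
+   XMesaVisual  xmvis;
+   XMesaContext xmctx;
+   XMesaBuffer  xmbuf;
+
+   /* 1. wrap the XVisualInfo */
+   xmvis = XMesaCreateVisual(dpy, visinfo,
+                             GL_TRUE,     /* RGB mode */
+                             GL_FALSE,    /* no alpha buffer */
+                             GL_TRUE,     /* double buffered */
+                             GL_FALSE,    /* no stereo */
+                             GL_TRUE,     /* XImage back buffer */
+                             16, 0,       /* depth / stencil bits */
+                             0, 0, 0, 0,  /* accum bits */
+                             0, 0,        /* samples, level */
+                             GLX_NONE_EXT);
+
+   /* 2. and 3. create a rendering context and wrap the window */
+   xmctx = XMesaCreateContext(xmvis, NULL);
+   xmbuf = XMesaCreateWindowBuffer(xmvis, win);
+
+   /* 4. bind; here the draw and read buffers are the same */
+   XMesaMakeCurrent2(xmctx, xmbuf, xmbuf);
+
+   /* 5. ... gl* rendering calls go here ... */
+
+   /* 6. present the back buffer */
+   XMesaSwapBuffers(xmbuf);
+
+   /* 7. and 8. tear everything down */
+   XMesaDestroyBuffer(xmbuf);
+   XMesaDestroyContext(xmctx);
+   XMesaDestroyVisual(xmvis);
+}
+#endif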
+
+
+
+
+#ifndef XMESA_H
+#define XMESA_H
+
+
+#include "main/mtypes.h"
+#include "state_tracker/st_context.h"
+#include "state_tracker/st_public.h"
+#include "pipe/p_thread.h"
+
+
+# include <X11/Xlib.h>
+# include <X11/Xlibint.h>
+# include <X11/Xutil.h>
+# ifdef USE_XSHM /* was SHM */
+# include <sys/ipc.h>
+# include <sys/shm.h>
+# include <X11/extensions/XShm.h>
+# endif
+
+typedef struct xmesa_buffer *XMesaBuffer;
+typedef struct xmesa_context *XMesaContext;
+typedef struct xmesa_visual *XMesaVisual;
+
+
+
+/*
+ * Create a new X/Mesa visual.
+ * Input: display - X11 display
+ * visinfo - an XVisualInfo pointer
+ * rgb_flag - GL_TRUE = RGB mode,
+ * GL_FALSE = color index mode
+ * alpha_flag - alpha buffer requested?
+ * db_flag - GL_TRUE = double-buffered,
+ * GL_FALSE = single buffered
+ * stereo_flag - stereo visual?
+ * ximage_flag - GL_TRUE = use an XImage for back buffer,
+ * GL_FALSE = use an off-screen pixmap for back buffer
+ * depth_size - requested bits/depth values, or zero
+ * stencil_size - requested bits/stencil values, or zero
+ * accum_red_size - requested bits/red accum values, or zero
+ * accum_green_size - requested bits/green accum values, or zero
+ * accum_blue_size - requested bits/blue accum values, or zero
+ * accum_alpha_size - requested bits/alpha accum values, or zero
+ * num_samples - number of samples/pixel if multisampling, or zero
+ * level - visual level, usually 0
+ * visualCaveat - as in the GLX extension, usually GLX_NONE_EXT
+ * Return: a new XMesaVisual or 0 if error.
+ */
+extern XMesaVisual XMesaCreateVisual( Display *display,
+ XVisualInfo * visinfo,
+ GLboolean rgb_flag,
+ GLboolean alpha_flag,
+ GLboolean db_flag,
+ GLboolean stereo_flag,
+ GLboolean ximage_flag,
+ GLint depth_size,
+ GLint stencil_size,
+ GLint accum_red_size,
+ GLint accum_green_size,
+ GLint accum_blue_size,
+ GLint accum_alpha_size,
+ GLint num_samples,
+ GLint level,
+ GLint visualCaveat );
+
+/*
+ * Destroy an XMesaVisual, but not the associated XVisualInfo.
+ */
+extern void XMesaDestroyVisual( XMesaVisual v );
+
+
+
+/*
+ * Create a new XMesaContext for rendering into an X11 window.
+ *
+ * Input: visual - an XMesaVisual
+ * share_list - another XMesaContext with which to share display
+ * lists or NULL if no sharing is wanted.
+ * Return: an XMesaContext or NULL if error.
+ */
+extern XMesaContext XMesaCreateContext( XMesaVisual v,
+ XMesaContext share_list );
+
+
+/*
+ * Destroy a rendering context as returned by XMesaCreateContext()
+ */
+extern void XMesaDestroyContext( XMesaContext c );
+
+
+
+/*
+ * Create an XMesaBuffer from an X window.
+ */
+extern XMesaBuffer XMesaCreateWindowBuffer( XMesaVisual v, Window w );
+
+
+/*
+ * Create an XMesaBuffer from an X pixmap.
+ */
+extern XMesaBuffer XMesaCreatePixmapBuffer( XMesaVisual v,
+ Pixmap p,
+ Colormap cmap );
+
+
+/*
+ * Destroy an XMesaBuffer, but not the corresponding window or pixmap.
+ */
+extern void XMesaDestroyBuffer( XMesaBuffer b );
+
+
+/*
+ * Return the XMesaBuffer handle which corresponds to an X drawable, if any.
+ *
+ * New in Mesa 2.3.
+ */
+extern XMesaBuffer XMesaFindBuffer( Display *dpy,
+ Drawable d );
+
+
+
+/*
+ * Bind two buffers (read and draw) to a context and make the
+ * context the current one.
+ * New in Mesa 3.3
+ */
+extern GLboolean XMesaMakeCurrent2( XMesaContext c,
+ XMesaBuffer drawBuffer,
+ XMesaBuffer readBuffer );
+
+
+/*
+ * Unbind the current context from its buffer.
+ */
+extern GLboolean XMesaUnbindContext( XMesaContext c );
+
+
+/*
+ * Return a handle to the current context.
+ */
+extern XMesaContext XMesaGetCurrentContext( void );
+
+
+/*
+ * Swap the front and back buffers for the given buffer. No action is
+ * taken if the buffer is not double buffered.
+ */
+extern void XMesaSwapBuffers( XMesaBuffer b );
+
+
+/*
+ * Copy a sub-region of the back buffer to the front buffer.
+ *
+ * New in Mesa 2.6
+ */
+extern void XMesaCopySubBuffer( XMesaBuffer b,
+ int x,
+ int y,
+ int width,
+ int height );
+
+
+
+
+
+/*
+ * Flush/sync a context
+ */
+extern void XMesaFlush( XMesaContext c );
+
+
+
+/*
+ * Scan for XMesaBuffers whose window/pixmap has been destroyed, then free
+ * any memory used by that buffer.
+ *
+ * New in Mesa 2.3.
+ */
+extern void XMesaGarbageCollect( void );
+
+
+
+/*
+ * Create a pbuffer.
+ * New in Mesa 4.1
+ */
+extern XMesaBuffer XMesaCreatePBuffer(XMesaVisual v, Colormap cmap,
+ unsigned int width, unsigned int height);
+
+
+
+/*
+ * Texture from Pixmap
+ * New in Mesa 7.1
+ */
+extern void
+XMesaBindTexImage(Display *dpy, XMesaBuffer drawable, int buffer,
+ const int *attrib_list);
+
+extern void
+XMesaReleaseTexImage(Display *dpy, XMesaBuffer drawable, int buffer);
+
+
+extern XMesaBuffer
+XMesaCreatePixmapTextureBuffer(XMesaVisual v, Pixmap p,
+ Colormap cmap,
+ int format, int target, int mipmap);
+
+
+
+
+/***********************************************************************
+ */
+
+extern pipe_mutex _xmesa_lock;
+
+extern struct xmesa_buffer *XMesaBufferList;
+
+
+/**
+ * Visual information, derived from GLvisual.
+ * Basically corresponds to an XVisualInfo.
+ */
+struct xmesa_visual {
+ GLvisual mesa_visual; /* Device independent visual parameters */
+ Display *display; /* The X11 display */
+ XVisualInfo * visinfo; /* X's visual info (pointer to private copy) */
+ XVisualInfo *vishandle; /* Only used in fakeglx.c */
+ GLint BitsPerPixel; /* True bits per pixel for XImages */
+
+ GLboolean ximage_flag; /* Use XImage for back buffer (not pixmap)? */
+};
+
+
+/**
+ * Context info, derived from st_context.
+ * Basically corresponds to a GLXContext.
+ */
+struct xmesa_context {
+ struct st_context *st;
+ XMesaVisual xm_visual; /** pixel format info */
+ XMesaBuffer xm_buffer; /** current drawbuffer */
+ XMesaBuffer xm_read_buffer; /** current readbuffer */
+};
+
+
+/**
+ * Types of X/GLX drawables we might render into.
+ */
+typedef enum {
+ WINDOW, /* An X window */
+ GLXWINDOW, /* GLX window */
+ PIXMAP, /* GLX pixmap */
+ PBUFFER /* GLX Pbuffer */
+} BufferType;
+
+
+/**
+ * Framebuffer information, wrapping an st_framebuffer.
+ * Basically corresponds to a GLXDrawable.
+ */
+struct xmesa_buffer {
+ struct st_framebuffer *stfb;
+
+ GLboolean wasCurrent; /* was ever the current buffer? */
+ XMesaVisual xm_visual; /* the X/Mesa visual */
+ Drawable drawable; /* Usually the X window ID */
+ Colormap cmap; /* the X colormap */
+ BufferType type; /* window, pixmap, pbuffer or glxwindow */
+
+ XImage *tempImage;
+ unsigned long selectedEvents;/* for pbuffers only */
+
+ GLuint shm; /* X Shared Memory extension status: */
+ /* 0 = not available */
+ /* 1 = XImage support available */
+ /* 2 = Pixmap support available too */
+#if defined(USE_XSHM)
+ XShmSegmentInfo shminfo;
+#endif
+
+ GC gc; /* scratch GC for span, line, tri drawing */
+
+ /* GLX_EXT_texture_from_pixmap */
+ GLint TextureTarget; /** GLX_TEXTURE_1D_EXT, for example */
+ GLint TextureFormat; /** GLX_TEXTURE_FORMAT_RGB_EXT, for example */
+ GLint TextureMipmap; /** 0 or 1 */
+
+ struct xmesa_buffer *Next; /* Linked list pointer: */
+};
+
+
+
+/** cast wrapper */
+static INLINE XMesaContext
+xmesa_context(GLcontext *ctx)
+{
+ return (XMesaContext) ctx->DriverCtx;
+}
+
+
+/** cast wrapper */
+static INLINE XMesaBuffer
+xmesa_buffer(GLframebuffer *fb)
+{
+ struct st_framebuffer *stfb = (struct st_framebuffer *) fb;
+ return (XMesaBuffer) st_framebuffer_private(stfb);
+}
+
+
+extern void
+xmesa_delete_framebuffer(struct gl_framebuffer *fb);
+
+extern XMesaBuffer
+xmesa_find_buffer(Display *dpy, Colormap cmap, XMesaBuffer notThis);
+
+extern void
+xmesa_check_and_update_buffer_size(XMesaContext xmctx, XMesaBuffer drawBuffer);
+
+extern void
+xmesa_destroy_buffers_on_display(Display *dpy);
+
+static INLINE GLuint
+xmesa_buffer_width(XMesaBuffer b)
+{
+ return b->stfb->Base.Width;
+}
+
+static INLINE GLuint
+xmesa_buffer_height(XMesaBuffer b)
+{
+ return b->stfb->Base.Height;
+}
+
+extern int
+xmesa_check_for_xshm(Display *display);
+
+
+#endif
diff --git a/src/gallium/state_trackers/glx/xlib/xm_winsys.h b/src/gallium/state_trackers/glx/xlib/xm_winsys.h
new file mode 100644
index 0000000000..0e57605c34
--- /dev/null
+++ b/src/gallium/state_trackers/glx/xlib/xm_winsys.h
@@ -0,0 +1,59 @@
+
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef XM_WINSYS_H
+#define XM_WINSYS_H
+
+struct pipe_context;
+struct pipe_screen;
+struct pipe_surface;
+struct xmesa_buffer;
+
+
+struct xm_driver {
+
+ struct pipe_screen *(*create_pipe_screen)( void );
+
+   /* The context_private argument needs to go away. It is currently used
+    * in a round-about way to associate a display-target surface with its
+    * Xlib window.  (See the registration sketch further below for how a
+    * winsys fills in this struct.)
+    */
+ struct pipe_context *(*create_pipe_context)( struct pipe_screen *,
+ void *context_private );
+
+ void (*display_surface)( struct xmesa_buffer *,
+ struct pipe_surface * );
+
+};
+
+
+extern void
+xmesa_set_driver( const struct xm_driver *driver );
+
+
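+/* A minimal sketch of how a winsys might fill in this vtable and register
+ * itself.  The xlib_create_screen(), xlib_create_context() and
+ * xlib_display_surface() names are hypothetical placeholders for whatever
+ * functions the winsys actually provides; only struct xm_driver and
+ * xmesa_set_driver() come from this header.
+ */
+#if 0
+extern struct pipe_screen *xlib_create_screen( void );
+extern struct pipe_context *xlib_create_context( struct pipe_screen *screen,
+                                                 void *context_private );
+extern void xlib_display_surface( struct xmesa_buffer *b,
+                                  struct pipe_surface *surf );
+
+static const struct xm_driver xlib_driver = {
+   xlib_create_screen,      /* create_pipe_screen */
+   xlib_create_context,     /* create_pipe_context */
+   xlib_display_surface     /* display_surface */
+};
+
+/* Called once by the winsys before any XMesa* entry point is used. */
+static void
+xlib_winsys_init( void )
+{
+   xmesa_set_driver( &xlib_driver );
+}
+#endif
+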
+#endif
diff --git a/src/gallium/state_trackers/python/README b/src/gallium/state_trackers/python/README
new file mode 100644
index 0000000000..e03d546830
--- /dev/null
+++ b/src/gallium/state_trackers/python/README
@@ -0,0 +1,43 @@
+This directory contains Python bindings to Gallium3D. It looks like a state
+tracker from the pipe driver's perspective, and like a pipe driver from the
+Python script's perspective.
+
+
+To build you'll need:
+* Python (with development packages)
+* SCons
+* SWIG, http://www.swig.org/download.html
+* Python Imaging Library with TK support, http://www.pythonware.com/products/pil/,
+ for the samples
+
+On a Debian-based distro you can simply do:
+
+ aptitude install python-dev scons swig python-imaging python-imaging-tk
+
+On a Windows machine, ensure the swig command is in your PATH.
+
+Invoke scons from the top-level directory as
+
+ scons debug=yes statetrackers=python drivers=softpipe,trace winsys=none
+
+To use it, set PYTHONPATH appropriately, e.g., on Linux do:
+
+ export PYTHONPATH=$PWD/build/linux-x86-debug/gallium/state_trackers/python
+
+or (on Windows)
+
+ set PYTHONPATH=%CD%\build\windows-x86-debug\gallium\state_trackers\python
+
+and then try running
+
+ python src/gallium/state_trackers/python/samples/tri.py
+
+which should show a triangle.
+
+
+This is still a work in progress:
+- errors are not handled properly and almost always result in a crash
+- state atoms with array members are awkward to set
+
+--
+Jose Fonseca <jfonseca@vmware.com>
diff --git a/src/gallium/state_trackers/python/SConscript b/src/gallium/state_trackers/python/SConscript
new file mode 100644
index 0000000000..ec385e7c44
--- /dev/null
+++ b/src/gallium/state_trackers/python/SConscript
@@ -0,0 +1,47 @@
+import sys
+import os.path
+
+Import('*')
+
+if 'python' in env['statetrackers']:
+
+ env = env.Clone()
+
+ env.Tool('python')
+
+ env.Tool('swig')
+ env.Append(SWIGPATH = ['#src/gallium/include', '#src/gallium/include/pipe'])
+ env.Append(SWIGFLAGS = ['-python', '-keyword'])
+
+ env.Append(CPPPATH = '.')
+
+ if env['platform'] == 'windows':
+ env.Append(LIBS = [
+ 'opengl32',
+ 'gdi32',
+ 'user32',
+ 'kernel32',
+ ])
+ else:
+ env.Append(LIBS = [
+ 'GL',
+ 'X11',
+ ])
+
+ pyst = env.ConvenienceLibrary(
+ target = 'pyst',
+ source = [
+ 'gallium.i',
+ 'st_device.c',
+ 'st_sample.c',
+ 'st_softpipe_winsys.c',
+ ],
+ )
+
+ env.SharedLibrary(
+ target = '_gallium',
+ source = [
+ 'st_hardpipe_winsys.c',
+ ],
+ LIBS = [pyst, softpipe, trace] + auxiliaries + env['LIBS'],
+ )
diff --git a/src/gallium/state_trackers/python/gallium.i b/src/gallium/state_trackers/python/gallium.i
new file mode 100644
index 0000000000..3f79cc1a3d
--- /dev/null
+++ b/src/gallium/state_trackers/python/gallium.i
@@ -0,0 +1,103 @@
+ /**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * SWIG interface definition for Gallium types.
+ *
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+%module gallium;
+
+%{
+
+#include <stdio.h>
+
+#include "pipe/p_screen.h"
+#include "pipe/p_context.h"
+#include "pipe/p_inlines.h"
+#include "pipe/p_shader_tokens.h"
+#include "cso_cache/cso_context.h"
+#include "util/u_draw_quad.h"
+#include "util/u_tile.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "tgsi/tgsi_text.h"
+#include "tgsi/tgsi_dump.h"
+
+#include "st_device.h"
+#include "st_sample.h"
+
+%}
+
+%include "typemaps.i"
+%include "exception.i"
+%include "cstring.i"
+
+%include "carrays.i"
+%array_class(unsigned char, ByteArray);
+%array_class(int, IntArray);
+%array_class(unsigned, UnsignedArray);
+%array_class(float, FloatArray);
+
+
+%rename(Device) st_device;
+%rename(Context) st_context;
+%rename(Texture) pipe_texture;
+%rename(Surface) st_surface;
+%rename(Buffer) pipe_buffer;
+
+%rename(BlendColor) pipe_blend_color;
+%rename(Blend) pipe_blend_state;
+%rename(Clip) pipe_clip_state;
+%rename(ConstantBuffer) pipe_constant_buffer;
+%rename(Depth) pipe_depth_state;
+%rename(Stencil) pipe_stencil_state;
+%rename(Alpha) pipe_alpha_state;
+%rename(DepthStencilAlpha) pipe_depth_stencil_alpha_state;
+%rename(FormatBlock) pipe_format_block;
+%rename(Framebuffer) pipe_framebuffer_state;
+%rename(PolyStipple) pipe_poly_stipple;
+%rename(Rasterizer) pipe_rasterizer_state;
+%rename(Sampler) pipe_sampler_state;
+%rename(Scissor) pipe_scissor_state;
+%rename(Shader) pipe_shader_state;
+%rename(VertexBuffer) pipe_vertex_buffer;
+%rename(VertexElement) pipe_vertex_element;
+%rename(Viewport) pipe_viewport_state;
+
+
+%include "p_compiler.i"
+%include "p_defines.h";
+%include "p_format.i"
+
+%include "p_device.i"
+%include "p_context.i"
+%include "p_texture.i"
+%include "p_state.i"
+
diff --git a/src/gallium/state_trackers/python/p_compiler.i b/src/gallium/state_trackers/python/p_compiler.i
new file mode 100644
index 0000000000..15f6ba5b9d
--- /dev/null
+++ b/src/gallium/state_trackers/python/p_compiler.i
@@ -0,0 +1,29 @@
+ /**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+typedef unsigned char ubyte;
diff --git a/src/gallium/state_trackers/python/p_context.i b/src/gallium/state_trackers/python/p_context.i
new file mode 100644
index 0000000000..a40aa1e518
--- /dev/null
+++ b/src/gallium/state_trackers/python/p_context.i
@@ -0,0 +1,319 @@
+ /**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * SWIG interface definition for Gallium types.
+ *
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+%nodefaultctor st_context;
+%nodefaultdtor st_context;
+
+struct st_context {
+};
+
+%extend st_context {
+
+ ~st_context() {
+ st_context_destroy($self);
+ }
+
+ /*
+ * State functions (create/bind/destroy state objects)
+ */
+
+ void set_blend( const struct pipe_blend_state *state ) {
+ cso_set_blend($self->cso, state);
+ }
+
+ void set_sampler( unsigned index, const struct pipe_sampler_state *state ) {
+ cso_single_sampler($self->cso, index, state);
+ cso_single_sampler_done($self->cso);
+ }
+
+ void set_rasterizer( const struct pipe_rasterizer_state *state ) {
+ cso_set_rasterizer($self->cso, state);
+ }
+
+ void set_depth_stencil_alpha(const struct pipe_depth_stencil_alpha_state *state) {
+ cso_set_depth_stencil_alpha($self->cso, state);
+ }
+
+ void set_fragment_shader( const struct pipe_shader_state *state ) {
+ void *fs;
+
+ if(!state) {
+ cso_set_fragment_shader_handle($self->cso, NULL);
+ return;
+ }
+
+ fs = $self->pipe->create_fs_state($self->pipe, state);
+ if(!fs)
+ return;
+
+ if(cso_set_fragment_shader_handle($self->cso, fs) != PIPE_OK)
+ return;
+
+ cso_delete_fragment_shader($self->cso, $self->fs);
+ $self->fs = fs;
+ }
+
+ void set_vertex_shader( const struct pipe_shader_state *state ) {
+ void *vs;
+
+ if(!state) {
+ cso_set_vertex_shader_handle($self->cso, NULL);
+ return;
+ }
+
+ vs = $self->pipe->create_vs_state($self->pipe, state);
+ if(!vs)
+ return;
+
+ if(cso_set_vertex_shader_handle($self->cso, vs) != PIPE_OK)
+ return;
+
+ cso_delete_vertex_shader($self->cso, $self->vs);
+ $self->vs = vs;
+ }
+
+ /*
+ * Parameter-like state (or properties)
+ */
+
+ void set_blend_color(const struct pipe_blend_color *state ) {
+ cso_set_blend_color($self->cso, state);
+ }
+
+ void set_clip(const struct pipe_clip_state *state ) {
+ $self->pipe->set_clip_state($self->pipe, state);
+ }
+
+ void set_constant_buffer(unsigned shader, unsigned index,
+ struct pipe_buffer *buffer )
+ {
+ struct pipe_constant_buffer state;
+ memset(&state, 0, sizeof(state));
+ state.buffer = buffer;
+ $self->pipe->set_constant_buffer($self->pipe, shader, index, &state);
+ }
+
+ void set_framebuffer(const struct pipe_framebuffer_state *state )
+ {
+ memcpy(&$self->framebuffer, state, sizeof *state);
+ cso_set_framebuffer($self->cso, state);
+ }
+
+ void set_polygon_stipple(const struct pipe_poly_stipple *state ) {
+ $self->pipe->set_polygon_stipple($self->pipe, state);
+ }
+
+ void set_scissor(const struct pipe_scissor_state *state ) {
+ $self->pipe->set_scissor_state($self->pipe, state);
+ }
+
+ void set_viewport(const struct pipe_viewport_state *state) {
+ cso_set_viewport($self->cso, state);
+ }
+
+ void set_sampler_texture(unsigned index,
+ struct pipe_texture *texture) {
+ if(!texture)
+ texture = $self->default_texture;
+ pipe_texture_reference(&$self->sampler_textures[index], texture);
+ $self->pipe->set_sampler_textures($self->pipe,
+ PIPE_MAX_SAMPLERS,
+ $self->sampler_textures);
+ }
+
+ void set_vertex_buffer(unsigned index,
+ unsigned stride,
+ unsigned max_index,
+ unsigned buffer_offset,
+ struct pipe_buffer *buffer)
+ {
+ unsigned i;
+ struct pipe_vertex_buffer state;
+
+ memset(&state, 0, sizeof(state));
+ state.stride = stride;
+ state.max_index = max_index;
+ state.buffer_offset = buffer_offset;
+ state.buffer = buffer;
+
+ memcpy(&$self->vertex_buffers[index], &state, sizeof(state));
+
+ for(i = 0; i < PIPE_MAX_ATTRIBS; ++i)
+ if(self->vertex_buffers[i].buffer)
+ $self->num_vertex_buffers = i + 1;
+
+ $self->pipe->set_vertex_buffers($self->pipe,
+ $self->num_vertex_buffers,
+ $self->vertex_buffers);
+ }
+
+ void set_vertex_element(unsigned index,
+ const struct pipe_vertex_element *element)
+ {
+ memcpy(&$self->vertex_elements[index], element, sizeof(*element));
+ }
+
+ void set_vertex_elements(unsigned num)
+ {
+ $self->num_vertex_elements = num;
+ $self->pipe->set_vertex_elements($self->pipe,
+ $self->num_vertex_elements,
+ $self->vertex_elements);
+ }
+
+ /*
+ * Draw functions
+ */
+
+ void draw_arrays(unsigned mode, unsigned start, unsigned count) {
+ $self->pipe->draw_arrays($self->pipe, mode, start, count);
+ }
+
+ void draw_elements( struct pipe_buffer *indexBuffer,
+ unsigned indexSize,
+ unsigned mode, unsigned start, unsigned count)
+ {
+ $self->pipe->draw_elements($self->pipe,
+ indexBuffer,
+ indexSize,
+ mode, start, count);
+ }
+
+ void draw_range_elements( struct pipe_buffer *indexBuffer,
+ unsigned indexSize, unsigned minIndex, unsigned maxIndex,
+ unsigned mode, unsigned start, unsigned count)
+ {
+ $self->pipe->draw_range_elements($self->pipe,
+ indexBuffer,
+ indexSize, minIndex, maxIndex,
+ mode, start, count);
+ }
+
+ void draw_vertices(unsigned prim,
+ unsigned num_verts,
+ unsigned num_attribs,
+ const float *vertices)
+ {
+ struct pipe_context *pipe = $self->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ struct pipe_buffer *vbuf;
+ float *map;
+ unsigned size;
+
+ size = num_verts * num_attribs * 4 * sizeof(float);
+
+ vbuf = pipe_buffer_create(screen,
+ 32,
+ PIPE_BUFFER_USAGE_VERTEX,
+ size);
+ if(!vbuf)
+ goto error1;
+
+ map = pipe_buffer_map(screen, vbuf, PIPE_BUFFER_USAGE_CPU_WRITE);
+ if (!map)
+ goto error2;
+ memcpy(map, vertices, size);
+ pipe_buffer_unmap(screen, vbuf);
+
+ util_draw_vertex_buffer(pipe, vbuf, 0, prim, num_verts, num_attribs);
+
+error2:
+ pipe_buffer_reference(&vbuf, NULL);
+error1:
+ ;
+ }
+
+ void
+ flush(unsigned flags = 0) {
+ struct pipe_fence_handle *fence = NULL;
+ $self->pipe->flush($self->pipe, flags | PIPE_FLUSH_RENDER_CACHE, &fence);
+ if(fence) {
+ /* TODO: allow asynchronous operation */
+ $self->pipe->screen->fence_finish( $self->pipe->screen, fence, 0 );
+ $self->pipe->screen->fence_reference( $self->pipe->screen, &fence, NULL );
+ }
+ }
+
+ /*
+ * Surface functions
+ */
+
+ void surface_copy(struct st_surface *dst,
+ unsigned destx, unsigned desty,
+ struct st_surface *src,
+ unsigned srcx, unsigned srcy,
+ unsigned width, unsigned height)
+ {
+ struct pipe_surface *_dst = NULL;
+ struct pipe_surface *_src = NULL;
+
+ _dst = st_pipe_surface(dst, PIPE_BUFFER_USAGE_GPU_WRITE);
+ if(!_dst)
+ SWIG_exception(SWIG_ValueError, "couldn't acquire destination surface for writing");
+
+ _src = st_pipe_surface(src, PIPE_BUFFER_USAGE_GPU_READ);
+ if(!_src)
+ SWIG_exception(SWIG_ValueError, "couldn't acquire source surface for reading");
+
+ $self->pipe->surface_copy($self->pipe, _dst, destx, desty, _src, srcx, srcy, width, height);
+
+ fail:
+ pipe_surface_reference(&_src, NULL);
+ pipe_surface_reference(&_dst, NULL);
+ }
+
+ void surface_fill(struct st_surface *dst,
+ unsigned x, unsigned y,
+ unsigned width, unsigned height,
+ unsigned value)
+ {
+ struct pipe_surface *_dst = NULL;
+
+ _dst = st_pipe_surface(dst, PIPE_BUFFER_USAGE_GPU_WRITE);
+ if(!_dst)
+ SWIG_exception(SWIG_ValueError, "couldn't acquire destination surface for writing");
+
+ $self->pipe->surface_fill($self->pipe, _dst, x, y, width, height, value);
+
+ fail:
+ pipe_surface_reference(&_dst, NULL);
+ }
+
+ void clear(unsigned buffers, const float *rgba, double depth = 0.0f,
+ unsigned stencil = 0)
+ {
+ $self->pipe->clear($self->pipe, buffers, rgba, depth, stencil);
+ }
+
+};
diff --git a/src/gallium/state_trackers/python/p_device.i b/src/gallium/state_trackers/python/p_device.i
new file mode 100644
index 0000000000..f16fe5b0ff
--- /dev/null
+++ b/src/gallium/state_trackers/python/p_device.i
@@ -0,0 +1,130 @@
+ /**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * SWIG interface definition for Gallium types.
+ *
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+%nodefaultctor st_device;
+%nodefaultdtor st_device;
+
+
+struct st_device {
+};
+
+%newobject st_device::texture_create;
+%newobject st_device::context_create;
+%newobject st_device::buffer_create;
+
+%extend st_device {
+
+ st_device(int hardware = 1) {
+ return st_device_create(hardware ? TRUE : FALSE);
+ }
+
+ ~st_device() {
+ st_device_destroy($self);
+ }
+
+ const char * get_name( void ) {
+ return $self->screen->get_name($self->screen);
+ }
+
+ const char * get_vendor( void ) {
+ return $self->screen->get_vendor($self->screen);
+ }
+
+ /**
+ * Query an integer-valued capability/parameter/limit
+ * \param param one of PIPE_CAP_x
+ */
+ int get_param( int param ) {
+ return $self->screen->get_param($self->screen, param);
+ }
+
+ /**
+ * Query a float-valued capability/parameter/limit
+ * \param param one of PIPE_CAP_x
+ */
+ float get_paramf( int param ) {
+ return $self->screen->get_paramf($self->screen, param);
+ }
+
+ /**
+ * Check if the given pipe_format is supported as a texture or
+ * drawing surface.
+    * \param target one of the PIPE_TEXTURE_x targets
+ */
+ int is_format_supported( enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned tex_usage,
+ unsigned geom_flags ) {
+ return $self->screen->is_format_supported( $self->screen,
+ format,
+ target,
+ tex_usage,
+ geom_flags );
+ }
+
+ struct st_context *
+ context_create(void) {
+ return st_context_create($self);
+ }
+
+ struct pipe_texture *
+ texture_create(
+ enum pipe_format format,
+ unsigned width,
+ unsigned height,
+ unsigned depth = 1,
+ unsigned last_level = 0,
+ enum pipe_texture_target target = PIPE_TEXTURE_2D,
+ unsigned tex_usage = 0
+ ) {
+ struct pipe_texture templat;
+ memset(&templat, 0, sizeof(templat));
+ templat.format = format;
+ pf_get_block(templat.format, &templat.block);
+ templat.width[0] = width;
+ templat.height[0] = height;
+ templat.depth[0] = depth;
+ templat.last_level = last_level;
+ templat.target = target;
+ templat.tex_usage = tex_usage;
+ return $self->screen->texture_create($self->screen, &templat);
+ }
+
+ struct pipe_buffer *
+ buffer_create(unsigned size, unsigned alignment = 0, unsigned usage = 0) {
+ return pipe_buffer_create($self->screen, alignment, usage, size);
+ }
+
+};
diff --git a/src/gallium/state_trackers/python/p_format.i b/src/gallium/state_trackers/python/p_format.i
new file mode 100644
index 0000000000..26fb12b387
--- /dev/null
+++ b/src/gallium/state_trackers/python/p_format.i
@@ -0,0 +1,162 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/*
+ * XXX: SWIG can't parse p_format.h, so we need to duplicate the relevant
+ * declarations here
+ */
+
+%{
+#include "pipe/p_format.h"
+%}
+
+enum pipe_format {
+ PIPE_FORMAT_NONE,
+ PIPE_FORMAT_A8R8G8B8_UNORM,
+ PIPE_FORMAT_X8R8G8B8_UNORM,
+ PIPE_FORMAT_B8G8R8A8_UNORM,
+ PIPE_FORMAT_B8G8R8X8_UNORM,
+ PIPE_FORMAT_A1R5G5B5_UNORM,
+ PIPE_FORMAT_A4R4G4B4_UNORM,
+ PIPE_FORMAT_R5G6B5_UNORM,
+ PIPE_FORMAT_A2B10G10R10_UNORM,
+ PIPE_FORMAT_L8_UNORM,
+ PIPE_FORMAT_A8_UNORM,
+ PIPE_FORMAT_I8_UNORM,
+ PIPE_FORMAT_A8L8_UNORM,
+ PIPE_FORMAT_L16_UNORM,
+ PIPE_FORMAT_YCBCR,
+ PIPE_FORMAT_YCBCR_REV,
+ PIPE_FORMAT_Z16_UNORM,
+ PIPE_FORMAT_Z32_UNORM,
+ PIPE_FORMAT_Z32_FLOAT,
+ PIPE_FORMAT_S8Z24_UNORM,
+ PIPE_FORMAT_Z24S8_UNORM,
+ PIPE_FORMAT_X8Z24_UNORM,
+ PIPE_FORMAT_Z24X8_UNORM,
+ PIPE_FORMAT_S8_UNORM,
+ PIPE_FORMAT_R64_FLOAT,
+ PIPE_FORMAT_R64G64_FLOAT,
+ PIPE_FORMAT_R64G64B64_FLOAT,
+ PIPE_FORMAT_R64G64B64A64_FLOAT,
+ PIPE_FORMAT_R32_FLOAT,
+ PIPE_FORMAT_R32G32_FLOAT,
+ PIPE_FORMAT_R32G32B32_FLOAT,
+ PIPE_FORMAT_R32G32B32A32_FLOAT,
+ PIPE_FORMAT_R32_UNORM,
+ PIPE_FORMAT_R32G32_UNORM,
+ PIPE_FORMAT_R32G32B32_UNORM,
+ PIPE_FORMAT_R32G32B32A32_UNORM,
+ PIPE_FORMAT_R32_USCALED,
+ PIPE_FORMAT_R32G32_USCALED,
+ PIPE_FORMAT_R32G32B32_USCALED,
+ PIPE_FORMAT_R32G32B32A32_USCALED,
+ PIPE_FORMAT_R32_SNORM,
+ PIPE_FORMAT_R32G32_SNORM,
+ PIPE_FORMAT_R32G32B32_SNORM,
+ PIPE_FORMAT_R32G32B32A32_SNORM,
+ PIPE_FORMAT_R32_SSCALED,
+ PIPE_FORMAT_R32G32_SSCALED,
+ PIPE_FORMAT_R32G32B32_SSCALED,
+ PIPE_FORMAT_R32G32B32A32_SSCALED,
+ PIPE_FORMAT_R16_UNORM,
+ PIPE_FORMAT_R16G16_UNORM,
+ PIPE_FORMAT_R16G16B16_UNORM,
+ PIPE_FORMAT_R16G16B16A16_UNORM,
+ PIPE_FORMAT_R16_USCALED,
+ PIPE_FORMAT_R16G16_USCALED,
+ PIPE_FORMAT_R16G16B16_USCALED,
+ PIPE_FORMAT_R16G16B16A16_USCALED,
+ PIPE_FORMAT_R16_SNORM,
+ PIPE_FORMAT_R16G16_SNORM,
+ PIPE_FORMAT_R16G16B16_SNORM,
+ PIPE_FORMAT_R16G16B16A16_SNORM,
+ PIPE_FORMAT_R16_SSCALED,
+ PIPE_FORMAT_R16G16_SSCALED,
+ PIPE_FORMAT_R16G16B16_SSCALED,
+ PIPE_FORMAT_R16G16B16A16_SSCALED,
+ PIPE_FORMAT_R8_UNORM,
+ PIPE_FORMAT_R8G8_UNORM,
+ PIPE_FORMAT_R8G8B8_UNORM,
+ PIPE_FORMAT_R8G8B8A8_UNORM,
+ PIPE_FORMAT_R8G8B8X8_UNORM,
+ PIPE_FORMAT_R8_USCALED,
+ PIPE_FORMAT_R8G8_USCALED,
+ PIPE_FORMAT_R8G8B8_USCALED,
+ PIPE_FORMAT_R8G8B8A8_USCALED,
+ PIPE_FORMAT_R8G8B8X8_USCALED,
+ PIPE_FORMAT_R8_SNORM,
+ PIPE_FORMAT_R8G8_SNORM,
+ PIPE_FORMAT_R8G8B8_SNORM,
+ PIPE_FORMAT_R8G8B8A8_SNORM,
+ PIPE_FORMAT_R8G8B8X8_SNORM,
+ PIPE_FORMAT_B6G5R5_SNORM,
+ PIPE_FORMAT_A8B8G8R8_SNORM,
+ PIPE_FORMAT_X8B8G8R8_SNORM,
+ PIPE_FORMAT_R8_SSCALED,
+ PIPE_FORMAT_R8G8_SSCALED,
+ PIPE_FORMAT_R8G8B8_SSCALED,
+ PIPE_FORMAT_R8G8B8A8_SSCALED,
+ PIPE_FORMAT_R8G8B8X8_SSCALED,
+ PIPE_FORMAT_R32_FIXED,
+ PIPE_FORMAT_R32G32_FIXED,
+ PIPE_FORMAT_R32G32B32_FIXED,
+ PIPE_FORMAT_R32G32B32A32_FIXED,
+
+ PIPE_FORMAT_L8_SRGB,
+ PIPE_FORMAT_A8L8_SRGB,
+ PIPE_FORMAT_R8G8B8_SRGB,
+ PIPE_FORMAT_R8G8B8A8_SRGB,
+ PIPE_FORMAT_R8G8B8X8_SRGB,
+ PIPE_FORMAT_A8R8G8B8_SRGB,
+ PIPE_FORMAT_X8R8G8B8_SRGB,
+ PIPE_FORMAT_B8G8R8A8_SRGB,
+ PIPE_FORMAT_B8G8R8X8_SRGB,
+
+ PIPE_FORMAT_X8UB8UG8SR8S_NORM,
+ PIPE_FORMAT_B6UG5SR5S_NORM,
+
+ PIPE_FORMAT_DXT1_RGB,
+ PIPE_FORMAT_DXT1_RGBA,
+ PIPE_FORMAT_DXT3_RGBA,
+ PIPE_FORMAT_DXT5_RGBA,
+
+ PIPE_FORMAT_DXT1_SRGB,
+ PIPE_FORMAT_DXT1_SRGBA,
+ PIPE_FORMAT_DXT3_SRGBA,
+ PIPE_FORMAT_DXT5_SRGBA,
+};
+
+
+struct pipe_format_block
+{
+ unsigned size;
+ unsigned width;
+ unsigned height;
+};
+
diff --git a/src/gallium/state_trackers/python/p_state.i b/src/gallium/state_trackers/python/p_state.i
new file mode 100644
index 0000000000..90f157e0ab
--- /dev/null
+++ b/src/gallium/state_trackers/python/p_state.i
@@ -0,0 +1,137 @@
+ /**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * SWIG interface definition for Gallium types.
+ *
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+%module gallium;
+
+%ignore winsys;
+%ignore pipe_vertex_buffer::buffer;
+
+%include "pipe/p_compiler.h";
+%include "pipe/p_state.h";
+
+
+%array_class(struct pipe_stencil_state, StencilArray);
+
+
+%extend pipe_framebuffer_state {
+
+ pipe_framebuffer_state(void) {
+ return CALLOC_STRUCT(pipe_framebuffer_state);
+ }
+
+ ~pipe_framebuffer_state() {
+ unsigned index;
+ for(index = 0; index < PIPE_MAX_COLOR_BUFS; ++index)
+ pipe_surface_reference(&$self->cbufs[index], NULL);
+ pipe_surface_reference(&$self->zsbuf, NULL);
+ FREE($self);
+ }
+
+ void
+ set_cbuf(unsigned index, struct st_surface *surface)
+ {
+ struct pipe_surface *_surface = NULL;
+
+ if(index >= PIPE_MAX_COLOR_BUFS)
+ SWIG_exception(SWIG_ValueError, "index out of bounds");
+
+ if(surface) {
+ _surface = st_pipe_surface(surface, PIPE_BUFFER_USAGE_GPU_WRITE);
+ if(!_surface)
+ SWIG_exception(SWIG_ValueError, "couldn't acquire surface for writing");
+ }
+
+ pipe_surface_reference(&$self->cbufs[index], _surface);
+
+ fail:
+ return;
+ }
+
+ void
+ set_zsbuf(struct st_surface *surface)
+ {
+ struct pipe_surface *_surface = NULL;
+
+ if(surface) {
+ _surface = st_pipe_surface(surface, PIPE_BUFFER_USAGE_GPU_WRITE);
+ if(!_surface)
+ SWIG_exception(SWIG_ValueError, "couldn't acquire surface for writing");
+ }
+
+ pipe_surface_reference(&$self->zsbuf, _surface);
+
+ fail:
+ return;
+ }
+
+};
+
+
+%extend pipe_shader_state {
+
+ pipe_shader_state(const char *text, unsigned num_tokens = 1024) {
+ struct tgsi_token *tokens;
+ struct pipe_shader_state *shader;
+
+ tokens = MALLOC(num_tokens * sizeof(struct tgsi_token));
+ if(!tokens)
+ goto error1;
+
+ if(tgsi_text_translate(text, tokens, num_tokens ) != TRUE)
+ goto error2;
+
+ shader = CALLOC_STRUCT(pipe_shader_state);
+ if(!shader)
+ goto error3;
+
+ shader->tokens = tokens;
+
+ return shader;
+
+error3:
+error2:
+ FREE(tokens);
+error1:
+ return NULL;
+ }
+
+ ~pipe_shader_state() {
+ FREE((void*)$self->tokens);
+ FREE($self);
+ }
+
+ void dump(unsigned flags = 0) {
+ tgsi_dump($self->tokens, flags);
+ }
+}
diff --git a/src/gallium/state_trackers/python/p_texture.i b/src/gallium/state_trackers/python/p_texture.i
new file mode 100644
index 0000000000..1d513abf3c
--- /dev/null
+++ b/src/gallium/state_trackers/python/p_texture.i
@@ -0,0 +1,450 @@
+ /**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * SWIG interface definition for Gallium types.
+ *
+ * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
+ */
+
+
+%nodefaultctor pipe_texture;
+%nodefaultctor st_surface;
+%nodefaultctor pipe_buffer;
+
+%nodefaultdtor pipe_texture;
+%nodefaultdtor st_surface;
+%nodefaultdtor pipe_buffer;
+
+%ignore pipe_texture::screen;
+
+%immutable st_surface::texture;
+%immutable st_surface::face;
+%immutable st_surface::level;
+%immutable st_surface::zslice;
+
+%newobject pipe_texture::get_surface;
+
+
+%extend pipe_texture {
+
+ ~pipe_texture() {
+ struct pipe_texture *ptr = $self;
+ pipe_texture_reference(&ptr, NULL);
+ }
+
+ unsigned get_width(unsigned level=0) {
+ return $self->width[level];
+ }
+
+ unsigned get_height(unsigned level=0) {
+ return $self->height[level];
+ }
+
+ unsigned get_depth(unsigned level=0) {
+ return $self->depth[level];
+ }
+
+ unsigned get_nblocksx(unsigned level=0) {
+ return $self->nblocksx[level];
+ }
+
+ unsigned get_nblocksy(unsigned level=0) {
+ return $self->nblocksy[level];
+ }
+
+ /** Get a surface which is a "view" into a texture */
+ struct st_surface *
+ get_surface(unsigned face=0, unsigned level=0, unsigned zslice=0)
+ {
+ struct st_surface *surface;
+
+ if(face >= ($self->target == PIPE_TEXTURE_CUBE ? 6U : 1U))
+ SWIG_exception(SWIG_ValueError, "face out of bounds");
+ if(level > $self->last_level)
+ SWIG_exception(SWIG_ValueError, "level out of bounds");
+ if(zslice >= $self->depth[level])
+ SWIG_exception(SWIG_ValueError, "zslice out of bounds");
+
+ surface = CALLOC_STRUCT(st_surface);
+ if(!surface)
+ return NULL;
+
+ pipe_texture_reference(&surface->texture, $self);
+ surface->face = face;
+ surface->level = level;
+ surface->zslice = zslice;
+
+ return surface;
+
+ fail:
+ return NULL;
+ }
+
+};
+
+struct st_surface
+{
+ %immutable;
+
+ struct pipe_texture *texture;
+ unsigned face;
+ unsigned level;
+ unsigned zslice;
+
+};
+
+%extend st_surface {
+
+ %immutable;
+
+ unsigned format;
+ unsigned width;
+ unsigned height;
+ unsigned nblocksx;
+ unsigned nblocksy;
+
+ ~st_surface() {
+ pipe_texture_reference(&$self->texture, NULL);
+ FREE($self);
+ }
+
+ %cstring_output_allocate_size(char **STRING, int *LENGTH, free(*$1));
+ void get_tile_raw(unsigned x, unsigned y, unsigned w, unsigned h, char **STRING, int *LENGTH)
+ {
+ struct pipe_texture *texture = $self->texture;
+ struct pipe_screen *screen = texture->screen;
+ struct pipe_transfer *transfer;
+ unsigned stride;
+
+ stride = pf_get_nblocksx(&texture->block, w) * texture->block.size;
+ *LENGTH = pf_get_nblocksy(&texture->block, h) * stride;
+ *STRING = (char *) malloc(*LENGTH);
+ if(!*STRING)
+ return;
+
+ transfer = screen->get_tex_transfer(screen,
+ $self->texture,
+ $self->face,
+ $self->level,
+ $self->zslice,
+ PIPE_TRANSFER_READ,
+ x, y, w, h);
+ if(transfer) {
+ pipe_get_tile_raw(transfer, 0, 0, w, h, *STRING, stride);
+ screen->tex_transfer_destroy(transfer);
+ }
+ }
+
+ %cstring_input_binary(const char *STRING, unsigned LENGTH);
+ void put_tile_raw(unsigned x, unsigned y, unsigned w, unsigned h, const char *STRING, unsigned LENGTH, unsigned stride = 0)
+ {
+ struct pipe_texture *texture = $self->texture;
+ struct pipe_screen *screen = texture->screen;
+ struct pipe_transfer *transfer;
+
+ if(stride == 0)
+ stride = pf_get_nblocksx(&texture->block, w) * texture->block.size;
+
+      if(LENGTH < pf_get_nblocksy(&texture->block, h) * stride)
+         SWIG_exception(SWIG_ValueError, "data length is too small for the given tile size");
+
+ transfer = screen->get_tex_transfer(screen,
+ $self->texture,
+ $self->face,
+ $self->level,
+ $self->zslice,
+ PIPE_TRANSFER_WRITE,
+ x, y, w, h);
+ if(!transfer)
+ SWIG_exception(SWIG_MemoryError, "couldn't initiate transfer");
+
+ pipe_put_tile_raw(transfer, 0, 0, w, h, STRING, stride);
+ screen->tex_transfer_destroy(transfer);
+
+ fail:
+ return;
+ }
+
+ void
+ get_tile_rgba(unsigned x, unsigned y, unsigned w, unsigned h, float *rgba)
+ {
+ struct pipe_screen *screen = $self->texture->screen;
+ struct pipe_transfer *transfer;
+ transfer = screen->get_tex_transfer(screen,
+ $self->texture,
+ $self->face,
+ $self->level,
+ $self->zslice,
+ PIPE_TRANSFER_READ,
+ x, y, w, h);
+ if(transfer) {
+ pipe_get_tile_rgba(transfer, 0, 0, w, h, rgba);
+ screen->tex_transfer_destroy(transfer);
+ }
+ }
+
+ void
+ put_tile_rgba(unsigned x, unsigned y, unsigned w, unsigned h, const float *rgba)
+ {
+ struct pipe_screen *screen = $self->texture->screen;
+ struct pipe_transfer *transfer;
+ transfer = screen->get_tex_transfer(screen,
+ $self->texture,
+ $self->face,
+ $self->level,
+ $self->zslice,
+ PIPE_TRANSFER_WRITE,
+ x, y, w, h);
+ if(transfer) {
+ pipe_put_tile_rgba(transfer, 0, 0, w, h, rgba);
+ screen->tex_transfer_destroy(transfer);
+ }
+ }
+
+ %cstring_output_allocate_size(char **STRING, int *LENGTH, free(*$1));
+ void
+ get_tile_rgba8(unsigned x, unsigned y, unsigned w, unsigned h, char **STRING, int *LENGTH)
+ {
+ struct pipe_screen *screen = $self->texture->screen;
+ struct pipe_transfer *transfer;
+ float *rgba;
+ unsigned char *rgba8;
+ unsigned i, j, k;
+
+ *LENGTH = 0;
+ *STRING = NULL;
+
+ if (!$self)
+ return;
+
+ *LENGTH = h*w*4;
+ *STRING = (char *) malloc(*LENGTH);
+ if(!*STRING)
+ return;
+
+      rgba = malloc(h*w*4*sizeof(float));
+      if(!rgba) {
+         /* free the output string to avoid leaking it on failure */
+         free(*STRING);
+         *STRING = NULL;
+         *LENGTH = 0;
+         return;
+      }
+
+ rgba8 = (unsigned char *) *STRING;
+
+ transfer = screen->get_tex_transfer(screen,
+ $self->texture,
+ $self->face,
+ $self->level,
+ $self->zslice,
+ PIPE_TRANSFER_READ,
+ x, y,
+ w, h);
+ if(transfer) {
+ pipe_get_tile_rgba(transfer, 0, 0, w, h, rgba);
+ for(j = 0; j < h; ++j) {
+ for(i = 0; i < w; ++i)
+ for(k = 0; k <4; ++k)
+ rgba8[j*w*4 + i*4 + k] = float_to_ubyte(rgba[j*w*4 + i*4 + k]);
+ }
+ screen->tex_transfer_destroy(transfer);
+ }
+
+ free(rgba);
+ }
+
+ void
+ get_tile_z(unsigned x, unsigned y, unsigned w, unsigned h, unsigned *z)
+ {
+ struct pipe_screen *screen = $self->texture->screen;
+ struct pipe_transfer *transfer;
+ transfer = screen->get_tex_transfer(screen,
+ $self->texture,
+ $self->face,
+ $self->level,
+ $self->zslice,
+ PIPE_TRANSFER_READ,
+ x, y, w, h);
+ if(transfer) {
+ pipe_get_tile_z(transfer, 0, 0, w, h, z);
+ screen->tex_transfer_destroy(transfer);
+ }
+ }
+
+ void
+ put_tile_z(unsigned x, unsigned y, unsigned w, unsigned h, const unsigned *z)
+ {
+ struct pipe_screen *screen = $self->texture->screen;
+ struct pipe_transfer *transfer;
+ transfer = screen->get_tex_transfer(screen,
+ $self->texture,
+ $self->face,
+ $self->level,
+ $self->zslice,
+ PIPE_TRANSFER_WRITE,
+ x, y, w, h);
+ if(transfer) {
+ pipe_put_tile_z(transfer, 0, 0, w, h, z);
+ screen->tex_transfer_destroy(transfer);
+ }
+ }
+
+ void
+ sample_rgba(float *rgba) {
+ st_sample_surface($self, rgba);
+ }
+
+ unsigned
+ compare_tile_rgba(unsigned x, unsigned y, unsigned w, unsigned h, const float *rgba, float tol = 0.0)
+ {
+ struct pipe_screen *screen = $self->texture->screen;
+ struct pipe_transfer *transfer;
+ float *rgba2;
+ const float *p1;
+ const float *p2;
+ unsigned i, j, n;
+
+ rgba2 = MALLOC(h*w*4*sizeof(float));
+ if(!rgba2)
+ return ~0;
+
+ transfer = screen->get_tex_transfer(screen,
+ $self->texture,
+ $self->face,
+ $self->level,
+ $self->zslice,
+ PIPE_TRANSFER_READ,
+ x, y, w, h);
+ if(!transfer) {
+ FREE(rgba2);
+ return ~0;
+ }
+
+ pipe_get_tile_rgba(transfer, 0, 0, w, h, rgba2);
+ screen->tex_transfer_destroy(transfer);
+
+ p1 = rgba;
+ p2 = rgba2;
+ n = 0;
+ for(i = h*w; i; --i) {
+ unsigned differs = 0;
+ for(j = 4; j; --j) {
+ float delta = *p2++ - *p1++;
+ if (delta < -tol || delta > tol)
+ differs = 1;
+ }
+ n += differs;
+ }
+
+ FREE(rgba2);
+
+ return n;
+ }
+
+};
+
+%{
+ static enum pipe_format
+ st_surface_format_get(struct st_surface *surface)
+ {
+ return surface->texture->format;
+ }
+
+ static unsigned
+ st_surface_width_get(struct st_surface *surface)
+ {
+ return surface->texture->width[surface->level];
+ }
+
+ static unsigned
+ st_surface_height_get(struct st_surface *surface)
+ {
+ return surface->texture->height[surface->level];
+ }
+
+ static unsigned
+ st_surface_nblocksx_get(struct st_surface *surface)
+ {
+ return surface->texture->nblocksx[surface->level];
+ }
+
+ static unsigned
+ st_surface_nblocksy_get(struct st_surface *surface)
+ {
+ return surface->texture->nblocksy[surface->level];
+ }
+%}
+
+/* Avoid naming conflict with p_inlines.h's pipe_buffer_read/write */
+%rename(read) read_;
+%rename(write) write_;
+
+%extend pipe_buffer {
+
+ ~pipe_buffer() {
+ struct pipe_buffer *ptr = $self;
+ pipe_buffer_reference(&ptr, NULL);
+ }
+
+ unsigned __len__(void)
+ {
+ assert(p_atomic_read(&$self->reference.count) > 0);
+ return $self->size;
+ }
+
+ %cstring_output_allocate_size(char **STRING, int *LENGTH, free(*$1));
+ void read_(char **STRING, int *LENGTH)
+ {
+ struct pipe_screen *screen = $self->screen;
+
+ assert(p_atomic_read(&$self->reference.count) > 0);
+
+ *LENGTH = $self->size;
+ *STRING = (char *) malloc($self->size);
+ if(!*STRING)
+ return;
+
+ pipe_buffer_read(screen, $self, 0, $self->size, *STRING);
+ }
+
+ %cstring_input_binary(const char *STRING, unsigned LENGTH);
+ void write_(const char *STRING, unsigned LENGTH, unsigned offset = 0)
+ {
+ struct pipe_screen *screen = $self->screen;
+
+ assert(p_atomic_read(&$self->reference.count) > 0);
+
+ if(offset > $self->size)
+ SWIG_exception(SWIG_ValueError, "offset must be smaller than buffer size");
+
+ if(offset + LENGTH > $self->size)
+ SWIG_exception(SWIG_ValueError, "data length must fit inside the buffer");
+
+ pipe_buffer_write(screen, $self, offset, LENGTH, STRING);
+
+fail:
+ return;
+ }
+};
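+
+/*
+ * Minimal usage sketch (illustrative only, not generated documentation): from
+ * Python, the buffer extensions above are expected to be used roughly as
+ *
+ *    buf = dev.buffer_create(size, 4, gallium.PIPE_BUFFER_USAGE_CPU_WRITE)
+ *    buf.write(data)       # wraps write_() above
+ *    data = buf.read()     # wraps read_() above
+ *
+ * where dev is a gallium.Device and the buffer_create() arguments
+ * (size, alignment, usage) follow the wrappers used by the retrace
+ * interpreter; treat this as a sketch rather than a fixed API contract.
+ */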
diff --git a/src/gallium/state_trackers/python/retrace/README b/src/gallium/state_trackers/python/retrace/README
new file mode 100644
index 0000000000..822cd11404
--- /dev/null
+++ b/src/gallium/state_trackers/python/retrace/README
@@ -0,0 +1,17 @@
+This is an application written in Python to replay the traces captured by the
+trace pipe driver.
+
+
+To use it, follow the instructions in src/gallium/drivers/trace/README and
+src/gallium/state_trackers/python/README, and then run
+
+  python src/gallium/state_trackers/python/retrace/interpreter.py filename.trace
+
+
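+For example (option behaviour as implemented by retrace/interpreter.py, and
+subject to change while this tool is work in progress):
+
+  # save PNG images of the framebuffer instead of showing them in a window
+  python src/gallium/state_trackers/python/retrace/interpreter.py -i filename.trace
+
+  # only present output from call 100 onwards, and stop after call 200
+  python src/gallium/state_trackers/python/retrace/interpreter.py -f 100 -t 200 filename.trace
+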
+This is still a work in progress:
+- not everything is captured/replayed yet
+  - in particular, surface/texture contents
+- any tiny error will result in a crash
+
+--
+Jose Fonseca <jrfonseca@tungstengraphics.com>
diff --git a/src/gallium/state_trackers/python/retrace/format.py b/src/gallium/state_trackers/python/retrace/format.py
new file mode 100755
index 0000000000..a4285bfe07
--- /dev/null
+++ b/src/gallium/state_trackers/python/retrace/format.py
@@ -0,0 +1,173 @@
+#!/usr/bin/env python
+##########################################################################
+#
+# Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+# All Rights Reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sub license, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial portions
+# of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+# IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+##########################################################################
+
+
+import sys
+
+
+class Formatter:
+ '''Plain formatter'''
+
+ def __init__(self, stream):
+ self.stream = stream
+
+ def text(self, text):
+ self.stream.write(text)
+
+ def newline(self):
+ self.text('\n')
+
+ def function(self, name):
+ self.text(name)
+
+ def variable(self, name):
+ self.text(name)
+
+ def literal(self, value):
+ self.text(str(value))
+
+ def address(self, addr):
+ self.text(str(addr))
+
+
+class AnsiFormatter(Formatter):
+ '''Formatter for plain-text files which outputs ANSI escape codes. See
+ http://en.wikipedia.org/wiki/ANSI_escape_code for more information
+ concerning ANSI escape codes.
+ '''
+
+ _csi = '\33['
+
+ _normal = '0m'
+ _bold = '1m'
+ _italic = '3m'
+ _red = '31m'
+ _green = '32m'
+ _blue = '34m'
+
+ def _escape(self, code):
+ self.text(self._csi + code)
+
+ def function(self, name):
+ self._escape(self._bold)
+ Formatter.function(self, name)
+ self._escape(self._normal)
+
+ def variable(self, name):
+ self._escape(self._italic)
+ Formatter.variable(self, name)
+ self._escape(self._normal)
+
+ def literal(self, value):
+ self._escape(self._blue)
+ Formatter.literal(self, value)
+ self._escape(self._normal)
+
+ def address(self, value):
+ self._escape(self._green)
+ Formatter.address(self, value)
+ self._escape(self._normal)
+
+
+class WindowsConsoleFormatter(Formatter):
+ '''Formatter for the Windows Console. See
+ http://code.activestate.com/recipes/496901/ for more information.
+ '''
+
+ STD_INPUT_HANDLE = -10
+ STD_OUTPUT_HANDLE = -11
+ STD_ERROR_HANDLE = -12
+
+ FOREGROUND_BLUE = 0x01
+ FOREGROUND_GREEN = 0x02
+ FOREGROUND_RED = 0x04
+ FOREGROUND_INTENSITY = 0x08
+ BACKGROUND_BLUE = 0x10
+ BACKGROUND_GREEN = 0x20
+ BACKGROUND_RED = 0x40
+ BACKGROUND_INTENSITY = 0x80
+
+ _normal = FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED
+ _bold = FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_INTENSITY
+ _italic = FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED
+ _red = FOREGROUND_RED | FOREGROUND_INTENSITY
+ _green = FOREGROUND_GREEN | FOREGROUND_INTENSITY
+ _blue = FOREGROUND_BLUE | FOREGROUND_INTENSITY
+
+ def __init__(self, stream):
+ Formatter.__init__(self, stream)
+
+ if stream is sys.stdin:
+ nStdHandle = self.STD_INPUT_HANDLE
+ elif stream is sys.stdout:
+ nStdHandle = self.STD_OUTPUT_HANDLE
+ elif stream is sys.stderr:
+ nStdHandle = self.STD_ERROR_HANDLE
+ else:
+ nStdHandle = None
+
+ if nStdHandle:
+ import ctypes
+ self.handle = ctypes.windll.kernel32.GetStdHandle(nStdHandle)
+ else:
+ self.handle = None
+
+ def _attribute(self, attr):
+ if self.handle:
+ import ctypes
+ ctypes.windll.kernel32.SetConsoleTextAttribute(self.handle, attr)
+
+ def function(self, name):
+ self._attribute(self._bold)
+ Formatter.function(self, name)
+ self._attribute(self._normal)
+
+ def variable(self, name):
+ self._attribute(self._italic)
+ Formatter.variable(self, name)
+ self._attribute(self._normal)
+
+ def literal(self, value):
+ self._attribute(self._blue)
+ Formatter.literal(self, value)
+ self._attribute(self._normal)
+
+ def address(self, value):
+ self._attribute(self._green)
+ Formatter.address(self, value)
+ self._attribute(self._normal)
+
+
+def DefaultFormatter(stream):
+ if sys.platform in ('linux2', 'cygwin'):
+ return AnsiFormatter(stream)
+ elif sys.platform in ('win32',):
+ return WindowsConsoleFormatter(stream)
+ else:
+ return Formatter(stream)
+
diff --git a/src/gallium/state_trackers/python/retrace/interpreter.py b/src/gallium/state_trackers/python/retrace/interpreter.py
new file mode 100755
index 0000000000..6f0bd6ae52
--- /dev/null
+++ b/src/gallium/state_trackers/python/retrace/interpreter.py
@@ -0,0 +1,748 @@
+#!/usr/bin/env python
+##########################################################################
+#
+# Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+# All Rights Reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sub license, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial portions
+# of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+# IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+##########################################################################
+
+
+import sys
+import struct
+
+import gallium
+import model
+import parse as parser
+
+
+try:
+ from struct import unpack_from
+except ImportError:
+ def unpack_from(fmt, buf, offset=0):
+ size = struct.calcsize(fmt)
+ return struct.unpack(fmt, buf[offset:offset + size])
+
+
+def make_image(surface, x=None, y=None, w=None, h=None):
+ if x is None:
+ x = 0
+ if y is None:
+ y = 0
+ if w is None:
+ w = surface.width - x
+ if h is None:
+ h = surface.height - y
+ data = surface.get_tile_rgba8(x, y, w, h)
+
+ import Image
+ outimage = Image.fromstring('RGBA', (w, h), data, "raw", 'RGBA', 0, 1)
+ return outimage
+
+def save_image(filename, surface, x=None, y=None, w=None, h=None):
+ outimage = make_image(surface, x, y, w, h)
+ outimage.save(filename, "PNG")
+
+def show_image(surface, title, x=None, y=None, w=None, h=None):
+ outimage = make_image(surface, x, y, w, h)
+
+ import Tkinter as tk
+ from PIL import Image, ImageTk
+ root = tk.Tk()
+
+ root.title(title)
+
+ image1 = ImageTk.PhotoImage(outimage)
+ w = image1.width()
+ h = image1.height()
+ x = 100
+ y = 100
+ root.geometry("%dx%d+%d+%d" % (w, h, x, y))
+ panel1 = tk.Label(root, image=image1)
+ panel1.pack(side='top', fill='both', expand='yes')
+ panel1.image = image1
+ root.mainloop()
+
+
+class Struct:
+ """C-like struct"""
+
+ # A basic Python class can pass as a C-like structure
+ pass
+
+
+struct_factories = {
+ "pipe_blend_color": gallium.BlendColor,
+ "pipe_blend_state": gallium.Blend,
+ #"pipe_clip_state": gallium.Clip,
+ #"pipe_constant_buffer": gallium.ConstantBuffer,
+ "pipe_depth_state": gallium.Depth,
+ "pipe_stencil_state": gallium.Stencil,
+ "pipe_alpha_state": gallium.Alpha,
+ "pipe_depth_stencil_alpha_state": gallium.DepthStencilAlpha,
+ "pipe_format_block": gallium.FormatBlock,
+ #"pipe_framebuffer_state": gallium.Framebuffer,
+ "pipe_poly_stipple": gallium.PolyStipple,
+ "pipe_rasterizer_state": gallium.Rasterizer,
+ "pipe_sampler_state": gallium.Sampler,
+ "pipe_scissor_state": gallium.Scissor,
+ #"pipe_shader_state": gallium.Shader,
+ #"pipe_vertex_buffer": gallium.VertexBuffer,
+ "pipe_vertex_element": gallium.VertexElement,
+ "pipe_viewport_state": gallium.Viewport,
+ #"pipe_texture": gallium.Texture,
+}
+
+
+member_array_factories = {
+ "pipe_rasterizer_state": {"sprite_coord_mode": gallium.ByteArray},
+ "pipe_poly_stipple": {"stipple": gallium.UnsignedArray},
+ "pipe_viewport_state": {"scale": gallium.FloatArray, "translate": gallium.FloatArray},
+ #"pipe_clip_state": {"ucp": gallium.FloatArray},
+ "pipe_depth_stencil_alpha_state": {"stencil": gallium.StencilArray},
+ "pipe_blend_color": {"color": gallium.FloatArray},
+ "pipe_sampler_state": {"border_color": gallium.FloatArray},
+}
+
+
+class Translator(model.Visitor):
+ """Translate model arguments into regular Python objects"""
+
+ def __init__(self, interpreter):
+ self.interpreter = interpreter
+ self.result = None
+
+ def visit(self, node):
+ self.result = None
+ node.visit(self)
+ return self.result
+
+ def visit_literal(self, node):
+ self.result = node.value
+
+ def visit_named_constant(self, node):
+ # lookup the named constant in the gallium module
+ self.result = getattr(gallium, node.name)
+
+ def visit_array(self, node):
+ array = []
+ for element in node.elements:
+ array.append(self.visit(element))
+ self.result = array
+
+ def visit_struct(self, node):
+ struct_factory = struct_factories.get(node.name, Struct)
+ struct = struct_factory()
+ for member_name, member_node in node.members:
+ member_value = self.visit(member_node)
+ try:
+ array_factory = member_array_factories[node.name][member_name]
+ except KeyError:
+ pass
+ else:
+ assert isinstance(member_value, list)
+ array = array_factory(len(member_value))
+ for i in range(len(member_value)):
+ array[i] = member_value[i]
+ member_value = array
+ #print node.name, member_name, member_value
+ assert isinstance(struct, Struct) or hasattr(struct, member_name)
+ setattr(struct, member_name, member_value)
+ self.result = struct
+
+ def visit_pointer(self, node):
+ self.result = self.interpreter.lookup_object(node.address)
+
+
+class Object:
+
+ def __init__(self, interpreter, real):
+ self.interpreter = interpreter
+ self.real = real
+
+
+class Global(Object):
+
+ def __init__(self, interpreter, real):
+ self.interpreter = interpreter
+ self.real = real
+
+ def pipe_winsys_create(self):
+ return Winsys(self.interpreter, gallium.Device())
+
+ def pipe_screen_create(self, winsys=None):
+ if winsys is None:
+ real = gallium.Device()
+ else:
+ real = winsys.real
+ return Screen(self.interpreter, real)
+
+ def pipe_context_create(self, screen):
+ context = screen.real.context_create()
+ return Context(self.interpreter, context)
+
+
+class Winsys(Object):
+
+ def __init__(self, interpreter, real):
+ self.interpreter = interpreter
+ self.real = real
+
+ def get_name(self):
+ pass
+
+ def user_buffer_create(self, data, size):
+ # We don't really care to distinguish between user and regular buffers
+ buffer = self.real.buffer_create(size,
+ 4,
+ gallium.PIPE_BUFFER_USAGE_CPU_READ |
+ gallium.PIPE_BUFFER_USAGE_CPU_WRITE )
+ assert size == len(data)
+ buffer.write(data)
+ return buffer
+
+ def buffer_create(self, alignment, usage, size):
+ return self.real.buffer_create(size, alignment, usage)
+
+ def buffer_destroy(self, buffer):
+ pass
+
+ def buffer_write(self, buffer, data, size):
+ assert size == len(data)
+ buffer.write(data)
+
+ def fence_finish(self, fence, flags):
+ pass
+
+ def fence_reference(self, dst, src):
+ pass
+
+ def flush_frontbuffer(self, surface):
+ pass
+
+ def surface_alloc(self):
+ return None
+
+ def surface_release(self, surface):
+ pass
+
+
+class Transfer:
+
+ def __init__(self, surface, x, y, w, h):
+ self.surface = surface
+ self.x = x
+ self.y = y
+ self.w = w
+ self.h = h
+
+
+class Screen(Object):
+
+ def destroy(self):
+ pass
+
+ def get_name(self):
+ pass
+
+ def get_vendor(self):
+ pass
+
+ def get_param(self, param):
+ pass
+
+ def get_paramf(self, param):
+ pass
+
+ def is_format_supported(self, format, target, tex_usage, geom_flags):
+ return self.real.is_format_supported(format, target, tex_usage, geom_flags)
+
+ def texture_create(self, templat):
+ return self.real.texture_create(
+ format = templat.format,
+ width = templat.width[0],
+ height = templat.height[0],
+ depth = templat.depth[0],
+ last_level = templat.last_level,
+ target = templat.target,
+ tex_usage = templat.tex_usage,
+ )
+
+ def texture_destroy(self, texture):
+ self.interpreter.unregister_object(texture)
+
+ def texture_release(self, surface):
+ pass
+
+ def get_tex_surface(self, texture, face, level, zslice, usage):
+ if texture is None:
+ return None
+ return texture.get_surface(face, level, zslice)
+
+ def tex_surface_destroy(self, surface):
+ self.interpreter.unregister_object(surface)
+
+ def tex_surface_release(self, surface):
+ pass
+
+ def surface_write(self, surface, data, stride, size):
+ if surface is None:
+ return
+ assert surface.nblocksy * stride == size
+ surface.put_tile_raw(0, 0, surface.width, surface.height, data, stride)
+
+ def get_tex_transfer(self, texture, face, level, zslice, usage, x, y, w, h):
+ if texture is None:
+ return None
+ transfer = Transfer(texture.get_surface(face, level, zslice), x, y, w, h)
+ if transfer and usage != gallium.PIPE_TRANSFER_WRITE:
+ if self.interpreter.options.all:
+ self.interpreter.present(transfer.surface, 'transf_read', x, y, w, h)
+ return transfer
+
+ def tex_transfer_destroy(self, transfer):
+ self.interpreter.unregister_object(transfer)
+
+ def transfer_write(self, transfer, stride, data, size):
+ if transfer is None:
+ return
+ transfer.surface.put_tile_raw(transfer.x, transfer.y, transfer.w, transfer.h, data, stride)
+ if self.interpreter.options.all:
+ self.interpreter.present(transfer.surface, 'transf_write', transfer.x, transfer.y, transfer.w, transfer.h)
+
+ def user_buffer_create(self, data, size):
+ # We don't really care to distinguish between user and regular buffers
+ buffer = self.real.buffer_create(size,
+ 4,
+ gallium.PIPE_BUFFER_USAGE_CPU_READ |
+ gallium.PIPE_BUFFER_USAGE_CPU_WRITE )
+ assert size == len(data)
+ buffer.write(data)
+ return buffer
+
+ def buffer_create(self, alignment, usage, size):
+ return self.real.buffer_create(size, alignment, usage)
+
+ def buffer_destroy(self, buffer):
+ pass
+
+ def buffer_write(self, buffer, data, size, offset=0):
+ assert size == len(data)
+ buffer.write(data)
+
+ def fence_finish(self, fence, flags):
+ pass
+
+ def fence_reference(self, dst, src):
+ pass
+
+ def flush_frontbuffer(self, surface):
+ pass
+
+
+class Context(Object):
+
+ def __init__(self, interpreter, real):
+ Object.__init__(self, interpreter, real)
+ self.cbufs = []
+ self.zsbuf = None
+ self.vbufs = []
+ self.velems = []
+ self.dirty = False
+
+ def destroy(self):
+ pass
+
+ def create_blend_state(self, state):
+ return state
+
+ def bind_blend_state(self, state):
+ if state is not None:
+ self.real.set_blend(state)
+
+ def delete_blend_state(self, state):
+ pass
+
+ def create_sampler_state(self, state):
+ return state
+
+ def delete_sampler_state(self, state):
+ pass
+
+ def bind_sampler_states(self, num_states, states):
+ for i in range(num_states):
+ self.real.set_sampler(i, states[i])
+
+ def create_rasterizer_state(self, state):
+ return state
+
+ def bind_rasterizer_state(self, state):
+ if state is not None:
+ self.real.set_rasterizer(state)
+
+ def delete_rasterizer_state(self, state):
+ pass
+
+ def create_depth_stencil_alpha_state(self, state):
+ return state
+
+ def bind_depth_stencil_alpha_state(self, state):
+ if state is not None:
+ self.real.set_depth_stencil_alpha(state)
+
+ def delete_depth_stencil_alpha_state(self, state):
+ pass
+
+ def create_fs_state(self, state):
+ tokens = str(state.tokens)
+ shader = gallium.Shader(tokens)
+ return shader
+
+ create_vs_state = create_fs_state
+
+ def bind_fs_state(self, state):
+ self.real.set_fragment_shader(state)
+
+ def bind_vs_state(self, state):
+ self.real.set_vertex_shader(state)
+
+ def delete_fs_state(self, state):
+ pass
+
+ delete_vs_state = delete_fs_state
+
+ def set_blend_color(self, state):
+ self.real.set_blend_color(state)
+
+ def set_clip_state(self, state):
+ _state = gallium.Clip()
+ _state.nr = state.nr
+ if state.nr:
+ # FIXME
+ ucp = gallium.FloatArray(gallium.PIPE_MAX_CLIP_PLANES*4)
+ for i in range(len(state.ucp)):
+ for j in range(len(state.ucp[i])):
+ ucp[i*4 + j] = state.ucp[i][j]
+ _state.ucp = ucp
+ self.real.set_clip(_state)
+
+ def dump_constant_buffer(self, buffer):
+ if not self.interpreter.verbosity(2):
+ return
+
+ data = buffer.read()
+ format = '4f'
+ index = 0
+ for offset in range(0, len(data), struct.calcsize(format)):
+ x, y, z, w = unpack_from(format, data, offset)
+ sys.stdout.write('\tCONST[%2u] = {%10.4f, %10.4f, %10.4f, %10.4f}\n' % (index, x, y, z, w))
+ index += 1
+ sys.stdout.flush()
+
+ def set_constant_buffer(self, shader, index, buffer):
+ if buffer is not None:
+ self.real.set_constant_buffer(shader, index, buffer.buffer)
+
+ self.dump_constant_buffer(buffer.buffer)
+
+ def set_framebuffer_state(self, state):
+ _state = gallium.Framebuffer()
+ _state.width = state.width
+ _state.height = state.height
+ _state.nr_cbufs = state.nr_cbufs
+ for i in range(len(state.cbufs)):
+ _state.set_cbuf(i, state.cbufs[i])
+ _state.set_zsbuf(state.zsbuf)
+ self.real.set_framebuffer(_state)
+
+ self.cbufs = state.cbufs
+ self.zsbuf = state.zsbuf
+
+ def set_polygon_stipple(self, state):
+ self.real.set_polygon_stipple(state)
+
+ def set_scissor_state(self, state):
+ self.real.set_scissor(state)
+
+ def set_viewport_state(self, state):
+ self.real.set_viewport(state)
+
+ def set_sampler_textures(self, num_textures, textures):
+ for i in range(num_textures):
+ self.real.set_sampler_texture(i, textures[i])
+
+ def set_vertex_buffers(self, num_buffers, buffers):
+ self.vbufs = buffers[0:num_buffers]
+ for i in range(num_buffers):
+ vbuf = buffers[i]
+ self.real.set_vertex_buffer(
+ i,
+ stride = vbuf.stride,
+ max_index = vbuf.max_index,
+ buffer_offset = vbuf.buffer_offset,
+ buffer = vbuf.buffer,
+ )
+
+ def set_vertex_elements(self, num_elements, elements):
+ self.velems = elements[0:num_elements]
+ for i in range(num_elements):
+ self.real.set_vertex_element(i, elements[i])
+ self.real.set_vertex_elements(num_elements)
+
+ def set_edgeflags(self, bitfield):
+ # FIXME
+ pass
+
+ def dump_vertices(self, start, count):
+ if not self.interpreter.verbosity(2):
+ return
+
+ for index in range(start, start + count):
+ if index >= start + 16:
+ sys.stdout.write('\t...\n')
+ break
+ sys.stdout.write('\t{\n')
+ for velem in self.velems:
+ vbuf = self.vbufs[velem.vertex_buffer_index]
+
+ offset = vbuf.buffer_offset + velem.src_offset + vbuf.stride*index
+ format = {
+ gallium.PIPE_FORMAT_R32_FLOAT: 'f',
+ gallium.PIPE_FORMAT_R32G32_FLOAT: '2f',
+ gallium.PIPE_FORMAT_R32G32B32_FLOAT: '3f',
+ gallium.PIPE_FORMAT_R32G32B32A32_FLOAT: '4f',
+ gallium.PIPE_FORMAT_B8G8R8A8_UNORM: '4B',
+ }[velem.src_format]
+
+ data = vbuf.buffer.read()
+ values = unpack_from(format, data, offset)
+ sys.stdout.write('\t\t{' + ', '.join(map(str, values)) + '},\n')
+ assert len(values) == velem.nr_components
+ sys.stdout.write('\t},\n')
+ sys.stdout.flush()
+
+ def dump_indices(self, ibuf, isize, start, count):
+ if not self.interpreter.verbosity(2):
+ return
+
+ format = {
+ 1: 'B',
+ 2: 'H',
+ 4: 'I',
+ }[isize]
+
+ assert struct.calcsize(format) == isize
+
+ data = ibuf.read()
+ maxindex, minindex = 0, 0xffffffff
+
+ sys.stdout.write('\t{\n')
+ for i in range(start, start + count):
+ if i >= start + 16:
+ sys.stdout.write('\t...\n')
+ break
+ offset = i*isize
+ index, = unpack_from(format, data, offset)
+ sys.stdout.write('\t\t%u,\n' % index)
+ minindex = min(minindex, index)
+ maxindex = max(maxindex, index)
+ sys.stdout.write('\t},\n')
+ sys.stdout.flush()
+
+ return minindex, maxindex
+
+ def draw_arrays(self, mode, start, count):
+ self.dump_vertices(start, count)
+
+ self.real.draw_arrays(mode, start, count)
+ self._set_dirty()
+
+ def draw_elements(self, indexBuffer, indexSize, mode, start, count):
+ if self.interpreter.verbosity(2):
+ minindex, maxindex = self.dump_indices(indexBuffer, indexSize, start, count)
+ self.dump_vertices(minindex, maxindex - minindex)
+
+ self.real.draw_elements(indexBuffer, indexSize, mode, start, count)
+ self._set_dirty()
+
+ def draw_range_elements(self, indexBuffer, indexSize, minIndex, maxIndex, mode, start, count):
+ if self.interpreter.verbosity(2):
+ minindex, maxindex = self.dump_indices(indexBuffer, indexSize, start, count)
+ minindex = min(minindex, minIndex)
+ maxindex = min(maxindex, maxIndex)
+ self.dump_vertices(minindex, maxindex - minindex)
+
+ self.real.draw_range_elements(indexBuffer, indexSize, minIndex, maxIndex, mode, start, count)
+ self._set_dirty()
+
+ def surface_copy(self, dest, destx, desty, src, srcx, srcy, width, height):
+ if dest is not None and src is not None:
+ if self.interpreter.options.all:
+ self.interpreter.present(src, 'surface_copy_src', srcx, srcy, width, height)
+ self.real.surface_copy(dest, destx, desty, src, srcx, srcy, width, height)
+ if dest in self.cbufs:
+ self._set_dirty()
+ flags = gallium.PIPE_FLUSH_FRAME
+ else:
+ flags = 0
+ self.flush(flags)
+ if self.interpreter.options.all:
+ self.interpreter.present(dest, 'surface_copy_dest', destx, desty, width, height)
+
+ def is_texture_referenced(self, texture, face, level):
+ #return self.real.is_texture_referenced(format, texture, face, level)
+ pass
+
+ def is_buffer_referenced(self, buf):
+ #return self.real.is_buffer_referenced(format, buf)
+ pass
+
+ def _set_dirty(self):
+ if self.interpreter.options.step:
+ self._present()
+ else:
+ self.dirty = True
+
+ def flush(self, flags):
+ self.real.flush(flags)
+ if self.dirty:
+ if flags & gallium.PIPE_FLUSH_FRAME:
+ self._present()
+ self.dirty = False
+ return None
+
+ def clear(self, buffers, rgba, depth, stencil):
+ _rgba = gallium.FloatArray(4)
+ for i in range(4):
+ _rgba[i] = rgba[i]
+ self.real.clear(buffers, _rgba, depth, stencil)
+
+ def _present(self):
+ self.real.flush()
+
+ if self.cbufs and self.cbufs[0]:
+ self.interpreter.present(self.cbufs[0], "cbuf")
+ if self.zsbuf:
+ if self.interpreter.options.all:
+ self.interpreter.present(self.zsbuf, "zsbuf")
+
+
+class Interpreter(parser.TraceDumper):
+
+ ignore_calls = set((
+ ('pipe_screen', 'is_format_supported'),
+ ('pipe_screen', 'get_param'),
+ ('pipe_screen', 'get_paramf'),
+ ))
+
+ def __init__(self, stream, options):
+ parser.TraceDumper.__init__(self, stream)
+ self.options = options
+ self.objects = {}
+ self.result = None
+ self.globl = Global(self, None)
+ self.call_no = None
+
+ def register_object(self, address, object):
+ self.objects[address] = object
+
+ def unregister_object(self, object):
+ # FIXME:
+ pass
+
+ def lookup_object(self, address):
+ return self.objects[address]
+
+ def interpret(self, trace):
+ for call in trace.calls:
+ self.interpret_call(call)
+
+ def handle_call(self, call):
+ if self.options.stop and call.no > self.options.stop:
+ sys.exit(0)
+
+ if (call.klass, call.method) in self.ignore_calls:
+ return
+
+ self.call_no = call.no
+
+ if self.verbosity(1):
+ parser.TraceDumper.handle_call(self, call)
+ sys.stdout.flush()
+
+ args = [(str(name), self.interpret_arg(arg)) for name, arg in call.args]
+
+ if call.klass:
+ name, obj = args[0]
+ args = args[1:]
+ else:
+ obj = self.globl
+
+ method = getattr(obj, call.method)
+ ret = method(**dict(args))
+
+ if call.ret and isinstance(call.ret, model.Pointer):
+ if ret is None:
+ sys.stderr.write('warning: NULL returned\n')
+ self.register_object(call.ret.address, ret)
+
+ self.call_no = None
+
+ def interpret_arg(self, node):
+ translator = Translator(self)
+ return translator.visit(node)
+
+ def verbosity(self, level):
+ return self.options.verbosity >= level
+
+ def present(self, surface, description, x=None, y=None, w=None, h=None):
+ if self.call_no < self.options.start:
+ return
+
+ if self.options.images:
+ filename = '%04u_%s.png' % (self.call_no, description)
+ save_image(filename, surface, x, y, w, h)
+ else:
+ title = '%u. %s' % (self.call_no, description)
+ show_image(surface, title, x, y, w, h)
+
+
+class Main(parser.Main):
+
+ def get_optparser(self):
+ optparser = parser.Main.get_optparser(self)
+ optparser.add_option("-q", "--quiet", action="store_const", const=0, dest="verbosity", help="no messages")
+ optparser.add_option("-v", "--verbose", action="count", dest="verbosity", default=1, help="increase verbosity level")
+ optparser.add_option("-i", "--images", action="store_true", dest="images", default=False, help="save images instead of showing them")
+ optparser.add_option("-a", "--all", action="store_true", dest="all", default=False, help="show depth, stencil, and transfers")
+ optparser.add_option("-s", "--step", action="store_true", dest="step", default=False, help="step through every draw")
+ optparser.add_option("-f", "--from", action="store", type="int", dest="start", default=0, help="from call no")
+ optparser.add_option("-t", "--to", action="store", type="int", dest="stop", default=0, help="until call no")
+ return optparser
+
+ def process_arg(self, stream, options):
+ parser = Interpreter(stream, options)
+ parser.parse()
+
+
+if __name__ == '__main__':
+ Main().main()
diff --git a/src/gallium/state_trackers/python/retrace/model.py b/src/gallium/state_trackers/python/retrace/model.py
new file mode 100755
index 0000000000..d4a079fb1e
--- /dev/null
+++ b/src/gallium/state_trackers/python/retrace/model.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+##########################################################################
+#
+# Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+# All Rights Reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sub license, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial portions
+# of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+# IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+##########################################################################
+
+
+'''Trace data model.'''
+
+
+import sys
+import string
+import format
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+
+class Node:
+
+ def visit(self, visitor):
+ raise NotImplementedError
+
+ def __str__(self):
+ stream = StringIO()
+ formatter = format.DefaultFormatter(stream)
+ pretty_printer = PrettyPrinter(formatter)
+ self.visit(pretty_printer)
+ return stream.getvalue()
+
+
+class Literal(Node):
+
+ def __init__(self, value):
+ self.value = value
+
+ def visit(self, visitor):
+ visitor.visit_literal(self)
+
+
+class NamedConstant(Node):
+
+ def __init__(self, name):
+ self.name = name
+
+ def visit(self, visitor):
+ visitor.visit_named_constant(self)
+
+
+class Array(Node):
+
+ def __init__(self, elements):
+ self.elements = elements
+
+ def visit(self, visitor):
+ visitor.visit_array(self)
+
+
+class Struct(Node):
+
+ def __init__(self, name, members):
+ self.name = name
+ self.members = members
+
+ def visit(self, visitor):
+ visitor.visit_struct(self)
+
+
+class Pointer(Node):
+
+ def __init__(self, address):
+ self.address = address
+
+ def visit(self, visitor):
+ visitor.visit_pointer(self)
+
+
+class Call:
+
+ def __init__(self, no, klass, method, args, ret):
+ self.no = no
+ self.klass = klass
+ self.method = method
+ self.args = args
+ self.ret = ret
+
+ def visit(self, visitor):
+ visitor.visit_call(self)
+
+
+class Trace:
+
+ def __init__(self, calls):
+ self.calls = calls
+
+ def visit(self, visitor):
+ visitor.visit_trace(self)
+
+
+class Visitor:
+
+ def visit_literal(self, node):
+ raise NotImplementedError
+
+ def visit_named_constant(self, node):
+ raise NotImplementedError
+
+ def visit_array(self, node):
+ raise NotImplementedError
+
+ def visit_struct(self, node):
+ raise NotImplementedError
+
+ def visit_pointer(self, node):
+ raise NotImplementedError
+
+ def visit_call(self, node):
+ raise NotImplementedError
+
+ def visit_trace(self, node):
+ raise NotImplementedError
+
+
+class PrettyPrinter:
+
+ def __init__(self, formatter):
+ self.formatter = formatter
+
+ def visit_literal(self, node):
+ if isinstance(node.value, basestring):
+ if len(node.value) >= 4096 or node.value.strip(string.printable):
+ self.formatter.text('...')
+ return
+
+ self.formatter.literal('"' + node.value + '"')
+ return
+
+ self.formatter.literal(repr(node.value))
+
+ def visit_named_constant(self, node):
+ self.formatter.literal(node.name)
+
+ def visit_array(self, node):
+ self.formatter.text('{')
+ sep = ''
+ for value in node.elements:
+ self.formatter.text(sep)
+ value.visit(self)
+ sep = ', '
+ self.formatter.text('}')
+
+ def visit_struct(self, node):
+ self.formatter.text('{')
+ sep = ''
+ for name, value in node.members:
+ self.formatter.text(sep)
+ self.formatter.variable(name)
+ self.formatter.text(' = ')
+ value.visit(self)
+ sep = ', '
+ self.formatter.text('}')
+
+ def visit_pointer(self, node):
+ self.formatter.address(node.address)
+
+ def visit_call(self, node):
+ self.formatter.text('%s ' % node.no)
+ if node.klass is not None:
+ self.formatter.function(node.klass + '::' + node.method)
+ else:
+ self.formatter.function(node.method)
+ self.formatter.text('(')
+ sep = ''
+ for name, value in node.args:
+ self.formatter.text(sep)
+ self.formatter.variable(name)
+ self.formatter.text(' = ')
+ value.visit(self)
+ sep = ', '
+ self.formatter.text(')')
+ if node.ret is not None:
+ self.formatter.text(' = ')
+ node.ret.visit(self)
+
+ def visit_trace(self, node):
+ for call in node.calls:
+ call.visit(self)
+ self.formatter.newline()
+
diff --git a/src/gallium/state_trackers/python/retrace/parse.py b/src/gallium/state_trackers/python/retrace/parse.py
new file mode 100755
index 0000000000..b08d368671
--- /dev/null
+++ b/src/gallium/state_trackers/python/retrace/parse.py
@@ -0,0 +1,392 @@
+#!/usr/bin/env python
+##########################################################################
+#
+# Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+# All Rights Reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sub license, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial portions
+# of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+# IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+##########################################################################
+
+
+import sys
+import xml.parsers.expat
+import binascii
+import optparse
+
+from model import *
+
+
+ELEMENT_START, ELEMENT_END, CHARACTER_DATA, EOF = range(4)
+
+
+class XmlToken:
+
+ def __init__(self, type, name_or_data, attrs = None, line = None, column = None):
+ assert type in (ELEMENT_START, ELEMENT_END, CHARACTER_DATA, EOF)
+ self.type = type
+ self.name_or_data = name_or_data
+ self.attrs = attrs
+ self.line = line
+ self.column = column
+
+ def __str__(self):
+ if self.type == ELEMENT_START:
+ return '<' + self.name_or_data + ' ...>'
+ if self.type == ELEMENT_END:
+ return '</' + self.name_or_data + '>'
+ if self.type == CHARACTER_DATA:
+ return self.name_or_data
+ if self.type == EOF:
+ return 'end of file'
+ assert 0
+
+
+class XmlTokenizer:
+ """Expat based XML tokenizer."""
+
+ def __init__(self, fp, skip_ws = True):
+ self.fp = fp
+ self.tokens = []
+ self.index = 0
+ self.final = False
+ self.skip_ws = skip_ws
+
+ self.character_pos = 0, 0
+ self.character_data = ''
+
+ self.parser = xml.parsers.expat.ParserCreate()
+ self.parser.StartElementHandler = self.handle_element_start
+ self.parser.EndElementHandler = self.handle_element_end
+ self.parser.CharacterDataHandler = self.handle_character_data
+
+ def handle_element_start(self, name, attributes):
+ self.finish_character_data()
+ line, column = self.pos()
+ token = XmlToken(ELEMENT_START, name, attributes, line, column)
+ self.tokens.append(token)
+
+ def handle_element_end(self, name):
+ self.finish_character_data()
+ line, column = self.pos()
+ token = XmlToken(ELEMENT_END, name, None, line, column)
+ self.tokens.append(token)
+
+ def handle_character_data(self, data):
+ if not self.character_data:
+ self.character_pos = self.pos()
+ self.character_data += data
+
+ def finish_character_data(self):
+ if self.character_data:
+ if not self.skip_ws or not self.character_data.isspace():
+ line, column = self.character_pos
+ token = XmlToken(CHARACTER_DATA, self.character_data, None, line, column)
+ self.tokens.append(token)
+ self.character_data = ''
+
+ def next(self):
+ size = 16*1024
+ while self.index >= len(self.tokens) and not self.final:
+ self.tokens = []
+ self.index = 0
+ data = self.fp.read(size)
+ self.final = len(data) < size
+ data = data.rstrip('\0')
+ try:
+ self.parser.Parse(data, self.final)
+ except xml.parsers.expat.ExpatError, e:
+ #if e.code == xml.parsers.expat.errors.XML_ERROR_NO_ELEMENTS:
+ if e.code == 3:
+ pass
+ else:
+ raise e
+ if self.index >= len(self.tokens):
+ line, column = self.pos()
+ token = XmlToken(EOF, None, None, line, column)
+ else:
+ token = self.tokens[self.index]
+ self.index += 1
+ return token
+
+ def pos(self):
+ return self.parser.CurrentLineNumber, self.parser.CurrentColumnNumber
+
+
+class TokenMismatch(Exception):
+
+ def __init__(self, expected, found):
+ self.expected = expected
+ self.found = found
+
+ def __str__(self):
+ return '%u:%u: %s expected, %s found' % (self.found.line, self.found.column, str(self.expected), str(self.found))
+
+
+
+class XmlParser:
+ """Base XML document parser."""
+
+ def __init__(self, fp):
+ self.tokenizer = XmlTokenizer(fp)
+ self.consume()
+
+ def consume(self):
+ self.token = self.tokenizer.next()
+
+ def match_element_start(self, name):
+ return self.token.type == ELEMENT_START and self.token.name_or_data == name
+
+ def match_element_end(self, name):
+ return self.token.type == ELEMENT_END and self.token.name_or_data == name
+
+ def element_start(self, name):
+ while self.token.type == CHARACTER_DATA:
+ self.consume()
+ if self.token.type != ELEMENT_START:
+ raise TokenMismatch(XmlToken(ELEMENT_START, name), self.token)
+ if self.token.name_or_data != name:
+ raise TokenMismatch(XmlToken(ELEMENT_START, name), self.token)
+ attrs = self.token.attrs
+ self.consume()
+ return attrs
+
+ def element_end(self, name):
+ while self.token.type == CHARACTER_DATA:
+ self.consume()
+ if self.token.type != ELEMENT_END:
+ raise TokenMismatch(XmlToken(ELEMENT_END, name), self.token)
+ if self.token.name_or_data != name:
+ raise TokenMismatch(XmlToken(ELEMENT_END, name), self.token)
+ self.consume()
+
+ def character_data(self, strip = True):
+ data = ''
+ while self.token.type == CHARACTER_DATA:
+ data += self.token.name_or_data
+ self.consume()
+ if strip:
+ data = data.strip()
+ return data
+
+
+class TraceParser(XmlParser):
+
+ def __init__(self, fp):
+ XmlParser.__init__(self, fp)
+ self.last_call_no = 0
+
+ def parse(self):
+ self.element_start('trace')
+ while self.token.type not in (ELEMENT_END, EOF):
+ call = self.parse_call()
+ self.handle_call(call)
+ if self.token.type != EOF:
+ self.element_end('trace')
+
+ def parse_call(self):
+ attrs = self.element_start('call')
+ try:
+ no = int(attrs['no'])
+ except KeyError:
+ self.last_call_no += 1
+ no = self.last_call_no
+ else:
+ self.last_call_no = no
+ klass = attrs['class']
+ method = attrs['method']
+ args = []
+ ret = None
+ while self.token.type == ELEMENT_START:
+ if self.token.name_or_data == 'arg':
+ arg = self.parse_arg()
+ args.append(arg)
+ elif self.token.name_or_data == 'ret':
+ ret = self.parse_ret()
+ elif self.token.name_or_data == 'call':
+ # ignore nested function calls
+ self.parse_call()
+ else:
+ raise TokenMismatch("<arg ...> or <ret ...>", self.token)
+ self.element_end('call')
+
+ return Call(no, klass, method, args, ret)
+
+ def parse_arg(self):
+ attrs = self.element_start('arg')
+ name = attrs['name']
+ value = self.parse_value()
+ self.element_end('arg')
+
+ return name, value
+
+ def parse_ret(self):
+ attrs = self.element_start('ret')
+ value = self.parse_value()
+ self.element_end('ret')
+
+ return value
+
+ def parse_value(self):
+ expected_tokens = ('null', 'bool', 'int', 'uint', 'float', 'string', 'enum', 'array', 'struct', 'ptr', 'bytes')
+ if self.token.type == ELEMENT_START:
+ if self.token.name_or_data in expected_tokens:
+ method = getattr(self, 'parse_' + self.token.name_or_data)
+ return method()
+ raise TokenMismatch(" or ".join(expected_tokens), self.token)
+
+ def parse_null(self):
+ self.element_start('null')
+ self.element_end('null')
+ return Literal(None)
+
+ def parse_bool(self):
+ self.element_start('bool')
+ value = int(self.character_data())
+ self.element_end('bool')
+ return Literal(value)
+
+ def parse_int(self):
+ self.element_start('int')
+ value = int(self.character_data())
+ self.element_end('int')
+ return Literal(value)
+
+ def parse_uint(self):
+ self.element_start('uint')
+ value = int(self.character_data())
+ self.element_end('uint')
+ return Literal(value)
+
+ def parse_float(self):
+ self.element_start('float')
+ value = float(self.character_data())
+ self.element_end('float')
+ return Literal(value)
+
+ def parse_enum(self):
+ self.element_start('enum')
+ name = self.character_data()
+ self.element_end('enum')
+ return NamedConstant(name)
+
+ def parse_string(self):
+ self.element_start('string')
+ value = self.character_data()
+ self.element_end('string')
+ return Literal(value)
+
+ def parse_bytes(self):
+ self.element_start('bytes')
+ value = binascii.a2b_hex(self.character_data())
+ self.element_end('bytes')
+ return Literal(value)
+
+ def parse_array(self):
+ self.element_start('array')
+ elems = []
+ while self.token.type != ELEMENT_END:
+ elems.append(self.parse_elem())
+ self.element_end('array')
+ return Array(elems)
+
+ def parse_elem(self):
+ self.element_start('elem')
+ value = self.parse_value()
+ self.element_end('elem')
+ return value
+
+ def parse_struct(self):
+ attrs = self.element_start('struct')
+ name = attrs['name']
+ members = []
+ while self.token.type != ELEMENT_END:
+ members.append(self.parse_member())
+ self.element_end('struct')
+ return Struct(name, members)
+
+ def parse_member(self):
+ attrs = self.element_start('member')
+ name = attrs['name']
+ value = self.parse_value()
+ self.element_end('member')
+
+ return name, value
+
+ def parse_ptr(self):
+ self.element_start('ptr')
+ address = self.character_data()
+ self.element_end('ptr')
+
+ return Pointer(address)
+
+ def handle_call(self, call):
+ pass
+
+
+class TraceDumper(TraceParser):
+
+ def __init__(self, fp):
+ TraceParser.__init__(self, fp)
+ self.formatter = format.DefaultFormatter(sys.stdout)
+ self.pretty_printer = PrettyPrinter(self.formatter)
+
+ def handle_call(self, call):
+ call.visit(self.pretty_printer)
+ self.formatter.newline()
+
+
+class Main:
+ '''Common main class for all retrace command line utilities.'''
+
+ def __init__(self):
+ pass
+
+ def main(self):
+ optparser = self.get_optparser()
+ (options, args) = optparser.parse_args(sys.argv[1:])
+
+ if args:
+ for arg in args:
+ if arg.endswith('.gz'):
+ from gzip import GzipFile
+ stream = GzipFile(arg, 'rt')
+ elif arg.endswith('.bz2'):
+ from bz2 import BZ2File
+ stream = BZ2File(arg, 'rU')
+ else:
+ stream = open(arg, 'rt')
+ self.process_arg(stream, options)
+ else:
+ self.process_arg(sys.stdin, options)
+
+ def get_optparser(self):
+ optparser = optparse.OptionParser(
+ usage="\n\t%prog [options] [traces] ...")
+ return optparser
+
+ def process_arg(self, stream, options):
+ parser = TraceDumper(stream)
+ parser.parse()
+
+
+if __name__ == '__main__':
+ Main().main()
diff --git a/src/gallium/state_trackers/python/retrace/parser.py b/src/gallium/state_trackers/python/retrace/parser.py
new file mode 100755
index 0000000000..bd47c9a6b0
--- /dev/null
+++ b/src/gallium/state_trackers/python/retrace/parser.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+##########################################################################
+#
+# Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+# All Rights Reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sub license, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial portions
+# of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+# IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+##########################################################################
+
+
+from parse import *
+
+
+if __name__ == '__main__':
+ Main().main()
diff --git a/src/gallium/state_trackers/python/samples/tri.py b/src/gallium/state_trackers/python/samples/tri.py
new file mode 100644
index 0000000000..b721e0b575
--- /dev/null
+++ b/src/gallium/state_trackers/python/samples/tri.py
@@ -0,0 +1,232 @@
+#!/usr/bin/env python
+##########################################################################
+#
+# Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+# All Rights Reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sub license, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial portions
+# of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+# IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+##########################################################################
+
+
+from gallium import *
+
+
+def make_image(surface):
+ data = surface.get_tile_rgba8(0, 0, surface.width, surface.height)
+
+ import Image
+ outimage = Image.fromstring('RGBA', (surface.width, surface.height), data, "raw", 'RGBA', 0, 1)
+ return outimage
+
+def save_image(filename, surface):
+ outimage = make_image(surface)
+ outimage.save(filename, "PNG")
+
+def show_image(surface):
+ outimage = make_image(surface)
+
+ import Tkinter as tk
+ from PIL import Image, ImageTk
+ root = tk.Tk()
+
+ root.title('background image')
+
+ image1 = ImageTk.PhotoImage(outimage)
+ w = image1.width()
+ h = image1.height()
+ x = 100
+ y = 100
+ root.geometry("%dx%d+%d+%d" % (w, h, x, y))
+ panel1 = tk.Label(root, image=image1)
+ panel1.pack(side='top', fill='both', expand='yes')
+ panel1.image = image1
+ root.mainloop()
+
+
+def test(dev):
+ ctx = dev.context_create()
+
+ width = 255
+ height = 255
+ minz = 0.0
+ maxz = 1.0
+
+ # disabled blending/masking
+ blend = Blend()
+ blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE
+ blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE
+ blend.rgb_dst_factor = PIPE_BLENDFACTOR_ZERO
+ blend.alpha_dst_factor = PIPE_BLENDFACTOR_ZERO
+ blend.colormask = PIPE_MASK_RGBA
+ ctx.set_blend(blend)
+
+ # depth/stencil/alpha
+ depth_stencil_alpha = DepthStencilAlpha()
+ depth_stencil_alpha.depth.enabled = 1
+ depth_stencil_alpha.depth.writemask = 1
+ depth_stencil_alpha.depth.func = PIPE_FUNC_LESS
+ ctx.set_depth_stencil_alpha(depth_stencil_alpha)
+
+ # rasterizer
+ rasterizer = Rasterizer()
+ rasterizer.front_winding = PIPE_WINDING_CW
+ rasterizer.cull_mode = PIPE_WINDING_NONE
+ rasterizer.scissor = 1
+ ctx.set_rasterizer(rasterizer)
+
+ # viewport
+ viewport = Viewport()
+ scale = FloatArray(4)
+ scale[0] = width / 2.0
+ scale[1] = -height / 2.0
+ scale[2] = (maxz - minz) / 2.0
+ scale[3] = 1.0
+ viewport.scale = scale
+ translate = FloatArray(4)
+ translate[0] = width / 2.0
+ translate[1] = height / 2.0
+ translate[2] = (maxz - minz) / 2.0
+ translate[3] = 0.0
+ viewport.translate = translate
+ ctx.set_viewport(viewport)
+
+ # samplers
+ sampler = Sampler()
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE
+ sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST
+ sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST
+ sampler.normalized_coords = 1
+ ctx.set_sampler(0, sampler)
+
+ # scissor
+ scissor = Scissor()
+ scissor.minx = 0
+ scissor.miny = 0
+ scissor.maxx = width
+ scissor.maxy = height
+ ctx.set_scissor(scissor)
+
+ clip = Clip()
+ clip.nr = 0
+ ctx.set_clip(clip)
+
+ # framebuffer
+ cbuf = dev.texture_create(
+ PIPE_FORMAT_X8R8G8B8_UNORM,
+ width, height,
+ tex_usage=PIPE_TEXTURE_USAGE_DISPLAY_TARGET,
+ ).get_surface()
+ zbuf = dev.texture_create(
+ PIPE_FORMAT_Z16_UNORM,
+ width, height,
+ tex_usage=PIPE_TEXTURE_USAGE_DEPTH_STENCIL,
+ ).get_surface()
+ fb = Framebuffer()
+ fb.width = width
+ fb.height = height
+ fb.nr_cbufs = 1
+ fb.set_cbuf(0, cbuf)
+ fb.set_zsbuf(zbuf)
+ ctx.set_framebuffer(fb)
+ rgba = FloatArray(4)
+ rgba[0] = 0.0
+ rgba[1] = 0.0
+ rgba[2] = 0.0
+ rgba[3] = 0.0
+ ctx.clear(PIPE_CLEAR_COLOR | PIPE_CLEAR_DEPTHSTENCIL, rgba, 1.0, 0xff)
+
+ # vertex shader
+ vs = Shader('''
+ VERT1.1
+ DCL IN[0], POSITION, CONSTANT
+ DCL IN[1], COLOR, CONSTANT
+ DCL OUT[0], POSITION, CONSTANT
+ DCL OUT[1], COLOR, CONSTANT
+ 0:MOV OUT[0], IN[0]
+ 1:MOV OUT[1], IN[1]
+ 2:END
+ ''')
+ ctx.set_vertex_shader(vs)
+
+ # fragment shader
+ fs = Shader('''
+ FRAG1.1
+ DCL IN[0], COLOR, LINEAR
+ DCL OUT[0], COLOR, CONSTANT
+ 0:MOV OUT[0], IN[0]
+ 1:END
+ ''')
+ ctx.set_fragment_shader(fs)
+
+ nverts = 3
+ nattrs = 2
+ verts = FloatArray(nverts * nattrs * 4)
+
+ verts[ 0] = 0.0 # x1
+ verts[ 1] = 0.8 # y1
+ verts[ 2] = 0.2 # z1
+ verts[ 3] = 1.0 # w1
+ verts[ 4] = 1.0 # r1
+ verts[ 5] = 0.0 # g1
+ verts[ 6] = 0.0 # b1
+ verts[ 7] = 1.0 # a1
+ verts[ 8] = -0.8 # x2
+ verts[ 9] = -0.8 # y2
+ verts[10] = 0.5 # z2
+ verts[11] = 1.0 # w2
+ verts[12] = 0.0 # r2
+ verts[13] = 1.0 # g2
+ verts[14] = 0.0 # b2
+ verts[15] = 1.0 # a2
+ verts[16] = 0.8 # x3
+ verts[17] = -0.8 # y3
+ verts[18] = 0.8 # z3
+ verts[19] = 1.0 # w3
+ verts[20] = 0.0 # r3
+ verts[21] = 0.0 # g3
+ verts[22] = 1.0 # b3
+ verts[23] = 1.0 # a3
+
+ ctx.draw_vertices(PIPE_PRIM_TRIANGLES,
+ nverts,
+ nattrs,
+ verts)
+
+ ctx.flush()
+
+ show_image(cbuf)
+ #show_image(zbuf)
+ #save_image('cbuf.png', cbuf)
+ #save_image('zbuf.png', zbuf)
+
+
+
+def main():
+ dev = Device()
+ test(dev)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/src/gallium/state_trackers/python/st_device.c b/src/gallium/state_trackers/python/st_device.c
new file mode 100644
index 0000000000..ea7d18738f
--- /dev/null
+++ b/src/gallium/state_trackers/python/st_device.c
@@ -0,0 +1,305 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "pipe/p_screen.h"
+#include "pipe/p_context.h"
+#include "pipe/p_shader_tokens.h"
+#include "pipe/p_inlines.h"
+#include "cso_cache/cso_context.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "util/u_simple_shaders.h"
+#include "trace/tr_screen.h"
+#include "trace/tr_context.h"
+
+#include "st_device.h"
+#include "st_winsys.h"
+
+
+static void
+st_device_really_destroy(struct st_device *st_dev)
+{
+ if(st_dev->screen) {
+ /* FIXME: Don't really destroy until we keep track of every single
+ * reference or we end up causing a segmentation fault every time
+ * python exits. */
+#if 0
+ st_dev->screen->destroy(st_dev->screen);
+#endif
+ }
+
+ FREE(st_dev);
+}
+
+
+static void
+st_device_reference(struct st_device **ptr, struct st_device *st_dev)
+{
+ struct st_device *old_dev = *ptr;
+
+ if (pipe_reference((struct pipe_reference **)ptr, &st_dev->reference))
+ st_device_really_destroy(old_dev);
+}
+
+
+void
+st_device_destroy(struct st_device *st_dev)
+{
+ st_device_reference(&st_dev, NULL);
+}
+
+
+static struct st_device *
+st_device_create_from_st_winsys(const struct st_winsys *st_ws)
+{
+ struct st_device *st_dev;
+
+ if(!st_ws->screen_create ||
+ !st_ws->context_create)
+ return NULL;
+
+ st_dev = CALLOC_STRUCT(st_device);
+ if(!st_dev)
+ return NULL;
+
+ pipe_reference_init(&st_dev->reference, 1);
+ st_dev->st_ws = st_ws;
+
+ st_dev->real_screen = st_ws->screen_create();
+ if(!st_dev->real_screen) {
+ st_device_destroy(st_dev);
+ return NULL;
+ }
+
+ st_dev->screen = trace_screen_create(st_dev->real_screen);
+ if(!st_dev->screen) {
+ st_device_destroy(st_dev);
+ return NULL;
+ }
+
+ return st_dev;
+}
+
+
+struct st_device *
+st_device_create(boolean hardware) {
+ if(hardware)
+ return st_device_create_from_st_winsys(&st_hardpipe_winsys);
+ else
+ return st_device_create_from_st_winsys(&st_softpipe_winsys);
+}
+
+
+void
+st_context_destroy(struct st_context *st_ctx)
+{
+ unsigned i;
+
+ if(st_ctx) {
+ struct st_device *st_dev = st_ctx->st_dev;
+
+ if(st_ctx->cso) {
+ cso_delete_vertex_shader(st_ctx->cso, st_ctx->vs);
+ cso_delete_fragment_shader(st_ctx->cso, st_ctx->fs);
+
+ cso_destroy_context(st_ctx->cso);
+ }
+
+ if(st_ctx->pipe)
+ st_ctx->pipe->destroy(st_ctx->pipe);
+
+ for(i = 0; i < PIPE_MAX_SAMPLERS; ++i)
+ pipe_texture_reference(&st_ctx->sampler_textures[i], NULL);
+ pipe_texture_reference(&st_ctx->default_texture, NULL);
+
+ FREE(st_ctx);
+
+ st_device_reference(&st_dev, NULL);
+ }
+}
+
+
+struct st_context *
+st_context_create(struct st_device *st_dev)
+{
+ struct st_context *st_ctx;
+
+ st_ctx = CALLOC_STRUCT(st_context);
+ if(!st_ctx)
+ return NULL;
+
+ st_device_reference(&st_ctx->st_dev, st_dev);
+
+ st_ctx->real_pipe = st_dev->st_ws->context_create(st_dev->real_screen);
+ if(!st_ctx->real_pipe) {
+ st_context_destroy(st_ctx);
+ return NULL;
+ }
+
+ st_ctx->pipe = trace_context_create(st_dev->screen, st_ctx->real_pipe);
+ if(!st_ctx->pipe) {
+ st_context_destroy(st_ctx);
+ return NULL;
+ }
+
+ st_ctx->cso = cso_create_context(st_ctx->pipe);
+ if(!st_ctx->cso) {
+ st_context_destroy(st_ctx);
+ return NULL;
+ }
+
+ /* disabled blending/masking */
+ {
+ struct pipe_blend_state blend;
+ memset(&blend, 0, sizeof(blend));
+ blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ blend.alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ blend.colormask = PIPE_MASK_RGBA;
+ cso_set_blend(st_ctx->cso, &blend);
+ }
+
+ /* no-op depth/stencil/alpha */
+ {
+ struct pipe_depth_stencil_alpha_state depthstencil;
+ memset(&depthstencil, 0, sizeof(depthstencil));
+ cso_set_depth_stencil_alpha(st_ctx->cso, &depthstencil);
+ }
+
+ /* rasterizer */
+ {
+ struct pipe_rasterizer_state rasterizer;
+ memset(&rasterizer, 0, sizeof(rasterizer));
+ rasterizer.front_winding = PIPE_WINDING_CW;
+ rasterizer.cull_mode = PIPE_WINDING_NONE;
+ cso_set_rasterizer(st_ctx->cso, &rasterizer);
+ }
+
+ /* clip */
+ {
+ struct pipe_clip_state clip;
+ memset(&clip, 0, sizeof(clip));
+ st_ctx->pipe->set_clip_state(st_ctx->pipe, &clip);
+ }
+
+ /* identity viewport */
+ {
+ struct pipe_viewport_state viewport;
+ viewport.scale[0] = 1.0;
+ viewport.scale[1] = 1.0;
+ viewport.scale[2] = 1.0;
+ viewport.scale[3] = 1.0;
+ viewport.translate[0] = 0.0;
+ viewport.translate[1] = 0.0;
+ viewport.translate[2] = 0.0;
+ viewport.translate[3] = 0.0;
+ cso_set_viewport(st_ctx->cso, &viewport);
+ }
+
+ /* samplers */
+ {
+ struct pipe_sampler_state sampler;
+ unsigned i;
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NEAREST;
+      sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
+      sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
+ sampler.normalized_coords = 1;
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
+ cso_single_sampler(st_ctx->cso, i, &sampler);
+ cso_single_sampler_done(st_ctx->cso);
+ }
+
+ /* default textures */
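+   /* Create a 1x1 texture, fill it with transparent black, and bind it to
+    * every sampler unit, so that texture sampling is well defined even
+    * before a script binds its own textures. */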
+ {
+ struct pipe_screen *screen = st_dev->screen;
+ struct pipe_texture templat;
+ struct pipe_transfer *transfer;
+ unsigned i;
+
+ memset( &templat, 0, sizeof( templat ) );
+ templat.target = PIPE_TEXTURE_2D;
+ templat.format = PIPE_FORMAT_A8R8G8B8_UNORM;
+ templat.block.size = 4;
+ templat.block.width = 1;
+ templat.block.height = 1;
+ templat.width[0] = 1;
+ templat.height[0] = 1;
+ templat.depth[0] = 1;
+ templat.last_level = 0;
+
+ st_ctx->default_texture = screen->texture_create( screen, &templat );
+ if(st_ctx->default_texture) {
+ transfer = screen->get_tex_transfer(screen,
+ st_ctx->default_texture,
+ 0, 0, 0,
+ PIPE_TRANSFER_WRITE,
+ 0, 0,
+ st_ctx->default_texture->width[0],
+ st_ctx->default_texture->height[0]);
+ if (transfer) {
+ uint32_t *map;
+ map = (uint32_t *) screen->transfer_map(screen, transfer);
+ if(map) {
+ *map = 0x00000000;
+ screen->transfer_unmap(screen, transfer);
+ }
+ screen->tex_transfer_destroy(transfer);
+ }
+ }
+
+ for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
+ pipe_texture_reference(&st_ctx->sampler_textures[i], st_ctx->default_texture);
+
+ cso_set_sampler_textures(st_ctx->cso, PIPE_MAX_SAMPLERS, st_ctx->sampler_textures);
+ }
+
+ /* vertex shader */
+ {
+ const uint semantic_names[] = { TGSI_SEMANTIC_POSITION,
+ TGSI_SEMANTIC_GENERIC };
+ const uint semantic_indexes[] = { 0, 0 };
+ st_ctx->vs = util_make_vertex_passthrough_shader(st_ctx->pipe,
+ 2,
+ semantic_names,
+ semantic_indexes);
+ cso_set_vertex_shader_handle(st_ctx->cso, st_ctx->vs);
+ }
+
+ /* fragment shader */
+ {
+ st_ctx->fs = util_make_fragment_passthrough_shader(st_ctx->pipe);
+ cso_set_fragment_shader_handle(st_ctx->cso, st_ctx->fs);
+ }
+
+ return st_ctx;
+}
diff --git a/src/gallium/state_trackers/python/st_device.h b/src/gallium/state_trackers/python/st_device.h
new file mode 100644
index 0000000000..a246b6a1f2
--- /dev/null
+++ b/src/gallium/state_trackers/python/st_device.h
@@ -0,0 +1,106 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef ST_DEVICE_H_
+#define ST_DEVICE_H_
+
+
+#include "pipe/p_state.h"
+
+struct cso_context;
+struct pipe_screen;
+struct pipe_context;
+struct st_winsys;
+
+
+struct st_surface
+{
+ struct pipe_texture *texture;
+ unsigned face;
+ unsigned level;
+ unsigned zslice;
+};
+
+
+struct st_context {
+ struct st_device *st_dev;
+
+ struct pipe_context *real_pipe;
+ struct pipe_context *pipe;
+
+ struct cso_context *cso;
+
+ void *vs;
+ void *fs;
+
+ struct pipe_texture *default_texture;
+ struct pipe_texture *sampler_textures[PIPE_MAX_SAMPLERS];
+
+ unsigned num_vertex_buffers;
+ struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
+
+ unsigned num_vertex_elements;
+ struct pipe_vertex_element vertex_elements[PIPE_MAX_ATTRIBS];
+
+ struct pipe_framebuffer_state framebuffer;
+};
+
+
+struct st_device {
+ /* FIXME: we also need to refcount for textures and surfaces... */
+ struct pipe_reference reference;
+
+ const struct st_winsys *st_ws;
+
+ struct pipe_screen *real_screen;
+ struct pipe_screen *screen;
+};
+
+
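+/*
+ * Note: the surface returned by get_tex_surface() carries its own reference;
+ * callers are expected to release it (e.g. with pipe_surface_reference())
+ * when they are done with it.
+ */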
+static INLINE struct pipe_surface *
+st_pipe_surface(struct st_surface *surface, unsigned usage)
+{
+ struct pipe_texture *texture = surface->texture;
+ struct pipe_screen *screen = texture->screen;
+ return screen->get_tex_surface(screen, texture, surface->face, surface->level, surface->zslice, usage);
+}
+
+struct st_context *
+st_context_create(struct st_device *st_dev);
+
+void
+st_context_destroy(struct st_context *st_ctx);
+
+struct st_device *
+st_device_create(boolean hardware);
+
+void
+st_device_destroy(struct st_device *st_dev);
+
+
+#endif /* ST_DEVICE_H_ */
diff --git a/src/gallium/state_trackers/python/st_hardpipe_winsys.c b/src/gallium/state_trackers/python/st_hardpipe_winsys.c
new file mode 100644
index 0000000000..43aaaabf2a
--- /dev/null
+++ b/src/gallium/state_trackers/python/st_hardpipe_winsys.c
@@ -0,0 +1,237 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Get a hardware-accelerated Gallium screen/context from the OpenGL driver.
+ */
+
+
+#include "pipe/p_compiler.h"
+
+#ifdef PIPE_OS_WINDOWS
+#include <windows.h>
+#include <GL/gl.h>
+#else
+#include <X11/Xlib.h>
+#include <X11/Xutil.h>
+#include <GL/gl.h>
+#include <GL/glx.h>
+#endif
+
+#include "st_winsys.h"
+
+
+typedef struct pipe_screen * (GLAPIENTRY *PFNGETGALLIUMSCREENMESAPROC) (void);
+typedef struct pipe_context * (GLAPIENTRY *PFNCREATEGALLIUMCONTEXTMESAPROC) (void);
+
+static PFNGETGALLIUMSCREENMESAPROC pfnGetGalliumScreenMESA = NULL;
+static PFNCREATEGALLIUMCONTEXTMESAPROC pfnCreateGalliumContextMESA = NULL;
+
+
+/* XXX: Force init_gallium symbol to be linked */
+extern void init_gallium(void);
+void (*force_init_gallium_linkage)(void) = &init_gallium;
+
+
+#ifdef PIPE_OS_WINDOWS
+
+static INLINE boolean
+st_hardpipe_load(void)
+{
+ WNDCLASS wc;
+ HWND hwnd;
+ HGLRC hglrc;
+ HDC hdc;
+ PIXELFORMATDESCRIPTOR pfd;
+ int iPixelFormat;
+
+ if(pfnGetGalliumScreenMESA && pfnCreateGalliumContextMESA)
+ return TRUE;
+
+ memset(&wc, 0, sizeof wc);
+ wc.lpfnWndProc = DefWindowProc;
+ wc.lpszClassName = "gallium";
+ wc.style = CS_OWNDC | CS_HREDRAW | CS_VREDRAW;
+ RegisterClass(&wc);
+
+ hwnd = CreateWindow(wc.lpszClassName, "gallium", 0, 0, 0, 0, 0, NULL, 0, wc.hInstance, NULL);
+ if (!hwnd)
+ return FALSE;
+
+ hdc = GetDC(hwnd);
+ if (!hdc)
+ return FALSE;
+
+   memset(&pfd, 0, sizeof(pfd));
+   pfd.cColorBits = 3;
+ pfd.cRedBits = 1;
+ pfd.cGreenBits = 1;
+ pfd.cBlueBits = 1;
+ pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL;
+ pfd.iLayerType = PFD_MAIN_PLANE;
+ pfd.iPixelType = PFD_TYPE_RGBA;
+ pfd.nSize = sizeof(pfd);
+ pfd.nVersion = 1;
+
+ iPixelFormat = ChoosePixelFormat(hdc, &pfd);
+ if (!iPixelFormat) {
+ pfd.dwFlags |= PFD_DOUBLEBUFFER;
+ iPixelFormat = ChoosePixelFormat(hdc, &pfd);
+ }
+ if (!iPixelFormat)
+ return FALSE;
+
+ SetPixelFormat(hdc, iPixelFormat, &pfd);
+ hglrc = wglCreateContext(hdc);
+ if (!hglrc)
+ return FALSE;
+
+ if (!wglMakeCurrent(hdc, hglrc))
+ return FALSE;
+
+ pfnGetGalliumScreenMESA = (PFNGETGALLIUMSCREENMESAPROC)wglGetProcAddress("wglGetGalliumScreenMESA");
+ if(!pfnGetGalliumScreenMESA)
+ return FALSE;
+
+ pfnCreateGalliumContextMESA = (PFNCREATEGALLIUMCONTEXTMESAPROC)wglGetProcAddress("wglCreateGalliumContextMESA");
+ if(!pfnCreateGalliumContextMESA)
+ return FALSE;
+
+ DestroyWindow(hwnd);
+
+ return TRUE;
+}
+
+#else
+
+static INLINE boolean
+st_hardpipe_load(void)
+{
+ Display *dpy;
+ int scrnum;
+ Window root;
+ int attribSingle[] = {
+ GLX_RGBA,
+ GLX_RED_SIZE, 1,
+ GLX_GREEN_SIZE, 1,
+ GLX_BLUE_SIZE, 1,
+ None };
+ int attribDouble[] = {
+ GLX_RGBA,
+ GLX_RED_SIZE, 1,
+ GLX_GREEN_SIZE, 1,
+ GLX_BLUE_SIZE, 1,
+ GLX_DOUBLEBUFFER,
+ None };
+ XVisualInfo *visinfo;
+ GLXContext ctx = NULL;
+ XSetWindowAttributes attr;
+ unsigned long mask;
+ int width = 100, height = 100;
+ Window win;
+
+ dpy = XOpenDisplay(NULL);
+ if (!dpy)
+ return FALSE;
+
+ scrnum = 0;
+
+ root = RootWindow(dpy, scrnum);
+
+ visinfo = glXChooseVisual(dpy, scrnum, attribSingle);
+ if (!visinfo)
+ visinfo = glXChooseVisual(dpy, scrnum, attribDouble);
+ if (!visinfo)
+ return FALSE;
+
+ ctx = glXCreateContext( dpy, visinfo, NULL, True );
+
+ if (!ctx)
+ return FALSE;
+
+ attr.background_pixel = 0;
+ attr.border_pixel = 0;
+ attr.colormap = XCreateColormap(dpy, root, visinfo->visual, AllocNone);
+ attr.event_mask = StructureNotifyMask | ExposureMask;
+
+ mask = CWBackPixel | CWBorderPixel | CWColormap | CWEventMask;
+
+ win = XCreateWindow(dpy, root, 0, 0, width, height,
+ 0, visinfo->depth, InputOutput,
+ visinfo->visual, mask, &attr);
+
+ if (!glXMakeCurrent(dpy, win, ctx))
+ return FALSE;
+
+ pfnGetGalliumScreenMESA = (PFNGETGALLIUMSCREENMESAPROC)glXGetProcAddressARB((const GLubyte *)"glXGetGalliumScreenMESA");
+ if(!pfnGetGalliumScreenMESA)
+ return FALSE;
+
+ pfnCreateGalliumContextMESA = (PFNCREATEGALLIUMCONTEXTMESAPROC)glXGetProcAddressARB((const GLubyte *)"glXCreateGalliumContextMESA");
+ if(!pfnCreateGalliumContextMESA)
+ return FALSE;
+
+ glXDestroyContext(dpy, ctx);
+ XFree(visinfo);
+ XDestroyWindow(dpy, win);
+ XCloseDisplay(dpy);
+
+ return TRUE;
+}
+
+#endif
+
+
+static struct pipe_screen *
+st_hardpipe_screen_create(void)
+{
+ if(st_hardpipe_load())
+ return pfnGetGalliumScreenMESA();
+ else
+ return st_softpipe_winsys.screen_create();
+}
+
+
+static struct pipe_context *
+st_hardpipe_context_create(struct pipe_screen *screen)
+{
+ if(st_hardpipe_load()) {
+ if(screen == pfnGetGalliumScreenMESA())
+ return pfnCreateGalliumContextMESA();
+ else
+ return NULL;
+ }
+ else
+ return st_softpipe_winsys.context_create(screen);
+}
+
+
+const struct st_winsys st_hardpipe_winsys = {
+ &st_hardpipe_screen_create,
+ &st_hardpipe_context_create
+};
diff --git a/src/gallium/state_trackers/python/st_sample.c b/src/gallium/state_trackers/python/st_sample.c
new file mode 100644
index 0000000000..53a01891e1
--- /dev/null
+++ b/src/gallium/state_trackers/python/st_sample.c
@@ -0,0 +1,570 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_format.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+#include "util/u_tile.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+#include "st_device.h"
+#include "st_sample.h"
+
+
+/**
+ * Use our own pseudo random generator to ensure consistent runs among
+ * multiple runs and platforms.
+ *
+ * @sa http://en.wikipedia.org/wiki/Linear_congruential_generator
+ */
+static uint32_t st_random(void)
+{
+ static uint64_t seed = UINT64_C(0xbb9a063afb0a739d);
+
+ seed = UINT64_C(134775813) * seed + UINT64_C(1);
+
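+   /* Return only the high-order bits; the low-order bits of a linear
+    * congruential generator have much shorter periods. */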
+ return (uint16_t)(seed >> 32);
+}
+
+
+/**
+ * We don't want to include the patent-encumbered DXT code here, so instead
+ * we store several uncompressed/compressed data pairs for hardware testing
+ * purposes.
+ */
+struct dxt_data
+{
+   uint8_t rgba[16*4];   /* decompressed 4x4 texel block, as RGBA8 */
+   uint8_t raw[16];      /* compressed block (8 bytes for DXT1, 16 for DXT3/5) */
+};
+
+
+static const struct dxt_data
+dxt1_rgb_data[] = {
+ {
+ {
+ 0x99, 0xb0, 0x8e, 0xff,
+ 0x5d, 0x62, 0x89, 0xff,
+ 0x99, 0xb0, 0x8e, 0xff,
+ 0x99, 0xb0, 0x8e, 0xff,
+ 0xd6, 0xff, 0x94, 0xff,
+ 0x5d, 0x62, 0x89, 0xff,
+ 0x99, 0xb0, 0x8e, 0xff,
+ 0xd6, 0xff, 0x94, 0xff,
+ 0x5d, 0x62, 0x89, 0xff,
+ 0x5d, 0x62, 0x89, 0xff,
+ 0x99, 0xb0, 0x8e, 0xff,
+ 0x21, 0x14, 0x84, 0xff,
+ 0x5d, 0x62, 0x89, 0xff,
+ 0x21, 0x14, 0x84, 0xff,
+ 0x21, 0x14, 0x84, 0xff,
+ 0x99, 0xb0, 0x8e, 0xff
+ },
+ {0xf2, 0xd7, 0xb0, 0x20, 0xae, 0x2c, 0x6f, 0x97}
+ },
+ {
+ {
+ 0xb5, 0xcf, 0x9c, 0xff,
+ 0x83, 0x8c, 0x8b, 0xff,
+ 0x21, 0x08, 0x6b, 0xff,
+ 0x83, 0x8c, 0x8b, 0xff,
+ 0x52, 0x4a, 0x7b, 0xff,
+ 0x83, 0x8c, 0x8b, 0xff,
+ 0x83, 0x8c, 0x8b, 0xff,
+ 0xb5, 0xcf, 0x9c, 0xff,
+ 0x21, 0x08, 0x6b, 0xff,
+ 0xb5, 0xcf, 0x9c, 0xff,
+ 0x83, 0x8c, 0x8b, 0xff,
+ 0x52, 0x4a, 0x7b, 0xff,
+ 0xb5, 0xcf, 0x9c, 0xff,
+ 0x83, 0x8c, 0x8b, 0xff,
+ 0x52, 0x4a, 0x7b, 0xff,
+ 0x83, 0x8c, 0x8b, 0xff
+ },
+ {0x73, 0xb6, 0x4d, 0x20, 0x98, 0x2b, 0xe1, 0xb8}
+ },
+ {
+ {
+ 0x00, 0x2c, 0xff, 0xff,
+ 0x94, 0x8d, 0x7b, 0xff,
+ 0x4a, 0x5c, 0xbd, 0xff,
+ 0x4a, 0x5c, 0xbd, 0xff,
+ 0x4a, 0x5c, 0xbd, 0xff,
+ 0x94, 0x8d, 0x7b, 0xff,
+ 0x94, 0x8d, 0x7b, 0xff,
+ 0x94, 0x8d, 0x7b, 0xff,
+ 0xde, 0xbe, 0x39, 0xff,
+ 0x94, 0x8d, 0x7b, 0xff,
+ 0xde, 0xbe, 0x39, 0xff,
+ 0xde, 0xbe, 0x39, 0xff,
+ 0xde, 0xbe, 0x39, 0xff,
+ 0xde, 0xbe, 0x39, 0xff,
+ 0xde, 0xbe, 0x39, 0xff,
+ 0x94, 0x8d, 0x7b, 0xff
+ },
+ {0xe7, 0xdd, 0x7f, 0x01, 0xf9, 0xab, 0x08, 0x80}
+ },
+ {
+ {
+ 0x6b, 0x24, 0x21, 0xff,
+ 0x7b, 0x4f, 0x5d, 0xff,
+ 0x7b, 0x4f, 0x5d, 0xff,
+ 0x8b, 0x7a, 0x99, 0xff,
+ 0x7b, 0x4f, 0x5d, 0xff,
+ 0x7b, 0x4f, 0x5d, 0xff,
+ 0x6b, 0x24, 0x21, 0xff,
+ 0x8b, 0x7a, 0x99, 0xff,
+ 0x9c, 0xa6, 0xd6, 0xff,
+ 0x6b, 0x24, 0x21, 0xff,
+ 0x7b, 0x4f, 0x5d, 0xff,
+ 0x8b, 0x7a, 0x99, 0xff,
+ 0x6b, 0x24, 0x21, 0xff,
+ 0x8b, 0x7a, 0x99, 0xff,
+ 0x7b, 0x4f, 0x5d, 0xff,
+ 0x9c, 0xa6, 0xd6, 0xff
+ },
+ {0x3a, 0x9d, 0x24, 0x69, 0xbd, 0x9f, 0xb4, 0x39}
+ }
+};
+
+
+static const struct dxt_data
+dxt1_rgba_data[] = {
+ {
+ {
+ 0x00, 0x00, 0x00, 0x00,
+ 0x4e, 0xaa, 0x90, 0xff,
+ 0x4e, 0xaa, 0x90, 0xff,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x4e, 0xaa, 0x90, 0xff,
+ 0x29, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x4e, 0xaa, 0x90, 0xff,
+ 0x73, 0x55, 0x21, 0xff,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x4e, 0xaa, 0x90, 0xff,
+ 0x4e, 0xaa, 0x90, 0xff,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x4e, 0xaa, 0x90, 0xff
+ },
+ {0xff, 0x2f, 0xa4, 0x72, 0xeb, 0xb2, 0xbd, 0xbe}
+ },
+ {
+ {
+ 0xb5, 0xe3, 0x63, 0xff,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x6b, 0x24, 0x84, 0xff,
+ 0xb5, 0xe3, 0x63, 0xff,
+ 0x00, 0x00, 0x00, 0x00,
+ 0xb5, 0xe3, 0x63, 0xff,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x6b, 0x24, 0x84, 0xff,
+ 0x6b, 0x24, 0x84, 0xff,
+ 0x00, 0x00, 0x00, 0x00,
+ 0xb5, 0xe3, 0x63, 0xff,
+ 0x90, 0x83, 0x73, 0xff,
+ 0xb5, 0xe3, 0x63, 0xff
+ },
+ {0x30, 0x69, 0x0c, 0xb7, 0x4d, 0xf7, 0x0f, 0x67}
+ },
+ {
+ {
+ 0x00, 0x00, 0x00, 0x00,
+ 0xc6, 0x86, 0x8c, 0xff,
+ 0xc6, 0x86, 0x8c, 0xff,
+ 0x21, 0x65, 0x42, 0xff,
+ 0x21, 0x65, 0x42, 0xff,
+ 0x21, 0x65, 0x42, 0xff,
+ 0x21, 0x65, 0x42, 0xff,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x21, 0x65, 0x42, 0xff,
+ 0xc6, 0x86, 0x8c, 0xff,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0xc6, 0x86, 0x8c, 0xff
+ },
+ {0x28, 0x23, 0x31, 0xc4, 0x17, 0xc0, 0xd3, 0x7f}
+ },
+ {
+ {
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0xc6, 0xe3, 0x9c, 0xff,
+ 0x7b, 0x1c, 0x52, 0xff,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x7b, 0x1c, 0x52, 0xff,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x7b, 0x1c, 0x52, 0xff,
+ 0xa0, 0x7f, 0x77, 0xff,
+ 0xc6, 0xe3, 0x9c, 0xff,
+ 0x00, 0x00, 0x00, 0x00,
+ 0xa0, 0x7f, 0x77, 0xff
+ },
+ {0xea, 0x78, 0x13, 0xc7, 0x7f, 0xfc, 0x33, 0xb6}
+ },
+};
+
+
+static const struct dxt_data
+dxt3_rgba_data[] = {
+ {
+ {
+ 0x6d, 0xc6, 0x96, 0x77,
+ 0x6d, 0xc6, 0x96, 0xee,
+ 0x6d, 0xc6, 0x96, 0xaa,
+ 0x8c, 0xff, 0xb5, 0x44,
+ 0x6d, 0xc6, 0x96, 0xff,
+ 0x6d, 0xc6, 0x96, 0x88,
+ 0x31, 0x55, 0x5a, 0x66,
+ 0x6d, 0xc6, 0x96, 0x99,
+ 0x31, 0x55, 0x5a, 0xbb,
+ 0x31, 0x55, 0x5a, 0x55,
+ 0x31, 0x55, 0x5a, 0x11,
+ 0x6d, 0xc6, 0x96, 0xcc,
+ 0x6d, 0xc6, 0x96, 0xcc,
+ 0x6d, 0xc6, 0x96, 0x11,
+ 0x31, 0x55, 0x5a, 0x44,
+ 0x31, 0x55, 0x5a, 0x88
+ },
+ {0xe7, 0x4a, 0x8f, 0x96, 0x5b, 0xc1, 0x1c, 0x84, 0xf6, 0x8f, 0xab, 0x32, 0x2a, 0x9a, 0x95, 0x5a}
+ },
+ {
+ {
+ 0xad, 0xeb, 0x73, 0x99,
+ 0x97, 0xaa, 0x86, 0x66,
+ 0x6b, 0x28, 0xad, 0x99,
+ 0xad, 0xeb, 0x73, 0x99,
+ 0x6b, 0x28, 0xad, 0x22,
+ 0xad, 0xeb, 0x73, 0xff,
+ 0x97, 0xaa, 0x86, 0x55,
+ 0x6b, 0x28, 0xad, 0x55,
+ 0x6b, 0x28, 0xad, 0x44,
+ 0xad, 0xeb, 0x73, 0x33,
+ 0x6b, 0x28, 0xad, 0xee,
+ 0x6b, 0x28, 0xad, 0x99,
+ 0x97, 0xaa, 0x86, 0x66,
+ 0xad, 0xeb, 0x73, 0xbb,
+ 0x97, 0xaa, 0x86, 0x99,
+ 0xad, 0xeb, 0x73, 0xbb
+ },
+ {0x69, 0x99, 0xf2, 0x55, 0x34, 0x9e, 0xb6, 0xb9, 0x4e, 0xaf, 0x55, 0x69, 0x18, 0x61, 0x51, 0x22}
+ },
+ {
+ {
+ 0x63, 0xd7, 0xd6, 0x00,
+ 0x57, 0x62, 0x5d, 0xdd,
+ 0x57, 0x62, 0x5d, 0xcc,
+ 0x57, 0x62, 0x5d, 0xbb,
+ 0x52, 0x28, 0x21, 0xaa,
+ 0x57, 0x62, 0x5d, 0xcc,
+ 0x57, 0x62, 0x5d, 0xcc,
+ 0x57, 0x62, 0x5d, 0x66,
+ 0x57, 0x62, 0x5d, 0x22,
+ 0x57, 0x62, 0x5d, 0xdd,
+ 0x63, 0xd7, 0xd6, 0xee,
+ 0x57, 0x62, 0x5d, 0x33,
+ 0x63, 0xd7, 0xd6, 0x55,
+ 0x52, 0x28, 0x21, 0x55,
+ 0x57, 0x62, 0x5d, 0x11,
+ 0x5d, 0x9c, 0x99, 0xee
+ },
+ {0xd0, 0xbc, 0xca, 0x6c, 0xd2, 0x3e, 0x55, 0xe1, 0xba, 0x66, 0x44, 0x51, 0xfc, 0xfd, 0xcf, 0xb4}
+ },
+ {
+ {
+ 0x94, 0x6f, 0x60, 0x22,
+ 0x94, 0x6f, 0x60, 0x22,
+ 0xc5, 0xab, 0x76, 0x11,
+ 0xc5, 0xab, 0x76, 0xee,
+ 0x63, 0x34, 0x4a, 0xdd,
+ 0x63, 0x34, 0x4a, 0x33,
+ 0x94, 0x6f, 0x60, 0x77,
+ 0xf7, 0xe7, 0x8c, 0x00,
+ 0x94, 0x6f, 0x60, 0x33,
+ 0x63, 0x34, 0x4a, 0xaa,
+ 0x94, 0x6f, 0x60, 0x77,
+ 0x63, 0x34, 0x4a, 0xcc,
+ 0x94, 0x6f, 0x60, 0xaa,
+ 0xf7, 0xe7, 0x8c, 0x99,
+ 0x63, 0x34, 0x4a, 0x44,
+ 0xc5, 0xab, 0x76, 0xaa
+ },
+ {0x22, 0xe1, 0x3d, 0x07, 0xa3, 0xc7, 0x9a, 0xa4, 0x31, 0xf7, 0xa9, 0x61, 0xaf, 0x35, 0x77, 0x93}
+ },
+};
+
+
+static const struct dxt_data
+dxt5_rgba_data[] = {
+ {
+ {
+ 0x6d, 0xc6, 0x96, 0x74,
+ 0x6d, 0xc6, 0x96, 0xf8,
+ 0x6d, 0xc6, 0x96, 0xb6,
+ 0x8c, 0xff, 0xb5, 0x53,
+ 0x6d, 0xc6, 0x96, 0xf8,
+ 0x6d, 0xc6, 0x96, 0x95,
+ 0x31, 0x55, 0x5a, 0x53,
+ 0x6d, 0xc6, 0x96, 0x95,
+ 0x31, 0x55, 0x5a, 0xb6,
+ 0x31, 0x55, 0x5a, 0x53,
+ 0x31, 0x55, 0x5a, 0x11,
+ 0x6d, 0xc6, 0x96, 0xd7,
+ 0x6d, 0xc6, 0x96, 0xb6,
+ 0x6d, 0xc6, 0x96, 0x11,
+ 0x31, 0x55, 0x5a, 0x32,
+ 0x31, 0x55, 0x5a, 0x95
+ },
+ {0xf8, 0x11, 0xc5, 0x0c, 0x9a, 0x73, 0xb4, 0x9c, 0xf6, 0x8f, 0xab, 0x32, 0x2a, 0x9a, 0x95, 0x5a}
+ },
+ {
+ {
+ 0xad, 0xeb, 0x73, 0xa1,
+ 0x97, 0xaa, 0x86, 0x65,
+ 0x6b, 0x28, 0xad, 0xa1,
+ 0xad, 0xeb, 0x73, 0xa1,
+ 0x6b, 0x28, 0xad, 0x2a,
+ 0xad, 0xeb, 0x73, 0xfb,
+ 0x97, 0xaa, 0x86, 0x47,
+ 0x6b, 0x28, 0xad, 0x65,
+ 0x6b, 0x28, 0xad, 0x47,
+ 0xad, 0xeb, 0x73, 0x47,
+ 0x6b, 0x28, 0xad, 0xdd,
+ 0x6b, 0x28, 0xad, 0xa1,
+ 0x97, 0xaa, 0x86, 0x65,
+ 0xad, 0xeb, 0x73, 0xbf,
+ 0x97, 0xaa, 0x86, 0xa1,
+ 0xad, 0xeb, 0x73, 0xbf
+ },
+ {0xfb, 0x2a, 0x34, 0x19, 0xdc, 0xbf, 0xe8, 0x71, 0x4e, 0xaf, 0x55, 0x69, 0x18, 0x61, 0x51, 0x22}
+ },
+ {
+ {
+ 0x63, 0xd7, 0xd6, 0x00,
+ 0x57, 0x62, 0x5d, 0xf5,
+ 0x57, 0x62, 0x5d, 0xd2,
+ 0x57, 0x62, 0x5d, 0xaf,
+ 0x52, 0x28, 0x21, 0xaf,
+ 0x57, 0x62, 0x5d, 0xd2,
+ 0x57, 0x62, 0x5d, 0xd2,
+ 0x57, 0x62, 0x5d, 0x69,
+ 0x57, 0x62, 0x5d, 0x23,
+ 0x57, 0x62, 0x5d, 0xd2,
+ 0x63, 0xd7, 0xd6, 0xf5,
+ 0x57, 0x62, 0x5d, 0x46,
+ 0x63, 0xd7, 0xd6, 0x46,
+ 0x52, 0x28, 0x21, 0x69,
+ 0x57, 0x62, 0x5d, 0x23,
+ 0x5d, 0x9c, 0x99, 0xf5
+ },
+ {0xf5, 0x00, 0x81, 0x36, 0xa9, 0x17, 0xec, 0x1e, 0xba, 0x66, 0x44, 0x51, 0xfc, 0xfd, 0xcf, 0xb4}
+ },
+ {
+ {
+ 0x94, 0x6f, 0x60, 0x25,
+ 0x94, 0x6f, 0x60, 0x25,
+ 0xc5, 0xab, 0x76, 0x05,
+ 0xc5, 0xab, 0x76, 0xe8,
+ 0x63, 0x34, 0x4a, 0xe8,
+ 0x63, 0x34, 0x4a, 0x25,
+ 0x94, 0x6f, 0x60, 0x86,
+ 0xf7, 0xe7, 0x8c, 0x05,
+ 0x94, 0x6f, 0x60, 0x25,
+ 0x63, 0x34, 0x4a, 0xa7,
+ 0x94, 0x6f, 0x60, 0x66,
+ 0x63, 0x34, 0x4a, 0xc7,
+ 0x94, 0x6f, 0x60, 0xa7,
+ 0xf7, 0xe7, 0x8c, 0xa7,
+ 0x63, 0x34, 0x4a, 0x45,
+ 0xc5, 0xab, 0x76, 0xa7
+ },
+ {0xe8, 0x05, 0x7f, 0x80, 0x33, 0x5f, 0xb5, 0x79, 0x31, 0xf7, 0xa9, 0x61, 0xaf, 0x35, 0x77, 0x93}
+ },
+};
+
+
+static INLINE void
+st_sample_dxt_pixel_block(enum pipe_format format,
+ const struct pipe_format_block *block,
+ uint8_t *raw,
+ float *rgba, unsigned rgba_stride,
+ unsigned w, unsigned h)
+{
+ const struct dxt_data *data;
+ unsigned n;
+ unsigned i;
+ unsigned x, y, ch;
+
+ switch(format) {
+ case PIPE_FORMAT_DXT1_RGB:
+ data = dxt1_rgb_data;
+ n = sizeof(dxt1_rgb_data)/sizeof(dxt1_rgb_data[0]);
+ break;
+ case PIPE_FORMAT_DXT1_RGBA:
+ data = dxt1_rgba_data;
+ n = sizeof(dxt1_rgba_data)/sizeof(dxt1_rgba_data[0]);
+ break;
+ case PIPE_FORMAT_DXT3_RGBA:
+ data = dxt3_rgba_data;
+ n = sizeof(dxt3_rgba_data)/sizeof(dxt3_rgba_data[0]);
+ break;
+ case PIPE_FORMAT_DXT5_RGBA:
+ data = dxt5_rgba_data;
+ n = sizeof(dxt5_rgba_data)/sizeof(dxt5_rgba_data[0]);
+ break;
+ default:
+ assert(0);
+ return;
+ }
+
+ i = st_random() % n;
+
+ for(y = 0; y < h; ++y)
+ for(x = 0; x < w; ++x)
+ for(ch = 0; ch < 4; ++ch)
+ rgba[y*rgba_stride + x*4 + ch] = (float)(data[i].rgba[y*4*4 + x*4 + ch])/255.0f;
+
+ memcpy(raw, data[i].raw, block->size);
+}
+
+
+static INLINE void
+st_sample_generic_pixel_block(enum pipe_format format,
+ const struct pipe_format_block *block,
+ uint8_t *raw,
+ float *rgba, unsigned rgba_stride,
+ unsigned w, unsigned h)
+{
+ unsigned i;
+ unsigned x, y, ch;
+
+ for(i = 0; i < block->size; ++i)
+ raw[i] = (uint8_t)st_random();
+
+
+ pipe_tile_raw_to_rgba(format,
+ raw,
+ w, h,
+ rgba, rgba_stride);
+
+ if(format == PIPE_FORMAT_YCBCR || format == PIPE_FORMAT_YCBCR_REV) {
+ for(y = 0; y < h; ++y) {
+ for(x = 0; x < w; ++x) {
+ for(ch = 0; ch < 4; ++ch) {
+ unsigned offset = y*rgba_stride + x*4 + ch;
+ rgba[offset] = CLAMP(rgba[offset], 0.0f, 1.0f);
+ }
+ }
+ }
+ }
+}
+
+
+/**
+ * Randomly sample pixels.
+ */
+void
+st_sample_pixel_block(enum pipe_format format,
+ const struct pipe_format_block *block,
+ void *raw,
+ float *rgba, unsigned rgba_stride,
+ unsigned w, unsigned h)
+{
+ switch(format) {
+ case PIPE_FORMAT_DXT1_RGB:
+ case PIPE_FORMAT_DXT1_RGBA:
+ case PIPE_FORMAT_DXT3_RGBA:
+ case PIPE_FORMAT_DXT5_RGBA:
+ st_sample_dxt_pixel_block(format, block, raw, rgba, rgba_stride, w, h);
+ break;
+
+ default:
+ st_sample_generic_pixel_block(format, block, raw, rgba, rgba_stride, w, h);
+ break;
+ }
+}
+
+
+void
+st_sample_surface(struct st_surface *surface, float *rgba)
+{
+ struct pipe_texture *texture = surface->texture;
+ struct pipe_screen *screen = texture->screen;
+ unsigned width = texture->width[surface->level];
+ unsigned height = texture->height[surface->level];
+ uint rgba_stride = width * 4;
+ struct pipe_transfer *transfer;
+ void *raw;
+
+ transfer = screen->get_tex_transfer(screen,
+ surface->texture,
+ surface->face,
+ surface->level,
+ surface->zslice,
+ PIPE_TRANSFER_WRITE,
+ 0, 0,
+ width,
+ height);
+ if (!transfer)
+ return;
+
+ raw = screen->transfer_map(screen, transfer);
+ if (raw) {
+ const struct pipe_format_block *block = &texture->block;
+ uint x, y;
+
+ for (y = 0; y < transfer->nblocksy; ++y) {
+ for (x = 0; x < transfer->nblocksx; ++x) {
+ st_sample_pixel_block(texture->format,
+ block,
+ (uint8_t *) raw + y * transfer->stride + x * block->size,
+ rgba + y * block->height * rgba_stride + x * block->width * 4,
+ rgba_stride,
+ MIN2(block->width, width - x*block->width),
+ MIN2(block->height, height - y*block->height));
+ }
+ }
+
+ screen->transfer_unmap(screen, transfer);
+ }
+
+ screen->tex_transfer_destroy(transfer);
+}
diff --git a/src/gallium/state_trackers/python/st_sample.h b/src/gallium/state_trackers/python/st_sample.h
new file mode 100644
index 0000000000..0a27083549
--- /dev/null
+++ b/src/gallium/state_trackers/python/st_sample.h
@@ -0,0 +1,47 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef ST_SAMPLE_H_
+#define ST_SAMPLE_H_
+
+
+#include "pipe/p_format.h"
+
+
+void
+st_sample_pixel_block(enum pipe_format format,
+ const struct pipe_format_block *block,
+ void *raw,
+ float *rgba, unsigned rgba_stride,
+ unsigned w, unsigned h);
+
+void
+st_sample_surface(struct st_surface *surface, float *rgba);
+
+
+#endif /* ST_SAMPLE_H_ */
diff --git a/src/gallium/state_trackers/python/st_softpipe_winsys.c b/src/gallium/state_trackers/python/st_softpipe_winsys.c
new file mode 100644
index 0000000000..f0a4826a00
--- /dev/null
+++ b/src/gallium/state_trackers/python/st_softpipe_winsys.c
@@ -0,0 +1,270 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Softpipe support.
+ *
+ * @author Keith Whitwell
+ * @author Brian Paul
+ * @author Jose Fonseca
+ */
+
+
+#include "pipe/internal/p_winsys_screen.h"/* port to just p_screen */
+#include "pipe/p_format.h"
+#include "pipe/p_context.h"
+#include "pipe/p_inlines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "softpipe/sp_winsys.h"
+#include "st_winsys.h"
+
+
+struct st_softpipe_buffer
+{
+ struct pipe_buffer base;
+ boolean userBuffer; /** Is this a user-space buffer? */
+ void *data;
+ void *mapped;
+};
+
+
+/** Cast wrapper */
+static INLINE struct st_softpipe_buffer *
+st_softpipe_buffer( struct pipe_buffer *buf )
+{
+ return (struct st_softpipe_buffer *)buf;
+}
+
+
+static void *
+st_softpipe_buffer_map(struct pipe_winsys *winsys,
+ struct pipe_buffer *buf,
+ unsigned flags)
+{
+ struct st_softpipe_buffer *st_softpipe_buf = st_softpipe_buffer(buf);
+ st_softpipe_buf->mapped = st_softpipe_buf->data;
+ return st_softpipe_buf->mapped;
+}
+
+
+static void
+st_softpipe_buffer_unmap(struct pipe_winsys *winsys,
+ struct pipe_buffer *buf)
+{
+ struct st_softpipe_buffer *st_softpipe_buf = st_softpipe_buffer(buf);
+ st_softpipe_buf->mapped = NULL;
+}
+
+
+static void
+st_softpipe_buffer_destroy(struct pipe_buffer *buf)
+{
+ struct st_softpipe_buffer *oldBuf = st_softpipe_buffer(buf);
+
+ if (oldBuf->data) {
+ if (!oldBuf->userBuffer)
+ align_free(oldBuf->data);
+
+ oldBuf->data = NULL;
+ }
+
+ FREE(oldBuf);
+}
+
+
+static void
+st_softpipe_flush_frontbuffer(struct pipe_winsys *winsys,
+ struct pipe_surface *surf,
+ void *context_private)
+{
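+   /* Nothing to do here: the Python state tracker renders off-screen only. */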
+}
+
+
+
+static const char *
+st_softpipe_get_name(struct pipe_winsys *winsys)
+{
+ return "softpipe";
+}
+
+
+static struct pipe_buffer *
+st_softpipe_buffer_create(struct pipe_winsys *winsys,
+ unsigned alignment,
+ unsigned usage,
+ unsigned size)
+{
+ struct st_softpipe_buffer *buffer = CALLOC_STRUCT(st_softpipe_buffer);
+
+ pipe_reference_init(&buffer->base.reference, 1);
+ buffer->base.alignment = alignment;
+ buffer->base.usage = usage;
+ buffer->base.size = size;
+
+ buffer->data = align_malloc(size, alignment);
+
+ return &buffer->base;
+}
+
+
+/**
+ * Create buffer which wraps user-space data.
+ */
+static struct pipe_buffer *
+st_softpipe_user_buffer_create(struct pipe_winsys *winsys,
+ void *ptr,
+ unsigned bytes)
+{
+ struct st_softpipe_buffer *buffer;
+
+ buffer = CALLOC_STRUCT(st_softpipe_buffer);
+ if(!buffer)
+ return NULL;
+
+ pipe_reference_init(&buffer->base.reference, 1);
+ buffer->base.size = bytes;
+ buffer->userBuffer = TRUE;
+ buffer->data = ptr;
+
+ return &buffer->base;
+}
+
+
+/**
+ * Round n up to the next multiple of 'multiple', which must be a power of two.
+ */
+static INLINE unsigned
+round_up(unsigned n, unsigned multiple)
+{
+ return (n + multiple - 1) & ~(multiple - 1);
+}
+
+
+static struct pipe_buffer *
+st_softpipe_surface_buffer_create(struct pipe_winsys *winsys,
+ unsigned width, unsigned height,
+ enum pipe_format format,
+ unsigned usage,
+ unsigned *stride)
+{
+ const unsigned alignment = 64;
+ struct pipe_format_block block;
+ unsigned nblocksx, nblocksy;
+
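+   /* Derive the row stride (in bytes) from the format's block layout and
+    * round it up to the buffer alignment before allocating nblocksy rows. */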
+ pf_get_block(format, &block);
+ nblocksx = pf_get_nblocksx(&block, width);
+ nblocksy = pf_get_nblocksy(&block, height);
+ *stride = round_up(nblocksx * block.size, alignment);
+
+ return winsys->buffer_create(winsys, alignment,
+ usage,
+ *stride * nblocksy);
+}
+
+
+static void
+st_softpipe_fence_reference(struct pipe_winsys *winsys,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+}
+
+
+static int
+st_softpipe_fence_signalled(struct pipe_winsys *winsys,
+ struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ return 0;
+}
+
+
+static int
+st_softpipe_fence_finish(struct pipe_winsys *winsys,
+ struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ return 0;
+}
+
+
+static void
+st_softpipe_destroy(struct pipe_winsys *winsys)
+{
+ FREE(winsys);
+}
+
+
+static struct pipe_screen *
+st_softpipe_screen_create(void)
+{
+ static struct pipe_winsys *winsys;
+ struct pipe_screen *screen;
+
+ winsys = CALLOC_STRUCT(pipe_winsys);
+ if(!winsys)
+ return NULL;
+
+ winsys->destroy = st_softpipe_destroy;
+
+ winsys->buffer_create = st_softpipe_buffer_create;
+ winsys->user_buffer_create = st_softpipe_user_buffer_create;
+ winsys->buffer_map = st_softpipe_buffer_map;
+ winsys->buffer_unmap = st_softpipe_buffer_unmap;
+ winsys->buffer_destroy = st_softpipe_buffer_destroy;
+
+ winsys->surface_buffer_create = st_softpipe_surface_buffer_create;
+
+ winsys->fence_reference = st_softpipe_fence_reference;
+ winsys->fence_signalled = st_softpipe_fence_signalled;
+ winsys->fence_finish = st_softpipe_fence_finish;
+
+ winsys->flush_frontbuffer = st_softpipe_flush_frontbuffer;
+ winsys->get_name = st_softpipe_get_name;
+
+ screen = softpipe_create_screen(winsys);
+ if(!screen)
+ st_softpipe_destroy(winsys);
+
+ return screen;
+}
+
+
+static struct pipe_context *
+st_softpipe_context_create(struct pipe_screen *screen)
+{
+ return softpipe_create(screen);
+}
+
+
+const struct st_winsys st_softpipe_winsys = {
+ &st_softpipe_screen_create,
+ &st_softpipe_context_create,
+};
diff --git a/src/gallium/state_trackers/python/st_winsys.h b/src/gallium/state_trackers/python/st_winsys.h
new file mode 100644
index 0000000000..b8cb612d86
--- /dev/null
+++ b/src/gallium/state_trackers/python/st_winsys.h
@@ -0,0 +1,52 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef ST_WINSYS_H_
+#define ST_WINSYS_H_
+
+
+struct pipe_screen;
+struct pipe_context;
+
+
+struct st_winsys
+{
+ struct pipe_screen *
+ (*screen_create)(void);
+
+ struct pipe_context *
+ (*context_create)(struct pipe_screen *screen);
+};
+
+
+extern const struct st_winsys st_softpipe_winsys;
+
+extern const struct st_winsys st_hardpipe_winsys;
+
+
+#endif /* ST_WINSYS_H_ */
diff --git a/src/gallium/state_trackers/python/tests/.gitignore b/src/gallium/state_trackers/python/tests/.gitignore
new file mode 100644
index 0000000000..0dbbaeea16
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/.gitignore
@@ -0,0 +1,3 @@
+*.txt
+*.tsv
+*.dot
diff --git a/src/gallium/state_trackers/python/tests/base.py b/src/gallium/state_trackers/python/tests/base.py
new file mode 100755
index 0000000000..202ccfc350
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/base.py
@@ -0,0 +1,344 @@
+#!/usr/bin/env python
+##########################################################################
+#
+# Copyright 2009 VMware, Inc.
+# Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+# All Rights Reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sub license, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial portions
+# of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+# IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+##########################################################################
+
+
+"""Base classes for tests.
+
+Loosely inspired by Python's unittest module.
+"""
+
+
+import os.path
+import sys
+
+from gallium import *
+
+
+# Enumerate all pixel formats
+formats = {}
+for name, value in globals().items():
+ if name.startswith("PIPE_FORMAT_") and isinstance(value, int):
+ formats[value] = name
+
+def is_depth_stencil_format(format):
+ # FIXME: make and use binding to pf_is_depth_stencil
+ return format in (
+ PIPE_FORMAT_Z32_UNORM,
+ PIPE_FORMAT_Z24S8_UNORM,
+ PIPE_FORMAT_Z24X8_UNORM,
+ PIPE_FORMAT_Z16_UNORM,
+ )
+
+def make_image(width, height, rgba):
+ import Image
+ outimage = Image.new(
+ mode='RGB',
+ size=(width, height),
+ color=(0,0,0))
+ outpixels = outimage.load()
+ for y in range(0, height):
+ for x in range(0, width):
+ offset = (y*width + x)*4
+ r, g, b, a = [int(min(max(rgba[offset + ch], 0.0), 1.0)*255) for ch in range(4)]
+ outpixels[x, y] = r, g, b
+ return outimage
+
+def save_image(width, height, rgba, filename):
+ outimage = make_image(width, height, rgba)
+ outimage.save(filename, "PNG")
+
+def show_image(width, height, **rgbas):
+ import Tkinter as tk
+ from PIL import Image, ImageTk
+
+ root = tk.Tk()
+
+ x = 64
+ y = 64
+
+ labels = rgbas.keys()
+ labels.sort()
+ for i in range(len(labels)):
+ label = labels[i]
+ outimage = make_image(width, height, rgbas[label])
+
+ if i:
+ window = tk.Toplevel(root)
+ else:
+ window = root
+ window.title(label)
+ image1 = ImageTk.PhotoImage(outimage)
+ w = image1.width()
+ h = image1.height()
+ window.geometry("%dx%d+%d+%d" % (w, h, x, y))
+ panel1 = tk.Label(window, image=image1)
+ panel1.pack(side='top', fill='both', expand='yes')
+ panel1.image = image1
+ x += w + 2
+
+ root.mainloop()
+
+
+class TestFailure(Exception):
+
+ pass
+
+class TestSkip(Exception):
+
+ pass
+
+
+class Test:
+
+ def __init__(self):
+ pass
+
+ def _run(self, result):
+ raise NotImplementedError
+
+ def run(self):
+ result = TestResult()
+ self._run(result)
+ result.summary()
+
+ def assert_rgba(self, surface, x, y, w, h, expected_rgba, pixel_tol=4.0/256, surface_tol=0.85):
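+        # pixel_tol is the per-pixel tolerance handed to compare_tile_rgba();
+        # surface_tol is the minimum fraction of matching pixels required for
+        # the whole tile to pass.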
+ total = h*w
+ different = surface.compare_tile_rgba(x, y, w, h, expected_rgba, tol=pixel_tol)
+ if different:
+ sys.stderr.write("%u out of %u pixels differ\n" % (different, total))
+
+ if float(total - different)/float(total) < surface_tol:
+ if 0:
+ rgba = FloatArray(h*w*4)
+ surface.get_tile_rgba(x, y, w, h, rgba)
+ show_image(w, h, Result=rgba, Expected=expected_rgba)
+ save_image(w, h, rgba, "result.png")
+ save_image(w, h, expected_rgba, "expected.png")
+ #sys.exit(0)
+
+ raise TestFailure
+
+
+class TestCase(Test):
+
+ tags = ()
+
+ def __init__(self, dev, **kargs):
+ Test.__init__(self)
+ self.dev = dev
+ self.__dict__.update(kargs)
+
+ def description(self):
+ descriptions = []
+ for tag in self.tags:
+ value = self.get(tag)
+ if value is not None and value != '':
+ descriptions.append(tag + '=' + str(value))
+ return ' '.join(descriptions)
+
+ def get(self, tag):
+ try:
+ method = getattr(self, '_get_' + tag)
+ except AttributeError:
+ return getattr(self, tag, None)
+ else:
+ return method()
+
+ def _get_target(self):
+ return {
+ PIPE_TEXTURE_1D: "1d",
+ PIPE_TEXTURE_2D: "2d",
+ PIPE_TEXTURE_3D: "3d",
+ PIPE_TEXTURE_CUBE: "cube",
+ }[self.target]
+
+ def _get_format(self):
+ name = formats[self.format]
+ if name.startswith('PIPE_FORMAT_'):
+ name = name[12:]
+ name = name.lower()
+ return name
+
+ def _get_face(self):
+ if self.target == PIPE_TEXTURE_CUBE:
+ return {
+ PIPE_TEX_FACE_POS_X: "+x",
+ PIPE_TEX_FACE_NEG_X: "-x",
+ PIPE_TEX_FACE_POS_Y: "+y",
+ PIPE_TEX_FACE_NEG_Y: "-y",
+ PIPE_TEX_FACE_POS_Z: "+z",
+ PIPE_TEX_FACE_NEG_Z: "-z",
+ }[self.face]
+ else:
+ return ''
+
+ def test(self):
+ raise NotImplementedError
+
+ def _run(self, result):
+ result.test_start(self)
+ try:
+ self.test()
+ except KeyboardInterrupt:
+ raise
+ except TestSkip:
+ result.test_skipped(self)
+ except TestFailure:
+ result.test_failed(self)
+ else:
+ result.test_passed(self)
+
+
+class TestSuite(Test):
+
+ def __init__(self, tests = None):
+ Test.__init__(self)
+ if tests is None:
+ self.tests = []
+ else:
+ self.tests = tests
+
+ def add_test(self, test):
+ self.tests.append(test)
+
+ def _run(self, result):
+ for test in self.tests:
+ test._run(result)
+
+
+class TestResult:
+
+ def __init__(self):
+ self.tests = 0
+ self.passed = 0
+ self.skipped = 0
+ self.failed = 0
+
+ self.names = ['result']
+ self.types = ['pass skip fail']
+ self.rows = []
+
+ def test_start(self, test):
+ sys.stdout.write("Running %s...\n" % test.description())
+ sys.stdout.flush()
+ self.tests += 1
+
+ def test_passed(self, test):
+ sys.stdout.write("PASS\n")
+ sys.stdout.flush()
+ self.passed += 1
+ self.log_result(test, 'pass')
+
+ def test_skipped(self, test):
+ sys.stdout.write("SKIP\n")
+ sys.stdout.flush()
+ self.skipped += 1
+ #self.log_result(test, 'skip')
+
+ def test_failed(self, test):
+ sys.stdout.write("FAIL\n")
+ sys.stdout.flush()
+ self.failed += 1
+ self.log_result(test, 'fail')
+
+ def log_result(self, test, result):
+ row = ['']*len(self.names)
+
+ # add result
+ assert self.names[0] == 'result'
+ assert result in ('pass', 'skip', 'fail')
+ row[0] = result
+
+ # add tags
+ for tag in test.tags:
+ value = test.get(tag)
+
+ # infer type
+ if value is None:
+ continue
+ elif isinstance(value, (int, float)):
+ value = str(value)
+                type = 'c' # continuous
+ elif isinstance(value, basestring):
+ type = 'd' # discrete
+ else:
+ assert False
+ value = str(value)
+ type = 'd' # discrete
+
+ # insert value
+ try:
+ col = self.names.index(tag, 1)
+ except ValueError:
+ self.names.append(tag)
+ self.types.append(type)
+ row.append(value)
+ else:
+ row[col] = value
+ assert self.types[col] == type
+
+ self.rows.append(row)
+
+ def summary(self):
+ sys.stdout.write("%u tests, %u passed, %u skipped, %u failed\n\n" % (self.tests, self.passed, self.skipped, self.failed))
+ sys.stdout.flush()
+
+ name, ext = os.path.splitext(os.path.basename(sys.argv[0]))
+ filename = name + '.tsv'
+ stream = file(filename, 'wt')
+
+ # header
+ stream.write('\t'.join(self.names) + '\n')
+ stream.write('\t'.join(self.types) + '\n')
+ stream.write('class\n')
+
+ # rows
+ for row in self.rows:
+ row += ['']*(len(self.names) - len(row))
+ stream.write('\t'.join(row) + '\n')
+
+ stream.close()
+
+ # See http://www.ailab.si/orange/doc/ofb/c_otherclass.htm
+ try:
+ import orange
+ import orngTree
+ except ImportError:
+ sys.stderr.write('Install Orange from http://www.ailab.si/orange/ for a classification tree.\n')
+ return
+
+ data = orange.ExampleTable(filename)
+
+ tree = orngTree.TreeLearner(data, sameMajorityPruning=1, mForPruning=2)
+
+ orngTree.printTxt(tree, maxDepth=4)
+
+ file(name+'.txt', 'wt').write(orngTree.dumpTree(tree))
+
+ orngTree.printDot(tree, fileName=name+'.dot', nodeShape='ellipse', leafShape='box')
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-abs.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-abs.sh
new file mode 100644
index 0000000000..7a0006bf66
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-abs.sh
@@ -0,0 +1,13 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+DCL TEMP[0]
+
+IMM FLT32 { -0.5, -0.4, -0.6, 0.0 }
+
+ADD TEMP[0], IN[0], IMM[0]
+ABS OUT[0], TEMP[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-add.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-add.sh
new file mode 100644
index 0000000000..f7836c85dd
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-add.sh
@@ -0,0 +1,8 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+ADD OUT[0], IN[0], IN[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-dp3.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-dp3.sh
new file mode 100644
index 0000000000..c89cd748a8
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-dp3.sh
@@ -0,0 +1,8 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+DP3 OUT[0], IN[0], IN[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-dp4.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-dp4.sh
new file mode 100644
index 0000000000..6517e3c494
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-dp4.sh
@@ -0,0 +1,8 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+DP4 OUT[0], IN[0].xyzx, IN[0].xyzx
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-dst.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-dst.sh
new file mode 100644
index 0000000000..464880ba68
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-dst.sh
@@ -0,0 +1,8 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+DST OUT[0], IN[0], IN[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-ex2.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-ex2.sh
new file mode 100644
index 0000000000..2684076f1d
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-ex2.sh
@@ -0,0 +1,11 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+DCL TEMP[0]
+
+EX2 TEMP[0], IN[0].xxxx
+MUL OUT[0], TEMP[0], IN[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-flr.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-flr.sh
new file mode 100644
index 0000000000..ad11e28918
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-flr.sh
@@ -0,0 +1,15 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+DCL TEMP[0]
+
+IMM FLT32 { 2.5, 4.0, 2.0, 1.0 }
+IMM FLT32 { 0.4, 0.25, 0.5, 1.0 }
+
+MUL TEMP[0], IN[0], IMM[0]
+FLR TEMP[0], TEMP[0]
+MUL OUT[0], TEMP[0], IMM[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-frc.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-frc.sh
new file mode 100644
index 0000000000..4f3aa30d66
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-frc.sh
@@ -0,0 +1,13 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+DCL TEMP[0]
+
+IMM FLT32 { 2.7, 3.1, 4.5, 1.0 }
+
+MUL TEMP[0], IN[0], IMM[0]
+FRC OUT[0], TEMP[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-lg2.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-lg2.sh
new file mode 100644
index 0000000000..54c7c64459
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-lg2.sh
@@ -0,0 +1,15 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+DCL TEMP[0]
+
+IMM FLT32 { 1.0, 0.0, 0.0, 0.0 }
+IMM FLT32 { 0.5, 0.0, 0.0, 0.0 }
+
+ADD TEMP[0], IN[0], IMM[0]
+LG2 TEMP[0].x, TEMP[0].xxxx
+ADD OUT[0], TEMP[0], IMM[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-lit.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-lit.sh
new file mode 100644
index 0000000000..0e78ef86b5
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-lit.sh
@@ -0,0 +1,8 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+LIT OUT[0], IN[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-lrp.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-lrp.sh
new file mode 100644
index 0000000000..e9ee0f8147
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-lrp.sh
@@ -0,0 +1,11 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+DCL TEMP[0]
+
+ABS TEMP[0], IN[0]
+LRP OUT[0], TEMP[0], IN[0].xxxx, IN[0].yyyy
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-mad.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-mad.sh
new file mode 100644
index 0000000000..439acd5bbd
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-mad.sh
@@ -0,0 +1,11 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+IMM FLT32 { 0.5, 0.4, 0.6, 1.0 }
+IMM FLT32 { 0.5, 0.4, 0.6, 0.0 }
+
+MAD OUT[0], IN[0], IMM[0], IMM[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-max.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-max.sh
new file mode 100644
index 0000000000..ab21b245dd
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-max.sh
@@ -0,0 +1,10 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+IMM FLT32 { 0.4, 0.4, 0.4, 0.0 }
+
+MAX OUT[0], IN[0], IMM[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-min.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-min.sh
new file mode 100644
index 0000000000..969ae73d98
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-min.sh
@@ -0,0 +1,10 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+IMM FLT32 { 0.6, 0.6, 0.6, 1.0 }
+
+MIN OUT[0], IN[0], IMM[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-mov.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-mov.sh
new file mode 100644
index 0000000000..612975e057
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-mov.sh
@@ -0,0 +1,8 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+MOV OUT[0], IN[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-mul.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-mul.sh
new file mode 100644
index 0000000000..ed158b0fc6
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-mul.sh
@@ -0,0 +1,10 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+IMM FLT32 { 0.5, 0.6, 0.7, 1.0 }
+
+MUL OUT[0], IN[0], IMM[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-rcp.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-rcp.sh
new file mode 100644
index 0000000000..cc9feef07e
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-rcp.sh
@@ -0,0 +1,15 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+DCL TEMP[0]
+
+IMM FLT32 { 1.0, 0.0, 0.0, 0.0 }
+IMM FLT32 { 1.5, 0.0, 0.0, 0.0 }
+
+ADD TEMP[0], IN[0], IMM[0]
+RCP TEMP[0].x, TEMP[0].xxxx
+SUB OUT[0], TEMP[0], IMM[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-rsq.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-rsq.sh
new file mode 100644
index 0000000000..695621fdc9
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-rsq.sh
@@ -0,0 +1,15 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+DCL TEMP[0]
+
+IMM FLT32 { 1.0, 0.0, 0.0, 0.0 }
+IMM FLT32 { 1.5, 0.0, 0.0, 0.0 }
+
+ADD TEMP[0], IN[0], IMM[0]
+RSQ TEMP[0].x, TEMP[0].xxxx
+SUB OUT[0], TEMP[0], IMM[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-sge.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-sge.sh
new file mode 100644
index 0000000000..9505bc3c3e
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-sge.sh
@@ -0,0 +1,13 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+DCL TEMP[0]
+
+IMM FLT32 { 0.6, 0.6, 0.6, 0.0 }
+
+SGE TEMP[0], IN[0], IMM[0]
+MUL OUT[0], IN[0], TEMP[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-slt.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-slt.sh
new file mode 100644
index 0000000000..f2a1521cbf
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-slt.sh
@@ -0,0 +1,13 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+DCL TEMP[0]
+
+IMM FLT32 { 0.6, 0.6, 0.6, 0.0 }
+
+SLT TEMP[0], IN[0], IMM[0]
+MUL OUT[0], IN[0], TEMP[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-srcmod-abs.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-srcmod-abs.sh
new file mode 100644
index 0000000000..9cd4b68295
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-srcmod-abs.sh
@@ -0,0 +1,13 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+DCL TEMP[0]
+
+IMM FLT32 { -0.3, -0.5, -0.4, 0.0 }
+
+ADD TEMP[0], IN[0], IMM[0]
+MOV OUT[0], |TEMP[0]|
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-srcmod-absneg.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-srcmod-absneg.sh
new file mode 100644
index 0000000000..acd6aa750d
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-srcmod-absneg.sh
@@ -0,0 +1,15 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+DCL TEMP[0]
+
+IMM FLT32 { -0.2, -0.3, -0.4, 0.0 }
+IMM FLT32 { -1.0, -1.0, -1.0, -1.0 }
+
+ADD TEMP[0], IN[0], IMM[0]
+MOV TEMP[0], -|TEMP[0]|
+MUL OUT[0], TEMP[0], IMM[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-srcmod-neg.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-srcmod-neg.sh
new file mode 100644
index 0000000000..ba1b61503b
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-srcmod-neg.sh
@@ -0,0 +1,11 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+DCL TEMP[0]
+
+SUB TEMP[0], IN[0], IN[0].yzxw
+MOV OUT[0], -TEMP[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-srcmod-swz.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-srcmod-swz.sh
new file mode 100644
index 0000000000..192aa7bb26
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-srcmod-swz.sh
@@ -0,0 +1,8 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+MOV OUT[0], IN[0].yxzw
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-sub.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-sub.sh
new file mode 100644
index 0000000000..83441fa820
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-sub.sh
@@ -0,0 +1,8 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+SUB OUT[0], IN[0], IN[0].yzxw
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-xpd.sh b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-xpd.sh
new file mode 100644
index 0000000000..d6f66c4927
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/frag-xpd.sh
@@ -0,0 +1,8 @@
+FRAG1.1
+
+DCL IN[0], COLOR, LINEAR
+DCL OUT[0], COLOR
+
+XPD OUT[0], IN[0], IN[0].yzxw
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/fragment-shader/fragment-shader.py b/src/gallium/state_trackers/python/tests/regress/fragment-shader/fragment-shader.py
new file mode 100644
index 0000000000..d60fb38d1a
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/fragment-shader/fragment-shader.py
@@ -0,0 +1,218 @@
+#!/usr/bin/env python
+##########################################################################
+#
+# Copyright 2009 VMware, Inc.
+# All Rights Reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sub license, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial portions
+# of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+# IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+##########################################################################
+
+
+from gallium import *
+
+def make_image(surface):
+ data = surface.get_tile_rgba8(0, 0, surface.width, surface.height)
+
+ import Image
+ outimage = Image.fromstring('RGBA', (surface.width, surface.height), data, "raw", 'RGBA', 0, 1)
+ return outimage
+
+def save_image(filename, surface):
+ outimage = make_image(surface)
+ outimage.save(filename, "PNG")
+
+def test(dev, name):
+ ctx = dev.context_create()
+
+ width = 320
+ height = 320
+ minz = 0.0
+ maxz = 1.0
+
+ # disabled blending/masking
+ blend = Blend()
+ blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE
+ blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE
+ blend.rgb_dst_factor = PIPE_BLENDFACTOR_ZERO
+ blend.alpha_dst_factor = PIPE_BLENDFACTOR_ZERO
+ blend.colormask = PIPE_MASK_RGBA
+ ctx.set_blend(blend)
+
+ # depth/stencil/alpha
+ depth_stencil_alpha = DepthStencilAlpha()
+ depth_stencil_alpha.depth.enabled = 0
+ depth_stencil_alpha.depth.writemask = 1
+ depth_stencil_alpha.depth.func = PIPE_FUNC_LESS
+ ctx.set_depth_stencil_alpha(depth_stencil_alpha)
+
+ # rasterizer
+ rasterizer = Rasterizer()
+ rasterizer.front_winding = PIPE_WINDING_CW
+ rasterizer.cull_mode = PIPE_WINDING_NONE
+ rasterizer.scissor = 1
+ ctx.set_rasterizer(rasterizer)
+
+ # viewport
+ viewport = Viewport()
+ scale = FloatArray(4)
+ scale[0] = width / 2.0
+ scale[1] = -height / 2.0
+ scale[2] = (maxz - minz) / 2.0
+ scale[3] = 1.0
+ viewport.scale = scale
+ translate = FloatArray(4)
+ translate[0] = width / 2.0
+ translate[1] = height / 2.0
+ translate[2] = (maxz - minz) / 2.0
+ translate[3] = 0.0
+ viewport.translate = translate
+ ctx.set_viewport(viewport)
+
+ # samplers
+ sampler = Sampler()
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE
+ sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST
+ sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST
+ sampler.normalized_coords = 1
+ ctx.set_sampler(0, sampler)
+
+ # scissor
+ scissor = Scissor()
+ scissor.minx = 0
+ scissor.miny = 0
+ scissor.maxx = width
+ scissor.maxy = height
+ ctx.set_scissor(scissor)
+
+ clip = Clip()
+ clip.nr = 0
+ ctx.set_clip(clip)
+
+ # framebuffer
+ cbuf = dev.texture_create(
+ PIPE_FORMAT_X8R8G8B8_UNORM,
+ width, height,
+ tex_usage=PIPE_TEXTURE_USAGE_DISPLAY_TARGET,
+ ).get_surface()
+ fb = Framebuffer()
+ fb.width = width
+ fb.height = height
+ fb.nr_cbufs = 1
+ fb.set_cbuf(0, cbuf)
+ ctx.set_framebuffer(fb)
+ rgba = FloatArray(4)
+ rgba[0] = 0.5
+ rgba[1] = 0.5
+ rgba[2] = 0.5
+ rgba[3] = 0.5
+ ctx.clear(PIPE_CLEAR_COLOR, rgba, 0.0, 0)
+
+ # vertex shader
+ vs = Shader('''
+ VERT1.1
+ DCL IN[0], POSITION
+ DCL IN[1], COLOR
+ DCL OUT[0], POSITION
+ DCL OUT[1], COLOR
+ MOV OUT[0], IN[0]
+ MOV OUT[1], IN[1]
+ END
+ ''')
+ ctx.set_vertex_shader(vs)
+
+ # fragment shader
+ fs = Shader(file('frag-' + name + '.sh', 'rt').read())
+ ctx.set_fragment_shader(fs)
+
+ xy = [
+ -0.8, -0.8,
+ 0.8, -0.8,
+ 0.0, 0.8,
+ ]
+ color = [
+ 1.0, 0.0, 0.0,
+ 0.0, 1.0, 0.0,
+ 0.0, 0.0, 1.0,
+ ]
+
+ nverts = 3
+ nattrs = 2
+ verts = FloatArray(nverts * nattrs * 4)
+
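+ # pack interleaved vertex data: 4 floats of position followed by 4 floats of color per vertex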
+ for i in range(0, nverts):
+ verts[i * nattrs * 4 + 0] = xy[i * 2 + 0] # x
+ verts[i * nattrs * 4 + 1] = xy[i * 2 + 1] # y
+ verts[i * nattrs * 4 + 2] = 0.5 # z
+ verts[i * nattrs * 4 + 3] = 1.0 # w
+ verts[i * nattrs * 4 + 4] = color[i * 3 + 0] # r
+ verts[i * nattrs * 4 + 5] = color[i * 3 + 1] # g
+ verts[i * nattrs * 4 + 6] = color[i * 3 + 2] # b
+ verts[i * nattrs * 4 + 7] = 1.0 # a
+
+ ctx.draw_vertices(PIPE_PRIM_TRIANGLES,
+ nverts,
+ nattrs,
+ verts)
+
+ ctx.flush()
+
+ save_image('frag-' + name + '.png', cbuf)
+
+def main():
+ tests = [
+ 'abs',
+ 'add',
+ 'dp3',
+ 'dp4',
+ 'dst',
+ 'ex2',
+ 'flr',
+ 'frc',
+ 'lg2',
+ 'lit',
+ 'lrp',
+ 'mad',
+ 'max',
+ 'min',
+ 'mov',
+ 'mul',
+ 'rcp',
+ 'rsq',
+ 'sge',
+ 'slt',
+ 'srcmod-abs',
+ 'srcmod-absneg',
+ 'srcmod-neg',
+ 'srcmod-swz',
+ 'sub',
+ 'xpd',
+ ]
+
+ dev = Device()
+ for t in tests:
+ test(dev, t)
+
+if __name__ == '__main__':
+ main()
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-abs.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-abs.sh
new file mode 100644
index 0000000000..f0d0d5de17
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-abs.sh
@@ -0,0 +1,15 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+DCL TEMP[0]
+
+IMM FLT32 { 0.2, 0.2, 0.0, 0.0 }
+
+ADD TEMP[0], IN[0], IMM[0]
+ABS OUT[0], TEMP[0]
+MOV OUT[1], IN[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-add.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-add.sh
new file mode 100644
index 0000000000..936c851c9d
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-add.sh
@@ -0,0 +1,13 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+IMM FLT32 { 0.2, -0.1, 0.0, 0.0 }
+
+ADD OUT[0], IN[0], IMM[0]
+MOV OUT[1], IN[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-arl.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-arl.sh
new file mode 100644
index 0000000000..7638e96346
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-arl.sh
@@ -0,0 +1,23 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+DCL TEMP[0]
+
+DCL ADDR[0]
+
+IMM FLT32 { 3.0, 1.0, 1.0, 1.0 }
+IMM FLT32 { 1.0, 0.0, 0.0, 1.0 }
+IMM FLT32 { 0.0, 1.0, 0.0, 1.0 }
+IMM FLT32 { 0.0, 0.0, 1.0, 1.0 }
+IMM FLT32 { 1.0, 1.0, 0.0, 1.0 }
+IMM FLT32 { 0.0, 1.0, 1.0, 1.0 }
+
+MOV OUT[0], IN[0]
+MUL TEMP[0], IN[0], IMM[0]
+ARL ADDR[0].x, TEMP[0]
+MOV OUT[1], IMM[ADDR[0].x + 3]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-arr.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-arr.sh
new file mode 100644
index 0000000000..28ce6f9a0c
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-arr.sh
@@ -0,0 +1,23 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+DCL TEMP[0]
+
+DCL ADDR[0]
+
+IMM FLT32 { 3.0, 1.0, 1.0, 1.0 }
+IMM FLT32 { 1.0, 0.0, 0.0, 1.0 }
+IMM FLT32 { 0.0, 1.0, 0.0, 1.0 }
+IMM FLT32 { 0.0, 0.0, 1.0, 1.0 }
+IMM FLT32 { 1.0, 1.0, 0.0, 1.0 }
+IMM FLT32 { 0.0, 1.0, 1.0, 1.0 }
+
+MOV OUT[0], IN[0]
+MUL TEMP[0], IN[0], IMM[0]
+ARR ADDR[0].x, TEMP[0]
+MOV OUT[1], IMM[ADDR[0].x + 3]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-dp3.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-dp3.sh
new file mode 100644
index 0000000000..b57d68520f
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-dp3.sh
@@ -0,0 +1,16 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+DCL TEMP[0]
+
+IMM FLT32 { 0.0, 0.0, 1.0, 1.0 }
+
+DP3 TEMP[0].xy, IN[0], IN[0]
+MOV TEMP[0].zw, IMM[0]
+MUL OUT[0], IN[0], TEMP[0]
+MOV OUT[1], IN[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-dp4.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-dp4.sh
new file mode 100644
index 0000000000..0eb31719c5
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-dp4.sh
@@ -0,0 +1,16 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+DCL TEMP[0]
+
+IMM FLT32 { 0.0, 0.0, 1.0, 1.0 }
+
+DP4 TEMP[0].xy, IN[0], IN[0]
+MOV TEMP[0].zw, IMM[0]
+MUL OUT[0], IN[0], TEMP[0]
+MOV OUT[1], IN[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-dst.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-dst.sh
new file mode 100644
index 0000000000..dc5e0eb92e
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-dst.sh
@@ -0,0 +1,11 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+MOV OUT[0], IN[0]
+DST OUT[1], IN[1], IN[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-ex2.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-ex2.sh
new file mode 100644
index 0000000000..34057af4e6
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-ex2.sh
@@ -0,0 +1,18 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+DCL TEMP[0..1]
+
+IMM FLT32 { 0.3, 0.3, 0.3, 1.0 }
+
+EX2 TEMP[0], IN[0]
+EX2 TEMP[1], IN[1].yyyy
+MUL TEMP[0], TEMP[0], IMM[0]
+MOV OUT[0], IN[0]
+MUL OUT[1], TEMP[0], TEMP[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-flr.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-flr.sh
new file mode 100644
index 0000000000..44ad708119
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-flr.sh
@@ -0,0 +1,23 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+DCL TEMP[0]
+
+DCL ADDR[0]
+
+IMM FLT32 { 3.0, 1.0, 1.0, 1.0 }
+IMM FLT32 { 1.0, 0.0, 0.0, 1.0 }
+IMM FLT32 { 0.0, 1.0, 0.0, 1.0 }
+IMM FLT32 { 0.0, 0.0, 1.0, 1.0 }
+IMM FLT32 { 1.0, 1.0, 0.0, 1.0 }
+IMM FLT32 { 0.0, 1.0, 1.0, 1.0 }
+
+MOV OUT[0], IN[0]
+MUL TEMP[0], IN[0], IMM[0]
+FLR ADDR[0].x, TEMP[0]
+MOV OUT[1], IMM[ADDR[0].x + 3]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-frc.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-frc.sh
new file mode 100644
index 0000000000..d179749de8
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-frc.sh
@@ -0,0 +1,15 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+DCL TEMP[0]
+
+IMM FLT32 { 2.7, 3.1, 4.5, 1.0 }
+
+MUL TEMP[0], IN[0].xyxw, IMM[0]
+MOV OUT[0], IN[0]
+FRC OUT[1], TEMP[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-lg2.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-lg2.sh
new file mode 100644
index 0000000000..f6e08d087c
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-lg2.sh
@@ -0,0 +1,18 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+DCL TEMP[0]
+
+IMM FLT32 { 1.0, 0.0, 0.0, 0.0 }
+IMM FLT32 { 0.5, 0.0, 0.0, 0.0 }
+
+ADD TEMP[0], IN[0], IMM[0]
+LG2 TEMP[0].x, TEMP[0].xxxx
+ADD OUT[0], TEMP[0], IMM[1]
+MOV OUT[1], IN[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-lit.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-lit.sh
new file mode 100644
index 0000000000..da98f30928
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-lit.sh
@@ -0,0 +1,11 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+MOV OUT[0], IN[0]
+LIT OUT[1], IN[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-lrp.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-lrp.sh
new file mode 100644
index 0000000000..8c262580e2
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-lrp.sh
@@ -0,0 +1,14 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+DCL TEMP[0]
+
+ABS TEMP[0], IN[0]
+MOV OUT[0], IN[0]
+LRP OUT[1], TEMP[0], IN[1].xxxx, IN[1].yyyy
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-mad.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-mad.sh
new file mode 100644
index 0000000000..eb07a3bd56
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-mad.sh
@@ -0,0 +1,14 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+IMM FLT32 { 0.5, 1.0, 1.0, 1.0 }
+IMM FLT32 { 0.5, 0.0, 0.0, 0.0 }
+
+MAD OUT[0], IN[0], IMM[0], IMM[1]
+MOV OUT[1], IN[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-max.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-max.sh
new file mode 100644
index 0000000000..2d8b1fe3bf
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-max.sh
@@ -0,0 +1,13 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+IMM FLT32 { 0.5, 0.5, 0.5, 0.0 }
+
+MOV OUT[0], IN[0]
+MAX OUT[1], IN[1], IMM[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-min.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-min.sh
new file mode 100644
index 0000000000..84af0e2905
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-min.sh
@@ -0,0 +1,13 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+IMM FLT32 { 0.5, 0.5, 0.5, 0.0 }
+
+MOV OUT[0], IN[0]
+MIN OUT[1], IN[1], IMM[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-mov.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-mov.sh
new file mode 100644
index 0000000000..bcdec07c20
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-mov.sh
@@ -0,0 +1,11 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+MOV OUT[0], IN[0]
+MOV OUT[1], IN[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-mul.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-mul.sh
new file mode 100644
index 0000000000..f3b57c3038
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-mul.sh
@@ -0,0 +1,13 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+IMM FLT32 { 0.6, 0.6, 1.0, 1.0 }
+
+MUL OUT[0], IN[0], IMM[0]
+MOV OUT[1], IN[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-rcp.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-rcp.sh
new file mode 100644
index 0000000000..78af589b5c
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-rcp.sh
@@ -0,0 +1,18 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+DCL TEMP[0]
+
+IMM FLT32 { 1.0, 0.0, 0.0, 0.0 }
+IMM FLT32 { 1.5, 0.0, 0.0, 0.0 }
+
+ADD TEMP[0], IN[0], IMM[0]
+RCP TEMP[0].x, TEMP[0].xxxx
+SUB OUT[0], TEMP[0], IMM[1]
+MOV OUT[1], IN[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-rsq.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-rsq.sh
new file mode 100644
index 0000000000..1675c7d5ff
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-rsq.sh
@@ -0,0 +1,18 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+DCL TEMP[0]
+
+IMM FLT32 { 1.0, 0.0, 0.0, 0.0 }
+IMM FLT32 { 1.5, 0.0, 0.0, 0.0 }
+
+ADD TEMP[0], IN[0], IMM[0]
+RSQ TEMP[0].x, TEMP[0].xxxx
+SUB OUT[0], TEMP[0], IMM[1]
+MOV OUT[1], IN[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-sge.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-sge.sh
new file mode 100644
index 0000000000..3d92cd5aae
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-sge.sh
@@ -0,0 +1,16 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+DCL TEMP[0]
+
+IMM FLT32 { -0.1, -0.1, 1.0, 0.0 }
+
+SGE TEMP[0], IN[0], IMM[0]
+MOV OUT[0], IN[0]
+MUL OUT[1], IN[1], TEMP[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-slt.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-slt.sh
new file mode 100644
index 0000000000..85c60ff4ec
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-slt.sh
@@ -0,0 +1,16 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+DCL TEMP[0]
+
+IMM FLT32 { 0.6, 0.6, 0.0, 0.0 }
+
+SLT TEMP[0], IN[0], IMM[0]
+MOV OUT[0], IN[0]
+MUL OUT[1], IN[1], TEMP[0]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-srcmod-abs.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-srcmod-abs.sh
new file mode 100644
index 0000000000..6db417a62e
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-srcmod-abs.sh
@@ -0,0 +1,15 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+DCL TEMP[0]
+
+IMM FLT32 { 0.1, 0.1, 0.0, 0.0 }
+
+ADD TEMP[0], IN[0], IMM[0]
+MOV OUT[0], |TEMP[0]|
+MOV OUT[1], IN[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-srcmod-absneg.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-srcmod-absneg.sh
new file mode 100644
index 0000000000..fc83238052
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-srcmod-absneg.sh
@@ -0,0 +1,16 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+DCL TEMP[0]
+
+IMM FLT32 { -0.2, -0.2, 0.0, 0.0 }
+
+ADD TEMP[0], IN[0], IMM[0]
+MOV OUT[0].xy, -|TEMP[0]|
+MOV OUT[0].zw, IN[0]
+MOV OUT[1], IN[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-srcmod-neg.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-srcmod-neg.sh
new file mode 100644
index 0000000000..ce4e90b5e1
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-srcmod-neg.sh
@@ -0,0 +1,12 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+MOV OUT[0].xy, -IN[0]
+MOV OUT[0].zw, IN[0]
+MOV OUT[1], IN[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-srcmod-swz.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-srcmod-swz.sh
new file mode 100644
index 0000000000..c03de4c674
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-srcmod-swz.sh
@@ -0,0 +1,11 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+MOV OUT[0], IN[0].yxzw
+MOV OUT[1], IN[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-sub.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-sub.sh
new file mode 100644
index 0000000000..a583b95828
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-sub.sh
@@ -0,0 +1,13 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+IMM FLT32 { 0.1, 0.1, 0.0, 0.0 }
+
+SUB OUT[0], IN[0], IMM[0]
+MOV OUT[1], IN[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-xpd.sh b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-xpd.sh
new file mode 100644
index 0000000000..8def8943b0
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vert-xpd.sh
@@ -0,0 +1,11 @@
+VERT1.1
+
+DCL IN[0], POSITION
+DCL IN[1], COLOR
+DCL OUT[0], POSITION
+DCL OUT[1], COLOR
+
+MOV OUT[0], IN[0]
+XPD OUT[1], IN[0], IN[1]
+
+END
diff --git a/src/gallium/state_trackers/python/tests/regress/vertex-shader/vertex-shader.py b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vertex-shader.py
new file mode 100644
index 0000000000..472769f259
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/regress/vertex-shader/vertex-shader.py
@@ -0,0 +1,247 @@
+#!/usr/bin/env python
+##########################################################################
+#
+# Copyright 2009 VMware, Inc.
+# All Rights Reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sub license, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial portions
+# of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+# IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+##########################################################################
+
+
+from gallium import *
+
+def make_image(surface):
+ data = surface.get_tile_rgba8(0, 0, surface.width, surface.height)
+
+ import Image
+ outimage = Image.fromstring('RGBA', (surface.width, surface.height), data, "raw", 'RGBA', 0, 1)
+ return outimage
+
+def save_image(filename, surface):
+ outimage = make_image(surface)
+ outimage.save(filename, "PNG")
+
+def test(dev, name):
+ ctx = dev.context_create()
+
+ width = 320
+ height = 320
+ minz = 0.0
+ maxz = 1.0
+
+ # disabled blending/masking
+ blend = Blend()
+ blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE
+ blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE
+ blend.rgb_dst_factor = PIPE_BLENDFACTOR_ZERO
+ blend.alpha_dst_factor = PIPE_BLENDFACTOR_ZERO
+ blend.colormask = PIPE_MASK_RGBA
+ ctx.set_blend(blend)
+
+ # depth/stencil/alpha
+ depth_stencil_alpha = DepthStencilAlpha()
+ depth_stencil_alpha.depth.enabled = 0
+ depth_stencil_alpha.depth.writemask = 1
+ depth_stencil_alpha.depth.func = PIPE_FUNC_LESS
+ ctx.set_depth_stencil_alpha(depth_stencil_alpha)
+
+ # rasterizer
+ rasterizer = Rasterizer()
+ rasterizer.front_winding = PIPE_WINDING_CW
+ rasterizer.cull_mode = PIPE_WINDING_NONE
+ rasterizer.scissor = 1
+ ctx.set_rasterizer(rasterizer)
+
+ # viewport
+ viewport = Viewport()
+ scale = FloatArray(4)
+ scale[0] = width / 2.0
+ scale[1] = -height / 2.0
+ scale[2] = (maxz - minz) / 2.0
+ scale[3] = 1.0
+ viewport.scale = scale
+ translate = FloatArray(4)
+ translate[0] = width / 2.0
+ translate[1] = height / 2.0
+ translate[2] = (maxz - minz) / 2.0
+ translate[3] = 0.0
+ viewport.translate = translate
+ ctx.set_viewport(viewport)
+
+ # samplers
+ sampler = Sampler()
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE
+ sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST
+ sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST
+ sampler.normalized_coords = 1
+ ctx.set_sampler(0, sampler)
+
+ # scissor
+ scissor = Scissor()
+ scissor.minx = 0
+ scissor.miny = 0
+ scissor.maxx = width
+ scissor.maxy = height
+ ctx.set_scissor(scissor)
+
+ clip = Clip()
+ clip.nr = 0
+ ctx.set_clip(clip)
+
+ # framebuffer
+ cbuf = dev.texture_create(
+ PIPE_FORMAT_X8R8G8B8_UNORM,
+ width, height,
+ tex_usage=PIPE_TEXTURE_USAGE_DISPLAY_TARGET,
+ ).get_surface()
+ fb = Framebuffer()
+ fb.width = width
+ fb.height = height
+ fb.nr_cbufs = 1
+ fb.set_cbuf(0, cbuf)
+ ctx.set_framebuffer(fb)
+ rgba = FloatArray(4)
+ rgba[0] = 0.5
+ rgba[1] = 0.5
+ rgba[2] = 0.5
+ rgba[3] = 0.5
+ ctx.clear(PIPE_CLEAR_COLOR, rgba, 0.0, 0)
+
+ # vertex shader
+ vs = Shader(file('vert-' + name + '.sh', 'rt').read())
+ ctx.set_vertex_shader(vs)
+
+ # fragment shader
+ fs = Shader('''
+ FRAG1.1
+ DCL IN[0], COLOR, LINEAR
+ DCL OUT[0], COLOR, CONSTANT
+ 0:MOV OUT[0], IN[0]
+ 1:END
+ ''')
+ ctx.set_fragment_shader(fs)
+
+ xy = [
+ 0.0, 0.8,
+ -0.2, 0.4,
+ 0.2, 0.4,
+ -0.4, 0.0,
+ 0.0, 0.0,
+ 0.4, 0.0,
+ -0.6, -0.4,
+ -0.2, -0.4,
+ 0.2, -0.4,
+ 0.6, -0.4,
+ -0.8, -0.8,
+ -0.4, -0.8,
+ 0.0, -0.8,
+ 0.4, -0.8,
+ 0.8, -0.8,
+ ]
+ color = [
+ 1.0, 0.0, 0.0,
+ 0.0, 1.0, 0.0,
+ 0.0, 0.0, 1.0,
+ ]
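+ # indices into xy[]: 16 triangles forming a subdivided larger triangle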
+ tri = [
+ 1, 2, 0,
+ 3, 4, 1,
+ 4, 2, 1,
+ 4, 5, 2,
+ 6, 7, 3,
+ 7, 4, 3,
+ 7, 8, 4,
+ 8, 5, 4,
+ 8, 9, 5,
+ 10, 11, 6,
+ 11, 7, 6,
+ 11, 12, 7,
+ 12, 8, 7,
+ 12, 13, 8,
+ 13, 9, 8,
+ 13, 14, 9,
+ ]
+
+ nverts = 16 * 3
+ nattrs = 2
+ verts = FloatArray(nverts * nattrs * 4)
+
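+ # interleaved layout again: position then color, with colors cycling red/green/blue per triangle corner (i % 3)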
+ for i in range(0, nverts):
+ verts[i * nattrs * 4 + 0] = xy[tri[i] * 2 + 0] # x
+ verts[i * nattrs * 4 + 1] = xy[tri[i] * 2 + 1] # y
+ verts[i * nattrs * 4 + 2] = 0.5 # z
+ verts[i * nattrs * 4 + 3] = 1.0 # w
+ verts[i * nattrs * 4 + 4] = color[(i % 3) * 3 + 0] # r
+ verts[i * nattrs * 4 + 5] = color[(i % 3) * 3 + 1] # g
+ verts[i * nattrs * 4 + 6] = color[(i % 3) * 3 + 2] # b
+ verts[i * nattrs * 4 + 7] = 1.0 # a
+
+ ctx.draw_vertices(PIPE_PRIM_TRIANGLES,
+ nverts,
+ nattrs,
+ verts)
+
+ ctx.flush()
+
+ save_image('vert-' + name + '.png', cbuf)
+
+def main():
+ tests = [
+ 'abs',
+ 'add',
+ 'arl',
+ 'arr',
+ 'dp3',
+ 'dp4',
+ 'dst',
+ 'ex2',
+ 'flr',
+ 'frc',
+ 'lg2',
+ 'lit',
+ 'lrp',
+ 'mad',
+ 'max',
+ 'min',
+ 'mov',
+ 'mul',
+ 'rcp',
+ 'rsq',
+ 'sge',
+ 'slt',
+ 'srcmod-abs',
+ 'srcmod-absneg',
+ 'srcmod-neg',
+ 'srcmod-swz',
+ 'sub',
+ 'xpd',
+ ]
+
+ dev = Device()
+ for t in tests:
+ test(dev, t)
+
+if __name__ == '__main__':
+ main()
diff --git a/src/gallium/state_trackers/python/tests/surface_copy.py b/src/gallium/state_trackers/python/tests/surface_copy.py
new file mode 100755
index 0000000000..3ceecbbd3a
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/surface_copy.py
@@ -0,0 +1,199 @@
+#!/usr/bin/env python
+##########################################################################
+#
+# Copyright 2009 VMware, Inc.
+# All Rights Reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sub license, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial portions
+# of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+# IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+##########################################################################
+
+
+import os
+
+from gallium import *
+from base import *
+
+
+def lods(*dims):
+ size = max(dims)
+ lods = 0
+ while size:
+ lods += 1
+ size >>= 1
+ return lods
+
+
+class TextureTest(TestCase):
+
+ tags = (
+ 'target',
+ 'format',
+ 'width',
+ 'height',
+ 'depth',
+ 'last_level',
+ 'face',
+ 'level',
+ 'zslice',
+ )
+
+ def test(self):
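+ # fill the source surface with random raw data, copy it to the destination surface and compare byte-for-byte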
+ dev = self.dev
+
+ target = self.target
+ format = self.format
+ width = self.width
+ height = self.height
+ depth = self.depth
+ last_level = self.last_level
+ face = self.face
+ level = self.level
+ zslice = self.zslice
+
+ # textures
+ dst_texture = dev.texture_create(
+ target = target,
+ format = format,
+ width = width,
+ height = height,
+ depth = depth,
+ last_level = last_level,
+ tex_usage = PIPE_TEXTURE_USAGE_RENDER_TARGET,
+ )
+ if dst_texture is None:
+ raise TestSkip
+
+ dst_surface = dst_texture.get_surface(face = face, level = level, zslice = zslice)
+
+ src_texture = dev.texture_create(
+ target = target,
+ format = format,
+ width = dst_surface.width,
+ height = dst_surface.height,
+ depth = 1,
+ last_level = 0,
+ tex_usage = PIPE_TEXTURE_USAGE_SAMPLER,
+ )
+
+ src_surface = src_texture.get_surface()
+
+ x = 0
+ y = 0
+ w = dst_surface.width
+ h = dst_surface.height
+
+ stride = dst_surface.nblocksx * dst_texture.block.size
+ size = dst_surface.nblocksy * stride
+ src_raw = os.urandom(size)
+
+ src_surface.put_tile_raw(0, 0, w, h, src_raw, stride)
+
+ ctx = self.dev.context_create()
+
+ ctx.surface_copy(dst_surface, 0, 0,
+ src_surface, 0, 0, w, h)
+
+ ctx.flush()
+
+ dst_raw = dst_surface.get_tile_raw(0, 0, w, h)
+
+ if dst_raw != src_raw:
+ raise TestFailure
+
+
+
+def main():
+ dev = Device()
+ suite = TestSuite()
+
+ targets = [
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_CUBE,
+ #PIPE_TEXTURE_3D,
+ ]
+
+ formats = [
+ PIPE_FORMAT_A8R8G8B8_UNORM,
+ PIPE_FORMAT_X8R8G8B8_UNORM,
+ PIPE_FORMAT_A8R8G8B8_SRGB,
+ PIPE_FORMAT_R5G6B5_UNORM,
+ PIPE_FORMAT_A1R5G5B5_UNORM,
+ PIPE_FORMAT_A4R4G4B4_UNORM,
+ PIPE_FORMAT_Z32_UNORM,
+ PIPE_FORMAT_Z24S8_UNORM,
+ PIPE_FORMAT_Z24X8_UNORM,
+ PIPE_FORMAT_Z16_UNORM,
+ PIPE_FORMAT_S8_UNORM,
+ PIPE_FORMAT_A8_UNORM,
+ PIPE_FORMAT_L8_UNORM,
+ PIPE_FORMAT_DXT1_RGB,
+ PIPE_FORMAT_DXT1_RGBA,
+ PIPE_FORMAT_DXT3_RGBA,
+ PIPE_FORMAT_DXT5_RGBA,
+ ]
+
+ sizes = [64, 32, 16, 8, 4, 2, 1]
+ #sizes = [1020, 508, 252, 62, 30, 14, 6, 3]
+ #sizes = [64]
+ #sizes = [63]
+
+ faces = [
+ PIPE_TEX_FACE_POS_X,
+ PIPE_TEX_FACE_NEG_X,
+ PIPE_TEX_FACE_POS_Y,
+ PIPE_TEX_FACE_NEG_Y,
+ PIPE_TEX_FACE_POS_Z,
+ PIPE_TEX_FACE_NEG_Z,
+ ]
+
+ for target in targets:
+ for format in formats:
+ for size in sizes:
+ if target == PIPE_TEXTURE_3D:
+ depth = size
+ else:
+ depth = 1
+ for face in faces:
+ if target != PIPE_TEXTURE_CUBE and face:
+ continue
+ levels = lods(size)
+ for last_level in range(levels):
+ for level in range(0, last_level + 1):
+ zslice = 0
+ while zslice < depth >> level:
+ test = TextureTest(
+ dev = dev,
+ target = target,
+ format = format,
+ width = size,
+ height = size,
+ depth = depth,
+ last_level = last_level,
+ face = face,
+ level = level,
+ zslice = zslice,
+ )
+ suite.add_test(test)
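+ # zslice steps through 0, 1, 3, 7, ... rather than every slice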
+ zslice = (zslice + 1)*2 - 1
+ suite.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/src/gallium/state_trackers/python/tests/texture_render.py b/src/gallium/state_trackers/python/tests/texture_render.py
new file mode 100755
index 0000000000..0b76932b6e
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/texture_render.py
@@ -0,0 +1,320 @@
+#!/usr/bin/env python
+##########################################################################
+#
+# Copyright 2009 VMware, Inc.
+# All Rights Reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sub license, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial portions
+# of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+# IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+##########################################################################
+
+
+from gallium import *
+from base import *
+
+
+def lods(*dims):
+ size = max(dims)
+ lods = 0
+ while size:
+ lods += 1
+ size >>= 1
+ return lods
+
+
+class TextureTest(TestCase):
+
+ tags = (
+ 'target',
+ 'format',
+ 'width',
+ 'height',
+ 'depth',
+ 'last_level',
+ 'face',
+ 'level',
+ 'zslice',
+ )
+
+ def test(self):
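+ # draw a quad textured with reference data into the surface under test, then compare the result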
+ dev = self.dev
+
+ target = self.target
+ format = self.format
+ width = self.width
+ height = self.height
+ depth = self.depth
+ last_level = self.last_level
+ face = self.face
+ level = self.level
+ zslice = self.zslice
+
+ # textures
+ dst_texture = dev.texture_create(
+ target = target,
+ format = format,
+ width = width,
+ height = height,
+ depth = depth,
+ last_level = last_level,
+ tex_usage = PIPE_TEXTURE_USAGE_RENDER_TARGET,
+ )
+ if dst_texture is None:
+ raise TestSkip
+
+ dst_surface = dst_texture.get_surface(face = face, level = level, zslice = zslice)
+
+ ref_texture = dev.texture_create(
+ target = target,
+ format = format,
+ width = dst_surface.width,
+ height = dst_surface.height,
+ depth = 1,
+ last_level = 0,
+ tex_usage = PIPE_TEXTURE_USAGE_SAMPLER,
+ )
+
+ ref_surface = ref_texture.get_surface()
+
+ src_texture = dev.texture_create(
+ target = target,
+ format = PIPE_FORMAT_A8R8G8B8_UNORM,
+ width = dst_surface.width,
+ height = dst_surface.height,
+ depth = 1,
+ last_level = 0,
+ tex_usage = PIPE_TEXTURE_USAGE_SAMPLER,
+ )
+
+ src_surface = src_texture.get_surface()
+
+ expected_rgba = FloatArray(height*width*4)
+ ref_surface.sample_rgba(expected_rgba)
+
+ src_surface.put_tile_rgba(0, 0, src_surface.width, src_surface.height, expected_rgba)
+
+ ctx = self.dev.context_create()
+
+ # disabled blending/masking
+ blend = Blend()
+ blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE
+ blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE
+ blend.rgb_dst_factor = PIPE_BLENDFACTOR_ZERO
+ blend.alpha_dst_factor = PIPE_BLENDFACTOR_ZERO
+ blend.colormask = PIPE_MASK_RGBA
+ ctx.set_blend(blend)
+
+ # no-op depth/stencil/alpha
+ depth_stencil_alpha = DepthStencilAlpha()
+ ctx.set_depth_stencil_alpha(depth_stencil_alpha)
+
+ # rasterizer
+ rasterizer = Rasterizer()
+ rasterizer.front_winding = PIPE_WINDING_CW
+ rasterizer.cull_mode = PIPE_WINDING_NONE
+ rasterizer.bypass_vs_clip_and_viewport = 1
+ ctx.set_rasterizer(rasterizer)
+
+ # samplers
+ sampler = Sampler()
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NEAREST
+ sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST
+ sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST
+ sampler.normalized_coords = 1
+ sampler.min_lod = 0
+ sampler.max_lod = PIPE_MAX_TEXTURE_LEVELS - 1
+ ctx.set_sampler(0, sampler)
+ ctx.set_sampler_texture(0, src_texture)
+
+ # framebuffer
+ cbuf_tex = dev.texture_create(
+ PIPE_FORMAT_A8R8G8B8_UNORM,
+ width,
+ height,
+ tex_usage = PIPE_TEXTURE_USAGE_RENDER_TARGET,
+ )
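+ # note: the framebuffer below renders into dst_surface; cbuf_tex is not bound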
+
+ fb = Framebuffer()
+ fb.width = dst_surface.width
+ fb.height = dst_surface.height
+ fb.nr_cbufs = 1
+ fb.set_cbuf(0, dst_surface)
+ ctx.set_framebuffer(fb)
+ rgba = FloatArray(4)
+ rgba[0] = 0.0
+ rgba[1] = 0.0
+ rgba[2] = 0.0
+ rgba[3] = 0.0
+ ctx.clear(PIPE_CLEAR_COLOR, rgba, 0.0, 0)
+ del fb
+
+ # vertex shader
+ vs = Shader('''
+ VERT1.1
+ DCL IN[0], POSITION, CONSTANT
+ DCL IN[1], GENERIC, CONSTANT
+ DCL OUT[0], POSITION, CONSTANT
+ DCL OUT[1], GENERIC, CONSTANT
+ 0:MOV OUT[0], IN[0]
+ 1:MOV OUT[1], IN[1]
+ 2:END
+ ''')
+ #vs.dump()
+ ctx.set_vertex_shader(vs)
+
+ # fragment shader
+ fs = Shader('''
+ FRAG1.1
+ DCL IN[0], GENERIC[0], LINEAR
+ DCL OUT[0], COLOR, CONSTANT
+ DCL SAMP[0], CONSTANT
+ 0:TEX OUT[0], IN[0], SAMP[0], 2D
+ 1:END
+ ''')
+ #fs.dump()
+ ctx.set_fragment_shader(fs)
+
+ nverts = 4
+ nattrs = 2
+ verts = FloatArray(nverts * nattrs * 4)
+
+ x = 0
+ y = 0
+ w = dst_surface.width
+ h = dst_surface.height
+
+ pos = [
+ [x, y],
+ [x+w, y],
+ [x+w, y+h],
+ [x, y+h],
+ ]
+
+ tex = [
+ [0.0, 0.0],
+ [1.0, 0.0],
+ [1.0, 1.0],
+ [0.0, 1.0],
+ ]
+
+ for i in range(0, 4):
+ j = 8*i
+ verts[j + 0] = pos[i][0] # x
+ verts[j + 1] = pos[i][1] # y
+ verts[j + 2] = 0.0 # z
+ verts[j + 3] = 1.0 # w
+ verts[j + 4] = tex[i][0] # s
+ verts[j + 5] = tex[i][1] # t
+ verts[j + 6] = 0.0
+ verts[j + 7] = 1.0
+
+ ctx.draw_vertices(PIPE_PRIM_TRIANGLE_FAN,
+ nverts,
+ nattrs,
+ verts)
+
+ ctx.flush()
+
+ self.assert_rgba(dst_surface, x, y, w, h, expected_rgba, 4.0/256, 0.85)
+
+
+
+def main():
+ dev = Device()
+ suite = TestSuite()
+
+ targets = [
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_CUBE,
+ #PIPE_TEXTURE_3D,
+ ]
+
+ formats = [
+ PIPE_FORMAT_A8R8G8B8_UNORM,
+ PIPE_FORMAT_X8R8G8B8_UNORM,
+ #PIPE_FORMAT_A8R8G8B8_SRGB,
+ PIPE_FORMAT_R5G6B5_UNORM,
+ PIPE_FORMAT_A1R5G5B5_UNORM,
+ PIPE_FORMAT_A4R4G4B4_UNORM,
+ #PIPE_FORMAT_Z32_UNORM,
+ #PIPE_FORMAT_Z24S8_UNORM,
+ #PIPE_FORMAT_Z24X8_UNORM,
+ #PIPE_FORMAT_Z16_UNORM,
+ #PIPE_FORMAT_S8_UNORM,
+ PIPE_FORMAT_A8_UNORM,
+ PIPE_FORMAT_L8_UNORM,
+ #PIPE_FORMAT_DXT1_RGB,
+ #PIPE_FORMAT_DXT1_RGBA,
+ #PIPE_FORMAT_DXT3_RGBA,
+ #PIPE_FORMAT_DXT5_RGBA,
+ ]
+
+ sizes = [64, 32, 16, 8, 4, 2, 1]
+ #sizes = [1020, 508, 252, 62, 30, 14, 6, 3]
+ #sizes = [64]
+ #sizes = [63]
+
+ faces = [
+ PIPE_TEX_FACE_POS_X,
+ PIPE_TEX_FACE_NEG_X,
+ PIPE_TEX_FACE_POS_Y,
+ PIPE_TEX_FACE_NEG_Y,
+ PIPE_TEX_FACE_POS_Z,
+ PIPE_TEX_FACE_NEG_Z,
+ ]
+
+ for target in targets:
+ for format in formats:
+ for size in sizes:
+ if target == PIPE_TEXTURE_3D:
+ depth = size
+ else:
+ depth = 1
+ for face in faces:
+ if target != PIPE_TEXTURE_CUBE and face:
+ continue
+ levels = lods(size)
+ for last_level in range(levels):
+ for level in range(0, last_level + 1):
+ zslice = 0
+ while zslice < depth >> level:
+ test = TextureTest(
+ dev = dev,
+ target = target,
+ format = format,
+ width = size,
+ height = size,
+ depth = depth,
+ last_level = last_level,
+ face = face,
+ level = level,
+ zslice = zslice,
+ )
+ suite.add_test(test)
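+ # zslice steps through 0, 1, 3, 7, ... rather than every slice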
+ zslice = (zslice + 1)*2 - 1
+ suite.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/src/gallium/state_trackers/python/tests/texture_sample.py b/src/gallium/state_trackers/python/tests/texture_sample.py
new file mode 100755
index 0000000000..c7b78abbbe
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/texture_sample.py
@@ -0,0 +1,591 @@
+#!/usr/bin/env python
+##########################################################################
+#
+# Copyright 2009 VMware, Inc.
+# Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+# All Rights Reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sub license, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial portions
+# of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+# IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+##########################################################################
+
+
+from gallium import *
+from base import *
+
+
+def lods(*dims):
+ size = max(dims)
+ lods = 0
+ while size:
+ lods += 1
+ size >>= 1
+ return lods
+
+
+def minify(dims, level = 1):
+ return [max(dim>>level, 1) for dim in dims]
+
+
+def tex_coords(texture, face, level, zslice):
+ st = [
+ [0.0, 0.0],
+ [1.0, 0.0],
+ [1.0, 1.0],
+ [0.0, 1.0],
+ ]
+
+ if texture.target == PIPE_TEXTURE_2D:
+ return [[s, t, 0.0] for s, t in st]
+ elif texture.target == PIPE_TEXTURE_3D:
+ depth = texture.get_depth(level)
+ if depth > 1:
+ r = float(zslice)/float(depth - 1)
+ else:
+ r = 0.0
+ return [[s, t, r] for s, t in st]
+ elif texture.target == PIPE_TEXTURE_CUBE:
+ result = []
+ for s, t in st:
+ # See http://developer.nvidia.com/object/cube_map_ogl_tutorial.html
+ sc = 2.0*s - 1.0
+ tc = 2.0*t - 1.0
+ if face == PIPE_TEX_FACE_POS_X:
+ rx = 1.0
+ ry = -tc
+ rz = -sc
+ if face == PIPE_TEX_FACE_NEG_X:
+ rx = -1.0
+ ry = -tc
+ rz = sc
+ if face == PIPE_TEX_FACE_POS_Y:
+ rx = sc
+ ry = 1.0
+ rz = tc
+ if face == PIPE_TEX_FACE_NEG_Y:
+ rx = sc
+ ry = -1.0
+ rz = -tc
+ if face == PIPE_TEX_FACE_POS_Z:
+ rx = sc
+ ry = -tc
+ rz = 1.0
+ if face == PIPE_TEX_FACE_NEG_Z:
+ rx = -sc
+ ry = -tc
+ rz = -1.0
+ result.append([rx, ry, rz])
+ return result
+
+def is_pot(n):
+ return n & (n - 1) == 0
+
+
+class TextureColorSampleTest(TestCase):
+
+ tags = (
+ 'target',
+ 'format',
+ 'width',
+ 'height',
+ 'depth',
+ 'last_level',
+ 'face',
+ 'level',
+ 'zslice',
+ )
+
+ def test(self):
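+ # sample the texture in a fragment shader onto a quad and compare against the CPU-sampled reference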
+ dev = self.dev
+
+ target = self.target
+ format = self.format
+ width = self.width
+ height = self.height
+ depth = self.depth
+ last_level = self.last_level
+ face = self.face
+ level = self.level
+ zslice = self.zslice
+
+ tex_usage = PIPE_TEXTURE_USAGE_SAMPLER
+ geom_flags = 0
+ if width != height:
+ geom_flags |= PIPE_TEXTURE_GEOM_NON_SQUARE
+ if not is_pot(width) or not is_pot(height) or not is_pot(depth):
+ geom_flags |= PIPE_TEXTURE_GEOM_NON_POWER_OF_TWO
+
+ if not dev.is_format_supported(format, target, tex_usage, geom_flags):
+ raise TestSkip
+
+ ctx = self.dev.context_create()
+
+ # disabled blending/masking
+ blend = Blend()
+ blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE
+ blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE
+ blend.rgb_dst_factor = PIPE_BLENDFACTOR_ZERO
+ blend.alpha_dst_factor = PIPE_BLENDFACTOR_ZERO
+ blend.colormask = PIPE_MASK_RGBA
+ ctx.set_blend(blend)
+
+ # no-op depth/stencil/alpha
+ depth_stencil_alpha = DepthStencilAlpha()
+ ctx.set_depth_stencil_alpha(depth_stencil_alpha)
+
+ # rasterizer
+ rasterizer = Rasterizer()
+ rasterizer.front_winding = PIPE_WINDING_CW
+ rasterizer.cull_mode = PIPE_WINDING_NONE
+ rasterizer.bypass_vs_clip_and_viewport = 1
+ ctx.set_rasterizer(rasterizer)
+
+ # samplers
+ sampler = Sampler()
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NEAREST
+ sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST
+ sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST
+ sampler.normalized_coords = 1
+ sampler.min_lod = 0
+ sampler.max_lod = PIPE_MAX_TEXTURE_LEVELS - 1
+ ctx.set_sampler(0, sampler)
+
+ # texture
+ texture = dev.texture_create(
+ target = target,
+ format = format,
+ width = width,
+ height = height,
+ depth = depth,
+ last_level = last_level,
+ tex_usage = tex_usage,
+ )
+
+ expected_rgba = FloatArray(height*width*4)
+ texture.get_surface(
+ face = face,
+ level = level,
+ zslice = zslice,
+ ).sample_rgba(expected_rgba)
+
+ ctx.set_sampler_texture(0, texture)
+
+ # framebuffer
+ cbuf_tex = dev.texture_create(
+ PIPE_FORMAT_A8R8G8B8_UNORM,
+ width,
+ height,
+ tex_usage = PIPE_TEXTURE_USAGE_RENDER_TARGET,
+ )
+
+ cbuf = cbuf_tex.get_surface()
+ fb = Framebuffer()
+ fb.width = width
+ fb.height = height
+ fb.nr_cbufs = 1
+ fb.set_cbuf(0, cbuf)
+ ctx.set_framebuffer(fb)
+ rgba = FloatArray(4)
+ rgba[0] = 0.5
+ rgba[1] = 0.5
+ rgba[2] = 0.5
+ rgba[3] = 0.5
+ ctx.clear(PIPE_CLEAR_COLOR, rgba, 0.0, 0)
+ del fb
+
+ # vertex shader
+ vs = Shader('''
+ VERT1.1
+ DCL IN[0], POSITION, CONSTANT
+ DCL IN[1], GENERIC, CONSTANT
+ DCL OUT[0], POSITION, CONSTANT
+ DCL OUT[1], GENERIC, CONSTANT
+ 0:MOV OUT[0], IN[0]
+ 1:MOV OUT[1], IN[1]
+ 2:END
+ ''')
+ #vs.dump()
+ ctx.set_vertex_shader(vs)
+
+ # fragment shader
+ op = {
+ PIPE_TEXTURE_1D: "1D",
+ PIPE_TEXTURE_2D: "2D",
+ PIPE_TEXTURE_3D: "3D",
+ PIPE_TEXTURE_CUBE: "CUBE",
+ }[target]
+ fs = Shader('''
+ FRAG1.1
+ DCL IN[0], GENERIC[0], LINEAR
+ DCL OUT[0], COLOR, CONSTANT
+ DCL SAMP[0], CONSTANT
+ 0:TEX OUT[0], IN[0], SAMP[0], %s
+ 1:END
+ ''' % op)
+ #fs.dump()
+ ctx.set_fragment_shader(fs)
+
+ nverts = 4
+ nattrs = 2
+ verts = FloatArray(nverts * nattrs * 4)
+
+ x = 0
+ y = 0
+ w, h = minify((width, height), level)
+
+ pos = [
+ [x, y],
+ [x+w, y],
+ [x+w, y+h],
+ [x, y+h],
+ ]
+
+ tex = tex_coords(texture, face, level, zslice)
+
+ for i in range(0, 4):
+ j = 8*i
+ verts[j + 0] = pos[i][0] # x
+ verts[j + 1] = pos[i][1] # y
+ verts[j + 2] = 0.0 # z
+ verts[j + 3] = 1.0 # w
+ verts[j + 4] = tex[i][0] # s
+ verts[j + 5] = tex[i][1] # t
+ verts[j + 6] = tex[i][2] # r
+ verts[j + 7] = 1.0
+
+ ctx.draw_vertices(PIPE_PRIM_TRIANGLE_FAN,
+ nverts,
+ nattrs,
+ verts)
+
+ ctx.flush()
+
+ cbuf = cbuf_tex.get_surface()
+
+ self.assert_rgba(cbuf, x, y, w, h, expected_rgba, 4.0/256, 0.85)
+
+
+class TextureDepthSampleTest(TestCase):
+
+ tags = (
+ 'target',
+ 'format',
+ 'width',
+ 'height',
+ 'depth',
+ 'last_level',
+ 'face',
+ 'level',
+ 'zslice',
+ )
+
+ def test(self):
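+ # as above, but the sampled value is written to fragment depth and the Z buffer contents are compared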
+ dev = self.dev
+
+ target = self.target
+ format = self.format
+ width = self.width
+ height = self.height
+ depth = self.depth
+ last_level = self.last_level
+ face = self.face
+ level = self.level
+ zslice = self.zslice
+
+ tex_usage = PIPE_TEXTURE_USAGE_SAMPLER
+ geom_flags = 0
+ if width != height:
+ geom_flags |= PIPE_TEXTURE_GEOM_NON_SQUARE
+ if not is_pot(width) or not is_pot(height) or not is_pot(depth):
+ geom_flags |= PIPE_TEXTURE_GEOM_NON_POWER_OF_TWO
+
+ if not dev.is_format_supported(format, target, tex_usage, geom_flags):
+ raise TestSkip
+
+ ctx = self.dev.context_create()
+
+ # disabled blending/masking
+ blend = Blend()
+ blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE
+ blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE
+ blend.rgb_dst_factor = PIPE_BLENDFACTOR_ZERO
+ blend.alpha_dst_factor = PIPE_BLENDFACTOR_ZERO
+ blend.colormask = PIPE_MASK_RGBA
+ ctx.set_blend(blend)
+
+ # depth/stencil/alpha
+ depth_stencil_alpha = DepthStencilAlpha()
+ depth_stencil_alpha.depth.enabled = 1
+ depth_stencil_alpha.depth.writemask = 1
+ depth_stencil_alpha.depth.func = PIPE_FUNC_LESS
+ ctx.set_depth_stencil_alpha(depth_stencil_alpha)
+
+ # rasterizer
+ rasterizer = Rasterizer()
+ rasterizer.front_winding = PIPE_WINDING_CW
+ rasterizer.cull_mode = PIPE_WINDING_NONE
+ rasterizer.bypass_vs_clip_and_viewport = 1
+ ctx.set_rasterizer(rasterizer)
+
+ # samplers
+ sampler = Sampler()
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NEAREST
+ sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST
+ sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST
+ sampler.normalized_coords = 1
+ sampler.min_lod = 0
+ sampler.max_lod = PIPE_MAX_TEXTURE_LEVELS - 1
+ ctx.set_sampler(0, sampler)
+
+ # texture
+ texture = dev.texture_create(
+ target = target,
+ format = format,
+ width = width,
+ height = height,
+ depth = depth,
+ last_level = last_level,
+ tex_usage = tex_usage,
+ )
+
+ expected_rgba = FloatArray(height*width*4)
+ texture.get_surface(
+ face = face,
+ level = level,
+ zslice = zslice,
+ ).sample_rgba(expected_rgba)
+
+ ctx.set_sampler_texture(0, texture)
+
+ # framebuffer
+ cbuf_tex = dev.texture_create(
+ PIPE_FORMAT_A8R8G8B8_UNORM,
+ width,
+ height,
+ tex_usage = PIPE_TEXTURE_USAGE_RENDER_TARGET,
+ )
+
+ zsbuf_tex = dev.texture_create(
+ PIPE_FORMAT_Z24X8_UNORM,
+ width,
+ height,
+ tex_usage = PIPE_TEXTURE_USAGE_RENDER_TARGET,
+ )
+
+ cbuf = cbuf_tex.get_surface()
+ zsbuf = zsbuf_tex.get_surface()
+ fb = Framebuffer()
+ fb.width = width
+ fb.height = height
+ fb.nr_cbufs = 1
+ fb.set_cbuf(0, cbuf)
+ fb.set_zsbuf(zsbuf)
+ ctx.set_framebuffer(fb)
+ rgba = FloatArray(4)
+ rgba[0] = 0.5
+ rgba[1] = 0.5
+ rgba[2] = 0.5
+ rgba[3] = 0.5
+ ctx.clear(PIPE_CLEAR_DEPTHSTENCIL, rgba, 1.0, 0)
+ del fb
+
+ # vertex shader
+ vs = Shader('''
+ VERT1.1
+ DCL IN[0], POSITION, CONSTANT
+ DCL IN[1], GENERIC, CONSTANT
+ DCL OUT[0], POSITION, CONSTANT
+ DCL OUT[1], GENERIC, CONSTANT
+ 0:MOV OUT[0], IN[0]
+ 1:MOV OUT[1], IN[1]
+ 2:END
+ ''')
+ #vs.dump()
+ ctx.set_vertex_shader(vs)
+
+ # fragment shader
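+ # (the sampled texel is written to OUT[0].z, i.e. the fragment depth)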
+ op = {
+ PIPE_TEXTURE_1D: "1D",
+ PIPE_TEXTURE_2D: "2D",
+ PIPE_TEXTURE_3D: "3D",
+ PIPE_TEXTURE_CUBE: "CUBE",
+ }[target]
+ fs = Shader('''
+ FRAG1.1
+ DCL IN[0], GENERIC[0], LINEAR
+ DCL SAMP[0], CONSTANT
+ DCL OUT[0].z, POSITION
+ 0:TEX OUT[0].z, IN[0], SAMP[0], %s
+ 1:END
+ ''' % op)
+ #fs.dump()
+ ctx.set_fragment_shader(fs)
+
+ nverts = 4
+ nattrs = 2
+ verts = FloatArray(nverts * nattrs * 4)
+
+ x = 0
+ y = 0
+ w, h = minify((width, height), level)
+
+ pos = [
+ [x, y],
+ [x+w, y],
+ [x+w, y+h],
+ [x, y+h],
+ ]
+
+ tex = tex_coords(texture, face, level, zslice)
+
+ for i in range(0, 4):
+ j = 8*i
+ verts[j + 0] = pos[i][0] # x
+ verts[j + 1] = pos[i][1] # y
+ verts[j + 2] = 0.0 # z
+ verts[j + 3] = 1.0 # w
+ verts[j + 4] = tex[i][0] # s
+ verts[j + 5] = tex[i][1] # t
+ verts[j + 6] = tex[i][2] # r
+ verts[j + 7] = 1.0
+
+ ctx.draw_vertices(PIPE_PRIM_TRIANGLE_FAN,
+ nverts,
+ nattrs,
+ verts)
+
+ ctx.flush()
+
+ zsbuf = zsbuf_tex.get_surface()
+
+ self.assert_rgba(zsbuf, x, y, w, h, expected_rgba, 4.0/256, 0.85)
+
+
+
+
+def main():
+ dev = Device()
+ suite = TestSuite()
+
+ targets = [
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_CUBE,
+ PIPE_TEXTURE_3D,
+ ]
+
+ color_formats = [
+ PIPE_FORMAT_A8R8G8B8_UNORM,
+ PIPE_FORMAT_X8R8G8B8_UNORM,
+ #PIPE_FORMAT_A8R8G8B8_SRGB,
+ PIPE_FORMAT_R5G6B5_UNORM,
+ PIPE_FORMAT_A1R5G5B5_UNORM,
+ PIPE_FORMAT_A4R4G4B4_UNORM,
+ PIPE_FORMAT_A8_UNORM,
+ PIPE_FORMAT_L8_UNORM,
+ PIPE_FORMAT_YCBCR,
+ PIPE_FORMAT_DXT1_RGB,
+ #PIPE_FORMAT_DXT1_RGBA,
+ #PIPE_FORMAT_DXT3_RGBA,
+ #PIPE_FORMAT_DXT5_RGBA,
+ ]
+
+ depth_formats = [
+ PIPE_FORMAT_Z32_UNORM,
+ PIPE_FORMAT_Z24S8_UNORM,
+ PIPE_FORMAT_Z24X8_UNORM,
+ PIPE_FORMAT_Z16_UNORM,
+ ]
+
+ sizes = [64, 32, 16, 8, 4, 2, 1]
+ #sizes = [1020, 508, 252, 62, 30, 14, 6, 3]
+ #sizes = [64]
+ #sizes = [63]
+
+ faces = [
+ PIPE_TEX_FACE_POS_X,
+ PIPE_TEX_FACE_NEG_X,
+ PIPE_TEX_FACE_POS_Y,
+ PIPE_TEX_FACE_NEG_Y,
+ PIPE_TEX_FACE_POS_Z,
+ PIPE_TEX_FACE_NEG_Z,
+ ]
+
+ for format in color_formats:
+ for target in targets:
+ for size in sizes:
+ if target == PIPE_TEXTURE_3D:
+ depth = size
+ else:
+ depth = 1
+ for face in faces:
+ if target != PIPE_TEXTURE_CUBE and face:
+ continue
+ levels = lods(size)
+ for last_level in range(levels):
+ for level in range(0, last_level + 1):
+ zslice = 0
+ while zslice < depth >> level:
+ test = TextureColorSampleTest(
+ dev = dev,
+ target = target,
+ format = format,
+ width = size,
+ height = size,
+ depth = depth,
+ last_level = last_level,
+ face = face,
+ level = level,
+ zslice = zslice,
+ )
+ suite.add_test(test)
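+ # zslice steps through 0, 1, 3, 7, ... rather than every slice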
+ zslice = (zslice + 1)*2 - 1
+ for format in depth_formats:
+ target = PIPE_TEXTURE_2D
+ depth = 1
+ face = 0
+ last_level = 0
+ level = 0
+ zslice = 0
+ for size in sizes:
+ test = TextureDepthSampleTest(
+ dev = dev,
+ target = target,
+ format = format,
+ width = size,
+ height = size,
+ depth = depth,
+ last_level = last_level,
+ face = face,
+ level = level,
+ zslice = zslice,
+ )
+ suite.add_test(test)
+ suite.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/src/gallium/state_trackers/python/tests/texture_transfer.py b/src/gallium/state_trackers/python/tests/texture_transfer.py
new file mode 100755
index 0000000000..e65b425adf
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/texture_transfer.py
@@ -0,0 +1,179 @@
+#!/usr/bin/env python
+##########################################################################
+#
+# Copyright 2009 VMware, Inc.
+# Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+# All Rights Reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sub license, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice (including the
+# next paragraph) shall be included in all copies or substantial portions
+# of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+# IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+##########################################################################
+
+
+import os
+
+from gallium import *
+from base import *
+
+
+def lods(*dims):
+ size = max(dims)
+ lods = 0
+ while size:
+ lods += 1
+ size >>= 1
+ return lods
+
+
+class TextureTest(TestCase):
+
+ tags = (
+ 'target',
+ 'format',
+ 'width',
+ 'height',
+ 'depth',
+ 'last_level',
+ 'face',
+ 'level',
+ 'zslice',
+ )
+
+ def test(self):
+ dev = self.dev
+
+ target = self.target
+ format = self.format
+ width = self.width
+ height = self.height
+ depth = self.depth
+ last_level = self.last_level
+ face = self.face
+ level = self.level
+ zslice = self.zslice
+
+ tex_usage = 0
+
+ texture = dev.texture_create(
+ target = target,
+ format = format,
+ width = width,
+ height = height,
+ depth = depth,
+ last_level = last_level,
+ tex_usage = tex_usage,
+ )
+ if texture is None:
+ raise TestSkip
+
+ surface = texture.get_surface(face, level, zslice)
+
+ stride = surface.nblocksx * texture.block.size
+ size = surface.nblocksy * stride
+
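+        # Round-trip a buffer of random bytes through the surface and make
+        # sure it comes back unchanged.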
+ in_raw = os.urandom(size)
+
+ surface.put_tile_raw(0, 0, surface.width, surface.height, in_raw, stride)
+
+ out_raw = surface.get_tile_raw(0, 0, surface.width, surface.height)
+
+ if in_raw != out_raw:
+ raise TestFailure
+
+
+def main():
+ dev = Device()
+ suite = TestSuite()
+
+ targets = [
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_CUBE,
+ PIPE_TEXTURE_3D,
+ ]
+
+ formats = [
+ PIPE_FORMAT_A8R8G8B8_UNORM,
+ PIPE_FORMAT_X8R8G8B8_UNORM,
+ PIPE_FORMAT_A8R8G8B8_SRGB,
+ PIPE_FORMAT_R5G6B5_UNORM,
+ PIPE_FORMAT_A1R5G5B5_UNORM,
+ PIPE_FORMAT_A4R4G4B4_UNORM,
+ PIPE_FORMAT_Z32_UNORM,
+ PIPE_FORMAT_Z24S8_UNORM,
+ PIPE_FORMAT_Z24X8_UNORM,
+ PIPE_FORMAT_Z16_UNORM,
+ PIPE_FORMAT_S8_UNORM,
+ PIPE_FORMAT_A8_UNORM,
+ PIPE_FORMAT_L8_UNORM,
+ PIPE_FORMAT_DXT1_RGB,
+ PIPE_FORMAT_DXT1_RGBA,
+ PIPE_FORMAT_DXT3_RGBA,
+ PIPE_FORMAT_DXT5_RGBA,
+ ]
+
+ sizes = [64, 32, 16, 8, 4, 2, 1]
+ #sizes = [1020, 508, 252, 62, 30, 14, 6, 3]
+ #sizes = [64]
+ #sizes = [63]
+
+ faces = [
+ PIPE_TEX_FACE_POS_X,
+ PIPE_TEX_FACE_NEG_X,
+ PIPE_TEX_FACE_POS_Y,
+ PIPE_TEX_FACE_NEG_Y,
+ PIPE_TEX_FACE_POS_Z,
+ PIPE_TEX_FACE_NEG_Z,
+ ]
+
+ for target in targets:
+ for format in formats:
+ for size in sizes:
+ if target == PIPE_TEXTURE_3D:
+ depth = size
+ else:
+ depth = 1
+ for face in faces:
+ if target != PIPE_TEXTURE_CUBE and face:
+ continue
+ levels = lods(size)
+ for last_level in range(levels):
+ for level in range(0, last_level + 1):
+ zslice = 0
+ while zslice < depth >> level:
+ test = TextureTest(
+ dev = dev,
+ target = target,
+ format = format,
+ width = size,
+ height = size,
+ depth = depth,
+ last_level = last_level,
+ face = face,
+ level = level,
+ zslice = zslice,
+ )
+ suite.add_test(test)
+ zslice = (zslice + 1)*2 - 1
+ suite.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/src/gallium/state_trackers/python/tests/tree.py b/src/gallium/state_trackers/python/tests/tree.py
new file mode 100755
index 0000000000..0c1bcda4cf
--- /dev/null
+++ b/src/gallium/state_trackers/python/tests/tree.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+#
+# See also:
+# http://www.ailab.si/orange/doc/ofb/c_otherclass.htm
+
+import os.path
+import sys
+
+import orange
+import orngTree
+
+for arg in sys.argv[1:]:
+ name, ext = os.path.splitext(arg)
+
+ data = orange.ExampleTable(arg)
+
+ tree = orngTree.TreeLearner(data, sameMajorityPruning=1, mForPruning=2)
+
+ orngTree.printTxt(tree)
+
+    open(name + '.txt', 'wt').write(orngTree.dumpTree(tree) + '\n')
+
+ orngTree.printDot(tree, fileName=name+'.dot', nodeShape='ellipse', leafShape='box')
diff --git a/src/gallium/state_trackers/vega/Makefile b/src/gallium/state_trackers/vega/Makefile
new file mode 100644
index 0000000000..b8c805b06c
--- /dev/null
+++ b/src/gallium/state_trackers/vega/Makefile
@@ -0,0 +1,128 @@
+# src/gallium/state_trackers/vega/Makefile
+
+TOP = ../../../..
+include $(TOP)/configs/current
+GALLIUM = $(TOP)
+
+### Lists of source files, included by Makefiles
+
+VG_SOURCES = \
+ api_context.c \
+ api_filters.c \
+ api_images.c \
+ api_masks.c \
+ api_misc.c \
+ api_paint.c \
+ api_params.c \
+ api_path.c \
+ api_text.c \
+ api_transform.c \
+ vgu.c \
+ vg_context.c \
+ vg_state.c \
+ vg_tracker.c \
+ vg_translate.c \
+ polygon.c \
+ bezier.c \
+ path.c \
+ paint.c \
+ arc.c \
+ image.c \
+ renderer.c \
+ stroker.c \
+ mask.c \
+ shader.c \
+ shaders_cache.c
+
+
+### All the core C sources
+
+ALL_SOURCES = \
+ $(VG_SOURCES)
+
+
+### Object files
+VG_OBJECTS = \
+ $(VG_SOURCES:.c=.o)
+
+### Include directories
+
+INCLUDE_DIRS = \
+ -I$(TOP)/include \
+ -I$(GALLIUM)/include \
+ -I$(GALLIUM)/src/gallium/include \
+ -I$(GALLIUM)/src/gallium/auxiliary
+
+VG_LIB = OpenVG
+VG_LIB_NAME = lib$(VG_LIB).so
+
+VG_MAJOR = 1
+VG_MINOR = 0
+VG_TINY = 0
+
+GALLIUM_LIBS = \
+ $(GALLIUM)/src/gallium/auxiliary/pipebuffer/libpipebuffer.a \
+ $(GALLIUM)/src/gallium/auxiliary/sct/libsct.a \
+ $(GALLIUM)/src/gallium/auxiliary/draw/libdraw.a \
+ $(GALLIUM)/src/gallium/auxiliary/rtasm/librtasm.a \
+ $(GALLIUM)/src/gallium/auxiliary/translate/libtranslate.a \
+ $(GALLIUM)/src/gallium/auxiliary/cso_cache/libcso_cache.a \
+ $(GALLIUM)/src/gallium/auxiliary/util/libutil.a \
+ $(GALLIUM)/src/gallium/auxiliary/tgsi/libtgsi.a
+
+.SUFFIXES : .cpp
+
+.c.o:
+ $(CC) -c $(INCLUDE_DIRS) $(CFLAGS) $< -o $@
+
+.cpp.o:
+ $(CXX) -c $(INCLUDE_DIRS) $(CXXFLAGS) $< -o $@
+
+.S.o:
+ $(CC) -c $(INCLUDE_DIRS) $(CFLAGS) $< -o $@
+
+
+default: depend subdirs $(TOP)/$(LIB_DIR)/$(VG_LIB_NAME)
+
+# Make the OpenVG library
+$(TOP)/$(LIB_DIR)/$(VG_LIB_NAME): $(VG_OBJECTS) $(GALLIUM_LIBS)
+ $(TOP)/bin/mklib -o $(VG_LIB) \
+ -major $(VG_MAJOR) \
+ -minor $(VG_MINOR) \
+ -patch $(VG_TINY) \
+ -install $(TOP)/$(LIB_DIR) \
+ $(VG_OBJECTS) $(GALLIUM_LIBS) \
+ -Wl,--whole-archive $(LIBS) -Wl,--no-whole-archive $(SYS_LIBS)
+
+######################################################################
+# Generic stuff
+
+depend: $(ALL_SOURCES)
+ @ echo "running $(MKDEP)"
+ @ rm -f depend # workaround oops on gutsy?!?
+ @ touch depend
+ @ $(MKDEP) $(MKDEP_OPTIONS) $(DEFINES) $(INCLUDE_DIRS) $(ALL_SOURCES) \
+ > /dev/null 2>/dev/null
+
+
+subdirs:
+
+install: default
+ $(INSTALL) -d $(INSTALL_DIR)/include/VG
+ $(INSTALL) -d $(INSTALL_DIR)/$(LIB_DIR)
+ $(INSTALL) -m 644 $(TOP)/include/VG/*.h $(INSTALL_DIR)/include/VG
+ @if [ -e $(TOP)/$(LIB_DIR)/$(VG_LIB_NAME) ]; then \
+ $(INSTALL) $(TOP)/$(LIB_DIR)/libOpenVG* $(INSTALL_DIR)/$(LIB_DIR); \
+ fi
+
+# Emacs tags
+tags:
+ etags `find . -name \*.[ch]` $(TOP)/include/VG/*.h
+
+clean:
+ -rm -f *.o
+ -rm -f */*.o
+ -rm -f */*/*.o
+ -rm -f depend depend.bak
+
+include depend
diff --git a/src/gallium/state_trackers/vega/api_consts.h b/src/gallium/state_trackers/vega/api_consts.h
new file mode 100644
index 0000000000..e1b48d4a46
--- /dev/null
+++ b/src/gallium/state_trackers/vega/api_consts.h
@@ -0,0 +1,56 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef API_CONSTS_H
+#define API_CONSTS_H
+
+/* must be at least 32 */
+#define VEGA_MAX_SCISSOR_RECTS 32
+
+/* must be at least 16 */
+#define VEGA_MAX_DASH_COUNT 32
+
+/* must be at least 7 */
+#define VEGA_MAX_KERNEL_SIZE 7
+
+/* must be at least 15 */
+#define VEGA_MAX_SEPARABLE_KERNEL_SIZE 15
+
+/* must be at least 32 */
+#define VEGA_MAX_COLOR_RAMP_STOPS 256
+
+#define VEGA_MAX_IMAGE_WIDTH 2048
+
+#define VEGA_MAX_IMAGE_HEIGHT 2048
+
+#define VEGA_MAX_IMAGE_PIXELS (2048*2048)
+
+#define VEGA_MAX_IMAGE_BYTES (2048*2048 * 4)
+
+/* must be at least 128 */
+#define VEGA_MAX_GAUSSIAN_STD_DEVIATION 128
+
+#endif
diff --git a/src/gallium/state_trackers/vega/api_context.c b/src/gallium/state_trackers/vega/api_context.c
new file mode 100644
index 0000000000..47db102dd2
--- /dev/null
+++ b/src/gallium/state_trackers/vega/api_context.c
@@ -0,0 +1,75 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "VG/openvg.h"
+
+#include "vg_context.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_screen.h"
+
+VGErrorCode vgGetError(void)
+{
+ struct vg_context *ctx = vg_current_context();
+ VGErrorCode error = VG_NO_CONTEXT_ERROR;
+
+ if (!ctx)
+ return error;
+
+ error = ctx->_error;
+ ctx->_error = VG_NO_ERROR;
+
+ return error;
+}
+
+void vgFlush(void)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_context *pipe;
+
+ if (!ctx)
+ return;
+
+ pipe = ctx->pipe;
+ pipe->flush(pipe, PIPE_FLUSH_RENDER_CACHE, NULL);
+}
+
+void vgFinish(void)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_fence_handle *fence = NULL;
+ struct pipe_context *pipe;
+
+ if (!ctx)
+ return;
+
+ pipe = ctx->pipe;
+
+ pipe->flush(pipe, PIPE_FLUSH_RENDER_CACHE | PIPE_FLUSH_FRAME, &fence);
+
+ pipe->screen->fence_finish(pipe->screen, fence, 0);
+ pipe->screen->fence_reference(pipe->screen, &fence, NULL);
+}
diff --git a/src/gallium/state_trackers/vega/api_filters.c b/src/gallium/state_trackers/vega/api_filters.c
new file mode 100644
index 0000000000..862cbb03c4
--- /dev/null
+++ b/src/gallium/state_trackers/vega/api_filters.c
@@ -0,0 +1,805 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "VG/openvg.h"
+
+#include "vg_context.h"
+#include "image.h"
+#include "renderer.h"
+#include "shaders_cache.h"
+#include "st_inlines.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+#include "pipe/p_screen.h"
+#include "pipe/p_shader_tokens.h"
+
+#include "util/u_memory.h"
+
+
+#include "asm_filters.h"
+
+
+struct filter_info {
+ struct vg_image *dst;
+ struct vg_image *src;
+ struct vg_shader * (*setup_shader)(struct vg_context *, void *);
+ void *user_data;
+ const void *const_buffer;
+ VGint const_buffer_len;
+ VGTilingMode tiling_mode;
+ struct pipe_texture *extra_texture;
+};
+
+static INLINE struct pipe_texture *create_texture_1d(struct vg_context *ctx,
+ const VGuint *color_data,
+ const VGint color_data_len)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ struct pipe_texture *tex = 0;
+ struct pipe_texture templ;
+
+ memset(&templ, 0, sizeof(templ));
+ templ.target = PIPE_TEXTURE_1D;
+ templ.format = PIPE_FORMAT_A8R8G8B8_UNORM;
+ templ.last_level = 0;
+ templ.width[0] = color_data_len;
+ templ.height[0] = 1;
+ templ.depth[0] = 1;
+ pf_get_block(PIPE_FORMAT_A8R8G8B8_UNORM, &templ.block);
+ templ.tex_usage = PIPE_TEXTURE_USAGE_SAMPLER;
+
+ tex = screen->texture_create(screen, &templ);
+
+ { /* upload color_data */
+ struct pipe_transfer *transfer =
+ screen->get_tex_transfer(screen, tex,
+ 0, 0, 0,
+ PIPE_TRANSFER_READ_WRITE ,
+ 0, 0, tex->width[0], tex->height[0]);
+ void *map = screen->transfer_map(screen, transfer);
+ memcpy(map, color_data, sizeof(VGint)*color_data_len);
+ screen->transfer_unmap(screen, transfer);
+ screen->tex_transfer_destroy(transfer);
+ }
+
+ return tex;
+}
+
+static INLINE struct pipe_surface * setup_framebuffer(struct vg_image *dst)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_framebuffer_state fb;
+ struct pipe_surface *dst_surf = pipe->screen->get_tex_surface(
+ pipe->screen, dst->texture, 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /* drawing dest */
+ memset(&fb, 0, sizeof(fb));
+ fb.width = dst->x + dst_surf->width;
+ fb.height = dst->y + dst_surf->height;
+ fb.nr_cbufs = 1;
+ fb.cbufs[0] = dst_surf;
+ {
+ VGint i;
+ for (i = 1; i < PIPE_MAX_COLOR_BUFS; ++i)
+ fb.cbufs[i] = 0;
+ }
+ cso_set_framebuffer(ctx->cso_context, &fb);
+
+ return dst_surf;
+}
+
+static void setup_viewport(struct vg_image *dst)
+{
+ struct vg_context *ctx = vg_current_context();
+ vg_set_viewport(ctx, VEGA_Y0_TOP);
+}
+
+static void setup_blend(void)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_blend_state blend;
+ memset(&blend, 0, sizeof(blend));
+ blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ blend.alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ if (ctx->state.vg.filter_channel_mask & VG_RED)
+ blend.colormask |= PIPE_MASK_R;
+ if (ctx->state.vg.filter_channel_mask & VG_GREEN)
+ blend.colormask |= PIPE_MASK_G;
+ if (ctx->state.vg.filter_channel_mask & VG_BLUE)
+ blend.colormask |= PIPE_MASK_B;
+ if (ctx->state.vg.filter_channel_mask & VG_ALPHA)
+ blend.colormask |= PIPE_MASK_A;
+ blend.blend_enable = 1;
+ cso_set_blend(ctx->cso_context, &blend);
+}
+
+static void setup_constant_buffer(struct vg_context *ctx, const void *buffer,
+ VGint param_bytes)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_constant_buffer *cbuf = &ctx->filter.buffer;
+
+ /* We always need to get a new buffer, to keep the drivers simple and
+ * avoid gratuitous rendering synchronization. */
+ pipe_buffer_reference(&cbuf->buffer, NULL);
+
+ cbuf->buffer = pipe_buffer_create(pipe->screen, 16,
+ PIPE_BUFFER_USAGE_CONSTANT,
+ param_bytes);
+
+ if (cbuf->buffer) {
+ st_no_flush_pipe_buffer_write(ctx, cbuf->buffer,
+ 0, param_bytes, buffer);
+ }
+
+ ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, cbuf);
+}
+
+static void setup_samplers(struct vg_context *ctx, struct filter_info *info)
+{
+ struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
+ struct pipe_texture *textures[PIPE_MAX_SAMPLERS];
+ struct pipe_sampler_state sampler[3];
+ int num_samplers = 0;
+ int num_textures = 0;
+
+ samplers[0] = NULL;
+ samplers[1] = NULL;
+ samplers[2] = NULL;
+ samplers[3] = NULL;
+ textures[0] = NULL;
+ textures[1] = NULL;
+ textures[2] = NULL;
+ textures[3] = NULL;
+
+ memset(&sampler[0], 0, sizeof(struct pipe_sampler_state));
+ sampler[0].wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler[0].wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler[0].wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+   sampler[0].min_img_filter = PIPE_TEX_FILTER_LINEAR;
+   sampler[0].mag_img_filter = PIPE_TEX_FILTER_LINEAR;
+ sampler[0].normalized_coords = 1;
+
+ switch(info->tiling_mode) {
+ case VG_TILE_FILL:
+ sampler[0].wrap_s = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
+ sampler[0].wrap_t = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
+ memcpy(sampler[0].border_color,
+ ctx->state.vg.tile_fill_color,
+ sizeof(VGfloat) * 4);
+ break;
+ case VG_TILE_PAD:
+ sampler[0].wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler[0].wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ break;
+ case VG_TILE_REPEAT:
+ sampler[0].wrap_s = PIPE_TEX_WRAP_REPEAT;
+ sampler[0].wrap_t = PIPE_TEX_WRAP_REPEAT;
+ break;
+ case VG_TILE_REFLECT:
+ sampler[0].wrap_s = PIPE_TEX_WRAP_MIRROR_REPEAT;
+ sampler[0].wrap_t = PIPE_TEX_WRAP_MIRROR_REPEAT;
+ break;
+ default:
+ debug_assert(!"Unknown tiling mode");
+ }
+
+ samplers[0] = &sampler[0];
+ textures[0] = info->src->texture;
+ ++num_samplers;
+ ++num_textures;
+
+ if (info->extra_texture) {
+ memcpy(&sampler[1], &sampler[0], sizeof(struct pipe_sampler_state));
+ samplers[1] = &sampler[1];
+ textures[1] = info->extra_texture;
+ ++num_samplers;
+ ++num_textures;
+ }
+
+
+ cso_set_samplers(ctx->cso_context, num_samplers, (const struct pipe_sampler_state **)samplers);
+ cso_set_sampler_textures(ctx->cso_context, num_textures, textures);
+}
+
+static struct vg_shader * setup_color_matrix(struct vg_context *ctx, void *user_data)
+{
+ struct vg_shader *shader =
+ shader_create_from_text(ctx->pipe, color_matrix_asm, 200,
+ PIPE_SHADER_FRAGMENT);
+ cso_set_fragment_shader_handle(ctx->cso_context, shader->driver);
+ return shader;
+}
+
+static struct vg_shader * setup_convolution(struct vg_context *ctx, void *user_data)
+{
+ char buffer[1024];
+ VGint num_consts = (VGint)(long)(user_data);
+ struct vg_shader *shader;
+
+ snprintf(buffer, 1023, convolution_asm, num_consts, num_consts / 2 + 1);
+
+ shader = shader_create_from_text(ctx->pipe, buffer, 200,
+ PIPE_SHADER_FRAGMENT);
+
+ cso_set_fragment_shader_handle(ctx->cso_context, shader->driver);
+ return shader;
+}
+
+static struct vg_shader * setup_lookup(struct vg_context *ctx, void *user_data)
+{
+ struct vg_shader *shader =
+ shader_create_from_text(ctx->pipe, lookup_asm,
+ 200, PIPE_SHADER_FRAGMENT);
+
+ cso_set_fragment_shader_handle(ctx->cso_context, shader->driver);
+ return shader;
+}
+
+
+static struct vg_shader * setup_lookup_single(struct vg_context *ctx, void *user_data)
+{
+ char buffer[1024];
+ VGImageChannel channel = (VGImageChannel)(user_data);
+ struct vg_shader *shader;
+
+ switch(channel) {
+ case VG_RED:
+ snprintf(buffer, 1023, lookup_single_asm, "xxxx");
+ break;
+ case VG_GREEN:
+ snprintf(buffer, 1023, lookup_single_asm, "yyyy");
+ break;
+ case VG_BLUE:
+ snprintf(buffer, 1023, lookup_single_asm, "zzzz");
+ break;
+ case VG_ALPHA:
+ snprintf(buffer, 1023, lookup_single_asm, "wwww");
+ break;
+ default:
+ debug_assert(!"Unknown color channel");
+ }
+
+ shader = shader_create_from_text(ctx->pipe, buffer, 200,
+ PIPE_SHADER_FRAGMENT);
+
+ cso_set_fragment_shader_handle(ctx->cso_context, shader->driver);
+ return shader;
+}
+
+static void execute_filter(struct vg_context *ctx,
+ struct filter_info *info)
+{
+ struct pipe_surface *dst_surf;
+ struct vg_shader *shader;
+
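+
+   /* Save the CSO state that the filter setup below replaces; it is
+    * restored once the filtered quad has been drawn. */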
+ cso_save_framebuffer(ctx->cso_context);
+ cso_save_fragment_shader(ctx->cso_context);
+ cso_save_viewport(ctx->cso_context);
+ cso_save_blend(ctx->cso_context);
+ cso_save_samplers(ctx->cso_context);
+ cso_save_sampler_textures(ctx->cso_context);
+
+ dst_surf = setup_framebuffer(info->dst);
+ setup_viewport(info->dst);
+ setup_blend();
+ setup_constant_buffer(ctx, info->const_buffer, info->const_buffer_len);
+ shader = info->setup_shader(ctx, info->user_data);
+ setup_samplers(ctx, info);
+
+ renderer_draw_texture(ctx->renderer,
+ info->src->texture,
+ info->dst->x, info->dst->y,
+ info->dst->x + info->dst->width,
+ info->dst->y + info->dst->height,
+ info->dst->x, info->dst->y,
+ info->dst->x + info->dst->width,
+ info->dst->y + info->dst->height);
+
+ cso_restore_framebuffer(ctx->cso_context);
+ cso_restore_fragment_shader(ctx->cso_context);
+ cso_restore_viewport(ctx->cso_context);
+ cso_restore_blend(ctx->cso_context);
+ cso_restore_samplers(ctx->cso_context);
+ cso_restore_sampler_textures(ctx->cso_context);
+
+ vg_shader_destroy(ctx, shader);
+
+ pipe_surface_reference(&dst_surf, NULL);
+}
+
+void vgColorMatrix(VGImage dst, VGImage src,
+ const VGfloat * matrix)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_image *d, *s;
+ struct filter_info info;
+
+ if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (!matrix || !is_aligned(matrix)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ d = (struct vg_image*)dst;
+ s = (struct vg_image*)src;
+
+ if (vg_image_overlaps(d, s)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ info.dst = d;
+ info.src = s;
+ info.setup_shader = &setup_color_matrix;
+ info.user_data = NULL;
+ info.const_buffer = matrix;
+ info.const_buffer_len = 20 * sizeof(VGfloat);
+ info.tiling_mode = VG_TILE_PAD;
+ info.extra_texture = 0;
+ execute_filter(ctx, &info);
+}
+
+static VGfloat texture_offset(VGfloat width, VGint kernelSize, VGint current, VGint shift)
+{
+ VGfloat diff = current - shift;
+
+ return diff / width;
+}
+
+void vgConvolve(VGImage dst, VGImage src,
+ VGint kernelWidth, VGint kernelHeight,
+ VGint shiftX, VGint shiftY,
+ const VGshort * kernel,
+ VGfloat scale,
+ VGfloat bias,
+ VGTilingMode tilingMode)
+{
+ struct vg_context *ctx = vg_current_context();
+ VGfloat *buffer;
+ VGint buffer_len;
+ VGint i, j;
+ VGint idx = 0;
+ struct vg_image *d, *s;
+ VGint kernel_size = kernelWidth * kernelHeight;
+ struct filter_info info;
+ const VGint max_kernel_size = vgGeti(VG_MAX_KERNEL_SIZE);
+
+ if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ if (kernelWidth <= 0 || kernelHeight <= 0 ||
+ kernelWidth > max_kernel_size || kernelHeight > max_kernel_size) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ if (!kernel || !is_aligned_to(kernel, 2)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ if (tilingMode < VG_TILE_FILL ||
+ tilingMode > VG_TILE_REFLECT) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ d = (struct vg_image*)dst;
+ s = (struct vg_image*)src;
+
+ if (vg_image_overlaps(d, s)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ vg_validate_state(ctx);
+
+ buffer_len = 8 + 2 * 4 * kernel_size;
+ buffer = (VGfloat*)malloc(buffer_len * sizeof(VGfloat));
+
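+   /* Constant buffer layout: buffer[0..3] are common constants, buffer[4..7]
+    * hold the kernel element count, scale and bias, then one vec4 of texel
+    * offsets per kernel element followed by the kernel weights replicated
+    * across all four channels. */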
+ buffer[0] = 0.f;
+ buffer[1] = 1.f;
+ buffer[2] = 2.f; /*unused*/
+ buffer[3] = 4.f; /*unused*/
+
+ buffer[4] = kernelWidth * kernelHeight;
+ buffer[5] = scale;
+ buffer[6] = bias;
+ buffer[7] = 0.f;
+
+ idx = 8;
+ for (j = 0; j < kernelHeight; ++j) {
+ for (i = 0; i < kernelWidth; ++i) {
+ VGint index = j * kernelWidth + i;
+ VGfloat x, y;
+
+ x = texture_offset(s->width, kernelWidth, i, shiftX);
+ y = texture_offset(s->height, kernelHeight, j, shiftY);
+
+ buffer[idx + index*4 + 0] = x;
+ buffer[idx + index*4 + 1] = y;
+ buffer[idx + index*4 + 2] = 0.f;
+ buffer[idx + index*4 + 3] = 0.f;
+ }
+ }
+ idx += kernel_size * 4;
+
+ for (j = 0; j < kernelHeight; ++j) {
+ for (i = 0; i < kernelWidth; ++i) {
+ /* transpose the kernel */
+ VGint index = j * kernelWidth + i;
+ VGint kindex = (kernelWidth - i - 1) * kernelHeight + (kernelHeight - j - 1);
+ buffer[idx + index*4 + 0] = kernel[kindex];
+ buffer[idx + index*4 + 1] = kernel[kindex];
+ buffer[idx + index*4 + 2] = kernel[kindex];
+ buffer[idx + index*4 + 3] = kernel[kindex];
+ }
+ }
+
+ info.dst = d;
+ info.src = s;
+ info.setup_shader = &setup_convolution;
+ info.user_data = (void*)(long)(buffer_len/4);
+ info.const_buffer = buffer;
+ info.const_buffer_len = buffer_len * sizeof(VGfloat);
+ info.tiling_mode = tilingMode;
+ info.extra_texture = 0;
+ execute_filter(ctx, &info);
+
+ free(buffer);
+}
+
+void vgSeparableConvolve(VGImage dst, VGImage src,
+ VGint kernelWidth,
+ VGint kernelHeight,
+ VGint shiftX, VGint shiftY,
+ const VGshort * kernelX,
+ const VGshort * kernelY,
+ VGfloat scale,
+ VGfloat bias,
+ VGTilingMode tilingMode)
+{
+ struct vg_context *ctx = vg_current_context();
+ VGshort *kernel;
+ VGint i, j, idx = 0;
+ const VGint max_kernel_size = vgGeti(VG_MAX_SEPARABLE_KERNEL_SIZE);
+
+ if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ if (kernelWidth <= 0 || kernelHeight <= 0 ||
+ kernelWidth > max_kernel_size || kernelHeight > max_kernel_size) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ if (!kernelX || !kernelY ||
+ !is_aligned_to(kernelX, 2) || !is_aligned_to(kernelY, 2)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ if (tilingMode < VG_TILE_FILL ||
+ tilingMode > VG_TILE_REFLECT) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ kernel = malloc(sizeof(VGshort)*kernelWidth*kernelHeight);
+ for (i = 0; i < kernelWidth; ++i) {
+ for (j = 0; j < kernelHeight; ++j) {
+ kernel[idx] = kernelX[i] * kernelY[j];
+ ++idx;
+ }
+ }
+ vgConvolve(dst, src, kernelWidth, kernelHeight, shiftX, shiftY,
+ kernel, scale, bias, tilingMode);
+ free(kernel);
+}
+
+static INLINE VGfloat compute_gaussian_component(VGfloat x, VGfloat y,
+                                                 VGfloat stdDeviationX,
+                                                 VGfloat stdDeviationY)
+{
+   VGfloat mult = 1 / (2 * M_PI * stdDeviationX * stdDeviationY);
+   VGfloat e = exp(-(pow(x, 2) / (2 * pow(stdDeviationX, 2)) +
+                     pow(y, 2) / (2 * pow(stdDeviationY, 2))));
+   return mult * e;
+}
+
+static INLINE VGint compute_kernel_size(VGfloat deviation)
+{
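+   /* 2.146 is roughly sqrt(-2 * ln(0.1)), i.e. the radius at which the
+    * Gaussian has fallen to about a tenth of its peak value; the kernel
+    * size is capped at 11 texels per axis. */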
+ VGint size = ceil(2.146 * deviation);
+ if (size > 11)
+ return 11;
+ return size;
+}
+
+static void compute_gaussian_kernel(VGfloat *kernel,
+ VGint width, VGint height,
+ VGfloat stdDeviationX,
+ VGfloat stdDeviationY)
+{
+ VGint i, j;
+ VGfloat scale = 0.0f;
+
+ for (j = 0; j < height; ++j) {
+ for (i = 0; i < width; ++i) {
+ VGint idx = (height - j -1) * width + (width - i -1);
+         kernel[idx] = compute_gaussian_component(i-(ceil(width/2))-1,
+                                                  j-ceil(height/2)-1,
+                                                  stdDeviationX, stdDeviationY);
+ scale += kernel[idx];
+ }
+ }
+
+ for (j = 0; j < height; ++j) {
+ for (i = 0; i < width; ++i) {
+ VGint idx = j * width + i;
+ kernel[idx] /= scale;
+ }
+ }
+}
+
+void vgGaussianBlur(VGImage dst, VGImage src,
+ VGfloat stdDeviationX,
+ VGfloat stdDeviationY,
+ VGTilingMode tilingMode)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_image *d, *s;
+ VGfloat *buffer, *kernel;
+ VGint kernel_width, kernel_height, kernel_size;
+ VGint buffer_len;
+ VGint idx, i, j;
+ struct filter_info info;
+
+ if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (stdDeviationX <= 0 || stdDeviationY <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ if (tilingMode < VG_TILE_FILL ||
+ tilingMode > VG_TILE_REFLECT) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ d = (struct vg_image*)dst;
+ s = (struct vg_image*)src;
+
+ if (vg_image_overlaps(d, s)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ kernel_width = compute_kernel_size(stdDeviationX);
+ kernel_height = compute_kernel_size(stdDeviationY);
+ kernel_size = kernel_width * kernel_height;
+ kernel = malloc(sizeof(VGfloat)*kernel_size);
+ compute_gaussian_kernel(kernel, kernel_width, kernel_height,
+ stdDeviationX, stdDeviationY);
+
+ buffer_len = 8 + 2 * 4 * kernel_size;
+ buffer = (VGfloat*)malloc(buffer_len * sizeof(VGfloat));
+
+ buffer[0] = 0.f;
+ buffer[1] = 1.f;
+ buffer[2] = 2.f; /*unused*/
+ buffer[3] = 4.f; /*unused*/
+
+ buffer[4] = kernel_width * kernel_height;
+ buffer[5] = 1.f;/*scale*/
+ buffer[6] = 0.f;/*bias*/
+ buffer[7] = 0.f;
+
+ idx = 8;
+ for (j = 0; j < kernel_height; ++j) {
+ for (i = 0; i < kernel_width; ++i) {
+ VGint index = j * kernel_width + i;
+ VGfloat x, y;
+
+ x = texture_offset(s->width, kernel_width, i, kernel_width/2);
+ y = texture_offset(s->height, kernel_height, j, kernel_height/2);
+
+ buffer[idx + index*4 + 0] = x;
+ buffer[idx + index*4 + 1] = y;
+ buffer[idx + index*4 + 2] = 0.f;
+ buffer[idx + index*4 + 3] = 0.f;
+ }
+ }
+ idx += kernel_size * 4;
+
+ for (j = 0; j < kernel_height; ++j) {
+ for (i = 0; i < kernel_width; ++i) {
+ /* transpose the kernel */
+ VGint index = j * kernel_width + i;
+ VGint kindex = (kernel_width - i - 1) * kernel_height + (kernel_height - j - 1);
+ buffer[idx + index*4 + 0] = kernel[kindex];
+ buffer[idx + index*4 + 1] = kernel[kindex];
+ buffer[idx + index*4 + 2] = kernel[kindex];
+ buffer[idx + index*4 + 3] = kernel[kindex];
+ }
+ }
+
+ info.dst = d;
+ info.src = s;
+ info.setup_shader = &setup_convolution;
+ info.user_data = (void*)(long)(buffer_len/4);
+ info.const_buffer = buffer;
+ info.const_buffer_len = buffer_len * sizeof(VGfloat);
+ info.tiling_mode = tilingMode;
+ info.extra_texture = 0;
+ execute_filter(ctx, &info);
+
+ free(buffer);
+ free(kernel);
+}
+
+void vgLookup(VGImage dst, VGImage src,
+ const VGubyte * redLUT,
+ const VGubyte * greenLUT,
+ const VGubyte * blueLUT,
+ const VGubyte * alphaLUT,
+ VGboolean outputLinear,
+ VGboolean outputPremultiplied)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_image *d, *s;
+ VGuint color_data[256];
+ VGint i;
+ struct pipe_texture *lut_texture;
+ VGfloat buffer[4];
+ struct filter_info info;
+
+ if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ if (!redLUT || !greenLUT || !blueLUT || !alphaLUT) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ d = (struct vg_image*)dst;
+ s = (struct vg_image*)src;
+
+ if (vg_image_overlaps(d, s)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ for (i = 0; i < 256; ++i) {
+ color_data[i] = blueLUT[i] << 24 | greenLUT[i] << 16 |
+ redLUT[i] << 8 | alphaLUT[i];
+ }
+   lut_texture = create_texture_1d(ctx, color_data, 256);
+
+ buffer[0] = 0.f;
+ buffer[1] = 0.f;
+ buffer[2] = 1.f;
+ buffer[3] = 1.f;
+
+ info.dst = d;
+ info.src = s;
+ info.setup_shader = &setup_lookup;
+ info.user_data = NULL;
+ info.const_buffer = buffer;
+ info.const_buffer_len = 4 * sizeof(VGfloat);
+ info.tiling_mode = VG_TILE_PAD;
+ info.extra_texture = lut_texture;
+
+ execute_filter(ctx, &info);
+
+ pipe_texture_reference(&lut_texture, NULL);
+}
+
+void vgLookupSingle(VGImage dst, VGImage src,
+ const VGuint * lookupTable,
+ VGImageChannel sourceChannel,
+ VGboolean outputLinear,
+ VGboolean outputPremultiplied)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_image *d, *s;
+ struct pipe_texture *lut_texture;
+ VGfloat buffer[4];
+ struct filter_info info;
+ VGuint color_data[256];
+ VGint i;
+
+ if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ if (!lookupTable || !is_aligned(lookupTable)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ if (sourceChannel != VG_RED && sourceChannel != VG_GREEN &&
+ sourceChannel != VG_BLUE && sourceChannel != VG_ALPHA) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ d = (struct vg_image*)dst;
+ s = (struct vg_image*)src;
+
+ if (vg_image_overlaps(d, s)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
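+
+   /* lookupTable entries are packed with red in the most significant byte;
+    * repack them into the same channel order used for the 1D LUT texture. */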
+ for (i = 0; i < 256; ++i) {
+ VGuint rgba = lookupTable[i];
+ VGubyte blue, green, red, alpha;
+ red = (rgba & 0xff000000)>>24;
+ green = (rgba & 0x00ff0000)>>16;
+ blue = (rgba & 0x0000ff00)>> 8;
+ alpha = (rgba & 0x000000ff)>> 0;
+ color_data[i] = blue << 24 | green << 16 |
+ red << 8 | alpha;
+ }
+ lut_texture = create_texture_1d(ctx, color_data, 256);
+
+ buffer[0] = 0.f;
+ buffer[1] = 0.f;
+ buffer[2] = 1.f;
+ buffer[3] = 1.f;
+
+ info.dst = d;
+ info.src = s;
+ info.setup_shader = &setup_lookup_single;
+ info.user_data = (void*)sourceChannel;
+ info.const_buffer = buffer;
+ info.const_buffer_len = 4 * sizeof(VGfloat);
+ info.tiling_mode = VG_TILE_PAD;
+ info.extra_texture = lut_texture;
+
+ execute_filter(ctx, &info);
+
+ pipe_texture_reference(&lut_texture, NULL);
+}
diff --git a/src/gallium/state_trackers/vega/api_images.c b/src/gallium/state_trackers/vega/api_images.c
new file mode 100644
index 0000000000..c437553bc2
--- /dev/null
+++ b/src/gallium/state_trackers/vega/api_images.c
@@ -0,0 +1,489 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "image.h"
+
+#include "VG/openvg.h"
+
+#include "vg_context.h"
+#include "vg_translate.h"
+#include "api_consts.h"
+#include "image.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_screen.h"
+#include "pipe/p_inlines.h"
+#include "util/u_blit.h"
+#include "util/u_tile.h"
+#include "util/u_memory.h"
+
+static INLINE VGboolean supported_image_format(VGImageFormat format)
+{
+ switch(format) {
+ case VG_sRGBX_8888:
+ case VG_sRGBA_8888:
+ case VG_sRGBA_8888_PRE:
+ case VG_sRGB_565:
+ case VG_sRGBA_5551:
+ case VG_sRGBA_4444:
+ case VG_sL_8:
+ case VG_lRGBX_8888:
+ case VG_lRGBA_8888:
+ case VG_lRGBA_8888_PRE:
+ case VG_lL_8:
+ case VG_A_8:
+ case VG_BW_1:
+#ifdef OPENVG_VERSION_1_1
+ case VG_A_1:
+ case VG_A_4:
+#endif
+ case VG_sXRGB_8888:
+ case VG_sARGB_8888:
+ case VG_sARGB_8888_PRE:
+ case VG_sARGB_1555:
+ case VG_sARGB_4444:
+ case VG_lXRGB_8888:
+ case VG_lARGB_8888:
+ case VG_lARGB_8888_PRE:
+ case VG_sBGRX_8888:
+ case VG_sBGRA_8888:
+ case VG_sBGRA_8888_PRE:
+ case VG_sBGR_565:
+ case VG_sBGRA_5551:
+ case VG_sBGRA_4444:
+ case VG_lBGRX_8888:
+ case VG_lBGRA_8888:
+ case VG_lBGRA_8888_PRE:
+ case VG_sXBGR_8888:
+ case VG_sABGR_8888:
+ case VG_sABGR_8888_PRE:
+ case VG_sABGR_1555:
+ case VG_sABGR_4444:
+ case VG_lXBGR_8888:
+ case VG_lABGR_8888:
+ case VG_lABGR_8888_PRE:
+ return VG_TRUE;
+ default:
+ return VG_FALSE;
+ }
+ return VG_FALSE;
+}
+
+VGImage vgCreateImage(VGImageFormat format,
+ VGint width, VGint height,
+ VGbitfield allowedQuality)
+{
+ struct vg_context *ctx = vg_current_context();
+
+ if (!supported_image_format(format)) {
+ vg_set_error(ctx, VG_UNSUPPORTED_IMAGE_FORMAT_ERROR);
+ return VG_INVALID_HANDLE;
+ }
+ if (width <= 0 || height <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return VG_INVALID_HANDLE;
+ }
+ if (width > vgGeti(VG_MAX_IMAGE_WIDTH) ||
+ height > vgGeti(VG_MAX_IMAGE_HEIGHT)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return VG_INVALID_HANDLE;
+ }
+ if (width * height > vgGeti(VG_MAX_IMAGE_PIXELS)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return VG_INVALID_HANDLE;
+ }
+
+ if (!(allowedQuality & ((VG_IMAGE_QUALITY_NONANTIALIASED |
+ VG_IMAGE_QUALITY_FASTER |
+ VG_IMAGE_QUALITY_BETTER)))) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return VG_INVALID_HANDLE;
+ }
+
+ return (VGImage)image_create(format, width, height);
+}
+
+void vgDestroyImage(VGImage image)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_image *img = (struct vg_image *)image;
+
+ if (image == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (!vg_object_is_valid((void*)image, VG_OBJECT_IMAGE)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ image_destroy(img);
+}
+
+void vgClearImage(VGImage image,
+ VGint x, VGint y,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_image *img;
+
+ if (image == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (width <= 0 || height <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ img = (struct vg_image*)image;
+
+ if (x + width < 0 || y + height < 0)
+ return;
+
+ image_clear(img, x, y, width, height);
+
+}
+
+void vgImageSubData(VGImage image,
+ const void * data,
+ VGint dataStride,
+ VGImageFormat dataFormat,
+ VGint x, VGint y,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_image *img;
+
+ if (image == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (!supported_image_format(dataFormat)) {
+ vg_set_error(ctx, VG_UNSUPPORTED_IMAGE_FORMAT_ERROR);
+ return;
+ }
+ if (width <= 0 || height <= 0 || !data || !is_aligned(data)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ img = (struct vg_image*)(image);
+ image_sub_data(img, data, dataStride, dataFormat,
+ x, y, width, height);
+}
+
+void vgGetImageSubData(VGImage image,
+ void * data,
+ VGint dataStride,
+ VGImageFormat dataFormat,
+ VGint x, VGint y,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_image *img;
+
+ if (image == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (!supported_image_format(dataFormat)) {
+ vg_set_error(ctx, VG_UNSUPPORTED_IMAGE_FORMAT_ERROR);
+ return;
+ }
+ if (width <= 0 || height <= 0 || !data || !is_aligned(data)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ img = (struct vg_image*)image;
+ image_get_sub_data(img, data, dataStride, dataFormat,
+ x, y, width, height);
+}
+
+VGImage vgChildImage(VGImage parent,
+ VGint x, VGint y,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_image *p;
+
+ if (parent == VG_INVALID_HANDLE ||
+ !vg_context_is_object_valid(ctx, VG_OBJECT_IMAGE, (void*)parent) ||
+ !vg_object_is_valid((void*)parent, VG_OBJECT_IMAGE)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return VG_INVALID_HANDLE;
+ }
+ if (width <= 0 || height <= 0 || x < 0 || y < 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return VG_INVALID_HANDLE;
+ }
+ p = (struct vg_image *)parent;
+ if (x > p->width || y > p->height) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return VG_INVALID_HANDLE;
+ }
+ if (x + width > p->width || y + height > p->height) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return VG_INVALID_HANDLE;
+ }
+
+ return (VGImage)image_child_image(p, x, y, width, height);
+}
+
+VGImage vgGetParent(VGImage image)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_image *img;
+
+ if (image == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return VG_INVALID_HANDLE;
+ }
+
+ img = (struct vg_image*)image;
+ if (img->parent)
+ return (VGImage)img->parent;
+ else
+ return image;
+}
+
+void vgCopyImage(VGImage dst, VGint dx, VGint dy,
+ VGImage src, VGint sx, VGint sy,
+ VGint width, VGint height,
+ VGboolean dither)
+{
+ struct vg_context *ctx = vg_current_context();
+
+ if (src == VG_INVALID_HANDLE || dst == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ if (width <= 0 || height <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ vg_validate_state(ctx);
+ image_copy((struct vg_image*)dst, dx, dy,
+ (struct vg_image*)src, sx, sy,
+ width, height, dither);
+}
+
+void vgDrawImage(VGImage image)
+{
+ struct vg_context *ctx = vg_current_context();
+
+ if (!ctx)
+ return;
+
+ if (image == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ vg_validate_state(ctx);
+ image_draw((struct vg_image*)image);
+}
+
+void vgSetPixels(VGint dx, VGint dy,
+ VGImage src, VGint sx, VGint sy,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+
+ vg_validate_state(ctx);
+
+ if (src == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (width <= 0 || height <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ image_set_pixels(dx, dy, (struct vg_image*)src, sx, sy, width,
+ height);
+}
+
+void vgGetPixels(VGImage dst, VGint dx, VGint dy,
+ VGint sx, VGint sy,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_image *img;
+
+ if (dst == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (width <= 0 || height <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ img = (struct vg_image*)dst;
+
+ image_get_pixels(img, dx, dy,
+ sx, sy, width, height);
+}
+
+void vgWritePixels(const void * data, VGint dataStride,
+ VGImageFormat dataFormat,
+ VGint dx, VGint dy,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_context *pipe = ctx->pipe;
+
+ if (!supported_image_format(dataFormat)) {
+ vg_set_error(ctx, VG_UNSUPPORTED_IMAGE_FORMAT_ERROR);
+ return;
+ }
+ if (!data || !is_aligned(data)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ if (width <= 0 || height <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ vg_validate_state(ctx);
+ {
+ struct vg_image *img = image_create(dataFormat, width, height);
+ image_sub_data(img, data, dataStride, dataFormat, 0, 0,
+ width, height);
+#if 0
+ struct matrix *matrix = &ctx->state.vg.image_user_to_surface_matrix;
+ matrix_translate(matrix, dx, dy);
+ image_draw(img);
+ matrix_translate(matrix, -dx, -dy);
+#else
+ /* this looks like a better approach */
+ image_set_pixels(dx, dy, img, 0, 0, width, height);
+#endif
+ image_destroy(img);
+ }
+ /* make sure rendering has completed */
+ pipe->flush(pipe, PIPE_FLUSH_RENDER_CACHE, NULL);
+}
+
+void vgReadPixels(void * data, VGint dataStride,
+ VGImageFormat dataFormat,
+ VGint sx, VGint sy,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_screen *screen = pipe->screen;
+
+ struct st_framebuffer *stfb = ctx->draw_buffer;
+ struct st_renderbuffer *strb = stfb->strb;
+ struct pipe_framebuffer_state *fb = &ctx->state.g3d.fb;
+
+ VGfloat temp[VEGA_MAX_IMAGE_WIDTH][4];
+ VGfloat *df = (VGfloat*)temp;
+ VGint y = (fb->height - sy) - 1, yStep = -1;
+ VGint i;
+ VGubyte *dst = (VGubyte *)data;
+ VGint xoffset = 0, yoffset = 0;
+
+ if (!supported_image_format(dataFormat)) {
+ vg_set_error(ctx, VG_UNSUPPORTED_IMAGE_FORMAT_ERROR);
+ return;
+ }
+ if (!data || !is_aligned(data)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ if (width <= 0 || height <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ /* make sure rendering has completed */
+ pipe->flush(pipe, PIPE_FLUSH_RENDER_CACHE, NULL);
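+
+   /* Clamp reads that start outside the framebuffer: drop the rows and
+    * columns that fall below zero and advance the destination pointer by
+    * the corresponding number of bytes. */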
+ if (sx < 0) {
+ xoffset = -sx;
+ xoffset *= _vega_size_for_format(dataFormat);
+ width += sx;
+ sx = 0;
+ }
+ if (sy < 0) {
+ yoffset = -sy;
+ height += sy;
+ sy = 0;
+ y = (fb->height - sy) - 1;
+ yoffset *= dataStride;
+ }
+
+ {
+ struct pipe_transfer *transfer;
+
+ transfer = screen->get_tex_transfer(screen, strb->texture, 0, 0, 0,
+ PIPE_TRANSFER_READ,
+ 0, 0, width, height);
+
+ /* Do a row at a time to flip image data vertically */
+ for (i = 0; i < height; i++) {
+#if 0
+ debug_printf("%d-%d == %d\n", sy, height, y);
+#endif
+ pipe_get_tile_rgba(transfer, sx, y, width, 1, df);
+ y += yStep;
+ _vega_pack_rgba_span_float(ctx, width, temp, dataFormat,
+ dst + yoffset + xoffset);
+ dst += dataStride;
+ }
+
+ screen->tex_transfer_destroy(transfer);
+ }
+}
+
+void vgCopyPixels(VGint dx, VGint dy,
+ VGint sx, VGint sy,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_framebuffer_state *fb = &ctx->state.g3d.fb;
+ struct st_renderbuffer *strb = ctx->draw_buffer->strb;
+
+ if (width <= 0 || height <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ /* do nothing if we copy from outside the fb */
+ if (dx >= (VGint)fb->width || dy >= (VGint)fb->height ||
+ sx >= (VGint)fb->width || sy >= (VGint)fb->height)
+ return;
+
+ vg_validate_state(ctx);
+ /* make sure rendering has completed */
+ vgFinish();
+
+ vg_copy_surface(ctx, strb->surface, dx, dy,
+ strb->surface, sx, sy, width, height);
+}
diff --git a/src/gallium/state_trackers/vega/api_masks.c b/src/gallium/state_trackers/vega/api_masks.c
new file mode 100644
index 0000000000..4f9f3dae17
--- /dev/null
+++ b/src/gallium/state_trackers/vega/api_masks.c
@@ -0,0 +1,373 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "VG/openvg.h"
+
+#include "mask.h"
+#include "renderer.h"
+
+#include "vg_context.h"
+#include "pipe/p_context.h"
+#include "pipe/p_inlines.h"
+#include "pipe/internal/p_winsys_screen.h" /* for winsys->update_buffer */
+
+#include "util/u_pack_color.h"
+#include "util/u_draw_quad.h"
+#include "util/u_memory.h"
+
+
+#define DISABLE_1_1_MASKING 1
+
+/**
+ * Draw a screen-aligned quadrilateral.
+ * Coords are window coords with y=0=bottom. These coords will be transformed
+ * by the vertex shader and viewport transform.
+ */
+static void
+draw_clear_quad(struct vg_context *st,
+ float x0, float y0, float x1, float y1, float z,
+ const VGfloat color[4])
+{
+ struct pipe_context *pipe = st->pipe;
+ struct pipe_buffer *buf;
+ VGuint i;
+
+ /* positions */
+ st->clear.vertices[0][0][0] = x0;
+ st->clear.vertices[0][0][1] = y0;
+
+ st->clear.vertices[1][0][0] = x1;
+ st->clear.vertices[1][0][1] = y0;
+
+ st->clear.vertices[2][0][0] = x1;
+ st->clear.vertices[2][0][1] = y1;
+
+ st->clear.vertices[3][0][0] = x0;
+ st->clear.vertices[3][0][1] = y1;
+
+ /* same for all verts: */
+ for (i = 0; i < 4; i++) {
+ st->clear.vertices[i][0][2] = z;
+ st->clear.vertices[i][0][3] = 1.0;
+ st->clear.vertices[i][1][0] = color[0];
+ st->clear.vertices[i][1][1] = color[1];
+ st->clear.vertices[i][1][2] = color[2];
+ st->clear.vertices[i][1][3] = color[3];
+ }
+
+
+ /* put vertex data into vbuf */
+ buf = pipe_user_buffer_create(pipe->screen,
+ st->clear.vertices,
+ sizeof(st->clear.vertices));
+
+
+ /* draw */
+ if (buf) {
+ util_draw_vertex_buffer(pipe, buf, 0,
+ PIPE_PRIM_TRIANGLE_FAN,
+ 4, /* verts */
+ 2); /* attribs/vert */
+
+ pipe_buffer_reference(&buf, NULL);
+ }
+}
+
+/**
+ * Do vgClear by drawing a quadrilateral.
+ */
+static void
+clear_with_quad(struct vg_context *st, float x0, float y0,
+ float width, float height, const VGfloat clear_color[4])
+{
+ VGfloat x1, y1;
+
+ vg_validate_state(st);
+
+ x1 = x0 + width;
+ y1 = y0 + height;
+
+ /*
+ printf("%s %f,%f %f,%f\n", __FUNCTION__,
+ x0, y0,
+ x1, y1);
+ */
+
+ if (st->pipe->winsys && st->pipe->winsys->update_buffer)
+ st->pipe->winsys->update_buffer( st->pipe->winsys,
+ st->pipe->priv );
+
+ cso_save_blend(st->cso_context);
+ cso_save_rasterizer(st->cso_context);
+ cso_save_fragment_shader(st->cso_context);
+ cso_save_vertex_shader(st->cso_context);
+
+ /* blend state: RGBA masking */
+ {
+ struct pipe_blend_state blend;
+ memset(&blend, 0, sizeof(blend));
+ blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ blend.alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ blend.colormask |= PIPE_MASK_R;
+ blend.colormask |= PIPE_MASK_G;
+ blend.colormask |= PIPE_MASK_B;
+ blend.colormask |= PIPE_MASK_A;
+ cso_set_blend(st->cso_context, &blend);
+ }
+
+ cso_set_rasterizer(st->cso_context, &st->clear.raster);
+
+ cso_set_fragment_shader_handle(st->cso_context, st->clear.fs);
+ cso_set_vertex_shader_handle(st->cso_context, vg_clear_vs(st));
+
+ /* draw quad matching scissor rect (XXX verify coord round-off) */
+ draw_clear_quad(st, x0, y0, x1, y1, 0, clear_color);
+
+ /* Restore pipe state */
+ cso_restore_blend(st->cso_context);
+ cso_restore_rasterizer(st->cso_context);
+ cso_restore_fragment_shader(st->cso_context);
+ cso_restore_vertex_shader(st->cso_context);
+}
+
+
+void vgMask(VGHandle mask, VGMaskOperation operation,
+ VGint x, VGint y,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+
+   if (width <= 0 || height <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ if (operation < VG_CLEAR_MASK || operation > VG_SUBTRACT_MASK) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+
+ vg_validate_state(ctx);
+
+ if (operation == VG_CLEAR_MASK) {
+ mask_fill(x, y, width, height, 0.f);
+ } else if (operation == VG_FILL_MASK) {
+ mask_fill(x, y, width, height, 1.f);
+ } else if (vg_object_is_valid((void*)mask, VG_OBJECT_IMAGE)) {
+ struct vg_image *image = (struct vg_image *)mask;
+ mask_using_image(image, operation, x, y, width, height);
+ } else if (vg_object_is_valid((void*)mask, VG_OBJECT_MASK)) {
+#if DISABLE_1_1_MASKING
+ return;
+#else
+ struct vg_mask_layer *layer = (struct vg_mask_layer *)mask;
+ mask_using_layer(layer, operation, x, y, width, height);
+#endif
+ } else {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ }
+}
+
+void vgClear(VGint x, VGint y,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_framebuffer_state *fb;
+
+ if (width <= 0 || height <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ vg_validate_state(ctx);
+#if 0
+ debug_printf("Clear [%d, %d, %d, %d] with [%f, %f, %f, %f]\n",
+ x, y, width, height,
+ ctx->state.vg.clear_color[0],
+ ctx->state.vg.clear_color[1],
+ ctx->state.vg.clear_color[2],
+ ctx->state.vg.clear_color[3]);
+#endif
+
+ fb = &ctx->state.g3d.fb;
+ /* check for a whole surface clear */
+ if (!ctx->state.vg.scissoring &&
+ (x == 0 && y == 0 && width == fb->width && height == fb->height)) {
+ ctx->pipe->clear(ctx->pipe, PIPE_CLEAR_COLOR | PIPE_CLEAR_DEPTHSTENCIL,
+ ctx->state.vg.clear_color, 1., 0);
+ } else {
+ clear_with_quad(ctx, x, y, width, height, ctx->state.vg.clear_color);
+ }
+}
+
+
+#ifdef OPENVG_VERSION_1_1
+
+
+void vgRenderToMask(VGPath path,
+ VGbitfield paintModes,
+ VGMaskOperation operation)
+{
+ struct vg_context *ctx = vg_current_context();
+
+ if (path == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (!paintModes || (paintModes&(~(VG_STROKE_PATH|VG_FILL_PATH)))) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ if (operation < VG_CLEAR_MASK ||
+ operation > VG_SUBTRACT_MASK) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ if (!vg_object_is_valid((void*)path, VG_OBJECT_PATH)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+#if DISABLE_1_1_MASKING
+ return;
+#endif
+
+ vg_validate_state(ctx);
+
+ mask_render_to((struct path *)path, paintModes, operation);
+}
+
+VGMaskLayer vgCreateMaskLayer(VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+
+ if (width <= 0 || height <= 0 ||
+ width > vgGeti(VG_MAX_IMAGE_WIDTH) ||
+ height > vgGeti(VG_MAX_IMAGE_HEIGHT)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return VG_INVALID_HANDLE;
+ }
+
+ return (VGMaskLayer)mask_layer_create(width, height);
+}
+
+void vgDestroyMaskLayer(VGMaskLayer maskLayer)
+{
+ struct vg_mask_layer *mask = 0;
+ struct vg_context *ctx = vg_current_context();
+
+ if (maskLayer == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (!vg_object_is_valid((void*)maskLayer, VG_OBJECT_MASK)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ mask = (struct vg_mask_layer *)maskLayer;
+ mask_layer_destroy(mask);
+}
+
+void vgFillMaskLayer(VGMaskLayer maskLayer,
+ VGint x, VGint y,
+ VGint width, VGint height,
+ VGfloat value)
+{
+ struct vg_mask_layer *mask = 0;
+ struct vg_context *ctx = vg_current_context();
+
+ if (maskLayer == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ if (value < 0 || value > 1) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ if (width <= 0 || height <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ if (x < 0 || y < 0 || (x + width) < 0 || (y + height) < 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ if (!vg_object_is_valid((void*)maskLayer, VG_OBJECT_MASK)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ mask = (struct vg_mask_layer*)maskLayer;
+
+ if (x + width > mask_layer_width(mask) ||
+ y + height > mask_layer_height(mask)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+#if DISABLE_1_1_MASKING
+ return;
+#endif
+ mask_layer_fill(mask, x, y, width, height, value);
+}
+
+void vgCopyMask(VGMaskLayer maskLayer,
+ VGint sx, VGint sy,
+ VGint dx, VGint dy,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_mask_layer *mask = 0;
+
+ if (maskLayer == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (width <= 0 || height <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ if (!vg_object_is_valid((void*)maskLayer, VG_OBJECT_MASK)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+#if DISABLE_1_1_MASKING
+ return;
+#endif
+
+ mask = (struct vg_mask_layer*)maskLayer;
+ mask_copy(mask, sx, sy, dx, dy, width, height);
+}
+
+#endif
diff --git a/src/gallium/state_trackers/vega/api_misc.c b/src/gallium/state_trackers/vega/api_misc.c
new file mode 100644
index 0000000000..78ba0bc110
--- /dev/null
+++ b/src/gallium/state_trackers/vega/api_misc.c
@@ -0,0 +1,83 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "VG/openvg.h"
+
+#include "vg_context.h"
+
+/* Hardware Queries */
+VGHardwareQueryResult vgHardwareQuery(VGHardwareQueryType key,
+ VGint setting)
+{
+ struct vg_context *ctx = vg_current_context();
+
+ if (key < VG_IMAGE_FORMAT_QUERY ||
+ key > VG_PATH_DATATYPE_QUERY) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return VG_HARDWARE_UNACCELERATED;
+ }
+
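+   /* check that 'setting' lies in the legal range for the selected query */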
+ if (key == VG_IMAGE_FORMAT_QUERY) {
+ if (setting < VG_sRGBX_8888 ||
+ setting > VG_lABGR_8888_PRE) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return VG_HARDWARE_UNACCELERATED;
+ }
+ } else if (key == VG_PATH_DATATYPE_QUERY) {
+ if (setting < VG_PATH_DATATYPE_S_8 ||
+ setting > VG_PATH_DATATYPE_F) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return VG_HARDWARE_UNACCELERATED;
+ }
+ }
+ /* we're supposed to accelerate everything */
+ return VG_HARDWARE_ACCELERATED;
+}
+
+/* Renderer and Extension Information */
+const VGubyte *vgGetString(VGStringID name)
+{
+ struct vg_context *ctx = vg_current_context();
+ static const VGubyte *vendor = (VGubyte *)"Tungsten Graphics, Inc";
+ static const VGubyte *renderer = (VGubyte *)"Vega OpenVG 1.0";
+ static const VGubyte *version = (VGubyte *)"1.0";
+
+ if (!ctx)
+ return NULL;
+
+ switch(name) {
+ case VG_VENDOR:
+ return vendor;
+ case VG_RENDERER:
+ return renderer;
+ case VG_VERSION:
+ return version;
+ case VG_EXTENSIONS:
+ return NULL;
+ default:
+ return NULL;
+ }
+}
diff --git a/src/gallium/state_trackers/vega/api_paint.c b/src/gallium/state_trackers/vega/api_paint.c
new file mode 100644
index 0000000000..dd3ac5bdb0
--- /dev/null
+++ b/src/gallium/state_trackers/vega/api_paint.c
@@ -0,0 +1,166 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "VG/openvg.h"
+
+#include "vg_context.h"
+#include "paint.h"
+#include "image.h"
+
+VGPaint vgCreatePaint(void)
+{
+ return (VGPaint) paint_create(vg_current_context());
+}
+
+void vgDestroyPaint(VGPaint p)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_paint *paint;
+
+ if (p == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ paint = (struct vg_paint *)p;
+ paint_destroy(paint);
+}
+
+void vgSetPaint(VGPaint paint, VGbitfield paintModes)
+{
+ struct vg_context *ctx = vg_current_context();
+
+ if (paint == VG_INVALID_HANDLE) {
+ /* restore the default */
+ paint = (VGPaint)ctx->default_paint;
+ } else if (!vg_object_is_valid((void*)paint, VG_OBJECT_PAINT)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ if (!(paintModes & ((VG_FILL_PATH|VG_STROKE_PATH)))) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ if (paintModes & VG_FILL_PATH) {
+ ctx->state.vg.fill_paint = (struct vg_paint *)paint;
+ }
+ if (paintModes & VG_STROKE_PATH) {
+ ctx->state.vg.stroke_paint = (struct vg_paint *)paint;
+ }
+}
+
+VGPaint vgGetPaint(VGPaintMode paintMode)
+{
+ struct vg_context *ctx = vg_current_context();
+ VGPaint paint = VG_INVALID_HANDLE;
+
+ if (paintMode < VG_STROKE_PATH || paintMode > VG_FILL_PATH) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return VG_INVALID_HANDLE;
+ }
+
+ if (paintMode == VG_FILL_PATH)
+ paint = (VGPaint)ctx->state.vg.fill_paint;
+ else if (paintMode == VG_STROKE_PATH)
+ paint = (VGPaint)ctx->state.vg.stroke_paint;
+
+ if (paint == (VGPaint)ctx->default_paint)
+ paint = VG_INVALID_HANDLE;
+
+ return paint;
+}
+
+void vgSetColor(VGPaint paint, VGuint rgba)
+{
+ struct vg_context *ctx = vg_current_context();
+
+ if (paint == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ if (!vg_object_is_valid((void*)paint, VG_OBJECT_PAINT)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ {
+ struct vg_paint *p = (struct vg_paint *)paint;
+ paint_set_colori(p, rgba);
+ }
+}
+
+VGuint vgGetColor(VGPaint paint)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_paint *p;
+ VGuint rgba = 0;
+
+ if (paint == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return rgba;
+ }
+
+ if (!vg_object_is_valid((void*)paint, VG_OBJECT_PAINT)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return rgba;
+ }
+ p = (struct vg_paint *)paint;
+
+ return paint_colori(p);
+}
+
+void vgPaintPattern(VGPaint paint, VGImage pattern)
+{
+ struct vg_context *ctx = vg_current_context();
+
+ if (paint == VG_INVALID_HANDLE ||
+ !vg_context_is_object_valid(ctx, VG_OBJECT_PAINT, (void *)paint)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ if (pattern == VG_INVALID_HANDLE) {
+ paint_set_type((struct vg_paint*)paint, VG_PAINT_TYPE_COLOR);
+ return;
+ }
+
+ if (!vg_context_is_object_valid(ctx, VG_OBJECT_IMAGE, (void *)pattern)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+
+ if (!vg_object_is_valid((void*)paint, VG_OBJECT_PAINT) ||
+ !vg_object_is_valid((void*)pattern, VG_OBJECT_IMAGE)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ paint_set_pattern((struct vg_paint*)paint,
+ (struct vg_image*)pattern);
+}
+
diff --git a/src/gallium/state_trackers/vega/api_params.c b/src/gallium/state_trackers/vega/api_params.c
new file mode 100644
index 0000000000..db77fd9cb0
--- /dev/null
+++ b/src/gallium/state_trackers/vega/api_params.c
@@ -0,0 +1,1673 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "VG/openvg.h"
+
+#include "vg_context.h"
+#include "paint.h"
+#include "path.h"
+#include "image.h"
+#include "matrix.h"
+#include "api_consts.h"
+
+#include "pipe/p_compiler.h"
+#include "util/u_pointer.h"
+#include "util/u_math.h"
+
+#include <math.h>
+
+static INLINE struct vg_state *current_state(void)
+{
+ struct vg_context *ctx = vg_current_context();
+ if (!ctx)
+ return 0;
+ else
+ return &ctx->state.vg;
+}
+
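+/* Check that 'count' is a legal element count for the given vector
+ * parameter: a multiple of four within the scissor-rect limit, within the
+ * dash-pattern limit, or exactly the parameter's vector size otherwise. */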
+static INLINE VGboolean count_in_bounds(VGParamType type, VGint count)
+{
+ if (count < 0)
+ return VG_FALSE;
+
+ if (type == VG_SCISSOR_RECTS)
+      return (!(count % 4) && count <= VEGA_MAX_SCISSOR_RECTS * 4);
+ else if (type == VG_STROKE_DASH_PATTERN) {
+ return count <= VEGA_MAX_DASH_COUNT;
+ } else {
+ VGint real_count = vgGetVectorSize(type);
+ return count == real_count;
+ }
+}
+
+void vgSetf (VGParamType type, VGfloat value)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_state *state = current_state();
+ VGErrorCode error = VG_NO_ERROR;
+
+ switch(type) {
+ case VG_MATRIX_MODE:
+ case VG_FILL_RULE:
+ case VG_IMAGE_QUALITY:
+ case VG_RENDERING_QUALITY:
+ case VG_BLEND_MODE:
+ case VG_IMAGE_MODE:
+#ifdef OPENVG_VERSION_1_1
+ case VG_COLOR_TRANSFORM:
+#endif
+ case VG_STROKE_CAP_STYLE:
+ case VG_STROKE_JOIN_STYLE:
+ case VG_STROKE_DASH_PHASE_RESET:
+ case VG_MASKING:
+ case VG_SCISSORING:
+ case VG_PIXEL_LAYOUT:
+ case VG_SCREEN_LAYOUT:
+ case VG_FILTER_FORMAT_LINEAR:
+ case VG_FILTER_FORMAT_PREMULTIPLIED:
+ case VG_FILTER_CHANNEL_MASK:
+
+ case VG_MAX_SCISSOR_RECTS:
+ case VG_MAX_DASH_COUNT:
+ case VG_MAX_KERNEL_SIZE:
+ case VG_MAX_SEPARABLE_KERNEL_SIZE:
+ case VG_MAX_COLOR_RAMP_STOPS:
+ case VG_MAX_IMAGE_WIDTH:
+ case VG_MAX_IMAGE_HEIGHT:
+ case VG_MAX_IMAGE_PIXELS:
+ case VG_MAX_IMAGE_BYTES:
+ case VG_MAX_GAUSSIAN_STD_DEVIATION:
+ case VG_MAX_FLOAT:
+ vgSeti(type, floor(value));
+ return;
+ break;
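+   /* scalar stroke state is stored both as a float and as an integer so the
+    * vgGetf/vgGeti queries can return it without converting each time */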
+ case VG_STROKE_LINE_WIDTH:
+ state->stroke.line_width.f = value;
+ state->stroke.line_width.i = float_to_int_floor(*((VGuint*)(&value)));
+ break;
+ case VG_STROKE_MITER_LIMIT:
+ state->stroke.miter_limit.f = value;
+ state->stroke.miter_limit.i = float_to_int_floor(*((VGuint*)(&value)));
+ break;
+ case VG_STROKE_DASH_PHASE:
+ state->stroke.dash_phase.f = value;
+ state->stroke.dash_phase.i = float_to_int_floor(*((VGuint*)(&value)));
+ break;
+ default:
+ error = VG_ILLEGAL_ARGUMENT_ERROR;
+ break;
+ }
+ vg_set_error(ctx, error);
+}
+
+void vgSeti (VGParamType type, VGint value)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_state *state = current_state();
+ VGErrorCode error = VG_NO_ERROR;
+
+ switch(type) {
+ case VG_MATRIX_MODE:
+ if (value < VG_MATRIX_PATH_USER_TO_SURFACE ||
+#ifdef OPENVG_VERSION_1_1
+ value > VG_MATRIX_GLYPH_USER_TO_SURFACE)
+#else
+ value > VG_MATRIX_STROKE_PAINT_TO_USER)
+#endif
+ error = VG_ILLEGAL_ARGUMENT_ERROR;
+ else
+ state->matrix_mode = value;
+ break;
+ case VG_FILL_RULE:
+ if (value < VG_EVEN_ODD ||
+ value > VG_NON_ZERO)
+ error = VG_ILLEGAL_ARGUMENT_ERROR;
+ else
+ state->fill_rule = value;
+ break;
+ case VG_IMAGE_QUALITY:
+ state->image_quality = value;
+ break;
+ case VG_RENDERING_QUALITY:
+ if (value < VG_RENDERING_QUALITY_NONANTIALIASED ||
+ value > VG_RENDERING_QUALITY_BETTER)
+ error = VG_ILLEGAL_ARGUMENT_ERROR;
+ else
+ state->rendering_quality = value;
+ break;
+ case VG_BLEND_MODE:
+ if (value < VG_BLEND_SRC ||
+ value > VG_BLEND_ADDITIVE)
+ error = VG_ILLEGAL_ARGUMENT_ERROR;
+ else {
+ ctx->state.dirty |= BLEND_DIRTY;
+ state->blend_mode = value;
+ }
+ break;
+   case VG_IMAGE_MODE:
+      if (value < VG_DRAW_IMAGE_NORMAL ||
+          value > VG_DRAW_IMAGE_STENCIL)
+         error = VG_ILLEGAL_ARGUMENT_ERROR;
+      else
+         state->image_mode = value;
+      break;
+#ifdef OPENVG_VERSION_1_1
+   case VG_COLOR_TRANSFORM:
+      state->color_transform = value;
+      break;
+#endif
+ case VG_STROKE_LINE_WIDTH:
+ state->stroke.line_width.f = value;
+ state->stroke.line_width.i = value;
+ break;
+ case VG_STROKE_CAP_STYLE:
+ if (value < VG_CAP_BUTT ||
+ value > VG_CAP_SQUARE)
+ error = VG_ILLEGAL_ARGUMENT_ERROR;
+ else
+ state->stroke.cap_style = value;
+ break;
+ case VG_STROKE_JOIN_STYLE:
+ if (value < VG_JOIN_MITER ||
+ value > VG_JOIN_BEVEL)
+ error = VG_ILLEGAL_ARGUMENT_ERROR;
+ else
+ state->stroke.join_style = value;
+ break;
+ case VG_STROKE_MITER_LIMIT:
+ state->stroke.miter_limit.f = value;
+ state->stroke.miter_limit.i = value;
+ break;
+ case VG_STROKE_DASH_PHASE:
+ state->stroke.dash_phase.f = value;
+ state->stroke.dash_phase.i = value;
+ break;
+ case VG_STROKE_DASH_PHASE_RESET:
+ state->stroke.dash_phase_reset = value;
+ break;
+ case VG_MASKING:
+ state->masking = value;
+ break;
+ case VG_SCISSORING:
+ state->scissoring = value;
+ ctx->state.dirty |= DEPTH_STENCIL_DIRTY;
+ break;
+ case VG_PIXEL_LAYOUT:
+ if (value < VG_PIXEL_LAYOUT_UNKNOWN ||
+ value > VG_PIXEL_LAYOUT_BGR_HORIZONTAL)
+ error = VG_ILLEGAL_ARGUMENT_ERROR;
+ else
+ state->pixel_layout = value;
+ break;
+ case VG_SCREEN_LAYOUT:
+ /* read only ignore */
+ break;
+ case VG_FILTER_FORMAT_LINEAR:
+ state->filter_format_linear = value;
+ break;
+ case VG_FILTER_FORMAT_PREMULTIPLIED:
+ state->filter_format_premultiplied = value;
+ break;
+ case VG_FILTER_CHANNEL_MASK:
+ state->filter_channel_mask = value;
+ break;
+
+ case VG_MAX_SCISSOR_RECTS:
+ case VG_MAX_DASH_COUNT:
+ case VG_MAX_KERNEL_SIZE:
+ case VG_MAX_SEPARABLE_KERNEL_SIZE:
+ case VG_MAX_COLOR_RAMP_STOPS:
+ case VG_MAX_IMAGE_WIDTH:
+ case VG_MAX_IMAGE_HEIGHT:
+ case VG_MAX_IMAGE_PIXELS:
+ case VG_MAX_IMAGE_BYTES:
+ case VG_MAX_GAUSSIAN_STD_DEVIATION:
+ case VG_MAX_FLOAT:
+ /* read only ignore */
+ break;
+ default:
+ error = VG_ILLEGAL_ARGUMENT_ERROR;
+ break;
+ }
+ vg_set_error(ctx, error);
+}
+
+void vgSetfv(VGParamType type, VGint count,
+ const VGfloat * values)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_state *state = current_state();
+ VGErrorCode error = VG_NO_ERROR;
+
+ if ((count && !values) || !count_in_bounds(type, count) || !is_aligned(values)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ switch(type) {
+ case VG_MATRIX_MODE:
+ case VG_FILL_RULE:
+ case VG_IMAGE_QUALITY:
+ case VG_RENDERING_QUALITY:
+ case VG_BLEND_MODE:
+ case VG_IMAGE_MODE:
+#ifdef OPENVG_VERSION_1_1
+ case VG_COLOR_TRANSFORM:
+#endif
+ case VG_STROKE_CAP_STYLE:
+ case VG_STROKE_JOIN_STYLE:
+ case VG_STROKE_DASH_PHASE_RESET:
+ case VG_MASKING:
+ case VG_SCISSORING:
+ case VG_PIXEL_LAYOUT:
+ case VG_SCREEN_LAYOUT:
+ case VG_FILTER_FORMAT_LINEAR:
+ case VG_FILTER_FORMAT_PREMULTIPLIED:
+ case VG_FILTER_CHANNEL_MASK:
+ vgSeti(type, floor(values[0]));
+ return;
+ break;
+ case VG_SCISSOR_RECTS: {
+ VGint i;
+ VGuint *x = (VGuint*)values;
+ for (i = 0; i < count; ++i) {
+ state->scissor_rects[i].f = values[i];
+ state->scissor_rects[i].i = float_to_int_floor(x[i]);
+ }
+ state->scissor_rects_num = count / 4;
+ ctx->state.dirty |= DEPTH_STENCIL_DIRTY;
+ }
+ break;
+#ifdef OPENVG_VERSION_1_1
+ case VG_COLOR_TRANSFORM_VALUES: {
+ VGint i;
+ for (i = 0; i < count; ++i) {
+ state->color_transform_values[i] = values[i];
+ }
+ }
+ break;
+#endif
+ case VG_STROKE_LINE_WIDTH:
+ state->stroke.line_width.f = values[0];
+ state->stroke.line_width.i = float_to_int_floor(*((VGuint*)(values)));
+ break;
+ case VG_STROKE_MITER_LIMIT:
+ state->stroke.miter_limit.f = values[0];
+ state->stroke.miter_limit.i = float_to_int_floor(*((VGuint*)(values)));
+ break;
+ case VG_STROKE_DASH_PATTERN: {
+ int i;
+ for (i = 0; i < count; ++i) {
+ state->stroke.dash_pattern[i].f = values[i];
+ state->stroke.dash_pattern[i].i =
+ float_to_int_floor(*((VGuint*)(values + i)));
+ }
+ state->stroke.dash_pattern_num = count;
+ }
+ break;
+ case VG_STROKE_DASH_PHASE:
+ state->stroke.dash_phase.f = values[0];
+ state->stroke.dash_phase.i = float_to_int_floor(*((VGuint*)(values)));
+ break;
+ case VG_TILE_FILL_COLOR:
+ state->tile_fill_color[0] = values[0];
+ state->tile_fill_color[1] = values[1];
+ state->tile_fill_color[2] = values[2];
+ state->tile_fill_color[3] = values[3];
+
+ state->tile_fill_colori[0] = float_to_int_floor(*((VGuint*)(values + 0)));
+ state->tile_fill_colori[1] = float_to_int_floor(*((VGuint*)(values + 1)));
+ state->tile_fill_colori[2] = float_to_int_floor(*((VGuint*)(values + 2)));
+ state->tile_fill_colori[3] = float_to_int_floor(*((VGuint*)(values + 3)));
+ break;
+ case VG_CLEAR_COLOR:
+ state->clear_color[0] = values[0];
+ state->clear_color[1] = values[1];
+ state->clear_color[2] = values[2];
+ state->clear_color[3] = values[3];
+
+ state->clear_colori[0] = float_to_int_floor(*((VGuint*)(values + 0)));
+ state->clear_colori[1] = float_to_int_floor(*((VGuint*)(values + 1)));
+ state->clear_colori[2] = float_to_int_floor(*((VGuint*)(values + 2)));
+ state->clear_colori[3] = float_to_int_floor(*((VGuint*)(values + 3)));
+ break;
+#ifdef OPENVG_VERSION_1_1
+ case VG_GLYPH_ORIGIN:
+ state->glyph_origin[0].f = values[0];
+ state->glyph_origin[1].f = values[1];
+
+ state->glyph_origin[0].i = float_to_int_floor(*((VGuint*)(values + 0)));
+ state->glyph_origin[1].i = float_to_int_floor(*((VGuint*)(values + 1)));
+ break;
+#endif
+
+ case VG_MAX_SCISSOR_RECTS:
+ case VG_MAX_DASH_COUNT:
+ case VG_MAX_KERNEL_SIZE:
+ case VG_MAX_SEPARABLE_KERNEL_SIZE:
+ case VG_MAX_COLOR_RAMP_STOPS:
+ case VG_MAX_IMAGE_WIDTH:
+ case VG_MAX_IMAGE_HEIGHT:
+ case VG_MAX_IMAGE_PIXELS:
+ case VG_MAX_IMAGE_BYTES:
+ case VG_MAX_GAUSSIAN_STD_DEVIATION:
+ case VG_MAX_FLOAT:
+ break;
+ default:
+ error = VG_ILLEGAL_ARGUMENT_ERROR;
+ break;
+ }
+ vg_set_error(ctx, error);
+}
+
+void vgSetiv(VGParamType type, VGint count,
+ const VGint * values)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_state *state = current_state();
+
+ if ((count && !values) || !count_in_bounds(type, count) || !is_aligned(values)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ switch(type) {
+ case VG_MATRIX_MODE:
+ case VG_FILL_RULE:
+ case VG_IMAGE_QUALITY:
+ case VG_RENDERING_QUALITY:
+ case VG_BLEND_MODE:
+ case VG_IMAGE_MODE:
+#ifdef OPENVG_VERSION_1_1
+ case VG_COLOR_TRANSFORM:
+#endif
+ case VG_STROKE_CAP_STYLE:
+ case VG_STROKE_JOIN_STYLE:
+ case VG_STROKE_DASH_PHASE_RESET:
+ case VG_MASKING:
+ case VG_SCISSORING:
+ case VG_PIXEL_LAYOUT:
+ case VG_SCREEN_LAYOUT:
+ case VG_FILTER_FORMAT_LINEAR:
+ case VG_FILTER_FORMAT_PREMULTIPLIED:
+ case VG_FILTER_CHANNEL_MASK:
+ vgSeti(type, values[0]);
+ return;
+ break;
+ case VG_SCISSOR_RECTS: {
+ VGint i;
+ for (i = 0; i < count; ++i) {
+ state->scissor_rects[i].i = values[i];
+ state->scissor_rects[i].f = values[i];
+ }
+ state->scissor_rects_num = count / 4;
+ ctx->state.dirty |= DEPTH_STENCIL_DIRTY;
+ }
+ break;
+#ifdef OPENVG_VERSION_1_1
+ case VG_COLOR_TRANSFORM_VALUES: {
+ VGint i;
+ for (i = 0; i < count; ++i) {
+ state->color_transform_values[i] = values[i];
+ }
+ }
+ break;
+#endif
+ case VG_STROKE_LINE_WIDTH:
+ state->stroke.line_width.f = values[0];
+ state->stroke.line_width.i = values[0];
+ break;
+ case VG_STROKE_MITER_LIMIT:
+ state->stroke.miter_limit.f = values[0];
+ state->stroke.miter_limit.i = values[0];
+ break;
+ case VG_STROKE_DASH_PATTERN: {
+ int i;
+ for (i = 0; i < count; ++i) {
+ state->stroke.dash_pattern[i].f = values[i];
+ state->stroke.dash_pattern[i].i = values[i];
+ }
+ state->stroke.dash_pattern_num = count;
+ }
+ break;
+ case VG_STROKE_DASH_PHASE:
+ state->stroke.dash_phase.f = values[0];
+ state->stroke.dash_phase.i = values[0];
+ break;
+ case VG_TILE_FILL_COLOR:
+ state->tile_fill_color[0] = values[0];
+ state->tile_fill_color[1] = values[1];
+ state->tile_fill_color[2] = values[2];
+ state->tile_fill_color[3] = values[3];
+
+ state->tile_fill_colori[0] = values[0];
+ state->tile_fill_colori[1] = values[1];
+ state->tile_fill_colori[2] = values[2];
+ state->tile_fill_colori[3] = values[3];
+ break;
+ case VG_CLEAR_COLOR:
+ state->clear_color[0] = values[0];
+ state->clear_color[1] = values[1];
+ state->clear_color[2] = values[2];
+ state->clear_color[3] = values[3];
+
+ state->clear_colori[0] = values[0];
+ state->clear_colori[1] = values[1];
+ state->clear_colori[2] = values[2];
+ state->clear_colori[3] = values[3];
+ break;
+#ifdef OPENVG_VERSION_1_1
+ case VG_GLYPH_ORIGIN:
+ state->glyph_origin[0].f = values[0];
+ state->glyph_origin[1].f = values[1];
+ state->glyph_origin[0].i = values[0];
+ state->glyph_origin[1].i = values[1];
+ break;
+#endif
+
+ case VG_MAX_SCISSOR_RECTS:
+ case VG_MAX_DASH_COUNT:
+ case VG_MAX_KERNEL_SIZE:
+ case VG_MAX_SEPARABLE_KERNEL_SIZE:
+ case VG_MAX_COLOR_RAMP_STOPS:
+ case VG_MAX_IMAGE_WIDTH:
+ case VG_MAX_IMAGE_HEIGHT:
+ case VG_MAX_IMAGE_PIXELS:
+ case VG_MAX_IMAGE_BYTES:
+ case VG_MAX_GAUSSIAN_STD_DEVIATION:
+ case VG_MAX_FLOAT:
+ break;
+
+ default:
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ break;
+ }
+}
+
+VGfloat vgGetf(VGParamType type)
+{
+ struct vg_context *ctx = vg_current_context();
+ const struct vg_state *state = current_state();
+ VGErrorCode error = VG_NO_ERROR;
+ VGfloat value = 0.0f;
+
+ switch(type) {
+ case VG_MATRIX_MODE:
+ case VG_FILL_RULE:
+ case VG_IMAGE_QUALITY:
+ case VG_RENDERING_QUALITY:
+ case VG_BLEND_MODE:
+ case VG_IMAGE_MODE:
+#ifdef OPENVG_VERSION_1_1
+ case VG_COLOR_TRANSFORM:
+#endif
+ case VG_STROKE_CAP_STYLE:
+ case VG_STROKE_JOIN_STYLE:
+ case VG_STROKE_DASH_PHASE_RESET:
+ case VG_MASKING:
+ case VG_SCISSORING:
+ case VG_PIXEL_LAYOUT:
+ case VG_SCREEN_LAYOUT:
+ case VG_FILTER_FORMAT_LINEAR:
+ case VG_FILTER_FORMAT_PREMULTIPLIED:
+ case VG_FILTER_CHANNEL_MASK:
+ return vgGeti(type);
+ break;
+ case VG_STROKE_LINE_WIDTH:
+ value = state->stroke.line_width.f;
+ break;
+ case VG_STROKE_MITER_LIMIT:
+ value = state->stroke.miter_limit.f;
+ break;
+ case VG_STROKE_DASH_PHASE:
+ value = state->stroke.dash_phase.f;
+ break;
+
+ case VG_MAX_SCISSOR_RECTS:
+ case VG_MAX_DASH_COUNT:
+ case VG_MAX_KERNEL_SIZE:
+ case VG_MAX_SEPARABLE_KERNEL_SIZE:
+ case VG_MAX_COLOR_RAMP_STOPS:
+ case VG_MAX_IMAGE_WIDTH:
+ case VG_MAX_IMAGE_HEIGHT:
+ case VG_MAX_IMAGE_PIXELS:
+ case VG_MAX_IMAGE_BYTES:
+ case VG_MAX_GAUSSIAN_STD_DEVIATION:
+ return vgGeti(type);
+ break;
+ case VG_MAX_FLOAT:
+ value = 1e+10;/*must be at least 1e+10*/
+ break;
+ default:
+ error = VG_ILLEGAL_ARGUMENT_ERROR;
+ break;
+ }
+ vg_set_error(ctx, error);
+ return value;
+}
+
+VGint vgGeti(VGParamType type)
+{
+ const struct vg_state *state = current_state();
+ struct vg_context *ctx = vg_current_context();
+ VGErrorCode error = VG_NO_ERROR;
+ VGint value = 0;
+
+ switch(type) {
+ case VG_MATRIX_MODE:
+ value = state->matrix_mode;
+ break;
+ case VG_FILL_RULE:
+ value = state->fill_rule;
+ break;
+ case VG_IMAGE_QUALITY:
+ value = state->image_quality;
+ break;
+ case VG_RENDERING_QUALITY:
+ value = state->rendering_quality;
+ break;
+ case VG_BLEND_MODE:
+ value = state->blend_mode;
+ break;
+ case VG_IMAGE_MODE:
+ value = state->image_mode;
+ break;
+#ifdef OPENVG_VERSION_1_1
+ case VG_COLOR_TRANSFORM:
+ value = state->color_transform;
+ break;
+#endif
+ case VG_STROKE_LINE_WIDTH:
+ value = state->stroke.line_width.i;
+ break;
+ case VG_STROKE_CAP_STYLE:
+ value = state->stroke.cap_style;
+ break;
+ case VG_STROKE_JOIN_STYLE:
+ value = state->stroke.join_style;
+ break;
+ case VG_STROKE_MITER_LIMIT:
+ value = state->stroke.miter_limit.i;
+ break;
+ case VG_STROKE_DASH_PHASE:
+ value = state->stroke.dash_phase.i;
+ break;
+ case VG_STROKE_DASH_PHASE_RESET:
+ value = state->stroke.dash_phase_reset;
+ break;
+ case VG_MASKING:
+ value = state->masking;
+ break;
+ case VG_SCISSORING:
+ value = state->scissoring;
+ break;
+ case VG_PIXEL_LAYOUT:
+ value = state->pixel_layout;
+ break;
+ case VG_SCREEN_LAYOUT:
+ value = state->screen_layout;
+ break;
+ case VG_FILTER_FORMAT_LINEAR:
+ value = state->filter_format_linear;
+ break;
+ case VG_FILTER_FORMAT_PREMULTIPLIED:
+ value = state->filter_format_premultiplied;
+ break;
+ case VG_FILTER_CHANNEL_MASK:
+ value = state->filter_channel_mask;
+ break;
+
+ case VG_MAX_SCISSOR_RECTS:
+ value = 32; /*must be at least 32*/
+ break;
+ case VG_MAX_DASH_COUNT:
+ value = 16; /*must be at least 16*/
+ break;
+ case VG_MAX_KERNEL_SIZE:
+ value = 7; /*must be at least 7*/
+ break;
+ case VG_MAX_SEPARABLE_KERNEL_SIZE:
+ value = 15; /*must be at least 15*/
+ break;
+ case VG_MAX_COLOR_RAMP_STOPS:
+ value = 256; /*must be at least 32*/
+ break;
+ case VG_MAX_IMAGE_WIDTH:
+ value = 2048;
+ break;
+ case VG_MAX_IMAGE_HEIGHT:
+ value = 2048;
+ break;
+ case VG_MAX_IMAGE_PIXELS:
+ value = 2048*2048;
+ break;
+ case VG_MAX_IMAGE_BYTES:
+ value = 2048*2048 * 4;
+ break;
+ case VG_MAX_GAUSSIAN_STD_DEVIATION:
+ value = 128; /*must be at least 128*/
+ break;
+
+ case VG_MAX_FLOAT: {
+ VGfloat val = vgGetf(type);
+ value = float_to_int_floor(*((VGuint*)&val));
+ }
+ break;
+ default:
+ error = VG_ILLEGAL_ARGUMENT_ERROR;
+ break;
+ }
+ vg_set_error(ctx, error);
+ return value;
+}
+
+VGint vgGetVectorSize(VGParamType type)
+{
+ struct vg_context *ctx = vg_current_context();
+ const struct vg_state *state = current_state();
+ switch(type) {
+ case VG_MATRIX_MODE:
+ case VG_FILL_RULE:
+ case VG_IMAGE_QUALITY:
+ case VG_RENDERING_QUALITY:
+ case VG_BLEND_MODE:
+ case VG_IMAGE_MODE:
+ return 1;
+ case VG_SCISSOR_RECTS:
+ return state->scissor_rects_num * 4;
+#ifdef OPENVG_VERSION_1_1
+ case VG_COLOR_TRANSFORM:
+ return 1;
+ case VG_COLOR_TRANSFORM_VALUES:
+ return 8;
+#endif
+ case VG_STROKE_LINE_WIDTH:
+ case VG_STROKE_CAP_STYLE:
+ case VG_STROKE_JOIN_STYLE:
+ case VG_STROKE_MITER_LIMIT:
+ return 1;
+ case VG_STROKE_DASH_PATTERN:
+ return state->stroke.dash_pattern_num;
+ case VG_STROKE_DASH_PHASE:
+ return 1;
+ case VG_STROKE_DASH_PHASE_RESET:
+ return 1;
+ case VG_TILE_FILL_COLOR:
+ return 4;
+ case VG_CLEAR_COLOR:
+ return 4;
+#ifdef OPENVG_VERSION_1_1
+ case VG_GLYPH_ORIGIN:
+ return 2;
+#endif
+ case VG_MASKING:
+ return 1;
+ case VG_SCISSORING:
+ return 1;
+ case VG_PIXEL_LAYOUT:
+ return 1;
+ case VG_SCREEN_LAYOUT:
+ return 1;
+ case VG_FILTER_FORMAT_LINEAR:
+ return 1;
+ case VG_FILTER_FORMAT_PREMULTIPLIED:
+ return 1;
+ case VG_FILTER_CHANNEL_MASK:
+ return 1;
+
+ case VG_MAX_COLOR_RAMP_STOPS:
+ return 1;
+ case VG_MAX_SCISSOR_RECTS:
+ case VG_MAX_DASH_COUNT:
+ case VG_MAX_KERNEL_SIZE:
+ case VG_MAX_SEPARABLE_KERNEL_SIZE:
+ case VG_MAX_IMAGE_WIDTH:
+ case VG_MAX_IMAGE_HEIGHT:
+ case VG_MAX_IMAGE_PIXELS:
+ case VG_MAX_IMAGE_BYTES:
+ case VG_MAX_FLOAT:
+ case VG_MAX_GAUSSIAN_STD_DEVIATION:
+ return 1;
+ default:
+ if (ctx)
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return 0;
+ }
+}
+
+void vgGetfv(VGParamType type, VGint count,
+ VGfloat * values)
+{
+ const struct vg_state *state = current_state();
+ struct vg_context *ctx = vg_current_context();
+ VGint real_count = vgGetVectorSize(type);
+
+ if (!values || count <= 0 || count > real_count || !is_aligned(values)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ switch(type) {
+ case VG_MATRIX_MODE:
+ case VG_FILL_RULE:
+ case VG_IMAGE_QUALITY:
+ case VG_RENDERING_QUALITY:
+ case VG_BLEND_MODE:
+ case VG_IMAGE_MODE:
+#ifdef OPENVG_VERSION_1_1
+ case VG_COLOR_TRANSFORM:
+#endif
+ case VG_STROKE_CAP_STYLE:
+ case VG_STROKE_JOIN_STYLE:
+ case VG_STROKE_DASH_PHASE_RESET:
+ case VG_MASKING:
+ case VG_SCISSORING:
+ case VG_PIXEL_LAYOUT:
+ case VG_SCREEN_LAYOUT:
+ case VG_FILTER_FORMAT_LINEAR:
+ case VG_FILTER_FORMAT_PREMULTIPLIED:
+ case VG_FILTER_CHANNEL_MASK:
+ case VG_MAX_SCISSOR_RECTS:
+ case VG_MAX_DASH_COUNT:
+ case VG_MAX_KERNEL_SIZE:
+ case VG_MAX_SEPARABLE_KERNEL_SIZE:
+ case VG_MAX_COLOR_RAMP_STOPS:
+ case VG_MAX_IMAGE_WIDTH:
+ case VG_MAX_IMAGE_HEIGHT:
+ case VG_MAX_IMAGE_PIXELS:
+ case VG_MAX_IMAGE_BYTES:
+ case VG_MAX_GAUSSIAN_STD_DEVIATION:
+ values[0] = vgGeti(type);
+ break;
+ case VG_MAX_FLOAT:
+ values[0] = vgGetf(type);
+ break;
+ case VG_SCISSOR_RECTS: {
+ VGint i;
+ for (i = 0; i < count; ++i) {
+ values[i] = state->scissor_rects[i].f;
+ }
+ }
+ break;
+#ifdef OPENVG_VERSION_1_1
+ case VG_COLOR_TRANSFORM_VALUES: {
+ memcpy(values, state->color_transform_values,
+ sizeof(VGfloat) * count);
+ }
+ break;
+#endif
+ case VG_STROKE_LINE_WIDTH:
+ values[0] = state->stroke.line_width.f;
+ break;
+ case VG_STROKE_MITER_LIMIT:
+ values[0] = state->stroke.miter_limit.f;
+ break;
+ case VG_STROKE_DASH_PATTERN: {
+ VGint i;
+ for (i = 0; i < count; ++i) {
+ values[i] = state->stroke.dash_pattern[i].f;
+ }
+ }
+ break;
+ case VG_STROKE_DASH_PHASE:
+ values[0] = state->stroke.dash_phase.f;
+ break;
+ case VG_TILE_FILL_COLOR:
+ values[0] = state->tile_fill_color[0];
+ values[1] = state->tile_fill_color[1];
+ values[2] = state->tile_fill_color[2];
+ values[3] = state->tile_fill_color[3];
+ break;
+ case VG_CLEAR_COLOR:
+ values[0] = state->clear_color[0];
+ values[1] = state->clear_color[1];
+ values[2] = state->clear_color[2];
+ values[3] = state->clear_color[3];
+ break;
+#ifdef OPENVG_VERSION_1_1
+ case VG_GLYPH_ORIGIN:
+ values[0] = state->glyph_origin[0].f;
+ values[1] = state->glyph_origin[1].f;
+ break;
+#endif
+ default:
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ break;
+ }
+}
+
+void vgGetiv(VGParamType type, VGint count,
+ VGint * values)
+{
+ const struct vg_state *state = current_state();
+ struct vg_context *ctx = vg_current_context();
+ VGint real_count = vgGetVectorSize(type);
+
+ if (!values || count <= 0 || count > real_count || !is_aligned(values)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ switch(type) {
+ case VG_MATRIX_MODE:
+ case VG_FILL_RULE:
+ case VG_IMAGE_QUALITY:
+ case VG_RENDERING_QUALITY:
+ case VG_BLEND_MODE:
+ case VG_IMAGE_MODE:
+#ifdef OPENVG_VERSION_1_1
+ case VG_COLOR_TRANSFORM:
+#endif
+ case VG_STROKE_CAP_STYLE:
+ case VG_STROKE_JOIN_STYLE:
+ case VG_STROKE_DASH_PHASE_RESET:
+ case VG_MASKING:
+ case VG_SCISSORING:
+ case VG_PIXEL_LAYOUT:
+ case VG_SCREEN_LAYOUT:
+ case VG_FILTER_FORMAT_LINEAR:
+ case VG_FILTER_FORMAT_PREMULTIPLIED:
+ case VG_FILTER_CHANNEL_MASK:
+ case VG_MAX_SCISSOR_RECTS:
+ case VG_MAX_DASH_COUNT:
+ case VG_MAX_KERNEL_SIZE:
+ case VG_MAX_SEPARABLE_KERNEL_SIZE:
+ case VG_MAX_COLOR_RAMP_STOPS:
+ case VG_MAX_IMAGE_WIDTH:
+ case VG_MAX_IMAGE_HEIGHT:
+ case VG_MAX_IMAGE_PIXELS:
+ case VG_MAX_IMAGE_BYTES:
+ case VG_MAX_GAUSSIAN_STD_DEVIATION:
+ values[0] = vgGeti(type);
+ break;
+ case VG_MAX_FLOAT: {
+ VGfloat val = vgGetf(type);
+ values[0] = float_to_int_floor(*((VGuint*)&val));
+ }
+ break;
+ case VG_SCISSOR_RECTS: {
+ VGint i;
+ for (i = 0; i < count; ++i) {
+ values[i] = state->scissor_rects[i].i;
+ }
+ }
+ break;
+#ifdef OPENVG_VERSION_1_1
+ case VG_COLOR_TRANSFORM_VALUES: {
+ VGint i;
+ VGuint *x = (VGuint*)state->color_transform_values;
+ for (i = 0; i < count; ++i) {
+ values[i] = float_to_int_floor(x[i]);
+ }
+ }
+ break;
+#endif
+ case VG_STROKE_LINE_WIDTH:
+ values[0] = state->stroke.line_width.i;
+ break;
+ case VG_STROKE_MITER_LIMIT:
+ values[0] = state->stroke.miter_limit.i;
+ break;
+ case VG_STROKE_DASH_PATTERN: {
+ VGint i;
+ for (i = 0; i < count; ++i) {
+ values[i] = state->stroke.dash_pattern[i].i;
+ }
+ }
+ break;
+ case VG_STROKE_DASH_PHASE:
+ values[0] = state->stroke.dash_phase.i;
+ break;
+ case VG_TILE_FILL_COLOR:
+ values[0] = state->tile_fill_colori[0];
+ values[1] = state->tile_fill_colori[1];
+ values[2] = state->tile_fill_colori[2];
+ values[3] = state->tile_fill_colori[3];
+ break;
+ case VG_CLEAR_COLOR:
+ values[0] = state->clear_colori[0];
+ values[1] = state->clear_colori[1];
+ values[2] = state->clear_colori[2];
+ values[3] = state->clear_colori[3];
+ break;
+#ifdef OPENVG_VERSION_1_1
+ case VG_GLYPH_ORIGIN:
+ values[0] = state->glyph_origin[0].i;
+ values[1] = state->glyph_origin[1].i;
+ break;
+#endif
+ default:
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ break;
+ }
+}
+
+void vgSetParameterf(VGHandle object,
+ VGint paramType,
+ VGfloat value)
+{
+ struct vg_context *ctx = vg_current_context();
+ void *ptr = (void*)object;
+
+ if (!object || object == VG_INVALID_HANDLE || !is_aligned(ptr)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ switch(paramType) {
+ case VG_PAINT_TYPE:
+ case VG_PAINT_COLOR_RAMP_SPREAD_MODE:
+ case VG_PAINT_PATTERN_TILING_MODE:
+ vgSetParameteri(object, paramType, floor(value));
+ return;
+ break;
+ case VG_PAINT_COLOR:
+ case VG_PAINT_COLOR_RAMP_STOPS:
+ case VG_PAINT_LINEAR_GRADIENT:
+ case VG_PAINT_RADIAL_GRADIENT:
+ /* it's an error if paramType refers to a vector parameter */
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ break;
+ case VG_PAINT_COLOR_RAMP_PREMULTIPLIED: {
+ struct vg_paint *p = (struct vg_paint *)object;
+ paint_set_color_ramp_premultiplied(p, value);
+ }
+ break;
+
+ case VG_PATH_DATATYPE:
+ case VG_PATH_FORMAT:
+ case VG_PATH_SCALE:
+ case VG_PATH_BIAS:
+ case VG_PATH_NUM_SEGMENTS:
+ case VG_PATH_NUM_COORDS:
+
+ case VG_IMAGE_FORMAT:
+ case VG_IMAGE_WIDTH:
+ case VG_IMAGE_HEIGHT:
+#ifdef OPENVG_VERSION_1_1
+   case VG_FONT_NUM_GLYPHS:
+#endif
+      /* read-only; don't produce an error */
+      break;
+ default:
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ break;
+ }
+}
+
+void vgSetParameteri(VGHandle object,
+ VGint paramType,
+ VGint value)
+{
+ struct vg_context *ctx = vg_current_context();
+ void *ptr = (void*)object;
+
+ if (!object || object == VG_INVALID_HANDLE || !is_aligned(ptr)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ switch(paramType) {
+ case VG_PAINT_TYPE:
+ if (value < VG_PAINT_TYPE_COLOR ||
+ value > VG_PAINT_TYPE_PATTERN)
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ else {
+ struct vg_paint *paint = (struct vg_paint *)ptr;
+ paint_set_type(paint, value);
+ }
+ break;
+ case VG_PAINT_COLOR:
+ case VG_PAINT_COLOR_RAMP_STOPS:
+ case VG_PAINT_LINEAR_GRADIENT:
+ case VG_PAINT_RADIAL_GRADIENT:
+ /* it's an error if paramType refers to a vector parameter */
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ break;
+ case VG_PAINT_COLOR_RAMP_SPREAD_MODE:
+ if (value < VG_COLOR_RAMP_SPREAD_PAD ||
+ value > VG_COLOR_RAMP_SPREAD_REFLECT)
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ else {
+ struct vg_paint *paint = (struct vg_paint *)ptr;
+ paint_set_spread_mode(paint, value);
+ }
+ break;
+ case VG_PAINT_COLOR_RAMP_PREMULTIPLIED: {
+ struct vg_paint *p = (struct vg_paint *)object;
+ paint_set_color_ramp_premultiplied(p, value);
+ }
+ break;
+ case VG_PAINT_PATTERN_TILING_MODE:
+ if (value < VG_TILE_FILL ||
+ value > VG_TILE_REFLECT)
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ else {
+ struct vg_paint *paint = (struct vg_paint *)ptr;
+ paint_set_pattern_tiling(paint, value);
+ }
+ break;
+
+ case VG_PATH_DATATYPE:
+ case VG_PATH_FORMAT:
+ case VG_PATH_SCALE:
+ case VG_PATH_BIAS:
+ case VG_PATH_NUM_SEGMENTS:
+ case VG_PATH_NUM_COORDS:
+
+ case VG_IMAGE_FORMAT:
+ case VG_IMAGE_WIDTH:
+ case VG_IMAGE_HEIGHT:
+#ifdef OPENVG_VERSION_1_1
+   case VG_FONT_NUM_GLYPHS:
+#endif
+      /* read-only; don't produce an error */
+      break;
+ default:
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+}
+
+void vgSetParameterfv(VGHandle object,
+ VGint paramType,
+ VGint count,
+ const VGfloat * values)
+{
+ struct vg_context *ctx = vg_current_context();
+ void *ptr = (void*)object;
+ VGint real_count = vgGetParameterVectorSize(object, paramType);
+
+ if (!object || object == VG_INVALID_HANDLE || !is_aligned(ptr)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ if (count < 0 || count < real_count ||
+ (values == NULL && count != 0) ||
+ !is_aligned(values)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ switch(paramType) {
+ case VG_PAINT_TYPE:
+ case VG_PAINT_COLOR_RAMP_SPREAD_MODE:
+ case VG_PAINT_COLOR_RAMP_PREMULTIPLIED:
+ case VG_PAINT_PATTERN_TILING_MODE:
+ if (count != 1)
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ else
+ vgSetParameterf(object, paramType, values[0]);
+ return;
+ break;
+ case VG_PAINT_COLOR: {
+ if (count != 4)
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ else {
+ struct vg_paint *paint = (struct vg_paint *)object;
+ paint_set_color(paint, values);
+ }
+ }
+ break;
+ case VG_PAINT_COLOR_RAMP_STOPS: {
+ if (count && count < 4)
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ else {
+ struct vg_paint *paint = (struct vg_paint *)object;
+ count = MIN2(count, VEGA_MAX_COLOR_RAMP_STOPS);
+ paint_set_ramp_stops(paint, values, count);
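+         /* also keep an integer copy of the stops for the integer getters */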
+ {
+ VGint stopsi[VEGA_MAX_COLOR_RAMP_STOPS];
+ int i = 0;
+ for (i = 0; i < count; ++i) {
+ stopsi[i] = float_to_int_floor(*((VGuint*)(values + i)));
+ }
+ paint_set_ramp_stopsi(paint, stopsi, count);
+ }
+ }
+ }
+ break;
+ case VG_PAINT_LINEAR_GRADIENT: {
+ if (count != 4)
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ else {
+ struct vg_paint *paint = (struct vg_paint *)object;
+ paint_set_linear_gradient(paint, values);
+ {
+ VGint vals[4];
+ vals[0] = FLT_TO_INT(values[0]);
+ vals[1] = FLT_TO_INT(values[1]);
+ vals[2] = FLT_TO_INT(values[2]);
+ vals[3] = FLT_TO_INT(values[3]);
+ paint_set_linear_gradienti(paint, vals);
+ }
+ }
+ }
+ break;
+ case VG_PAINT_RADIAL_GRADIENT: {
+ if (count != 5)
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ else {
+ struct vg_paint *paint = (struct vg_paint *)object;
+ paint_set_radial_gradient(paint, values);
+ {
+ VGint vals[5];
+ vals[0] = FLT_TO_INT(values[0]);
+ vals[1] = FLT_TO_INT(values[1]);
+ vals[2] = FLT_TO_INT(values[2]);
+ vals[3] = FLT_TO_INT(values[3]);
+ vals[4] = FLT_TO_INT(values[4]);
+ paint_set_radial_gradienti(paint, vals);
+ }
+ }
+ }
+ break;
+
+ case VG_PATH_DATATYPE:
+ case VG_PATH_FORMAT:
+ case VG_PATH_SCALE:
+ case VG_PATH_BIAS:
+ case VG_PATH_NUM_SEGMENTS:
+ case VG_PATH_NUM_COORDS:
+#ifdef OPENVG_VERSION_1_1
+   case VG_FONT_NUM_GLYPHS:
+#endif
+      /* read-only; don't produce an error */
+      break;
+ default:
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+}
+
+void vgSetParameteriv(VGHandle object,
+ VGint paramType,
+ VGint count,
+ const VGint * values)
+{
+ struct vg_context *ctx = vg_current_context();
+ void *ptr = (void*)object;
+ VGint real_count = vgGetParameterVectorSize(object, paramType);
+
+ if (!object || object == VG_INVALID_HANDLE || !is_aligned(ptr)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ if (count < 0 || count < real_count ||
+ (values == NULL && count != 0) ||
+ !is_aligned(values)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ switch(paramType) {
+ case VG_PAINT_TYPE:
+ case VG_PAINT_COLOR_RAMP_SPREAD_MODE:
+ case VG_PAINT_COLOR_RAMP_PREMULTIPLIED:
+ case VG_PAINT_PATTERN_TILING_MODE:
+ if (count != 1)
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ else
+ vgSetParameteri(object, paramType, values[0]);
+ return;
+ break;
+ case VG_PAINT_COLOR: {
+ if (count != 4)
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ else {
+ struct vg_paint *paint = (struct vg_paint *)object;
+ paint_set_coloriv(paint, values);
+ }
+ }
+ break;
+ case VG_PAINT_COLOR_RAMP_STOPS: {
+ if ((count % 5))
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ else {
+ VGfloat *vals = 0;
+ int i;
+ struct vg_paint *paint = (struct vg_paint *)object;
+ if (count) {
+ vals = malloc(sizeof(VGfloat)*count);
+ for (i = 0; i < count; ++i)
+ vals[i] = values[i];
+ }
+
+ paint_set_ramp_stopsi(paint, values, count);
+ paint_set_ramp_stops(paint, vals, count);
+ free(vals);
+ }
+ }
+ break;
+ case VG_PAINT_LINEAR_GRADIENT: {
+ if (count != 4)
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ else {
+ VGfloat vals[4];
+ struct vg_paint *paint = (struct vg_paint *)object;
+ vals[0] = values[0];
+ vals[1] = values[1];
+ vals[2] = values[2];
+ vals[3] = values[3];
+ paint_set_linear_gradient(paint, vals);
+ paint_set_linear_gradienti(paint, values);
+ }
+ }
+ break;
+ case VG_PAINT_RADIAL_GRADIENT: {
+ if (count != 5)
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ else {
+ VGfloat vals[5];
+ struct vg_paint *paint = (struct vg_paint *)object;
+ vals[0] = values[0];
+ vals[1] = values[1];
+ vals[2] = values[2];
+ vals[3] = values[3];
+ vals[4] = values[4];
+ paint_set_radial_gradient(paint, vals);
+ paint_set_radial_gradienti(paint, values);
+ }
+ }
+ break;
+ case VG_PATH_DATATYPE:
+ case VG_PATH_FORMAT:
+ case VG_PATH_SCALE:
+ case VG_PATH_BIAS:
+ case VG_PATH_NUM_SEGMENTS:
+ case VG_PATH_NUM_COORDS:
+      /* read-only; don't produce an error */
+ break;
+ default:
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+}
+
+VGint vgGetParameterVectorSize(VGHandle object,
+ VGint paramType)
+{
+ struct vg_context *ctx = vg_current_context();
+ void *ptr = (void*)object;
+
+ if (!ptr || object == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return 0;
+ }
+
+ switch(paramType) {
+ case VG_PAINT_TYPE:
+ case VG_PAINT_COLOR_RAMP_SPREAD_MODE:
+ case VG_PAINT_COLOR_RAMP_PREMULTIPLIED:
+ case VG_PAINT_PATTERN_TILING_MODE:
+ return 1;
+ case VG_PAINT_COLOR:
+ return 4;
+ case VG_PAINT_COLOR_RAMP_STOPS: {
+ struct vg_paint *p = (struct vg_paint *)object;
+ return paint_num_ramp_stops(p);
+ }
+ break;
+ case VG_PAINT_LINEAR_GRADIENT:
+ return 4;
+ case VG_PAINT_RADIAL_GRADIENT:
+ return 5;
+
+
+ case VG_PATH_FORMAT:
+ case VG_PATH_DATATYPE:
+ case VG_PATH_SCALE:
+ case VG_PATH_BIAS:
+ case VG_PATH_NUM_SEGMENTS:
+ case VG_PATH_NUM_COORDS:
+ return 1;
+
+ case VG_IMAGE_FORMAT:
+ case VG_IMAGE_WIDTH:
+ case VG_IMAGE_HEIGHT:
+ return 1;
+
+#ifdef OPENVG_VERSION_1_1
+ case VG_FONT_NUM_GLYPHS:
+ return 1;
+#endif
+
+ default:
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ break;
+ }
+ return 0;
+}
+
+
+VGfloat vgGetParameterf(VGHandle object,
+ VGint paramType)
+{
+ struct vg_context *ctx = vg_current_context();
+ void *ptr = (void*)object;
+
+ if (!ptr || object == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return 0;
+ }
+
+ switch(paramType) {
+ case VG_PAINT_TYPE:
+ case VG_PAINT_COLOR_RAMP_SPREAD_MODE:
+ case VG_PAINT_COLOR_RAMP_PREMULTIPLIED:
+ case VG_PAINT_PATTERN_TILING_MODE:
+ return vgGetParameteri(object, paramType);
+ break;
+ case VG_PAINT_COLOR:
+ case VG_PAINT_COLOR_RAMP_STOPS:
+ case VG_PAINT_LINEAR_GRADIENT:
+ case VG_PAINT_RADIAL_GRADIENT:
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ break;
+
+ case VG_PATH_FORMAT:
+ return VG_PATH_FORMAT_STANDARD;
+ case VG_PATH_SCALE: {
+ struct path *p = (struct path*)object;
+ return path_scale(p);
+ }
+ case VG_PATH_BIAS: {
+ struct path *p = (struct path*)object;
+ return path_bias(p);
+ }
+ case VG_PATH_DATATYPE:
+ case VG_PATH_NUM_SEGMENTS:
+ case VG_PATH_NUM_COORDS:
+ return vgGetParameteri(object, paramType);
+ break;
+
+ case VG_IMAGE_FORMAT:
+ case VG_IMAGE_WIDTH:
+ case VG_IMAGE_HEIGHT:
+#ifdef OPENVG_VERSION_1_1
+   case VG_FONT_NUM_GLYPHS:
+#endif
+      return vgGetParameteri(object, paramType);
+
+ default:
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ break;
+ }
+ return 0;
+}
+
+VGint vgGetParameteri(VGHandle object,
+ VGint paramType)
+{
+ struct vg_context *ctx = vg_current_context();
+ void *ptr = (void*)object;
+
+ if (!ptr || object == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return 0;
+ }
+
+ switch(paramType) {
+ case VG_PAINT_TYPE: {
+ struct vg_paint *paint = (struct vg_paint *)ptr;
+ return paint_type(paint);
+ }
+ break;
+ case VG_PAINT_COLOR_RAMP_SPREAD_MODE: {
+ struct vg_paint *p = (struct vg_paint *)object;
+ return paint_spread_mode(p);
+ }
+ case VG_PAINT_COLOR_RAMP_PREMULTIPLIED: {
+ struct vg_paint *p = (struct vg_paint *)object;
+ return paint_color_ramp_premultiplied(p);
+ }
+ break;
+ case VG_PAINT_PATTERN_TILING_MODE: {
+ struct vg_paint *p = (struct vg_paint *)object;
+ return paint_pattern_tiling(p);
+ }
+ break;
+ case VG_PAINT_COLOR:
+ case VG_PAINT_COLOR_RAMP_STOPS:
+ case VG_PAINT_LINEAR_GRADIENT:
+ case VG_PAINT_RADIAL_GRADIENT:
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ break;
+
+ case VG_PATH_FORMAT:
+ return VG_PATH_FORMAT_STANDARD;
+ case VG_PATH_SCALE:
+ case VG_PATH_BIAS:
+ return vgGetParameterf(object, paramType);
+ case VG_PATH_DATATYPE: {
+ struct path *p = (struct path*)object;
+ return path_datatype(p);
+ }
+ case VG_PATH_NUM_SEGMENTS: {
+ struct path *p = (struct path*)object;
+ return path_num_segments(p);
+ }
+ case VG_PATH_NUM_COORDS: {
+ struct path *p = (struct path*)object;
+ return path_num_coords(p);
+ }
+ break;
+
+ case VG_IMAGE_FORMAT: {
+ struct vg_image *img = (struct vg_image*)object;
+ return img->format;
+ }
+ break;
+ case VG_IMAGE_WIDTH: {
+ struct vg_image *img = (struct vg_image*)object;
+ return img->width;
+ }
+ break;
+ case VG_IMAGE_HEIGHT: {
+ struct vg_image *img = (struct vg_image*)object;
+ return img->height;
+ }
+ break;
+
+#ifdef OPENVG_VERSION_1_1
+ case VG_FONT_NUM_GLYPHS: {
+ return 1;
+ }
+ break;
+#endif
+
+ default:
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ break;
+ }
+ return 0;
+}
+
+void vgGetParameterfv(VGHandle object,
+ VGint paramType,
+ VGint count,
+ VGfloat * values)
+{
+ struct vg_context *ctx = vg_current_context();
+ void *ptr = (void*)object;
+ VGint real_count = vgGetParameterVectorSize(object, paramType);
+
+ if (!ptr || object == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ if (!values || count <= 0 || count > real_count ||
+ !is_aligned(values)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ switch(paramType) {
+ case VG_PAINT_TYPE: {
+ struct vg_paint *p = (struct vg_paint *)object;
+ values[0] = paint_type(p);
+ }
+ break;
+ case VG_PAINT_COLOR_RAMP_SPREAD_MODE: {
+ struct vg_paint *p = (struct vg_paint *)object;
+ values[0] = paint_spread_mode(p);
+ }
+ break;
+ case VG_PAINT_COLOR_RAMP_PREMULTIPLIED: {
+ struct vg_paint *p = (struct vg_paint *)object;
+ values[0] = paint_color_ramp_premultiplied(p);
+ }
+ break;
+ case VG_PAINT_PATTERN_TILING_MODE: {
+ values[0] = vgGetParameterf(object, paramType);
+ }
+ break;
+ case VG_PAINT_COLOR: {
+ struct vg_paint *paint = (struct vg_paint *)object;
+ paint_get_color(paint, values);
+ }
+ break;
+ case VG_PAINT_COLOR_RAMP_STOPS: {
+ struct vg_paint *paint = (struct vg_paint *)object;
+ paint_ramp_stops(paint, values, count);
+ }
+ break;
+ case VG_PAINT_LINEAR_GRADIENT: {
+ struct vg_paint *paint = (struct vg_paint *)object;
+ paint_linear_gradient(paint, values);
+ }
+ break;
+ case VG_PAINT_RADIAL_GRADIENT: {
+ struct vg_paint *paint = (struct vg_paint *)object;
+ paint_radial_gradient(paint, values);
+ }
+ break;
+
+ case VG_PATH_FORMAT:
+ case VG_PATH_DATATYPE:
+ case VG_PATH_NUM_SEGMENTS:
+ case VG_PATH_NUM_COORDS:
+ values[0] = vgGetParameteri(object, paramType);
+ break;
+ case VG_PATH_SCALE:
+ case VG_PATH_BIAS:
+ values[0] = vgGetParameterf(object, paramType);
+ break;
+
+ case VG_IMAGE_FORMAT:
+ case VG_IMAGE_WIDTH:
+ case VG_IMAGE_HEIGHT:
+#ifdef OPENVG_VERSION_1_1
+   case VG_FONT_NUM_GLYPHS:
+#endif
+      values[0] = vgGetParameteri(object, paramType);
+      break;
+
+ default:
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ break;
+ }
+}
+
+void vgGetParameteriv(VGHandle object,
+ VGint paramType,
+ VGint count,
+ VGint * values)
+{
+ struct vg_context *ctx = vg_current_context();
+ void *ptr = (void*)object;
+ VGint real_count = vgGetParameterVectorSize(object, paramType);
+
+ if (!ptr || object == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ if (!values || count <= 0 || count > real_count ||
+ !is_aligned(values)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ switch(paramType) {
+ case VG_PAINT_TYPE:
+ case VG_PAINT_COLOR_RAMP_SPREAD_MODE:
+ case VG_PAINT_COLOR_RAMP_PREMULTIPLIED:
+ case VG_PAINT_PATTERN_TILING_MODE:
+#ifdef OPENVG_VERSION_1_1
+   case VG_FONT_NUM_GLYPHS:
+#endif
+      values[0] = vgGetParameteri(object, paramType);
+      break;
+ case VG_PAINT_COLOR: {
+ struct vg_paint *paint = (struct vg_paint *)object;
+ paint_get_coloriv(paint, values);
+ }
+ break;
+ case VG_PAINT_COLOR_RAMP_STOPS: {
+ struct vg_paint *paint = (struct vg_paint *)object;
+ paint_ramp_stopsi(paint, values, count);
+ }
+ break;
+ case VG_PAINT_LINEAR_GRADIENT: {
+ struct vg_paint *paint = (struct vg_paint *)object;
+ paint_linear_gradienti(paint, values);
+ }
+ break;
+ case VG_PAINT_RADIAL_GRADIENT: {
+ struct vg_paint *paint = (struct vg_paint *)object;
+ paint_radial_gradienti(paint, values);
+ }
+ break;
+
+ case VG_PATH_SCALE:
+ case VG_PATH_BIAS:
+ values[0] = vgGetParameterf(object, paramType);
+ break;
+ case VG_PATH_FORMAT:
+ case VG_PATH_DATATYPE:
+ case VG_PATH_NUM_SEGMENTS:
+ case VG_PATH_NUM_COORDS:
+ values[0] = vgGetParameteri(object, paramType);
+ break;
+
+ case VG_IMAGE_FORMAT:
+ case VG_IMAGE_WIDTH:
+ case VG_IMAGE_HEIGHT:
+ values[0] = vgGetParameteri(object, paramType);
+ break;
+
+ default:
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ break;
+ }
+}
diff --git a/src/gallium/state_trackers/vega/api_path.c b/src/gallium/state_trackers/vega/api_path.c
new file mode 100644
index 0000000000..a6b7a2bb93
--- /dev/null
+++ b/src/gallium/state_trackers/vega/api_path.c
@@ -0,0 +1,488 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "VG/openvg.h"
+
+#include "vg_context.h"
+#include "path.h"
+#include "polygon.h"
+#include "paint.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_inlines.h"
+#include "util/u_draw_quad.h"
+
+VGPath vgCreatePath(VGint pathFormat,
+ VGPathDatatype datatype,
+ VGfloat scale, VGfloat bias,
+ VGint segmentCapacityHint,
+ VGint coordCapacityHint,
+ VGbitfield capabilities)
+{
+ struct vg_context *ctx = vg_current_context();
+
+ if (pathFormat != VG_PATH_FORMAT_STANDARD) {
+ vg_set_error(ctx, VG_UNSUPPORTED_PATH_FORMAT_ERROR);
+ return VG_INVALID_HANDLE;
+ }
+ if (datatype < VG_PATH_DATATYPE_S_8 ||
+ datatype > VG_PATH_DATATYPE_F) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return VG_INVALID_HANDLE;
+ }
+ if (!scale) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return VG_INVALID_HANDLE;
+ }
+
+ return (VGPath)path_create(datatype, scale, bias,
+ segmentCapacityHint, coordCapacityHint,
+ capabilities);
+}
+
+void vgClearPath(VGPath path, VGbitfield capabilities)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct path *p = 0;
+
+ if (path == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ p = (struct path *)path;
+ path_clear(p, capabilities);
+}
+
+void vgDestroyPath(VGPath p)
+{
+ struct path *path = 0;
+ struct vg_context *ctx = vg_current_context();
+
+ if (p == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ path = (struct path *)p;
+ path_destroy(path);
+}
+
+void vgRemovePathCapabilities(VGPath path,
+ VGbitfield capabilities)
+{
+ struct vg_context *ctx = vg_current_context();
+ VGbitfield current;
+ struct path *p;
+
+ if (path == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ p = (struct path*)path;
+ current = path_capabilities(p);
+ path_set_capabilities(p, (current &
+ (~(capabilities & VG_PATH_CAPABILITY_ALL))));
+}
+
+VGbitfield vgGetPathCapabilities(VGPath path)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct path *p = 0;
+
+ if (path == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return 0;
+ }
+ p = (struct path*)path;
+ return path_capabilities(p);
+}
+
+void vgAppendPath(VGPath dstPath, VGPath srcPath)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct path *src, *dst;
+
+ if (dstPath == VG_INVALID_HANDLE || srcPath == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ src = (struct path *)srcPath;
+ dst = (struct path *)dstPath;
+
+ if (!(path_capabilities(src) & VG_PATH_CAPABILITY_APPEND_FROM) ||
+ !(path_capabilities(dst) & VG_PATH_CAPABILITY_APPEND_TO)) {
+ vg_set_error(ctx, VG_PATH_CAPABILITY_ERROR);
+ return;
+ }
+ path_append_path(dst, src);
+}
+
+void vgAppendPathData(VGPath dstPath,
+ VGint numSegments,
+ const VGubyte * pathSegments,
+ const void * pathData)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct path *p = 0;
+ VGint i;
+
+ if (dstPath == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (!pathSegments) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ if (numSegments <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
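+   /* every segment command must be a valid OpenVG segment type */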
+ for (i = 0; i < numSegments; ++i) {
+ if (pathSegments[i] < VG_CLOSE_PATH ||
+ pathSegments[i] > VG_LCWARC_TO_REL) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ }
+
+ p = (struct path*)dstPath;
+
+ if (!pathData || !is_aligned_to(pathData, path_datatype_size(p))) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ if (!(path_capabilities(p)&VG_PATH_CAPABILITY_APPEND_TO)) {
+ vg_set_error(ctx, VG_PATH_CAPABILITY_ERROR);
+ return;
+ }
+
+ path_append_data(p, numSegments, pathSegments, pathData);
+}
+
+void vgModifyPathCoords(VGPath dstPath,
+ VGint startIndex,
+ VGint numSegments,
+ const void * pathData)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct path *p = 0;
+
+ if (dstPath == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (startIndex < 0 || numSegments <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ p = (struct path *)dstPath;
+
+ if (!pathData || !is_aligned_to(pathData, path_datatype_size(p))) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ if (startIndex + numSegments > path_num_segments(p)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ if (!(path_capabilities(p)&VG_PATH_CAPABILITY_MODIFY)) {
+ vg_set_error(ctx, VG_PATH_CAPABILITY_ERROR);
+ return;
+ }
+ path_modify_coords(p, startIndex, numSegments, pathData);
+}
+
+void vgTransformPath(VGPath dstPath, VGPath srcPath)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct path *src = 0, *dst = 0;
+
+ if (dstPath == VG_INVALID_HANDLE || srcPath == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ src = (struct path *)srcPath;
+ dst = (struct path *)dstPath;
+
+ if (!(path_capabilities(src) & VG_PATH_CAPABILITY_TRANSFORM_FROM) ||
+ !(path_capabilities(dst) & VG_PATH_CAPABILITY_TRANSFORM_TO)) {
+ vg_set_error(ctx, VG_PATH_CAPABILITY_ERROR);
+ return;
+ }
+ path_transform(dst, src);
+}
+
+VGboolean vgInterpolatePath(VGPath dstPath,
+ VGPath startPath,
+ VGPath endPath,
+ VGfloat amount)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct path *start = 0, *dst = 0, *end = 0;
+
+ if (dstPath == VG_INVALID_HANDLE ||
+ startPath == VG_INVALID_HANDLE ||
+ endPath == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return VG_FALSE;
+ }
+ dst = (struct path *)dstPath;
+ start = (struct path *)startPath;
+ end = (struct path *)endPath;
+
+ if (!(path_capabilities(dst) & VG_PATH_CAPABILITY_INTERPOLATE_TO) ||
+ !(path_capabilities(start) & VG_PATH_CAPABILITY_INTERPOLATE_FROM) ||
+ !(path_capabilities(end) & VG_PATH_CAPABILITY_INTERPOLATE_FROM)) {
+ vg_set_error(ctx, VG_PATH_CAPABILITY_ERROR);
+ return VG_FALSE;
+ }
+
+ return path_interpolate(dst,
+ start, end, amount);
+}
+
+VGfloat vgPathLength(VGPath path,
+ VGint startSegment,
+ VGint numSegments)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct path *p = 0;
+
+ if (path == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return -1;
+ }
+ if (startSegment < 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return -1;
+ }
+ if (numSegments <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return -1;
+ }
+ p = (struct path*)path;
+
+ if (!(path_capabilities(p) & VG_PATH_CAPABILITY_PATH_LENGTH)) {
+ vg_set_error(ctx, VG_PATH_CAPABILITY_ERROR);
+ return -1;
+ }
+ if (startSegment + numSegments > path_num_segments(p)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return -1;
+ }
+
+ return path_length(p, startSegment, numSegments);
+}
+
+void vgPointAlongPath(VGPath path,
+ VGint startSegment,
+ VGint numSegments,
+ VGfloat distance,
+ VGfloat * x, VGfloat * y,
+ VGfloat * tangentX,
+ VGfloat * tangentY)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct path *p = 0;
+ VGbitfield caps;
+
+ if (path == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (startSegment < 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ if (numSegments <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ if (!is_aligned(x) || !is_aligned(y) ||
+ !is_aligned(tangentX) || !is_aligned(tangentY)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ p = (struct path*)path;
+
+ caps = path_capabilities(p);
+ if (!(caps & VG_PATH_CAPABILITY_POINT_ALONG_PATH) ||
+ !(caps & VG_PATH_CAPABILITY_TANGENT_ALONG_PATH)) {
+ vg_set_error(ctx, VG_PATH_CAPABILITY_ERROR);
+ return;
+ }
+
+ if (startSegment + numSegments > path_num_segments(p)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ {
+ VGfloat point[2], normal[2];
+ path_point(p, startSegment, numSegments, distance,
+ point, normal);
+ if (x)
+ *x = point[0];
+ if (y)
+ *y = point[1];
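+      /* path_point() reports the normal at the given distance; the tangent
+       * is that normal rotated a quarter turn, hence the sign swap below */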
+ if (tangentX)
+ *tangentX = -normal[1];
+ if (tangentY)
+ *tangentY = normal[0];
+ }
+}
+
+void vgPathBounds(VGPath path,
+ VGfloat * minX,
+ VGfloat * minY,
+ VGfloat * width,
+ VGfloat * height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct path *p = 0;
+ VGbitfield caps;
+
+ if (path == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ if (!minX || !minY || !width || !height) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ if (!is_aligned(minX) || !is_aligned(minY) ||
+ !is_aligned(width) || !is_aligned(height)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ p = (struct path*)path;
+
+ caps = path_capabilities(p);
+ if (!(caps & VG_PATH_CAPABILITY_PATH_BOUNDS)) {
+ vg_set_error(ctx, VG_PATH_CAPABILITY_ERROR);
+ return;
+ }
+
+ path_bounding_rect(p, minX, minY, width, height);
+}
+
+void vgPathTransformedBounds(VGPath path,
+ VGfloat * minX,
+ VGfloat * minY,
+ VGfloat * width,
+ VGfloat * height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct path *p = 0;
+ VGbitfield caps;
+
+ if (path == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ if (!minX || !minY || !width || !height) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ if (!is_aligned(minX) || !is_aligned(minY) ||
+ !is_aligned(width) || !is_aligned(height)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ p = (struct path*)path;
+
+ caps = path_capabilities(p);
+ if (!(caps & VG_PATH_CAPABILITY_PATH_TRANSFORMED_BOUNDS)) {
+ vg_set_error(ctx, VG_PATH_CAPABILITY_ERROR);
+ return;
+ }
+
+#if 0
+ /* faster, but seems to have precision problems... */
+ path_bounding_rect(p, minX, minY, width, height);
+ if (*width > 0 && *height > 0) {
+ VGfloat pts[] = {*minX, *minY,
+ *minX + *width, *minY,
+ *minX + *width, *minY + *height,
+ *minX, *minY + *height};
+ struct matrix *matrix = &ctx->state.vg.path_user_to_surface_matrix;
+ VGfloat maxX, maxY;
+ matrix_map_point(matrix, pts[0], pts[1], pts + 0, pts + 1);
+ matrix_map_point(matrix, pts[2], pts[3], pts + 2, pts + 3);
+ matrix_map_point(matrix, pts[4], pts[5], pts + 4, pts + 5);
+ matrix_map_point(matrix, pts[6], pts[7], pts + 6, pts + 7);
+ *minX = MIN2(pts[0], MIN2(pts[2], MIN2(pts[4], pts[6])));
+ *minY = MIN2(pts[1], MIN2(pts[3], MIN2(pts[5], pts[7])));
+ maxX = MAX2(pts[0], MAX2(pts[2], MAX2(pts[4], pts[6])));
+ maxY = MAX2(pts[1], MAX2(pts[3], MAX2(pts[5], pts[7])));
+ *width = maxX - *minX;
+ *height = maxY - *minY;
+ }
+#else
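+   /* exact but slower: transform a temporary copy of the whole path and
+    * take the bounding rect of the result */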
+ {
+ struct path *dst = path_create(VG_PATH_DATATYPE_F, 1.0, 0,
+ 0, 0, VG_PATH_CAPABILITY_ALL);
+ path_transform(dst, p);
+ path_bounding_rect(dst, minX, minY, width, height);
+ path_destroy(dst);
+ }
+#endif
+}
+
+
+void vgDrawPath(VGPath path, VGbitfield paintModes)
+{
+ struct vg_context *ctx = vg_current_context();
+
+ if (path == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ if (!(paintModes & (VG_STROKE_PATH | VG_FILL_PATH))) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ if (path_is_empty((struct path*)path))
+ return;
+ path_render((struct path*)path, paintModes);
+}
+
diff --git a/src/gallium/state_trackers/vega/api_text.c b/src/gallium/state_trackers/vega/api_text.c
new file mode 100644
index 0000000000..d8411cf3e8
--- /dev/null
+++ b/src/gallium/state_trackers/vega/api_text.c
@@ -0,0 +1,258 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "VG/openvg.h"
+
+#include "vg_context.h"
+
+#include "util/u_memory.h"
+
+#ifdef OPENVG_VERSION_1_1
+
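+/* Note: this is a minimal placeholder font object.  It only records which
+ * glyph indices have been defined, in a fixed-size array of 200 entries;
+ * glyph paths/images and origins are not stored yet, so vgDrawGlyph() and
+ * vgDrawGlyphs() below only validate their arguments. */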
+struct vg_font {
+ struct vg_object base;
+
+ VGint glyph_indices[200];
+ VGint num_glyphs;
+};
+
+VGFont vgCreateFont(VGint glyphCapacityHint)
+{
+ struct vg_font *font = 0;
+ struct vg_context *ctx = vg_current_context();
+
+ if (glyphCapacityHint < 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return VG_INVALID_HANDLE;
+ }
+
+ font = CALLOC_STRUCT(vg_font);
+ vg_init_object(&font->base, ctx, VG_OBJECT_FONT);
+ vg_context_add_object(ctx, VG_OBJECT_FONT, font);
+ return (VGFont)font;
+}
+
+void vgDestroyFont(VGFont f)
+{
+ struct vg_font *font = (struct vg_font *)f;
+ struct vg_context *ctx = vg_current_context();
+
+ if (f == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ vg_context_remove_object(ctx, VG_OBJECT_FONT, font);
+ /*free(font);*/
+}
+
+void vgSetGlyphToPath(VGFont font,
+ VGuint glyphIndex,
+ VGPath path,
+ VGboolean isHinted,
+ VGfloat glyphOrigin [2],
+ VGfloat escapement[2])
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_object *pathObj;
+ struct vg_font *f;
+
+ if (font == VG_INVALID_HANDLE ||
+ !vg_context_is_object_valid(ctx, VG_OBJECT_FONT, (void *)font)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (!glyphOrigin || !escapement ||
+ !is_aligned(glyphOrigin) || !is_aligned(escapement)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ if (path != VG_INVALID_HANDLE &&
+ !vg_context_is_object_valid(ctx, VG_OBJECT_PATH, (void *)path)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ pathObj = (struct vg_object*)path;
+ if (pathObj && pathObj->type != VG_OBJECT_PATH) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+
+ f = (struct vg_font*)font;
+ f->glyph_indices[f->num_glyphs] = glyphIndex;
+ ++f->num_glyphs;
+}
+
+void vgSetGlyphToImage(VGFont font,
+ VGuint glyphIndex,
+ VGImage image,
+ VGfloat glyphOrigin [2],
+ VGfloat escapement[2])
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_object *img_obj;
+ struct vg_font *f;
+
+ if (font == VG_INVALID_HANDLE ||
+ !vg_context_is_object_valid(ctx, VG_OBJECT_FONT, (void *)font)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (!glyphOrigin || !escapement ||
+ !is_aligned(glyphOrigin) || !is_aligned(escapement)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ if (image != VG_INVALID_HANDLE &&
+ !vg_context_is_object_valid(ctx, VG_OBJECT_IMAGE, (void *)image)) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ img_obj = (struct vg_object*)image;
+ if (img_obj && img_obj->type != VG_OBJECT_IMAGE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ f = (struct vg_font*)font;
+ f->glyph_indices[f->num_glyphs] = glyphIndex;
+ ++f->num_glyphs;
+}
+
+static INLINE VGboolean font_contains_glyph(struct vg_font *font,
+ VGuint glyph_index)
+{
+ VGint i;
+ for (i = 0; i < font->num_glyphs; ++i) {
+ if (font->glyph_indices[i] == glyph_index) {
+ return VG_TRUE;
+ }
+ }
+ return VG_FALSE;
+}
+
+void vgClearGlyph(VGFont font,
+ VGuint glyphIndex)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_font *f;
+ VGint i;
+
+ if (font == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (glyphIndex <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ f = (struct vg_font*)font;
+ if (!font_contains_glyph(f, glyphIndex)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ for (i = 0; i < f->num_glyphs; ++i) {
+ if (f->glyph_indices[i] == glyphIndex) {
+         /* remove the glyph by moving the last entry into its slot;
+          * order within the index array is not significant */
+         f->glyph_indices[i] = f->glyph_indices[f->num_glyphs - 1];
+         --f->num_glyphs;
+ return;
+ }
+ }
+}
+
+void vgDrawGlyph(VGFont font,
+ VGuint glyphIndex,
+ VGbitfield paintModes,
+ VGboolean allowAutoHinting)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_font *f;
+
+ if (font == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (glyphIndex <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ if (paintModes & (~(VG_STROKE_PATH|VG_FILL_PATH))) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ f = (struct vg_font*)font;
+ if (!font_contains_glyph(f, glyphIndex)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+}
+
+void vgDrawGlyphs(VGFont font,
+ VGint glyphCount,
+ VGuint *glyphIndices,
+ VGfloat *adjustments_x,
+ VGfloat *adjustments_y,
+ VGbitfield paintModes,
+ VGboolean allowAutoHinting)
+{
+ struct vg_context *ctx = vg_current_context();
+ VGint i;
+ struct vg_font *f;
+
+ if (font == VG_INVALID_HANDLE) {
+ vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
+ return;
+ }
+ if (glyphCount <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ if (!glyphIndices || !is_aligned(glyphIndices)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ if (!adjustments_x || !is_aligned(adjustments_x) ||
+ !adjustments_y || !is_aligned(adjustments_y)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ if (paintModes & (~(VG_STROKE_PATH|VG_FILL_PATH))) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ f = (struct vg_font*)font;
+ for (i = 0; i < glyphCount; ++i) {
+ VGuint glyph_index = glyphIndices[i];
+ if (!font_contains_glyph(f, glyph_index)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ }
+}
+
+#endif
diff --git a/src/gallium/state_trackers/vega/api_transform.c b/src/gallium/state_trackers/vega/api_transform.c
new file mode 100644
index 0000000000..763a5ec415
--- /dev/null
+++ b/src/gallium/state_trackers/vega/api_transform.c
@@ -0,0 +1,128 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "VG/openvg.h"
+
+#include "vg_context.h"
+
+#include "matrix.h"
+
+void vgLoadIdentity(void)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct matrix *mat = vg_state_matrix(&ctx->state.vg);
+ matrix_load_identity(mat);
+}
+
+void vgLoadMatrix(const VGfloat * m)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct matrix *mat;
+
+ if (!ctx)
+ return;
+
+ if (!m || !is_aligned(m)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ mat = vg_state_matrix(&ctx->state.vg);
+ matrix_init(mat, m);
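+   /* only the image-user-to-surface matrix may be projective;
+    * all other OpenVG matrices are forced to be affine */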
+ if (!matrix_is_affine(mat)) {
+ if (ctx->state.vg.matrix_mode != VG_MATRIX_IMAGE_USER_TO_SURFACE) {
+ matrix_make_affine(mat);
+ }
+ }
+}
+
+void vgGetMatrix(VGfloat * m)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct matrix *mat;
+
+ if (!ctx)
+ return;
+
+ if (!m || !is_aligned(m)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ mat = vg_state_matrix(&ctx->state.vg);
+ memcpy(m, mat->m, sizeof(VGfloat)*9);
+}
+
+void vgMultMatrix(const VGfloat * m)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct matrix *dst, src;
+
+ if (!ctx)
+ return;
+
+ if (!m || !is_aligned(m)) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ matrix_init(&src, m);
+ dst = vg_state_matrix(&ctx->state.vg);
+ if (!matrix_is_affine(&src)) {
+ if (ctx->state.vg.matrix_mode != VG_MATRIX_IMAGE_USER_TO_SURFACE) {
+ matrix_make_affine(&src);
+ }
+ }
+ matrix_mult(dst, &src);
+
+}
+
+void vgTranslate(VGfloat tx, VGfloat ty)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct matrix *dst = vg_state_matrix(&ctx->state.vg);
+ matrix_translate(dst, tx, ty);
+}
+
+void vgScale(VGfloat sx, VGfloat sy)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct matrix *dst = vg_state_matrix(&ctx->state.vg);
+ matrix_scale(dst, sx, sy);
+}
+
+void vgShear(VGfloat shx, VGfloat shy)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct matrix *dst = vg_state_matrix(&ctx->state.vg);
+ matrix_shear(dst, shx, shy);
+}
+
+void vgRotate(VGfloat angle)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct matrix *dst = vg_state_matrix(&ctx->state.vg);
+ matrix_rotate(dst, angle);
+}
diff --git a/src/gallium/state_trackers/vega/arc.c b/src/gallium/state_trackers/vega/arc.c
new file mode 100644
index 0000000000..e74c7f0334
--- /dev/null
+++ b/src/gallium/state_trackers/vega/arc.c
@@ -0,0 +1,708 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "arc.h"
+
+#include "matrix.h"
+#include "bezier.h"
+#include "polygon.h"
+#include "stroker.h"
+#include "path.h"
+
+#include "util/u_debug.h"
+
+#include <math.h>
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846
+#endif
+
+#define DEBUG_ARCS 0
+
+static const VGfloat two_pi = M_PI * 2;
+
+
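+/* coefficients for error estimation
+   while using cubic Bézier curves for approximation
+   0 < b/a < 1/4 */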
+static const double coeffs3Low[2][4][4] = {
+ {
+ { 3.85268, -21.229, -0.330434, 0.0127842 },
+ { -1.61486, 0.706564, 0.225945, 0.263682 },
+ { -0.910164, 0.388383, 0.00551445, 0.00671814 },
+ { -0.630184, 0.192402, 0.0098871, 0.0102527 }
+ },
+ {
+ { -0.162211, 9.94329, 0.13723, 0.0124084 },
+ { -0.253135, 0.00187735, 0.0230286, 0.01264 },
+ { -0.0695069, -0.0437594, 0.0120636, 0.0163087 },
+ { -0.0328856, -0.00926032, -0.00173573, 0.00527385 }
+ }
+};
+
+/* coefficients for error estimation
+ while using cubic Bézier curves for approximation
+ 1/4 <= b/a <= 1 */
+static const double coeffs3High[2][4][4] = {
+ {
+ { 0.0899116, -19.2349, -4.11711, 0.183362 },
+ { 0.138148, -1.45804, 1.32044, 1.38474 },
+ { 0.230903, -0.450262, 0.219963, 0.414038 },
+ { 0.0590565, -0.101062, 0.0430592, 0.0204699 }
+ },
+ {
+ { 0.0164649, 9.89394, 0.0919496, 0.00760802 },
+ { 0.0191603, -0.0322058, 0.0134667, -0.0825018 },
+ { 0.0156192, -0.017535, 0.00326508, -0.228157 },
+ { -0.0236752, 0.0405821, -0.0173086, 0.176187 }
+ }
+};
+
+/* safety factor to convert the "best" error approximation
+ into a "max bound" error */
+static const double safety3[] = {
+ 0.001, 4.98, 0.207, 0.0067
+};
+
+/* The code below is from the OpenVG 1.1 Spec
+ * Section 18.4 */
+
+/* Given: Points (x0, y0) and (x1, y1)
+ * Return: TRUE if a solution exists, FALSE otherwise
+ * Circle centers are written to (cx0, cy0) and (cx1, cy1)
+ */
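+/* Both candidate centers lie on the perpendicular bisector of the chord
+ * between the two points.  With chord length d = sqrt(dsq), the offset from
+ * the chord midpoint along that bisector is sqrt(1 - (d/2)^2), which equals
+ * d * sqrt(1/dsq - 1/4); that is why "disc" below is 1/dsq - 1/4. */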
+static VGboolean
+find_unit_circles(double x0, double y0, double x1, double y1,
+ double *cx0, double *cy0,
+ double *cx1, double *cy1)
+{
+ /* Compute differences and averages */
+ double dx = x0 - x1;
+ double dy = y0 - y1;
+ double xm = (x0 + x1)/2;
+ double ym = (y0 + y1)/2;
+ double dsq, disc, s, sdx, sdy;
+
+ /* Solve for intersecting unit circles */
+ dsq = dx*dx + dy*dy;
+ if (dsq == 0.0) return VG_FALSE; /* Points are coincident */
+ disc = 1.0/dsq - 1.0/4.0;
+
+   /* we only care about roughly float precision here, so if the value is
+    * within the float notion of zero, clamp it to exactly zero to avoid
+    * precision problems later on */
+ if (floatIsZero(disc))
+ disc = 0.0;
+
+ if (disc < 0.0) return VG_FALSE; /* Points are too far apart */
+ s = sqrt(disc);
+ sdx = s*dx;
+ sdy = s*dy;
+ *cx0 = xm + sdy;
+ *cy0 = ym - sdx;
+ *cx1 = xm - sdy;
+ *cy1 = ym + sdx;
+ return VG_TRUE;
+}
+
+
+/* Given: Ellipse parameters rh, rv, rot (in degrees),
+ * endpoints (x0, y0) and (x1, y1)
+ * Return: TRUE if a solution exists, FALSE otherwise
+ * Ellipse centers are written to (cx0, cy0) and (cx1, cy1)
+ */
+static VGboolean
+find_ellipses(double rh, double rv, double rot,
+ double x0, double y0, double x1, double y1,
+ double *cx0, double *cy0, double *cx1, double *cy1)
+{
+ double COS, SIN, x0p, y0p, x1p, y1p, pcx0, pcy0, pcx1, pcy1;
+ /* Convert rotation angle from degrees to radians */
+ rot *= M_PI/180.0;
+ /* Pre-compute rotation matrix entries */
+ COS = cos(rot); SIN = sin(rot);
+ /* Transform (x0, y0) and (x1, y1) into unit space */
+ /* using (inverse) rotate, followed by (inverse) scale */
+ x0p = (x0*COS + y0*SIN)/rh;
+ y0p = (-x0*SIN + y0*COS)/rv;
+ x1p = (x1*COS + y1*SIN)/rh;
+ y1p = (-x1*SIN + y1*COS)/rv;
+ if (!find_unit_circles(x0p, y0p, x1p, y1p,
+ &pcx0, &pcy0, &pcx1, &pcy1)) {
+ return VG_FALSE;
+ }
+ /* Transform back to original coordinate space */
+ /* using (forward) scale followed by (forward) rotate */
+ pcx0 *= rh; pcy0 *= rv;
+ pcx1 *= rh; pcy1 *= rv;
+ *cx0 = pcx0*COS - pcy0*SIN;
+ *cy0 = pcx0*SIN + pcy0*COS;
+ *cx1 = pcx1*COS - pcy1*SIN;
+ *cy1 = pcx1*SIN + pcy1*COS;
+ return VG_TRUE;
+}
+
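+/* Out-of-range radii correction (OpenVG 1.1 spec, section 18.4): when the
+ * endpoints are too far apart for the given radii, scale both radii
+ * uniformly so that, in unit space, the endpoints end up exactly one
+ * diameter apart; sqrt(dsq)/2 below is that scale factor. */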
+static INLINE VGboolean
+try_to_fix_radii(struct arc *arc)
+{
+ double COS, SIN, rot, x0p, y0p, x1p, y1p;
+ double dx, dy, dsq, scale;
+
+ /* Convert rotation angle from degrees to radians */
+ rot = DEGREES_TO_RADIANS(arc->theta);
+
+ /* Pre-compute rotation matrix entries */
+ COS = cos(rot); SIN = sin(rot);
+
+ /* Transform (x0, y0) and (x1, y1) into unit space */
+ /* using (inverse) rotate, followed by (inverse) scale */
+ x0p = (arc->x1*COS + arc->y1*SIN)/arc->a;
+ y0p = (-arc->x1*SIN + arc->y1*COS)/arc->b;
+ x1p = (arc->x2*COS + arc->y2*SIN)/arc->a;
+ y1p = (-arc->x2*SIN + arc->y2*COS)/arc->b;
+ /* Compute differences and averages */
+ dx = x0p - x1p;
+ dy = y0p - y1p;
+
+ dsq = dx*dx + dy*dy;
+#if 0
+ if (dsq <= 0.001) {
+ debug_printf("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAaaaaa\n");
+ }
+#endif
+ scale = 1/(2/sqrt(dsq));
+ arc->a *= scale;
+ arc->b *= scale;
+ return VG_TRUE;
+}
+
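+/* note: despite its name, vector_normalize() returns the vector's length
+ * and leaves the vector itself untouched */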
+static INLINE double vector_normalize(double *v)
+{
+ double sq = v[0] * v[0] + v[1] * v[1];
+ return sqrt(sq);
+}
+static INLINE double vector_orientation(double *v)
+{
+ double norm = vector_normalize(v);
+ double cosa = v[0] / norm;
+ double sina = v[1] / norm;
+ return (sina>=0 ? acos(cosa) : 2*M_PI - acos(cosa));
+}
+static INLINE double vector_dot(double *v0,
+ double *v1)
+{
+ return v0[0] * v1[0] + v0[1] * v1[1];
+}
+
+static INLINE double vector_angles(double *v0,
+ double *v1)
+{
+ double dot = vector_dot(v0, v1);
+ double norm0 = vector_normalize(v0);
+ double norm1 = vector_normalize(v1);
+
+ return acos(dot / (norm0 * norm1));
+}
+
+static VGboolean find_angles(struct arc *arc)
+{
+ double vec0[2], vec1[2];
+ double lambda1, lambda2;
+ double angle;
+ struct matrix matrix;
+
+ if (floatIsZero(arc->a) || floatIsZero(arc->b)) {
+ return VG_FALSE;
+ }
+ /* map the points to an identity circle */
+ matrix_load_identity(&matrix);
+ matrix_scale(&matrix, 1.f, arc->a/arc->b);
+ matrix_rotate(&matrix, -arc->theta);
+ matrix_map_point(&matrix,
+ arc->x1, arc->y1,
+ &arc->x1, &arc->y1);
+ matrix_map_point(&matrix,
+ arc->x2, arc->y2,
+ &arc->x2, &arc->y2);
+ matrix_map_point(&matrix,
+ arc->cx, arc->cy,
+ &arc->cx, &arc->cy);
+
+#if DEBUG_ARCS
+ debug_printf("Matrix 3 [%f, %f, %f| %f, %f, %f| %f, %f, %f]\n",
+ matrix.m[0], matrix.m[1], matrix.m[2],
+ matrix.m[3], matrix.m[4], matrix.m[5],
+ matrix.m[6], matrix.m[7], matrix.m[8]);
+ debug_printf("Endpoints [%f, %f], [%f, %f]\n",
+ arc->x1, arc->y1, arc->x2, arc->y2);
+#endif
+
+ vec0[0] = arc->x1 - arc->cx;
+ vec0[1] = arc->y1 - arc->cy;
+ vec1[0] = arc->x2 - arc->cx;
+ vec1[1] = arc->y2 - arc->cy;
+
+#if DEBUG_ARCS
+ debug_printf("Vec is [%f, %f], [%f, %f], [%f, %f]\n",
+ vec0[0], vec0[1], vec1[0], vec1[1], arc->cx, arc->cy);
+#endif
+
+ lambda1 = vector_orientation(vec0);
+
+ if (isnan(lambda1))
+ lambda1 = 0.f;
+
+ if (arc->type == VG_SCWARC_TO ||
+ arc->type == VG_SCCWARC_TO)
+ angle = vector_angles(vec0, vec1);
+ else if (arc->type == VG_LCWARC_TO ||
+ arc->type == VG_LCCWARC_TO) {
+ angle = 2*M_PI - vector_angles(vec0, vec1);
+ } else
+ abort();
+
+ if (isnan(angle))
+ angle = M_PI;
+
+
+ if (arc->type == VG_SCWARC_TO ||
+ arc->type == VG_LCWARC_TO)
+ lambda2 = lambda1 - angle;
+ else
+ lambda2 = lambda1 + angle;
+
+#if DEBUG_ARCS
+ debug_printf("Angle is %f and (%f, %f)\n", angle, lambda1, lambda2);
+#endif
+
+#if 0
+ arc->eta1 = atan2(sin(lambda1) / arc->b,
+ cos(lambda1) / arc->a);
+ arc->eta2 = atan2(sin(lambda2) / arc->b,
+ cos(lambda2) / arc->a);
+
+ /* make sure we have eta1 <= eta2 <= eta1 + 2 PI */
+ arc->eta2 -= two_pi * floor((arc->eta2 - arc->eta1) / two_pi);
+
+   /* the preceding correction fails if we have exactly eta2 - eta1 = 2 PI;
+      it reduces the interval to zero length */
+ if ((lambda2 - lambda1 > M_PI) && (arc->eta2 - arc->eta1 < M_PI)) {
+ arc->eta2 += 2 * M_PI;
+ }
+#else
+ arc->eta1 = lambda1;
+ arc->eta2 = lambda2;
+#endif
+
+ return VG_TRUE;
+}
+
+#if DEBUG_ARCS
+static void check_endpoints(struct arc *arc)
+{
+ double x1, y1, x2, y2;
+
+ double a_cos_eta1 = arc->a * cos(arc->eta1);
+ double b_sin_eta1 = arc->b * sin(arc->eta1);
+ x1 = arc->cx + a_cos_eta1 * arc->cos_theta -
+ b_sin_eta1 * arc->sin_theta;
+ y1 = arc->cy + a_cos_eta1 * arc->sin_theta +
+ b_sin_eta1 * arc->cos_theta;
+
+ double a_cos_eta2 = arc->a * cos(arc->eta2);
+ double b_sin_eta2 = arc->b * sin(arc->eta2);
+ x2 = arc->cx + a_cos_eta2 * arc->cos_theta -
+ b_sin_eta2 * arc->sin_theta;
+ y2 = arc->cy + a_cos_eta2 * arc->sin_theta +
+ b_sin_eta2 * arc->cos_theta;
+
+ debug_printf("Computed (%f, %f), (%f, %f)\n",
+ x1, y1, x2, y2);
+ debug_printf("Real (%f, %f), (%f, %f)\n",
+ arc->x1, arc->y1,
+ arc->x2, arc->y2);
+}
+#endif
+
+void arc_init(struct arc *arc,
+ VGPathSegment type,
+ VGfloat x1, VGfloat y1,
+ VGfloat x2, VGfloat y2,
+ VGfloat rh, VGfloat rv,
+ VGfloat rot)
+{
+ assert(type == VG_SCCWARC_TO ||
+ type == VG_SCWARC_TO ||
+ type == VG_LCCWARC_TO ||
+ type == VG_LCWARC_TO);
+ arc->type = type;
+ arc->x1 = x1;
+ arc->y1 = y1;
+ arc->x2 = x2;
+ arc->y2 = y2;
+ arc->a = rh;
+ arc->b = rv;
+ arc->theta = rot;
+ arc->cos_theta = cos(arc->theta);
+ arc->sin_theta = sin(arc->theta);
+ {
+ double cx0, cy0, cx1, cy1;
+ double cx, cy;
+ arc->is_valid = find_ellipses(rh, rv, rot, x1, y1, x2, y2,
+ &cx0, &cy0, &cx1, &cy1);
+
+ if (!arc->is_valid && try_to_fix_radii(arc)) {
+ rh = arc->a;
+ rv = arc->b;
+ arc->is_valid =
+ find_ellipses(rh, rv, rot, x1, y1, x2, y2,
+ &cx0, &cy0, &cx1, &cy1);
+ }
+
+ if (type == VG_SCWARC_TO ||
+ type == VG_LCCWARC_TO) {
+ cx = cx1;
+ cy = cy1;
+ } else {
+ cx = cx0;
+ cy = cy0;
+ }
+#if DEBUG_ARCS
+ debug_printf("Centers are : (%f, %f) , (%f, %f). Real (%f, %f)\n",
+ cx0, cy0, cx1, cy1, cx, cy);
+#endif
+ arc->cx = cx;
+ arc->cy = cy;
+ if (arc->is_valid) {
+ arc->is_valid = find_angles(arc);
+#if DEBUG_ARCS
+ check_endpoints(arc);
+#endif
+         /* remap a few values: find_angles requires
+          * rot in degrees, while the rest of the code
+          * needs it in radians, and find_angles
+          * modifies the center to match an identity
+          * circle, so reset it here */
+ arc->theta = DEGREES_TO_RADIANS(rot);
+ arc->cos_theta = cos(arc->theta);
+ arc->sin_theta = sin(arc->theta);
+ arc->cx = cx;
+ arc->cy = cy;
+ }
+ }
+}
+
+static INLINE double rational_function(double x, const double *c)
+{
+ return (x * (x * c[0] + c[1]) + c[2]) / (x + c[3]);
+}
+
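+/* Estimate the error of approximating the elliptical arc span [etaA, etaB]
+ * with a single cubic Bézier.  The model is
+ *    error ~= rational(x, safety) * a * exp(c0 + c1 * (etaB - etaA))
+ * with x = b/a, where c0 and c1 are short cosine series whose coefficients
+ * come from the tables above (a rational-fit error model commonly used for
+ * cubic arc approximation). */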
+static double estimate_error(struct arc *arc,
+ double etaA, double etaB)
+{
+ double eta = 0.5 * (etaA + etaB);
+
+ double x = arc->b / arc->a;
+ double dEta = etaB - etaA;
+ double cos2 = cos(2 * eta);
+ double cos4 = cos(4 * eta);
+ double cos6 = cos(6 * eta);
+ double c0, c1;
+
+   /* select the right coefficient set according to degree and b/a */
+ const double (*coeffs)[4][4];
+ const double *safety;
+ coeffs = (x < 0.25) ? coeffs3Low : coeffs3High;
+ safety = safety3;
+
+ c0 = rational_function(x, coeffs[0][0])
+ + cos2 * rational_function(x, coeffs[0][1])
+ + cos4 * rational_function(x, coeffs[0][2])
+ + cos6 * rational_function(x, coeffs[0][3]);
+
+ c1 = rational_function(x, coeffs[1][0])
+ + cos2 * rational_function(x, coeffs[1][1])
+ + cos4 * rational_function(x, coeffs[1][2])
+ + cos6 * rational_function(x, coeffs[1][3]);
+
+ return rational_function(x, safety) * arc->a * exp(c0 + c1 * dEta);
+}
+
+struct arc_cb {
+ void (*move)(struct arc_cb *cb, VGfloat x, VGfloat y);
+ void (*point)(struct arc_cb *cb, VGfloat x, VGfloat y);
+ void (*bezier)(struct arc_cb *cb, struct bezier *bezier);
+
+ void *user_data;
+};
+
+static void cb_null_move(struct arc_cb *cb, VGfloat x, VGfloat y)
+{
+}
+
+static void polygon_point(struct arc_cb *cb, VGfloat x, VGfloat y)
+{
+ struct polygon *poly = (struct polygon*)cb->user_data;
+ polygon_vertex_append(poly, x, y);
+}
+
+static void polygon_bezier(struct arc_cb *cb, struct bezier *bezier)
+{
+ struct polygon *poly = (struct polygon*)cb->user_data;
+ bezier_add_to_polygon(bezier, poly);
+}
+
+static void stroke_point(struct arc_cb *cb, VGfloat x, VGfloat y)
+{
+ struct stroker *stroker = (struct stroker*)cb->user_data;
+ stroker_line_to(stroker, x, y);
+}
+
+static void stroke_curve(struct arc_cb *cb, struct bezier *bezier)
+{
+ struct stroker *stroker = (struct stroker*)cb->user_data;
+ stroker_curve_to(stroker,
+ bezier->x2, bezier->y2,
+ bezier->x3, bezier->y3,
+ bezier->x4, bezier->y4);
+}
+
+static void stroke_emit_point(struct arc_cb *cb, VGfloat x, VGfloat y)
+{
+ struct stroker *stroker = (struct stroker*)cb->user_data;
+ stroker_emit_line_to(stroker, x, y);
+}
+
+static void stroke_emit_curve(struct arc_cb *cb, struct bezier *bezier)
+{
+ struct stroker *stroker = (struct stroker*)cb->user_data;
+ stroker_emit_curve_to(stroker,
+ bezier->x2, bezier->y2,
+ bezier->x3, bezier->y3,
+ bezier->x4, bezier->y4);
+}
+
+static void arc_path_move(struct arc_cb *cb, VGfloat x, VGfloat y)
+{
+ struct path *path = (struct path*)cb->user_data;
+ path_move_to(path, x, y);
+}
+
+static void arc_path_point(struct arc_cb *cb, VGfloat x, VGfloat y)
+{
+ struct path *path = (struct path*)cb->user_data;
+ path_line_to(path, x, y);
+}
+
+static void arc_path_bezier(struct arc_cb *cb, struct bezier *bezier)
+{
+ struct path *path = (struct path*)cb->user_data;
+ path_cubic_to(path,
+ bezier->x2, bezier->y2,
+ bezier->x3, bezier->y3,
+ bezier->x4, bezier->y4);
+}
+
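+/* Pick the number of cubic Béziers for the arc: starting from one segment,
+ * keep doubling the count until every sub-span covers at most pi/2 and its
+ * estimated error is below the threshold, capping the search at 1024. */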
+static INLINE int num_beziers_needed(struct arc *arc)
+{
+ double threshold = 0.05;
+ VGboolean found = VG_FALSE;
+ int n = 1;
+ double min_eta, max_eta;
+
+ min_eta = MIN2(arc->eta1, arc->eta2);
+ max_eta = MAX2(arc->eta1, arc->eta2);
+
+ while ((! found) && (n < 1024)) {
+ double d_eta = (max_eta - min_eta) / n;
+ if (d_eta <= 0.5 * M_PI) {
+ double eta_b = min_eta;
+ found = VG_TRUE;
+ for (int i = 0; found && (i < n); ++i) {
+ double etaA = eta_b;
+ eta_b += d_eta;
+ found = (estimate_error(arc, etaA, eta_b) <= threshold);
+ }
+ }
+ n = n << 1;
+ }
+
+ return n;
+}
+
+static void arc_to_beziers(struct arc *arc,
+ struct arc_cb cb,
+ struct matrix *matrix)
+{
+ int n = 1;
+ double d_eta, eta_b, cos_eta_b,
+ sin_eta_b, a_cos_eta_b, b_sin_eta_b, a_sin_eta_b,
+ b_cos_eta_b, x_b, y_b, x_b_dot, y_b_dot, lx, ly;
+ double t, alpha;
+
+ { /* always move to the start of the arc */
+ VGfloat x = arc->x1;
+ VGfloat y = arc->y1;
+ matrix_map_point(matrix, x, y, &x, &y);
+ cb.move(&cb, x, y);
+ }
+
+ if (!arc->is_valid) {
+ VGfloat x = arc->x2;
+ VGfloat y = arc->y2;
+ matrix_map_point(matrix, x, y, &x, &y);
+ cb.point(&cb, x, y);
+ return;
+ }
+
+ /* find the number of Bézier curves needed */
+ n = num_beziers_needed(arc);
+
+ d_eta = (arc->eta2 - arc->eta1) / n;
+ eta_b = arc->eta1;
+
+ cos_eta_b = cos(eta_b);
+ sin_eta_b = sin(eta_b);
+ a_cos_eta_b = arc->a * cos_eta_b;
+ b_sin_eta_b = arc->b * sin_eta_b;
+ a_sin_eta_b = arc->a * sin_eta_b;
+ b_cos_eta_b = arc->b * cos_eta_b;
+ x_b = arc->cx + a_cos_eta_b * arc->cos_theta -
+ b_sin_eta_b * arc->sin_theta;
+ y_b = arc->cy + a_cos_eta_b * arc->sin_theta +
+ b_sin_eta_b * arc->cos_theta;
+ x_b_dot = -a_sin_eta_b * arc->cos_theta -
+ b_cos_eta_b * arc->sin_theta;
+ y_b_dot = -a_sin_eta_b * arc->sin_theta +
+ b_cos_eta_b * arc->cos_theta;
+
+ {
+ VGfloat x = x_b, y = y_b;
+ matrix_map_point(matrix, x, y, &x, &y);
+ cb.point(&cb, x, y);
+ }
+ lx = x_b;
+ ly = y_b;
+
+ t = tan(0.5 * d_eta);
+ alpha = sin(d_eta) * (sqrt(4 + 3 * t * t) - 1) / 3;
+
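+   /* Each sub-span [etaA, etaB] is approximated by a cubic whose inner
+    * control points are P(etaA) + alpha*P'(etaA) and P(etaB) - alpha*P'(etaB),
+    * with alpha computed above from d_eta (the usual Hermite-style arc
+    * approximation). */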
+ for (int i = 0; i < n; ++i) {
+ struct bezier bezier;
+ double xA = x_b;
+ double yA = y_b;
+ double xADot = x_b_dot;
+ double yADot = y_b_dot;
+
+ eta_b += d_eta;
+ cos_eta_b = cos(eta_b);
+ sin_eta_b = sin(eta_b);
+ a_cos_eta_b = arc->a * cos_eta_b;
+ b_sin_eta_b = arc->b * sin_eta_b;
+ a_sin_eta_b = arc->a * sin_eta_b;
+ b_cos_eta_b = arc->b * cos_eta_b;
+ x_b = arc->cx + a_cos_eta_b * arc->cos_theta -
+ b_sin_eta_b * arc->sin_theta;
+ y_b = arc->cy + a_cos_eta_b * arc->sin_theta +
+ b_sin_eta_b * arc->cos_theta;
+ x_b_dot = -a_sin_eta_b * arc->cos_theta -
+ b_cos_eta_b * arc->sin_theta;
+ y_b_dot = -a_sin_eta_b * arc->sin_theta +
+ b_cos_eta_b * arc->cos_theta;
+
+ bezier_init(&bezier,
+ lx, ly,
+ (float) (xA + alpha * xADot), (float) (yA + alpha * yADot),
+ (float) (x_b - alpha * x_b_dot), (float) (y_b - alpha * y_b_dot),
+ (float) x_b, (float) y_b);
+#if 0
+ debug_printf("%d) Bezier (%f, %f), (%f, %f), (%f, %f), (%f, %f)\n",
+ i,
+ bezier.x1, bezier.y1,
+ bezier.x2, bezier.y2,
+ bezier.x3, bezier.y3,
+ bezier.x4, bezier.y4);
+#endif
+ bezier_transform(&bezier, matrix);
+ cb.bezier(&cb, &bezier);
+ lx = x_b;
+ ly = y_b;
+ }
+}
+
+
+void arc_add_to_polygon(struct arc *arc,
+ struct polygon *poly,
+ struct matrix *matrix)
+{
+ struct arc_cb cb;
+
+ cb.move = cb_null_move;
+ cb.point = polygon_point;
+ cb.bezier = polygon_bezier;
+ cb.user_data = poly;
+
+ arc_to_beziers(arc, cb, matrix);
+}
+
+void arc_stroke_cb(struct arc *arc,
+ struct stroker *stroke,
+ struct matrix *matrix)
+{
+ struct arc_cb cb;
+
+ cb.move = cb_null_move;
+ cb.point = stroke_point;
+ cb.bezier = stroke_curve;
+ cb.user_data = stroke;
+
+ arc_to_beziers(arc, cb, matrix);
+}
+
+void arc_stroker_emit(struct arc *arc,
+ struct stroker *stroker,
+ struct matrix *matrix)
+{
+ struct arc_cb cb;
+
+ cb.move = cb_null_move;
+ cb.point = stroke_emit_point;
+ cb.bezier = stroke_emit_curve;
+ cb.user_data = stroker;
+
+ arc_to_beziers(arc, cb, matrix);
+}
+
+void arc_to_path(struct arc *arc,
+ struct path *path,
+ struct matrix *matrix)
+{
+ struct arc_cb cb;
+
+ cb.move = arc_path_move;
+ cb.point = arc_path_point;
+ cb.bezier = arc_path_bezier;
+ cb.user_data = path;
+
+ arc_to_beziers(arc, cb, matrix);
+}
diff --git a/src/gallium/state_trackers/vega/arc.h b/src/gallium/state_trackers/vega/arc.h
new file mode 100644
index 0000000000..3205cd5021
--- /dev/null
+++ b/src/gallium/state_trackers/vega/arc.h
@@ -0,0 +1,80 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef ARC_H
+#define ARC_H
+
+#include "VG/openvg.h"
+
+struct polygon;
+struct matrix;
+struct stroker;
+struct path;
+
+struct arc {
+ VGPathSegment type;
+
+ VGfloat cx, cy;
+
+ VGfloat a, b;
+
+ VGfloat theta;
+ VGfloat cos_theta, sin_theta;
+
+ VGfloat eta1;
+ VGfloat eta2;
+
+ VGfloat x1, y1, x2, y2;
+
+ VGboolean is_valid;
+};
+
+void arc_init(struct arc *arc,
+ VGPathSegment type,
+ VGfloat x1, VGfloat y1,
+ VGfloat x2, VGfloat y2,
+ VGfloat rh, VGfloat rv,
+ VGfloat rot);
+
+void arc_add_to_polygon(struct arc *arc,
+ struct polygon *poly,
+ struct matrix *matrix);
+
+
+void arc_to_path(struct arc *arc,
+ struct path *p,
+ struct matrix *matrix);
+
+void arc_stroke_cb(struct arc *arc,
+ struct stroker *stroke,
+ struct matrix *matrix);
+
+void arc_stroker_emit(struct arc *arc,
+ struct stroker *stroke,
+ struct matrix *matrix);
+
+
+#endif
diff --git a/src/gallium/state_trackers/vega/asm_fill.h b/src/gallium/state_trackers/vega/asm_fill.h
new file mode 100644
index 0000000000..2f394ad6c5
--- /dev/null
+++ b/src/gallium/state_trackers/vega/asm_fill.h
@@ -0,0 +1,246 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef ASM_FILL_H
+#define ASM_FILL_H
+
+static const char solid_fill_asm[] =
+ "MOV %s, CONST[0]\n";
+
+
+static const char linear_grad_asm[] =
+ "MOV TEMP[0].xy, IN[0]\n"
+ "MOV TEMP[0].z, CONST[1].yyyy\n"
+ "DP3 TEMP[1], CONST[2], TEMP[0]\n"
+ "DP3 TEMP[2], CONST[3], TEMP[0]\n"
+ "DP3 TEMP[3], CONST[4], TEMP[0]\n"
+ "RCP TEMP[3], TEMP[3]\n"
+ "MUL TEMP[1], TEMP[1], TEMP[3]\n"
+ "MUL TEMP[2], TEMP[2], TEMP[3]\n"
+ "MOV TEMP[4].x, TEMP[1]\n"
+ "MOV TEMP[4].y, TEMP[2]\n"
+ "MUL TEMP[0], CONST[0].yyyy, TEMP[4].yyyy\n"
+ "MAD TEMP[1], CONST[0].xxxx, TEMP[4].xxxx, TEMP[0]\n"
+ "MUL TEMP[2], TEMP[1], CONST[0].zzzz\n"
+ "TEX %s, TEMP[2], SAMP[0], 1D\n";
+
+static const char radial_grad_asm[] =
+ "MOV TEMP[0].xy, IN[0]\n"
+ "MOV TEMP[0].z, CONST[1].yyyy\n"
+ "DP3 TEMP[1], CONST[2], TEMP[0]\n"
+ "DP3 TEMP[2], CONST[3], TEMP[0]\n"
+ "DP3 TEMP[3], CONST[4], TEMP[0]\n"
+ "RCP TEMP[3], TEMP[3]\n"
+ "MUL TEMP[1], TEMP[1], TEMP[3]\n"
+ "MUL TEMP[2], TEMP[2], TEMP[3]\n"
+ "MOV TEMP[5].x, TEMP[1]\n"
+ "MOV TEMP[5].y, TEMP[2]\n"
+ "MUL TEMP[0], CONST[0].yyyy, TEMP[5].yyyy\n"
+ "MAD TEMP[1], CONST[0].xxxx, TEMP[5].xxxx, TEMP[0]\n"
+ "ADD TEMP[1], TEMP[1], TEMP[1]\n"
+ "MUL TEMP[3], TEMP[5].yyyy, TEMP[5].yyyy\n"
+ "MAD TEMP[4], TEMP[5].xxxx, TEMP[5].xxxx, TEMP[3]\n"
+ "MOV TEMP[4], -TEMP[4]\n"
+ "MUL TEMP[2], CONST[0].zzzz, TEMP[4]\n"
+ "MUL TEMP[0], CONST[1].wwww, TEMP[2]\n"
+ "MUL TEMP[3], TEMP[1], TEMP[1]\n"
+ "SUB TEMP[2], TEMP[3], TEMP[0]\n"
+ "RSQ TEMP[2], |TEMP[2]|\n"
+ "RCP TEMP[2], TEMP[2]\n"
+ "SUB TEMP[1], TEMP[2], TEMP[1]\n"
+ "ADD TEMP[0], CONST[0].zzzz, CONST[0].zzzz\n"
+ "RCP TEMP[0], TEMP[0]\n"
+ "MUL TEMP[2], TEMP[1], TEMP[0]\n"
+ "TEX %s, TEMP[2], SAMP[0], 1D\n";
+
+static const char pattern_asm[] =
+ "MOV TEMP[0].xy, IN[0]\n"
+ "MOV TEMP[0].z, CONST[1].yyyy\n"
+ "DP3 TEMP[1], CONST[2], TEMP[0]\n"
+ "DP3 TEMP[2], CONST[3], TEMP[0]\n"
+ "DP3 TEMP[3], CONST[4], TEMP[0]\n"
+ "RCP TEMP[3], TEMP[3]\n"
+ "MUL TEMP[1], TEMP[1], TEMP[3]\n"
+ "MUL TEMP[2], TEMP[2], TEMP[3]\n"
+ "MOV TEMP[4].x, TEMP[1]\n"
+ "MOV TEMP[4].y, TEMP[2]\n"
+ "RCP TEMP[0], CONST[1].zwzw\n"
+ "MOV TEMP[1], TEMP[4]\n"
+ "MUL TEMP[1].x, TEMP[1], TEMP[0]\n"
+ "MUL TEMP[1].y, TEMP[1], TEMP[0]\n"
+ "TEX %s, TEMP[1], SAMP[0], 2D\n";
+
+
+static const char mask_asm[] =
+ "TEX TEMP[1], IN[0], SAMP[1], 2D\n"
+ "MUL TEMP[0].w, TEMP[0].wwww, TEMP[1].wwww\n"
+ "MOV %s, TEMP[0]\n";
+
+
+static const char image_normal_asm[] =
+ "TEX %s, IN[1], SAMP[3], 2D\n";
+
+static const char image_multiply_asm[] =
+ "TEX TEMP[1], IN[1], SAMP[3], 2D\n"
+ "MUL %s, TEMP[0], TEMP[1]\n";
+
+static const char image_stencil_asm[] =
+ "TEX TEMP[1], IN[1], SAMP[3], 2D\n"
+ "MUL %s, TEMP[0], TEMP[1]\n";
+
+
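+/* Shared tail of the extended blend modes below: assuming CONST[1].y holds
+ * 1.0, TEMP[0] the incoming paint color and TEMP[1] the sampled surface
+ * color, this leaves the Porter-Duff cross terms
+ * src*(1 - dst.a) + dst*(1 - src.a) in TEMP[3]. */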
+#define EXTENDED_BLEND_OVER \
+ "SUB TEMP[3], CONST[1].yyyy, TEMP[1].wwww\n" \
+ "SUB TEMP[4], CONST[1].yyyy, TEMP[0].wwww\n" \
+ "MUL TEMP[3], TEMP[0], TEMP[3]\n" \
+ "MUL TEMP[4], TEMP[1], TEMP[4]\n" \
+ "ADD TEMP[3], TEMP[3], TEMP[4]\n"
+
+static const char blend_multiply_asm[] =
+ "TEX TEMP[1], IN[0], SAMP[2], 2D\n"
+ EXTENDED_BLEND_OVER
+ "MUL TEMP[4], TEMP[0], TEMP[1]\n"
+ "ADD TEMP[1], TEMP[4], TEMP[3]\n"/*result.rgb*/
+ "MUL TEMP[2], TEMP[0].wwww, TEMP[1].wwww\n"
+ "ADD TEMP[3], TEMP[0].wwww, TEMP[1].wwww\n"
+ "SUB TEMP[1].w, TEMP[3], TEMP[2]\n"
+ "MOV %s, TEMP[1]\n";
+#if 1
+static const char blend_screen_asm[] =
+ "TEX TEMP[1], IN[0], SAMP[2], 2D\n"
+ "ADD TEMP[3], TEMP[0], TEMP[1]\n"
+ "MUL TEMP[2], TEMP[0], TEMP[1]\n"
+ "SUB %s, TEMP[3], TEMP[2]\n";
+#else
+static const char blend_screen_asm[] =
+ "TEX TEMP[1], IN[0], SAMP[2], 2D\n"
+ "MOV %s, TEMP[1]\n";
+#endif
+
+static const char blend_darken_asm[] =
+ "TEX TEMP[1], IN[0], SAMP[2], 2D\n"
+ EXTENDED_BLEND_OVER
+ "MUL TEMP[4], TEMP[0], TEMP[1].wwww\n"
+ "MUL TEMP[5], TEMP[1], TEMP[0].wwww\n"
+ "MIN TEMP[4], TEMP[4], TEMP[5]\n"
+ "ADD TEMP[1], TEMP[3], TEMP[4]\n"
+ "MUL TEMP[2], TEMP[0].wwww, TEMP[1].wwww\n"
+ "ADD TEMP[3], TEMP[0].wwww, TEMP[1].wwww\n"
+ "SUB TEMP[1].w, TEMP[3], TEMP[2]\n"
+ "MOV %s, TEMP[1]\n";
+
+static const char blend_lighten_asm[] =
+ "TEX TEMP[1], IN[0], SAMP[2], 2D\n"
+ EXTENDED_BLEND_OVER
+ "MUL TEMP[4], TEMP[0], TEMP[1].wwww\n"
+ "MUL TEMP[5], TEMP[1], TEMP[0].wwww\n"
+ "MAX TEMP[4], TEMP[4], TEMP[5]\n"
+ "ADD TEMP[1], TEMP[3], TEMP[4]\n"
+ "MUL TEMP[2], TEMP[0].wwww, TEMP[1].wwww\n"
+ "ADD TEMP[3], TEMP[0].wwww, TEMP[1].wwww\n"
+ "SUB TEMP[1].w, TEMP[3], TEMP[2]\n"
+ "MOV %s, TEMP[1]\n";
+
+
+static const char premultiply_asm[] =
+ "MUL TEMP[0].xyz, TEMP[0], TEMP[0].wwww\n";
+
+static const char unpremultiply_asm[] =
+ "TEX TEMP[0], IN[0], SAMP[1], 2D\n";
+
+
+static const char color_bw_asm[] =
+ "ADD TEMP[1], CONST[1].yyyy, CONST[1].yyyy\n"
+ "RCP TEMP[2], TEMP[1]\n"
+ "ADD TEMP[1], CONST[1].yyyy, TEMP[2]\n"
+ "ADD TEMP[2].x, TEMP[0].xxxx, TEMP[0].yyyy\n"
+ "ADD TEMP[2].x, TEMP[0].zzzz, TEMP[0].xxxx\n"
+ "SGE TEMP[0].xyz, TEMP[2].xxxx, TEMP[1]\n"
+ "SGE TEMP[0].w, TEMP[0].wwww, TEMP[2].yyyy\n"
+ "MOV %s, TEMP[0]\n";
+
+
+struct shader_asm_info {
+ VGint id;
+ VGint num_tokens;
+ const char * txt;
+
+ VGboolean needs_position;
+
+ VGint start_const;
+ VGint num_consts;
+
+ VGint start_sampler;
+ VGint num_samplers;
+
+ VGint start_temp;
+ VGint num_temps;
+};
+
+
+static const struct shader_asm_info shaders_asm[] = {
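+   /* {id, num_tokens, txt, needs_position,
+    *  start_const, num_consts, start_sampler, num_samplers,
+    *  start_temp, num_temps} */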
+ /* fills */
+ {VEGA_SOLID_FILL_SHADER, 40, solid_fill_asm,
+ VG_FALSE, 0, 1, 0, 0, 0, 0},
+ {VEGA_LINEAR_GRADIENT_SHADER, 200, linear_grad_asm,
+ VG_TRUE, 0, 5, 0, 1, 0, 5},
+ {VEGA_RADIAL_GRADIENT_SHADER, 200, radial_grad_asm,
+ VG_TRUE, 0, 5, 0, 1, 0, 6},
+ {VEGA_PATTERN_SHADER, 100, pattern_asm,
+ VG_TRUE, 1, 4, 0, 1, 0, 5},
+
+ /* image draw modes */
+ {VEGA_IMAGE_NORMAL_SHADER, 200, image_normal_asm,
+ VG_TRUE, 0, 0, 3, 1, 0, 0},
+ {VEGA_IMAGE_MULTIPLY_SHADER, 200, image_multiply_asm,
+ VG_TRUE, 0, 0, 3, 1, 0, 2},
+ {VEGA_IMAGE_STENCIL_SHADER, 200, image_stencil_asm,
+ VG_TRUE, 0, 0, 3, 1, 0, 2},
+
+ {VEGA_MASK_SHADER, 100, mask_asm,
+ VG_TRUE, 0, 0, 1, 1, 0, 2},
+
+ /* extra blend modes */
+ {VEGA_BLEND_MULTIPLY_SHADER, 200, blend_multiply_asm,
+ VG_TRUE, 1, 1, 2, 1, 0, 5},
+ {VEGA_BLEND_SCREEN_SHADER, 200, blend_screen_asm,
+ VG_TRUE, 0, 0, 2, 1, 0, 4},
+ {VEGA_BLEND_DARKEN_SHADER, 200, blend_darken_asm,
+ VG_TRUE, 1, 1, 2, 1, 0, 6},
+ {VEGA_BLEND_LIGHTEN_SHADER, 200, blend_lighten_asm,
+ VG_TRUE, 1, 1, 2, 1, 0, 6},
+
+ /* premultiply */
+ {VEGA_PREMULTIPLY_SHADER, 100, premultiply_asm,
+ VG_FALSE, 0, 0, 0, 0, 0, 1},
+ {VEGA_UNPREMULTIPLY_SHADER, 100, unpremultiply_asm,
+ VG_FALSE, 0, 0, 0, 0, 0, 1},
+
+ /* color transform to black and white */
+ {VEGA_BW_SHADER, 150, color_bw_asm,
+ VG_FALSE, 1, 1, 0, 0, 0, 3},
+};
+#endif
diff --git a/src/gallium/state_trackers/vega/asm_filters.h b/src/gallium/state_trackers/vega/asm_filters.h
new file mode 100644
index 0000000000..9a49f2e12d
--- /dev/null
+++ b/src/gallium/state_trackers/vega/asm_filters.h
@@ -0,0 +1,117 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef ASM_FILTERS_H
+#define ASM_FILTERS_H
+
+static const char color_matrix_asm[] =
+ "FRAG1.1\n"
+ "DCL IN[0], GENERIC[0], PERSPECTIVE\n"
+ "DCL OUT[0], COLOR, CONSTANT\n"
+ "DCL CONST[0..4], CONSTANT\n"
+ "DCL TEMP[0..4], CONSTANT\n"
+ "DCL SAMP[0], CONSTANT\n"
+ "TEX TEMP[0], IN[0], SAMP[0], 2D\n"
+ "MOV TEMP[1], TEMP[0].xxxx\n"
+ "MOV TEMP[2], TEMP[0].yyyy\n"
+ "MOV TEMP[3], TEMP[0].zzzz\n"
+ "MOV TEMP[4], TEMP[0].wwww\n"
+ "MUL TEMP[1], TEMP[1], CONST[0]\n"
+ "MUL TEMP[2], TEMP[2], CONST[1]\n"
+ "MUL TEMP[3], TEMP[3], CONST[2]\n"
+ "MUL TEMP[4], TEMP[4], CONST[3]\n"
+ "ADD TEMP[0], TEMP[1], CONST[4]\n"
+ "ADD TEMP[0], TEMP[0], TEMP[2]\n"
+ "ADD TEMP[0], TEMP[0], TEMP[3]\n"
+ "ADD TEMP[0], TEMP[0], TEMP[4]\n"
+ "MOV OUT[0], TEMP[0]\n"
+ "END\n";
+
+static const char convolution_asm[] =
+ "FRAG1.1\n"
+ "DCL IN[0], GENERIC[0], PERSPECTIVE\n"
+ "DCL OUT[0], COLOR, CONSTANT\n"
+ "DCL TEMP[0..4], CONSTANT\n"
+ "DCL ADDR[0], CONSTANT\n"
+ "DCL CONST[0..%d], CONSTANT\n"
+ "DCL SAMP[0], CONSTANT\n"
+ "0: MOV TEMP[0], CONST[0].xxxx\n"
+ "1: MOV TEMP[1], CONST[0].xxxx\n"
+ "2: BGNLOOP :14\n"
+ "3: SGE TEMP[0].z, TEMP[0].yyyy, CONST[1].xxxx\n"
+ "4: IF TEMP[0].zzzz :7\n"
+ "5: BRK\n"
+ "6: ENDIF\n"
+ "7: ARL ADDR[0].x, TEMP[0].yyyy\n"
+ "8: MOV TEMP[3], CONST[ADDR[0]+2]\n"
+ "9: ADD TEMP[4].xy, IN[0], TEMP[3]\n"
+ "10: TEX TEMP[2], TEMP[4], SAMP[0], 2D\n"
+ "11: MOV TEMP[3], CONST[ADDR[0]+%d]\n"
+ "12: MAD TEMP[1], TEMP[2], TEMP[3], TEMP[1]\n"
+ "13: ADD TEMP[0].y, TEMP[0].yyyy, CONST[0].yyyy\n"
+ "14: ENDLOOP :2\n"
+ "15: MAD OUT[0], TEMP[1], CONST[1].yyyy, CONST[1].zzzz\n"
+ "16: END\n";
+
+
+static const char lookup_asm[] =
+ "FRAG1.1\n"
+ "DCL IN[0], GENERIC[0], PERSPECTIVE\n"
+ "DCL OUT[0], COLOR, CONSTANT\n"
+ "DCL TEMP[0..2], CONSTANT\n"
+ "DCL CONST[0], CONSTANT\n"
+ "DCL SAMP[0..1], CONSTANT\n"
+ "TEX TEMP[0], IN[0], SAMP[0], 2D\n"
+ "MOV TEMP[1], TEMP[0]\n"
+ /* do red */
+ "TEX TEMP[2], TEMP[1].xxxx, SAMP[1], 1D\n"
+ "MOV TEMP[0].x, TEMP[2].xxxx\n"
+   /* do green */
+ "TEX TEMP[2], TEMP[1].yyyy, SAMP[1], 1D\n"
+ "MOV TEMP[0].y, TEMP[2].yyyy\n"
+   /* do blue */
+ "TEX TEMP[2], TEMP[1].zzzz, SAMP[1], 1D\n"
+ "MOV TEMP[0].z, TEMP[2].zzzz\n"
+ /* do alpha */
+ "TEX TEMP[2], TEMP[1].wwww, SAMP[1], 1D\n"
+ "MOV TEMP[0].w, TEMP[2].wwww\n"
+ "MOV OUT[0], TEMP[0]\n"
+ "END\n";
+
+
+static const char lookup_single_asm[] =
+ "FRAG1.1\n"
+ "DCL IN[0], GENERIC[0], PERSPECTIVE\n"
+ "DCL OUT[0], COLOR, CONSTANT\n"
+ "DCL TEMP[0..2], CONSTANT\n"
+ "DCL CONST[0], CONSTANT\n"
+ "DCL SAMP[0..1], CONSTANT\n"
+ "TEX TEMP[0], IN[0], SAMP[0], 2D\n"
+ "TEX TEMP[1], TEMP[0].%s, SAMP[1], 1D\n"
+ "MOV OUT[0], TEMP[1]\n"
+ "END\n";
+
+#endif
diff --git a/src/gallium/state_trackers/vega/asm_util.h b/src/gallium/state_trackers/vega/asm_util.h
new file mode 100644
index 0000000000..218e1d166d
--- /dev/null
+++ b/src/gallium/state_trackers/vega/asm_util.h
@@ -0,0 +1,136 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef ASM_UTIL_H
+#define ASM_UTIL_H
+
+
+static const char pass_through_depth_asm[] =
+ "FRAG1.1\n"
+ "DCL IN[0], POSITION, LINEAR\n"
+ "DCL OUT[0].z, POSITION, CONSTANT\n"
+ "0: MOV OUT[0].z, IN[0].zzzz\n"
+ "1: END\n";
+
+
+
+/* μnew = μmask */
+static const char set_mask_asm[] =
+ "FRAG1.1\n"
+ "DCL IN[0], GENERIC[0], PERSPECTIVE\n"
+ "DCL SAMP[0], CONSTANT\n"
+ "DCL OUT[0], COLOR, CONSTANT\n"
+ "0: TEX OUT[0], IN[0], SAMP[0], 2D\n"/*umask*/
+ "1: END\n";
+
+/* μnew = 1 – (1 – μmask)*(1 – μprev) */
+static const char union_mask_asm[] =
+ "FRAG1.1\n"
+ "DCL IN[0], GENERIC[0], PERSPECTIVE\n"
+ "DCL IN[1], POSITION, LINEAR\n"
+ "DCL CONST[0], CONSTANT\n"
+ "DCL SAMP[0..1], CONSTANT\n"
+ "DCL TEMP[0..3], CONSTANT\n"
+ "DCL OUT[0], COLOR, CONSTANT\n"
+ "0: TEX TEMP[1], IN[0], SAMP[0], 2D\n"/*umask*/
+ "1: TEX TEMP[0], IN[1], SAMP[1], 2D\n"/*uprev*/
+ "2: SUB TEMP[2], CONST[0], TEMP[0]\n"
+ "3: SUB TEMP[3], CONST[0], TEMP[1]\n"
+ "4: MUL TEMP[0].w, TEMP[2].wwww, TEMP[3].wwww\n"
+ "5: SUB OUT[0], CONST[0], TEMP[0]\n"
+ "6: END\n";
+
+/* μnew = μmask *μprev */
+static const char intersect_mask_asm[] =
+ "FRAG1.1\n"
+ "DCL IN[0], GENERIC[0], PERSPECTIVE\n"
+ "DCL IN[1], POSITION, LINEAR\n"
+ "DCL CONST[0], CONSTANT\n"
+ "DCL SAMP[0..1], CONSTANT\n"
+ "DCL TEMP[0..1], CONSTANT\n"
+ "DCL OUT[0], COLOR, CONSTANT\n"
+ "0: TEX TEMP[0], IN[1], SAMP[1], 2D\n"/*uprev*/
+ "1: TEX TEMP[1], IN[0], SAMP[0], 2D\n"/*umask*/
+ "2: MUL OUT[0], TEMP[0].wwww, TEMP[1].wwww\n"
+ "3: END\n";
+
+/* μnew = μprev*(1 – μmask) */
+static const char subtract_mask_asm[] =
+ "FRAG1.1\n"
+ "DCL IN[0], GENERIC[0], PERSPECTIVE\n"
+ "DCL IN[1], POSITION, LINEAR\n"
+ "DCL CONST[0], CONSTANT\n"
+ "DCL SAMP[0..1], CONSTANT\n"
+ "DCL TEMP[0..2], CONSTANT\n"
+ "DCL OUT[0], COLOR, CONSTANT\n"
+ "0: TEX TEMP[1], IN[0], SAMP[0], 2D\n"/*umask*/
+ "1: TEX TEMP[0], IN[1], SAMP[1], 2D\n"/*uprev*/
+ "2: SUB TEMP[2], CONST[0], TEMP[1]\n"
+ "3: MUL OUT[0], TEMP[2].wwww, TEMP[0].wwww\n"
+ "4: END\n";
+
+
+static const char vs_plain_asm[] =
+ "VERT1.1\n"
+ "DCL IN[0]\n"
+ "DCL OUT[0], POSITION\n"
+ "DCL TEMP[0]\n"
+ "DCL CONST[0..1]\n"
+ "0: MUL TEMP[0], IN[0], CONST[0]\n"
+ "1: ADD TEMP[0], TEMP[0], CONST[1]\n"
+ "2: MOV OUT[0], TEMP[0]\n"
+ "3: END\n";
+
+static const char vs_clear_asm[] =
+ "VERT1.1\n"
+ "DCL IN[0]\n"
+ "DCL IN[1]\n"
+ "DCL OUT[0], POSITION\n"
+ "DCL OUT[1], COLOR\n"
+ "DCL TEMP[0]\n"
+ "DCL CONST[0..1]\n"
+ "0: MUL TEMP[0], IN[0], CONST[0]\n"
+ "1: ADD TEMP[0], TEMP[0], CONST[1]\n"
+ "2: MOV OUT[0], TEMP[0]\n"
+ "3: MOV OUT[1], IN[1]\n"
+ "4: END\n";
+
+
+static const char vs_texture_asm[] =
+ "VERT1.1\n"
+ "DCL IN[0]\n"
+ "DCL IN[1]\n"
+ "DCL OUT[0], POSITION\n"
+ "DCL OUT[1], GENERIC\n"
+ "DCL TEMP[0]\n"
+ "DCL CONST[0..1]\n"
+ "0: MUL TEMP[0], IN[0], CONST[0]\n"
+ "1: ADD TEMP[0], TEMP[0], CONST[1]\n"
+ "2: MOV OUT[0], TEMP[0]\n"
+ "3: MOV OUT[1], IN[1]\n"
+ "4: END\n";
+
+#endif
diff --git a/src/gallium/state_trackers/vega/bezier.c b/src/gallium/state_trackers/vega/bezier.c
new file mode 100644
index 0000000000..39a7ade016
--- /dev/null
+++ b/src/gallium/state_trackers/vega/bezier.c
@@ -0,0 +1,704 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "bezier.h"
+
+#include "matrix.h"
+#include "polygon.h"
+
+#include "pipe/p_compiler.h"
+#include "util/u_debug.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <math.h>
+
+static const float flatness = 0.5;
+
+
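+/* de Casteljau split at parameter t: *left receives the [0, t] part of the
+ * curve, while *bez is rewritten in place to the remaining [t, 1] part. */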
+static INLINE void split_left(struct bezier *bez, VGfloat t, struct bezier* left)
+{
+ left->x1 = bez->x1;
+ left->y1 = bez->y1;
+
+ left->x2 = bez->x1 + t * (bez->x2 - bez->x1);
+ left->y2 = bez->y1 + t * (bez->y2 - bez->y1);
+
+ left->x3 = bez->x2 + t * (bez->x3 - bez->x2);
+ left->y3 = bez->y2 + t * (bez->y3 - bez->y2);
+
+ bez->x3 = bez->x3 + t * (bez->x4 - bez->x3);
+ bez->y3 = bez->y3 + t * (bez->y4 - bez->y3);
+
+ bez->x2 = left->x3 + t * (bez->x3 - left->x3);
+ bez->y2 = left->y3 + t * (bez->y3 - left->y3);
+
+ left->x3 = left->x2 + t * (left->x3 - left->x2);
+ left->y3 = left->y2 + t * (left->y3 - left->y2);
+
+ left->x4 = bez->x1 = left->x3 + t * (bez->x2 - left->x3);
+ left->y4 = bez->y1 = left->y3 + t * (bez->y2 - left->y3);
+}
+
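+/* Split the curve at t = 0.5 into first_half and second_half (midpoint
+ * de Casteljau subdivision). */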
+static INLINE void split(struct bezier *bez,
+ struct bezier *first_half,
+ struct bezier *second_half)
+{
+ double c = (bez->x2 + bez->x3) * 0.5;
+ first_half->x2 = (bez->x1 + bez->x2) * 0.5;
+ second_half->x3 = (bez->x3 + bez->x4) * 0.5;
+ first_half->x1 = bez->x1;
+ second_half->x4 = bez->x4;
+ first_half->x3 = (first_half->x2 + c) * 0.5;
+ second_half->x2 = (second_half->x3 + c) * 0.5;
+ first_half->x4 = second_half->x1 =
+ (first_half->x3 + second_half->x2) * 0.5;
+
+ c = (bez->y2 + bez->y3) / 2;
+ first_half->y2 = (bez->y1 + bez->y2) * 0.5;
+ second_half->y3 = (bez->y3 + bez->y4) * 0.5;
+ first_half->y1 = bez->y1;
+ second_half->y4 = bez->y4;
+ first_half->y3 = (first_half->y2 + c) * 0.5;
+ second_half->y2 = (second_half->y3 + c) * 0.5;
+ first_half->y4 = second_half->y1 =
+ (first_half->y3 + second_half->y2) * 0.5;
+}
+
+struct polygon * bezier_to_polygon(struct bezier *bez)
+{
+ struct polygon *poly = polygon_create(64);
+ polygon_vertex_append(poly, bez->x1, bez->y1);
+ bezier_add_to_polygon(bez, poly);
+ return poly;
+}
+
+void bezier_add_to_polygon(const struct bezier *bez,
+ struct polygon *poly)
+{
+ struct bezier beziers[32];
+ struct bezier *b;
+
+ beziers[0] = *bez;
+ b = beziers;
+
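+ /* Adaptive flattening: subdivide the curve on top of the stack until its
+ * control points lie within 'flatness' of the chord (or the 32-entry
+ * stack is full), then emit its endpoint. */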
+ while (b >= beziers) {
+ double y4y1 = b->y4 - b->y1;
+ double x4x1 = b->x4 - b->x1;
+ double l = ABS(x4x1) + ABS(y4y1);
+ double d;
+ if (l > 1.f) {
+ d = ABS((x4x1)*(b->y1 - b->y2) - (y4y1)*(b->x1 - b->x2))
+ + ABS((x4x1)*(b->y1 - b->y3) - (y4y1)*(b->x1 - b->x3));
+ } else {
+ d = ABS(b->x1 - b->x2) + ABS(b->y1 - b->y2) +
+ ABS(b->x1 - b->x3) + ABS(b->y1 - b->y3);
+ l = 1.;
+ }
+ if (d < flatness*l || b == beziers + 31) {
+ /* good enough, we pop it off and add the endpoint */
+ polygon_vertex_append(poly, b->x4, b->y4);
+ --b;
+ } else {
+ /* split, second half of the bezier goes lower into the stack */
+ split(b, b+1, b);
+ ++b;
+ }
+ }
+}
+
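+/* Recursively subdivide until the control-polygon length and the chord length
+ * agree to within 'error', then accumulate the arc length into *length. */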
+static void add_if_close(struct bezier *bez, VGfloat *length, VGfloat error)
+{
+ struct bezier left, right; /* bez poly splits */
+ VGfloat len = 0.0; /* arc length */
+ VGfloat chord; /* chord length */
+
+ len = len + line_length(bez->x1, bez->y1, bez->x2, bez->y2);
+ len = len + line_length(bez->x2, bez->y2, bez->x3, bez->y3);
+ len = len + line_length(bez->x3, bez->y3, bez->x4, bez->y4);
+
+ chord = line_length(bez->x1, bez->y1, bez->x4, bez->y4);
+
+ if ((len-chord) > error) {
+ split(bez, &left, &right); /* split in two */
+ add_if_close(&left, length, error); /* try left side */
+ add_if_close(&right, length, error); /* try right side */
+ return;
+ }
+
+ *length = *length + len;
+
+ return;
+}
+
+float bezier_length(struct bezier *bez, float error)
+{
+ VGfloat length = 0.f;
+
+ add_if_close(bez, &length, error);
+ return length;
+}
+
+void bezier_init(struct bezier *bez,
+ float x1, float y1,
+ float x2, float y2,
+ float x3, float y3,
+ float x4, float y4)
+{
+ bez->x1 = x1;
+ bez->y1 = y1;
+ bez->x2 = x2;
+ bez->y2 = y2;
+ bez->x3 = x3;
+ bez->y3 = y3;
+ bez->x4 = x4;
+ bez->y4 = y4;
+#if 0
+ debug_printf("bezier in [%f, %f, %f, %f, %f, %f]\n",
+ x1, y1, x2, y2, x3, y3, x4, y4);
+#endif
+}
+
+
+static INLINE void bezier_init2v(struct bezier *bez,
+ float *pt1,
+ float *pt2,
+ float *pt3,
+ float *pt4)
+{
+ bez->x1 = pt1[0];
+ bez->y1 = pt1[1];
+
+ bez->x2 = pt2[0];
+ bez->y2 = pt2[1];
+
+ bez->x3 = pt3[0];
+ bez->y3 = pt3[1];
+
+ bez->x4 = pt4[0];
+ bez->y4 = pt4[1];
+}
+
+
+void bezier_transform(struct bezier *bez,
+ struct matrix *matrix)
+{
+ assert(matrix_is_affine(matrix));
+ matrix_map_point(matrix, bez->x1, bez->y1, &bez->x1, &bez->y1);
+ matrix_map_point(matrix, bez->x2, bez->y2, &bez->x2, &bez->y2);
+ matrix_map_point(matrix, bez->x3, bez->y3, &bez->x3, &bez->y3);
+ matrix_map_point(matrix, bez->x4, bez->y4, &bez->x4, &bez->y4);
+}
+
+static INLINE void bezier_point_at(const struct bezier *bez, float t, float *pt)
+{
+ float a, b, c, d;
+ float m_t;
+ m_t = 1. - t;
+ b = m_t * m_t;
+ c = t * t;
+ d = c * t;
+ a = b * m_t;
+ b *= 3. * t;
+ c *= 3. * m_t;
+ pt[0] = a*bez->x1 + b*bez->x2 + c*bez->x3 + d*bez->x4;
+ pt[1] = a*bez->y1 + b*bez->y2 + c*bez->y3 + d*bez->y4;
+}
+
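+/* Approximate, unnormalized normal at t, built from the rotated
+ * control-point differences. */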
+static INLINE void bezier_normal_at(const struct bezier *bez, float t, float *norm)
+{
+ float m_t = 1. - t;
+ float a = m_t * m_t;
+ float b = t * m_t;
+ float c = t * t;
+
+ norm[0] = (bez->y2-bez->y1) * a + (bez->y3-bez->y2) * b + (bez->y4-bez->y3) * c;
+ norm[1] = -(bez->x2-bez->x1) * a - (bez->x3-bez->x2) * b - (bez->x4-bez->x3) * c;
+}
+
+enum shift_result {
+ Ok,
+ Discard,
+ Split,
+ Circle
+};
+
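+/* Sample both curves at a few parameter values and check that 'b2' stays
+ * within 'threshold' of the requested offset from 'b1'; returns Split when
+ * it does not, Ok otherwise. */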
+static enum shift_result good_offset(const struct bezier *b1,
+ const struct bezier *b2,
+ float offset, float threshold)
+{
+ const float o2 = offset*offset;
+ const float max_dist_line = threshold*offset*offset;
+ const float max_dist_normal = threshold*offset;
+ const float spacing = 0.25;
+ for (float i = spacing; i < 0.99; i += spacing) {
+ float p1[2],p2[2], d, l;
+ float normal[2];
+ bezier_point_at(b1, i, p1);
+ bezier_point_at(b2, i, p2);
+ d = (p1[0] - p2[0])*(p1[0] - p2[0]) + (p1[1] - p2[1])*(p1[1] - p2[1]);
+ if (ABS(d - o2) > max_dist_line)
+ return Split;
+
+ bezier_normal_at(b1, i, normal);
+ l = ABS(normal[0]) + ABS(normal[1]);
+ if (l != 0.) {
+ d = ABS(normal[0]*(p1[1] - p2[1]) - normal[1]*(p1[0] - p2[0]) ) / l;
+ if (d > max_dist_normal)
+ return Split;
+ }
+ }
+ return Ok;
+}
+
+static INLINE void shift_line_by_normal(float *l, float offset)
+{
+ float norm[4];
+ float tx, ty;
+
+ line_normal(l, norm);
+ line_normalize(norm);
+
+ tx = (norm[2] - norm[0]) * offset;
+ ty = (norm[3] - norm[1]) * offset;
+ l[0] += tx; l[1] += ty;
+ l[2] += tx; l[3] += ty;
+}
+
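+/* Return VG_TRUE when the 3 or 4 control points are (numerically) collinear. */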
+static INLINE VGboolean is_bezier_line(float (*points)[2], int count)
+{
+ float dx13 = points[2][0] - points[0][0];
+ float dy13 = points[2][1] - points[0][1];
+
+ float dx12 = points[1][0] - points[0][0];
+ float dy12 = points[1][1] - points[0][1];
+
+ debug_assert(count > 2);
+
+ if (count == 3) {
+ return floatsEqual(dx12 * dy13, dx13 * dy12);
+ } else if (count == 4) {
+ float dx14 = points[3][0] - points[0][0];
+ float dy14 = points[3][1] - points[0][1];
+
+ return (floatsEqual(dx12 * dy13, dx13 * dy12) &&
+ floatsEqual(dx12 * dy14, dx14 * dy12));
+ }
+
+ return VG_FALSE;
+}
+
+static INLINE void compute_pt_normal(float *pt1, float *pt2, float *res)
+{
+ float line[4];
+ float normal[4];
+ line[0] = 0.f; line[1] = 0.f;
+ line[2] = pt2[0] - pt1[0];
+ line[3] = pt2[1] - pt1[1];
+ line_normal(line, normal);
+ line_normalize(normal);
+
+ res[0] = normal[2];
+ res[1] = normal[3];
+}
+
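+/* Compute an approximation of 'orig' offset by 'offset' along its normal.
+ * May return Discard (degenerate input), Circle (caller should emit a
+ * semicircle instead), Split (not accurate enough yet) or Ok. */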
+static enum shift_result shift(const struct bezier *orig,
+ struct bezier *shifted,
+ float offset, float threshold)
+{
+ int map[4];
+ VGboolean p1_p2_equal = (orig->x1 == orig->x2 && orig->y1 == orig->y2);
+ VGboolean p2_p3_equal = (orig->x2 == orig->x3 && orig->y2 == orig->y3);
+ VGboolean p3_p4_equal = (orig->x3 == orig->x4 && orig->y3 == orig->y4);
+
+ float points[4][2];
+ int np = 0;
+ float bounds[4];
+ float points_shifted[4][2];
+ float prev_normal[2];
+
+ points[np][0] = orig->x1;
+ points[np][1] = orig->y1;
+ map[0] = 0;
+ ++np;
+ if (!p1_p2_equal) {
+ points[np][0] = orig->x2;
+ points[np][1] = orig->y2;
+ ++np;
+ }
+ map[1] = np - 1;
+ if (!p2_p3_equal) {
+ points[np][0] = orig->x3;
+ points[np][1] = orig->y3;
+ ++np;
+ }
+ map[2] = np - 1;
+ if (!p3_p4_equal) {
+ points[np][0] = orig->x4;
+ points[np][1] = orig->y4;
+ ++np;
+ }
+ map[3] = np - 1;
+ if (np == 1)
+ return Discard;
+
+ /* We need to special-case lines of 3 or 4 points due to numerical
+ instability in the intersection code below */
+ if (np > 2 && is_bezier_line(points, np)) {
+ float l[4] = { points[0][0], points[0][1],
+ points[np-1][0], points[np-1][1] };
+ float ctrl1[2], ctrl2[2];
+ if (floatsEqual(points[0][0], points[np-1][0]) &&
+ floatsEqual(points[0][1], points[np-1][1]))
+ return Discard;
+
+ shift_line_by_normal(l, offset);
+ line_point_at(l, 0.33, ctrl1);
+ line_point_at(l, 0.66, ctrl2);
+ bezier_init(shifted, l[0], l[1],
+ ctrl1[0], ctrl1[1], ctrl2[0], ctrl2[1],
+ l[2], l[3]);
+ return Ok;
+ }
+
+ bezier_bounds(orig, bounds);
+ if (np == 4 && bounds[2] < .1*offset && bounds[3] < .1*offset) {
+ float l = (orig->x1 - orig->x2)*(orig->x1 - orig->x2) +
+ (orig->y1 - orig->y2)*(orig->y1 - orig->y2) *
+ (orig->x3 - orig->x4)*(orig->x3 - orig->x4) +
+ (orig->y3 - orig->y4)*(orig->y3 - orig->y4);
+ float dot = (orig->x1 - orig->x2)*(orig->x3 - orig->x4) +
+ (orig->y1 - orig->y2)*(orig->y3 - orig->y4);
+ if (dot < 0 && dot*dot < 0.8*l)
+ /* the points are close and reverse direction. Approximate the whole
+ thing by a semicircle */
+ return Circle;
+ }
+
+ compute_pt_normal(points[0], points[1], prev_normal);
+
+ points_shifted[0][0] = points[0][0] + offset * prev_normal[0];
+ points_shifted[0][1] = points[0][1] + offset * prev_normal[1];
+
+ for (int i = 1; i < np - 1; ++i) {
+ float normal_sum[2], r;
+ float next_normal[2];
+ compute_pt_normal(points[i], points[i + 1], next_normal);
+
+ normal_sum[0] = prev_normal[0] + next_normal[0];
+ normal_sum[1] = prev_normal[1] + next_normal[1];
+
+ r = 1.0 + prev_normal[0] * next_normal[0]
+ + prev_normal[1] * next_normal[1];
+
+ if (floatsEqual(r + 1, 1)) {
+ points_shifted[i][0] = points[i][0] + offset * prev_normal[0];
+ points_shifted[i][1] = points[i][1] + offset * prev_normal[1];
+ } else {
+ float k = offset / r;
+ points_shifted[i][0] = points[i][0] + k * normal_sum[0];
+ points_shifted[i][1] = points[i][1] + k * normal_sum[1];
+ }
+
+ prev_normal[0] = next_normal[0];
+ prev_normal[1] = next_normal[1];
+ }
+
+ points_shifted[np - 1][0] = points[np - 1][0] + offset * prev_normal[0];
+ points_shifted[np - 1][1] = points[np - 1][1] + offset * prev_normal[1];
+
+ bezier_init2v(shifted,
+ points_shifted[map[0]], points_shifted[map[1]],
+ points_shifted[map[2]], points_shifted[map[3]]);
+
+ return good_offset(orig, shifted, offset, threshold);
+}
+
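+/* Approximate a semicircular cap of radius 'offset' with two cubic segments
+ * written to o[0] and o[1], using the KAPPA circle approximation. */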
+static VGboolean make_circle(const struct bezier *b, float offset, struct bezier *o)
+{
+ float normals[3][2];
+ float dist;
+ float angles[2];
+ float sign = 1.f;
+ int i;
+ float circle[3][2];
+
+ normals[0][0] = b->y2 - b->y1;
+ normals[0][1] = b->x1 - b->x2;
+ dist = sqrt(normals[0][0]*normals[0][0] + normals[0][1]*normals[0][1]);
+ if (floatsEqual(dist + 1, 1.f))
+ return VG_FALSE;
+ normals[0][0] /= dist;
+ normals[0][1] /= dist;
+
+ normals[2][0] = b->y4 - b->y3;
+ normals[2][1] = b->x3 - b->x4;
+ dist = sqrt(normals[2][0]*normals[2][0] + normals[2][1]*normals[2][1]);
+ if (floatsEqual(dist + 1, 1.f))
+ return VG_FALSE;
+ normals[2][0] /= dist;
+ normals[2][1] /= dist;
+
+ normals[1][0] = b->x1 - b->x2 - b->x3 + b->x4;
+ normals[1][1] = b->y1 - b->y2 - b->y3 + b->y4;
+ dist = -1*sqrt(normals[1][0]*normals[1][0] + normals[1][1]*normals[1][1]);
+ normals[1][0] /= dist;
+ normals[1][1] /= dist;
+
+ for (i = 0; i < 2; ++i) {
+ float cos_a = normals[i][0]*normals[i+1][0] + normals[i][1]*normals[i+1][1];
+ if (cos_a > 1.)
+ cos_a = 1.;
+ if (cos_a < -1.)
+ cos_a = -1;
+ angles[i] = acos(cos_a)/M_PI;
+ }
+
+ if (angles[0] + angles[1] > 1.) {
+ /* more than 180 degrees */
+ normals[1][0] = -normals[1][0];
+ normals[1][1] = -normals[1][1];
+ angles[0] = 1. - angles[0];
+ angles[1] = 1. - angles[1];
+ sign = -1.;
+ }
+
+ circle[0][0] = b->x1 + normals[0][0]*offset;
+ circle[0][1] = b->y1 + normals[0][1]*offset;
+
+ circle[1][0] = 0.5*(b->x1 + b->x4) + normals[1][0]*offset;
+ circle[1][1] = 0.5*(b->y1 + b->y4) + normals[1][1]*offset;
+
+ circle[2][0] = b->x4 + normals[2][0]*offset;
+ circle[2][1] = b->y4 + normals[2][1]*offset;
+
+ for (i = 0; i < 2; ++i) {
+ float kappa = 2.*KAPPA * sign * offset * angles[i];
+
+ o->x1 = circle[i][0];
+ o->y1 = circle[i][1];
+ o->x2 = circle[i][0] - normals[i][1]*kappa;
+ o->y2 = circle[i][1] + normals[i][0]*kappa;
+ o->x3 = circle[i+1][0] + normals[i+1][1]*kappa;
+ o->y3 = circle[i+1][1] - normals[i+1][0]*kappa;
+ o->x4 = circle[i+1][0];
+ o->y4 = circle[i+1][1];
+
+ ++o;
+ }
+ return VG_TRUE;
+}
+
+int bezier_translate_by_normal(struct bezier *bez,
+ struct bezier *curves,
+ int max_curves,
+ float normal_len,
+ float threshold)
+{
+ struct bezier beziers[10];
+ struct bezier *b, *o;
+
+ /* fixme: this should really be floatsEqual */
+ if (bez->x1 == bez->x2 && bez->x1 == bez->x3 && bez->x1 == bez->x4 &&
+ bez->y1 == bez->y2 && bez->y1 == bez->y3 && bez->y1 == bez->y4)
+ return 0;
+
+ --max_curves;
+redo:
+ beziers[0] = *bez;
+ b = beziers;
+ o = curves;
+
+ while (b >= beziers) {
+ int stack_segments = b - beziers + 1;
+ enum shift_result res;
+ if ((stack_segments == 10) || (o - curves == max_curves - stack_segments)) {
+ threshold *= 1.5;
+ if (threshold > 2.)
+ goto give_up;
+ goto redo;
+ }
+ res = shift(b, o, normal_len, threshold);
+ if (res == Discard) {
+ --b;
+ } else if (res == Ok) {
+ ++o;
+ --b;
+ continue;
+ } else if (res == Circle && max_curves - (o - curves) >= 2) {
+ /* add semi circle */
+ if (make_circle(b, normal_len, o))
+ o += 2;
+ --b;
+ } else {
+ split(b, b+1, b);
+ ++b;
+ }
+ }
+
+give_up:
+ while (b >= beziers) {
+ enum shift_result res = shift(b, o, normal_len, threshold);
+
+ /* if res isn't Ok or Split then *o is undefined */
+ if (res == Ok || res == Split)
+ ++o;
+
+ --b;
+ }
+
+ debug_assert(o - curves <= max_curves);
+ return o - curves;
+}
+
+void bezier_bounds(const struct bezier *bez,
+ float *bounds/*x/y/width/height*/)
+{
+ float xmin = bez->x1;
+ float xmax = bez->x1;
+ float ymin = bez->y1;
+ float ymax = bez->y1;
+
+ if (bez->x2 < xmin)
+ xmin = bez->x2;
+ else if (bez->x2 > xmax)
+ xmax = bez->x2;
+ if (bez->x3 < xmin)
+ xmin = bez->x3;
+ else if (bez->x3 > xmax)
+ xmax = bez->x3;
+ if (bez->x4 < xmin)
+ xmin = bez->x4;
+ else if (bez->x4 > xmax)
+ xmax = bez->x4;
+
+ if (bez->y2 < ymin)
+ ymin = bez->y2;
+ else if (bez->y2 > ymax)
+ ymax = bez->y2;
+ if (bez->y3 < ymin)
+ ymin = bez->y3;
+ else if (bez->y3 > ymax)
+ ymax = bez->y3;
+ if (bez->y4 < ymin)
+ ymin = bez->y4;
+ else if (bez->y4 > ymax)
+ ymax = bez->y4;
+
+ bounds[0] = xmin; /* x */
+ bounds[1] = ymin; /* y */
+ bounds[2] = xmax - xmin; /* width */
+ bounds[3] = ymax - ymin; /* height */
+}
+
+void bezier_start_tangent(const struct bezier *bez,
+ float *tangent)
+{
+ tangent[0] = bez->x1;
+ tangent[1] = bez->y1;
+ tangent[2] = bez->x2;
+ tangent[3] = bez->y2;
+
+ if (null_line(tangent)) {
+ tangent[0] = bez->x1;
+ tangent[1] = bez->y1;
+ tangent[2] = bez->x3;
+ tangent[3] = bez->y3;
+ }
+ if (null_line(tangent)) {
+ tangent[0] = bez->x1;
+ tangent[1] = bez->y1;
+ tangent[2] = bez->x4;
+ tangent[3] = bez->y4;
+ }
+}
+
+
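+/* Bisection search for the parameter t at which the arc length measured from
+ * t = 0 reaches 'at_length', to within 'error'. */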
+static INLINE VGfloat bezier_t_at_length(struct bezier *bez,
+ VGfloat at_length,
+ VGfloat error)
+{
+ VGfloat len = bezier_length(bez, error);
+ VGfloat t = 1.0;
+ VGfloat last_bigger = 1.;
+
+ if (at_length > len || floatsEqual(at_length, len))
+ return t;
+
+ if (floatIsZero(at_length))
+ return 0.f;
+
+ t *= 0.5;
+ while (1) {
+ struct bezier right = *bez;
+ struct bezier left;
+ VGfloat tmp_len;
+ split_left(&right, t, &left);
+ tmp_len = bezier_length(&left, error);
+ if (ABS(tmp_len - at_length) < error)
+ break;
+
+ if (tmp_len < at_length) {
+ t += (last_bigger - t)*.5;
+ } else {
+ last_bigger = t;
+ t -= t*.5;
+ }
+ }
+ return t;
+}
+
+void bezier_point_at_length(struct bezier *bez,
+ float length,
+ float *point,
+ float *normal)
+{
+ /* ~0.000001 seems to be required to pass G2080x tests */
+ VGfloat t = bezier_t_at_length(bez, length, 0.000001);
+ bezier_point_at(bez, t, point);
+ bezier_normal_at(bez, t, normal);
+ vector_unit(normal);
+}
+
+void bezier_point_at_t(struct bezier *bez, float t,
+ float *point, float *normal)
+{
+ bezier_point_at(bez, t, point);
+ bezier_normal_at(bez, t, normal);
+ vector_unit(normal);
+}
+
+void bezier_exact_bounds(const struct bezier *bez,
+ float *bounds/*x/y/width/height*/)
+{
+ struct polygon *poly = polygon_create(64);
+ polygon_vertex_append(poly, bez->x1, bez->y1);
+ bezier_add_to_polygon(bez, poly);
+ polygon_bounding_rect(poly, bounds);
+ polygon_destroy(poly);
+}
+
diff --git a/src/gallium/state_trackers/vega/bezier.h b/src/gallium/state_trackers/vega/bezier.h
new file mode 100644
index 0000000000..e06666051c
--- /dev/null
+++ b/src/gallium/state_trackers/vega/bezier.h
@@ -0,0 +1,81 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef BEZIER_H
+#define BEZIER_H
+
+struct polygon;
+struct matrix;
+
+struct bezier {
+ float x1, y1;
+ float x2, y2;
+ float x3, y3;
+ float x4, y4;
+};
+
+
+#define BEZIER_DEFAULT_ERROR 0.01
+
+/* kappa is the control-point offset l for a circle with r = 1, so we can
+ * emulate any circle of radius r by using the formula
+ * l = r * kappa
+ * More at:
+ * http://www.whizkidtech.redprince.net/bezier/circle/ */
+#define KAPPA 0.5522847498
+
+void bezier_init(struct bezier *bez,
+ float x1, float y1,
+ float x2, float y2,
+ float x3, float y3,
+ float x4, float y4);
+
+struct polygon *bezier_to_polygon(struct bezier *bez);
+void bezier_add_to_polygon(const struct bezier *bez,
+ struct polygon *poly);
+float bezier_length(struct bezier *bez, float error);
+void bezier_transform(struct bezier *bez,
+ struct matrix *mat);
+
+int bezier_translate_by_normal(struct bezier *b,
+ struct bezier *curves,
+ int max_curves,
+ float normal_len,
+ float threshold);
+void bezier_bounds(const struct bezier *bez,
+ float *bounds/*x/y/width/height*/);
+void bezier_exact_bounds(const struct bezier *bez,
+ float *bounds/*x/y/width/height*/);
+
+void bezier_start_tangent(const struct bezier *bez,
+ float *tangent);
+
+void bezier_point_at_length(struct bezier *bez, float length,
+ float *point, float *normal);
+void bezier_point_at_t(struct bezier *bez, float t,
+ float *point, float *normal);
+
+#endif
diff --git a/src/gallium/state_trackers/vega/image.c b/src/gallium/state_trackers/vega/image.c
new file mode 100644
index 0000000000..9a722980d5
--- /dev/null
+++ b/src/gallium/state_trackers/vega/image.c
@@ -0,0 +1,654 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "image.h"
+
+#include "vg_translate.h"
+#include "vg_context.h"
+#include "matrix.h"
+#include "renderer.h"
+#include "util_array.h"
+#include "api_consts.h"
+#include "shaders_cache.h"
+#include "shader.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_screen.h"
+#include "pipe/p_inlines.h"
+#include "util/u_blit.h"
+#include "util/u_tile.h"
+#include "util/u_memory.h"
+#include "util/u_math.h"
+
+static enum pipe_format vg_format_to_pipe(VGImageFormat format)
+{
+ switch(format) {
+ case VG_sRGB_565:
+ return PIPE_FORMAT_R5G6B5_UNORM;
+ case VG_sRGBA_5551:
+ return PIPE_FORMAT_A1R5G5B5_UNORM;
+ case VG_sRGBA_4444:
+ return PIPE_FORMAT_A4R4G4B4_UNORM;
+ case VG_sL_8:
+ case VG_lL_8:
+ return PIPE_FORMAT_L8_UNORM;
+ case VG_BW_1:
+ return PIPE_FORMAT_A8R8G8B8_UNORM;
+ case VG_A_8:
+ return PIPE_FORMAT_A8_UNORM;
+#ifdef OPENVG_VERSION_1_1
+ case VG_A_1:
+ case VG_A_4:
+ return PIPE_FORMAT_A8_UNORM;
+#endif
+ default:
+ return PIPE_FORMAT_A8R8G8B8_UNORM;
+ }
+}
+
+static INLINE void vg_sync_size(VGfloat *src_loc, VGfloat *dst_loc)
+{
+ src_loc[2] = MIN2(src_loc[2], dst_loc[2]);
+ src_loc[3] = MIN2(src_loc[3], dst_loc[3]);
+ dst_loc[2] = src_loc[2];
+ dst_loc[3] = src_loc[3];
+}
+
+
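+/* Clip the source and destination rectangles against their texture bounds,
+ * force them to the same size and hand the copy to the renderer. */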
+static void vg_copy_texture(struct vg_context *ctx,
+ struct pipe_texture *dst, VGint dx, VGint dy,
+ struct pipe_texture *src, VGint sx, VGint sy,
+ VGint width, VGint height)
+{
+ VGfloat dst_loc[4], src_loc[4];
+ VGfloat dst_bounds[4], src_bounds[4];
+ VGfloat src_shift[4], dst_shift[4], shift[4];
+
+ dst_loc[0] = dx;
+ dst_loc[1] = dy;
+ dst_loc[2] = width;
+ dst_loc[3] = height;
+ dst_bounds[0] = 0.f;
+ dst_bounds[1] = 0.f;
+ dst_bounds[2] = dst->width[0];
+ dst_bounds[3] = dst->height[0];
+
+ src_loc[0] = sx;
+ src_loc[1] = sy;
+ src_loc[2] = width;
+ src_loc[3] = height;
+ src_bounds[0] = 0.f;
+ src_bounds[1] = 0.f;
+ src_bounds[2] = src->width[0];
+ src_bounds[3] = src->height[0];
+
+ vg_bound_rect(src_loc, src_bounds, src_shift);
+ vg_bound_rect(dst_loc, dst_bounds, dst_shift);
+ shift[0] = src_shift[0] - dst_shift[0];
+ shift[1] = src_shift[1] - dst_shift[1];
+
+ if (shift[0] < 0)
+ vg_shift_rectx(src_loc, src_bounds, -shift[0]);
+ else
+ vg_shift_rectx(dst_loc, dst_bounds, shift[0]);
+
+ if (shift[1] < 0)
+ vg_shift_recty(src_loc, src_bounds, -shift[1]);
+ else
+ vg_shift_recty(dst_loc, dst_bounds, shift[1]);
+
+ vg_sync_size(src_loc, dst_loc);
+
+ if (src_loc[2] >= 0 && src_loc[3] >= 0 &&
+ dst_loc[2] >= 0 && dst_loc[3] >= 0) {
+ renderer_copy_texture(ctx->renderer,
+ src,
+ src_loc[0],
+ src_loc[1] + src_loc[3],
+ src_loc[0] + src_loc[2],
+ src_loc[1],
+ dst,
+ dst_loc[0],
+ dst_loc[1] + dst_loc[3],
+ dst_loc[0] + dst_loc[2],
+ dst_loc[1]);
+ }
+
+}
+
+void vg_copy_surface(struct vg_context *ctx,
+ struct pipe_surface *dst, VGint dx, VGint dy,
+ struct pipe_surface *src, VGint sx, VGint sy,
+ VGint width, VGint height)
+{
+ VGfloat dst_loc[4], src_loc[4];
+ VGfloat dst_bounds[4], src_bounds[4];
+ VGfloat src_shift[4], dst_shift[4], shift[4];
+
+ dst_loc[0] = dx;
+ dst_loc[1] = dy;
+ dst_loc[2] = width;
+ dst_loc[3] = height;
+ dst_bounds[0] = 0.f;
+ dst_bounds[1] = 0.f;
+ dst_bounds[2] = dst->width;
+ dst_bounds[3] = dst->height;
+
+ src_loc[0] = sx;
+ src_loc[1] = sy;
+ src_loc[2] = width;
+ src_loc[3] = height;
+ src_bounds[0] = 0.f;
+ src_bounds[1] = 0.f;
+ src_bounds[2] = src->width;
+ src_bounds[3] = src->height;
+
+ vg_bound_rect(src_loc, src_bounds, src_shift);
+ vg_bound_rect(dst_loc, dst_bounds, dst_shift);
+ shift[0] = src_shift[0] - dst_shift[0];
+ shift[1] = src_shift[1] - dst_shift[1];
+
+ if (shift[0] < 0)
+ vg_shift_rectx(src_loc, src_bounds, -shift[0]);
+ else
+ vg_shift_rectx(dst_loc, dst_bounds, shift[0]);
+
+ if (shift[1] < 0)
+ vg_shift_recty(src_loc, src_bounds, -shift[1]);
+ else
+ vg_shift_recty(dst_loc, dst_bounds, shift[1]);
+
+ vg_sync_size(src_loc, dst_loc);
+
+ if (src_loc[2] > 0 && src_loc[3] > 0 &&
+ dst_loc[2] > 0 && dst_loc[3] > 0) {
+ if (src == dst)
+ renderer_copy_surface(ctx->renderer,
+ src,
+ src_loc[0],
+ src->height - (src_loc[1] + src_loc[3]),
+ src_loc[0] + src_loc[2],
+ src->height - src_loc[1],
+ dst,
+ dst_loc[0],
+ dst->height - (dst_loc[1] + dst_loc[3]),
+ dst_loc[0] + dst_loc[2],
+ dst->height - dst_loc[1],
+ 0, 0);
+ else
+ renderer_copy_surface(ctx->renderer,
+ src,
+ src_loc[0],
+ src->height - src_loc[1],
+ src_loc[0] + src_loc[2],
+ src->height - (src_loc[1] + src_loc[3]),
+ dst,
+ dst_loc[0],
+ dst->height - (dst_loc[1] + dst_loc[3]),
+ dst_loc[0] + dst_loc[2],
+ dst->height - dst_loc[1],
+ 0, 0);
+ }
+
+}
+
+static struct pipe_texture *image_texture(struct vg_image *img)
+{
+ struct pipe_texture *tex = img->texture;
+ return tex;
+}
+
+
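+/* Fill a sub-rectangle of the image with a constant sRGBA_8888 value by
+ * uploading a temporary buffer through image_sub_data(). */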
+static void image_cleari(struct vg_image *img, VGint clear_colori,
+ VGint x, VGint y, VGint width, VGint height)
+{
+ VGint *clearbuf;
+ VGint i;
+ VGfloat dwidth, dheight;
+
+ clearbuf = malloc(sizeof(VGint)*width*height);
+ for (i = 0; i < width*height; ++i)
+ clearbuf[i] = clear_colori;
+
+ dwidth = MIN2(width, img->width);
+ dheight = MIN2(height, img->height);
+
+ image_sub_data(img, clearbuf, width * sizeof(VGint),
+ VG_sRGBA_8888,
+ x, y, dwidth, dheight);
+ free(clearbuf);
+}
+
+struct vg_image * image_create(VGImageFormat format,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_image *image = CALLOC_STRUCT(vg_image);
+ enum pipe_format pformat = vg_format_to_pipe(format);
+ struct pipe_texture pt, *newtex;
+ struct pipe_screen *screen = ctx->pipe->screen;
+
+ vg_init_object(&image->base, ctx, VG_OBJECT_IMAGE);
+
+ image->format = format;
+ image->width = width;
+ image->height = height;
+
+ image->sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ image->sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ image->sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ image->sampler.min_img_filter = PIPE_TEX_MIPFILTER_NEAREST;
+ image->sampler.mag_img_filter = PIPE_TEX_MIPFILTER_NEAREST;
+ image->sampler.normalized_coords = 1;
+
+ assert(screen->is_format_supported(screen, pformat, PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_SAMPLER, 0));
+
+ memset(&pt, 0, sizeof(pt));
+ pt.target = PIPE_TEXTURE_2D;
+ pt.format = pformat;
+ pf_get_block(pformat, &pt.block);
+ pt.last_level = 0;
+ pt.width[0] = width;
+ pt.height[0] = height;
+ pt.depth[0] = 1;
+ pt.tex_usage = PIPE_TEXTURE_USAGE_SAMPLER;
+
+ newtex = screen->texture_create(screen, &pt);
+
+ debug_assert(newtex);
+
+ image->texture = newtex;
+
+ vg_context_add_object(ctx, VG_OBJECT_IMAGE, image);
+
+ image_cleari(image, 0, 0, 0, image->width, image->height);
+ return image;
+}
+
+void image_destroy(struct vg_image *img)
+{
+ struct vg_context *ctx = vg_current_context();
+ vg_context_remove_object(ctx, VG_OBJECT_IMAGE, img);
+
+
+ if (img->parent) {
+ /* remove img from the parent child array */
+ int idx;
+ struct vg_image **array =
+ (struct vg_image **)img->parent->children_array->data;
+
+ for (idx = 0; idx < img->parent->children_array->num_elements; ++idx) {
+ struct vg_image *child = array[idx];
+ if (child == img) {
+ break;
+ }
+ }
+ debug_assert(idx < img->parent->children_array->num_elements);
+ array_remove_element(img->parent->children_array, idx);
+ }
+
+ if (img->children_array && img->children_array->num_elements) {
+ /* reparent the children */
+ VGint i;
+ struct vg_image *parent = img->parent;
+ struct vg_image **children =
+ (struct vg_image **)img->children_array->data;
+ if (!parent) {
+ VGint min_x = children[0]->x;
+ parent = children[0];
+
+ for (i = 1; i < img->children_array->num_elements; ++i) {
+ struct vg_image *child = children[i];
+ if (child->x < min_x) {
+ parent = child;
+ }
+ }
+ }
+
+ for (i = 0; i < img->children_array->num_elements; ++i) {
+ struct vg_image *child = children[i];
+ if (child != parent) {
+ child->parent = parent;
+ if (!parent->children_array) {
+ parent->children_array = array_create(
+ sizeof(struct vg_image*));
+ }
+ array_append_data(parent->children_array,
+ &child, 1);
+ } else
+ child->parent = NULL;
+ }
+ array_destroy(img->children_array);
+ }
+
+ pipe_texture_reference(&img->texture, NULL);
+ free(img);
+}
+
+void image_clear(struct vg_image *img,
+ VGint x, VGint y, VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ VGfloat *clear_colorf = ctx->state.vg.clear_color;
+ VGubyte r, g, b ,a;
+ VGint clear_colori;
+ /* FIXME: this is very nasty */
+ r = float_to_ubyte(clear_colorf[0]);
+ g = float_to_ubyte(clear_colorf[1]);
+ b = float_to_ubyte(clear_colorf[2]);
+ a = float_to_ubyte(clear_colorf[3]);
+ clear_colori = r << 24 | g << 16 | b << 8 | a;
+ image_cleari(img, clear_colori, x, y, width, height);
+}
+
+void image_sub_data(struct vg_image *image,
+ const void * data,
+ VGint dataStride,
+ VGImageFormat dataFormat,
+ VGint x, VGint y,
+ VGint width, VGint height)
+{
+ const VGint yStep = 1;
+ VGubyte *src = (VGubyte *)data;
+ VGfloat temp[VEGA_MAX_IMAGE_WIDTH][4];
+ VGfloat *df = (VGfloat*)temp;
+ VGint i;
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_screen *screen = ctx->pipe->screen;
+ struct pipe_texture *texture = image_texture(image);
+ VGint xoffset = 0, yoffset = 0;
+
+ if (x < 0) {
+ xoffset -= x;
+ width += x;
+ x = 0;
+ }
+ if (y < 0) {
+ yoffset -= y;
+ height += y;
+ y = 0;
+ }
+
+ if (width <= 0 || height <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ if (x > image->width || y > image->height) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+
+ if (x + width > image->width) {
+ width = image->width - x;
+ }
+
+ if (y + height > image->height) {
+ height = image->height - y;
+ }
+
+ { /* upload color_data */
+ struct pipe_transfer *transfer = screen->get_tex_transfer(
+ screen, texture, 0, 0, 0,
+ PIPE_TRANSFER_WRITE, 0, 0, texture->width[0], texture->height[0]);
+ src += (dataStride * yoffset);
+ for (i = 0; i < height; i++) {
+ _vega_unpack_float_span_rgba(ctx, width, xoffset, src, dataFormat, temp);
+ pipe_put_tile_rgba(transfer, x+image->x, y+image->y, width, 1, df);
+ y += yStep;
+ src += dataStride;
+ }
+ screen->tex_transfer_destroy(transfer);
+ }
+}
+
+void image_get_sub_data(struct vg_image * image,
+ void * data,
+ VGint dataStride,
+ VGImageFormat dataFormat,
+ VGint sx, VGint sy,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ VGfloat temp[VEGA_MAX_IMAGE_WIDTH][4];
+ VGfloat *df = (VGfloat*)temp;
+ VGint y = 0, yStep = 1;
+ VGint i;
+ VGubyte *dst = (VGubyte *)data;
+
+ {
+ struct pipe_transfer *transfer =
+ screen->get_tex_transfer(screen,
+ image->texture, 0, 0, 0,
+ PIPE_TRANSFER_READ,
+ 0, 0,
+ image->x + image->width,
+ image->y + image->height);
+ /* Do a row at a time to flip image data vertically */
+ for (i = 0; i < height; i++) {
+#if 0
+ debug_printf("%d-%d == %d\n", sy, height, y);
+#endif
+ pipe_get_tile_rgba(transfer, sx+image->x, y, width, 1, df);
+ y += yStep;
+ _vega_pack_rgba_span_float(ctx, width, temp, dataFormat, dst);
+ dst += dataStride;
+ }
+
+ screen->tex_transfer_destroy(transfer);
+ }
+}
+
+struct vg_image * image_child_image(struct vg_image *parent,
+ VGint x, VGint y,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_image *image = CALLOC_STRUCT(vg_image);
+
+ vg_init_object(&image->base, ctx, VG_OBJECT_IMAGE);
+
+ image->x = parent->x + x;
+ image->y = parent->y + y;
+ image->width = width;
+ image->height = height;
+ image->parent = parent;
+ image->texture = 0;
+ pipe_texture_reference(&image->texture,
+ parent->texture);
+
+ image->sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ image->sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ image->sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ image->sampler.min_img_filter = PIPE_TEX_MIPFILTER_NEAREST;
+ image->sampler.mag_img_filter = PIPE_TEX_MIPFILTER_NEAREST;
+ image->sampler.normalized_coords = 1;
+
+ if (!parent->children_array)
+ parent->children_array = array_create(
+ sizeof(struct vg_image*));
+
+ array_append_data(parent->children_array,
+ &image, 1);
+
+ vg_context_add_object(ctx, VG_OBJECT_IMAGE, image);
+
+ return image;
+}
+
+void image_copy(struct vg_image *dst, VGint dx, VGint dy,
+ struct vg_image *src, VGint sx, VGint sy,
+ VGint width, VGint height,
+ VGboolean dither)
+{
+ struct vg_context *ctx = vg_current_context();
+
+ if (width <= 0 || height <= 0) {
+ vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
+ return;
+ }
+ /* make sure rendering has completed */
+ ctx->pipe->flush(ctx->pipe, PIPE_FLUSH_RENDER_CACHE, NULL);
+ vg_copy_texture(ctx, dst->texture, dst->x + dx, dst->y + dy,
+ src->texture, src->x + sx, src->y + sy, width, height);
+}
+
+void image_draw(struct vg_image *img)
+{
+ struct vg_context *ctx = vg_current_context();
+ VGfloat x1, y1;
+ VGfloat x2, y2;
+ VGfloat x3, y3;
+ VGfloat x4, y4;
+ struct matrix *matrix;
+
+ x1 = 0;
+ y1 = 0;
+ x2 = img->width;
+ y2 = 0;
+ x3 = img->width;
+ y3 = img->height;
+ x4 = 0;
+ y4 = img->height;
+
+ matrix = &ctx->state.vg.image_user_to_surface_matrix;
+
+ matrix_map_point(matrix, x1, y1, &x1, &y1);
+ matrix_map_point(matrix, x2, y2, &x2, &y2);
+ matrix_map_point(matrix, x3, y3, &x3, &y3);
+ matrix_map_point(matrix, x4, y4, &x4, &y4);
+
+ shader_set_drawing_image(ctx->shader, VG_TRUE);
+ shader_set_paint(ctx->shader, ctx->state.vg.fill_paint);
+ shader_set_image(ctx->shader, img);
+ shader_bind(ctx->shader);
+
+ renderer_texture_quad(ctx->renderer, image_texture(img),
+ img->x, img->y, img->x + img->width, img->y + img->height,
+ x1, y1, x2, y2, x3, y3, x4, y4);
+}
+
+void image_set_pixels(VGint dx, VGint dy,
+ struct vg_image *src, VGint sx, VGint sy,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ struct pipe_surface *surf;
+ struct st_renderbuffer *strb = ctx->draw_buffer->strb;
+
+ /* make sure rendering has completed */
+ pipe->flush(pipe, PIPE_FLUSH_RENDER_CACHE, NULL);
+
+ surf = screen->get_tex_surface(screen, image_texture(src), 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_READ);
+
+ vg_copy_surface(ctx, strb->surface, dx, dy,
+ surf, sx+src->x, sy+src->y, width, height);
+
+ screen->tex_surface_destroy(surf);
+}
+
+void image_get_pixels(struct vg_image *dst, VGint dx, VGint dy,
+ VGint sx, VGint sy,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ struct pipe_surface *surf;
+ struct st_renderbuffer *strb = ctx->draw_buffer->strb;
+
+ /* flip the y coordinates */
+ /*dy = dst->height - dy - height;*/
+
+ /* make sure rendering has completed */
+ pipe->flush(pipe, PIPE_FLUSH_RENDER_CACHE, NULL);
+
+ surf = screen->get_tex_surface(screen, image_texture(dst), 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_WRITE |
+ PIPE_BUFFER_USAGE_GPU_READ);
+ vg_copy_surface(ctx, surf, dst->x + dx, dst->y + dy,
+ strb->surface, sx, sy, width, height);
+
+ pipe_surface_reference(&surf, NULL);
+}
+
+
+VGboolean vg_image_overlaps(struct vg_image *dst,
+ struct vg_image *src)
+{
+ if (dst == src || dst->parent == src ||
+ dst == src->parent)
+ return VG_TRUE;
+ if (dst->parent && dst->parent == src->parent) {
+ VGfloat left1 = dst->x;
+ VGfloat left2 = src->x;
+ VGfloat right1 = dst->x + dst->width;
+ VGfloat right2 = src->x + src->width;
+ VGfloat bottom1 = dst->y;
+ VGfloat bottom2 = src->y;
+ VGfloat top1 = dst->y + dst->height;
+ VGfloat top2 = src->y + src->height;
+
+ return !(left2 > right1 || right2 < left1 ||
+ top2 > bottom1 || bottom2 < top1);
+ }
+ return VG_FALSE;
+}
+
+VGint image_bind_samplers(struct vg_image *img, struct pipe_sampler_state **samplers,
+ struct pipe_texture **textures)
+{
+ img->sampler.min_img_filter = image_sampler_filter(img->base.ctx);
+ img->sampler.mag_img_filter = image_sampler_filter(img->base.ctx);
+ samplers[3] = &img->sampler;
+ textures[3] = img->texture;
+ return 1;
+}
+
+VGint image_sampler_filter(struct vg_context *ctx)
+{
+ switch(ctx->state.vg.image_quality) {
+ case VG_IMAGE_QUALITY_NONANTIALIASED:
+ return PIPE_TEX_FILTER_NEAREST;
+ break;
+ case VG_IMAGE_QUALITY_FASTER:
+ return PIPE_TEX_FILTER_NEAREST;
+ break;
+ case VG_IMAGE_QUALITY_BETTER:
+ /*return PIPE_TEX_FILTER_ANISO;*/
+ return PIPE_TEX_FILTER_LINEAR;
+ break;
+ default:
+ debug_printf("Unknown image quality");
+ }
+ return PIPE_TEX_FILTER_NEAREST;
+}
diff --git a/src/gallium/state_trackers/vega/image.h b/src/gallium/state_trackers/vega/image.h
new file mode 100644
index 0000000000..78e17cffa6
--- /dev/null
+++ b/src/gallium/state_trackers/vega/image.h
@@ -0,0 +1,104 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef IMAGES_H
+#define IMAGES_H
+
+#include "vg_context.h"
+#include "pipe/p_state.h"
+
+struct pipe_texture;
+struct array;
+struct vg_context;
+struct pipe_surface;
+
+struct vg_image {
+ struct vg_object base;
+ VGImageFormat format;
+ VGint x, y;
+ VGint width, height;
+
+ struct vg_image *parent;
+
+ struct pipe_texture *texture;
+ struct pipe_sampler_state sampler;
+
+ struct array *children_array;
+};
+
+struct vg_image *image_create(VGImageFormat format,
+ VGint width, VGint height);
+void image_destroy(struct vg_image *img);
+
+void image_clear(struct vg_image *img,
+ VGint x, VGint y, VGint width, VGint height);
+
+void image_sub_data(struct vg_image *image,
+ const void * data,
+ VGint dataStride,
+ VGImageFormat dataFormat,
+ VGint x, VGint y,
+ VGint width, VGint height);
+
+void image_get_sub_data(struct vg_image * image,
+ void * data,
+ VGint dataStride,
+ VGImageFormat dataFormat,
+ VGint x, VGint y,
+ VGint width, VGint height);
+
+struct vg_image *image_child_image(struct vg_image *parent,
+ VGint x, VGint y,
+ VGint width, VGint height);
+
+void image_copy(struct vg_image *dst, VGint dx, VGint dy,
+ struct vg_image *src, VGint sx, VGint sy,
+ VGint width, VGint height,
+ VGboolean dither);
+
+void image_draw(struct vg_image *img);
+
+void image_set_pixels(VGint dx, VGint dy,
+ struct vg_image *src, VGint sx, VGint sy,
+ VGint width, VGint height);
+void image_get_pixels(struct vg_image *dst, VGint dx, VGint dy,
+ VGint sx, VGint sy,
+ VGint width, VGint height);
+
+VGint image_bind_samplers(struct vg_image *dst, struct pipe_sampler_state **samplers,
+ struct pipe_texture **textures);
+
+VGboolean vg_image_overlaps(struct vg_image *dst,
+ struct vg_image *src);
+
+VGint image_sampler_filter(struct vg_context *ctx);
+
+void vg_copy_surface(struct vg_context *ctx,
+ struct pipe_surface *dst, VGint dx, VGint dy,
+ struct pipe_surface *src, VGint sx, VGint sy,
+ VGint width, VGint height);
+
+#endif
diff --git a/src/gallium/state_trackers/vega/mask.c b/src/gallium/state_trackers/vega/mask.c
new file mode 100644
index 0000000000..24650a37d5
--- /dev/null
+++ b/src/gallium/state_trackers/vega/mask.c
@@ -0,0 +1,690 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "mask.h"
+
+#include "path.h"
+#include "image.h"
+#include "shaders_cache.h"
+#include "renderer.h"
+#include "asm_util.h"
+#include "st_inlines.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_screen.h"
+#include "pipe/p_inlines.h"
+#include "util/u_memory.h"
+
+struct vg_mask_layer {
+ struct vg_object base;
+
+ VGint width;
+ VGint height;
+
+ struct pipe_texture *texture;
+};
+
+static INLINE struct pipe_surface *
+alpha_mask_surface(struct vg_context *ctx, int usage)
+{
+ struct pipe_screen *screen = ctx->pipe->screen;
+ struct st_framebuffer *stfb = ctx->draw_buffer;
+ return screen->get_tex_surface(screen,
+ stfb->alpha_mask,
+ 0, 0, 0,
+ usage);
+}
+
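+/* Intersect a twidth x theight rectangle at (tx, ty) with the destination
+ * (dwidth x dheight) and source (swidth x sheight) extents. On success
+ * 'offsets' holds the clipped origin and size within the source and
+ * 'location' the corresponding rectangle in the destination. */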
+static INLINE VGboolean
+intersect_rectangles(VGint dwidth, VGint dheight,
+ VGint swidth, VGint sheight,
+ VGint tx, VGint ty,
+ VGint twidth, VGint theight,
+ VGint *offsets,
+ VGint *location)
+{
+ if (tx + twidth <= 0 || tx >= dwidth)
+ return VG_FALSE;
+ if (ty + theight <= 0 || ty >= dheight)
+ return VG_FALSE;
+
+ offsets[0] = 0;
+ offsets[1] = 0;
+ location[0] = tx;
+ location[1] = ty;
+
+ if (tx < 0) {
+ offsets[0] -= tx;
+ location[0] = 0;
+
+ location[2] = MIN2(tx + swidth, MIN2(dwidth, tx + twidth));
+ offsets[2] = location[2];
+ } else {
+ offsets[2] = MIN2(twidth, MIN2(dwidth - tx, swidth ));
+ location[2] = offsets[2];
+ }
+
+ if (ty < 0) {
+ offsets[1] -= ty;
+ location[1] = 0;
+
+ location[3] = MIN2(ty + sheight, MIN2(dheight, ty + theight));
+ offsets[3] = location[3];
+ } else {
+ offsets[3] = MIN2(theight, MIN2(dheight - ty, sheight));
+ location[3] = offsets[3];
+ }
+
+ return VG_TRUE;
+}
+
+#if DEBUG_MASKS
+static void read_alpha_mask(void * data, VGint dataStride,
+ VGImageFormat dataFormat,
+ VGint sx, VGint sy,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_screen *screen = pipe->screen;
+
+ struct st_framebuffer *stfb = ctx->draw_buffer;
+ struct st_renderbuffer *strb = stfb->alpha_mask;
+ struct pipe_framebuffer_state *fb = &ctx->state.g3d.fb;
+
+ VGfloat temp[VEGA_MAX_IMAGE_WIDTH][4];
+ VGfloat *df = (VGfloat*)temp;
+ VGint y = (fb->height - sy) - 1, yStep = -1;
+ VGint i;
+ VGubyte *dst = (VGubyte *)data;
+ VGint xoffset = 0, yoffset = 0;
+
+ /* make sure rendering has completed */
+ pipe->flush(pipe, PIPE_FLUSH_RENDER_CACHE, NULL);
+ if (sx < 0) {
+ xoffset = -sx;
+ xoffset *= _vega_size_for_format(dataFormat);
+ width += sx;
+ sx = 0;
+ }
+ if (sy < 0) {
+ yoffset = -sy;
+ height += sy;
+ sy = 0;
+ y = (fb->height - sy) - 1;
+ yoffset *= dataStride;
+ }
+
+ {
+ struct pipe_surface *surf;
+
+ surf = screen->get_tex_surface(screen, strb->texture, 0, 0, 0,
+ PIPE_BUFFER_USAGE_CPU_READ);
+
+ /* Do a row at a time to flip image data vertically */
+ for (i = 0; i < height; i++) {
+#if 0
+ debug_printf("%d-%d == %d\n", sy, height, y);
+#endif
+ pipe_get_tile_rgba(surf, sx, y, width, 1, df);
+ y += yStep;
+ _vega_pack_rgba_span_float(ctx, width, temp, dataFormat,
+ dst + yoffset + xoffset);
+ dst += dataStride;
+ }
+
+ pipe_surface_reference(&surf, NULL);
+ }
+}
+
+void save_alpha_to_file(const char *filename)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_framebuffer_state *fb = &ctx->state.g3d.fb;
+ VGint *data;
+ int i, j;
+
+ data = malloc(sizeof(int) * fb->width * fb->height);
+ read_alpha_mask(data, fb->width * sizeof(int),
+ VG_sRGBA_8888,
+ 0, 0, fb->width, fb->height);
+ fprintf(stderr, "/*---------- start */\n");
+ fprintf(stderr, "const int image_width = %d;\n",
+ fb->width);
+ fprintf(stderr, "const int image_height = %d;\n",
+ fb->height);
+ fprintf(stderr, "const int image_data = {\n");
+ for (i = 0; i < fb->height; ++i) {
+ for (j = 0; j < fb->width; ++j) {
+ int rgba = data[i * fb->width + j];
+ int argb = 0;
+ argb = (rgba >> 8);
+ argb |= ((rgba & 0xff) << 24);
+ fprintf(stderr, "0x%x, ", argb);
+ }
+ fprintf(stderr, "\n");
+ }
+ fprintf(stderr, "};\n");
+ fprintf(stderr, "/*---------- end */\n");
+}
+#endif
+
+static void setup_mask_framebuffer(struct pipe_surface *surf,
+ VGint surf_width, VGint surf_height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_framebuffer_state fb;
+
+ memset(&fb, 0, sizeof(fb));
+ fb.width = surf_width;
+ fb.height = surf_height;
+ fb.nr_cbufs = 1;
+ fb.cbufs[0] = surf;
+ {
+ VGint i;
+ for (i = 1; i < PIPE_MAX_COLOR_BUFS; ++i)
+ fb.cbufs[i] = 0;
+ }
+ cso_set_framebuffer(ctx->cso_context, &fb);
+}
+
+
+/* setup shader constants */
+static void setup_mask_operation(VGMaskOperation operation)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_constant_buffer *cbuf = &ctx->mask.cbuf;
+ const VGint param_bytes = 4 * sizeof(VGfloat);
+ const VGfloat ones[4] = {1.f, 1.f, 1.f, 1.f};
+ void *shader = 0;
+
+ /* We always need to get a new buffer, to keep the drivers simple and
+ * avoid gratuitous rendering synchronization.
+ */
+ pipe_buffer_reference(&cbuf->buffer, NULL);
+
+ cbuf->buffer = pipe_buffer_create(ctx->pipe->screen, 1,
+ PIPE_BUFFER_USAGE_CONSTANT,
+ param_bytes);
+ if (cbuf->buffer) {
+ st_no_flush_pipe_buffer_write(ctx, cbuf->buffer,
+ 0, param_bytes, ones);
+ }
+
+ ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, cbuf);
+ switch (operation) {
+ case VG_UNION_MASK: {
+ if (!ctx->mask.union_fs) {
+ ctx->mask.union_fs = shader_create_from_text(ctx->pipe,
+ union_mask_asm,
+ 200,
+ PIPE_SHADER_FRAGMENT);
+ }
+ shader = ctx->mask.union_fs->driver;
+ }
+ break;
+ case VG_INTERSECT_MASK: {
+ if (!ctx->mask.intersect_fs) {
+ ctx->mask.intersect_fs = shader_create_from_text(ctx->pipe,
+ intersect_mask_asm,
+ 200,
+ PIPE_SHADER_FRAGMENT);
+ }
+ shader = ctx->mask.intersect_fs->driver;
+ }
+ break;
+ case VG_SUBTRACT_MASK: {
+ if (!ctx->mask.subtract_fs) {
+ ctx->mask.subtract_fs = shader_create_from_text(ctx->pipe,
+ subtract_mask_asm,
+ 200,
+ PIPE_SHADER_FRAGMENT);
+ }
+ shader = ctx->mask.subtract_fs->driver;
+ }
+ break;
+ case VG_SET_MASK: {
+ if (!ctx->mask.set_fs) {
+ ctx->mask.set_fs = shader_create_from_text(ctx->pipe,
+ set_mask_asm,
+ 200,
+ PIPE_SHADER_FRAGMENT);
+ }
+ shader = ctx->mask.set_fs->driver;
+ }
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ cso_set_fragment_shader_handle(ctx->cso_context, shader);
+}
+
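+/* Bind the incoming mask texture to sampler 0 and the previous blend texture
+ * to sampler 1 for the mask-combination shaders. */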
+static void setup_mask_samplers(struct pipe_texture *umask)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
+ struct pipe_texture *textures[PIPE_MAX_SAMPLERS];
+ struct st_framebuffer *fb_buffers = ctx->draw_buffer;
+ struct pipe_texture *uprev = NULL;
+ struct pipe_sampler_state sampler;
+
+ uprev = fb_buffers->blend_texture;
+ sampler = ctx->mask.sampler;
+ sampler.normalized_coords = 1;
+
+ samplers[0] = NULL;
+ samplers[1] = NULL;
+ samplers[2] = NULL;
+ textures[0] = NULL;
+ textures[1] = NULL;
+ textures[2] = NULL;
+
+ samplers[0] = &sampler;
+ samplers[1] = &ctx->mask.sampler;
+
+ textures[0] = umask;
+ textures[1] = uprev;
+
+ cso_set_samplers(ctx->cso_context, 2,
+ (const struct pipe_sampler_state **)samplers);
+ cso_set_sampler_textures(ctx->cso_context, 2, textures);
+}
+
+
+/* setup shader constants */
+static void setup_mask_fill(const VGfloat color[4])
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_constant_buffer *cbuf = &ctx->mask.cbuf;
+ const VGint param_bytes = 4 * sizeof(VGfloat);
+
+ /* We always need to get a new buffer, to keep the drivers simple and
+ * avoid gratuitous rendering synchronization.
+ */
+ pipe_buffer_reference(&cbuf->buffer, NULL);
+
+ cbuf->buffer = pipe_buffer_create(ctx->pipe->screen, 1,
+ PIPE_BUFFER_USAGE_CONSTANT,
+ param_bytes);
+ if (cbuf->buffer) {
+ st_no_flush_pipe_buffer_write(ctx, cbuf->buffer, 0, param_bytes, color);
+ }
+
+ ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, cbuf);
+ cso_set_fragment_shader_handle(ctx->cso_context,
+ shaders_cache_fill(ctx->sc,
+ VEGA_SOLID_FILL_SHADER));
+}
+
+static void setup_mask_viewport(void)
+{
+ struct vg_context *ctx = vg_current_context();
+ vg_set_viewport(ctx, VEGA_Y0_TOP);
+}
+
+static void setup_mask_blend(void)
+{
+ struct vg_context *ctx = vg_current_context();
+
+ struct pipe_blend_state blend;
+
+ memset(&blend, 0, sizeof(struct pipe_blend_state));
+ blend.blend_enable = 1;
+ blend.colormask |= PIPE_MASK_R;
+ blend.colormask |= PIPE_MASK_G;
+ blend.colormask |= PIPE_MASK_B;
+ blend.colormask |= PIPE_MASK_A;
+ blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ blend.alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
+
+ cso_set_blend(ctx->cso_context, &blend);
+}
+
+
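+/* Fill a rectangle of 'surf' with a constant color, saving and restoring the
+ * framebuffer/blend/shader/viewport CSO state around the draw. */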
+static void surface_fill(struct pipe_surface *surf,
+ int surf_width, int surf_height,
+ int x, int y, int width, int height,
+ const VGfloat color[4])
+{
+ struct vg_context *ctx = vg_current_context();
+
+ if (x < 0) {
+ width += x;
+ x = 0;
+ }
+ if (y < 0) {
+ height += y;
+ y = 0;
+ }
+
+ cso_save_framebuffer(ctx->cso_context);
+ cso_save_blend(ctx->cso_context);
+ cso_save_fragment_shader(ctx->cso_context);
+ cso_save_viewport(ctx->cso_context);
+
+ setup_mask_blend();
+ setup_mask_fill(color);
+ setup_mask_framebuffer(surf, surf_width, surf_height);
+ setup_mask_viewport();
+
+ renderer_draw_quad(ctx->renderer, x, y,
+ x + width, y + height, 0.0f/*depth should be disabled*/);
+
+
+ /* make sure rendering has completed */
+ ctx->pipe->flush(ctx->pipe,
+ PIPE_FLUSH_RENDER_CACHE | PIPE_FLUSH_FRAME,
+ NULL);
+
+#if DEBUG_MASKS
+ save_alpha_to_file(0);
+#endif
+
+ cso_restore_blend(ctx->cso_context);
+ cso_restore_framebuffer(ctx->cso_context);
+ cso_restore_fragment_shader(ctx->cso_context);
+ cso_restore_viewport(ctx->cso_context);
+}
+
+
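+/* Combine 'texture' with the current alpha-mask surface according to
+ * 'operation', clipped to the overlap of the two. */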
+static void mask_using_texture(struct pipe_texture *texture,
+ VGMaskOperation operation,
+ VGint x, VGint y,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct pipe_surface *surface =
+ alpha_mask_surface(ctx, PIPE_BUFFER_USAGE_GPU_WRITE);
+ VGint offsets[4], loc[4];
+
+ if (!surface)
+ return;
+ if (!intersect_rectangles(surface->width, surface->height,
+ texture->width[0], texture->height[0],
+ x, y, width, height,
+ offsets, loc))
+ return;
+#if 0
+ debug_printf("Offset = [%d, %d, %d, %d]\n", offsets[0],
+ offsets[1], offsets[2], offsets[3]);
+ debug_printf("Locati = [%d, %d, %d, %d]\n", loc[0],
+ loc[1], loc[2], loc[3]);
+#endif
+
+ /* prepare our blend surface */
+ vg_prepare_blend_surface_from_mask(ctx);
+
+ cso_save_samplers(ctx->cso_context);
+ cso_save_sampler_textures(ctx->cso_context);
+ cso_save_framebuffer(ctx->cso_context);
+ cso_save_blend(ctx->cso_context);
+ cso_save_fragment_shader(ctx->cso_context);
+ cso_save_viewport(ctx->cso_context);
+
+ setup_mask_samplers(texture);
+ setup_mask_blend();
+ setup_mask_operation(operation);
+ setup_mask_framebuffer(surface, surface->width, surface->height);
+ setup_mask_viewport();
+
+ /* render the quad to propagate the rendering from stencil */
+ renderer_draw_texture(ctx->renderer, texture,
+ offsets[0], offsets[1],
+ offsets[0] + offsets[2], offsets[1] + offsets[3],
+ loc[0], loc[1], loc[0] + loc[2], loc[1] + loc[3]);
+
+ /* make sure rendering has completed */
+ ctx->pipe->flush(ctx->pipe, PIPE_FLUSH_RENDER_CACHE, NULL);
+ cso_restore_blend(ctx->cso_context);
+ cso_restore_framebuffer(ctx->cso_context);
+ cso_restore_fragment_shader(ctx->cso_context);
+ cso_restore_samplers(ctx->cso_context);
+ cso_restore_sampler_textures(ctx->cso_context);
+ cso_restore_viewport(ctx->cso_context);
+
+ pipe_surface_reference(&surface, NULL);
+}
+
+
+#ifdef OPENVG_VERSION_1_1
+
+struct vg_mask_layer * mask_layer_create(VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct vg_mask_layer *mask = 0;
+
+ mask = CALLOC_STRUCT(vg_mask_layer);
+ vg_init_object(&mask->base, ctx, VG_OBJECT_MASK);
+ mask->width = width;
+ mask->height = height;
+
+ {
+ struct pipe_texture pt;
+ struct pipe_screen *screen = ctx->pipe->screen;
+
+ memset(&pt, 0, sizeof(pt));
+ pt.target = PIPE_TEXTURE_2D;
+ pt.format = PIPE_FORMAT_A8R8G8B8_UNORM;
+ pf_get_block(PIPE_FORMAT_A8R8G8B8_UNORM, &pt.block);
+ pt.last_level = 0;
+ pt.width[0] = width;
+ pt.height[0] = height;
+ pt.depth[0] = 1;
+ pt.tex_usage = PIPE_TEXTURE_USAGE_SAMPLER;
+ pt.compressed = 0;
+
+ mask->texture = screen->texture_create(screen, &pt);
+ }
+
+ vg_context_add_object(ctx, VG_OBJECT_MASK, mask);
+
+ return mask;
+}
+
+void mask_layer_destroy(struct vg_mask_layer *layer)
+{
+ struct vg_context *ctx = vg_current_context();
+
+ vg_context_remove_object(ctx, VG_OBJECT_MASK, layer);
+ pipe_texture_release(&layer->texture);
+ free(layer);
+}
+
+void mask_layer_fill(struct vg_mask_layer *layer,
+ VGint x, VGint y,
+ VGint width, VGint height,
+ VGfloat value)
+{
+ struct vg_context *ctx = vg_current_context();
+ VGfloat alpha_color[4] = {0, 0, 0, 0};
+ struct pipe_surface *surface;
+
+ alpha_color[3] = value;
+
+ surface = ctx->pipe->screen->get_tex_surface(
+ ctx->pipe->screen, layer->texture,
+ 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ surface_fill(surface,
+ layer->width, layer->height,
+ x, y, width, height, alpha_color);
+
+ ctx->pipe->screen->tex_surface_release(ctx->pipe->screen, &surface);
+}
+
+void mask_copy(struct vg_mask_layer *layer,
+ VGint sx, VGint sy,
+ VGint dx, VGint dy,
+ VGint width, VGint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct st_framebuffer *fb_buffers = ctx->draw_buffer;
+
+ renderer_copy_texture(ctx->renderer,
+ layer->texture,
+ sx, sy,
+ sx + width, sy + height,
+ fb_buffers->alpha_mask,
+ dx, dy,
+ dx + width, dy + height);
+}
+
+static void mask_layer_render_to(struct vg_mask_layer *layer,
+ struct path *path,
+ VGbitfield paint_modes)
+{
+ struct vg_context *ctx = vg_current_context();
+ const VGfloat fill_color[4] = {1.f, 1.f, 1.f, 1.f};
+ struct pipe_screen *screen = ctx->pipe->screen;
+ struct pipe_surface *surface;
+
+ surface = screen->get_tex_surface(screen, layer->texture, 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ cso_save_framebuffer(ctx->cso_context);
+ cso_save_fragment_shader(ctx->cso_context);
+ cso_save_viewport(ctx->cso_context);
+
+ setup_mask_blend();
+ setup_mask_fill(fill_color);
+ setup_mask_framebuffer(surface, layer->width, layer->height);
+ setup_mask_viewport();
+
+ if (paint_modes & VG_FILL_PATH) {
+ struct matrix *mat = &ctx->state.vg.path_user_to_surface_matrix;
+ path_fill(path, mat);
+ }
+
+ if (paint_modes & VG_STROKE_PATH) {
+ path_stroke(path);
+ }
+
+
+ /* make sure rendering has completed */
+ ctx->pipe->flush(ctx->pipe, PIPE_FLUSH_RENDER_CACHE, NULL);
+
+ cso_restore_framebuffer(ctx->cso_context);
+ cso_restore_fragment_shader(ctx->cso_context);
+ cso_restore_viewport(ctx->cso_context);
+ ctx->state.dirty |= BLEND_DIRTY;
+
+ screen->tex_surface_release(ctx->pipe->screen, &surface);
+}
+
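+/*
+ * Render the path's coverage into a temporary, surface-sized mask layer
+ * and then combine that layer into the surface alpha mask with the
+ * requested operation.
+ */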
+void mask_render_to(struct path *path,
+ VGbitfield paint_modes,
+ VGMaskOperation operation)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct st_framebuffer *fb_buffers = ctx->draw_buffer;
+ struct vg_mask_layer *temp_layer;
+ VGint width, height;
+
+ width = fb_buffers->alpha_mask->width[0];
+ height = fb_buffers->alpha_mask->height[0];
+
+ temp_layer = mask_layer_create(width, height);
+
+ mask_layer_render_to(temp_layer, path, paint_modes);
+
+ mask_using_layer(temp_layer, 0, 0, width, height,
+ operation);
+
+ mask_layer_destroy(temp_layer);
+}
+
+void mask_using_layer(struct vg_mask_layer *layer,
+ VGMaskOperation operation,
+ VGint x, VGint y,
+ VGint width, VGint height)
+{
+ mask_using_texture(layer->texture, operation,
+ x, y, width, height);
+}
+
+VGint mask_layer_width(struct vg_mask_layer *layer)
+{
+ return layer->width;
+}
+
+VGint mask_layer_height(struct vg_mask_layer *layer)
+{
+ return layer->height;
+}
+
+
+#endif
+
+void mask_using_image(struct vg_image *image,
+ VGMaskOperation operation,
+ VGint x, VGint y,
+ VGint width, VGint height)
+{
+ mask_using_texture(image->texture, operation,
+ x, y, width, height);
+}
+
+void mask_fill(VGint x, VGint y, VGint width, VGint height,
+ VGfloat value)
+{
+ struct vg_context *ctx = vg_current_context();
+ VGfloat alpha_color[4] = {.0f, .0f, .0f, value};
+ struct pipe_surface *surf = alpha_mask_surface(
+ ctx, PIPE_BUFFER_USAGE_GPU_WRITE);
+
+#if DEBUG_MASKS
+ debug_printf("mask_fill(%d, %d, %d, %d) with rgba(%f, %f, %f, %f)\n",
+ x, y, width, height,
+ alpha_color[0], alpha_color[1],
+ alpha_color[2], alpha_color[3]);
+ debug_printf("XXX %f === %f \n",
+ alpha_color[3], value);
+#endif
+
+ surface_fill(surf, surf->width, surf->height,
+ x, y, width, height, alpha_color);
+
+ pipe_surface_reference(&surf, NULL);
+}
+
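+/*
+ * When masking is enabled, bind the surface alpha mask as the second
+ * sampler/texture pair (unit 1); unit 0 is left for the paint (see
+ * paint_bind_samplers() in paint.c).
+ */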
+VGint mask_bind_samplers(struct pipe_sampler_state **samplers,
+ struct pipe_texture **textures)
+{
+ struct vg_context *ctx = vg_current_context();
+
+ if (ctx->state.vg.masking) {
+ struct st_framebuffer *fb_buffers = ctx->draw_buffer;
+
+ samplers[1] = &ctx->mask.sampler;
+ textures[1] = fb_buffers->alpha_mask;
+ return 1;
+ } else
+ return 0;
+}
diff --git a/src/gallium/state_trackers/vega/mask.h b/src/gallium/state_trackers/vega/mask.h
new file mode 100644
index 0000000000..5eaaede0e3
--- /dev/null
+++ b/src/gallium/state_trackers/vega/mask.h
@@ -0,0 +1,68 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef MASK_H
+#define MASK_H
+
+#include "vg_context.h"
+
+struct path;
+struct vg_image;
+struct pipe_texture;
+
+struct vg_mask_layer *mask_layer_create(VGint width, VGint height);
+void mask_layer_destroy(struct vg_mask_layer *layer);
+void mask_layer_fill(struct vg_mask_layer *layer,
+ VGint x, VGint y,
+ VGint width, VGint height,
+ VGfloat value);
+VGint mask_layer_width(struct vg_mask_layer *layer);
+VGint mask_layer_height(struct vg_mask_layer *layer);
+void mask_copy(struct vg_mask_layer *layer,
+ VGint sx, VGint sy,
+ VGint dx, VGint dy,
+ VGint width, VGint height);
+
+void mask_render_to(struct path *path,
+ VGbitfield paint_modes,
+ VGMaskOperation operation);
+
+void mask_using_layer(struct vg_mask_layer *layer,
+ VGMaskOperation operation,
+ VGint x, VGint y,
+ VGint width, VGint height);
+void mask_using_image(struct vg_image *image,
+ VGMaskOperation operation,
+ VGint x, VGint y,
+ VGint width, VGint height);
+void mask_fill(VGint x, VGint y,
+ VGint width, VGint height,
+ VGfloat value);
+
+VGint mask_bind_samplers(struct pipe_sampler_state **samplers,
+ struct pipe_texture **textures);
+
+#endif
diff --git a/src/gallium/state_trackers/vega/matrix.h b/src/gallium/state_trackers/vega/matrix.h
new file mode 100644
index 0000000000..4c207f912a
--- /dev/null
+++ b/src/gallium/state_trackers/vega/matrix.h
@@ -0,0 +1,462 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef MATRIX_H
+#define MATRIX_H
+
+#include "VG/openvg.h"
+
+#include "pipe/p_compiler.h"
+#include "util/u_math.h"
+
+#include <stdio.h>
+#include <math.h>
+
+#define floatsEqual(x, y) (fabs((x) - (y)) <= 0.00001f * MIN2(fabs(x), fabs(y)))
+#define floatIsZero(x) (floatsEqual((x) + 1, 1))
+#define ABS(x) (fabsf(x))
+
+#define DEGREES_TO_RADIANS(d) (0.0174532925199 * (d))
+#define FLT_TO_INT(flt) float_to_int_floor(((VGuint*)&(flt))[0])
+
+static INLINE VGint float_to_int_floor(VGuint bits)
+{
+ int sign = (bits >> 31) ? -1 : 1;
+ int exp = ((bits >> 23) & 255) - 127;
+ int mant = bits & 0x007fffff;
+ int sh = 23 - exp;
+
+ /* abs(value) >= 2^31 -> clamp. */
+
+ if (exp >= 31)
+ return (VGint)((sign < 0) ? 0x80000000u : 0x7fffffffu);
+
+ /* abs(value) < 1 -> return -1 or 0. */
+
+ if (exp < 0)
+ return (sign < 0 && (exp > -127 || mant != 0)) ? -1 : 0;
+
+ /* abs(value) >= 2^23 -> shift left. */
+
+ mant |= 0x00800000;
+ if (sh <= 0)
+ return sign * (mant << -sh);
+
+ /* Negative -> add a rounding term. */
+
+ if (sign < 0)
+ mant += (1 << sh) - 1;
+
+ /* Shift right to obtain the result. */
+
+ return sign * (mant >> sh);
+}
+
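+/*
+ * FLT_TO_INT() floors its argument through the raw float bits: for a
+ * variable f, f == 2.7f gives FLT_TO_INT(f) == 2 and f == -2.1f gives
+ * FLT_TO_INT(f) == -3, while magnitudes of 2^31 or more are clamped.
+ */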
+
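+/* 3x3 transformation stored as a flat array of nine floats; a point maps
+ * as x' = m[0]*x + m[3]*y + m[6], y' = m[1]*x + m[4]*y + m[7], with m[2],
+ * m[5] and m[8] forming the projective row (see matrix_map_point()).
+ */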
+struct matrix {
+ VGfloat m[9];
+};
+
+static INLINE void matrix_init(struct matrix *mat,
+ const VGfloat *val)
+{
+ memcpy(mat->m, val, sizeof(VGfloat) * 9);
+}
+
+static INLINE void matrix_inits(struct matrix *mat,
+ VGfloat m11, VGfloat m12, VGfloat m13,
+ VGfloat m21, VGfloat m22, VGfloat m23,
+ VGfloat m31, VGfloat m32, VGfloat m33)
+{
+ mat->m[0] = m11; mat->m[1] = m12; mat->m[2] = m13;
+ mat->m[3] = m21; mat->m[4] = m22; mat->m[5] = m23;
+ mat->m[6] = m31; mat->m[7] = m32; mat->m[8] = m33;
+}
+
+static INLINE void matrix_load_identity(struct matrix *matrix)
+{
+ static const VGfloat identity[9] = {1.f, 0.f, 0.f,
+ 0.f, 1.f, 0.f,
+ 0.f, 0.f, 1.f};
+ memcpy(matrix->m, identity, sizeof(identity));
+}
+
+static INLINE VGboolean matrix_is_identity(struct matrix *matrix)
+{
+ return floatsEqual(matrix->m[0], 1) && floatIsZero(matrix->m[1]) &&
+ floatIsZero(matrix->m[2]) &&
+ floatIsZero(matrix->m[3]) && floatsEqual(matrix->m[4], 1) &&
+ floatIsZero(matrix->m[5]) &&
+ floatIsZero(matrix->m[6]) && floatIsZero(matrix->m[7]) &&
+ floatIsZero(matrix->m[8]);
+}
+
+static INLINE VGboolean matrix_is_affine(struct matrix *matrix)
+{
+ return floatIsZero(matrix->m[2]) && floatIsZero(matrix->m[5])
+ && floatsEqual(matrix->m[8], 1);
+}
+
+
+static INLINE void matrix_make_affine(struct matrix *matrix)
+{
+ matrix->m[2] = 0.f;
+ matrix->m[5] = 0.f;
+ matrix->m[8] = 1.f;
+}
+
+static INLINE void matrix_mult(struct matrix *dst,
+ struct matrix *src)
+{
+ VGfloat m11 = dst->m[0]*src->m[0] + dst->m[3]*src->m[1] + dst->m[6]*src->m[2];
+ VGfloat m12 = dst->m[0]*src->m[3] + dst->m[3]*src->m[4] + dst->m[6]*src->m[5];
+ VGfloat m13 = dst->m[0]*src->m[6] + dst->m[3]*src->m[7] + dst->m[6]*src->m[8];
+
+ VGfloat m21 = dst->m[1]*src->m[0] + dst->m[4]*src->m[1] + dst->m[7]*src->m[2];
+ VGfloat m22 = dst->m[1]*src->m[3] + dst->m[4]*src->m[4] + dst->m[7]*src->m[5];
+ VGfloat m23 = dst->m[1]*src->m[6] + dst->m[4]*src->m[7] + dst->m[7]*src->m[8];
+
+ VGfloat m31 = dst->m[2]*src->m[0] + dst->m[5]*src->m[1] + dst->m[8]*src->m[2];
+ VGfloat m32 = dst->m[2]*src->m[3] + dst->m[5]*src->m[4] + dst->m[8]*src->m[5];
+ VGfloat m33 = dst->m[2]*src->m[6] + dst->m[5]*src->m[7] + dst->m[8]*src->m[8];
+
+ dst->m[0] = m11; dst->m[1] = m21; dst->m[2] = m31;
+ dst->m[3] = m12; dst->m[4] = m22; dst->m[5] = m32;
+ dst->m[6] = m13; dst->m[7] = m23; dst->m[8] = m33;
+}
+
+
+static INLINE void matrix_map_point(struct matrix *mat,
+ VGfloat x, VGfloat y,
+ VGfloat *out_x, VGfloat *out_y)
+{
+ /* use temporaries so that matrix_map_point(m, x, y, &x, &y) still works
+ * when the output pointers alias the inputs */
+ VGfloat tmp_x = x, tmp_y = y;
+
+ *out_x = mat->m[0]*tmp_x + mat->m[3]*tmp_y + mat->m[6];
+ *out_y = mat->m[1]*tmp_x + mat->m[4]*tmp_y + mat->m[7];
+ if (!matrix_is_affine(mat)) {
+ VGfloat w = 1/(mat->m[2]*tmp_x + mat->m[5]*tmp_y + mat->m[8]);
+ *out_x *= w;
+ *out_y *= w;
+ }
+}
+
+static INLINE void matrix_translate(struct matrix *dst,
+ VGfloat tx, VGfloat ty)
+{
+ if (!matrix_is_affine(dst)) {
+ struct matrix trans_matrix;
+ matrix_load_identity(&trans_matrix);
+ trans_matrix.m[6] = tx;
+ trans_matrix.m[7] = ty;
+ matrix_mult(dst, &trans_matrix);
+ } else {
+ dst->m[6] += tx*dst->m[0] + ty*dst->m[3];
+ dst->m[7] += tx*dst->m[1] + ty*dst->m[4];
+ }
+}
+
+static INLINE void matrix_scale(struct matrix *dst,
+ VGfloat sx, VGfloat sy)
+{
+ if (!matrix_is_affine(dst)) {
+ struct matrix scale_matrix;
+ matrix_load_identity(&scale_matrix);
+ scale_matrix.m[0] = sx;
+ scale_matrix.m[4] = sy;
+ matrix_mult(dst, &scale_matrix);
+ } else {
+ dst->m[0] *= sx; dst->m[1] *= sx;
+ dst->m[3] *= sy; dst->m[4] *= sy;
+ }
+}
+
+static INLINE void matrix_shear(struct matrix *dst,
+ VGfloat shx, VGfloat shy)
+{
+ struct matrix shear_matrix;
+ matrix_load_identity(&shear_matrix);
+ shear_matrix.m[1] = shy;
+ shear_matrix.m[3] = shx;
+ matrix_mult(dst, &shear_matrix);
+}
+
+static INLINE void matrix_rotate(struct matrix *dst,
+ VGfloat angle)
+{
+ struct matrix mat;
+ float sin_val = 0;
+ float cos_val = 0;
+
+
+ if (floatsEqual(angle, 90) || floatsEqual(angle, -270))
+ sin_val = 1.f;
+ else if (floatsEqual(angle, 270) || floatsEqual(angle, -90))
+ sin_val = -1.f;
+ else if (floatsEqual(angle, 180))
+ cos_val = -1.f;
+ else {
+ float radians = DEGREES_TO_RADIANS(angle);
+ sin_val = sin(radians);
+ cos_val = cos(radians);
+ }
+
+ if (!matrix_is_affine(dst)) {
+ matrix_load_identity(&mat);
+ mat.m[0] = cos_val; mat.m[1] = sin_val;
+ mat.m[3] = -sin_val; mat.m[4] = cos_val;
+
+ matrix_mult(dst, &mat);
+ } else {
+ VGfloat m11 = cos_val*dst->m[0] + sin_val*dst->m[3];
+ VGfloat m12 = cos_val*dst->m[1] + sin_val*dst->m[4];
+ VGfloat m21 = -sin_val*dst->m[0] + cos_val*dst->m[3];
+ VGfloat m22 = -sin_val*dst->m[1] + cos_val*dst->m[4];
+ dst->m[0] = m11; dst->m[1] = m12;
+ dst->m[3] = m21; dst->m[4] = m22;
+ }
+}
+
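+/*
+ * Transforms compose by post-multiplication; e.g. (illustrative):
+ *
+ *    struct matrix m;
+ *    matrix_load_identity(&m);
+ *    matrix_translate(&m, 10.f, 20.f);
+ *    matrix_rotate(&m, 45.f);
+ *
+ * maps a point by rotating it first and then translating it by (10, 20).
+ */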
+
+static INLINE VGfloat matrix_determinant(struct matrix *mat)
+{
+ return mat->m[0]*(mat->m[8]*mat->m[4]-mat->m[7]*mat->m[5]) -
+ mat->m[3]*(mat->m[8]*mat->m[1]-mat->m[7]*mat->m[2])+
+ mat->m[6]*(mat->m[5]*mat->m[1]-mat->m[4]*mat->m[2]);
+}
+
+
+static INLINE void matrix_adjoint(struct matrix *mat)
+{
+ VGfloat h[9];
+ h[0] = mat->m[4]*mat->m[8] - mat->m[5]*mat->m[7];
+ h[3] = mat->m[5]*mat->m[6] - mat->m[3]*mat->m[8];
+ h[6] = mat->m[3]*mat->m[7] - mat->m[4]*mat->m[6];
+ h[1] = mat->m[2]*mat->m[7] - mat->m[1]*mat->m[8];
+ h[4] = mat->m[0]*mat->m[8] - mat->m[2]*mat->m[6];
+ h[7] = mat->m[1]*mat->m[6] - mat->m[0]*mat->m[7];
+ h[2] = mat->m[1]*mat->m[5] - mat->m[2]*mat->m[4];
+ h[5] = mat->m[2]*mat->m[3] - mat->m[0]*mat->m[5];
+ h[8] = mat->m[0]*mat->m[4] - mat->m[1]*mat->m[3];
+
+
+ memcpy(mat->m, h, sizeof(VGfloat) * 9);
+}
+
+static INLINE void matrix_divs(struct matrix *mat,
+ VGfloat s)
+{
+ mat->m[0] /= s;
+ mat->m[1] /= s;
+ mat->m[2] /= s;
+ mat->m[3] /= s;
+ mat->m[4] /= s;
+ mat->m[5] /= s;
+ mat->m[6] /= s;
+ mat->m[7] /= s;
+ mat->m[8] /= s;
+}
+
+static INLINE VGboolean matrix_invert(struct matrix *mat)
+{
+ VGfloat det = matrix_determinant(mat);
+
+ if (floatIsZero(det))
+ return VG_FALSE;
+
+ matrix_adjoint(mat);
+ matrix_divs(mat, det);
+ return VG_TRUE;
+}
+
+static INLINE VGboolean matrix_is_invertible(struct matrix *mat)
+{
+ return !floatIsZero(matrix_determinant(mat));
+}
+
+
+static INLINE VGboolean matrix_square_to_quad(VGfloat dx0, VGfloat dy0,
+ VGfloat dx1, VGfloat dy1,
+ VGfloat dx3, VGfloat dy3,
+ VGfloat dx2, VGfloat dy2,
+ struct matrix *mat)
+{
+ VGfloat ax = dx0 - dx1 + dx2 - dx3;
+ VGfloat ay = dy0 - dy1 + dy2 - dy3;
+
+ if (floatIsZero(ax) && floatIsZero(ay)) {
+ /* affine case */
+ matrix_inits(mat,
+ dx1 - dx0, dy1 - dy0, 0,
+ dx2 - dx1, dy2 - dy1, 0,
+ dx0, dy0, 1);
+ } else {
+ VGfloat a, b, c, d, e, f, g, h;
+ VGfloat ax1 = dx1 - dx2;
+ VGfloat ax2 = dx3 - dx2;
+ VGfloat ay1 = dy1 - dy2;
+ VGfloat ay2 = dy3 - dy2;
+
+ /* determinants */
+ VGfloat gtop = ax * ay2 - ax2 * ay;
+ VGfloat htop = ax1 * ay - ax * ay1;
+ VGfloat bottom = ax1 * ay2 - ax2 * ay1;
+
+ if (!bottom)
+ return VG_FALSE;
+
+ g = gtop / bottom;
+ h = htop / bottom;
+
+ a = dx1 - dx0 + g * dx1;
+ b = dx3 - dx0 + h * dx3;
+ c = dx0;
+ d = dy1 - dy0 + g * dy1;
+ e = dy3 - dy0 + h * dy3;
+ f = dy0;
+
+ matrix_inits(mat,
+ a, d, g,
+ b, e, h,
+ c, f, 1.f);
+ }
+
+ return VG_TRUE;
+}
+
+static INLINE VGboolean matrix_quad_to_square(VGfloat sx0, VGfloat sy0,
+ VGfloat sx1, VGfloat sy1,
+ VGfloat sx2, VGfloat sy2,
+ VGfloat sx3, VGfloat sy3,
+ struct matrix *mat)
+{
+ if (!matrix_square_to_quad(sx0, sy0, sx1, sy1,
+ sx2, sy2, sx3, sy3,
+ mat))
+ return VG_FALSE;
+
+ return matrix_invert(mat);
+}
+
+
+static INLINE VGboolean matrix_quad_to_quad(VGfloat dx0, VGfloat dy0,
+ VGfloat dx1, VGfloat dy1,
+ VGfloat dx2, VGfloat dy2,
+ VGfloat dx3, VGfloat dy3,
+ VGfloat sx0, VGfloat sy0,
+ VGfloat sx1, VGfloat sy1,
+ VGfloat sx2, VGfloat sy2,
+ VGfloat sx3, VGfloat sy3,
+ struct matrix *mat)
+{
+ struct matrix sqr_to_qd;
+
+ if (!matrix_square_to_quad(dx0, dy0, dx1, dy1,
+ dx2, dy2, dx3, dy3,
+ mat))
+ return VG_FALSE;
+
+ if (!matrix_quad_to_square(sx0, sy0, sx1, sy1,
+ sx2, sy2, sx3, sy3,
+ &sqr_to_qd))
+ return VG_FALSE;
+
+ matrix_mult(mat, &sqr_to_qd);
+
+ return VG_TRUE;
+}
+
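+/*
+ * Illustrative sketch (qx0..qy3, w, h, px, py are placeholders):
+ * matrix_quad_to_quad() builds a projective transform that maps point i of
+ * the source quad (the second set of coordinates) onto point i of the
+ * destination quad (the first set), so warping the rectangle (0,0)-(w,h)
+ * onto an arbitrary quadrilateral could look like:
+ *
+ *    struct matrix warp;
+ *    if (matrix_quad_to_quad(qx0, qy0, qx1, qy1, qx2, qy2, qx3, qy3,
+ *                            0, 0, w, 0, w, h, 0, h, &warp))
+ *       matrix_map_point(&warp, px, py, &px, &py);
+ */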
+
+static INLINE VGboolean null_line(const VGfloat *l)
+{
+ return floatsEqual(l[0], l[2]) && floatsEqual(l[1], l[3]);
+}
+
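+/* Store in 'norm' a segment that starts at l's first endpoint and points
+ * along (dy, -dx), i.e. the direction of 'l' rotated by 90 degrees. */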
+static INLINE void line_normal(float *l, float *norm)
+{
+ norm[0] = l[0];
+ norm[1] = l[1];
+
+ norm[2] = l[0] + (l[3] - l[1]);
+ norm[3] = l[1] - (l[2] - l[0]);
+}
+
+static INLINE void line_normalize(float *l)
+{
+ float x = l[2] - l[0];
+ float y = l[3] - l[1];
+ float len = sqrt(x*x + y*y);
+ l[2] = l[0] + x/len;
+ l[3] = l[1] + y/len;
+}
+
+static INLINE VGfloat line_length(VGfloat x1, VGfloat y1,
+ VGfloat x2, VGfloat y2)
+{
+ VGfloat x = x2 - x1;
+ VGfloat y = y2 - y1;
+ return sqrt(x*x + y*y);
+}
+
+static INLINE VGfloat line_lengthv(const VGfloat *l)
+{
+ VGfloat x = l[2] - l[0];
+ VGfloat y = l[3] - l[1];
+ return sqrt(x*x + y*y);
+}
+
+
+static INLINE void line_point_at(float *l, float t, float *pt)
+{
+ float dx = l[2] - l[0];
+ float dy = l[3] - l[1];
+
+ pt[0] = l[0] + dx * t;
+ pt[1] = l[1] + dy * t;
+}
+
+static INLINE void vector_unit(float *vec)
+{
+ float len = sqrt(vec[0] * vec[0] + vec[1] * vec[1]);
+ vec[0] /= len;
+ vec[1] /= len;
+}
+
+static INLINE void line_normal_vector(float *line, float *vec)
+{
+ VGfloat normal[4];
+
+ line_normal(line, normal);
+
+ vec[0] = normal[2] - normal[0];
+ vec[1] = normal[3] - normal[1];
+
+ vector_unit(vec);
+}
+
+#endif
diff --git a/src/gallium/state_trackers/vega/paint.c b/src/gallium/state_trackers/vega/paint.c
new file mode 100644
index 0000000000..04a6ba9cdc
--- /dev/null
+++ b/src/gallium/state_trackers/vega/paint.c
@@ -0,0 +1,699 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "paint.h"
+
+#include "shaders_cache.h"
+#include "matrix.h"
+#include "image.h"
+#include "st_inlines.h"
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_inlines.h"
+
+#include "util/u_memory.h"
+#include "util/u_math.h"
+
+#include "cso_cache/cso_context.h"
+
+struct vg_paint {
+ struct vg_object base;
+
+ VGPaintType type;
+
+ struct {
+ VGfloat color[4];
+ VGint colori[4];
+ } solid;
+
+ struct {
+ VGColorRampSpreadMode spread;
+ VGuint color_data[1024];
+ struct {
+ VGfloat coords[4];
+ VGint coordsi[4];
+ } linear;
+ struct {
+ VGfloat vals[5];
+ VGint valsi[5];
+ } radial;
+ struct pipe_texture *texture;
+ struct pipe_sampler_state sampler;
+
+ VGfloat *ramp_stops;
+ VGint *ramp_stopsi;
+ VGint num_stops;
+
+ VGboolean color_ramps_premultiplied;
+ } gradient;
+
+ struct {
+ struct pipe_texture *texture;
+ VGTilingMode tiling_mode;
+ struct pipe_sampler_state sampler;
+ } pattern;
+
+ struct pipe_constant_buffer cbuf;
+ struct pipe_shader_state fs_state;
+ void *fs;
+};
+
+static INLINE VGuint mix_pixels(VGuint p1, VGuint a, VGuint p2, VGuint b)
+{
+ VGuint t = (p1 & 0xff00ff) * a + (p2 & 0xff00ff) * b;
+ t >>= 8; t &= 0xff00ff;
+
+ p1 = ((p1 >> 8) & 0xff00ff) * a + ((p2 >> 8) & 0xff00ff) * b;
+ p1 &= 0xff00ff00; p1 |= t;
+
+ return p1;
+}
+
+static INLINE VGuint float4_to_argb(const VGfloat *clr)
+{
+ return float_to_ubyte(clr[3]) << 24 |
+ float_to_ubyte(clr[0]) << 16 |
+ float_to_ubyte(clr[1]) << 8 |
+ float_to_ubyte(clr[2]) << 0;
+}
+
+static INLINE void create_gradient_data(const VGfloat *ramp_stops,
+ VGint num,
+ VGuint *data,
+ VGint size)
+{
+ VGint i;
+ VGint pos = 0;
+ VGfloat fpos = 0, incr = 1.f / size;
+ VGuint last_color;
+
+ while (fpos < ramp_stops[0] && pos < size) {
+ data[pos] = float4_to_argb(ramp_stops + 1);
+ fpos += incr;
+ ++pos;
+ }
+
+ for (i = 0; i < num - 1; ++i) {
+ VGint rcur = 5 * i;
+ VGint rnext = 5 * (i + 1);
+ VGfloat delta = 1.f/(ramp_stops[rnext] - ramp_stops[rcur]);
+ while (fpos < ramp_stops[rnext] && pos < size) {
+ VGint dist = 256 * ((fpos - ramp_stops[rcur]) * delta);
+ VGint idist = 256 - dist;
+ VGuint current_color = float4_to_argb(ramp_stops + rcur + 1);
+ VGuint next_color = float4_to_argb(ramp_stops + rnext + 1);
+ data[pos] = mix_pixels(current_color, idist,
+ next_color, dist);
+ fpos += incr;
+ ++pos;
+ }
+ }
+
+ last_color = float4_to_argb(ramp_stops + ((num - 1) * 5 + 1));
+ while (pos < size) {
+ data[pos] = last_color;
+ ++pos;
+ }
+ data[size-1] = last_color;
+}
+
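+/*
+ * The color ramp is expanded by create_gradient_data() into a 1024-entry
+ * ARGB lookup table and uploaded as a 1024x1 texture, which
+ * paint_bind_samplers() binds for linear and radial gradient paints.  Each
+ * entry is a fixed-point blend of the two surrounding stops, e.g. halfway
+ * between two stops the entry is mix_pixels(prev, 128, next, 128).
+ */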
+static INLINE struct pipe_texture *create_gradient_texture(struct vg_paint *p)
+{
+ struct pipe_context *pipe = p->base.ctx->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ struct pipe_texture *tex = 0;
+ struct pipe_texture templ;
+
+ memset(&templ, 0, sizeof(templ));
+ templ.target = PIPE_TEXTURE_1D;
+ templ.format = PIPE_FORMAT_A8R8G8B8_UNORM;
+ templ.last_level = 0;
+ templ.width[0] = 1024;
+ templ.height[0] = 1;
+ templ.depth[0] = 1;
+ pf_get_block(PIPE_FORMAT_A8R8G8B8_UNORM, &templ.block);
+ templ.tex_usage = PIPE_TEXTURE_USAGE_SAMPLER;
+
+ tex = screen->texture_create(screen, &templ);
+
+ { /* upload color_data */
+ struct pipe_transfer *transfer =
+ st_no_flush_get_tex_transfer(p->base.ctx, tex, 0, 0, 0,
+ PIPE_TRANSFER_WRITE, 0, 0, 1024, 1);
+ void *map = screen->transfer_map(screen, transfer);
+ memcpy(map, p->gradient.color_data, sizeof(VGint)*1024);
+ screen->transfer_unmap(screen, transfer);
+ screen->tex_transfer_destroy(transfer);
+ }
+
+ return tex;
+}
+
+struct vg_paint * paint_create(struct vg_context *ctx)
+{
+ struct vg_paint *paint = CALLOC_STRUCT(vg_paint);
+ const VGfloat default_color[] = {0.0f, 0.0f, 0.0f, 1.0f};
+ const VGfloat def_ling[] = {0.0f, 0.0f, 1.0f, 0.0f};
+ const VGfloat def_radg[] = {0.0f, 0.0f, 0.0f, 0.0f, 1.0f};
+ vg_init_object(&paint->base, ctx, VG_OBJECT_PAINT);
+ vg_context_add_object(ctx, VG_OBJECT_PAINT, paint);
+
+ paint->type = VG_PAINT_TYPE_COLOR;
+ memcpy(paint->solid.color, default_color,
+ 4 * sizeof(VGfloat));
+ paint->gradient.spread = VG_COLOR_RAMP_SPREAD_PAD;
+ memcpy(paint->gradient.linear.coords, def_ling,
+ 4 * sizeof(VGfloat));
+ memcpy(paint->gradient.radial.vals, def_radg,
+ 5 * sizeof(VGfloat));
+
+ paint->gradient.sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ paint->gradient.sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ paint->gradient.sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
+ paint->gradient.sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
+ paint->gradient.sampler.normalized_coords = 1;
+
+ memcpy(&paint->pattern.sampler,
+ &paint->gradient.sampler,
+ sizeof(struct pipe_sampler_state));
+
+ return paint;
+}
+
+void paint_destroy(struct vg_paint *paint)
+{
+ struct vg_context *ctx = paint->base.ctx;
+ if (paint->pattern.texture)
+ pipe_texture_reference(&paint->pattern.texture, NULL);
+ if (ctx)
+ vg_context_remove_object(ctx, VG_OBJECT_PAINT, paint);
+
+ free(paint->gradient.ramp_stopsi);
+ free(paint->gradient.ramp_stops);
+ free(paint);
+}
+
+void paint_set_color(struct vg_paint *paint,
+ const VGfloat *color)
+{
+ paint->solid.color[0] = color[0];
+ paint->solid.color[1] = color[1];
+ paint->solid.color[2] = color[2];
+ paint->solid.color[3] = color[3];
+
+ paint->solid.colori[0] = FLT_TO_INT(color[0]);
+ paint->solid.colori[1] = FLT_TO_INT(color[1]);
+ paint->solid.colori[2] = FLT_TO_INT(color[2]);
+ paint->solid.colori[3] = FLT_TO_INT(color[3]);
+}
+
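+/*
+ * The paint constant buffer always begins with eight floats: four that
+ * depend on the paint type (the solid color, or precomputed gradient
+ * coefficients) followed by the constants {0.f, 1.f, 2.f, 4.f}.  The
+ * gradient and pattern variants append a paint transformation matrix as
+ * three padded rows of four floats, giving the 20-float size reported by
+ * paint_constant_buffer_size().
+ */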
+static INLINE void paint_color_buffer(struct vg_paint *paint, void *buffer)
+{
+ VGfloat *map = (VGfloat*)buffer;
+ memcpy(buffer, paint->solid.color, 4 * sizeof(VGfloat));
+ map[4] = 0.f;
+ map[5] = 1.f;
+ map[6] = 2.f;
+ map[7] = 4.f;
+}
+
+static INLINE void paint_linear_gradient_buffer(struct vg_paint *paint, void *buffer)
+{
+ struct vg_context *ctx = paint->base.ctx;
+ VGfloat *map = (VGfloat*)buffer;
+
+ map[0] = paint->gradient.linear.coords[2] - paint->gradient.linear.coords[0];
+ map[1] = paint->gradient.linear.coords[3] - paint->gradient.linear.coords[1];
+ map[2] = 1.f / (map[0] * map[0] + map[1] * map[1]);
+ map[3] = 1.f;
+
+ map[4] = 0.f;
+ map[5] = 1.f;
+ map[6] = 2.f;
+ map[7] = 4.f;
+ {
+ struct matrix mat;
+ struct matrix inv;
+ matrix_load_identity(&mat);
+ matrix_translate(&mat, -paint->gradient.linear.coords[0], -paint->gradient.linear.coords[1]);
+ memcpy(&inv, &ctx->state.vg.fill_paint_to_user_matrix,
+ sizeof(struct matrix));
+ matrix_invert(&inv);
+ matrix_mult(&inv, &mat);
+ memcpy(&mat, &inv,
+ sizeof(struct matrix));
+
+ map[8] = mat.m[0]; map[9] = mat.m[3]; map[10] = mat.m[6]; map[11] = 0.f;
+ map[12] = mat.m[1]; map[13] = mat.m[4]; map[14] = mat.m[7]; map[15] = 0.f;
+ map[16] = mat.m[2]; map[17] = mat.m[5]; map[18] = mat.m[8]; map[19] = 0.f;
+ }
+#if 0
+ debug_printf("Coords (%f, %f, %f, %f)\n",
+ map[0], map[1], map[2], map[3]);
+#endif
+}
+
+
+static INLINE void paint_radial_gradient_buffer(struct vg_paint *paint, void *buffer)
+{
+ VGfloat *radialCoords = paint->gradient.radial.vals;
+ struct vg_context *ctx = paint->base.ctx;
+
+ VGfloat *map = (VGfloat*)buffer;
+
+ map[0] = radialCoords[0] - radialCoords[2];
+ map[1] = radialCoords[1] - radialCoords[3];
+ map[2] = -map[0] * map[0] - map[1] * map[1] +
+ radialCoords[4] * radialCoords[4];
+ map[3] = 1.f;
+
+ map[4] = 0.f;
+ map[5] = 1.f;
+ map[6] = 2.f;
+ map[7] = 4.f;
+
+ {
+ struct matrix mat;
+ struct matrix inv;
+ matrix_load_identity(&mat);
+ matrix_translate(&mat, -radialCoords[2], -radialCoords[3]);
+ memcpy(&inv, &ctx->state.vg.fill_paint_to_user_matrix,
+ sizeof(struct matrix));
+ matrix_invert(&inv);
+ matrix_mult(&inv, &mat);
+ memcpy(&mat, &inv,
+ sizeof(struct matrix));
+
+ map[8] = mat.m[0]; map[9] = mat.m[3]; map[10] = mat.m[6]; map[11] = 0.f;
+ map[12] = mat.m[1]; map[13] = mat.m[4]; map[14] = mat.m[7]; map[15] = 0.f;
+ map[16] = mat.m[2]; map[17] = mat.m[5]; map[18] = mat.m[8]; map[19] = 0.f;
+ }
+
+#if 0
+ debug_printf("Coords (%f, %f, %f, %f)\n",
+ map[0], map[1], map[2], map[3]);
+#endif
+}
+
+
+static INLINE void paint_pattern_buffer(struct vg_paint *paint, void *buffer)
+{
+ struct vg_context *ctx = paint->base.ctx;
+
+ VGfloat *map = (VGfloat *)buffer;
+ memcpy(map, paint->solid.color, 4 * sizeof(VGfloat));
+
+ map[4] = 0.f;
+ map[5] = 1.f;
+ map[6] = paint->pattern.texture->width[0];
+ map[7] = paint->pattern.texture->height[0];
+ {
+ struct matrix mat;
+ memcpy(&mat, &ctx->state.vg.fill_paint_to_user_matrix,
+ sizeof(struct matrix));
+ matrix_invert(&mat);
+ {
+ struct matrix pm;
+ memcpy(&pm, &ctx->state.vg.path_user_to_surface_matrix,
+ sizeof(struct matrix));
+ matrix_invert(&pm);
+ matrix_mult(&pm, &mat);
+ memcpy(&mat, &pm, sizeof(struct matrix));
+ }
+
+ map[8] = mat.m[0]; map[9] = mat.m[3]; map[10] = mat.m[6]; map[11] = 0.f;
+ map[12] = mat.m[1]; map[13] = mat.m[4]; map[14] = mat.m[7]; map[15] = 0.f;
+ map[16] = mat.m[2]; map[17] = mat.m[5]; map[18] = mat.m[8]; map[19] = 0.f;
+ }
+}
+
+void paint_set_type(struct vg_paint *paint, VGPaintType type)
+{
+ paint->type = type;
+}
+
+void paint_set_ramp_stops(struct vg_paint *paint, const VGfloat *stops,
+ int num)
+{
+ const VGfloat default_stops[] = {0.0f, 0.0f, 0.0f, 0.0f, 1.0f,
+ 1.0f, 1.0f, 1.0f, 1.0f, 1.0f};
+ VGint i;
+ VGfloat last_coord;
+
+ paint->gradient.num_stops = num;
+ if (num) {
+ free(paint->gradient.ramp_stops);
+ paint->gradient.ramp_stops = malloc(sizeof(VGfloat)*num);
+ memcpy(paint->gradient.ramp_stops, stops, sizeof(VGfloat)*num);
+ } else
+ return;
+
+ /* stops must be in non-decreasing order; if the first stop is greater
+ * than 1 the whole sequence is invalid and the default ramp is used */
+ if (stops[0] > 1) {
+ stops = default_stops;
+ num = 10;
+ }
+ last_coord = stops[0];
+ for (i = 1; i < num / 5; ++i) {
+ VGint idx = 5 * i;
+ VGfloat coord = stops[idx];
+ if (!floatsEqual(last_coord, coord) && coord < last_coord) {
+ stops = default_stops;
+ num = 10;
+ break;
+ }
+ last_coord = coord;
+ }
+
+ create_gradient_data(stops, num / 5, paint->gradient.color_data,
+ 1024);
+
+ if (paint->gradient.texture) {
+ pipe_texture_reference(&paint->gradient.texture, NULL);
+ paint->gradient.texture = 0;
+ }
+
+ paint->gradient.texture = create_gradient_texture(paint);
+}
+
+void paint_set_colori(struct vg_paint *p,
+ VGuint rgba)
+{
+ p->solid.color[0] = ((rgba >> 24) & 0xff) / 255.f;
+ p->solid.color[1] = ((rgba >> 16) & 0xff) / 255.f;
+ p->solid.color[2] = ((rgba >> 8) & 0xff) / 255.f;
+ p->solid.color[3] = ((rgba >> 0) & 0xff) / 255.f;
+}
+
+VGuint paint_colori(struct vg_paint *p)
+{
+#define F2B(f) (float_to_ubyte(f))
+
+ return ((F2B(p->solid.color[0]) << 24) |
+ (F2B(p->solid.color[1]) << 16) |
+ (F2B(p->solid.color[2]) << 8) |
+ (F2B(p->solid.color[3]) << 0));
+#undef F2B
+}
+
+void paint_set_linear_gradient(struct vg_paint *paint,
+ const VGfloat *coords)
+{
+ memcpy(paint->gradient.linear.coords, coords, sizeof(VGfloat) * 4);
+}
+
+void paint_set_spread_mode(struct vg_paint *paint,
+ VGint mode)
+{
+ paint->gradient.spread = mode;
+ switch(mode) {
+ case VG_COLOR_RAMP_SPREAD_PAD:
+ paint->gradient.sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ break;
+ case VG_COLOR_RAMP_SPREAD_REPEAT:
+ paint->gradient.sampler.wrap_s = PIPE_TEX_WRAP_REPEAT;
+ break;
+ case VG_COLOR_RAMP_SPREAD_REFLECT:
+ paint->gradient.sampler.wrap_s = PIPE_TEX_WRAP_MIRROR_REPEAT;
+ break;
+ }
+}
+
+VGColorRampSpreadMode paint_spread_mode(struct vg_paint *paint)
+{
+ return paint->gradient.spread;
+}
+
+void paint_set_radial_gradient(struct vg_paint *paint,
+ const VGfloat *values)
+{
+ memcpy(paint->gradient.radial.vals, values, sizeof(VGfloat) * 5);
+}
+
+void paint_set_pattern(struct vg_paint *paint,
+ struct vg_image *img)
+{
+ if (paint->pattern.texture)
+ pipe_texture_reference(&paint->pattern.texture, NULL);
+
+ paint->pattern.texture = 0;
+ pipe_texture_reference(&paint->pattern.texture,
+ img->texture);
+}
+
+void paint_set_pattern_tiling(struct vg_paint *paint,
+ VGTilingMode mode)
+{
+ paint->pattern.tiling_mode = mode;
+
+ switch(mode) {
+ case VG_TILE_FILL:
+ paint->pattern.sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
+ paint->pattern.sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
+ break;
+ case VG_TILE_PAD:
+ paint->pattern.sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ paint->pattern.sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ break;
+ case VG_TILE_REPEAT:
+ paint->pattern.sampler.wrap_s = PIPE_TEX_WRAP_REPEAT;
+ paint->pattern.sampler.wrap_t = PIPE_TEX_WRAP_REPEAT;
+ break;
+ case VG_TILE_REFLECT:
+ paint->pattern.sampler.wrap_s = PIPE_TEX_WRAP_MIRROR_REPEAT;
+ paint->pattern.sampler.wrap_t = PIPE_TEX_WRAP_MIRROR_REPEAT;
+ break;
+ default:
+ debug_assert("!Unknown tiling mode");
+ }
+}
+
+void paint_get_color(struct vg_paint *paint,
+ VGfloat *color)
+{
+ color[0] = paint->solid.color[0];
+ color[1] = paint->solid.color[1];
+ color[2] = paint->solid.color[2];
+ color[3] = paint->solid.color[3];
+}
+
+void paint_ramp_stops(struct vg_paint *paint, VGfloat *stops,
+ int num)
+{
+ memcpy(stops, paint->gradient.ramp_stops, sizeof(VGfloat)*num);
+}
+
+void paint_linear_gradient(struct vg_paint *paint,
+ VGfloat *coords)
+{
+ memcpy(coords, paint->gradient.linear.coords, sizeof(VGfloat)*4);
+}
+
+void paint_radial_gradient(struct vg_paint *paint,
+ VGfloat *coords)
+{
+ memcpy(coords, paint->gradient.radial.vals, sizeof(VGfloat)*5);
+}
+
+int paint_num_ramp_stops(struct vg_paint *paint)
+{
+ return paint->gradient.num_stops;
+}
+
+VGPaintType paint_type(struct vg_paint *paint)
+{
+ return paint->type;
+}
+
+void paint_set_coloriv(struct vg_paint *paint,
+ const VGint *color)
+{
+ paint->solid.color[0] = color[0];
+ paint->solid.color[1] = color[1];
+ paint->solid.color[2] = color[2];
+ paint->solid.color[3] = color[3];
+
+ paint->solid.colori[0] = color[0];
+ paint->solid.colori[1] = color[1];
+ paint->solid.colori[2] = color[2];
+ paint->solid.colori[3] = color[3];
+}
+
+void paint_get_coloriv(struct vg_paint *paint,
+ VGint *color)
+{
+ color[0] = paint->solid.colori[0];
+ color[1] = paint->solid.colori[1];
+ color[2] = paint->solid.colori[2];
+ color[3] = paint->solid.colori[3];
+}
+
+void paint_set_color_ramp_premultiplied(struct vg_paint *paint,
+ VGboolean set)
+{
+ paint->gradient.color_ramps_premultiplied = set;
+}
+
+VGboolean paint_color_ramp_premultiplied(struct vg_paint *paint)
+{
+ return paint->gradient.color_ramps_premultiplied;
+}
+
+void paint_set_ramp_stopsi(struct vg_paint *paint, const VGint *stops,
+ int num)
+{
+ if (num) {
+ free(paint->gradient.ramp_stopsi);
+ paint->gradient.ramp_stopsi = malloc(sizeof(VGint)*num);
+ memcpy(paint->gradient.ramp_stopsi, stops, sizeof(VGint)*num);
+ }
+}
+
+void paint_ramp_stopsi(struct vg_paint *paint, VGint *stops,
+ int num)
+{
+ memcpy(stops, paint->gradient.ramp_stopsi, sizeof(VGint)*num);
+}
+
+void paint_set_linear_gradienti(struct vg_paint *paint,
+ const VGint *coords)
+{
+ memcpy(paint->gradient.linear.coordsi, coords, sizeof(VGint) * 4);
+}
+
+void paint_linear_gradienti(struct vg_paint *paint,
+ VGint *coords)
+{
+ memcpy(coords, paint->gradient.linear.coordsi, sizeof(VGint)*4);
+}
+
+void paint_set_radial_gradienti(struct vg_paint *paint,
+ const VGint *values)
+{
+ memcpy(paint->gradient.radial.valsi, values, sizeof(VGint) * 5);
+}
+
+void paint_radial_gradienti(struct vg_paint *paint,
+ VGint *coords)
+{
+ memcpy(coords, paint->gradient.radial.valsi, sizeof(VGint)*5);
+}
+
+VGTilingMode paint_pattern_tiling(struct vg_paint *paint)
+{
+ return paint->pattern.tiling_mode;
+}
+
+VGint paint_bind_samplers(struct vg_paint *paint, struct pipe_sampler_state **samplers,
+ struct pipe_texture **textures)
+{
+ struct vg_context *ctx = vg_current_context();
+
+ switch(paint->type) {
+ case VG_PAINT_TYPE_LINEAR_GRADIENT:
+ case VG_PAINT_TYPE_RADIAL_GRADIENT: {
+ if (paint->gradient.texture) {
+ paint->gradient.sampler.min_img_filter = image_sampler_filter(ctx);
+ paint->gradient.sampler.mag_img_filter = image_sampler_filter(ctx);
+ samplers[0] = &paint->gradient.sampler;
+ textures[0] = paint->gradient.texture;
+ return 1;
+ }
+ }
+ break;
+ case VG_PAINT_TYPE_PATTERN: {
+ memcpy(paint->pattern.sampler.border_color,
+ ctx->state.vg.tile_fill_color,
+ sizeof(VGfloat) * 4);
+ paint->pattern.sampler.min_img_filter = image_sampler_filter(ctx);
+ paint->pattern.sampler.mag_img_filter = image_sampler_filter(ctx);
+ samplers[0] = &paint->pattern.sampler;
+ textures[0] = paint->pattern.texture;
+ return 1;
+ }
+ break;
+ default:
+ samplers[0] = &paint->pattern.sampler; /* dummy */
+ textures[0] = 0;
+ return 0;
+ break;
+ }
+ return 0;
+}
+
+void paint_resolve_type(struct vg_paint *paint)
+{
+ if (paint->type == VG_PAINT_TYPE_PATTERN &&
+ !paint->pattern.texture) {
+ paint->type = VG_PAINT_TYPE_COLOR;
+ }
+}
+
+VGint paint_constant_buffer_size(struct vg_paint *paint)
+{
+ switch(paint->type) {
+ case VG_PAINT_TYPE_COLOR:
+ return 8 * sizeof(VGfloat); /* 4 color + 4 constants (0.f, 1.f, 2.f, 4.f) */
+ break;
+ case VG_PAINT_TYPE_LINEAR_GRADIENT:
+ return 20 * sizeof(VGfloat);
+ break;
+ case VG_PAINT_TYPE_RADIAL_GRADIENT:
+ return 20 * sizeof(VGfloat);
+ break;
+ case VG_PAINT_TYPE_PATTERN:
+ return 20 * sizeof(VGfloat);
+ break;
+ default:
+ debug_printf("Uknown paint type: %d\n", paint->type);
+ }
+
+ return 0;
+}
+
+void paint_fill_constant_buffer(struct vg_paint *paint,
+ void *buffer)
+{
+ switch(paint->type) {
+ case VG_PAINT_TYPE_COLOR:
+ paint_color_buffer(paint, buffer);
+ break;
+ case VG_PAINT_TYPE_LINEAR_GRADIENT:
+ paint_linear_gradient_buffer(paint, buffer);
+ break;
+ case VG_PAINT_TYPE_RADIAL_GRADIENT:
+ paint_radial_gradient_buffer(paint, buffer);
+ break;
+ case VG_PAINT_TYPE_PATTERN:
+ paint_pattern_buffer(paint, buffer);
+ break;
+
+ default:
+ abort();
+ }
+}
diff --git a/src/gallium/state_trackers/vega/paint.h b/src/gallium/state_trackers/vega/paint.h
new file mode 100644
index 0000000000..999b5c167c
--- /dev/null
+++ b/src/gallium/state_trackers/vega/paint.h
@@ -0,0 +1,118 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef PAINT_H
+#define PAINT_H
+
+#include "vg_context.h"
+
+#include "VG/openvg.h"
+#include "pipe/p_state.h"
+
+struct vg_paint;
+struct vg_image;
+struct pipe_sampler_state;
+struct pipe_texture;
+
+struct vg_paint *paint_create(struct vg_context *ctx);
+void paint_destroy(struct vg_paint *paint);
+
+void paint_set_color(struct vg_paint *paint,
+ const VGfloat *color);
+void paint_get_color(struct vg_paint *paint,
+ VGfloat *color);
+
+void paint_set_coloriv(struct vg_paint *paint,
+ const VGint *color);
+void paint_get_coloriv(struct vg_paint *paint,
+ VGint *color);
+
+void paint_set_colori(struct vg_paint *paint,
+ VGuint rgba);
+
+VGuint paint_colori(struct vg_paint *paint);
+
+void paint_set_type(struct vg_paint *paint, VGPaintType type);
+VGPaintType paint_type(struct vg_paint *paint);
+void paint_resolve_type(struct vg_paint *paint);
+
+void paint_set_linear_gradient(struct vg_paint *paint,
+ const VGfloat *coords);
+void paint_linear_gradient(struct vg_paint *paint,
+ VGfloat *coords);
+void paint_set_linear_gradienti(struct vg_paint *paint,
+ const VGint *coords);
+void paint_linear_gradienti(struct vg_paint *paint,
+ VGint *coords);
+
+
+void paint_set_radial_gradient(struct vg_paint *paint,
+ const VGfloat *values);
+void paint_radial_gradient(struct vg_paint *paint,
+ VGfloat *coords);
+void paint_set_radial_gradienti(struct vg_paint *paint,
+ const VGint *values);
+void paint_radial_gradienti(struct vg_paint *paint,
+ VGint *coords);
+
+
+void paint_set_ramp_stops(struct vg_paint *paint, const VGfloat *stops,
+ int num);
+void paint_ramp_stops(struct vg_paint *paint, VGfloat *stops,
+ int num);
+
+void paint_set_ramp_stopsi(struct vg_paint *paint, const VGint *stops,
+ int num);
+void paint_ramp_stopsi(struct vg_paint *paint, VGint *stops,
+ int num);
+
+int paint_num_ramp_stops(struct vg_paint *paint);
+
+void paint_set_spread_mode(struct vg_paint *paint,
+ VGint mode);
+VGColorRampSpreadMode paint_spread_mode(struct vg_paint *paint);
+
+
+void paint_set_pattern(struct vg_paint *paint,
+ struct vg_image *img);
+void paint_set_pattern_tiling(struct vg_paint *paint,
+ VGTilingMode mode);
+VGTilingMode paint_pattern_tiling(struct vg_paint *paint);
+
+void paint_set_color_ramp_premultiplied(struct vg_paint *paint,
+ VGboolean set);
+VGboolean paint_color_ramp_premultiplied(struct vg_paint *paint);
+
+
+VGint paint_bind_samplers(struct vg_paint *paint, struct pipe_sampler_state **samplers,
+ struct pipe_texture **textures);
+
+VGint paint_constant_buffer_size(struct vg_paint *paint);
+void paint_fill_constant_buffer(struct vg_paint *paint,
+ void *buffer);
+
+
+#endif
diff --git a/src/gallium/state_trackers/vega/path.c b/src/gallium/state_trackers/vega/path.c
new file mode 100644
index 0000000000..4fc23a7a27
--- /dev/null
+++ b/src/gallium/state_trackers/vega/path.c
@@ -0,0 +1,2034 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "path.h"
+
+#include "stroker.h"
+#include "polygon.h"
+#include "bezier.h"
+#include "matrix.h"
+#include "vg_context.h"
+#include "util_array.h"
+#include "arc.h"
+#include "path_utils.h"
+#include "paint.h"
+#include "shader.h"
+
+#include "util/u_memory.h"
+
+#include <assert.h>
+
+#define DEBUG_PATH 0
+
+struct path {
+ struct vg_object base;
+ VGbitfield caps;
+ VGboolean dirty;
+ VGboolean dirty_stroke;
+
+ VGPathDatatype datatype;
+
+ VGfloat scale;
+ VGfloat bias;
+
+ VGint num_segments;
+
+ struct array * segments;
+ struct array * control_points;
+
+ struct {
+ struct polygon_array polygon_array;
+ struct matrix matrix;
+ } fill_polys;
+
+ struct {
+ struct path *path;
+ struct matrix matrix;
+ VGfloat stroke_width;
+ VGfloat miter_limit;
+ VGCapStyle cap_style;
+ VGJoinStyle join_style;
+ } stroked;
+};
+
+
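+/*
+ * Read 'count' coordinates starting at index 'start' from the path's
+ * native datatype, converting them to floats in 'out', and advance the
+ * *data cursor by 'count' elements; callers stream through the
+ * coordinate array by repeatedly passing start == 0.
+ */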
+static INLINE void data_at(void **data,
+ struct path *p,
+ VGint start, VGint count,
+ VGfloat *out)
+{
+ VGPathDatatype dt = p->datatype;
+ VGint i;
+ VGint end = start + count;
+ VGfloat *itr = out;
+
+ switch(dt) {
+ case VG_PATH_DATATYPE_S_8: {
+ VGbyte **bdata = (VGbyte **)data;
+ for (i = start; i < end; ++i) {
+ *itr = (*bdata)[i];
+ ++itr;
+ }
+ *bdata += count;
+ }
+ break;
+ case VG_PATH_DATATYPE_S_16: {
+ VGshort **bdata = (VGshort **)data;
+ for (i = start; i < end; ++i) {
+ *itr = (*bdata)[i];
+ ++itr;
+ }
+ *bdata += count;
+ }
+ break;
+ case VG_PATH_DATATYPE_S_32: {
+ VGint **bdata = (VGint **)data;
+ for (i = start; i < end; ++i) {
+ *itr = (*bdata)[i];
+ ++itr;
+ }
+ *bdata += count;
+ }
+ break;
+ case VG_PATH_DATATYPE_F: {
+ VGfloat **fdata = (VGfloat **)data;
+ for (i = start; i < end; ++i) {
+ *itr = (*fdata)[i];
+ ++itr;
+ }
+ *fdata += count;
+ }
+ break;
+ default:
+ debug_assert(!"Unknown path datatype!");
+ }
+}
+
+
+void vg_float_to_datatype(VGPathDatatype datatype,
+ VGubyte *common_data,
+ const VGfloat *data,
+ VGint num_coords)
+{
+ VGint i;
+ switch(datatype) {
+ case VG_PATH_DATATYPE_S_8: {
+ for (i = 0; i < num_coords; ++i) {
+ common_data[i] = (VGbyte)data[i];
+ }
+ }
+ break;
+ case VG_PATH_DATATYPE_S_16: {
+ VGshort *buf = (VGshort*)common_data;
+ for (i = 0; i < num_coords; ++i) {
+ buf[i] = (VGshort)data[i];
+ }
+ }
+ break;
+ case VG_PATH_DATATYPE_S_32: {
+ VGint *buf = (VGint*)common_data;
+ for (i = 0; i < num_coords; ++i) {
+ buf[i] = (VGint)data[i];
+ }
+ }
+ break;
+ case VG_PATH_DATATYPE_F: {
+ memcpy(common_data, data, sizeof(VGfloat) * num_coords);
+ }
+ break;
+ default:
+ debug_assert(!"Unknown path datatype!");
+ }
+}
+
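+/*
+ * Apply the path's scale and bias to 'num_coords' coordinates in place,
+ * converting each value through float and back to the path datatype.
+ */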
+static void coords_adjust_by_scale_bias(struct path *p,
+ void *pdata, VGint num_coords,
+ VGfloat scale, VGfloat bias,
+ VGPathDatatype datatype)
+{
+ VGfloat data[8];
+ void *coords = (VGfloat *)pdata;
+ VGubyte *common_data = (VGubyte *)pdata;
+ VGint size_dst = size_for_datatype(datatype);
+ VGint i;
+
+ for (i = 0; i < num_coords; ++i) {
+ data_at(&coords, p, 0, 1, data);
+ data[0] = data[0] * scale + bias;
+ vg_float_to_datatype(datatype, common_data, data, 1);
+ common_data += size_dst;
+ }
+}
+
+struct path * path_create(VGPathDatatype dt, VGfloat scale, VGfloat bias,
+ VGint segmentCapacityHint,
+ VGint coordCapacityHint,
+ VGbitfield capabilities)
+{
+ struct path *path = CALLOC_STRUCT(path);
+
+ vg_init_object(&path->base, vg_current_context(), VG_OBJECT_PATH);
+ path->caps = capabilities & VG_PATH_CAPABILITY_ALL;
+ vg_context_add_object(vg_current_context(), VG_OBJECT_PATH, path);
+
+ path->datatype = dt;
+ path->scale = scale;
+ path->bias = bias;
+
+ path->segments = array_create(size_for_datatype(VG_PATH_DATATYPE_S_8));
+ path->control_points = array_create(size_for_datatype(dt));
+
+ path->dirty = VG_TRUE;
+ path->dirty_stroke = VG_TRUE;
+
+ return path;
+}
+
+void path_destroy(struct path *p)
+{
+ vg_context_remove_object(vg_current_context(), VG_OBJECT_PATH, p);
+
+ array_destroy(p->segments);
+ array_destroy(p->control_points);
+ array_destroy(p->fill_polys.polygon_array.array);
+
+ if (p->stroked.path)
+ path_destroy(p->stroked.path);
+
+ free(p);
+}
+
+VGbitfield path_capabilities(struct path *p)
+{
+ return p->caps;
+}
+
+void path_set_capabilities(struct path *p, VGbitfield bf)
+{
+ p->caps = (bf & VG_PATH_CAPABILITY_ALL);
+}
+
+void path_append_data(struct path *p,
+ VGint numSegments,
+ const VGubyte * pathSegments,
+ const void * pathData)
+{
+ VGint old_segments = p->num_segments;
+ VGint old_coords = num_elements_for_segments((VGubyte*)p->segments->data,
+ old_segments);
+ VGint num_new_coords = num_elements_for_segments(pathSegments, numSegments);
+ array_append_data(p->segments, pathSegments, numSegments);
+ array_append_data(p->control_points, pathData, num_new_coords);
+
+ p->num_segments += numSegments;
+ if (!floatsEqual(p->scale, 1.f) || !floatsEqual(p->bias, 0.f)) {
+ VGubyte *coords = (VGubyte*)p->control_points->data;
+ coords_adjust_by_scale_bias(p,
+ coords + old_coords * p->control_points->datatype_size,
+ num_new_coords,
+ p->scale, p->bias, p->datatype);
+ }
+ p->dirty = VG_TRUE;
+ p->dirty_stroke = VG_TRUE;
+}
+
+VGint path_num_segments(struct path *p)
+{
+ return p->num_segments;
+}
+
+static INLINE void map_if_relative(VGfloat ox, VGfloat oy,
+ VGboolean relative,
+ VGfloat *x, VGfloat *y)
+{
+ if (relative) {
+ if (x)
+ *x += ox;
+ if (y)
+ *y += oy;
+ }
+}
+
+static INLINE void close_polygon(struct polygon *current,
+ VGfloat sx, VGfloat sy,
+ VGfloat ox, VGfloat oy,
+ struct matrix *matrix)
+{
+ if (!floatsEqual(sx, ox) ||
+ !floatsEqual(sy, oy)) {
+ VGfloat x0 = sx;
+ VGfloat y0 = sy;
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ polygon_vertex_append(current, x0, y0);
+ }
+}
+
+static void convert_path(struct path *p,
+ VGPathDatatype to,
+ void *dst,
+ VGint num_coords)
+{
+ VGfloat data[8];
+ void *coords = (VGfloat *)p->control_points->data;
+ VGubyte *common_data = (VGubyte *)dst;
+ VGint size_dst = size_for_datatype(to);
+ VGint i;
+
+ for (i = 0; i < num_coords; ++i) {
+ data_at(&coords, p, 0, 1, data);
+ vg_float_to_datatype(to, common_data, data, 1);
+ common_data += size_dst;
+ }
+}
+
+
+static void polygon_array_calculate_bounds( struct polygon_array *polyarray )
+{
+ struct array *polys = polyarray->array;
+ VGfloat min_x, max_x;
+ VGfloat min_y, max_y;
+ VGfloat bounds[4];
+ unsigned i;
+
+ assert(polys);
+ assert(polys->num_elements);
+ polygon_bounding_rect((((struct polygon**)polys->data)[0]), bounds);
+ min_x = bounds[0];
+ min_y = bounds[1];
+ max_x = bounds[0] + bounds[2];
+ max_y = bounds[1] + bounds[3];
+ for (i = 1; i < polys->num_elements; ++i) {
+ struct polygon *p = (((struct polygon**)polys->data)[i]);
+ polygon_bounding_rect(p, bounds);
+ min_x = MIN2(min_x, bounds[0]);
+ min_y = MIN2(min_y, bounds[1]);
+ max_x = MAX2(max_x, bounds[0] + bounds[2]);
+ max_y = MAX2(max_y, bounds[1] + bounds[3]);
+ }
+
+ polyarray->min_x = min_x;
+ polyarray->min_y = min_y;
+ polyarray->max_x = max_x;
+ polyarray->max_y = max_y;
+}
+
+
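+/*
+ * Flatten the path into a set of polygons in surface coordinates for
+ * filling.  The result is cached along with the matrix it was computed
+ * with and reused until the path is marked dirty again.
+ */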
+static struct polygon_array * path_get_fill_polygons(struct path *p, struct matrix *matrix)
+{
+ VGint i;
+ struct polygon *current = 0;
+ VGfloat sx, sy, px, py, ox, oy;
+ VGfloat x0, y0, x1, y1, x2, y2, x3, y3;
+ VGfloat data[8];
+ void *coords = (VGfloat *)p->control_points->data;
+ struct array *array;
+
+ if (p->fill_polys.polygon_array.array)
+ {
+ if (memcmp( &p->fill_polys.matrix,
+ matrix,
+ sizeof *matrix ) == 0 && p->dirty == VG_FALSE)
+ {
+ return &p->fill_polys.polygon_array;
+ }
+ else {
+ array_destroy( p->fill_polys.polygon_array.array );
+ p->fill_polys.polygon_array.array = NULL;
+ }
+ }
+
+ array = array_create(sizeof(struct array*));
+
+ sx = sy = px = py = ox = oy = 0.f;
+
+ current = polygon_create(32);
+
+ for (i = 0; i < p->num_segments; ++i) {
+ VGubyte segment = ((VGubyte*)(p->segments->data))[i];
+ VGint command = SEGMENT_COMMAND(segment);
+ VGboolean relative = SEGMENT_ABS_REL(segment);
+
+ switch(command) {
+ case VG_CLOSE_PATH:
+ close_polygon(current, sx, sy, ox, oy, matrix);
+ ox = sx;
+ oy = sy;
+ break;
+ case VG_MOVE_TO:
+ if (current && polygon_vertex_count(current) > 0) {
+ /* add polygon */
+ close_polygon(current, sx, sy, ox, oy, matrix);
+ array_append_data(array, &current, 1);
+ current = polygon_create(32);
+ }
+ data_at(&coords, p, 0, 2, data);
+ x0 = data[0];
+ y0 = data[1];
+ map_if_relative(ox, oy, relative, &x0, &y0);
+ sx = x0;
+ sy = y0;
+ ox = x0;
+ oy = y0;
+ px = x0;
+ py = y0;
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ polygon_vertex_append(current, x0, y0);
+ break;
+ case VG_LINE_TO:
+ data_at(&coords, p, 0, 2, data);
+ x0 = data[0];
+ y0 = data[1];
+ map_if_relative(ox, oy, relative, &x0, &y0);
+ ox = x0;
+ oy = y0;
+ px = x0;
+ py = y0;
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ polygon_vertex_append(current, x0, y0);
+ break;
+ case VG_HLINE_TO:
+ data_at(&coords, p, 0, 1, data);
+ x0 = data[0];
+ y0 = oy;
+ map_if_relative(ox, oy, relative, &x0, 0);
+ ox = x0;
+ px = x0;
+ py = y0;
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ polygon_vertex_append(current, x0, y0);
+ break;
+ case VG_VLINE_TO:
+ data_at(&coords, p, 0, 1, data);
+ x0 = ox;
+ y0 = data[0];
+ map_if_relative(ox, oy, relative, 0, &y0);
+ oy = y0;
+ px = x0;
+ py = y0;
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ polygon_vertex_append(current, x0, y0);
+ break;
+ case VG_CUBIC_TO: {
+ struct bezier bezier;
+ data_at(&coords, p, 0, 6, data);
+ x0 = ox;
+ y0 = oy;
+ x1 = data[0];
+ y1 = data[1];
+ x2 = data[2];
+ y2 = data[3];
+ x3 = data[4];
+ y3 = data[5];
+ map_if_relative(ox, oy, relative, &x1, &y1);
+ map_if_relative(ox, oy, relative, &x2, &y2);
+ map_if_relative(ox, oy, relative, &x3, &y3);
+ ox = x3;
+ oy = y3;
+ px = x2;
+ py = y2;
+ assert(matrix_is_affine(matrix));
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ matrix_map_point(matrix, x1, y1, &x1, &y1);
+ matrix_map_point(matrix, x2, y2, &x2, &y2);
+ matrix_map_point(matrix, x3, y3, &x3, &y3);
+ bezier_init(&bezier, x0, y0, x1, y1,
+ x2, y2, x3, y3);
+ bezier_add_to_polygon(&bezier, current);
+ }
+ break;
+ case VG_QUAD_TO: {
+ struct bezier bezier;
+ data_at(&coords, p, 0, 4, data);
+ x0 = ox;
+ y0 = oy;
+ x1 = data[0];
+ y1 = data[1];
+ x3 = data[2];
+ y3 = data[3];
+ map_if_relative(ox, oy, relative, &x1, &y1);
+ map_if_relative(ox, oy, relative, &x3, &y3);
+ px = x1;
+ py = y1;
+ { /* degree-elevate the quadratic to a cubic: c1 = (start + 2*ctrl)/3, c2 = (end + 2*ctrl)/3 */
+ x2 = (x3 + 2*x1) / 3.f;
+ y2 = (y3 + 2*y1) / 3.f;
+ x1 = (x0 + 2*x1) / 3.f;
+ y1 = (y0 + 2*y1) / 3.f;
+ }
+ ox = x3;
+ oy = y3;
+ assert(matrix_is_affine(matrix));
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ matrix_map_point(matrix, x1, y1, &x1, &y1);
+ matrix_map_point(matrix, x2, y2, &x2, &y2);
+ matrix_map_point(matrix, x3, y3, &x3, &y3);
+ bezier_init(&bezier, x0, y0, x1, y1,
+ x2, y2, x3, y3);
+ bezier_add_to_polygon(&bezier, current);
+ }
+ break;
+ case VG_SQUAD_TO: {
+ struct bezier bezier;
+ data_at(&coords, p, 0, 2, data);
+ x0 = ox;
+ y0 = oy;
+ x1 = 2*ox-px;
+ y1 = 2*oy-py;
+ x3 = data[0];
+ y3 = data[1];
+ map_if_relative(ox, oy, relative, &x3, &y3);
+ px = x1;
+ py = y1;
+ { /* degree-elevate the quadratic to a cubic: c1 = (start + 2*ctrl)/3, c2 = (end + 2*ctrl)/3 */
+ x2 = (x3 + 2*x1) / 3.f;
+ y2 = (y3 + 2*y1) / 3.f;
+ x1 = (x0 + 2*x1) / 3.f;
+ y1 = (y0 + 2*y1) / 3.f;
+ }
+ ox = x3;
+ oy = y3;
+ assert(matrix_is_affine(matrix));
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ matrix_map_point(matrix, x1, y1, &x1, &y1);
+ matrix_map_point(matrix, x2, y2, &x2, &y2);
+ matrix_map_point(matrix, x3, y3, &x3, &y3);
+ bezier_init(&bezier, x0, y0, x1, y1,
+ x2, y2, x3, y3);
+ bezier_add_to_polygon(&bezier, current);
+ }
+ break;
+ case VG_SCUBIC_TO: {
+ struct bezier bezier;
+ data_at(&coords, p, 0, 4, data);
+ x0 = ox;
+ y0 = oy;
+ x1 = 2*ox-px;
+ y1 = 2*oy-py;
+ x2 = data[0];
+ y2 = data[1];
+ x3 = data[2];
+ y3 = data[3];
+ map_if_relative(ox, oy, relative, &x2, &y2);
+ map_if_relative(ox, oy, relative, &x3, &y3);
+ ox = x3;
+ oy = y3;
+ px = x2;
+ py = y2;
+ assert(matrix_is_affine(matrix));
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ matrix_map_point(matrix, x1, y1, &x1, &y1);
+ matrix_map_point(matrix, x2, y2, &x2, &y2);
+ matrix_map_point(matrix, x3, y3, &x3, &y3);
+ bezier_init(&bezier, x0, y0, x1, y1,
+ x2, y2, x3, y3);
+ bezier_add_to_polygon(&bezier, current);
+ }
+ break;
+ case VG_SCCWARC_TO:
+ case VG_SCWARC_TO:
+ case VG_LCCWARC_TO:
+ case VG_LCWARC_TO: {
+ VGfloat rh, rv, rot;
+ struct arc arc;
+
+ data_at(&coords, p, 0, 5, data);
+ x0 = ox;
+ y0 = oy;
+ rh = data[0];
+ rv = data[1];
+ rot = data[2];
+ x1 = data[3];
+ y1 = data[4];
+ map_if_relative(ox, oy, relative, &x1, &y1);
+#if 0
+ debug_printf("------- ARC (%f, %f), (%f, %f) %f, %f, %f\n",
+ x0, y0, x1, y1, rh, rv, rot);
+#endif
+ arc_init(&arc, command, x0, y0, x1, y1,
+ rh, rv, rot);
+ arc_add_to_polygon(&arc, current,
+ matrix);
+ ox = x1;
+ oy = y1;
+ px = x1;
+ py = y1;
+ }
+ break;
+ default:
+ assert(!"Unknown segment!");
+ abort();
+ }
+ }
+ if (current) {
+ if (polygon_vertex_count(current) > 0) {
+ close_polygon(current, sx, sy, ox, oy, matrix);
+ array_append_data(array, &current, 1);
+ } else
+ polygon_destroy(current);
+ }
+
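+ /* Cache the tessellated polygons together with the matrix they were
+ * built for and clear the dirty flag, so an unchanged path can reuse
+ * them on later fills. */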
+ p->fill_polys.polygon_array.array = array;
+ p->fill_polys.matrix = *matrix;
+
+ polygon_array_calculate_bounds( &p->fill_polys.polygon_array );
+
+ p->dirty = VG_FALSE;
+
+ return &p->fill_polys.polygon_array;
+}
+
+VGbyte path_datatype_size(struct path *p)
+{
+ return size_for_datatype(p->datatype);
+}
+
+VGPathDatatype path_datatype(struct path *p)
+{
+ return p->datatype;
+}
+
+VGfloat path_scale(struct path *p)
+{
+ return p->scale;
+}
+
+VGfloat path_bias(struct path *p)
+{
+ return p->bias;
+}
+
+VGint path_num_coords(struct path *p)
+{
+ return num_elements_for_segments((VGubyte*)p->segments->data,
+ p->num_segments);
+}
+
+void path_modify_coords(struct path *p,
+ VGint startIndex,
+ VGint numSegments,
+ const void * pathData)
+{
+ VGubyte *segments = (VGubyte*)(p->segments->data);
+ VGint count = num_elements_for_segments(&segments[startIndex], numSegments);
+ VGint start_cp = num_elements_for_segments(segments, startIndex);
+
+ array_change_data(p->control_points, pathData, start_cp, count);
+ coords_adjust_by_scale_bias(p,
+ ((VGubyte*)p->control_points->data) +
+ (startIndex * p->control_points->datatype_size),
+ path_num_coords(p),
+ p->scale, p->bias, p->datatype);
+ p->dirty = VG_TRUE;
+ p->dirty_stroke = VG_TRUE;
+}
+
+void path_for_each_segment(struct path *path,
+ path_for_each_cb cb,
+ void *user_data)
+{
+ VGint i;
+ struct path_for_each_data p;
+ VGfloat data[8];
+ void *coords = (VGfloat *)path->control_points->data;
+
+ p.coords = data;
+ p.sx = p.sy = p.px = p.py = p.ox = p.oy = 0.f;
+ p.user_data = user_data;
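+ /* sx/sy track the start of the current subpath, ox/oy the current
+ * point and px/py the last control point (used to reflect the implicit
+ * control point of the smooth S* segments); the coordinates handed to
+ * the callback are always absolute. */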
+
+ for (i = 0; i < path->num_segments; ++i) {
+ VGint command;
+ VGboolean relative;
+
+ p.segment = ((VGubyte*)(path->segments->data))[i];
+ command = SEGMENT_COMMAND(p.segment);
+ relative = SEGMENT_ABS_REL(p.segment);
+
+ switch(command) {
+ case VG_CLOSE_PATH:
+ cb(path, &p);
+ break;
+ case VG_MOVE_TO:
+ data_at(&coords, path, 0, 2, data);
+ map_if_relative(p.ox, p.oy, relative, &data[0], &data[1]);
+ cb(path, &p);
+ p.sx = data[0];
+ p.sy = data[1];
+ p.ox = data[0];
+ p.oy = data[1];
+ p.px = data[0];
+ p.py = data[1];
+ break;
+ case VG_LINE_TO:
+ data_at(&coords, path, 0, 2, data);
+ map_if_relative(p.ox, p.oy, relative, &data[0], &data[1]);
+ cb(path, &p);
+ p.ox = data[0];
+ p.oy = data[1];
+ p.px = data[0];
+ p.py = data[1];
+ break;
+ case VG_HLINE_TO:
+ data_at(&coords, path, 0, 1, data);
+ map_if_relative(p.ox, p.oy, relative, &data[0], 0);
+ p.segment = VG_LINE_TO;
+ data[1] = p.oy;
+ cb(path, &p);
+ p.ox = data[0];
+ p.oy = data[1];
+ p.px = data[0];
+ p.py = data[1];
+ break;
+ case VG_VLINE_TO:
+ data_at(&coords, path, 0, 1, data);
+ map_if_relative(p.ox, p.oy, relative, 0, &data[0]);
+ p.segment = VG_LINE_TO;
+ data[1] = data[0];
+ data[0] = p.ox;
+ cb(path, &p);
+ p.ox = data[0];
+ p.oy = data[1];
+ p.px = data[0];
+ p.py = data[1];
+ break;
+ case VG_CUBIC_TO: {
+ data_at(&coords, path, 0, 6, data);
+ map_if_relative(p.ox, p.oy, relative, &data[0], &data[1]);
+ map_if_relative(p.ox, p.oy, relative, &data[2], &data[3]);
+ map_if_relative(p.ox, p.oy, relative, &data[4], &data[5]);
+ cb(path, &p);
+ p.px = data[2];
+ p.py = data[3];
+ p.ox = data[4];
+ p.oy = data[5];
+ }
+ break;
+ case VG_QUAD_TO: {
+ data_at(&coords, path, 0, 4, data);
+ map_if_relative(p.ox, p.oy, relative, &data[0], &data[1]);
+ map_if_relative(p.ox, p.oy, relative, &data[2], &data[3]);
+ cb(path, &p);
+ p.px = data[0];
+ p.py = data[1];
+ p.ox = data[2];
+ p.oy = data[3];
+ }
+ break;
+ case VG_SQUAD_TO: {
+ data_at(&coords, path, 0, 2, data);
+ map_if_relative(p.ox, p.oy, relative, &data[0], &data[1]);
+ cb(path, &p);
+ p.px = 2*p.ox-p.px;
+ p.py = 2*p.oy-p.py;
+ p.ox = data[0]; /* SQUAD_TO only carries the end point */
+ p.oy = data[1];
+ }
+ break;
+ case VG_SCUBIC_TO: {
+ data_at(&coords, path, 0, 4, data);
+ map_if_relative(p.ox, p.oy, relative, &data[0], &data[1]);
+ map_if_relative(p.ox, p.oy, relative, &data[2], &data[3]);
+ cb(path, &p);
+ p.px = data[0];
+ p.py = data[1];
+ p.ox = data[2];
+ p.oy = data[3];
+ }
+ break;
+ case VG_SCCWARC_TO:
+ case VG_SCWARC_TO:
+ case VG_LCCWARC_TO:
+ case VG_LCWARC_TO: {
+ data_at(&coords, path, 0, 5, data);
+ map_if_relative(p.ox, p.oy, relative, &data[3], &data[4]);
+#if 0
+ debug_printf("------- ARC (%f, %f), (%f, %f) %f, %f, %f\n",
+ p.ox, p.oy, data[3], data[4], data[0], data[1], data[2]);
+#endif
+ cb(path, &p);
+ p.ox = data[3];
+ p.oy = data[4];
+ p.px = data[3];
+ p.py = data[4];
+ }
+ break;
+ default:
+ assert(!"Unknown segment!");
+ abort();
+ }
+ }
+}
+
+struct transform_data {
+ struct array *segments;
+ struct array *coords;
+
+ struct matrix *matrix;
+
+ VGPathDatatype datatype;
+};
+
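+/* Maps one segment's control points through td->matrix and appends the
+ * result in the destination datatype. H/VLINE never reach this callback
+ * (path_for_each_segment rewrites them as LINE_TO), and arcs cannot be
+ * transformed by mapping coordinates alone, so they are expanded via
+ * arc_to_path() directly into the destination arrays. */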
+static VGboolean transform_cb(struct path *p,
+ struct path_for_each_data *pd)
+{
+ struct transform_data *td = (struct transform_data *)pd->user_data;
+ VGint num_coords = num_elements_for_segments(&pd->segment, 1);
+ VGubyte segment = SEGMENT_COMMAND(pd->segment); /* absolute variant: abs/rel bit masked off */
+ VGfloat data[8];
+ VGubyte common_data[sizeof(VGfloat)*8];
+
+ memcpy(data, pd->coords, sizeof(VGfloat) * num_coords);
+
+ switch(segment) {
+ case VG_CLOSE_PATH:
+ break;
+ case VG_MOVE_TO:
+ matrix_map_point(td->matrix,
+ data[0], data[1], &data[0], &data[1]);
+ break;
+ case VG_LINE_TO:
+ matrix_map_point(td->matrix,
+ data[0], data[1], &data[0], &data[1]);
+ break;
+ case VG_HLINE_TO:
+ case VG_VLINE_TO:
+ assert(0);
+ break;
+ case VG_QUAD_TO:
+ matrix_map_point(td->matrix,
+ data[0], data[1], &data[0], &data[1]);
+ matrix_map_point(td->matrix,
+ data[2], data[3], &data[2], &data[3]);
+ break;
+ case VG_CUBIC_TO:
+ matrix_map_point(td->matrix,
+ data[0], data[1], &data[0], &data[1]);
+ matrix_map_point(td->matrix,
+ data[2], data[3], &data[2], &data[3]);
+ matrix_map_point(td->matrix,
+ data[4], data[5], &data[4], &data[5]);
+ break;
+ case VG_SQUAD_TO:
+ matrix_map_point(td->matrix,
+ data[0], data[1], &data[0], &data[1]);
+ break;
+ case VG_SCUBIC_TO:
+ matrix_map_point(td->matrix,
+ data[0], data[1], &data[0], &data[1]);
+ matrix_map_point(td->matrix,
+ data[2], data[3], &data[2], &data[3]);
+ break;
+ case VG_SCCWARC_TO:
+ case VG_SCWARC_TO:
+ case VG_LCCWARC_TO:
+ case VG_LCWARC_TO: {
+ struct arc arc;
+ struct path *path = path_create(td->datatype,
+ 1, 0, 0, 0, VG_PATH_CAPABILITY_ALL);
+ arc_init(&arc, segment,
+ pd->ox, pd->oy, data[3], data[4],
+ data[0], data[1], data[2]);
+
+ arc_to_path(&arc, path, td->matrix);
+
+ num_coords = path_num_coords(path);
+
+ array_append_data(td->segments, path->segments->data,
+ path->num_segments);
+ array_append_data(td->coords, path->control_points->data,
+ num_coords);
+ path_destroy(path);
+
+ return VG_TRUE;
+ }
+ break;
+ default:
+ break;
+ }
+
+ vg_float_to_datatype(td->datatype, common_data, data, num_coords);
+
+ array_append_data(td->segments, &pd->segment, 1);
+ array_append_data(td->coords, common_data, num_coords);
+ return VG_TRUE;
+}
+
+void path_transform(struct path *dst, struct path *src)
+{
+ struct transform_data data;
+ struct vg_context *ctx = dst->base.ctx;
+
+ data.segments = dst->segments;
+ data.coords = dst->control_points;
+ data.matrix = &ctx->state.vg.path_user_to_surface_matrix;
+ data.datatype = dst->datatype;
+
+ path_for_each_segment(src, transform_cb, (void*)&data);
+
+ dst->num_segments = dst->segments->num_elements;
+ dst->dirty = VG_TRUE;
+ dst->dirty_stroke = VG_TRUE;
+}
+
+void path_append_path(struct path *dst,
+ struct path *src)
+{
+ VGint num_coords = path_num_coords(src);
+ void *dst_data = malloc(size_for_datatype(dst->datatype) * num_coords);
+ array_append_data(dst->segments,
+ src->segments->data,
+ src->num_segments);
+ convert_path(src, dst->datatype,
+ dst_data, num_coords);
+ array_append_data(dst->control_points,
+ dst_data,
+ num_coords);
+ free(dst_data);
+
+ dst->num_segments += src->num_segments;
+ dst->dirty = VG_TRUE;
+ dst->dirty_stroke = VG_TRUE;
+}
+
+static INLINE VGboolean is_segment_arc(VGubyte segment)
+{
+ VGubyte scommand = SEGMENT_COMMAND(segment);
+ return (scommand == VG_SCCWARC_TO ||
+ scommand == VG_SCWARC_TO ||
+ scommand == VG_LCCWARC_TO ||
+ scommand == VG_LCWARC_TO);
+}
+
+struct path_iter_data {
+ struct path *path;
+ VGubyte segment;
+ void *coords;
+ VGfloat px, py, ox, oy, sx, sy;
+};
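+
+/* Rewrites the iterator's current segment in absolute, canonical form:
+ * H/VLINE become LINE_TO_ABS, QUAD/SQUAD/SCUBIC become CUBIC_TO_ABS and
+ * arcs keep their command with the absolute bit set. The absolute
+ * coordinates are written to 'data', their count to '*num_coords', and
+ * sx/sy, ox/oy, px/py are updated along the way. */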
+static INLINE VGubyte normalize_coords(struct path_iter_data *pd,
+ VGint *num_coords,
+ VGfloat *data)
+{
+ VGint command = SEGMENT_COMMAND(pd->segment);
+ VGboolean relative = SEGMENT_ABS_REL(pd->segment);
+
+ switch(command) {
+ case VG_CLOSE_PATH:
+ *num_coords = 0;
+ pd->ox = pd->sx;
+ pd->oy = pd->sy;
+ return VG_CLOSE_PATH;
+ break;
+ case VG_MOVE_TO:
+ data_at(&pd->coords, pd->path, 0, 2, data);
+ map_if_relative(pd->ox, pd->oy, relative, &data[0], &data[1]);
+ pd->sx = data[0];
+ pd->sy = data[1];
+ pd->ox = data[0];
+ pd->oy = data[1];
+ pd->px = data[0];
+ pd->py = data[1];
+ *num_coords = 2;
+ return VG_MOVE_TO_ABS;
+ break;
+ case VG_LINE_TO:
+ data_at(&pd->coords, pd->path, 0, 2, data);
+ map_if_relative(pd->ox, pd->oy, relative, &data[0], &data[1]);
+ pd->ox = data[0];
+ pd->oy = data[1];
+ pd->px = data[0];
+ pd->py = data[1];
+ *num_coords = 2;
+ return VG_LINE_TO_ABS;
+ break;
+ case VG_HLINE_TO:
+ data_at(&pd->coords, pd->path, 0, 1, data);
+ map_if_relative(pd->ox, pd->oy, relative, &data[0], 0);
+ data[1] = pd->oy;
+ pd->ox = data[0];
+ pd->oy = data[1];
+ pd->px = data[0];
+ pd->py = data[1];
+ *num_coords = 2;
+ return VG_LINE_TO_ABS;
+ break;
+ case VG_VLINE_TO:
+ data_at(&pd->coords, pd->path, 0, 1, data);
+ map_if_relative(pd->ox, pd->oy, relative, 0, &data[0]);
+ data[1] = data[0];
+ data[0] = pd->ox;
+ pd->ox = data[0];
+ pd->oy = data[1];
+ pd->px = data[0];
+ pd->py = data[1];
+ *num_coords = 2;
+ return VG_LINE_TO_ABS;
+ break;
+ case VG_CUBIC_TO: {
+ data_at(&pd->coords, pd->path, 0, 6, data);
+ map_if_relative(pd->ox, pd->oy, relative, &data[0], &data[1]);
+ map_if_relative(pd->ox, pd->oy, relative, &data[2], &data[3]);
+ map_if_relative(pd->ox, pd->oy, relative, &data[4], &data[5]);
+ pd->px = data[2];
+ pd->py = data[3];
+ pd->ox = data[4];
+ pd->oy = data[5];
+ *num_coords = 6;
+ return VG_CUBIC_TO_ABS;
+ }
+ break;
+ case VG_QUAD_TO: {
+ VGfloat x0, y0, x1, y1, x2, y2, x3, y3;
+ data_at(&pd->coords, pd->path, 0, 4, data);
+ x0 = pd->ox;
+ y0 = pd->oy;
+ x1 = data[0];
+ y1 = data[1];
+ x3 = data[2];
+ y3 = data[3];
+ map_if_relative(pd->ox, pd->oy, relative, &x1, &y1);
+ map_if_relative(pd->ox, pd->oy, relative, &x3, &y3);
+ pd->px = x1;
+ pd->py = y1;
+ { /* form a cubic out of it */
+ x2 = (x3 + 2*x1) / 3.f;
+ y2 = (y3 + 2*y1) / 3.f;
+ x1 = (x0 + 2*x1) / 3.f;
+ y1 = (y0 + 2*y1) / 3.f;
+ }
+ pd->ox = x3;
+ pd->oy = y3;
+ data[0] = x1;
+ data[1] = y1;
+ data[2] = x2;
+ data[3] = y2;
+ data[4] = x3;
+ data[5] = y3;
+ *num_coords = 6;
+ return VG_CUBIC_TO_ABS;
+ }
+ break;
+ case VG_SQUAD_TO: {
+ VGfloat x0, y0, x1, y1, x2, y2, x3, y3;
+ data_at(&pd->coords, pd->path, 0, 2, data);
+ x0 = pd->ox;
+ y0 = pd->oy;
+ x1 = 2 * pd->ox - pd->px;
+ y1 = 2 * pd->oy - pd->py;
+ x3 = data[0];
+ y3 = data[1];
+ map_if_relative(pd->ox, pd->oy, relative, &x3, &y3);
+ pd->px = x1;
+ pd->py = y1;
+ { /* form a cubic out of it */
+ x2 = (x3 + 2*x1) / 3.f;
+ y2 = (y3 + 2*y1) / 3.f;
+ x1 = (x0 + 2*x1) / 3.f;
+ y1 = (y0 + 2*y1) / 3.f;
+ }
+ pd->ox = x3;
+ pd->oy = y3;
+ data[0] = x1;
+ data[1] = y1;
+ data[2] = x2;
+ data[3] = y2;
+ data[4] = x3;
+ data[5] = y3;
+ *num_coords = 6;
+ return VG_CUBIC_TO_ABS;
+ }
+ break;
+ case VG_SCUBIC_TO: {
+ VGfloat x0, y0, x1, y1, x2, y2, x3, y3;
+ data_at(&pd->coords, pd->path, 0, 4, data);
+ x0 = pd->ox;
+ y0 = pd->oy;
+ x1 = 2*pd->ox-pd->px;
+ y1 = 2*pd->oy-pd->py;
+ x2 = data[0];
+ y2 = data[1];
+ x3 = data[2];
+ y3 = data[3];
+ map_if_relative(pd->ox, pd->oy, relative, &x2, &y2);
+ map_if_relative(pd->ox, pd->oy, relative, &x3, &y3);
+ pd->ox = x3;
+ pd->oy = y3;
+ pd->px = x2;
+ pd->py = y2;
+ data[0] = x1;
+ data[1] = y1;
+ data[2] = x2;
+ data[3] = y2;
+ data[4] = x3;
+ data[5] = y3;
+ *num_coords = 6;
+ return VG_CUBIC_TO_ABS;
+ }
+ break;
+ case VG_SCCWARC_TO:
+ case VG_SCWARC_TO:
+ case VG_LCCWARC_TO:
+ case VG_LCWARC_TO: {
+ data_at(&pd->coords, pd->path, 0, 5, data);
+ map_if_relative(pd->ox, pd->oy, relative, &data[3], &data[4]);
+ pd->ox = data[3];
+ pd->oy = data[4];
+ pd->px = data[3];
+ pd->py = data[4];
+ *num_coords = 5;
+ return command | VG_ABSOLUTE;
+ }
+ break;
+ default:
+ assert(!"Unknown segment!");
+ abort();
+ }
+}
+
+static void linearly_interpolate(VGfloat *result,
+ const VGfloat *start,
+ const VGfloat *end,
+ VGfloat amount,
+ VGint number)
+{
+ VGint i;
+ for (i = 0; i < number; ++i) {
+ result[i] = start[i] + (end[i] - start[i]) * amount;
+ }
+}
+
+VGboolean path_interpolate(struct path *dst,
+ struct path *start, struct path *end,
+ VGfloat amount)
+{
+ /* temporary path that we can discard if it will turn
+ * out that start is not compatible with end */
+ struct path *res_path = path_create(dst->datatype,
+ 1.0, 0.0,
+ 0, 0, dst->caps);
+ VGint i;
+ VGfloat start_coords[8];
+ VGfloat end_coords[8];
+ VGfloat results[8];
+ VGubyte common_data[sizeof(VGfloat)*8];
+ struct path_iter_data start_iter, end_iter;
+
+ memset(&start_iter, 0, sizeof(struct path_iter_data));
+ memset(&end_iter, 0, sizeof(struct path_iter_data));
+
+ start_iter.path = start;
+ start_iter.coords = start->control_points->data;
+ end_iter.path = end;
+ end_iter.coords = end->control_points->data;
+
+ for (i = 0; i < start->num_segments; ++i) {
+ VGubyte segment;
+ VGubyte ssegment, esegment;
+ VGint snum_coords, enum_coords;
+ start_iter.segment = ((VGubyte*)(start->segments->data))[i];
+ end_iter.segment = ((VGubyte*)(end->segments->data))[i];
+
+ ssegment = normalize_coords(&start_iter, &snum_coords,
+ start_coords);
+ esegment = normalize_coords(&end_iter, &enum_coords,
+ end_coords);
+
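+ /* Paths interpolate only if their normalized segments match pairwise;
+ * the one exception is arcs, where any arc type may pair with any
+ * other and the type of the nearer endpoint wins (the end path when
+ * amount > 0.5, the start path otherwise). */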
+ if (is_segment_arc(ssegment)) {
+ if (!is_segment_arc(esegment)) {
+ path_destroy(res_path);
+ return VG_FALSE;
+ }
+ if (amount > 0.5)
+ segment = esegment;
+ else
+ segment = ssegment;
+ } else if (is_segment_arc(esegment)) {
+ path_destroy(res_path);
+ return VG_FALSE;
+ }
+ else if (ssegment != esegment) {
+ path_destroy(res_path);
+ return VG_FALSE;
+ }
+ else
+ segment = ssegment;
+
+ linearly_interpolate(results, start_coords, end_coords,
+ amount, snum_coords);
+ vg_float_to_datatype(dst->datatype, common_data, results, snum_coords);
+ path_append_data(res_path, 1, &segment, common_data);
+ }
+
+ path_append_path(dst, res_path);
+ path_destroy(res_path);
+
+ dst->dirty = VG_TRUE;
+ dst->dirty_stroke = VG_TRUE;
+
+ return VG_TRUE;
+}
+
+void path_clear(struct path *p, VGbitfield capabilities)
+{
+ path_set_capabilities(p, capabilities);
+ array_destroy(p->segments);
+ array_destroy(p->control_points);
+ p->segments = array_create(size_for_datatype(VG_PATH_DATATYPE_S_8));
+ p->control_points = array_create(size_for_datatype(p->datatype));
+ p->num_segments = 0;
+ p->dirty = VG_TRUE;
+ p->dirty_stroke = VG_TRUE;
+}
+
+struct path * path_create_stroke(struct path *p,
+ struct matrix *matrix)
+{
+ VGint i;
+ VGfloat sx, sy, px, py, ox, oy;
+ VGfloat x0, y0, x1, y1, x2, y2, x3, y3;
+ VGfloat data[8];
+ void *coords = (VGfloat *)p->control_points->data;
+ int dashed = (p->base.ctx->state.vg.stroke.dash_pattern_num ? 1 : 0);
+ struct dash_stroker stroker;
+ struct vg_state *vg_state = &p->base.ctx->state.vg;
+
+ if (p->stroked.path)
+ {
+ /* ### compare the dash patterns to see if we can cache them.
+ * for now we simply always bail out if the path is dashed.
+ */
+ if (memcmp( &p->stroked.matrix,
+ matrix,
+ sizeof *matrix ) == 0 &&
+ !dashed && !p->dirty_stroke &&
+ floatsEqual(p->stroked.stroke_width, vg_state->stroke.line_width.f) &&
+ floatsEqual(p->stroked.miter_limit, vg_state->stroke.miter_limit.f) &&
+ p->stroked.cap_style == vg_state->stroke.cap_style &&
+ p->stroked.join_style == vg_state->stroke.join_style)
+ {
+ return p->stroked.path;
+ }
+ else {
+ path_destroy( p->stroked.path );
+ p->stroked.path = NULL;
+ }
+ }
+
+
+ sx = sy = px = py = ox = oy = 0.f;
+
+ if (dashed)
+ dash_stroker_init((struct stroker *)&stroker, vg_state);
+ else
+ stroker_init((struct stroker *)&stroker, vg_state);
+
+ stroker_begin((struct stroker *)&stroker);
+
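+ /* Walk the segments, convert each to absolute coordinates, map them
+ * through 'matrix' and feed them to the (dash) stroker, which
+ * accumulates the stroke outline as a new path (filled later with the
+ * non-zero rule by path_stroke()); degenerate segments are skipped or
+ * reduced to lines. */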
+ for (i = 0; i < p->num_segments; ++i) {
+ VGubyte segment = ((VGubyte*)(p->segments->data))[i];
+ VGint command = SEGMENT_COMMAND(segment);
+ VGboolean relative = SEGMENT_ABS_REL(segment);
+
+ switch(command) {
+ case VG_CLOSE_PATH: {
+ VGfloat x0 = sx;
+ VGfloat y0 = sy;
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ stroker_line_to((struct stroker *)&stroker, x0, y0);
+ }
+ break;
+ case VG_MOVE_TO:
+ data_at(&coords, p, 0, 2, data);
+ x0 = data[0];
+ y0 = data[1];
+ map_if_relative(ox, oy, relative, &x0, &y0);
+ sx = x0;
+ sy = y0;
+ ox = x0;
+ oy = y0;
+ px = x0;
+ py = y0;
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ stroker_move_to((struct stroker *)&stroker, x0, y0);
+ break;
+ case VG_LINE_TO:
+ data_at(&coords, p, 0, 2, data);
+ x0 = data[0];
+ y0 = data[1];
+ map_if_relative(ox, oy, relative, &x0, &y0);
+ ox = x0;
+ oy = y0;
+ px = x0;
+ py = y0;
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ stroker_line_to((struct stroker *)&stroker, x0, y0);
+ break;
+ case VG_HLINE_TO:
+ data_at(&coords, p, 0, 1, data);
+ x0 = data[0];
+ y0 = oy;
+ map_if_relative(ox, oy, relative, &x0, 0);
+ ox = x0;
+ px = x0;
+ py = y0;
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ stroker_line_to((struct stroker *)&stroker, x0, y0);
+ break;
+ case VG_VLINE_TO:
+ data_at(&coords, p, 0, 1, data);
+ x0 = ox;
+ y0 = data[0];
+ map_if_relative(ox, oy, relative, 0, &y0);
+ oy = y0;
+ px = x0;
+ py = y0;
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ stroker_line_to((struct stroker *)&stroker, x0, y0);
+ break;
+ case VG_CUBIC_TO: {
+ data_at(&coords, p, 0, 6, data);
+ x0 = ox;
+ y0 = oy;
+ x1 = data[0];
+ y1 = data[1];
+ x2 = data[2];
+ y2 = data[3];
+ x3 = data[4];
+ y3 = data[5];
+ map_if_relative(ox, oy, relative, &x1, &y1);
+ map_if_relative(ox, oy, relative, &x2, &y2);
+ map_if_relative(ox, oy, relative, &x3, &y3);
+ if (floatsEqual(x1, ox) && floatsEqual(y1, oy) &&
+ floatsEqual(x1, x2) && floatsEqual(y1, y2) &&
+ floatsEqual(x2, x3) && floatsEqual(y2, y3)) {
+ /*ignore the empty segment */
+ continue;
+ } else if (floatsEqual(x3, ox) && floatsEqual(y3, oy)) {
+ /* if dup vertex, emit a line */
+ ox = x3;
+ oy = y3;
+ matrix_map_point(matrix, x3, y3, &x3, &y3);
+ stroker_line_to((struct stroker *)&stroker, x3, y3);
+ continue;
+ }
+ ox = x3;
+ oy = y3;
+ px = x2;
+ py = y2;
+ assert(matrix_is_affine(matrix));
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ matrix_map_point(matrix, x1, y1, &x1, &y1);
+ matrix_map_point(matrix, x2, y2, &x2, &y2);
+ matrix_map_point(matrix, x3, y3, &x3, &y3);
+ stroker_curve_to((struct stroker *)&stroker, x1, y1, x2, y2, x3, y3);
+ }
+ break;
+ case VG_QUAD_TO: {
+ data_at(&coords, p, 0, 4, data);
+ x0 = ox;
+ y0 = oy;
+ x1 = data[0];
+ y1 = data[1];
+ x3 = data[2];
+ y3 = data[3];
+ map_if_relative(ox, oy, relative, &x1, &y1);
+ map_if_relative(ox, oy, relative, &x3, &y3);
+ px = x1;
+ py = y1;
+ { /* form a cubic out of it */
+ x2 = (x3 + 2*x1) / 3.f;
+ y2 = (y3 + 2*y1) / 3.f;
+ x1 = (x0 + 2*x1) / 3.f;
+ y1 = (y0 + 2*y1) / 3.f;
+ }
+ if (floatsEqual(x1, ox) && floatsEqual(y1, oy) &&
+ floatsEqual(x1, x2) && floatsEqual(y1, y2) &&
+ floatsEqual(x2, x3) && floatsEqual(y2, y3)) {
+ /*ignore the empty segment */
+ continue;
+ } else if (floatsEqual(x3, ox) && floatsEqual(y3, oy)) {
+ /* if dup vertex, emit a line */
+ ox = x3;
+ oy = y3;
+ matrix_map_point(matrix, x3, y3, &x3, &y3);
+ stroker_line_to((struct stroker *)&stroker, x3, y3);
+ continue;
+ }
+ ox = x3;
+ oy = y3;
+ assert(matrix_is_affine(matrix));
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ matrix_map_point(matrix, x1, y1, &x1, &y1);
+ matrix_map_point(matrix, x2, y2, &x2, &y2);
+ matrix_map_point(matrix, x3, y3, &x3, &y3);
+ stroker_curve_to((struct stroker *)&stroker, x1, y1, x2, y2, x3, y3);
+ }
+ break;
+ case VG_SQUAD_TO: {
+ data_at(&coords, p, 0, 2, data);
+ x0 = ox;
+ y0 = oy;
+ x1 = 2*ox-px;
+ y1 = 2*oy-py;
+ x3 = data[0];
+ y3 = data[1];
+ map_if_relative(ox, oy, relative, &x3, &y3);
+ px = x1;
+ py = y1;
+ { /* form a cubic out of it */
+ x2 = (x3 + 2*x1) / 3.f;
+ y2 = (y3 + 2*y1) / 3.f;
+ x1 = (x0 + 2*x1) / 3.f;
+ y1 = (y0 + 2*y1) / 3.f;
+ }
+ if (floatsEqual(x1, ox) && floatsEqual(y1, oy) &&
+ floatsEqual(x1, x2) && floatsEqual(y1, y2) &&
+ floatsEqual(x2, x3) && floatsEqual(y2, y3)) {
+ /*ignore the empty segment */
+ continue;
+ } else if (floatsEqual(x3, ox) && floatsEqual(y3, oy)) {
+ /* if dup vertex, emit a line */
+ ox = x3;
+ oy = y3;
+ matrix_map_point(matrix, x3, y3, &x3, &y3);
+ stroker_line_to((struct stroker *)&stroker, x3, y3);
+ continue;
+ }
+ ox = x3;
+ oy = y3;
+ assert(matrix_is_affine(matrix));
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ matrix_map_point(matrix, x1, y1, &x1, &y1);
+ matrix_map_point(matrix, x2, y2, &x2, &y2);
+ matrix_map_point(matrix, x3, y3, &x3, &y3);
+ stroker_curve_to((struct stroker *)&stroker, x1, y1, x2, y2, x3, y3);
+ }
+ break;
+ case VG_SCUBIC_TO: {
+ data_at(&coords, p, 0, 4, data);
+ x0 = ox;
+ y0 = oy;
+ x1 = 2*ox-px;
+ y1 = 2*oy-py;
+ x2 = data[0];
+ y2 = data[1];
+ x3 = data[2];
+ y3 = data[3];
+ map_if_relative(ox, oy, relative, &x2, &y2);
+ map_if_relative(ox, oy, relative, &x3, &y3);
+ if (floatsEqual(x1, ox) && floatsEqual(y1, oy) &&
+ floatsEqual(x1, x2) && floatsEqual(y1, y2) &&
+ floatsEqual(x2, x3) && floatsEqual(y2, y3)) {
+ /*ignore the empty segment */
+ continue;
+ } else if (floatsEqual(x3, ox) && floatsEqual(y3, oy)) {
+ /* if dup vertex, emit a line */
+ ox = x3;
+ oy = y3;
+ matrix_map_point(matrix, x3, y3, &x3, &y3);
+ stroker_line_to((struct stroker *)&stroker, x3, y3);
+ continue;
+ }
+ ox = x3;
+ oy = y3;
+ px = x2;
+ py = y2;
+ assert(matrix_is_affine(matrix));
+ matrix_map_point(matrix, x0, y0, &x0, &y0);
+ matrix_map_point(matrix, x1, y1, &x1, &y1);
+ matrix_map_point(matrix, x2, y2, &x2, &y2);
+ matrix_map_point(matrix, x3, y3, &x3, &y3);
+ stroker_curve_to((struct stroker *)&stroker, x1, y1, x2, y2, x3, y3);
+ }
+ break;
+ case VG_SCCWARC_TO:
+ case VG_SCWARC_TO:
+ case VG_LCCWARC_TO:
+ case VG_LCWARC_TO: {
+ VGfloat rh, rv, rot;
+ struct arc arc;
+
+ data_at(&coords, p, 0, 5, data);
+ x0 = ox;
+ y0 = oy;
+ rh = data[0];
+ rv = data[1];
+ rot = data[2];
+ x1 = data[3];
+ y1 = data[4];
+ map_if_relative(ox, oy, relative, &x1, &y1);
+ if (floatsEqual(x1, ox) && floatsEqual(y1, oy)) {
+ /* if dup vertex, emit a line */
+ ox = x1;
+ oy = y1;
+ matrix_map_point(matrix, x1, y1, &x1, &y1);
+ stroker_line_to((struct stroker *)&stroker, x1, y1);
+ continue;
+ }
+ arc_init(&arc, command, x0, y0, x1, y1,
+ rh, rv, rot);
+ arc_stroke_cb(&arc, (struct stroker *)&stroker,
+ matrix);
+ ox = x1;
+ oy = y1;
+ px = x1;
+ py = y1;
+ }
+ break;
+ default:
+ assert(!"Unknown segment!");
+ abort();
+ }
+ }
+
+ stroker_end((struct stroker *)&stroker);
+
+ if (dashed)
+ dash_stroker_cleanup((struct dash_stroker *)&stroker);
+ else
+ stroker_cleanup((struct stroker *)&stroker);
+
+ p->stroked.path = stroker.base.path;
+ p->stroked.matrix = *matrix;
+ p->dirty_stroke = VG_FALSE;
+ p->stroked.stroke_width = vg_state->stroke.line_width.f;
+ p->stroked.miter_limit = vg_state->stroke.miter_limit.f;
+ p->stroked.cap_style = vg_state->stroke.cap_style;
+ p->stroked.join_style = vg_state->stroke.join_style;
+
+ return stroker.base.path;
+}
+
+void path_render(struct path *p, VGbitfield paintModes)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct matrix *mat = &ctx->state.vg.path_user_to_surface_matrix;
+
+ vg_validate_state(ctx);
+
+ shader_set_drawing_image(ctx->shader, VG_FALSE);
+ shader_set_image(ctx->shader, 0);
+#if 0
+ fprintf(stderr, "Matrix(11=%f 12=%f 13=%f 21=%f 22=%f 23=%f 31=%f 32=%f 33=%f)\n",
+ mat->m[0], mat->m[1], mat->m[2],
+ mat->m[3], mat->m[4], mat->m[5],
+ mat->m[6], mat->m[7], mat->m[8]);
+#endif
+ if (paintModes & VG_FILL_PATH) {
+ /* First the fill */
+ shader_set_paint(ctx->shader, ctx->state.vg.fill_paint);
+ shader_bind(ctx->shader);
+ path_fill(p, mat);
+ }
+
+ if (paintModes & VG_STROKE_PATH){
+ /* 8.7.5: "line width less than or equal to 0 prevents stroking from
+ * taking place."*/
+ if (ctx->state.vg.stroke.line_width.f <= 0)
+ return;
+ shader_set_paint(ctx->shader, ctx->state.vg.stroke_paint);
+ shader_bind(ctx->shader);
+ path_stroke(p);
+ }
+}
+
+void path_fill(struct path *p, struct matrix *mat)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct polygon_array *polygon_array = path_get_fill_polygons(p, mat);
+ struct array *polys;
+
+ if (!polygon_array)
+ return;
+
+ polys = polygon_array->array;
+ if (!polys || !polys->num_elements)
+ return;
+
+ polygon_array_fill(polygon_array, ctx);
+}
+
+void path_stroke(struct path *p)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct matrix *mat = &ctx->state.vg.path_user_to_surface_matrix;
+ VGFillRule old_fill = ctx->state.vg.fill_rule;
+ struct matrix identity;
+ struct path *stroke;
+
+ matrix_load_identity(&identity);
+ stroke = path_create_stroke(p, &identity);
+ if (stroke && !path_is_empty(stroke)) {
+ ctx->state.vg.fill_rule = VG_NON_ZERO;
+
+ path_fill(stroke, mat);
+
+ ctx->state.vg.fill_rule = old_fill;
+ }
+}
+
+void path_move_to(struct path *p, float x, float y)
+{
+ VGubyte segment = VG_MOVE_TO_ABS;
+ VGubyte common_data[sizeof(VGfloat) * 2];
+ VGfloat data[2] = {x, y};
+
+ vg_float_to_datatype(p->datatype, common_data, data, 2);
+ path_append_data(p, 1, &segment, common_data);
+}
+
+void path_line_to(struct path *p, float x, float y)
+{
+ VGubyte segment = VG_LINE_TO_ABS;
+ VGubyte common_data[sizeof(VGfloat) * 2];
+ VGfloat data[2] = {x, y};
+
+ vg_float_to_datatype(p->datatype, common_data, data, 2);
+
+ path_append_data(p, 1, &segment, common_data);
+}
+
+void path_cubic_to(struct path *p, float px1, float py1,
+ float px2, float py2,
+ float x, float y)
+{
+ VGubyte segment = VG_CUBIC_TO_ABS;
+ VGubyte common_data[sizeof(VGfloat) * 6];
+ VGfloat data[6];
+
+ data[0] = px1; data[1] = py1;
+ data[2] = px2; data[3] = py2;
+ data[4] = x; data[5] = y;
+
+ vg_float_to_datatype(p->datatype, common_data, data, 6);
+
+ path_append_data(p, 1, &segment, common_data);
+}
+
+static INLINE void line_bounds(VGfloat *line /*x1,y1,x2,y2*/,
+ VGfloat *bounds)
+{
+ bounds[0] = MIN2(line[0], line[2]);
+ bounds[1] = MIN2(line[1], line[3]);
+ bounds[2] = MAX2(line[0], line[2]) - bounds[0];
+ bounds[3] = MAX2(line[1], line[3]) - bounds[1];
+}
+
+static INLINE void unite_bounds(VGfloat *bounds,
+ VGfloat *el)
+{
+ VGfloat cx1, cy1, cx2, cy2;
+ VGfloat nx1, ny1, nx2, ny2;
+
+ cx1 = bounds[0];
+ cy1 = bounds[1];
+ cx2 = bounds[0] + bounds[2];
+ cy2 = bounds[1] + bounds[3];
+
+ nx1 = el[0];
+ ny1 = el[1];
+ nx2 = el[0] + el[2];
+ ny2 = el[1] + el[3];
+
+ bounds[0] = MIN2(cx1, nx1);
+ bounds[1] = MIN2(cy1, ny1);
+ bounds[2] = MAX2(cx2, nx2) - bounds[0];
+ bounds[3] = MAX2(cy2, ny2) - bounds[1];
+}
+
+static INLINE void set_bounds(VGfloat *bounds,
+ VGfloat *element_bounds,
+ VGboolean *initialized)
+{
+ if (!(*initialized)) {
+ memcpy(bounds, element_bounds, 4 * sizeof(VGfloat));
+ *initialized = VG_TRUE;
+ } else
+ unite_bounds(bounds, element_bounds);
+}
+
+void path_bounding_rect(struct path *p, float *x, float *y,
+ float *w, float *h)
+{
+ VGint i;
+ VGfloat coords[8];
+ struct path_iter_data iter;
+ VGint num_coords;
+ VGfloat bounds[4];
+ VGfloat element_bounds[4];
+ VGfloat ox, oy;
+ VGboolean bounds_inited = VG_FALSE;
+
+ memset(&iter, 0, sizeof(struct path_iter_data));
+ memset(&bounds, 0, sizeof(bounds));
+
+ if (!p->num_segments) {
+ bounds[2] = -1;
+ bounds[3] = -1;
+ }
+
+
+ iter.path = p;
+ iter.coords = p->control_points->data;
+
+ for (i = 0; i < p->num_segments; ++i) {
+ VGubyte segment;
+ iter.segment = ((VGubyte*)(p->segments->data))[i];
+
+ ox = iter.ox;
+ oy = iter.oy;
+
+ segment = normalize_coords(&iter, &num_coords, coords);
+
+ switch(segment) {
+ case VG_CLOSE_PATH:
+ case VG_MOVE_TO_ABS:
+ break;
+ case VG_LINE_TO_ABS: {
+ VGfloat line[4] = {ox, oy, coords[0], coords[1]};
+ line_bounds(line, element_bounds);
+ set_bounds(bounds, element_bounds, &bounds_inited);
+ }
+ break;
+ case VG_CUBIC_TO_ABS: {
+ struct bezier bezier;
+ bezier_init(&bezier, ox, oy,
+ coords[0], coords[1],
+ coords[2], coords[3],
+ coords[4], coords[5]);
+ bezier_exact_bounds(&bezier, element_bounds);
+ set_bounds(bounds, element_bounds, &bounds_inited);
+ }
+ break;
+ case VG_SCCWARC_TO:
+ case VG_SCWARC_TO:
+ case VG_LCCWARC_TO:
+ case VG_LCWARC_TO: {
+ struct arc arc;
+ struct matrix identity;
+ struct path *path = path_create(VG_PATH_DATATYPE_F,
+ 1, 0, 0, 0, VG_PATH_CAPABILITY_ALL);
+
+ matrix_load_identity(&identity);
+ arc_init(&arc, segment,
+ ox, oy, coords[3], coords[4],
+ coords[0], coords[1], coords[2]);
+
+ arc_to_path(&arc, path, &identity);
+
+ path_bounding_rect(path, element_bounds + 0, element_bounds + 1,
+ element_bounds + 2, element_bounds + 3);
+ set_bounds(bounds, element_bounds, &bounds_inited);
+ }
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ *x = bounds[0];
+ *y = bounds[1];
+ *w = bounds[2];
+ *h = bounds[3];
+}
+
+float path_length(struct path *p, int start_segment, int num_segments)
+{
+ VGint i;
+ VGfloat coords[8];
+ struct path_iter_data iter;
+ VGint num_coords;
+ VGfloat length = 0;
+ VGfloat ox, oy;
+ VGboolean in_range = VG_FALSE;
+
+ memset(&iter, 0, sizeof(struct path_iter_data));
+
+ iter.path = p;
+ iter.coords = p->control_points->data;
+
+ for (i = 0; i < (start_segment + num_segments); ++i) {
+ VGubyte segment;
+
+ iter.segment = ((VGubyte*)(p->segments->data))[i];
+
+ ox = iter.ox;
+ oy = iter.oy;
+
+ segment = normalize_coords(&iter, &num_coords, coords);
+
+ in_range = (i >= start_segment) && (i < (start_segment + num_segments));
+ if (!in_range)
+ continue;
+
+ switch(segment) {
+ case VG_MOVE_TO_ABS:
+ break;
+ case VG_CLOSE_PATH: {
+ VGfloat line[4] = {ox, oy, iter.sx, iter.sy};
+ length += line_lengthv(line);
+ }
+ break;
+ case VG_LINE_TO_ABS: {
+ VGfloat line[4] = {ox, oy, coords[0], coords[1]};
+ length += line_lengthv(line);
+ }
+ break;
+ case VG_CUBIC_TO_ABS: {
+ struct bezier bezier;
+ bezier_init(&bezier, ox, oy,
+ coords[0], coords[1],
+ coords[2], coords[3],
+ coords[4], coords[5]);
+ length += bezier_length(&bezier, BEZIER_DEFAULT_ERROR);
+ }
+ break;
+ case VG_SCCWARC_TO:
+ case VG_SCWARC_TO:
+ case VG_LCCWARC_TO:
+ case VG_LCWARC_TO: {
+ struct arc arc;
+ struct matrix identity;
+ struct path *path = path_create(VG_PATH_DATATYPE_F,
+ 1, 0, 0, 0, VG_PATH_CAPABILITY_ALL);
+
+ matrix_load_identity(&identity);
+ arc_init(&arc, segment,
+ ox, oy, coords[3], coords[4],
+ coords[0], coords[1], coords[2]);
+
+ arc_to_path(&arc, path, &identity);
+
+ length += path_length(path, 0, path_num_segments(path));
+ }
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ return length;
+}
+
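+/* Returns true if 'distance' along the path falls on the current segment,
+ * i.e. within [length, length + segment_length] with float tolerance; the
+ * first clause also accepts distance <= 0 at the very start of the path,
+ * where length is still zero. */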
+static INLINE VGboolean point_on_current_segment(VGfloat distance,
+ VGfloat length,
+ VGfloat segment_length)
+{
+ return
+ (((floatIsZero(distance) || distance < 0) && floatIsZero(length)) ||
+ ((distance > length || floatsEqual(distance, length)) &&
+ (floatsEqual(distance, length + segment_length) ||
+ distance < (length + segment_length))));
+}
+
+static VGboolean path_point_segment(struct path_iter_data iter,
+ struct path_iter_data prev_iter,
+ VGfloat coords[8],
+ VGfloat distance,
+ VGfloat length, VGfloat *current_length,
+ VGfloat *point, VGfloat *normal)
+{
+ switch (iter.segment) {
+ case VG_MOVE_TO_ABS:
+ break;
+ case VG_CLOSE_PATH: {
+ VGfloat line[4] = {prev_iter.ox, prev_iter.oy, iter.sx, iter.sy};
+ VGboolean on_current_segment = VG_FALSE;
+ *current_length = line_lengthv(line);
+ on_current_segment = point_on_current_segment(distance,
+ length,
+ *current_length);
+ if (on_current_segment) {
+ VGfloat at = (distance - length) / line_lengthv(line);
+ line_normal_vector(line, normal);
+ line_point_at(line, at, point);
+ return VG_TRUE;
+ }
+ }
+ break;
+ case VG_LINE_TO_ABS: {
+ VGfloat line[4] = {prev_iter.ox, prev_iter.oy, coords[0], coords[1]};
+ VGboolean on_current_segment = VG_FALSE;
+ *current_length = line_lengthv(line);
+ on_current_segment = point_on_current_segment(distance,
+ length,
+ *current_length);
+ if (on_current_segment) {
+ VGfloat at = (distance - length) / line_lengthv(line);
+ line_normal_vector(line, normal);
+ line_point_at(line, at, point);
+ return VG_TRUE;
+ }
+ }
+ break;
+ case VG_CUBIC_TO_ABS: {
+ struct bezier bezier;
+ bezier_init(&bezier, prev_iter.ox, prev_iter.oy,
+ coords[0], coords[1],
+ coords[2], coords[3],
+ coords[4], coords[5]);
+ *current_length = bezier_length(&bezier, BEZIER_DEFAULT_ERROR);
+ if (point_on_current_segment(distance, length, *current_length)) {
+ bezier_point_at_length(&bezier, distance - length,
+ point, normal);
+ return VG_TRUE;
+ }
+ }
+ break;
+ case VG_SCCWARC_TO:
+ case VG_SCWARC_TO:
+ case VG_LCCWARC_TO:
+ case VG_LCWARC_TO: {
+ struct arc arc;
+ struct matrix identity;
+ struct path *path = path_create(VG_PATH_DATATYPE_F,
+ 1, 0, 0, 0, VG_PATH_CAPABILITY_ALL);
+
+ matrix_load_identity(&identity);
+ arc_init(&arc, iter.segment,
+ prev_iter.ox, prev_iter.oy, coords[3], coords[4],
+ coords[0], coords[1], coords[2]);
+
+ arc_to_path(&arc, path, &identity);
+
+ *current_length = path_length(path, 0, path_num_segments(path));
+ if (point_on_current_segment(distance, length, *current_length)) {
+ path_point(path, 0, path_num_segments(path),
+ distance - length, point, normal);
+ return VG_TRUE;
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+ return VG_FALSE;
+}
+
+void path_point(struct path *p, VGint start_segment, VGint num_segments,
+ VGfloat distance, VGfloat *point, VGfloat *normal)
+{
+ VGint i;
+ VGfloat coords[8];
+ struct path_iter_data iter, prev_iter;
+ VGint num_coords;
+ VGfloat length = 0;
+ VGfloat current_length = 0;
+
+ memset(&iter, 0, sizeof(struct path_iter_data));
+ memset(&prev_iter, 0, sizeof(struct path_iter_data));
+
+ point[0] = 0;
+ point[1] = 0;
+
+ normal[0] = 0;
+ normal[1] = -1;
+
+ iter.path = p;
+ iter.coords = p->control_points->data;
+ if (distance < 0)
+ distance = 0;
+
+ for (i = 0; i < (start_segment + num_segments); ++i) {
+ VGboolean outside_range = (i < start_segment ||
+ i >= (start_segment + num_segments));
+
+ prev_iter = iter;
+
+ iter.segment = ((VGubyte*)(p->segments->data))[i];
+ iter.segment = normalize_coords(&iter, &num_coords, coords);
+
+ if (outside_range)
+ continue;
+
+ if (path_point_segment(iter, prev_iter, coords,
+ distance, length, &current_length,
+ point, normal))
+ return;
+
+ length += current_length;
+ }
+
+ /*
+ * OpenVG 1.0 - 8.6.11 vgPointAlongPath
+ *
+ * If distance is greater than or equal to the path length
+ * (i.e., the value returned by vgPathLength when called with the same
+ * startSegment and numSegments parameters), the visual ending point of
+ * the path is used.
+ */
+ {
+ switch (iter.segment) {
+ case VG_MOVE_TO_ABS:
+ break;
+ case VG_CLOSE_PATH: {
+ VGfloat line[4] = {prev_iter.ox, prev_iter.oy, iter.sx, iter.sy};
+ line_normal_vector(line, normal);
+ line_point_at(line, 1.f, point);
+ }
+ break;
+ case VG_LINE_TO_ABS: {
+ VGfloat line[4] = {prev_iter.ox, prev_iter.oy, coords[0], coords[1]};
+ line_normal_vector(line, normal);
+ line_point_at(line, 1.f, point);
+ }
+ break;
+ case VG_CUBIC_TO_ABS: {
+ struct bezier bezier;
+ bezier_init(&bezier, prev_iter.ox, prev_iter.oy,
+ coords[0], coords[1],
+ coords[2], coords[3],
+ coords[4], coords[5]);
+ bezier_point_at_t(&bezier, 1.f, point, normal);
+ }
+ break;
+ case VG_SCCWARC_TO:
+ case VG_SCWARC_TO:
+ case VG_LCCWARC_TO:
+ case VG_LCWARC_TO: {
+ struct arc arc;
+ struct matrix identity;
+ struct path *path = path_create(VG_PATH_DATATYPE_F,
+ 1, 0, 0, 0, VG_PATH_CAPABILITY_ALL);
+
+ matrix_load_identity(&identity);
+ arc_init(&arc, iter.segment,
+ prev_iter.ox, prev_iter.oy, coords[3], coords[4],
+ coords[0], coords[1], coords[2]);
+
+ arc_to_path(&arc, path, &identity);
+
+ path_point(path, 0, path_num_segments(path),
+ /* pass 2 * length so the distance is guaranteed past the arc's end */
+ 2 * path_length(path, 0, path_num_segments(path)),
+ point, normal);
+ }
+ break;
+ default:
+ assert(0);
+ }
+ }
+}
+
+VGboolean path_is_empty(struct path *p)
+{
+ return p->segments->num_elements == 0;
+}
diff --git a/src/gallium/state_trackers/vega/path.h b/src/gallium/state_trackers/vega/path.h
new file mode 100644
index 0000000000..e34538b736
--- /dev/null
+++ b/src/gallium/state_trackers/vega/path.h
@@ -0,0 +1,126 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _PATH_H
+#define _PATH_H
+
+#include "VG/openvg.h"
+
+struct path;
+struct polygon;
+struct matrix;
+
+enum fill_rule {
+ ODD_EVEN_FILL,
+ WINDING_FILL
+};
+
+
+struct path_for_each_data {
+ VGubyte segment;
+ /* all coords are absolute, even if segment is relative */
+ const VGfloat *coords;
+ VGfloat sx, sy, ox, oy, px, py;
+ void *user_data;
+};
+
+typedef VGboolean (*path_for_each_cb)(struct path *p,
+ struct path_for_each_data *data);
+
+
+struct path *path_create(VGPathDatatype dt, VGfloat scale, VGfloat bias,
+ VGint segmentCapacityHint,
+ VGint coordCapacityHint,
+ VGbitfield capabilities);
+void path_destroy(struct path *p);
+
+VGbitfield path_capabilities(struct path *p);
+void path_set_capabilities(struct path *p, VGbitfield bf);
+
+void path_append_data(struct path *p,
+ VGint numSegments,
+ const VGubyte * pathSegments,
+ const void * pathData);
+
+void path_append_path(struct path *dst,
+ struct path *src);
+
+VGint path_num_segments(struct path *p);
+
+void path_bounding_rect(struct path *p, float *x, float *y,
+ float *w, float *h);
+float path_length(struct path *p, int start_segment, int num_segments);
+
+void path_set_fill_rule(enum fill_rule fill);
+enum fill_rule path_fill_rule(enum fill_rule fill);
+
+VGboolean path_is_empty(struct path *p);
+
+VGbyte path_datatype_size(struct path *p);
+
+VGPathDatatype path_datatype(struct path *p);
+VGfloat path_scale(struct path *p);
+VGfloat path_bias(struct path *p);
+VGint path_num_coords(struct path *p);
+
+void path_modify_coords(struct path *p,
+ VGint startIndex,
+ VGint numSegments,
+ const void * pathData);
+
+struct path *path_create_stroke(struct path *p,
+ struct matrix *m);
+
+void path_for_each_segment(struct path *path,
+ path_for_each_cb cb,
+ void *user_data);
+
+void path_transform(struct path *dst, struct path *src);
+VGboolean path_interpolate(struct path *dst,
+ struct path *start, struct path *end,
+ VGfloat amount);
+
+void path_clear(struct path *p, VGbitfield capabilities);
+void path_render(struct path *p, VGbitfield paintModes);
+void path_fill(struct path *p, struct matrix *mat);
+void path_stroke(struct path *p);
+
+void path_move_to(struct path *p, float x, float y);
+void path_line_to(struct path *p, float x, float y);
+void path_cubic_to(struct path *p, float px1, float py1,
+ float px2, float py2,
+ float x, float y);
+
+void path_point(struct path *p, VGint startSegment, VGint numSegments,
+ VGfloat distance, VGfloat *point, VGfloat *normal);
+
+
+
+void vg_float_to_datatype(VGPathDatatype datatype,
+ VGubyte *common_data,
+ const VGfloat *data,
+ VGint num_coords);
+#endif
diff --git a/src/gallium/state_trackers/vega/path_utils.h b/src/gallium/state_trackers/vega/path_utils.h
new file mode 100644
index 0000000000..c2b3221dc5
--- /dev/null
+++ b/src/gallium/state_trackers/vega/path_utils.h
@@ -0,0 +1,109 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef PATH_UTILS_H
+#define PATH_UTILS_H
+
+#include "VG/openvg.h"
+
+#define SEGMENT_COMMAND(command) /* Extract segment type */ \
+ ((command) & 0x1e)
+#define SEGMENT_ABS_REL(command) /* Extract absolute/relative bit */ \
+ ((command) & 0x1)
+
+static INLINE VGint size_for_datatype(VGPathDatatype datatype)
+{
+ switch(datatype) {
+ case VG_PATH_DATATYPE_S_8:
+ return 1;
+ case VG_PATH_DATATYPE_S_16:
+ return 2;
+ case VG_PATH_DATATYPE_S_32:
+ return 4;
+ case VG_PATH_DATATYPE_F:
+ return 4;
+ default:
+ assert(!"unknown datatype");
+ }
+ return 0;
+}
+
+static INLINE VGint num_elements_for_segments(const VGubyte *segments,
+ VGint num_segments)
+{
+ VGint i;
+ VGint count = 0;
+
+ for (i = 0; i < num_segments; ++i) {
+ VGubyte segment = segments[i];
+ VGint command = SEGMENT_COMMAND(segment);
+ switch(command) {
+ case VG_CLOSE_PATH:
+ break;
+ case VG_MOVE_TO:
+ count += 2;
+ break;
+ case VG_LINE_TO:
+ count += 2;
+ break;
+ case VG_HLINE_TO:
+ count += 1;
+ break;
+ case VG_VLINE_TO:
+ count += 1;
+ break;
+ case VG_QUAD_TO:
+ count += 4;
+ break;
+ case VG_CUBIC_TO:
+ count += 6;
+ break;
+ case VG_SQUAD_TO:
+ count += 2;
+ break;
+ case VG_SCUBIC_TO:
+ count += 4;
+ break;
+ case VG_SCCWARC_TO:
+ count += 5;
+ break;
+ case VG_SCWARC_TO:
+ count += 5;
+ break;
+ case VG_LCCWARC_TO:
+ count += 5;
+ break;
+ case VG_LCWARC_TO:
+ count += 5;
+ break;
+ default:
+ assert(!"Unknown segment!");
+ }
+ }
+ return count;
+}
+
+#endif
diff --git a/src/gallium/state_trackers/vega/polygon.c b/src/gallium/state_trackers/vega/polygon.c
new file mode 100644
index 0000000000..b6d282d803
--- /dev/null
+++ b/src/gallium/state_trackers/vega/polygon.c
@@ -0,0 +1,550 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "polygon.h"
+
+#include "matrix.h" /*for floatsEqual*/
+#include "vg_context.h"
+#include "vg_state.h"
+#include "paint.h"
+#include "renderer.h"
+#include "util_array.h"
+#include "VG/openvg.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+#include "pipe/p_screen.h"
+
+#include "util/u_draw_quad.h"
+#include "util/u_math.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+#define DEBUG_POLYGON 0
+
+#define COMPONENTS 2
+
+struct polygon
+{
+ VGfloat *data;
+ VGint size;
+
+ VGint num_verts;
+
+ VGboolean dirty;
+ struct pipe_buffer *vbuf;
+ struct pipe_screen *screen;
+};
+
+static float *ptr_to_vertex(float *data, int idx)
+{
+ return data + (idx * COMPONENTS);
+}
+
+#if 0
+static void polygon_print(struct polygon *poly)
+{
+ int i;
+ float *vert;
+ debug_printf("Polygon %p, size = %d\n", poly, poly->num_verts);
+ for (i = 0; i < poly->num_verts; ++i) {
+ vert = ptr_to_vertex(poly->data, i);
+ debug_printf("%f, %f, ", vert[0], vert[1]);
+ }
+ debug_printf("\nend\n");
+}
+#endif
+
+
+struct polygon * polygon_create(int size)
+{
+ struct polygon *poly = (struct polygon*)malloc(sizeof(struct polygon));
+
+ poly->data = malloc(sizeof(float) * COMPONENTS * size);
+ poly->size = size;
+ poly->num_verts = 0;
+ poly->dirty = VG_TRUE;
+ poly->vbuf = NULL;
+
+ return poly;
+}
+
+struct polygon * polygon_create_from_data(float *data, int size)
+{
+ struct polygon *poly = polygon_create(size);
+
+ memcpy(poly->data, data, sizeof(float) * COMPONENTS * size);
+ poly->num_verts = size;
+ poly->dirty = VG_TRUE;
+ poly->vbuf = NULL;
+
+ return poly;
+}
+
+void polygon_destroy(struct polygon *poly)
+{
+ if (poly->vbuf)
+ pipe_buffer_reference(&poly->vbuf, NULL);
+
+ free(poly->data);
+ free(poly);
+}
+
+void polygon_resize(struct polygon *poly, int new_size)
+{
+ float *data = (float*)malloc(sizeof(float) * COMPONENTS * new_size);
+ int size = MIN2(sizeof(float) * COMPONENTS * new_size,
+ sizeof(float) * COMPONENTS * poly->size);
+ memcpy(data, poly->data, size);
+ free(poly->data);
+ poly->data = data;
+ poly->size = new_size;
+ poly->dirty = VG_TRUE;
+}
+
+int polygon_size(struct polygon *poly)
+{
+ return poly->size;
+}
+
+int polygon_vertex_count(struct polygon *poly)
+{
+ return poly->num_verts;
+}
+
+float * polygon_data(struct polygon *poly)
+{
+ return poly->data;
+}
+
+void polygon_vertex_append(struct polygon *p,
+ float x, float y)
+{
+ float *vert;
+#if DEBUG_POLYGON
+ debug_printf("Append vertex [%f, %f]\n", x, y);
+#endif
+ if (p->num_verts >= p->size) {
+ polygon_resize(p, p->size * 2);
+ }
+
+ vert = ptr_to_vertex(p->data, p->num_verts);
+ vert[0] = x;
+ vert[1] = y;
+ ++p->num_verts;
+ p->dirty = VG_TRUE;
+}
+
+void polygon_set_vertex(struct polygon *p, int idx,
+ float x, float y)
+{
+ float *vert;
+ if (idx >= p->num_verts) {
+ /*fixme: error reporting*/
+ abort();
+ return;
+ }
+
+ vert = ptr_to_vertex(p->data, idx);
+ vert[0] = x;
+ vert[1] = y;
+ p->dirty = VG_TRUE;
+}
+
+void polygon_vertex(struct polygon *p, int idx,
+ float *vertex)
+{
+ float *vert;
+ if (idx >= p->num_verts) {
+ /*fixme: error reporting*/
+ abort();
+ return;
+ }
+
+ vert = ptr_to_vertex(p->data, idx);
+ vertex[0] = vert[0];
+ vertex[1] = vert[1];
+}
+
+void polygon_bounding_rect(struct polygon *p,
+ float *rect)
+{
+ int i;
+ float minx, miny, maxx, maxy;
+ float *vert = ptr_to_vertex(p->data, 0);
+ minx = vert[0];
+ maxx = vert[0];
+ miny = vert[1];
+ maxy = vert[1];
+
+ for (i = 1; i < p->num_verts; ++i) {
+ vert = ptr_to_vertex(p->data, i);
+ minx = MIN2(vert[0], minx);
+ miny = MIN2(vert[1], miny);
+
+ maxx = MAX2(vert[0], maxx);
+ maxy = MAX2(vert[1], maxy);
+ }
+
+ rect[0] = minx;
+ rect[1] = miny;
+ rect[2] = maxx - minx;
+ rect[3] = maxy - miny;
+}
+
+int polygon_contains_point(struct polygon *p,
+ float x, float y)
+{
+ return 0;
+}
+
+void polygon_append_polygon(struct polygon *dst,
+ struct polygon *src)
+{
+ if (dst->num_verts + src->num_verts >= dst->size) {
+ polygon_resize(dst, dst->num_verts + src->num_verts * 1.5);
+ }
+ memcpy(ptr_to_vertex(dst->data, dst->num_verts),
+ src->data, src->num_verts * COMPONENTS * sizeof(VGfloat));
+ dst->num_verts += src->num_verts;
+}
+
+VGboolean polygon_is_closed(struct polygon *p)
+{
+ VGfloat start[2], end[2];
+
+ polygon_vertex(p, 0, start);
+ polygon_vertex(p, p->num_verts - 1, end);
+
+ return floatsEqual(start[0], end[0]) && floatsEqual(start[1], end[1]);
+}
+
+static void set_blend_for_fill(struct pipe_blend_state *blend)
+{
+ memset(blend, 0, sizeof(struct pipe_blend_state));
+ blend->colormask = 0; /* disable color writes */
+
+ blend->rgb_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend->alpha_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend->rgb_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
+ blend->alpha_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
+}
+
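+/* (Re)uploads the polygon's vertex data as a user buffer when it is dirty
+ * and draws it as a single triangle fan; the blend/stencil state installed
+ * by the caller decides how the fan updates the stencil buffer. */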
+static void draw_polygon(struct vg_context *ctx,
+ struct polygon *poly)
+{
+ int vert_size;
+ struct pipe_context *pipe;
+ struct pipe_vertex_buffer vbuffer;
+ struct pipe_vertex_element velement;
+
+ vert_size = poly->num_verts * COMPONENTS * sizeof(float);
+
+ /*polygon_print(poly);*/
+
+ pipe = ctx->pipe;
+
+ if (poly->vbuf == NULL || poly->dirty) {
+ if (poly->vbuf) {
+ pipe_buffer_reference(&poly->vbuf,
+ NULL);
+ }
+ poly->screen = pipe->screen;
+ poly->vbuf= pipe_user_buffer_create(poly->screen,
+ poly->data,
+ vert_size);
+ poly->dirty = VG_FALSE;
+ }
+
+
+ /* tell pipe about the vertex buffer */
+ memset(&vbuffer, 0, sizeof(vbuffer));
+ vbuffer.buffer = poly->vbuf;
+ vbuffer.stride = COMPONENTS * sizeof(float); /* vertex size */
+ vbuffer.buffer_offset = 0;
+ vbuffer.max_index = poly->num_verts - 1;
+ pipe->set_vertex_buffers(pipe, 1, &vbuffer);
+
+ /* tell pipe about the vertex attributes */
+ velement.src_offset = 0;
+ velement.vertex_buffer_index = 0;
+ velement.src_format = PIPE_FORMAT_R32G32_FLOAT;
+ velement.nr_components = COMPONENTS;
+ pipe->set_vertex_elements(pipe, 1, &velement);
+
+ /* draw */
+ pipe->draw_arrays(pipe, PIPE_PRIM_TRIANGLE_FAN,
+ 0, poly->num_verts);
+}
+
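+/* Stencil-then-cover fill: the polygon fan is first rasterized with color
+ * writes disabled, toggling (even/odd: INVERT) or counting (non-zero:
+ * INCR_WRAP/DECR_WRAP, either two-sided or in two culled passes) stencil
+ * values, and a quad covering the polygon's bounds is then drawn with a
+ * NOTEQUAL-zero stencil test to write the actual color. */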
+void polygon_fill(struct polygon *poly, struct vg_context *ctx)
+{
+ struct pipe_depth_stencil_alpha_state dsa;
+ struct pipe_blend_state blend;
+ VGfloat bounds[4];
+ VGfloat min_x, min_y, max_x, max_y;
+ assert(poly);
+ polygon_bounding_rect(poly, bounds);
+ min_x = bounds[0];
+ min_y = bounds[1];
+ max_x = bounds[0] + bounds[2];
+ max_y = bounds[1] + bounds[3];
+
+#if DEBUG_POLYGON
+ debug_printf("Poly bounds are [%f, %f], [%f, %f]\n",
+ min_x, min_y, max_x, max_y);
+#endif
+
+ set_blend_for_fill(&blend);
+
+ memset(&dsa, 0, sizeof(struct pipe_depth_stencil_alpha_state));
+
+ cso_save_blend(ctx->cso_context);
+ cso_save_depth_stencil_alpha(ctx->cso_context);
+
+ dsa.stencil[0].enabled = 1;
+ if (ctx->state.vg.fill_rule == VG_EVEN_ODD) {
+ dsa.stencil[0].writemask = 1;
+ dsa.stencil[0].fail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[0].zfail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_INVERT;
+ dsa.stencil[0].func = PIPE_FUNC_ALWAYS;
+ dsa.stencil[0].ref_value = 0;
+ dsa.stencil[0].valuemask = ~0;
+
+ cso_set_blend(ctx->cso_context, &blend);
+ cso_set_depth_stencil_alpha(ctx->cso_context, &dsa);
+ draw_polygon(ctx, poly);
+ } else if (ctx->state.vg.fill_rule == VG_NON_ZERO) {
+ struct pipe_screen *screen = ctx->pipe->screen;
+
+ if (screen->get_param(screen, PIPE_CAP_TWO_SIDED_STENCIL)) {
+ /* front */
+ dsa.stencil[0].writemask = ~0;
+ dsa.stencil[0].fail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[0].zfail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_INCR_WRAP;
+ dsa.stencil[0].func = PIPE_FUNC_ALWAYS;
+ dsa.stencil[0].ref_value = 0;
+ dsa.stencil[0].valuemask = ~0;
+
+ /* back */
+ dsa.stencil[1].enabled = 1;
+ dsa.stencil[1].writemask = ~0;
+ dsa.stencil[1].fail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[1].zfail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[1].zpass_op = PIPE_STENCIL_OP_DECR_WRAP;
+ dsa.stencil[1].func = PIPE_FUNC_ALWAYS;
+ dsa.stencil[1].ref_value = 0;
+ dsa.stencil[1].valuemask = ~0;
+
+ cso_set_blend(ctx->cso_context, &blend);
+ cso_set_depth_stencil_alpha(ctx->cso_context, &dsa);
+ draw_polygon(ctx, poly);
+ } else {
+ struct pipe_rasterizer_state raster;
+
+ memcpy(&raster, &ctx->state.g3d.rasterizer, sizeof(struct pipe_rasterizer_state));
+
+ cso_save_rasterizer(ctx->cso_context);
+ dsa.stencil[0].func = PIPE_FUNC_ALWAYS;
+ dsa.stencil[0].ref_value = 0;
+ dsa.stencil[0].valuemask = ~0;
+
+ raster.cull_mode = raster.front_winding ^ PIPE_WINDING_BOTH;
+ dsa.stencil[0].fail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[0].zfail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_INCR_WRAP;
+
+ cso_set_blend(ctx->cso_context, &blend);
+ cso_set_depth_stencil_alpha(ctx->cso_context, &dsa);
+ cso_set_rasterizer(ctx->cso_context, &raster);
+ draw_polygon(ctx, poly);
+
+ raster.cull_mode = raster.front_winding;
+ dsa.stencil[0].fail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[0].zfail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_DECR_WRAP;
+ cso_set_depth_stencil_alpha(ctx->cso_context, &dsa);
+ cso_set_rasterizer(ctx->cso_context, &raster);
+ draw_polygon(ctx, poly);
+
+ cso_restore_rasterizer(ctx->cso_context);
+ }
+ }
+
+ /* restore color writes */
+ cso_restore_blend(ctx->cso_context);
+ /* setup stencil ops */
+ dsa.stencil[0].func = PIPE_FUNC_NOTEQUAL;
+ dsa.stencil[0].fail_op = PIPE_STENCIL_OP_REPLACE;
+ dsa.stencil[0].zfail_op = PIPE_STENCIL_OP_REPLACE;
+ dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_REPLACE;
+ dsa.stencil[0].ref_value = 0;
+ dsa.stencil[0].valuemask = dsa.stencil[0].writemask;
+ dsa.stencil[1].enabled = 0;
+ memcpy(&dsa.depth, &ctx->state.g3d.dsa.depth,
+ sizeof(struct pipe_depth_state));
+ cso_set_depth_stencil_alpha(ctx->cso_context, &dsa);
+
+ /* render the quad to propagate the rendering from stencil */
+ renderer_draw_quad(ctx->renderer, min_x, min_y,
+ max_x, max_y, 0.0f/*depth should be disabled*/);
+
+ cso_restore_depth_stencil_alpha(ctx->cso_context);
+}
+
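+/* Same stencil-then-cover approach as polygon_fill(), but every subpath
+ * polygon of the path is accumulated into the stencil buffer before the
+ * single covering quad is drawn, which is what makes the fill rules work
+ * across subpaths. */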
+void polygon_array_fill(struct polygon_array *polyarray, struct vg_context *ctx)
+{
+ struct array *polys = polyarray->array;
+ struct pipe_depth_stencil_alpha_state dsa;
+ struct pipe_blend_state blend;
+ VGfloat min_x = polyarray->min_x;
+ VGfloat min_y = polyarray->min_y;
+ VGfloat max_x = polyarray->max_x;
+ VGfloat max_y = polyarray->max_y;
+ VGint i;
+
+
+#if DEBUG_POLYGON
+ debug_printf("%s: Poly bounds are [%f, %f], [%f, %f]\n",
+ __FUNCTION__,
+ min_x, min_y, max_x, max_y);
+#endif
+
+ set_blend_for_fill(&blend);
+
+ memset(&dsa, 0, sizeof(struct pipe_depth_stencil_alpha_state));
+
+ cso_save_blend(ctx->cso_context);
+ cso_save_depth_stencil_alpha(ctx->cso_context);
+
+ dsa.stencil[0].enabled = 1;
+ if (ctx->state.vg.fill_rule == VG_EVEN_ODD) {
+ dsa.stencil[0].writemask = 1;
+ dsa.stencil[0].fail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[0].zfail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_INVERT;
+ dsa.stencil[0].func = PIPE_FUNC_ALWAYS;
+ dsa.stencil[0].ref_value = 0;
+ dsa.stencil[0].valuemask = ~0;
+
+ cso_set_blend(ctx->cso_context, &blend);
+ cso_set_depth_stencil_alpha(ctx->cso_context, &dsa);
+ for (i = 0; i < polys->num_elements; ++i) {
+ struct polygon *poly = (((struct polygon**)polys->data)[i]);
+ draw_polygon(ctx, poly);
+ }
+ } else if (ctx->state.vg.fill_rule == VG_NON_ZERO) {
+ struct pipe_screen *screen = ctx->pipe->screen;
+
+ if (screen->get_param(screen, PIPE_CAP_TWO_SIDED_STENCIL)) {
+ /* front */
+ dsa.stencil[0].writemask = ~0;
+ dsa.stencil[0].fail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[0].zfail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_INCR_WRAP;
+ dsa.stencil[0].func = PIPE_FUNC_ALWAYS;
+ dsa.stencil[0].ref_value = 0;
+ dsa.stencil[0].valuemask = ~0;
+
+ /* back */
+ dsa.stencil[1].enabled = 1;
+ dsa.stencil[1].writemask = ~0;
+ dsa.stencil[1].fail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[1].zfail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[1].zpass_op = PIPE_STENCIL_OP_DECR_WRAP;
+ dsa.stencil[1].func = PIPE_FUNC_ALWAYS;
+ dsa.stencil[1].ref_value = 0;
+ dsa.stencil[1].valuemask = ~0;
+
+ cso_set_blend(ctx->cso_context, &blend);
+ cso_set_depth_stencil_alpha(ctx->cso_context, &dsa);
+ for (i = 0; i < polys->num_elements; ++i) {
+ struct polygon *poly = (((struct polygon**)polys->data)[i]);
+ draw_polygon(ctx, poly);
+ }
+ } else {
+ struct pipe_rasterizer_state raster;
+
+ memcpy(&raster, &ctx->state.g3d.rasterizer, sizeof(struct pipe_rasterizer_state));
+
+ cso_save_rasterizer(ctx->cso_context);
+ dsa.stencil[0].func = PIPE_FUNC_ALWAYS;
+ dsa.stencil[0].ref_value = 0;
+ dsa.stencil[0].valuemask = ~0;
+
+ raster.cull_mode = raster.front_winding ^ PIPE_WINDING_BOTH;
+ dsa.stencil[0].fail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[0].zfail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_INCR_WRAP;
+
+ cso_set_blend(ctx->cso_context, &blend);
+ cso_set_depth_stencil_alpha(ctx->cso_context, &dsa);
+ cso_set_rasterizer(ctx->cso_context, &raster);
+ for (i = 0; i < polys->num_elements; ++i) {
+ struct polygon *poly = (((struct polygon**)polys->data)[i]);
+ draw_polygon(ctx, poly);
+ }
+
+ raster.cull_mode = raster.front_winding;
+ dsa.stencil[0].fail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[0].zfail_op = PIPE_STENCIL_OP_KEEP;
+ dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_DECR_WRAP;
+ cso_set_depth_stencil_alpha(ctx->cso_context, &dsa);
+ cso_set_rasterizer(ctx->cso_context, &raster);
+ for (i = 0; i < polys->num_elements; ++i) {
+ struct polygon *poly = (((struct polygon**)polys->data)[i]);
+ draw_polygon(ctx, poly);
+ }
+
+ cso_restore_rasterizer(ctx->cso_context);
+ }
+ }
+
+ /* restore color writes */
+ cso_restore_blend(ctx->cso_context);
+ /* setup stencil ops */
+ dsa.stencil[0].func = PIPE_FUNC_NOTEQUAL;
+ dsa.stencil[0].fail_op = PIPE_STENCIL_OP_REPLACE;
+ dsa.stencil[0].zfail_op = PIPE_STENCIL_OP_REPLACE;
+ dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_REPLACE;
+ dsa.stencil[0].ref_value = 0;
+ dsa.stencil[0].valuemask = dsa.stencil[0].writemask;
+ dsa.stencil[1].enabled = 0;
+ memcpy(&dsa.depth, &ctx->state.g3d.dsa.depth,
+ sizeof(struct pipe_depth_state));
+ cso_set_depth_stencil_alpha(ctx->cso_context, &dsa);
+
+ /* render the quad to propagate the rendering from stencil */
+ renderer_draw_quad(ctx->renderer, min_x, min_y,
+ max_x, max_y, 0.0f/*depth should be disabled*/);
+
+ cso_restore_depth_stencil_alpha(ctx->cso_context);
+}
diff --git a/src/gallium/state_trackers/vega/polygon.h b/src/gallium/state_trackers/vega/polygon.h
new file mode 100644
index 0000000000..22672b728e
--- /dev/null
+++ b/src/gallium/state_trackers/vega/polygon.h
@@ -0,0 +1,75 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef POLYGON_H
+#define POLYGON_H
+
+#include "VG/openvg.h"
+
+struct polygon;
+struct vg_context;
+struct vg_paint;
+struct array;
+
+struct polygon *polygon_create(int size);
+struct polygon *polygon_create_from_data(float *data, int size);
+void polygon_destroy(struct polygon *poly);
+
+void polygon_resize(struct polygon *poly, int new_size);
+int polygon_size(struct polygon *poly);
+
+int polygon_vertex_count(struct polygon *poly);
+float * polygon_data(struct polygon *poly);
+
+void polygon_vertex_append(struct polygon *p,
+ float x, float y);
+void polygon_append_polygon(struct polygon *dst,
+ struct polygon *src);
+void polygon_set_vertex(struct polygon *p, int idx,
+ float x, float y);
+void polygon_vertex(struct polygon *p, int idx,
+ float *vertex);
+
+void polygon_bounding_rect(struct polygon *p,
+ float *rect);
+int polygon_contains_point(struct polygon *p,
+ float x, float y);
+
+VGboolean polygon_is_closed(struct polygon *p);
+
+void polygon_fill(struct polygon *p, struct vg_context *pipe);
+
+/* TODO: make a file/module around this struct
+ */
+struct polygon_array {
+ struct array *array;
+ VGfloat min_x, max_x;
+ VGfloat min_y, max_y;
+};
+
+void polygon_array_fill(struct polygon_array *polyarray, struct vg_context *ctx);
+
+#endif
diff --git a/src/gallium/state_trackers/vega/renderer.c b/src/gallium/state_trackers/vega/renderer.c
new file mode 100644
index 0000000000..f7c5f2f0cd
--- /dev/null
+++ b/src/gallium/state_trackers/vega/renderer.c
@@ -0,0 +1,592 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "renderer.h"
+
+#include "vg_context.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+#include "pipe/p_screen.h"
+#include "pipe/p_shader_tokens.h"
+
+#include "util/u_draw_quad.h"
+#include "util/u_simple_shaders.h"
+#include "util/u_memory.h"
+
+#include "cso_cache/cso_context.h"
+
+struct renderer {
+ struct pipe_context *pipe;
+ struct vg_context *owner;
+
+ struct cso_context *cso;
+
+ void *fs;
+
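+ /* 4 vertices, each with 2 attributes (position, texcoord) of 4 floats:
+ * x,y,z,w and s,t,r,q respectively */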
+ VGfloat vertices[4][2][4];
+};
+
+static void setup_shaders(struct renderer *ctx)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ /* fragment shader */
+ ctx->fs = util_make_fragment_tex_shader(pipe);
+}
+
+static struct pipe_buffer *
+setup_vertex_data(struct renderer *ctx,
+ float x0, float y0, float x1, float y1, float z)
+{
+ ctx->vertices[0][0][0] = x0;
+ ctx->vertices[0][0][1] = y0;
+ ctx->vertices[0][0][2] = z;
+ ctx->vertices[0][1][0] = 0.0f; /*s*/
+ ctx->vertices[0][1][1] = 0.0f; /*t*/
+
+ ctx->vertices[1][0][0] = x1;
+ ctx->vertices[1][0][1] = y0;
+ ctx->vertices[1][0][2] = z;
+ ctx->vertices[1][1][0] = 1.0f; /*s*/
+ ctx->vertices[1][1][1] = 0.0f; /*t*/
+
+ ctx->vertices[2][0][0] = x1;
+ ctx->vertices[2][0][1] = y1;
+ ctx->vertices[2][0][2] = z;
+ ctx->vertices[2][1][0] = 1.0f;
+ ctx->vertices[2][1][1] = 1.0f;
+
+ ctx->vertices[3][0][0] = x0;
+ ctx->vertices[3][0][1] = y1;
+ ctx->vertices[3][0][2] = z;
+ ctx->vertices[3][1][0] = 0.0f;
+ ctx->vertices[3][1][1] = 1.0f;
+
+ return pipe_user_buffer_create( ctx->pipe->screen,
+ ctx->vertices,
+ sizeof(ctx->vertices) );
+}
+
+static struct pipe_buffer *
+setup_vertex_data_tex(struct renderer *ctx,
+ float x0, float y0, float x1, float y1,
+ float s0, float t0, float s1, float t1,
+ float z)
+{
+ ctx->vertices[0][0][0] = x0;
+ ctx->vertices[0][0][1] = y0;
+ ctx->vertices[0][0][2] = z;
+ ctx->vertices[0][1][0] = s0; /*s*/
+ ctx->vertices[0][1][1] = t0; /*t*/
+
+ ctx->vertices[1][0][0] = x1;
+ ctx->vertices[1][0][1] = y0;
+ ctx->vertices[1][0][2] = z;
+ ctx->vertices[1][1][0] = s1; /*s*/
+ ctx->vertices[1][1][1] = t0; /*t*/
+
+ ctx->vertices[2][0][0] = x1;
+ ctx->vertices[2][0][1] = y1;
+ ctx->vertices[2][0][2] = z;
+ ctx->vertices[2][1][0] = s1;
+ ctx->vertices[2][1][1] = t1;
+
+ ctx->vertices[3][0][0] = x0;
+ ctx->vertices[3][0][1] = y1;
+ ctx->vertices[3][0][2] = z;
+ ctx->vertices[3][1][0] = s0;
+ ctx->vertices[3][1][1] = t1;
+
+ return pipe_user_buffer_create( ctx->pipe->screen,
+ ctx->vertices,
+ sizeof(ctx->vertices) );
+}
+
+
+static struct pipe_buffer *
+setup_vertex_data_qtex(struct renderer *ctx,
+ float x0, float y0, float x1, float y1,
+ float x2, float y2, float x3, float y3,
+ float s0, float t0, float s1, float t1,
+ float z)
+{
+ ctx->vertices[0][0][0] = x0;
+ ctx->vertices[0][0][1] = y0;
+ ctx->vertices[0][0][2] = z;
+ ctx->vertices[0][1][0] = s0; /*s*/
+ ctx->vertices[0][1][1] = t0; /*t*/
+
+ ctx->vertices[1][0][0] = x1;
+ ctx->vertices[1][0][1] = y1;
+ ctx->vertices[1][0][2] = z;
+ ctx->vertices[1][1][0] = s1; /*s*/
+ ctx->vertices[1][1][1] = t0; /*t*/
+
+ ctx->vertices[2][0][0] = x2;
+ ctx->vertices[2][0][1] = y2;
+ ctx->vertices[2][0][2] = z;
+ ctx->vertices[2][1][0] = s1;
+ ctx->vertices[2][1][1] = t1;
+
+ ctx->vertices[3][0][0] = x3;
+ ctx->vertices[3][0][1] = y3;
+ ctx->vertices[3][0][2] = z;
+ ctx->vertices[3][1][0] = s0;
+ ctx->vertices[3][1][1] = t1;
+
+ return pipe_user_buffer_create( ctx->pipe->screen,
+ ctx->vertices,
+ sizeof(ctx->vertices) );
+}
+
+struct renderer * renderer_create(struct vg_context *owner)
+{
+ VGint i;
+ struct renderer *renderer = CALLOC_STRUCT(renderer);
+
+ if (!renderer)
+ return NULL;
+
+ renderer->owner = owner;
+ renderer->pipe = owner->pipe;
+ renderer->cso = owner->cso_context;
+
+ setup_shaders(renderer);
+
+ /* init vertex data that doesn't change */
+ for (i = 0; i < 4; i++) {
+ renderer->vertices[i][0][3] = 1.0f; /* w */
+ renderer->vertices[i][1][2] = 0.0f; /* r */
+ renderer->vertices[i][1][3] = 1.0f; /* q */
+ }
+
+ return renderer;
+}
+
+void renderer_destroy(struct renderer *ctx)
+{
+#if 0
+ if (ctx->fs) {
+ cso_delete_fragment_shader(ctx->cso, ctx->fs);
+ ctx->fs = NULL;
+ }
+#endif
+ free(ctx);
+}
+
+void renderer_draw_quad(struct renderer *r,
+ VGfloat x1, VGfloat y1,
+ VGfloat x2, VGfloat y2,
+ VGfloat depth)
+{
+ struct pipe_buffer *buf;
+
+ buf = setup_vertex_data(r, x1, y1, x2, y2, depth);
+
+ if (buf) {
+ util_draw_vertex_buffer(r->pipe, buf, 0,
+ PIPE_PRIM_TRIANGLE_FAN,
+ 4, /* verts */
+ 2); /* attribs/vert */
+
+ pipe_buffer_reference( &buf,
+ NULL );
+ }
+}
+
+void renderer_draw_texture(struct renderer *r,
+ struct pipe_texture *tex,
+ VGfloat x1offset, VGfloat y1offset,
+ VGfloat x2offset, VGfloat y2offset,
+ VGfloat x1, VGfloat y1,
+ VGfloat x2, VGfloat y2)
+{
+ struct pipe_context *pipe = r->pipe;
+ struct pipe_buffer *buf;
+ VGfloat s0, t0, s1, t1;
+
+ assert(tex->width[0] != 0);
+ assert(tex->height[0] != 0);
+
+ s0 = x1offset / tex->width[0];
+ s1 = x2offset / tex->width[0];
+ t0 = y1offset / tex->height[0];
+ t1 = y2offset / tex->height[0];
+
+ cso_save_vertex_shader(r->cso);
+ /* shaders */
+ cso_set_vertex_shader_handle(r->cso, vg_texture_vs(r->owner));
+
+ /* draw quad */
+ buf = setup_vertex_data_tex(r, x1, y1, x2, y2,
+ s0, t0, s1, t1, 0.0f);
+
+ if (buf) {
+ util_draw_vertex_buffer(pipe, buf, 0,
+ PIPE_PRIM_TRIANGLE_FAN,
+ 4, /* verts */
+ 2); /* attribs/vert */
+
+ pipe_buffer_reference( &buf,
+ NULL );
+ }
+
+ cso_restore_vertex_shader(r->cso);
+}
+
+void renderer_copy_texture(struct renderer *ctx,
+ struct pipe_texture *src,
+ VGfloat sx1, VGfloat sy1,
+ VGfloat sx2, VGfloat sy2,
+ struct pipe_texture *dst,
+ VGfloat dx1, VGfloat dy1,
+ VGfloat dx2, VGfloat dy2)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ struct pipe_buffer *buf;
+ struct pipe_surface *dst_surf = screen->get_tex_surface(
+ screen, dst, 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+ struct pipe_framebuffer_state fb;
+ float s0, t0, s1, t1;
+
+ assert(src->width[0] != 0);
+ assert(src->height[0] != 0);
+ assert(dst->width[0] != 0);
+ assert(dst->height[0] != 0);
+
+#if 0
+ debug_printf("copy texture [%f, %f, %f, %f], [%f, %f, %f, %f]\n",
+ sx1, sy1, sx2, sy2, dx1, dy1, dx2, dy2);
+#endif
+
+#if 1
+ s0 = sx1 / src->width[0];
+ s1 = sx2 / src->width[0];
+ t0 = sy1 / src->height[0];
+ t1 = sy2 / src->height[0];
+#else
+ s0 = 0;
+ s1 = 1;
+ t0 = 0;
+ t1 = 1;
+#endif
+
+ assert(screen->is_format_supported(screen, dst_surf->format, PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_RENDER_TARGET, 0));
+
+ /* save state (restored below) */
+ cso_save_blend(ctx->cso);
+ cso_save_samplers(ctx->cso);
+ cso_save_sampler_textures(ctx->cso);
+ cso_save_framebuffer(ctx->cso);
+ cso_save_fragment_shader(ctx->cso);
+ cso_save_vertex_shader(ctx->cso);
+
+ cso_save_viewport(ctx->cso);
+
+
+ /* set misc state we care about */
+ {
+ struct pipe_blend_state blend;
+ memset(&blend, 0, sizeof(blend));
+ blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ blend.alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ blend.colormask = PIPE_MASK_RGBA;
+ cso_set_blend(ctx->cso, &blend);
+ }
+
+ /* sampler */
+ {
+ struct pipe_sampler_state sampler;
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
+ sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
+ sampler.normalized_coords = 1;
+ cso_single_sampler(ctx->cso, 0, &sampler);
+ cso_single_sampler_done(ctx->cso);
+ }
+
+ vg_set_viewport(ctx->owner, VEGA_Y0_TOP);
+
+ /* texture */
+ cso_set_sampler_textures(ctx->cso, 1, &src);
+
+ /* shaders */
+ cso_set_vertex_shader_handle(ctx->cso, vg_texture_vs(ctx->owner));
+ cso_set_fragment_shader_handle(ctx->cso, ctx->fs);
+
+ /* drawing dest */
+ memset(&fb, 0, sizeof(fb));
+ fb.width = dst_surf->width;
+ fb.height = dst_surf->height;
+ fb.nr_cbufs = 1;
+ fb.cbufs[0] = dst_surf;
+ {
+ VGint i;
+ for (i = 1; i < PIPE_MAX_COLOR_BUFS; ++i)
+ fb.cbufs[i] = 0;
+ }
+ cso_set_framebuffer(ctx->cso, &fb);
+
+ /* draw quad */
+ buf = setup_vertex_data_tex(ctx,
+ dx1, dy1,
+ dx2, dy2,
+ s0, t0, s1, t1,
+ 0.0f);
+
+ if (buf) {
+ util_draw_vertex_buffer(ctx->pipe, buf, 0,
+ PIPE_PRIM_TRIANGLE_FAN,
+ 4, /* verts */
+ 2); /* attribs/vert */
+
+ pipe_buffer_reference( &buf,
+ NULL );
+ }
+
+ /* restore state we changed */
+ cso_restore_blend(ctx->cso);
+ cso_restore_samplers(ctx->cso);
+ cso_restore_sampler_textures(ctx->cso);
+ cso_restore_framebuffer(ctx->cso);
+ cso_restore_vertex_shader(ctx->cso);
+ cso_restore_fragment_shader(ctx->cso);
+ cso_restore_viewport(ctx->cso);
+
+ pipe_surface_reference(&dst_surf, NULL);
+}
+
+void renderer_copy_surface(struct renderer *ctx,
+ struct pipe_surface *src,
+ int srcX0, int srcY0,
+ int srcX1, int srcY1,
+ struct pipe_surface *dst,
+ int dstX0, int dstY0,
+ int dstX1, int dstY1,
+ float z, unsigned filter)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ struct pipe_buffer *buf;
+ struct pipe_texture texTemp, *tex;
+ struct pipe_surface *texSurf;
+ struct pipe_framebuffer_state fb;
+ struct st_framebuffer *stfb = ctx->owner->draw_buffer;
+ const int srcW = abs(srcX1 - srcX0);
+ const int srcH = abs(srcY1 - srcY0);
+ const int srcLeft = MIN2(srcX0, srcX1);
+ const int srcTop = MIN2(srcY0, srcY1);
+
+ assert(filter == PIPE_TEX_MIPFILTER_NEAREST ||
+ filter == PIPE_TEX_MIPFILTER_LINEAR);
+
+ if (srcLeft != srcX0) {
+ /* left-right flip */
+ int tmp = dstX0;
+ dstX0 = dstX1;
+ dstX1 = tmp;
+ }
+
+ if (srcTop != srcY0) {
+ /* up-down flip */
+ int tmp = dstY0;
+ dstY0 = dstY1;
+ dstY1 = tmp;
+ }
+
+ assert(screen->is_format_supported(screen, src->format, PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_SAMPLER, 0));
+ assert(screen->is_format_supported(screen, dst->format, PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_SAMPLER, 0));
+ assert(screen->is_format_supported(screen, dst->format, PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_RENDER_TARGET, 0));
+
+ /*
+ * XXX for now we're always creating a temporary texture.
+ * Strictly speaking that's not always needed.
+ */
+
+ /* create temp texture */
+ memset(&texTemp, 0, sizeof(texTemp));
+ texTemp.target = PIPE_TEXTURE_2D;
+ texTemp.format = src->format;
+ texTemp.last_level = 0;
+ texTemp.width[0] = srcW;
+ texTemp.height[0] = srcH;
+ texTemp.depth[0] = 1;
+ pf_get_block(src->format, &texTemp.block);
+
+ tex = screen->texture_create(screen, &texTemp);
+ if (!tex)
+ return;
+
+ texSurf = screen->get_tex_surface(screen, tex, 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /* load temp texture */
+ pipe->surface_copy(pipe,
+ texSurf, 0, 0, /* dest */
+ src, srcLeft, srcTop, /* src */
+ srcW, srcH); /* size */
+
+ /* free the surface, update the texture if necessary.*/
+ screen->tex_surface_destroy(texSurf);
+
+ /* save state (restored below) */
+ cso_save_blend(ctx->cso);
+ cso_save_samplers(ctx->cso);
+ cso_save_sampler_textures(ctx->cso);
+ cso_save_framebuffer(ctx->cso);
+ cso_save_fragment_shader(ctx->cso);
+ cso_save_vertex_shader(ctx->cso);
+ cso_save_viewport(ctx->cso);
+
+ /* set misc state we care about */
+ {
+ struct pipe_blend_state blend;
+ memset(&blend, 0, sizeof(blend));
+ blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ blend.alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ blend.colormask = PIPE_MASK_RGBA;
+ cso_set_blend(ctx->cso, &blend);
+ }
+
+ vg_set_viewport(ctx->owner, VEGA_Y0_TOP);
+
+ /* sampler */
+ {
+ struct pipe_sampler_state sampler;
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
+ sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
+ sampler.normalized_coords = 1;
+ cso_single_sampler(ctx->cso, 0, &sampler);
+ cso_single_sampler_done(ctx->cso);
+ }
+
+ /* texture */
+ cso_set_sampler_textures(ctx->cso, 1, &tex);
+
+ /* shaders */
+ cso_set_fragment_shader_handle(ctx->cso, ctx->fs);
+ cso_set_vertex_shader_handle(ctx->cso, vg_texture_vs(ctx->owner));
+
+ /* drawing dest */
+ if (stfb->strb->surface != dst) {
+ memset(&fb, 0, sizeof(fb));
+ fb.width = dst->width;
+ fb.height = dst->height;
+ fb.nr_cbufs = 1;
+ fb.cbufs[0] = dst;
+ fb.zsbuf = stfb->dsrb->surface;
+ cso_set_framebuffer(ctx->cso, &fb);
+ }
+
+ /* draw quad */
+ buf = setup_vertex_data(ctx,
+ (float) dstX0, (float) dstY0,
+ (float) dstX1, (float) dstY1, z);
+
+ if (buf) {
+ util_draw_vertex_buffer(ctx->pipe, buf, 0,
+ PIPE_PRIM_TRIANGLE_FAN,
+ 4, /* verts */
+ 2); /* attribs/vert */
+
+ pipe_buffer_reference( &buf,
+ NULL );
+ }
+
+
+ /* restore state we changed */
+ cso_restore_blend(ctx->cso);
+ cso_restore_samplers(ctx->cso);
+ cso_restore_sampler_textures(ctx->cso);
+ cso_restore_framebuffer(ctx->cso);
+ cso_restore_fragment_shader(ctx->cso);
+ cso_restore_vertex_shader(ctx->cso);
+ cso_restore_viewport(ctx->cso);
+
+ pipe_texture_reference(&tex, NULL);
+}
+
+void renderer_texture_quad(struct renderer *r,
+ struct pipe_texture *tex,
+ VGfloat x1offset, VGfloat y1offset,
+ VGfloat x2offset, VGfloat y2offset,
+ VGfloat x1, VGfloat y1,
+ VGfloat x2, VGfloat y2,
+ VGfloat x3, VGfloat y3,
+ VGfloat x4, VGfloat y4)
+{
+ struct pipe_context *pipe = r->pipe;
+ struct pipe_buffer *buf;
+ VGfloat s0, t0, s1, t1;
+
+ assert(tex->width[0] != 0);
+ assert(tex->height[0] != 0);
+
+ s0 = x1offset / tex->width[0];
+ s1 = x2offset / tex->width[0];
+ t0 = y1offset / tex->height[0];
+ t1 = y2offset / tex->height[0];
+
+ cso_save_vertex_shader(r->cso);
+ /* shaders */
+ cso_set_vertex_shader_handle(r->cso, vg_texture_vs(r->owner));
+
+ /* draw quad */
+ buf = setup_vertex_data_qtex(r, x1, y1, x2, y2, x3, y3, x4, y4,
+ s0, t0, s1, t1, 0.0f);
+
+ if (buf) {
+ util_draw_vertex_buffer(pipe, buf, 0,
+ PIPE_PRIM_TRIANGLE_FAN,
+ 4, /* verts */
+ 2); /* attribs/vert */
+
+ pipe_buffer_reference(&buf,
+ NULL);
+ }
+
+ cso_restore_vertex_shader(r->cso);
+}
diff --git a/src/gallium/state_trackers/vega/renderer.h b/src/gallium/state_trackers/vega/renderer.h
new file mode 100644
index 0000000000..990cd32c31
--- /dev/null
+++ b/src/gallium/state_trackers/vega/renderer.h
@@ -0,0 +1,76 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef RENDERER_H
+#define RENDERER_H
+
+#include "VG/openvg.h"
+
+struct renderer;
+
+struct vg_context;
+struct pipe_texture;
+struct pipe_surface;
+
+struct renderer *renderer_create(struct vg_context *owner);
+void renderer_destroy(struct renderer *);
+
+void renderer_draw_quad(struct renderer *,
+ VGfloat x1, VGfloat y1,
+ VGfloat x2, VGfloat y2,
+ VGfloat depth);
+void renderer_draw_texture(struct renderer *,
+ struct pipe_texture *texture,
+ VGfloat x1offset, VGfloat y1offset,
+ VGfloat x2offset, VGfloat y2offset,
+ VGfloat x1, VGfloat y1,
+ VGfloat x2, VGfloat y2);
+void renderer_texture_quad(struct renderer *,
+ struct pipe_texture *texture,
+ VGfloat x1offset, VGfloat y1offset,
+ VGfloat x2offset, VGfloat y2offset,
+ VGfloat x1, VGfloat y1,
+ VGfloat x2, VGfloat y2,
+ VGfloat x3, VGfloat y3,
+ VGfloat x4, VGfloat y4);
+void renderer_copy_texture(struct renderer *r,
+ struct pipe_texture *src,
+ VGfloat sx1, VGfloat sy1,
+ VGfloat sx2, VGfloat sy2,
+ struct pipe_texture *dst,
+ VGfloat dx1, VGfloat dy1,
+ VGfloat dx2, VGfloat dy2);
+void renderer_copy_surface(struct renderer *r,
+ struct pipe_surface *src,
+ int sx1, int sy1,
+ int sx2, int sy2,
+ struct pipe_surface *dst,
+ int dx1, int dy1,
+ int dx2, int dy2,
+ float z, unsigned filter);
+
+
+#endif
diff --git a/src/gallium/state_trackers/vega/shader.c b/src/gallium/state_trackers/vega/shader.c
new file mode 100644
index 0000000000..d9074a377b
--- /dev/null
+++ b/src/gallium/state_trackers/vega/shader.c
@@ -0,0 +1,310 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "shader.h"
+
+#include "vg_context.h"
+#include "shaders_cache.h"
+#include "paint.h"
+#include "mask.h"
+#include "image.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_screen.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+#include "util/u_memory.h"
+
+#define MAX_CONSTANTS 20
+
+struct shader {
+ struct vg_context *context;
+
+ VGboolean masking;
+ struct vg_paint *paint;
+ struct vg_image *image;
+
+ VGboolean drawing_image;
+ VGImageMode image_mode;
+
+ float constants[MAX_CONSTANTS];
+ struct pipe_constant_buffer cbuf;
+ struct pipe_shader_state fs_state;
+ void *fs;
+};
+
+struct shader * shader_create(struct vg_context *ctx)
+{
+ struct shader *shader = 0;
+
+ shader = CALLOC_STRUCT(shader);
+ shader->context = ctx;
+
+ return shader;
+}
+
+void shader_destroy(struct shader *shader)
+{
+ free(shader);
+}
+
+void shader_set_masking(struct shader *shader, VGboolean set)
+{
+ shader->masking = set;
+}
+
+VGboolean shader_is_masking(struct shader *shader)
+{
+ return shader->masking;
+}
+
+void shader_set_paint(struct shader *shader, struct vg_paint *paint)
+{
+ shader->paint = paint;
+}
+
+struct vg_paint * shader_paint(struct shader *shader)
+{
+ return shader->paint;
+}
+
+
+static void setup_constant_buffer(struct shader *shader)
+{
+ struct vg_context *ctx = shader->context;
+ struct pipe_context *pipe = shader->context->pipe;
+ struct pipe_constant_buffer *cbuf = &shader->cbuf;
+ VGint param_bytes = paint_constant_buffer_size(shader->paint);
+ float temp_buf[MAX_CONSTANTS];
+
+ assert(param_bytes <= sizeof(temp_buf));
+ paint_fill_constant_buffer(shader->paint, temp_buf);
+
+ if (cbuf->buffer == NULL ||
+ memcmp(temp_buf, shader->constants, param_bytes) != 0)
+ {
+ pipe_buffer_reference(&cbuf->buffer, NULL);
+
+ memcpy(shader->constants, temp_buf, param_bytes);
+ cbuf->buffer = pipe_user_buffer_create(pipe->screen,
+ &shader->constants,
+ sizeof(shader->constants));
+ }
+
+ ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, cbuf);
+}
+
+static VGint blend_bind_samplers(struct vg_context *ctx,
+ struct pipe_sampler_state **samplers,
+ struct pipe_texture **textures)
+{
+ VGBlendMode bmode = ctx->state.vg.blend_mode;
+
+ if (bmode == VG_BLEND_MULTIPLY ||
+ bmode == VG_BLEND_SCREEN ||
+ bmode == VG_BLEND_DARKEN ||
+ bmode == VG_BLEND_LIGHTEN) {
+ struct st_framebuffer *stfb = ctx->draw_buffer;
+
+ vg_prepare_blend_surface(ctx);
+
+ samplers[2] = &ctx->blend_sampler;
+ textures[2] = stfb->blend_texture;
+
+ if (!samplers[0] || !textures[0]) {
+ samplers[1] = samplers[2];
+ textures[1] = textures[2];
+ }
+ if (!samplers[1] || !textures[1]) {
+ samplers[1] = samplers[0];
+ textures[1] = textures[0];
+ }
+
+ return 1;
+ }
+ return 0;
+}
+
+static void setup_samplers(struct shader *shader)
+{
+ struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
+ struct pipe_texture *textures[PIPE_MAX_SAMPLERS];
+ struct vg_context *ctx = shader->context;
+ /* A little wonky: we use num as a boolean that just says whether any
+ * samplers/textures have been set at all. The numbering of the sampler
+ * slots is always the same:
+ * 0 - paint sampler/texture for gradient/pattern
+ * 1 - mask sampler/texture
+ * 2 - blend sampler/texture
+ * 3 - image sampler/texture
+ */
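+ /* Illustrative example (not part of the original patch): a pattern fill
+ * with masking enabled populates slots 0 (paint) and 1 (mask) and leaves
+ * slots 2 and 3 NULL; num becomes non-zero, so all four slots are still
+ * handed to cso_set_samplers()/cso_set_sampler_textures() below.
+ */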
+ VGint num = 0;
+
+ samplers[0] = NULL;
+ samplers[1] = NULL;
+ samplers[2] = NULL;
+ samplers[3] = NULL;
+ textures[0] = NULL;
+ textures[1] = NULL;
+ textures[2] = NULL;
+ textures[3] = NULL;
+
+ num += paint_bind_samplers(shader->paint, samplers, textures);
+ num += mask_bind_samplers(samplers, textures);
+ num += blend_bind_samplers(ctx, samplers, textures);
+ if (shader->drawing_image && shader->image)
+ num += image_bind_samplers(shader->image, samplers, textures);
+
+ if (num) {
+ cso_set_samplers(ctx->cso_context, 4, (const struct pipe_sampler_state **)samplers);
+ cso_set_sampler_textures(ctx->cso_context, 4, textures);
+ }
+}
+
+static INLINE VGboolean is_format_bw(struct shader *shader)
+{
+#if 0
+ struct vg_context *ctx = shader->context;
+ struct st_framebuffer *stfb = ctx->draw_buffer;
+#endif
+
+ if (shader->drawing_image && shader->image) {
+ if (shader->image->format == VG_BW_1)
+ return VG_TRUE;
+ }
+
+ return VG_FALSE;
+}
+
+static void setup_shader_program(struct shader *shader)
+{
+ struct vg_context *ctx = shader->context;
+ VGint shader_id = 0;
+ VGBlendMode blend_mode = ctx->state.vg.blend_mode;
+ VGboolean black_white = is_format_bw(shader);
+
+ /* 1st stage: fill */
+ if (!shader->drawing_image ||
+ (shader->image_mode == VG_DRAW_IMAGE_MULTIPLY || shader->image_mode == VG_DRAW_IMAGE_STENCIL)) {
+ switch(paint_type(shader->paint)) {
+ case VG_PAINT_TYPE_COLOR:
+ shader_id |= VEGA_SOLID_FILL_SHADER;
+ break;
+ case VG_PAINT_TYPE_LINEAR_GRADIENT:
+ shader_id |= VEGA_LINEAR_GRADIENT_SHADER;
+ break;
+ case VG_PAINT_TYPE_RADIAL_GRADIENT:
+ shader_id |= VEGA_RADIAL_GRADIENT_SHADER;
+ break;
+ case VG_PAINT_TYPE_PATTERN:
+ shader_id |= VEGA_PATTERN_SHADER;
+ break;
+
+ default:
+ abort();
+ }
+ }
+
+ /* second stage image */
+ if (shader->drawing_image) {
+ switch(shader->image_mode) {
+ case VG_DRAW_IMAGE_NORMAL:
+ shader_id |= VEGA_IMAGE_NORMAL_SHADER;
+ break;
+ case VG_DRAW_IMAGE_MULTIPLY:
+ shader_id |= VEGA_IMAGE_MULTIPLY_SHADER;
+ break;
+ case VG_DRAW_IMAGE_STENCIL:
+ shader_id |= VEGA_IMAGE_STENCIL_SHADER;
+ break;
+ default:
+ debug_printf("Unknown image mode!");
+ }
+ }
+
+ if (shader->masking)
+ shader_id |= VEGA_MASK_SHADER;
+
+ switch(blend_mode) {
+ case VG_BLEND_MULTIPLY:
+ shader_id |= VEGA_BLEND_MULTIPLY_SHADER;
+ break;
+ case VG_BLEND_SCREEN:
+ shader_id |= VEGA_BLEND_SCREEN_SHADER;
+ break;
+ case VG_BLEND_DARKEN:
+ shader_id |= VEGA_BLEND_DARKEN_SHADER;
+ break;
+ case VG_BLEND_LIGHTEN:
+ shader_id |= VEGA_BLEND_LIGHTEN_SHADER;
+ break;
+ default:
+ /* handled by pipe_blend_state */
+ break;
+ }
+
+ if (black_white)
+ shader_id |= VEGA_BW_SHADER;
+
+ shader->fs = shaders_cache_fill(ctx->sc, shader_id);
+ cso_set_fragment_shader_handle(ctx->cso_context, shader->fs);
+}
+
+
+void shader_bind(struct shader *shader)
+{
+ /* first resolve the real paint type */
+ paint_resolve_type(shader->paint);
+
+ setup_constant_buffer(shader);
+ setup_samplers(shader);
+ setup_shader_program(shader);
+}
+
+void shader_set_image_mode(struct shader *shader, VGImageMode image_mode)
+{
+ shader->image_mode = image_mode;
+}
+
+VGImageMode shader_image_mode(struct shader *shader)
+{
+ return shader->image_mode;
+}
+
+void shader_set_drawing_image(struct shader *shader, VGboolean drawing_image)
+{
+ shader->drawing_image = drawing_image;
+}
+
+VGboolean shader_drawing_image(struct shader *shader)
+{
+ return shader->drawing_image;
+}
+
+void shader_set_image(struct shader *shader, struct vg_image *img)
+{
+ shader->image = img;
+}
diff --git a/src/gallium/state_trackers/vega/shader.h b/src/gallium/state_trackers/vega/shader.h
new file mode 100644
index 0000000000..847eee6a31
--- /dev/null
+++ b/src/gallium/state_trackers/vega/shader.h
@@ -0,0 +1,56 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef SHADER_H
+#define SHADER_H
+
+#include "VG/openvg.h"
+
+struct shader;
+struct vg_paint;
+struct vg_context;
+struct vg_image;
+
+struct shader *shader_create(struct vg_context *context);
+void shader_destroy(struct shader *shader);
+
+void shader_set_masking(struct shader *shader, VGboolean set);
+VGboolean shader_is_masking(struct shader *shader);
+
+void shader_set_paint(struct shader *shader, struct vg_paint *paint);
+struct vg_paint *shader_paint(struct shader *shader);
+
+void shader_set_image_mode(struct shader *shader, VGImageMode image_mode);
+VGImageMode shader_image_mode(struct shader *shader);
+
+void shader_set_drawing_image(struct shader *shader, VGboolean drawing_image);
+VGboolean shader_drawing_image(struct shader *shader);
+
+void shader_set_image(struct shader *shader, struct vg_image *img);
+
+void shader_bind(struct shader *shader);
+
+#endif
diff --git a/src/gallium/state_trackers/vega/shaders_cache.c b/src/gallium/state_trackers/vega/shaders_cache.c
new file mode 100644
index 0000000000..fd0831fab1
--- /dev/null
+++ b/src/gallium/state_trackers/vega/shaders_cache.c
@@ -0,0 +1,439 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "shaders_cache.h"
+
+#include "vg_context.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+#include "pipe/p_screen.h"
+#include "pipe/p_shader_tokens.h"
+
+#include "tgsi/tgsi_build.h"
+#include "tgsi/tgsi_dump.h"
+#include "tgsi/tgsi_parse.h"
+#include "tgsi/tgsi_util.h"
+#include "tgsi/tgsi_text.h"
+
+#include "util/u_memory.h"
+#include "util/u_math.h"
+#include "util/u_debug.h"
+#include "cso_cache/cso_hash.h"
+#include "cso_cache/cso_context.h"
+
+#include "VG/openvg.h"
+
+#include "asm_fill.h"
+
+/* Essentially we construct an uber-shader based on the state
+ * of the pipeline. The stages are:
+ * 1) Fill (mandatory, solid color/gradient/pattern/image draw)
+ * 2) Image composition (image mode multiply and stencil)
+ * 3) Mask
+ * 4) Extended blend (multiply/screen/darken/lighten)
+ * 5) Premultiply/Unpremultiply
+ * 6) Color transform (to black and white)
+ */
+#define SHADER_STAGES 6
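+
+/* Illustrative note (not part of the original patch): a cache key is the
+ * bitwise OR of one VEGA_*_SHADER flag per active stage, e.g. a masked
+ * linear-gradient fill under VG_BLEND_MULTIPLY roughly combines to
+ *
+ *    shader_id = VEGA_LINEAR_GRADIENT_SHADER |
+ *                VEGA_MASK_SHADER |
+ *                VEGA_BLEND_MULTIPLY_SHADER;
+ *
+ * setup_shader_program() in shader.c builds such keys and hands them to
+ * shaders_cache_fill().
+ */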
+
+struct cached_shader {
+ void *driver_shader;
+ struct pipe_shader_state state;
+};
+
+struct shaders_cache {
+ struct vg_context *pipe;
+
+ struct cso_hash *hash;
+};
+
+
+static INLINE struct tgsi_token *tokens_from_assembly(const char *txt, int num_tokens)
+{
+ struct tgsi_token *tokens;
+
+ tokens = (struct tgsi_token *) MALLOC(num_tokens * sizeof(tokens[0]));
+
+ tgsi_text_translate(txt, tokens, num_tokens);
+
+#if DEBUG_SHADERS
+ tgsi_dump(tokens, 0);
+#endif
+
+ return tokens;
+}
+
+#define ALL_FILLS (VEGA_SOLID_FILL_SHADER | \
+ VEGA_LINEAR_GRADIENT_SHADER | \
+ VEGA_RADIAL_GRADIENT_SHADER | \
+ VEGA_PATTERN_SHADER | \
+ VEGA_IMAGE_NORMAL_SHADER)
+
+
+/*
+static const char max_shader_preamble[] =
+ "FRAG1.1\n"
+ "DCL IN[0], POSITION, LINEAR\n"
+ "DCL IN[1], GENERIC[0], PERSPECTIVE\n"
+ "DCL OUT[0], COLOR, CONSTANT\n"
+ "DCL CONST[0..9], CONSTANT\n"
+ "DCL TEMP[0..9], CONSTANT\n"
+ "DCL SAMP[0..9], CONSTANT\n";
+
+ max_shader_preamble strlen == 175
+*/
+#define MAX_PREAMBLE 175
+
+static INLINE VGint range_min(VGint min, VGint current)
+{
+ if (min < 0)
+ min = current;
+ else
+ min = MIN2(min, current);
+ return min;
+}
+
+static INLINE VGint range_max(VGint max, VGint current)
+{
+ return MAX2(max, current);
+}
+
+static void
+create_preamble(char *txt,
+ const struct shader_asm_info *shaders[SHADER_STAGES],
+ int num_shaders)
+{
+ VGboolean declare_input = VG_FALSE;
+ VGint start_const = -1, end_const = 0;
+ VGint start_temp = -1, end_temp = 0;
+ VGint start_sampler = -1, end_sampler = 0;
+ VGint i;
+ VGint num_consts, num_temps, num_samplers;
+
+ for (i = 0; i < num_shaders; ++i) {
+ if (shaders[i]->num_consts)
+ start_const = range_min(start_const, shaders[i]->start_const);
+ if (shaders[i]->num_temps)
+ start_temp = range_min(start_temp, shaders[i]->start_temp);
+ if (shaders[i]->num_samplers)
+ start_sampler = range_min(start_sampler, shaders[i]->start_sampler);
+
+ end_const = range_max(end_const, shaders[i]->start_const +
+ shaders[i]->num_consts);
+ end_temp = range_max(end_temp, shaders[i]->start_temp +
+ shaders[i]->num_temps);
+ end_sampler = range_max(end_sampler, shaders[i]->start_sampler +
+ shaders[i]->num_samplers);
+ if (shaders[i]->needs_position)
+ declare_input = VG_TRUE;
+ }
+ /* if they're still uninitialized, initialize them */
+ if (start_const < 0)
+ start_const = 0;
+ if (start_temp < 0)
+ start_temp = 0;
+ if (start_sampler < 0)
+ start_sampler = 0;
+
+ num_consts = end_const - start_const;
+ num_temps = end_temp - start_temp;
+ num_samplers = end_sampler - start_sampler;
+ /* the end values are exclusive; make them inclusive for the DCL ranges */
+ --end_const;
+ --end_temp;
+ --end_sampler;
+
+ sprintf(txt, "FRAG1.1\n");
+
+ if (declare_input) {
+ sprintf(txt + strlen(txt), "DCL IN[0], POSITION, LINEAR\n");
+ sprintf(txt + strlen(txt), "DCL IN[1], GENERIC[0], PERSPECTIVE\n");
+ }
+
+ /* we always have a color output */
+ sprintf(txt + strlen(txt), "DCL OUT[0], COLOR, CONSTANT\n");
+
+ if (num_consts > 1)
+ sprintf(txt + strlen(txt), "DCL CONST[%d..%d], CONSTANT\n", start_const, end_const);
+ else if (num_consts == 1)
+ sprintf(txt + strlen(txt), "DCL CONST[%d], CONSTANT\n", start_const);
+
+ if (num_temps > 1)
+ sprintf(txt + strlen(txt), "DCL TEMP[%d..%d], CONSTANT\n", start_temp, end_temp);
+ else if (num_temps == 1)
+ sprintf(txt + strlen(txt), "DCL TEMP[%d], CONSTANT\n", start_temp);
+
+ if (num_samplers > 1)
+ sprintf(txt + strlen(txt), "DCL SAMP[%d..%d], CONSTANT\n", start_sampler, end_sampler);
+ else if (num_samplers == 1)
+ sprintf(txt + strlen(txt), "DCL SAMP[%d], CONSTANT\n", start_sampler);
+}
+
+static void *
+combine_shaders(const struct shader_asm_info *shaders[SHADER_STAGES], int num_shaders,
+ struct pipe_context *pipe,
+ struct pipe_shader_state *shader)
+{
+ char *combined_txt;
+ int combined_len = MAX_PREAMBLE;
+ int combined_tokens = 0;
+ int i = 0;
+ int current_shader = 0;
+ int current_len;
+
+ for (i = 0; i < num_shaders; ++i) {
+ combined_len += strlen(shaders[i]->txt);
+ combined_tokens += shaders[i]->num_tokens;
+ }
+ /* add room for the %s -> TEMP[0] substitutions and the trailing "END\n" */
+ combined_len += num_shaders * 7 /*TEMP[0]*/ + 4 /*"END\n"*/;
+
+ combined_txt = (char*)malloc(combined_len);
+ combined_txt[0] = '\0';
+
+ create_preamble(combined_txt, shaders, num_shaders);
+
+ while (current_shader < num_shaders) {
+ const char temp[] = "TEMP[0]";
+ const char out[] = "OUT[0]";
+ const char *subst = temp;
+
+ current_len = strlen(combined_txt);
+
+ /* if the last shader then output */
+ if (current_shader + 1 == num_shaders)
+ subst = out;
+
+ snprintf(combined_txt + current_len,
+ combined_len - current_len,
+ shaders[current_shader]->txt,
+ subst);
+ ++current_shader;
+ }
+
+
+ current_len = strlen(combined_txt);
+ snprintf(combined_txt + current_len,
+ combined_len - current_len,
+ "END\n");
+
+ debug_printf("Combined shader is : \n%s\n",
+ combined_txt);
+
+ shader->tokens = tokens_from_assembly(
+ combined_txt, combined_tokens);
+
+ free(combined_txt);
+
+ return pipe->create_fs_state(pipe, shader);
+}
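+
+/* Illustrative sketch (not part of the original patch): each stage's
+ * assembly writes its result through a "%s" placeholder, substituted above
+ * with TEMP[0] for intermediate stages and OUT[0] for the last one, so a
+ * two-stage combination ends up roughly as
+ *
+ *    <preamble>
+ *    ...fill stage, result -> TEMP[0]...
+ *    ...blend stage, result -> OUT[0]...
+ *    END
+ */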
+
+static void *
+create_shader(struct pipe_context *pipe,
+ int id,
+ struct pipe_shader_state *shader)
+{
+ int idx = 0;
+ const struct shader_asm_info * shaders[SHADER_STAGES];
+
+ /* the shader has to have a fill */
+ debug_assert(id & ALL_FILLS);
+
+ /* first stage */
+ if (id & VEGA_SOLID_FILL_SHADER) {
+ debug_assert(idx == 0);
+ shaders[idx] = &shaders_asm[0];
+ debug_assert(shaders_asm[0].id == VEGA_SOLID_FILL_SHADER);
+ ++idx;
+ }
+ if ((id & VEGA_LINEAR_GRADIENT_SHADER)) {
+ debug_assert(idx == 0);
+ shaders[idx] = &shaders_asm[1];
+ debug_assert(shaders_asm[1].id == VEGA_LINEAR_GRADIENT_SHADER);
+ ++idx;
+ }
+ if ((id & VEGA_RADIAL_GRADIENT_SHADER)) {
+ debug_assert(idx == 0);
+ shaders[idx] = &shaders_asm[2];
+ debug_assert(shaders_asm[2].id == VEGA_RADIAL_GRADIENT_SHADER);
+ ++idx;
+ }
+ if ((id & VEGA_PATTERN_SHADER)) {
+ debug_assert(idx == 0);
+ debug_assert(shaders_asm[3].id == VEGA_PATTERN_SHADER);
+ shaders[idx] = &shaders_asm[3];
+ ++idx;
+ }
+ if ((id & VEGA_IMAGE_NORMAL_SHADER)) {
+ debug_assert(idx == 0);
+ debug_assert(shaders_asm[4].id == VEGA_IMAGE_NORMAL_SHADER);
+ shaders[idx] = &shaders_asm[4];
+ ++idx;
+ }
+
+ /* second stage */
+ if ((id & VEGA_IMAGE_MULTIPLY_SHADER)) {
+ debug_assert(shaders_asm[5].id == VEGA_IMAGE_MULTIPLY_SHADER);
+ shaders[idx] = &shaders_asm[5];
+ ++idx;
+ } else if ((id & VEGA_IMAGE_STENCIL_SHADER)) {
+ debug_assert(shaders_asm[6].id == VEGA_IMAGE_STENCIL_SHADER);
+ shaders[idx] = &shaders_asm[6];
+ ++idx;
+ }
+
+ /* third stage */
+ if ((id & VEGA_MASK_SHADER)) {
+ debug_assert(idx == 1);
+ debug_assert(shaders_asm[7].id == VEGA_MASK_SHADER);
+ shaders[idx] = &shaders_asm[7];
+ ++idx;
+ }
+
+ /* fourth stage */
+ if ((id & VEGA_BLEND_MULTIPLY_SHADER)) {
+ debug_assert(shaders_asm[8].id == VEGA_BLEND_MULTIPLY_SHADER);
+ shaders[idx] = &shaders_asm[8];
+ ++idx;
+ } else if ((id & VEGA_BLEND_SCREEN_SHADER)) {
+ debug_assert(shaders_asm[9].id == VEGA_BLEND_SCREEN_SHADER);
+ shaders[idx] = &shaders_asm[9];
+ ++idx;
+ } else if ((id & VEGA_BLEND_DARKEN_SHADER)) {
+ debug_assert(shaders_asm[10].id == VEGA_BLEND_DARKEN_SHADER);
+ shaders[idx] = &shaders_asm[10];
+ ++idx;
+ } else if ((id & VEGA_BLEND_LIGHTEN_SHADER)) {
+ debug_assert(shaders_asm[11].id == VEGA_BLEND_LIGHTEN_SHADER);
+ shaders[idx] = &shaders_asm[11];
+ ++idx;
+ }
+
+ /* fifth stage */
+ if ((id & VEGA_PREMULTIPLY_SHADER)) {
+ debug_assert(shaders_asm[12].id == VEGA_PREMULTIPLY_SHADER);
+ shaders[idx] = &shaders_asm[12];
+ ++idx;
+ } else if ((id & VEGA_UNPREMULTIPLY_SHADER)) {
+ debug_assert(shaders_asm[13].id == VEGA_UNPREMULTIPLY_SHADER);
+ shaders[idx] = &shaders_asm[13];
+ ++idx;
+ }
+
+ /* sixth stage */
+ if ((id & VEGA_BW_SHADER)) {
+ debug_assert(shaders_asm[14].id == VEGA_BW_SHADER);
+ shaders[idx] = &shaders_asm[14];
+ ++idx;
+ }
+
+ return combine_shaders(shaders, idx, pipe, shader);
+}
+
+/*************************************************/
+
+struct shaders_cache * shaders_cache_create(struct vg_context *vg)
+{
+ struct shaders_cache *sc = CALLOC_STRUCT(shaders_cache);
+
+ sc->pipe = vg;
+ sc->hash = cso_hash_create();
+
+ return sc;
+}
+
+void shaders_cache_destroy(struct shaders_cache *sc)
+{
+ struct cso_hash_iter iter = cso_hash_first_node(sc->hash);
+
+ while (!cso_hash_iter_is_null(iter)) {
+ struct cached_shader *cached =
+ (struct cached_shader *)cso_hash_iter_data(iter);
+ cso_delete_fragment_shader(sc->pipe->cso_context,
+ cached->driver_shader);
+ iter = cso_hash_erase(sc->hash, iter);
+ }
+
+ cso_hash_delete(sc->hash);
+ free(sc);
+}
+
+void * shaders_cache_fill(struct shaders_cache *sc,
+ int shader_key)
+{
+ VGint key = shader_key;
+ struct cached_shader *cached;
+ struct cso_hash_iter iter = cso_hash_find(sc->hash, key);
+
+ if (cso_hash_iter_is_null(iter)) {
+ cached = CALLOC_STRUCT(cached_shader);
+ cached->driver_shader = create_shader(sc->pipe->pipe, key, &cached->state);
+
+ cso_hash_insert(sc->hash, key, cached);
+
+ return cached->driver_shader;
+ }
+
+ cached = (struct cached_shader *)cso_hash_iter_data(iter);
+
+ assert(cached->driver_shader);
+ return cached->driver_shader;
+}
+
+struct vg_shader * shader_create_from_text(struct pipe_context *pipe,
+ const char *txt, int num_tokens,
+ int type)
+{
+ struct vg_shader *shader = (struct vg_shader *)malloc(
+ sizeof(struct vg_shader));
+ struct tgsi_token *tokens = tokens_from_assembly(txt, num_tokens);
+ struct pipe_shader_state state;
+
+ debug_assert(type == PIPE_SHADER_VERTEX ||
+ type == PIPE_SHADER_FRAGMENT);
+
+ state.tokens = tokens;
+ shader->type = type;
+ shader->tokens = tokens;
+
+ if (type == PIPE_SHADER_FRAGMENT)
+ shader->driver = pipe->create_fs_state(pipe, &state);
+ else
+ shader->driver = pipe->create_vs_state(pipe, &state);
+ return shader;
+}
+
+void vg_shader_destroy(struct vg_context *ctx, struct vg_shader *shader)
+{
+ if (shader->type == PIPE_SHADER_FRAGMENT)
+ cso_delete_fragment_shader(ctx->cso_context, shader->driver);
+ else
+ cso_delete_vertex_shader(ctx->cso_context, shader->driver);
+ free(shader->tokens);
+ free(shader);
+}
diff --git a/src/gallium/state_trackers/vega/shaders_cache.h b/src/gallium/state_trackers/vega/shaders_cache.h
new file mode 100644
index 0000000000..feca58b61a
--- /dev/null
+++ b/src/gallium/state_trackers/vega/shaders_cache.h
@@ -0,0 +1,77 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef SHADERS_CACHE_H
+#define SHADERS_CACHE_H
+
+
+struct vg_context;
+struct pipe_context;
+struct tgsi_token;
+struct shaders_cache;
+
+enum VegaShaderType {
+ VEGA_SOLID_FILL_SHADER = 1 << 0,
+ VEGA_LINEAR_GRADIENT_SHADER = 1 << 1,
+ VEGA_RADIAL_GRADIENT_SHADER = 1 << 2,
+ VEGA_PATTERN_SHADER = 1 << 3,
+ VEGA_IMAGE_NORMAL_SHADER = 1 << 4,
+ VEGA_IMAGE_MULTIPLY_SHADER = 1 << 5,
+ VEGA_IMAGE_STENCIL_SHADER = 1 << 6,
+
+ VEGA_MASK_SHADER = 1 << 7,
+
+ VEGA_BLEND_MULTIPLY_SHADER = 1 << 8,
+ VEGA_BLEND_SCREEN_SHADER = 1 << 9,
+ VEGA_BLEND_DARKEN_SHADER = 1 << 10,
+ VEGA_BLEND_LIGHTEN_SHADER = 1 << 11,
+
+ VEGA_PREMULTIPLY_SHADER = 1 << 12,
+ VEGA_UNPREMULTIPLY_SHADER = 1 << 13,
+
+ VEGA_BW_SHADER = 1 << 14
+};
+
+struct vg_shader {
+ void *driver;
+ struct tgsi_token *tokens;
+ int type;/* PIPE_SHADER_VERTEX, PIPE_SHADER_FRAGMENT */
+};
+
+struct shaders_cache *shaders_cache_create(struct vg_context *pipe);
+void shaders_cache_destroy(struct shaders_cache *sc);
+void *shaders_cache_fill(struct shaders_cache *sc,
+ int shader_key);
+
+struct vg_shader *shader_create_from_text(struct pipe_context *pipe,
+ const char *txt, int num_tokens,
+ int type);
+
+void vg_shader_destroy(struct vg_context *ctx, struct vg_shader *shader);
+
+
+
+#endif
diff --git a/src/gallium/state_trackers/vega/st_inlines.h b/src/gallium/state_trackers/vega/st_inlines.h
new file mode 100644
index 0000000000..1f331dfcdb
--- /dev/null
+++ b/src/gallium/state_trackers/vega/st_inlines.h
@@ -0,0 +1,159 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Functions for checking if buffers/textures are referenced when we need
+ * to read/write from/to them. Flush when needed.
+ */
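+
+/* Usage sketch (illustrative, not part of the original patch): before the
+ * CPU writes into a buffer the GPU may still reference, take the
+ * conditional-flush path so pending rendering is submitted first:
+ *
+ *    void *ptr = st_cond_flush_pipe_buffer_map(ctx, buf,
+ *                                              PIPE_BUFFER_USAGE_CPU_WRITE);
+ *    ... write data ...
+ *    pipe_buffer_unmap(ctx->pipe->screen, buf);
+ */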
+
+#ifndef ST_INLINES_H
+#define ST_INLINES_H
+
+#include "vg_context.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_screen.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+#include "pipe/p_state.h"
+
+static INLINE struct pipe_transfer *
+st_cond_flush_get_tex_transfer(struct vg_context *st,
+ struct pipe_texture *pt,
+ unsigned int face,
+ unsigned int level,
+ unsigned int zslice,
+ enum pipe_transfer_usage usage,
+ unsigned int x, unsigned int y,
+ unsigned int w, unsigned int h)
+{
+ struct pipe_screen *screen = st->pipe->screen;
+ struct pipe_context *pipe = st->pipe;
+ unsigned referenced =
+ pipe->is_texture_referenced(pipe, pt, face, level);
+
+ if (referenced && ((referenced & PIPE_REFERENCED_FOR_WRITE) ||
+ usage == PIPE_TRANSFER_WRITE ||
+ usage == PIPE_TRANSFER_READ_WRITE))
+ vgFlush();
+
+ return screen->get_tex_transfer(screen, pt, face, level, zslice, usage,
+ x, y, w, h);
+}
+
+static INLINE struct pipe_transfer *
+st_no_flush_get_tex_transfer(struct vg_context *st,
+ struct pipe_texture *pt,
+ unsigned int face,
+ unsigned int level,
+ unsigned int zslice,
+ enum pipe_transfer_usage usage,
+ unsigned int x, unsigned int y,
+ unsigned int w, unsigned int h)
+{
+ struct pipe_screen *screen = st->pipe->screen;
+
+ return screen->get_tex_transfer(screen, pt, face, level,
+ zslice, usage, x, y, w, h);
+}
+
+static INLINE void *
+st_cond_flush_pipe_buffer_map(struct vg_context *st,
+ struct pipe_buffer *buf,
+ unsigned int map_flags)
+{
+ struct pipe_context *pipe = st->pipe;
+ unsigned int referenced = pipe->is_buffer_referenced(pipe, buf);
+
+ if (referenced && ((referenced & PIPE_REFERENCED_FOR_WRITE) ||
+ (map_flags & PIPE_BUFFER_USAGE_CPU_WRITE)))
+ vgFlush();
+
+ return pipe_buffer_map(pipe->screen, buf, map_flags);
+}
+
+static INLINE void *
+st_no_flush_pipe_buffer_map(struct vg_context *st,
+ struct pipe_buffer *buf,
+ unsigned int map_flags)
+{
+ return pipe_buffer_map(st->pipe->screen, buf, map_flags);
+}
+
+
+static INLINE void
+st_cond_flush_pipe_buffer_write(struct vg_context *st,
+ struct pipe_buffer *buf,
+ unsigned int offset,
+ unsigned int size,
+ const void * data)
+{
+ struct pipe_context *pipe = st->pipe;
+
+ if (pipe->is_buffer_referenced(pipe, buf))
+ vgFlush();
+
+ pipe_buffer_write(pipe->screen, buf, offset, size, data);
+}
+
+static INLINE void
+st_no_flush_pipe_buffer_write(struct vg_context *st,
+ struct pipe_buffer *buf,
+ unsigned int offset,
+ unsigned int size,
+ const void * data)
+{
+ pipe_buffer_write(st->pipe->screen, buf, offset, size, data);
+}
+
+static INLINE void
+st_cond_flush_pipe_buffer_read(struct vg_context *st,
+ struct pipe_buffer *buf,
+ unsigned int offset,
+ unsigned int size,
+ void * data)
+{
+ struct pipe_context *pipe = st->pipe;
+
+ if (pipe->is_buffer_referenced(pipe, buf) & PIPE_REFERENCED_FOR_WRITE)
+ vgFlush();
+
+ pipe_buffer_read(pipe->screen, buf, offset, size, data);
+}
+
+static INLINE void
+st_no_flush_pipe_buffer_read(struct vg_context *st,
+ struct pipe_buffer *buf,
+ unsigned int offset,
+ unsigned int size,
+ void * data)
+{
+ pipe_buffer_read(st->pipe->screen, buf, offset, size, data);
+}
+
+#endif
+
diff --git a/src/gallium/state_trackers/vega/stroker.c b/src/gallium/state_trackers/vega/stroker.c
new file mode 100644
index 0000000000..1b92d2b5c6
--- /dev/null
+++ b/src/gallium/state_trackers/vega/stroker.c
@@ -0,0 +1,1349 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "stroker.h"
+
+#include "path.h"
+#include "vg_state.h"
+#include "util_array.h"
+#include "arc.h"
+#include "bezier.h"
+#include "matrix.h"
+#include "path_utils.h"
+#include "polygon.h"
+
+#include "math.h"
+
+#ifndef M_2PI
+#define M_2PI 6.28318530717958647692528676655900576
+#endif
+
+#define STROKE_SEGMENTS 0
+#define STROKE_DEBUG 0
+#define DEBUG_EMITS 0
+
+static const VGfloat curve_threshold = 0.25f;
+
+static const VGfloat zero_coords[] = {0.f, 0.f};
+
+enum intersection_type {
+ NoIntersections,
+ BoundedIntersection,
+ UnboundedIntersection,
+};
+
+enum line_join_mode {
+ FlatJoin,
+ SquareJoin,
+ MiterJoin,
+ RoundJoin,
+ RoundCap
+};
+
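+/* Polymorphic iterator over a path's command/coordinate arrays; the forward,
+ * backward and flattening variants below plug in their own callbacks.
+ */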
+struct stroke_iterator {
+ void (*next)(struct stroke_iterator *);
+ VGboolean (*has_next)(struct stroke_iterator *);
+
+ VGPathCommand (*current_command)(struct stroke_iterator *it);
+ void (*current_coords)(struct stroke_iterator *it, VGfloat *coords);
+
+ VGint position;
+ VGint coord_position;
+
+ const VGubyte *cmds;
+ const VGfloat *coords;
+ VGint num_commands;
+ VGint num_coords;
+
+ struct polygon *curve_poly;
+ VGint curve_index;
+};
+
+static VGPathCommand stroke_itr_command(struct stroke_iterator *itr)
+{
+ return itr->current_command(itr);
+}
+
+static void stroke_itr_coords(struct stroke_iterator *itr, VGfloat *coords)
+{
+ itr->current_coords(itr, coords);
+}
+
+static void stroke_fw_itr_coords(struct stroke_iterator *itr, VGfloat *coords)
+{
+ if (itr->position >= itr->num_commands)
+ return;
+ switch (stroke_itr_command(itr)) {
+ case VG_MOVE_TO_ABS:
+ coords[0] = itr->coords[itr->coord_position];
+ coords[1] = itr->coords[itr->coord_position + 1];
+ break;
+ case VG_LINE_TO_ABS:
+ coords[0] = itr->coords[itr->coord_position];
+ coords[1] = itr->coords[itr->coord_position + 1];
+ break;
+ case VG_CUBIC_TO_ABS:
+ coords[0] = itr->coords[itr->coord_position];
+ coords[1] = itr->coords[itr->coord_position + 1];
+ coords[2] = itr->coords[itr->coord_position + 2];
+ coords[3] = itr->coords[itr->coord_position + 3];
+ coords[4] = itr->coords[itr->coord_position + 4];
+ coords[5] = itr->coords[itr->coord_position + 5];
+ break;
+ default:
+ debug_assert(!"invalid command!\n");
+ }
+}
+
+
+static void stroke_bw_itr_coords(struct stroke_iterator *itr, VGfloat *coords)
+{
+ if (itr->position >= itr->num_commands)
+ return;
+ switch (stroke_itr_command(itr)) {
+ case VG_MOVE_TO_ABS:
+ coords[0] = itr->coords[itr->coord_position];
+ coords[1] = itr->coords[itr->coord_position + 1];
+ break;
+ case VG_LINE_TO_ABS:
+ coords[0] = itr->coords[itr->coord_position];
+ coords[1] = itr->coords[itr->coord_position + 1];
+ break;
+ case VG_CUBIC_TO_ABS:
+ coords[0] = itr->coords[itr->coord_position + 4];
+ coords[1] = itr->coords[itr->coord_position + 5];
+ coords[2] = itr->coords[itr->coord_position + 2];
+ coords[3] = itr->coords[itr->coord_position + 3];
+ coords[4] = itr->coords[itr->coord_position + 0];
+ coords[5] = itr->coords[itr->coord_position + 1];
+ break;
+ default:
+ debug_assert(!"invalid command!\n");
+ }
+}
+
+
+static VGPathCommand stroke_fw_current_command(struct stroke_iterator *it)
+{
+ return it->cmds[it->position];
+}
+
+static VGPathCommand stroke_bw_current_command(struct stroke_iterator *it)
+{
+ VGPathCommand prev_cmd;
+ if (it->position == it->num_commands -1)
+ return VG_MOVE_TO_ABS;
+
+ prev_cmd = it->cmds[it->position + 1];
+ return prev_cmd;
+}
+
+static VGboolean stroke_fw_has_next(struct stroke_iterator *itr)
+{
+ return itr->position < (itr->num_commands - 1);
+}
+
+static VGboolean stroke_bw_has_next(struct stroke_iterator *itr)
+{
+ return itr->position > 0;
+}
+
+static void stroke_fw_next(struct stroke_iterator *itr)
+{
+ VGubyte cmd;
+ debug_assert(stroke_fw_has_next(itr));
+
+ cmd = stroke_itr_command(itr);
+
+ itr->coord_position += num_elements_for_segments(&cmd, 1);
+ ++itr->position;
+}
+
+static void stroke_bw_next(struct stroke_iterator *itr)
+{
+ VGubyte cmd;
+ debug_assert(stroke_bw_has_next(itr));
+
+ --itr->position;
+ cmd = stroke_itr_command(itr);
+
+ itr->coord_position -= num_elements_for_segments(&cmd, 1);
+}
+
+static void stroke_itr_common_init(struct stroke_iterator *itr,
+ struct array *cmds,
+ struct array *coords)
+{
+ itr->cmds = (VGubyte*)cmds->data;
+ itr->num_commands = cmds->num_elements;
+
+ itr->coords = (VGfloat*)coords->data;
+ itr->num_coords = coords->num_elements;
+}
+
+static void stroke_forward_iterator(struct stroke_iterator *itr,
+ struct array *cmds,
+ struct array *coords)
+{
+ stroke_itr_common_init(itr, cmds, coords);
+ itr->position = 0;
+ itr->coord_position = 0;
+
+ itr->next = stroke_fw_next;
+ itr->has_next = stroke_fw_has_next;
+ itr->current_command = stroke_fw_current_command;
+ itr->current_coords = stroke_fw_itr_coords;
+}
+
+static void stroke_backward_iterator(struct stroke_iterator *itr,
+ struct array *cmds,
+ struct array *coords)
+{
+ VGubyte cmd;
+ stroke_itr_common_init(itr, cmds, coords);
+ itr->position = itr->num_commands - 1;
+
+ cmd = stroke_bw_current_command(itr);
+ itr->coord_position = itr->num_coords -
+ num_elements_for_segments(&cmd, 1);
+
+ itr->next = stroke_bw_next;
+ itr->has_next = stroke_bw_has_next;
+ itr->current_command = stroke_bw_current_command;
+ itr->current_coords = stroke_bw_itr_coords;
+}
+
+
+
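+/* The "flat" iterator walks the path forward but flattens VG_CUBIC_TO_ABS
+ * segments into a polygon and replays its vertices as line segments.
+ */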
+static void stroke_flat_next(struct stroke_iterator *itr)
+{
+ VGubyte cmd;
+
+ if (itr->curve_index >= 0) {
+ ++itr->curve_index;
+ if (itr->curve_index >= polygon_vertex_count(itr->curve_poly)) {
+ itr->curve_index = -1;
+ polygon_destroy(itr->curve_poly);
+ itr->curve_poly = 0;
+ } else
+ return;
+ }
+ debug_assert(stroke_fw_has_next(itr));
+
+ cmd = itr->cmds[itr->position];
+ itr->coord_position += num_elements_for_segments(&cmd, 1);
+ ++itr->position;
+
+ cmd = itr->cmds[itr->position];
+
+ if (cmd == VG_CUBIC_TO_ABS) {
+ struct bezier bezier;
+ VGfloat bez[8];
+
+ bez[0] = itr->coords[itr->coord_position - 2];
+ bez[1] = itr->coords[itr->coord_position - 1];
+ bez[2] = itr->coords[itr->coord_position];
+ bez[3] = itr->coords[itr->coord_position + 1];
+ bez[4] = itr->coords[itr->coord_position + 2];
+ bez[5] = itr->coords[itr->coord_position + 3];
+ bez[6] = itr->coords[itr->coord_position + 4];
+ bez[7] = itr->coords[itr->coord_position + 5];
+
+ bezier_init(&bezier,
+ bez[0], bez[1],
+ bez[2], bez[3],
+ bez[4], bez[5],
+ bez[6], bez[7]);
+ /* skip the first one, it's the same as the prev point */
+ itr->curve_index = 1;
+ if (itr->curve_poly) {
+ polygon_destroy(itr->curve_poly);
+ itr->curve_poly = 0;
+ }
+ itr->curve_poly = bezier_to_polygon(&bezier);
+ }
+}
+
+static VGboolean stroke_flat_has_next(struct stroke_iterator *itr)
+{
+ return (itr->curve_index >= 0 &&
+ itr->curve_index < (polygon_vertex_count(itr->curve_poly)-1))
+ || itr->position < (itr->num_commands - 1);
+}
+
+static VGPathCommand stroke_flat_current_command(struct stroke_iterator *it)
+{
+ if (it->cmds[it->position] == VG_CUBIC_TO_ABS) {
+ return VG_LINE_TO_ABS;
+ }
+ return it->cmds[it->position];
+}
+
+static void stroke_flat_itr_coords(struct stroke_iterator *itr, VGfloat *coords)
+{
+ if (itr->curve_index <= -1 && itr->position >= itr->num_commands)
+ return;
+
+ if (itr->curve_index >= 0) {
+ polygon_vertex(itr->curve_poly, itr->curve_index,
+ coords);
+ return;
+ }
+
+ switch (stroke_itr_command(itr)) {
+ case VG_MOVE_TO_ABS:
+ coords[0] = itr->coords[itr->coord_position];
+ coords[1] = itr->coords[itr->coord_position + 1];
+ break;
+ case VG_LINE_TO_ABS:
+ coords[0] = itr->coords[itr->coord_position];
+ coords[1] = itr->coords[itr->coord_position + 1];
+ break;
+ case VG_CUBIC_TO_ABS:
+ default:
+ debug_assert(!"invalid command!\n");
+ }
+}
+
+static void stroke_flat_iterator(struct stroke_iterator *itr,
+ struct array *cmds,
+ struct array *coords)
+{
+ stroke_itr_common_init(itr, cmds, coords);
+ itr->position = 0;
+ itr->coord_position = 0;
+
+ itr->next = stroke_flat_next;
+ itr->has_next = stroke_flat_has_next;
+ itr->current_command = stroke_flat_current_command;
+ itr->current_coords = stroke_flat_itr_coords;
+ itr->curve_index = -1;
+ itr->curve_poly = 0;
+}
+
+
+static INLINE VGboolean finite_coords4(const VGfloat *c)
+{
+ return
+ isfinite(c[0]) && isfinite(c[1]) &&
+ isfinite(c[2]) && isfinite(c[3]);
+}
+
+/* from Graphics Gems II */
+#define SAME_SIGNS(a, b) ((a) * (b) >= 0)
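+/* Two segments intersect unless one of them has both endpoints strictly on
+ * the same side of the infinite line through the other.
+ */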
+static VGboolean do_lines_intersect(VGfloat x1, VGfloat y1, VGfloat x2, VGfloat y2,
+ VGfloat x3, VGfloat y3, VGfloat x4, VGfloat y4)
+{
+ VGfloat a1, a2, b1, b2, c1, c2; /* Coefficients of line eqns */
+ VGfloat r1, r2, r3, r4; /* 'sign' values */
+
+ a1 = y2 - y1;
+ b1 = x1 - x2;
+ c1 = x2 * y1 - x1 * y2;
+
+ r3 = a1 * x3 + b1 * y3 + c1;
+ r4 = a1 * x4 + b1 * y4 + c1;
+
+ if (r3 != 0 && r4 != 0 && SAME_SIGNS(r3, r4))
+ return VG_FALSE;
+
+ a2 = y4 - y3;
+ b2 = x3 - x4;
+ c2 = x4 * y3 - x3 * y4;
+
+ r1 = a2 * x1 + b2 * y1 + c2;
+ r2 = a2 * x2 + b2 * y2 + c2;
+
+ if (r1 != 0 && r2 != 0 && SAME_SIGNS(r1, r2))
+ return VG_FALSE;
+
+ return VG_TRUE;
+}
+
+static INLINE VGfloat line_dx(const VGfloat *l)
+{
+ return l[2] - l[0];
+}
+
+static INLINE VGfloat line_dy(const VGfloat *l)
+{
+ return l[3] - l[1];
+}
+
+static INLINE VGfloat line_angle(const VGfloat *l)
+{
+ const VGfloat dx = line_dx(l);
+ const VGfloat dy = line_dy(l);
+
+ const VGfloat theta = atan2(-dy, dx) * 360.0 / M_2PI;
+
+ const VGfloat theta_normalized = theta < 0 ? theta + 360 : theta;
+
+ if (floatsEqual(theta_normalized, 360.f))
+ return 0;
+ else
+ return theta_normalized;
+}
+
+static INLINE void line_set_length(VGfloat *l, VGfloat len)
+{
+ VGfloat uv[] = {l[0], l[1], l[2], l[3]};
+ if (null_line(l))
+ return;
+ line_normalize(uv);
+ l[2] = l[0] + line_dx(uv) * len;
+ l[3] = l[1] + line_dy(uv) * len;
+}
+
+static INLINE void line_translate(VGfloat *l, VGfloat x, VGfloat y)
+{
+ l[0] += x;
+ l[1] += y;
+ l[2] += x;
+ l[3] += y;
+}
+
+static INLINE VGfloat line_angle_to(const VGfloat *l1,
+ const VGfloat *l2)
+{
+ VGfloat a1, a2, delta, delta_normalized;
+   if (null_line(l1) || null_line(l2))
+ return 0;
+
+ a1 = line_angle(l1);
+ a2 = line_angle(l2);
+
+ delta = a2 - a1;
+ delta_normalized = delta < 0 ? delta + 360 : delta;
+
+   if (floatsEqual(delta_normalized, 360.f))
+ return 0;
+ else
+ return delta_normalized;
+}
+
+static INLINE VGfloat line_angles(const VGfloat *l1,
+ const VGfloat *l2)
+{
+ VGfloat cos_line, rad = 0;
+
+ if (null_line(l1) || null_line(l2))
+ return 0;
+
+ cos_line = (line_dx(l1)*line_dx(l2) + line_dy(l1)*line_dy(l2)) /
+ (line_lengthv(l1)*line_lengthv(l2));
+ rad = 0;
+
+ if (cos_line >= -1.0 && cos_line <= 1.0)
+ rad = acos(cos_line);
+ return rad * 360 / M_2PI;
+}
+
+
+static INLINE VGfloat adapted_angle_on_x(const VGfloat *line)
+{
+ const VGfloat identity[] = {0, 0, 1, 0};
+ VGfloat angle = line_angles(line, identity);
+ if (line_dy(line) > 0)
+ angle = 360 - angle;
+ return angle;
+}
+
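+/* Intersect the infinite lines through l1 and l2 and report whether the
+ * intersection point lies within both segments (bounded) or outside them
+ * (unbounded).
+ */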
+static enum intersection_type line_intersect(const VGfloat *l1,
+ const VGfloat *l2,
+ float *intersection_point)
+{
+ VGfloat isect[2];
+ enum intersection_type type;
+ VGboolean dx_zero, ldx_zero;
+
+ if (null_line(l1) || null_line(l2) ||
+ !finite_coords4(l1) || !finite_coords4(l2))
+ return NoIntersections;
+
+ type = do_lines_intersect(l1[0], l1[1], l1[2], l1[3], l2[0], l2[1], l2[2], l2[3])
+ ? BoundedIntersection : UnboundedIntersection;
+
+ dx_zero = floatsEqual(line_dx(l1) + 1, 1);
+ ldx_zero = floatsEqual(line_dx(l2) + 1, 1);
+
+   /* handle vertical lines; two vertical lines are parallel (no intersection) */
+ if (dx_zero && ldx_zero) {
+ type = NoIntersections;
+ } else if (dx_zero) {
+ VGfloat la = line_dy(l2) / line_dx(l2);
+ isect[0] = l1[0];
+ isect[1] = la * l1[0] + l2[1] - la * l2[0];
+ } else if (ldx_zero) {
+ VGfloat ta = line_dy(l1) / line_dx(l1);
+ isect[0] = l2[0];
+ isect[1] = ta * l2[0] + l1[1] - ta*l1[0];
+ } else {
+ VGfloat x;
+ VGfloat ta = line_dy(l1) / line_dx(l1);
+ VGfloat la = line_dy(l2) / line_dx(l2);
+ if (ta == la)
+ return NoIntersections;
+
+ x = ( - l2[1] + la * l2[0] + l1[1] - ta * l1[0] ) / (la - ta);
+ isect[0] = x;
+ isect[1] = ta*(x - l1[0]) + l1[1];
+ }
+ if (intersection_point) {
+ intersection_point[0] = isect[0];
+ intersection_point[1] = isect[1];
+ }
+ return type;
+}
+
+static INLINE enum line_join_mode stroker_join_mode(struct stroker *s)
+{
+ switch(s->join_style) {
+ case VG_JOIN_MITER:
+ return MiterJoin;
+ case VG_JOIN_ROUND:
+ return RoundJoin;
+ case VG_JOIN_BEVEL:
+ return FlatJoin;
+ default:
+ return FlatJoin;
+ }
+}
+
+static INLINE enum line_join_mode stroker_cap_mode(struct stroker *s)
+{
+ switch(s->cap_style) {
+ case VG_CAP_BUTT:
+ return FlatJoin;
+ case VG_CAP_ROUND:
+ return RoundCap;
+ case VG_CAP_SQUARE:
+ return SquareJoin;
+ default:
+ return FlatJoin;
+ }
+}
+
+void stroker_emit_move_to(struct stroker *stroker, VGfloat x, VGfloat y)
+{
+ VGubyte cmds = VG_MOVE_TO_ABS;
+ VGfloat coords[2] = {x, y};
+#if DEBUG_EMITS
+ debug_printf("emit move %f, %f\n", x, y);
+#endif
+ stroker->back2_x = stroker->back1_x;
+ stroker->back2_y = stroker->back1_y;
+ stroker->back1_x = x;
+ stroker->back1_y = y;
+
+ path_append_data(stroker->path,
+ 1,
+ &cmds, &coords);
+}
+
+void stroker_emit_line_to(struct stroker *stroker, VGfloat x, VGfloat y)
+{
+ VGubyte cmds = VG_LINE_TO_ABS;
+ VGfloat coords[2] = {x, y};
+#if DEBUG_EMITS
+ debug_printf("emit line %f, %f\n", x, y);
+#endif
+ stroker->back2_x = stroker->back1_x;
+ stroker->back2_y = stroker->back1_y;
+ stroker->back1_x = x;
+ stroker->back1_y = y;
+ path_append_data(stroker->path,
+ 1,
+ &cmds, &coords);
+}
+
+void stroker_emit_curve_to(struct stroker *stroker, VGfloat px1, VGfloat py1,
+ VGfloat px2, VGfloat py2,
+ VGfloat x, VGfloat y)
+{
+ VGubyte cmds = VG_CUBIC_TO_ABS;
+ VGfloat coords[6] = {px1, py1, px2, py2, x, y};
+#if DEBUG_EMITS
+ debug_printf("emit curve %f, %f, %f, %f, %f, %f\n", px1, py1,
+ px2, py2, x, y);
+#endif
+
+ if (px2 == x && py2 == y) {
+ if (px1 == x && py1 == y) {
+ stroker->back2_x = stroker->back1_x;
+ stroker->back2_y = stroker->back1_y;
+ } else {
+ stroker->back2_x = px1;
+ stroker->back2_y = py1;
+ }
+ } else {
+ stroker->back2_x = px2;
+ stroker->back2_y = py2;
+ }
+ stroker->back1_x = x;
+ stroker->back1_y = y;
+
+ path_append_data(stroker->path,
+ 1,
+ &cmds, &coords);
+}
+
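+/* Emit a round join as a counter-clockwise arc from (x1, y1) to (x2, y2)
+ * with radii of half the given width and height.
+ */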
+static INLINE void create_round_join(struct stroker *stroker,
+ VGfloat x1, VGfloat y1,
+ VGfloat x2, VGfloat y2,
+ VGfloat width, VGfloat height)
+{
+ struct arc arc;
+ struct matrix matrix;
+
+ matrix_load_identity(&matrix);
+
+ /*stroker_emit_line_to(stroker, nx, ny);*/
+
+ arc_init(&arc, VG_SCCWARC_TO_ABS,
+ x1, y1, x2, y2, width/2, height/2, 0);
+ arc_stroker_emit(&arc, stroker, &matrix);
+}
+
+
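+/* Connect the outline emitted so far (ending at back1) to the start of
+ * next_line using the requested join or cap style.
+ */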
+static void create_joins(struct stroker *stroker,
+ VGfloat focal_x, VGfloat focal_y,
+ const VGfloat *next_line, enum line_join_mode join)
+{
+#if DEBUG_EMITS
+ debug_printf("create_joins: focal=[%f, %f], next_line=[%f, %f,%f, %f]\n",
+ focal_x, focal_y,
+ next_line[0], next_line[1], next_line[2], next_line[3]);
+#endif
+   /* if we're already connected, do nothing */
+ if (floatsEqual(stroker->back1_x, next_line[0]) &&
+ floatsEqual(stroker->back1_y, next_line[1]))
+ return;
+
+ if (join == FlatJoin) {
+ stroker_emit_line_to(stroker, next_line[0], next_line[1]);
+ } else {
+ VGfloat prev_line[] = {stroker->back2_x, stroker->back2_y,
+ stroker->back1_x, stroker->back1_y};
+
+ VGfloat isect[2];
+ enum intersection_type type = line_intersect(prev_line, next_line, isect);
+
+ if (join == SquareJoin) {
+ VGfloat offset = stroker->stroke_width / 2;
+ VGfloat l1[4] = {prev_line[0],
+ prev_line[1],
+ prev_line[2],
+ prev_line[3]};
+ VGfloat l2[4] = {next_line[2],
+ next_line[3],
+ next_line[0],
+ next_line[1]};
+
+ line_translate(l1, line_dx(l1), line_dy(l1));
+ line_set_length(l1, offset);
+
+ line_translate(l2, line_dx(l2), line_dy(l2));
+ line_set_length(l2, offset);
+
+ stroker_emit_line_to(stroker, l1[2], l1[3]);
+ stroker_emit_line_to(stroker, l2[2], l2[3]);
+ stroker_emit_line_to(stroker, l2[0], l2[1]);
+ } else if (join == RoundJoin) {
+ VGfloat offset = stroker->stroke_width / 2;
+ VGfloat short_cut[4] = {prev_line[2], prev_line[3],
+ next_line[0], next_line[1]};
+ VGfloat angle = line_angles(prev_line, short_cut);
+
+ if (type == BoundedIntersection ||
+ (angle > 90 && !floatsEqual(angle, 90.f))) {
+ stroker_emit_line_to(stroker, next_line[0], next_line[1]);
+ return;
+ }
+ create_round_join(stroker, prev_line[2], prev_line[3],
+ next_line[0], next_line[1],
+ offset * 2, offset * 2);
+
+ stroker_emit_line_to(stroker, next_line[0], next_line[1]);
+ } else if (join == RoundCap) {
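+         /* approximate the semicircular cap with two cubics whose control
+          * points lie KAPPA * offset along the tangent and normal */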
+ VGfloat offset = stroker->stroke_width / 2;
+ VGfloat l1[4] = { prev_line[0], prev_line[1],
+ prev_line[2], prev_line[3] };
+ VGfloat l2[4] = {focal_x, focal_y,
+ prev_line[2], prev_line[3]};
+
+ line_translate(l1, line_dx(l1), line_dy(l1));
+ line_set_length(l1, KAPPA * offset);
+
+ /* normal between prev_line and focal */
+ line_translate(l2, -line_dy(l2), line_dx(l2));
+ line_set_length(l2, KAPPA * offset);
+
+ stroker_emit_curve_to(stroker, l1[2], l1[3],
+ l2[2], l2[3],
+ l2[0], l2[1]);
+
+         /* reverse l2 about its start point */
+         l2[2] = l2[0] - line_dx(l2);
+         l2[3] = l2[1] - line_dy(l2);
+
+ line_translate(l1, next_line[0] - l1[0], next_line[1] - l1[1]);
+
+ stroker_emit_curve_to(stroker,
+ l2[2], l2[3],
+ l1[2], l1[3],
+ l1[0], l1[1]);
+ } else if (join == MiterJoin) {
+ VGfloat miter_line[4] = {stroker->back1_x, stroker->back1_y,
+ isect[0], isect[1]};
+ VGfloat sl = (stroker->stroke_width * stroker->miter_limit);
+ VGfloat inside_line[4] = {prev_line[2], prev_line[3],
+ next_line[0], next_line[1]};
+ VGfloat angle = line_angle_to(inside_line, prev_line);
+
+ if (type == BoundedIntersection ||
+ (angle > 90 && !floatsEqual(angle, 90.f))) {
+ /*
+ debug_printf("f = %f, nl = %f, pl = %f, is = %f\n",
+ focal_x, next_line[0],
+ prev_line[2], isect[0]);*/
+ stroker_emit_line_to(stroker, next_line[0], next_line[1]);
+ return;
+ }
+
+ if (type == NoIntersections || line_lengthv(miter_line) > sl) {
+ stroker_emit_line_to(stroker, next_line[0], next_line[1]);
+ } else {
+ stroker_emit_line_to(stroker, isect[0], isect[1]);
+ stroker_emit_line_to(stroker, next_line[0], next_line[1]);
+ }
+ } else {
+ debug_assert(!"create_joins bad join style");
+ }
+ }
+}
+
+static void stroker_add_segment(struct stroker *stroker,
+ VGPathCommand cmd,
+ const VGfloat *coords,
+ VGint num_coords)
+{
+ /* skip duplicated points */
+ if (stroker->segments->num_elements &&
+ stroker->last_cmd == cmd) {
+ VGfloat *data = stroker->control_points->data;
+ data += stroker->control_points->num_elements;
+ data -= num_coords;
+ switch (cmd) {
+ case VG_MOVE_TO_ABS:
+ if (floatsEqual(coords[0], data[0]) &&
+ floatsEqual(coords[1], data[1]))
+ return;
+ break;
+ case VG_LINE_TO_ABS:
+ if (floatsEqual(coords[0], data[0]) &&
+ floatsEqual(coords[1], data[1]))
+ return;
+ break;
+ case VG_CUBIC_TO_ABS:
+ if (floatsEqual(coords[0], data[0]) &&
+ floatsEqual(coords[1], data[1]) &&
+ floatsEqual(coords[2], data[2]) &&
+ floatsEqual(coords[3], data[3]) &&
+ floatsEqual(coords[4], data[4]) &&
+ floatsEqual(coords[5], data[5]))
+ return;
+ break;
+ default:
+ debug_assert(!"Invalid stroke segment");
+ }
+ } else if (stroker->last_cmd == VG_CUBIC_TO_ABS &&
+ cmd == VG_LINE_TO_ABS) {
+ VGfloat *data = stroker->control_points->data;
+ data += stroker->control_points->num_elements;
+ data -= 2;
+ if (floatsEqual(coords[0], data[0]) &&
+ floatsEqual(coords[1], data[1]))
+ return;
+ }
+ stroker->last_cmd = cmd;
+ array_append_data(stroker->segments, &cmd, 1);
+ array_append_data(stroker->control_points, coords, num_coords);
+}
+
+void stroker_move_to(struct stroker *stroker, VGfloat x, VGfloat y)
+{
+ VGfloat coords[2] = {x, y};
+#if STROKE_SEGMENTS
+ debug_printf("stroker_move_to(%f, %f)\n", x, y);
+#endif
+
+ if (stroker->segments->num_elements > 0)
+ stroker->process_subpath(stroker);
+
+ array_reset(stroker->segments);
+ array_reset(stroker->control_points);
+
+ stroker_add_segment(stroker, VG_MOVE_TO_ABS, coords, 2);
+}
+
+void stroker_line_to(struct stroker *stroker, VGfloat x, VGfloat y)
+{
+ VGfloat coords[] = {x, y};
+
+#if STROKE_SEGMENTS
+ debug_printf("stroker_line_to(%f, %f)\n", x, y);
+#endif
+ if (!stroker->segments->num_elements)
+ stroker_add_segment(stroker, VG_MOVE_TO_ABS, zero_coords, 2);
+
+ stroker_add_segment(stroker, VG_LINE_TO_ABS, coords, 2);
+}
+
+void stroker_curve_to(struct stroker *stroker, VGfloat px1, VGfloat py1,
+ VGfloat px2, VGfloat py2,
+ VGfloat x, VGfloat y)
+{
+ VGfloat coords[] = {px1, py1,
+ px2, py2,
+ x, y};
+#if STROKE_SEGMENTS
+ debug_printf("stroker_curve_to(%f, %f, %f, %f, %f, %f)\n",
+ px1, py1, px2, py2, x, y);
+#endif
+ if (!stroker->segments->num_elements)
+ stroker_add_segment(stroker, VG_MOVE_TO_ABS, zero_coords, 2);
+
+ stroker_add_segment(stroker, VG_CUBIC_TO_ABS, coords, 6);
+}
+
+static INLINE VGboolean is_segment_null(VGPathCommand cmd,
+ VGfloat *coords,
+ VGfloat *res)
+{
+ switch(cmd) {
+ case VG_MOVE_TO_ABS:
+ case VG_LINE_TO_ABS:
+ return floatsEqual(coords[0], res[0]) &&
+ floatsEqual(coords[1], res[1]);
+ break;
+ case VG_CUBIC_TO_ABS:
+ return floatsEqual(coords[0], res[0]) &&
+ floatsEqual(coords[1], res[1]) &&
+ floatsEqual(coords[2], res[0]) &&
+ floatsEqual(coords[3], res[1]) &&
+ floatsEqual(coords[4], res[0]) &&
+ floatsEqual(coords[5], res[1]);
+ break;
+ default:
+ assert(0);
+ }
+ return VG_FALSE;
+}
+
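+/* Walk one subpath with the given iterator and emit the outline lying half a
+ * stroke width to one side of it.  Returns VG_TRUE for closed (or
+ * single-point) subpaths.
+ */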
+static VGboolean vg_stroke_outline(struct stroke_iterator *it,
+ struct stroker *stroker,
+ VGboolean cap_first,
+ VGfloat *start_tangent)
+{
+ const int MAX_OFFSET = 16;
+ struct bezier offset_curves[MAX_OFFSET];
+ VGPathCommand first_element;
+ VGfloat start[2], prev[2];
+ VGboolean first = VG_TRUE;
+ VGfloat offset;
+
+ first_element = stroke_itr_command(it);
+ if (first_element != VG_MOVE_TO_ABS) {
+ stroker_emit_move_to(stroker, 0.f, 0.f);
+ prev[0] = 0.f;
+ prev[1] = 0.f;
+ }
+ stroke_itr_coords(it, start);
+#if STROKE_DEBUG
+ debug_printf(" -> (side) [%.2f, %.2f]\n",
+ start[0],
+ start[1]);
+#endif
+
+ prev[0] = start[0];
+ prev[1] = start[1];
+
+ offset = stroker->stroke_width / 2;
+
+ if (!it->has_next(it)) {
+ /* single point */
+
+ return VG_TRUE;
+ }
+
+ while (it->has_next(it)) {
+ VGPathCommand cmd;
+ VGfloat coords[8];
+
+ it->next(it);
+ cmd = stroke_itr_command(it);
+ stroke_itr_coords(it, coords);
+
+ if (cmd == VG_LINE_TO_ABS) {
+ VGfloat line[4] = {prev[0], prev[1], coords[0], coords[1]};
+ VGfloat normal[4];
+ line_normal(line, normal);
+
+#if STROKE_DEBUG
+ debug_printf("\n ---> (side) lineto [%.2f, %.2f]\n", coords[0], coords[1]);
+#endif
+ line_set_length(normal, offset);
+ line_translate(line, line_dx(normal), line_dy(normal));
+
+ /* if we are starting a new subpath, move to correct starting point */
+ if (first) {
+ if (cap_first)
+ create_joins(stroker, prev[0], prev[1], line,
+ stroker_cap_mode(stroker));
+ else
+ stroker_emit_move_to(stroker, line[0], line[1]);
+ memcpy(start_tangent, line,
+ sizeof(VGfloat) * 4);
+ first = VG_FALSE;
+ } else {
+ create_joins(stroker, prev[0], prev[1], line,
+ stroker_join_mode(stroker));
+ }
+
+ /* add the stroke for this line */
+ stroker_emit_line_to(stroker, line[2], line[3]);
+ prev[0] = coords[0];
+ prev[1] = coords[1];
+ } else if (cmd == VG_CUBIC_TO_ABS) {
+#if STROKE_DEBUG
+ debug_printf("\n ---> (side) cubicTo [%.2f, %.2f]\n",
+ coords[4],
+ coords[5]);
+#endif
+ struct bezier bezier;
+ int count;
+
+ bezier_init(&bezier,
+ prev[0], prev[1], coords[0], coords[1],
+ coords[2], coords[3], coords[4], coords[5]);
+
+ count = bezier_translate_by_normal(&bezier,
+ offset_curves,
+ MAX_OFFSET,
+ offset,
+ curve_threshold);
+
+ if (count) {
+ /* if we are starting a new subpath, move to correct starting point */
+ VGfloat tangent[4];
+ VGint i;
+
+ bezier_start_tangent(&bezier, tangent);
+ line_translate(tangent,
+ offset_curves[0].x1 - bezier.x1,
+ offset_curves[0].y1 - bezier.y1);
+ if (first) {
+ VGfloat pt[2] = {offset_curves[0].x1,
+ offset_curves[0].y1};
+
+ if (cap_first) {
+ create_joins(stroker, prev[0], prev[1], tangent,
+ stroker_cap_mode(stroker));
+ } else {
+ stroker_emit_move_to(stroker, pt[0], pt[1]);
+ }
+ start_tangent[0] = tangent[0];
+ start_tangent[1] = tangent[1];
+ start_tangent[2] = tangent[2];
+ start_tangent[3] = tangent[3];
+ first = VG_FALSE;
+ } else {
+ create_joins(stroker, prev[0], prev[1], tangent,
+ stroker_join_mode(stroker));
+ }
+
+ /* add these beziers */
+ for (i = 0; i < count; ++i) {
+ struct bezier *bez = &offset_curves[i];
+ stroker_emit_curve_to(stroker,
+ bez->x2, bez->y2,
+ bez->x3, bez->y3,
+ bez->x4, bez->y4);
+ }
+ }
+
+ prev[0] = coords[4];
+ prev[1] = coords[5];
+ }
+ }
+
+ if (floatsEqual(start[0], prev[0]) &&
+ floatsEqual(start[1], prev[1])) {
+ /* closed subpath, join first and last point */
+#if STROKE_DEBUG
+ debug_printf("\n stroker: closed subpath\n");
+#endif
+ create_joins(stroker, prev[0], prev[1], start_tangent,
+ stroker_join_mode(stroker));
+ return VG_TRUE;
+ } else {
+#if STROKE_DEBUG
+ debug_printf("\n stroker: open subpath\n");
+#endif
+ return VG_FALSE;
+ }
+}
+
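+/* Turn the collected segments of one subpath into stroke geometry: trace the
+ * outline forward and then backward, capping the two ends when the subpath
+ * is open, and handle the zero-length special cases required by the spec.
+ */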
+static void stroker_process_subpath(struct stroker *stroker)
+{
+ VGboolean fwclosed, bwclosed;
+ VGfloat fw_start_tangent[4], bw_start_tangent[4];
+ struct stroke_iterator fwit;
+ struct stroke_iterator bwit;
+ debug_assert(stroker->segments->num_elements > 0);
+
+ memset(fw_start_tangent, 0,
+ sizeof(VGfloat)*4);
+ memset(bw_start_tangent, 0,
+ sizeof(VGfloat)*4);
+
+ stroke_forward_iterator(&fwit, stroker->segments,
+ stroker->control_points);
+ stroke_backward_iterator(&bwit, stroker->segments,
+ stroker->control_points);
+
+ debug_assert(fwit.cmds[0] == VG_MOVE_TO_ABS);
+
+ fwclosed = vg_stroke_outline(&fwit, stroker, VG_FALSE, fw_start_tangent);
+ bwclosed = vg_stroke_outline(&bwit, stroker, !fwclosed, bw_start_tangent);
+
+ if (!bwclosed)
+ create_joins(stroker,
+ fwit.coords[0], fwit.coords[1], fw_start_tangent,
+ stroker_cap_mode(stroker));
+ else {
+      /* hack to handle the requirement of the VG spec that says that strokes
+       * of len==0 that have a square cap or round cap still need
+       * to be rendered. (8.7.4 Stroke Generation) */
+ if (stroker->segments->num_elements <= 3) {
+ VGPathCommand cmd;
+ VGfloat data[8], coords[8];
+ struct stroke_iterator *it = &fwit;
+
+ stroke_forward_iterator(it, stroker->segments,
+ stroker->control_points);
+ cmd = stroke_itr_command(it);
+ stroke_itr_coords(it, coords);
+ if (cmd != VG_MOVE_TO_ABS) {
+ memset(data, 0, sizeof(VGfloat) * 8);
+ if (!is_segment_null(cmd, coords, data))
+ return;
+ } else {
+ data[0] = coords[0];
+ data[1] = coords[1];
+ }
+ while (it->has_next(it)) {
+ it->next(it);
+ cmd = stroke_itr_command(it);
+ stroke_itr_coords(it, coords);
+ if (!is_segment_null(cmd, coords, data))
+ return;
+ }
+ /* generate the square/round cap */
+ if (stroker->cap_style == VG_CAP_SQUARE) {
+ VGfloat offset = stroker->stroke_width / 2;
+ stroker_emit_move_to(stroker, data[0] - offset,
+ data[1] - offset);
+ stroker_emit_line_to(stroker, data[0] + offset,
+ data[1] - offset);
+ stroker_emit_line_to(stroker, data[0] + offset,
+ data[1] + offset);
+ stroker_emit_line_to(stroker, data[0] - offset,
+ data[1] + offset);
+ stroker_emit_line_to(stroker, data[0] - offset,
+ data[1] - offset);
+ } else if (stroker->cap_style == VG_CAP_ROUND) {
+ VGfloat offset = stroker->stroke_width / 2;
+ VGfloat cx = data[0], cy = data[1];
+ { /*circle */
+ struct arc arc;
+ struct matrix matrix;
+ matrix_load_identity(&matrix);
+
+ stroker_emit_move_to(stroker, cx + offset, cy);
+ arc_init(&arc, VG_SCCWARC_TO_ABS,
+ cx + offset, cy,
+ cx - offset, cy,
+ offset, offset, 0);
+ arc_stroker_emit(&arc, stroker, &matrix);
+ arc_init(&arc, VG_SCCWARC_TO_ABS,
+ cx - offset, cy,
+ cx + offset, cy,
+ offset, offset, 0);
+ arc_stroker_emit(&arc, stroker, &matrix);
+ }
+ }
+ }
+ }
+}
+
+static INLINE VGfloat dash_pattern(struct dash_stroker *stroker,
+ VGint idx)
+{
+ if (stroker->dash_pattern[idx] < 0)
+ return 0.f;
+ return stroker->dash_pattern[idx];
+}
+
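+/* Flatten the subpath into line segments and walk the dash pattern along
+ * them, forwarding only the "on" intervals (even-indexed pattern entries)
+ * to the inner stroker.
+ */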
+static void dash_stroker_process_subpath(struct stroker *str)
+{
+ struct dash_stroker *stroker = (struct dash_stroker *)str;
+ VGfloat sum_length = 0;
+ VGint i;
+ VGint idash = 0;
+ VGfloat pos = 0;
+ VGfloat elen = 0;
+ VGfloat doffset = stroker->dash_phase;
+ VGfloat estart = 0;
+ VGfloat estop = 0;
+ VGfloat cline[4];
+ struct stroke_iterator it;
+ VGfloat prev[2];
+ VGfloat move_to_pos[2];
+ VGfloat line_to_pos[2];
+
+ VGboolean has_move_to = VG_FALSE;
+
+ stroke_flat_iterator(&it, stroker->base.segments,
+ stroker->base.control_points);
+
+ stroke_itr_coords(&it, prev);
+ move_to_pos[0] = prev[0];
+ move_to_pos[1] = prev[1];
+
+ debug_assert(stroker->dash_pattern_num > 0);
+
+ for (i = 0; i < stroker->dash_pattern_num; ++i) {
+ sum_length += dash_pattern(stroker, i);
+ }
+
+ if (floatIsZero(sum_length)) {
+ return;
+ }
+
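+   /* wrap the dash phase into [0, sum_length) and skip to the pattern entry
+    * it falls into */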
+ doffset -= floorf(doffset / sum_length) * sum_length;
+
+ while (!floatIsZero(doffset) && doffset >= dash_pattern(stroker, idash)) {
+ doffset -= dash_pattern(stroker, idash);
+ idash = (idash + 1) % stroker->dash_pattern_num;
+ }
+
+ while (it.has_next(&it)) {
+ VGPathCommand cmd;
+ VGfloat coords[8];
+ VGboolean done;
+
+ it.next(&it);
+ cmd = stroke_itr_command(&it);
+ stroke_itr_coords(&it, coords);
+
+ debug_assert(cmd == VG_LINE_TO_ABS);
+ cline[0] = prev[0];
+ cline[1] = prev[1];
+ cline[2] = coords[0];
+ cline[3] = coords[1];
+
+ elen = line_lengthv(cline);
+
+ estop = estart + elen;
+
+ done = pos >= estop;
+ while (!done) {
+ VGfloat p2[2];
+
+ VGint idash_incr = 0;
+ VGboolean has_offset = doffset > 0;
+ VGfloat dpos = pos + dash_pattern(stroker, idash) - doffset - estart;
+
+ debug_assert(dpos >= 0);
+
+ if (dpos > elen) { /* dash extends this line */
+ doffset = dash_pattern(stroker, idash) - (dpos - elen);
+ pos = estop;
+ done = VG_TRUE;
+ p2[0] = cline[2];
+ p2[1] = cline[3];
+ } else { /* Dash is on this line */
+ line_point_at(cline, dpos/elen, p2);
+ pos = dpos + estart;
+ done = pos >= estop;
+ idash_incr = 1;
+ doffset = 0;
+ }
+
+ if (idash % 2 == 0) {
+ line_to_pos[0] = p2[0];
+ line_to_pos[1] = p2[1];
+
+ if (!has_offset || !has_move_to) {
+ stroker_move_to(&stroker->stroker, move_to_pos[0], move_to_pos[1]);
+ has_move_to = VG_TRUE;
+ }
+ stroker_line_to(&stroker->stroker, line_to_pos[0], line_to_pos[1]);
+ } else {
+ move_to_pos[0] = p2[0];
+ move_to_pos[1] = p2[1];
+ }
+
+ idash = (idash + idash_incr) % stroker->dash_pattern_num;
+ }
+
+ estart = estop;
+ prev[0] = coords[0];
+ prev[1] = coords[1];
+ }
+
+ if (it.curve_poly) {
+ polygon_destroy(it.curve_poly);
+ it.curve_poly = 0;
+ }
+
+ stroker->base.path = stroker->stroker.path;
+}
+
+static void default_begin(struct stroker *stroker)
+{
+ array_reset(stroker->segments);
+ array_reset(stroker->control_points);
+}
+
+static void default_end(struct stroker *stroker)
+{
+ if (stroker->segments->num_elements > 0)
+ stroker->process_subpath(stroker);
+}
+
+
+static void dash_stroker_begin(struct stroker *stroker)
+{
+ struct dash_stroker *dasher =
+ (struct dash_stroker *)stroker;
+
+ default_begin(&dasher->stroker);
+ default_begin(stroker);
+}
+
+static void dash_stroker_end(struct stroker *stroker)
+{
+ struct dash_stroker *dasher =
+ (struct dash_stroker *)stroker;
+
+ default_end(stroker);
+ default_end(&dasher->stroker);
+}
+
+void stroker_init(struct stroker *stroker,
+ struct vg_state *state)
+{
+ stroker->stroke_width = state->stroke.line_width.f;
+ stroker->miter_limit = state->stroke.miter_limit.f;
+ stroker->cap_style = state->stroke.cap_style;
+ stroker->join_style = state->stroke.join_style;
+
+ stroker->begin = default_begin;
+ stroker->process_subpath = stroker_process_subpath;
+ stroker->end = default_end;
+
+ stroker->segments = array_create(sizeof(VGubyte));
+ stroker->control_points = array_create(sizeof(VGfloat));
+
+ stroker->back1_x = 0;
+ stroker->back1_y = 0;
+ stroker->back2_x = 0;
+ stroker->back2_y = 0;
+
+ stroker->path = path_create(VG_PATH_DATATYPE_F, 1.0f, 0.0f,
+ 0, 0, VG_PATH_CAPABILITY_ALL);
+
+ stroker->last_cmd = VG_CLOSE_PATH;
+}
+
+void dash_stroker_init(struct stroker *str,
+ struct vg_state *state)
+{
+ struct dash_stroker *stroker = (struct dash_stroker *)str;
+ int i;
+
+ stroker_init(str, state);
+ stroker_init(&stroker->stroker, state);
+
+ {
+ int real_num = state->stroke.dash_pattern_num;
+ if (real_num % 2)/* if odd, ignore the last one */
+ --real_num;
+ for (i = 0; i < real_num; ++i)
+ stroker->dash_pattern[i] = state->stroke.dash_pattern[i].f;
+ stroker->dash_pattern_num = real_num;
+ }
+
+ stroker->dash_phase = state->stroke.dash_phase.f;
+ stroker->dash_phase_reset = state->stroke.dash_phase_reset;
+
+ stroker->base.begin = dash_stroker_begin;
+ stroker->base.process_subpath = dash_stroker_process_subpath;
+ stroker->base.end = dash_stroker_end;
+ path_destroy(stroker->base.path);
+ stroker->base.path = NULL;
+}
+
+void stroker_begin(struct stroker *stroker)
+{
+ stroker->begin(stroker);
+}
+
+void stroker_end(struct stroker *stroker)
+{
+ stroker->end(stroker);
+}
+
+void stroker_cleanup(struct stroker *stroker)
+{
+ array_destroy(stroker->segments);
+ array_destroy(stroker->control_points);
+}
+
+void dash_stroker_cleanup(struct dash_stroker *stroker)
+{
+   /* if stroker->base.path is null it means we never
+    * processed a valid path, so delete the temporary one
+    * we created earlier */
+ if (!stroker->base.path)
+ path_destroy(stroker->stroker.path);
+ stroker_cleanup(&stroker->stroker);
+ stroker_cleanup((struct stroker*)stroker);
+}
diff --git a/src/gallium/state_trackers/vega/stroker.h b/src/gallium/state_trackers/vega/stroker.h
new file mode 100644
index 0000000000..36543cd923
--- /dev/null
+++ b/src/gallium/state_trackers/vega/stroker.h
@@ -0,0 +1,89 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef STROKER_H
+#define STROKER_H
+
+#include "VG/openvg.h"
+#include "api_consts.h"
+
+struct path;
+struct vg_state;
+struct array;
+
+struct stroker {
+ void (*begin)(struct stroker *stroker);
+ void (*process_subpath)(struct stroker *stroker);
+ void (*end)(struct stroker *stroker);
+
+ struct array *segments;
+ struct array *control_points;
+ struct path *path;
+
+ VGfloat back1_x, back1_y;
+ VGfloat back2_x, back2_y;
+
+ VGfloat stroke_width;
+ VGfloat miter_limit;
+ VGCapStyle cap_style;
+ VGJoinStyle join_style;
+
+ VGPathCommand last_cmd;
+};
+
+struct dash_stroker {
+ struct stroker base;
+
+ struct stroker stroker;
+
+ VGfloat dash_pattern[VEGA_MAX_DASH_COUNT];
+ VGint dash_pattern_num;
+ VGfloat dash_phase;
+ VGboolean dash_phase_reset;
+};
+
+void stroker_init(struct stroker *stroker,
+ struct vg_state *state);
+void dash_stroker_init(struct stroker *stroker,
+ struct vg_state *state);
+void dash_stroker_cleanup(struct dash_stroker *stroker);
+void stroker_cleanup(struct stroker *stroker);
+
+void stroker_begin(struct stroker *stroker);
+void stroker_move_to(struct stroker *stroker, VGfloat x, VGfloat y);
+void stroker_line_to(struct stroker *stroker, VGfloat x, VGfloat y);
+void stroker_curve_to(struct stroker *stroker, VGfloat px1, VGfloat py1,
+ VGfloat px2, VGfloat py2,
+ VGfloat x, VGfloat y);
+void stroker_end(struct stroker *stroker);
+
+void stroker_emit_move_to(struct stroker *stroker, VGfloat x, VGfloat y);
+void stroker_emit_line_to(struct stroker *stroker, VGfloat x, VGfloat y);
+void stroker_emit_curve_to(struct stroker *stroker, VGfloat px1, VGfloat py1,
+ VGfloat px2, VGfloat py2,
+ VGfloat x, VGfloat y);
+
+#endif
diff --git a/src/gallium/state_trackers/vega/util_array.h b/src/gallium/state_trackers/vega/util_array.h
new file mode 100644
index 0000000000..4343dfe36e
--- /dev/null
+++ b/src/gallium/state_trackers/vega/util_array.h
@@ -0,0 +1,122 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef UTIL_ARRAY_H
+#define UTIL_ARRAY_H
+
+#include "util/u_memory.h"
+
+#define DEFAULT_ARRAY_SIZE 256
+
+struct array {
+ VGint datatype_size;
+ void *data;
+ VGint size;
+ VGint num_elements;
+};
+
+static INLINE struct array *array_create(VGint datatype_size)
+{
+ struct array *array = CALLOC_STRUCT(array);
+ array->datatype_size = datatype_size;
+
+ array->size = DEFAULT_ARRAY_SIZE;
+ array->data = malloc(array->size * array->datatype_size);
+
+ return array;
+}
+
+
+static INLINE struct array *array_create_size(VGint datatype_size, VGint size)
+{
+ struct array *array = CALLOC_STRUCT(array);
+ array->datatype_size = datatype_size;
+
+ array->size = size;
+ array->data = malloc(array->size * array->datatype_size);
+
+ return array;
+}
+
+static INLINE void array_destroy(struct array *array)
+{
+ if (array)
+ free(array->data);
+ free(array);
+}
+
+static INLINE void array_resize(struct array *array, int num)
+{
+   VGint size = array->datatype_size * num;
+   VGint copy_size = array->size * array->datatype_size;
+   void *data = malloc(size);
+
+   /* when shrinking, copy only as much as fits in the new allocation */
+   if (copy_size > size)
+      copy_size = size;
+   memcpy(data, array->data, copy_size);
+   free(array->data);
+   array->data = data;
+   array->size = num;
+   array->num_elements = (array->num_elements > num) ? num :
+                          array->num_elements;
+}
+
+static INLINE void array_append_data(struct array *array,
+ const void *data, int num_elements)
+{
+ VGbyte *adata;
+
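+   /* grow the backing store by 1.5x until the new elements fit */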
+ while (array->num_elements + num_elements > array->size) {
+ array_resize(array, (array->num_elements + num_elements) * 1.5);
+ }
+ adata = (VGbyte *)array->data;
+ memcpy(adata + (array->num_elements * array->datatype_size), data,
+ num_elements * array->datatype_size);
+ array->num_elements += num_elements;
+}
+
+static INLINE void array_change_data(struct array *array,
+ const void *data,
+ int start_idx,
+ int num_elements)
+{
+ VGbyte *adata = (VGbyte *)array->data;
+ memcpy(adata + (start_idx * array->datatype_size), data,
+ num_elements * array->datatype_size);
+}
+
+static INLINE void array_remove_element(struct array *array,
+ int idx)
+{
+ VGbyte *adata = (VGbyte *)array->data;
+ memmove(adata + (idx * array->datatype_size),
+ adata + ((idx + 1) * array->datatype_size),
+ (array->num_elements - idx - 1) * array->datatype_size);
+ --array->num_elements;
+}
+
+static INLINE void array_reset(struct array *array)
+{
+ array->num_elements = 0;
+}
+
+#endif
diff --git a/src/gallium/state_trackers/vega/vg_context.c b/src/gallium/state_trackers/vega/vg_context.c
new file mode 100644
index 0000000000..e0ff02f3a9
--- /dev/null
+++ b/src/gallium/state_trackers/vega/vg_context.c
@@ -0,0 +1,543 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vg_context.h"
+
+#include "paint.h"
+#include "renderer.h"
+#include "shaders_cache.h"
+#include "shader.h"
+#include "asm_util.h"
+#include "st_inlines.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_inlines.h"
+#include "pipe/p_shader_tokens.h"
+
+#include "cso_cache/cso_context.h"
+
+#include "util/u_simple_shaders.h"
+#include "util/u_memory.h"
+#include "util/u_blit.h"
+
+struct vg_context *_vg_context = 0;
+
+struct vg_context * vg_current_context(void)
+{
+ return _vg_context;
+}
+
+static void init_clear(struct vg_context *st)
+{
+ struct pipe_context *pipe = st->pipe;
+
+ /* rasterizer state: bypass clipping */
+ memset(&st->clear.raster, 0, sizeof(st->clear.raster));
+ st->clear.raster.gl_rasterization_rules = 1;
+
+ /* fragment shader state: color pass-through program */
+ st->clear.fs =
+ util_make_fragment_passthrough_shader(pipe);
+}
+
+void vg_set_current_context(struct vg_context *ctx)
+{
+ _vg_context = ctx;
+}
+
+struct vg_context * vg_create_context(struct pipe_context *pipe,
+ const void *visual,
+ struct vg_context *share)
+{
+ struct vg_context *ctx;
+
+ ctx = CALLOC_STRUCT(vg_context);
+
+ ctx->pipe = pipe;
+
+ vg_init_state(&ctx->state.vg);
+ ctx->state.dirty = ALL_DIRTY;
+
+ ctx->cso_context = cso_create_context(pipe);
+
+ init_clear(ctx);
+
+ ctx->default_paint = paint_create(ctx);
+ ctx->state.vg.stroke_paint = ctx->default_paint;
+ ctx->state.vg.fill_paint = ctx->default_paint;
+
+
+ ctx->mask.sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ ctx->mask.sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ ctx->mask.sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ ctx->mask.sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
+ ctx->mask.sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
+ ctx->mask.sampler.normalized_coords = 0;
+
+ ctx->blend_sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ ctx->blend_sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ ctx->blend_sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ ctx->blend_sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
+ ctx->blend_sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
+ ctx->blend_sampler.normalized_coords = 0;
+
+ vg_set_error(ctx, VG_NO_ERROR);
+
+ ctx->owned_objects[VG_OBJECT_PAINT] = cso_hash_create();
+ ctx->owned_objects[VG_OBJECT_IMAGE] = cso_hash_create();
+ ctx->owned_objects[VG_OBJECT_MASK] = cso_hash_create();
+ ctx->owned_objects[VG_OBJECT_FONT] = cso_hash_create();
+ ctx->owned_objects[VG_OBJECT_PATH] = cso_hash_create();
+
+ ctx->renderer = renderer_create(ctx);
+ ctx->sc = shaders_cache_create(ctx);
+ ctx->shader = shader_create(ctx);
+
+ ctx->blit = util_create_blit(ctx->pipe, ctx->cso_context);
+
+ return ctx;
+}
+
+void vg_destroy_context(struct vg_context *ctx)
+{
+ struct pipe_constant_buffer *cbuf = &ctx->mask.cbuf;
+ struct pipe_constant_buffer *vsbuf = &ctx->vs_const_buffer;
+
+ util_destroy_blit(ctx->blit);
+ renderer_destroy(ctx->renderer);
+ shaders_cache_destroy(ctx->sc);
+ shader_destroy(ctx->shader);
+ paint_destroy(ctx->default_paint);
+
+ if (cbuf && cbuf->buffer)
+ pipe_buffer_reference(&cbuf->buffer, NULL);
+
+ if (vsbuf && vsbuf->buffer)
+ pipe_buffer_reference(&vsbuf->buffer, NULL);
+
+ if (ctx->clear.fs) {
+ cso_delete_fragment_shader(ctx->cso_context, ctx->clear.fs);
+ ctx->clear.fs = NULL;
+ }
+
+ if (ctx->plain_vs) {
+ vg_shader_destroy(ctx, ctx->plain_vs);
+ ctx->plain_vs = NULL;
+ }
+ if (ctx->clear_vs) {
+ vg_shader_destroy(ctx, ctx->clear_vs);
+ ctx->clear_vs = NULL;
+ }
+ if (ctx->texture_vs) {
+ vg_shader_destroy(ctx, ctx->texture_vs);
+ ctx->texture_vs = NULL;
+ }
+
+ if (ctx->pass_through_depth_fs)
+ vg_shader_destroy(ctx, ctx->pass_through_depth_fs);
+ if (ctx->mask.union_fs)
+ vg_shader_destroy(ctx, ctx->mask.union_fs);
+ if (ctx->mask.intersect_fs)
+ vg_shader_destroy(ctx, ctx->mask.intersect_fs);
+ if (ctx->mask.subtract_fs)
+ vg_shader_destroy(ctx, ctx->mask.subtract_fs);
+ if (ctx->mask.set_fs)
+ vg_shader_destroy(ctx, ctx->mask.set_fs);
+
+ cso_release_all(ctx->cso_context);
+ cso_destroy_context(ctx->cso_context);
+
+ cso_hash_delete(ctx->owned_objects[VG_OBJECT_PAINT]);
+ cso_hash_delete(ctx->owned_objects[VG_OBJECT_IMAGE]);
+ cso_hash_delete(ctx->owned_objects[VG_OBJECT_MASK]);
+ cso_hash_delete(ctx->owned_objects[VG_OBJECT_FONT]);
+ cso_hash_delete(ctx->owned_objects[VG_OBJECT_PATH]);
+
+ free(ctx);
+}
+
+void vg_init_object(struct vg_object *obj, struct vg_context *ctx, enum vg_object_type type)
+{
+ obj->type = type;
+ obj->ctx = ctx;
+}
+
+VGboolean vg_context_is_object_valid(struct vg_context *ctx,
+ enum vg_object_type type,
+ void *ptr)
+{
+ if (ctx) {
+ struct cso_hash *hash = ctx->owned_objects[type];
+ if (!hash)
+ return VG_FALSE;
+ return cso_hash_contains(hash, (unsigned)(long)ptr);
+ }
+ return VG_FALSE;
+}
+
+void vg_context_add_object(struct vg_context *ctx,
+ enum vg_object_type type,
+ void *ptr)
+{
+ if (ctx) {
+ struct cso_hash *hash = ctx->owned_objects[type];
+ if (!hash)
+ return;
+ cso_hash_insert(hash, (unsigned)(long)ptr, ptr);
+ }
+}
+
+void vg_context_remove_object(struct vg_context *ctx,
+ enum vg_object_type type,
+ void *ptr)
+{
+ if (ctx) {
+ struct cso_hash *hash = ctx->owned_objects[type];
+ if (!hash)
+ return;
+ cso_hash_take(hash, (unsigned)(long)ptr);
+ }
+}
+
+static void update_clip_state(struct vg_context *ctx)
+{
+ struct pipe_depth_stencil_alpha_state *dsa = &ctx->state.g3d.dsa;
+ struct vg_state *state = &ctx->state.vg;
+
+ memset(dsa, 0, sizeof(struct pipe_depth_stencil_alpha_state));
+
+ if (state->scissoring) {
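+      /* Scissoring is done through the depth buffer: clear depth to 1.0,
+       * draw the scissor rectangles at depth 0.0 with color writes disabled,
+       * then leave the depth test enabled with GEQUAL so later rendering is
+       * discarded outside the rectangles.
+       */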
+ struct pipe_blend_state *blend = &ctx->state.g3d.blend;
+      struct pipe_framebuffer_state *fb = &ctx->state.g3d.fb;
+      VGint i;
+ dsa->depth.writemask = 1;/*glDepthMask(TRUE);*/
+ dsa->depth.func = PIPE_FUNC_ALWAYS;
+ dsa->depth.enabled = 1;
+
+ cso_save_blend(ctx->cso_context);
+ cso_save_fragment_shader(ctx->cso_context);
+ /* set a passthrough shader */
+ if (!ctx->pass_through_depth_fs)
+ ctx->pass_through_depth_fs = shader_create_from_text(ctx->pipe,
+ pass_through_depth_asm,
+ 40,
+ PIPE_SHADER_FRAGMENT);
+ cso_set_fragment_shader_handle(ctx->cso_context,
+ ctx->pass_through_depth_fs->driver);
+ cso_set_depth_stencil_alpha(ctx->cso_context, dsa);
+
+ ctx->pipe->clear(ctx->pipe, PIPE_CLEAR_DEPTHSTENCIL, NULL, 1.0, 0);
+
+ /* disable color writes */
+ blend->colormask = 0; /*disable colorwrites*/
+ cso_set_blend(ctx->cso_context, blend);
+
+ /* enable scissoring */
+      for (i = 0; i < state->scissor_rects_num; ++i) {
+ const float x = state->scissor_rects[i * 4 + 0].f;
+ const float y = state->scissor_rects[i * 4 + 1].f;
+ const float width = state->scissor_rects[i * 4 + 2].f;
+ const float height = state->scissor_rects[i * 4 + 3].f;
+ VGfloat minx, miny, maxx, maxy;
+
+ minx = 0;
+ miny = 0;
+ maxx = fb->width;
+ maxy = fb->height;
+
+ if (x > minx)
+ minx = x;
+ if (y > miny)
+ miny = y;
+
+ if (x + width < maxx)
+ maxx = x + width;
+ if (y + height < maxy)
+ maxy = y + height;
+
+ /* check for null space */
+ if (minx >= maxx || miny >= maxy)
+ minx = miny = maxx = maxy = 0;
+
+ /*glClear(GL_DEPTH_BUFFER_BIT);*/
+ renderer_draw_quad(ctx->renderer, minx, miny, maxx, maxy, 0.0f);
+ }
+
+ blend->colormask = 1; /*enable colorwrites*/
+ cso_restore_blend(ctx->cso_context);
+ cso_restore_fragment_shader(ctx->cso_context);
+
+ dsa->depth.enabled = 1; /* glEnable(GL_DEPTH_TEST); */
+ dsa->depth.writemask = 0;/*glDepthMask(FALSE);*/
+ dsa->depth.func = PIPE_FUNC_GEQUAL;
+ }
+}
+
+void vg_validate_state(struct vg_context *ctx)
+{
+ if ((ctx->state.dirty & BLEND_DIRTY)) {
+ struct pipe_blend_state *blend = &ctx->state.g3d.blend;
+ memset(blend, 0, sizeof(struct pipe_blend_state));
+ blend->blend_enable = 1;
+ blend->colormask |= PIPE_MASK_R;
+ blend->colormask |= PIPE_MASK_G;
+ blend->colormask |= PIPE_MASK_B;
+ blend->colormask |= PIPE_MASK_A;
+
+ switch (ctx->state.vg.blend_mode) {
+ case VG_BLEND_SRC:
+ blend->rgb_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend->alpha_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend->rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ blend->alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ break;
+ case VG_BLEND_SRC_OVER:
+ blend->rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
+ blend->alpha_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend->rgb_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
+ blend->alpha_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
+ break;
+ case VG_BLEND_DST_OVER:
+ blend->rgb_src_factor = PIPE_BLENDFACTOR_INV_DST_ALPHA;
+ blend->alpha_src_factor = PIPE_BLENDFACTOR_INV_DST_ALPHA;
+ blend->rgb_dst_factor = PIPE_BLENDFACTOR_DST_ALPHA;
+ blend->alpha_dst_factor = PIPE_BLENDFACTOR_DST_ALPHA;
+ break;
+ case VG_BLEND_SRC_IN:
+ blend->rgb_src_factor = PIPE_BLENDFACTOR_DST_ALPHA;
+ blend->alpha_src_factor = PIPE_BLENDFACTOR_DST_ALPHA;
+ blend->rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ blend->alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ break;
+ case VG_BLEND_DST_IN:
+ blend->rgb_src_factor = PIPE_BLENDFACTOR_ZERO;
+ blend->alpha_src_factor = PIPE_BLENDFACTOR_ZERO;
+ blend->rgb_dst_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
+ blend->alpha_dst_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
+ break;
+ case VG_BLEND_MULTIPLY:
+ case VG_BLEND_SCREEN:
+ case VG_BLEND_DARKEN:
+ case VG_BLEND_LIGHTEN:
+ blend->rgb_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend->alpha_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend->rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ blend->alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ break;
+ case VG_BLEND_ADDITIVE:
+ blend->rgb_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend->alpha_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend->rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
+ blend->alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
+ break;
+ default:
+ assert(!"not implemented blend mode");
+ }
+ cso_set_blend(ctx->cso_context, &ctx->state.g3d.blend);
+ }
+ if ((ctx->state.dirty & RASTERIZER_DIRTY)) {
+ struct pipe_rasterizer_state *raster = &ctx->state.g3d.rasterizer;
+ memset(raster, 0, sizeof(struct pipe_rasterizer_state));
+ raster->gl_rasterization_rules = 1;
+ cso_set_rasterizer(ctx->cso_context, &ctx->state.g3d.rasterizer);
+ }
+ if ((ctx->state.dirty & VIEWPORT_DIRTY)) {
+ struct pipe_framebuffer_state *fb = &ctx->state.g3d.fb;
+ const VGint param_bytes = 8 * sizeof(VGfloat);
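+      /* vertex shader constants: a 2/width, 2/height scale and a -1 bias
+       * that map framebuffer coordinates to clip space */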
+ VGfloat vs_consts[8] = {
+ 2.f/fb->width, 2.f/fb->height, 1, 1,
+ -1, -1, 0, 0
+ };
+ struct pipe_constant_buffer *cbuf = &ctx->vs_const_buffer;
+
+ vg_set_viewport(ctx, VEGA_Y0_BOTTOM);
+
+ pipe_buffer_reference(&cbuf->buffer, NULL);
+ cbuf->buffer = pipe_buffer_create(ctx->pipe->screen, 16,
+ PIPE_BUFFER_USAGE_CONSTANT,
+ param_bytes);
+
+ if (cbuf->buffer) {
+ st_no_flush_pipe_buffer_write(ctx, cbuf->buffer,
+ 0, param_bytes, vs_consts);
+ }
+ ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_VERTEX, 0, cbuf);
+ }
+ if ((ctx->state.dirty & VS_DIRTY)) {
+ cso_set_vertex_shader_handle(ctx->cso_context,
+ vg_plain_vs(ctx));
+ }
+
+ /* must be last because it renders to the depth buffer*/
+ if ((ctx->state.dirty & DEPTH_STENCIL_DIRTY)) {
+ update_clip_state(ctx);
+ cso_set_depth_stencil_alpha(ctx->cso_context, &ctx->state.g3d.dsa);
+ }
+
+ shader_set_masking(ctx->shader, ctx->state.vg.masking);
+ shader_set_image_mode(ctx->shader, ctx->state.vg.image_mode);
+
+ ctx->state.dirty = NONE_DIRTY;
+}
+
+VGboolean vg_object_is_valid(void *ptr, enum vg_object_type type)
+{
+ struct vg_object *obj = ptr;
+ if (ptr && is_aligned(obj) && obj->type == type)
+ return VG_TRUE;
+ else
+ return VG_FALSE;
+}
+
+void vg_set_error(struct vg_context *ctx,
+ VGErrorCode code)
+{
+   /* vgGetError returns the oldest error code provided by
+    * an API call on the current context since the previous
+    * call to vgGetError on that context (or since the creation
+    * of the context). */
+ if (ctx->_error == VG_NO_ERROR)
+ ctx->_error = code;
+}
+
+void vg_prepare_blend_surface(struct vg_context *ctx)
+{
+ struct pipe_surface *dest_surface = NULL;
+ struct pipe_context *pipe = ctx->pipe;
+ struct st_framebuffer *stfb = ctx->draw_buffer;
+ struct st_renderbuffer *strb = stfb->strb;
+
+ /* first finish all pending rendering */
+ vgFinish();
+
+ dest_surface = pipe->screen->get_tex_surface(pipe->screen,
+ stfb->blend_texture,
+ 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+ /* flip it, because we want to use it as a sampler */
+ util_blit_pixels_tex(ctx->blit,
+ strb->texture,
+ 0, strb->height,
+ strb->width, 0,
+ dest_surface,
+ 0, 0,
+ strb->width, strb->height,
+ 0.0, PIPE_TEX_MIPFILTER_NEAREST);
+
+ if (dest_surface)
+ pipe_surface_reference(&dest_surface, NULL);
+
+ /* make sure it's complete */
+ vgFinish();
+}
+
+
+void vg_prepare_blend_surface_from_mask(struct vg_context *ctx)
+{
+ struct pipe_surface *dest_surface = NULL;
+ struct pipe_context *pipe = ctx->pipe;
+ struct st_framebuffer *stfb = ctx->draw_buffer;
+ struct st_renderbuffer *strb = stfb->strb;
+
+ vg_validate_state(ctx);
+
+ /* first finish all pending rendering */
+ vgFinish();
+
+ dest_surface = pipe->screen->get_tex_surface(pipe->screen,
+ stfb->blend_texture,
+ 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /* flip it, because we want to use it as a sampler */
+ util_blit_pixels_tex(ctx->blit,
+ stfb->alpha_mask,
+ 0, strb->height,
+ strb->width, 0,
+ dest_surface,
+ 0, 0,
+ strb->width, strb->height,
+ 0.0, PIPE_TEX_MIPFILTER_NEAREST);
+
+ /* make sure it's complete */
+ vgFinish();
+
+ if (dest_surface)
+ pipe_surface_reference(&dest_surface, NULL);
+}
+
+void * vg_plain_vs(struct vg_context *ctx)
+{
+ if (!ctx->plain_vs) {
+ ctx->plain_vs = shader_create_from_text(ctx->pipe,
+ vs_plain_asm,
+ 200,
+ PIPE_SHADER_VERTEX);
+ }
+
+ return ctx->plain_vs->driver;
+}
+
+
+void * vg_clear_vs(struct vg_context *ctx)
+{
+ if (!ctx->clear_vs) {
+ ctx->clear_vs = shader_create_from_text(ctx->pipe,
+ vs_clear_asm,
+ 200,
+ PIPE_SHADER_VERTEX);
+ }
+
+ return ctx->clear_vs->driver;
+}
+
+void * vg_texture_vs(struct vg_context *ctx)
+{
+ if (!ctx->texture_vs) {
+ ctx->texture_vs = shader_create_from_text(ctx->pipe,
+ vs_texture_asm,
+ 200,
+ PIPE_SHADER_VERTEX);
+ }
+
+ return ctx->texture_vs->driver;
+}
+
+void vg_set_viewport(struct vg_context *ctx, VegaOrientation orientation)
+{
+ struct pipe_viewport_state viewport;
+ struct pipe_framebuffer_state *fb = &ctx->state.g3d.fb;
+ VGfloat y_scale = (orientation == VEGA_Y0_BOTTOM) ? -2.f : 2.f;
+
+ viewport.scale[0] = fb->width / 2.f;
+ viewport.scale[1] = fb->height / y_scale;
+ viewport.scale[2] = 1.0;
+ viewport.scale[3] = 1.0;
+ viewport.translate[0] = fb->width / 2.f;
+ viewport.translate[1] = fb->height / 2.f;
+ viewport.translate[2] = 0.0;
+ viewport.translate[3] = 0.0;
+
+ cso_set_viewport(ctx->cso_context, &viewport);
+}
diff --git a/src/gallium/state_trackers/vega/vg_context.h b/src/gallium/state_trackers/vega/vg_context.h
new file mode 100644
index 0000000000..ccc8889c8c
--- /dev/null
+++ b/src/gallium/state_trackers/vega/vg_context.h
@@ -0,0 +1,292 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef VG_CONTEXT_H
+#define VG_CONTEXT_H
+
+#include "vg_state.h"
+
+#include "pipe/p_format.h"
+#include "pipe/p_state.h"
+#include "util/u_pointer.h"
+#include "util/u_math.h"
+
+#include "cso_cache/cso_hash.h"
+#include "cso_cache/cso_context.h"
+
+struct renderer;
+struct shaders_cache;
+struct shader;
+struct vg_shader;
+
+struct st_renderbuffer {
+ enum pipe_format format;
+ struct pipe_surface *surface;
+ struct pipe_texture *texture;
+ VGint width, height;
+};
+
+struct st_framebuffer {
+ VGint init_width, init_height;
+ struct st_renderbuffer *strb;
+ struct st_renderbuffer *dsrb;
+
+ struct pipe_texture *alpha_mask;
+
+ struct pipe_texture *blend_texture;
+
+ void *privateData;
+};
+
+enum vg_object_type {
+ VG_OBJECT_UNKNOWN = 0,
+ VG_OBJECT_PAINT,
+ VG_OBJECT_IMAGE,
+ VG_OBJECT_MASK,
+ VG_OBJECT_FONT,
+ VG_OBJECT_PATH,
+
+ VG_OBJECT_LAST
+};
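+
+/* Bits of gallium state that have changed and must be re-validated
+ * before the next draw. */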
+enum dirty_state {
+ NONE_DIRTY = 0<<0,
+ BLEND_DIRTY = 1<<1,
+ RASTERIZER_DIRTY = 1<<2,
+ VIEWPORT_DIRTY = 1<<3,
+ VS_DIRTY = 1<<4,
+ DEPTH_STENCIL_DIRTY = 1<<5,
+ ALL_DIRTY = BLEND_DIRTY | RASTERIZER_DIRTY |
+ VIEWPORT_DIRTY | VS_DIRTY | DEPTH_STENCIL_DIRTY
+};
+
+struct vg_context
+{
+ struct pipe_context *pipe;
+
+ struct {
+ struct vg_state vg;
+ struct {
+ struct pipe_blend_state blend;
+ struct pipe_rasterizer_state rasterizer;
+ struct pipe_shader_state vs_state;
+ struct pipe_depth_stencil_alpha_state dsa;
+ struct pipe_framebuffer_state fb;
+ } g3d;
+ VGbitfield dirty;
+ } state;
+
+ VGErrorCode _error;
+
+ struct st_framebuffer *draw_buffer;
+
+ struct cso_hash *owned_objects[VG_OBJECT_LAST];
+
+ struct {
+ struct pipe_shader_state vert_shader;
+ struct pipe_shader_state frag_shader;
+ struct pipe_rasterizer_state raster;
+ void *fs;
+ float vertices[4][2][4]; /**< vertex pos + color */
+ } clear;
+
+ struct {
+ struct pipe_constant_buffer cbuf;
+ struct pipe_sampler_state sampler;
+
+ struct vg_shader *union_fs;
+ struct vg_shader *intersect_fs;
+ struct vg_shader *subtract_fs;
+ struct vg_shader *set_fs;
+ } mask;
+
+ struct vg_shader *pass_through_depth_fs;
+
+ struct cso_context *cso_context;
+
+ struct pipe_buffer *stencil_quad;
+ VGfloat stencil_vertices[4][2][4];
+
+ struct renderer *renderer;
+ struct shaders_cache *sc;
+ struct shader *shader;
+
+ struct pipe_sampler_state blend_sampler;
+ struct {
+ struct pipe_constant_buffer buffer;
+ void *color_matrix_fs;
+ } filter;
+ struct vg_paint *default_paint;
+
+ struct blit_state *blit;
+
+ struct vg_shader *plain_vs;
+ struct vg_shader *clear_vs;
+ struct vg_shader *texture_vs;
+ struct pipe_constant_buffer vs_const_buffer;
+};
+
+struct vg_object {
+ enum vg_object_type type;
+ struct vg_context *ctx;
+};
+void vg_init_object(struct vg_object *obj, struct vg_context *ctx, enum vg_object_type type);
+VGboolean vg_object_is_valid(void *ptr, enum vg_object_type type);
+
+struct vg_context *vg_create_context(struct pipe_context *pipe,
+ const void *visual,
+ struct vg_context *share);
+void vg_destroy_context(struct vg_context *ctx);
+struct vg_context *vg_current_context(void);
+void vg_set_current_context(struct vg_context *ctx);
+
+VGboolean vg_context_is_object_valid(struct vg_context *ctx,
+ enum vg_object_type type,
+ void *ptr);
+void vg_context_add_object(struct vg_context *ctx,
+ enum vg_object_type type,
+ void *ptr);
+void vg_context_remove_object(struct vg_context *ctx,
+ enum vg_object_type type,
+ void *ptr);
+
+void vg_validate_state(struct vg_context *ctx);
+
+void vg_set_error(struct vg_context *ctx,
+ VGErrorCode code);
+
+void vg_prepare_blend_surface(struct vg_context *ctx);
+void vg_prepare_blend_surface_from_mask(struct vg_context *ctx);
+
+
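+/* Return VG_TRUE if 'ptr' is aligned to the given byte boundary. */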
+static INLINE VGboolean is_aligned_to(const void *ptr, VGbyte alignment)
+{
+ void *aligned = align_pointer(ptr, alignment);
+ return (ptr == aligned) ? VG_TRUE : VG_FALSE;
+}
+
+static INLINE VGboolean is_aligned(const void *ptr)
+{
+ return is_aligned_to(ptr, 4);
+}
+
+static INLINE void vg_shift_rectx(VGfloat coords[4],
+ const VGfloat *bounds,
+ const VGfloat shift)
+{
+ coords[0] += shift;
+ coords[2] -= shift;
+ if (bounds) {
+ coords[2] = MIN2(coords[2], bounds[2]);
+ /* bound x/y + width/height */
+ if ((coords[0] + coords[2]) > (bounds[0] + bounds[2])) {
+ coords[2] = (bounds[0] + bounds[2]) - coords[0];
+ }
+ }
+}
+
+static INLINE void vg_shift_recty(VGfloat coords[4],
+ const VGfloat *bounds,
+ const VGfloat shift)
+{
+ coords[1] += shift;
+ coords[3] -= shift;
+ if (bounds) {
+ coords[3] = MIN2(coords[3], bounds[3]);
+ if ((coords[1] + coords[3]) > (bounds[1] + bounds[3])) {
+ coords[3] = (bounds[1] + bounds[3]) - coords[1];
+ }
+ }
+}
+
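+/* Clamp a rectangle given as (x, y, width, height) to 'bounds', storing
+ * the applied origin shift in 'shift'; a rectangle entirely outside the
+ * bounds collapses to zero size. */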
+static INLINE void vg_bound_rect(VGfloat coords[4],
+ const VGfloat bounds[4],
+ VGfloat shift[4])
+{
+ /* if outside the bounds */
+ if (coords[0] > (bounds[0] + bounds[2]) ||
+ coords[1] > (bounds[1] + bounds[3]) ||
+ (coords[0] + coords[2]) < bounds[0] ||
+ (coords[1] + coords[3]) < bounds[1]) {
+ coords[0] = 0.f;
+ coords[1] = 0.f;
+ coords[2] = 0.f;
+ coords[3] = 0.f;
+ shift[0] = 0.f;
+ shift[1] = 0.f;
+ return;
+ }
+
+ /* bound x */
+ if (coords[0] < bounds[0]) {
+ shift[0] = bounds[0] - coords[0];
+ coords[2] -= shift[0];
+ coords[0] = bounds[0];
+ } else
+ shift[0] = 0.f;
+
+ /* bound y */
+ if (coords[1] < bounds[1]) {
+ shift[1] = bounds[1] - coords[1];
+ coords[3] -= shift[1];
+ coords[1] = bounds[1];
+ } else
+ shift[1] = 0.f;
+
+ shift[2] = bounds[2] - coords[2];
+ shift[3] = bounds[3] - coords[3];
+ /* bound width/height */
+ coords[2] = MIN2(coords[2], bounds[2]);
+ coords[3] = MIN2(coords[3], bounds[3]);
+
+ /* bound x/y + width/height */
+ if ((coords[0] + coords[2]) > (bounds[0] + bounds[2])) {
+ coords[2] = (bounds[0] + bounds[2]) - coords[0];
+ }
+ if ((coords[1] + coords[3]) > (bounds[1] + bounds[3])) {
+ coords[3] = (bounds[1] + bounds[3]) - coords[1];
+ }
+
+ /* if outside the bounds */
+ if ((coords[0] + coords[2]) < bounds[0] ||
+ (coords[1] + coords[3]) < bounds[1]) {
+ coords[0] = 0.f;
+ coords[1] = 0.f;
+ coords[2] = 0.f;
+ coords[3] = 0.f;
+ return;
+ }
+}
+
+void *vg_plain_vs(struct vg_context *ctx);
+void *vg_clear_vs(struct vg_context *ctx);
+void *vg_texture_vs(struct vg_context *ctx);
+typedef enum {
+ VEGA_Y0_TOP,
+ VEGA_Y0_BOTTOM
+} VegaOrientation;
+void vg_set_viewport(struct vg_context *ctx, VegaOrientation orientation);
+
+#endif
diff --git a/src/gallium/state_trackers/vega/vg_state.c b/src/gallium/state_trackers/vega/vg_state.c
new file mode 100644
index 0000000000..6f6bfdaf7a
--- /dev/null
+++ b/src/gallium/state_trackers/vega/vg_state.c
@@ -0,0 +1,124 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vg_state.h"
+
+#include <string.h>
+
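+/**
+ * Initialize the OpenVG state to the default values mandated by the
+ * OpenVG specification.
+ */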
+void vg_init_state(struct vg_state *state)
+{
+ state->matrix_mode = VG_MATRIX_PATH_USER_TO_SURFACE;
+ state->fill_rule = VG_EVEN_ODD;
+ state->image_quality = VG_IMAGE_QUALITY_FASTER;
+ state->rendering_quality = VG_RENDERING_QUALITY_BETTER;
+ state->blend_mode = VG_BLEND_SRC_OVER;
+ state->image_mode = VG_DRAW_IMAGE_NORMAL;
+
+ memset(state->scissor_rects, 0, sizeof(state->scissor_rects));
+ state->scissor_rects_num = 0;
+
+ state->color_transform = VG_FALSE;
+ state->color_transform_values[0] = 1.0f;
+ state->color_transform_values[1] = 1.0f;
+ state->color_transform_values[2] = 1.0f;
+ state->color_transform_values[3] = 1.0f;
+ state->color_transform_values[4] = 0.0f;
+ state->color_transform_values[5] = 0.0f;
+ state->color_transform_values[6] = 0.0f;
+ state->color_transform_values[7] = 0.0f;
+
+ /* Stroke parameters */
+ state->stroke.line_width.f = 1.0f;
+ state->stroke.line_width.i = 1;
+ state->stroke.cap_style = VG_CAP_BUTT;
+ state->stroke.join_style = VG_JOIN_MITER;
+ state->stroke.miter_limit.f = 4.0f;
+ state->stroke.miter_limit.i = 4;
+ state->stroke.dash_pattern_num = 0;
+ state->stroke.dash_phase.f = 0.0f;
+ state->stroke.dash_phase.i = 0;
+ state->stroke.dash_phase_reset = VG_FALSE;
+
+ /* Edge fill color for VG_TILE_FILL tiling mode */
+ state->tile_fill_color[0] = 0.0f;
+ state->tile_fill_color[1] = 0.0f;
+ state->tile_fill_color[2] = 0.0f;
+ state->tile_fill_color[3] = 0.0f;
+
+ /* Color for vgClear */
+ state->clear_color[0] = 0.0f;
+ state->clear_color[1] = 0.0f;
+ state->clear_color[2] = 0.0f;
+ state->clear_color[3] = 0.0f;
+
+ /* Glyph origin */
+ state->glyph_origin[0].f = 0.0f;
+ state->glyph_origin[1].f = 0.0f;
+ state->glyph_origin[0].i = 0;
+ state->glyph_origin[1].i = 0;
+
+ /* Enable/disable alpha masking and scissoring */
+ state->masking = VG_FALSE;
+ state->scissoring = VG_FALSE;
+
+ /* Pixel layout information */
+ state->pixel_layout = VG_PIXEL_LAYOUT_UNKNOWN;
+ state->screen_layout = VG_PIXEL_LAYOUT_UNKNOWN;
+
+ /* Source format selection for image filters */
+ state->filter_format_linear = VG_FALSE;
+ state->filter_format_premultiplied = VG_FALSE;
+
+ /* Destination write enable mask for image filters */
+ state->filter_channel_mask = (VG_RED | VG_GREEN | VG_BLUE | VG_ALPHA);
+
+ matrix_load_identity(&state->path_user_to_surface_matrix);
+ matrix_load_identity(&state->image_user_to_surface_matrix);
+ matrix_load_identity(&state->fill_paint_to_user_matrix);
+ matrix_load_identity(&state->stroke_paint_to_user_matrix);
+ matrix_load_identity(&state->glyph_user_to_surface_matrix);
+}
+
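+/**
+ * Return the transformation matrix selected by the current matrix mode,
+ * or NULL if the mode is not recognized.
+ */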
+struct matrix *vg_state_matrix(struct vg_state *state)
+{
+ switch(state->matrix_mode) {
+ case VG_MATRIX_PATH_USER_TO_SURFACE:
+ return &state->path_user_to_surface_matrix;
+ case VG_MATRIX_IMAGE_USER_TO_SURFACE:
+ return &state->image_user_to_surface_matrix;
+ case VG_MATRIX_FILL_PAINT_TO_USER:
+ return &state->fill_paint_to_user_matrix;
+ case VG_MATRIX_STROKE_PAINT_TO_USER:
+ return &state->stroke_paint_to_user_matrix;
+#ifdef OPENVG_VERSION_1_1
+ case VG_MATRIX_GLYPH_USER_TO_SURFACE:
+ return &state->glyph_user_to_surface_matrix;
+#endif
+ default:
+ break;
+ }
+ return NULL;
+}
diff --git a/src/gallium/state_trackers/vega/vg_state.h b/src/gallium/state_trackers/vega/vg_state.h
new file mode 100644
index 0000000000..ed90689f91
--- /dev/null
+++ b/src/gallium/state_trackers/vega/vg_state.h
@@ -0,0 +1,109 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef VG_STATE_H
+#define VG_STATE_H
+
+#include "VG/openvg.h"
+
+#include "api_consts.h"
+#include "matrix.h"
+
+struct vg_value
+{
+ VGfloat f;
+ VGint i;
+};
+
+struct vg_state {
+ /* Mode settings */
+ VGMatrixMode matrix_mode;
+ VGFillRule fill_rule;
+ VGImageQuality image_quality;
+ VGRenderingQuality rendering_quality;
+ VGBlendMode blend_mode;
+ VGImageMode image_mode;
+
+ /* Scissoring rectangles */
+ struct vg_value scissor_rects[32*4];
+ VGint scissor_rects_num;
+
+ /* Color Transformation */
+ VGboolean color_transform;
+ VGfloat color_transform_values[8];
+
+ /* Stroke parameters */
+ struct {
+ struct vg_value line_width;
+ VGCapStyle cap_style;
+ VGJoinStyle join_style;
+ struct vg_value miter_limit;
+ struct vg_value dash_pattern[VEGA_MAX_DASH_COUNT];
+ VGint dash_pattern_num;
+ struct vg_value dash_phase;
+ VGboolean dash_phase_reset;
+ } stroke;
+
+ /* Edge fill color for VG_TILE_FILL tiling mode */
+ VGfloat tile_fill_color[4];
+ VGint tile_fill_colori[4];
+
+ /* Color for vgClear */
+ VGfloat clear_color[4];
+ VGint clear_colori[4];
+
+ /* Glyph origin */
+ struct vg_value glyph_origin[2];
+
+ /* Enable/disable alpha masking and scissoring */
+ VGboolean masking;
+ VGboolean scissoring;
+
+ /* Pixel layout information */
+ VGPixelLayout pixel_layout;
+ VGPixelLayout screen_layout;
+
+ /* Source format selection for image filters */
+ VGboolean filter_format_linear;
+ VGboolean filter_format_premultiplied;
+
+ /* Destination write enable mask for image filters */
+ VGbitfield filter_channel_mask;
+
+ struct matrix path_user_to_surface_matrix;
+ struct matrix image_user_to_surface_matrix;
+ struct matrix fill_paint_to_user_matrix;
+ struct matrix stroke_paint_to_user_matrix;
+ struct matrix glyph_user_to_surface_matrix;
+
+ struct vg_paint *stroke_paint;
+ struct vg_paint *fill_paint;
+};
+
+void vg_init_state(struct vg_state *state);
+struct matrix * vg_state_matrix(struct vg_state *state);
+
+#endif
diff --git a/src/gallium/state_trackers/vega/vg_tracker.c b/src/gallium/state_trackers/vega/vg_tracker.c
new file mode 100644
index 0000000000..56cc60aebe
--- /dev/null
+++ b/src/gallium/state_trackers/vega/vg_tracker.c
@@ -0,0 +1,416 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vg_context.h"
+#include "vg_tracker.h"
+#include "mask.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_inlines.h"
+#include "pipe/p_screen.h"
+#include "util/u_memory.h"
+#include "util/u_math.h"
+
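+/**
+ * Create a 2D texture of the given format (defaulting to A8R8G8B8) that
+ * can be used as a render target and sampler, or as a depth/stencil
+ * buffer when the format has a stencil component.
+ */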
+static struct pipe_texture *
+create_texture(struct pipe_context *pipe, enum pipe_format format,
+ VGint width, VGint height)
+{
+ struct pipe_texture templ;
+
+ memset(&templ, 0, sizeof(templ));
+
+ if (format != PIPE_FORMAT_NONE) {
+ templ.format = format;
+ }
+ else {
+ templ.format = PIPE_FORMAT_A8R8G8B8_UNORM;
+ }
+
+ templ.target = PIPE_TEXTURE_2D;
+ pf_get_block(templ.format, &templ.block);
+ templ.width[0] = width;
+ templ.height[0] = height;
+ templ.depth[0] = 1;
+ templ.last_level = 0;
+
+ if (pf_get_component_bits(format, PIPE_FORMAT_COMP_S)) {
+ templ.tex_usage = PIPE_TEXTURE_USAGE_DEPTH_STENCIL;
+ } else {
+ templ.tex_usage = (PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
+ PIPE_TEXTURE_USAGE_RENDER_TARGET |
+ PIPE_TEXTURE_USAGE_SAMPLER);
+ }
+
+ return pipe->screen->texture_create(pipe->screen, &templ);
+}
+
+/**
+ * Allocate a renderbuffer for an on-screen window (not a user-created
+ * renderbuffer). The window system code determines the format.
+ */
+static struct st_renderbuffer *
+st_new_renderbuffer_fb(enum pipe_format format)
+{
+ struct st_renderbuffer *strb;
+
+ strb = CALLOC_STRUCT(st_renderbuffer);
+ if (!strb) {
+ /*_vega_error(NULL, VG_OUT_OF_MEMORY, "creating renderbuffer");*/
+ return NULL;
+ }
+
+ strb->format = format;
+
+ return strb;
+}
+
+
+/**
+ * This is called to allocate the original drawing surface, and again
+ * during window resize.
+ */
+static VGboolean
+st_renderbuffer_alloc_storage(struct vg_context * ctx,
+ struct st_renderbuffer *strb,
+ VGuint width, VGuint height)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ unsigned surface_usage;
+
+ /* Free the old surface and texture
+ */
+ pipe_surface_reference(&strb->surface, NULL);
+ pipe_texture_reference(&strb->texture, NULL);
+
+
+ /* Probably need dedicated flags for surface usage too:
+ */
+ surface_usage = (PIPE_BUFFER_USAGE_GPU_READ |
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ strb->texture = create_texture(pipe, strb->format,
+ width, height);
+
+ if (!strb->texture)
+ return FALSE;
+
+ strb->surface = pipe->screen->get_tex_surface(pipe->screen,
+ strb->texture,
+ 0, 0, 0,
+ surface_usage);
+ strb->width = width;
+ strb->height = height;
+
+ assert(strb->surface->width == width);
+ assert(strb->surface->height == height);
+
+ return strb->surface != NULL;
+}
+
+struct vg_context * st_create_context(struct pipe_context *pipe,
+ const void *visual,
+ struct vg_context *share)
+{
+ struct vg_context *ctx = vg_create_context(pipe, visual, share);
+ /*debug_printf("--------- CREATE CONTEXT %p\n", ctx);*/
+ return ctx;
+}
+
+void st_destroy_context(struct vg_context *st)
+{
+ /*debug_printf("--------- DESTROY CONTEXT %p\n", st);*/
+ vg_destroy_context(st);
+}
+
+void st_copy_context_state(struct vg_context *dst, struct vg_context *src,
+ uint mask)
+{
+ fprintf(stderr, "FIXME: %s\n", __FUNCTION__);
+}
+
+void st_get_framebuffer_dimensions(struct st_framebuffer *stfb,
+ uint *width,
+ uint *height)
+{
+ *width = stfb->strb->width;
+ *height = stfb->strb->height;
+}
+
+struct st_framebuffer * st_create_framebuffer(const void *visual,
+ enum pipe_format colorFormat,
+ enum pipe_format depthFormat,
+ enum pipe_format stencilFormat,
+ uint width, uint height,
+ void *privateData)
+{
+ struct st_framebuffer *stfb = CALLOC_STRUCT(st_framebuffer);
+ if (stfb) {
+ struct st_renderbuffer *rb =
+ st_new_renderbuffer_fb(colorFormat);
+ stfb->strb = rb;
+#if 0
+ if (doubleBuffer) {
+ struct st_renderbuffer *rb =
+ st_new_renderbuffer_fb(colorFormat);
+ }
+#endif
+
+ /* we want to combine the depth/stencil */
+ if (stencilFormat == depthFormat)
+ stfb->dsrb = st_new_renderbuffer_fb(stencilFormat);
+ else
+ stfb->dsrb = st_new_renderbuffer_fb(PIPE_FORMAT_S8Z24_UNORM);
+
+ /* Currently we always allocate the alpha mask (lazily, in
+ * setup_new_alpha_mask), although it may not be necessary if
+ * EGL_ALPHA_MASK_SIZE was 0. */
+ stfb->alpha_mask = 0;
+
+ stfb->init_width = width;
+ stfb->init_height = height;
+ stfb->privateData = privateData;
+ }
+
+ return stfb;
+}
+
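+/**
+ * (Re)allocate the framebuffer's alpha mask texture at the new size,
+ * initialize it to full coverage (alpha = 1.0) and copy over the old
+ * mask contents, if any.
+ */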
+static void setup_new_alpha_mask(struct vg_context *ctx,
+ struct st_framebuffer *stfb,
+ uint width, uint height)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_texture *old_texture = stfb->alpha_mask;
+
+ /* We use PIPE_FORMAT_A8R8G8B8_UNORM because we want to both render
+ * to this texture and sample from it; while this wastes some space,
+ * it makes both uses a lot simpler. */
+ stfb->alpha_mask =
+ create_texture(pipe, PIPE_FORMAT_A8R8G8B8_UNORM, width, height);
+
+ if (!stfb->alpha_mask) {
+ if (old_texture)
+ pipe_texture_reference(&old_texture, NULL);
+ return;
+ }
+
+ vg_validate_state(ctx);
+
+ /* alpha mask starts with 1.f alpha */
+ mask_fill(0, 0, width, height, 1.f);
+
+ /* if we had an old surface copy it over */
+ if (old_texture) {
+ struct pipe_surface *surface = pipe->screen->get_tex_surface(
+ pipe->screen,
+ stfb->alpha_mask,
+ 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+ struct pipe_surface *old_surface = pipe->screen->get_tex_surface(
+ pipe->screen,
+ old_texture,
+ 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_READ);
+ pipe->surface_copy(pipe,
+ surface,
+ 0, 0,
+ old_surface,
+ 0, 0,
+ MIN2(old_surface->width, width),
+ MIN2(old_surface->height, height));
+ if (surface)
+ pipe_surface_reference(&surface, NULL);
+ if (old_surface)
+ pipe_surface_reference(&old_surface, NULL);
+ }
+
+ /* Free the old texture
+ */
+ if (old_texture)
+ pipe_texture_reference(&old_texture, NULL);
+}
+
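+/**
+ * Resize the framebuffer: reallocate the color and depth/stencil
+ * renderbuffers, rebind them as the current framebuffer state, clear the
+ * new depth/stencil buffer and rebuild the alpha mask and blend textures.
+ */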
+void st_resize_framebuffer(struct st_framebuffer *stfb,
+ uint width, uint height)
+{
+ struct vg_context *ctx = vg_current_context();
+ struct st_renderbuffer *strb = stfb->strb;
+ struct pipe_framebuffer_state *state;
+
+ if (!ctx)
+ return;
+
+ state = &ctx->state.g3d.fb;
+
+ /* If this is a noop, exit early and don't do the clear, etc below.
+ */
+ if (strb->width == width &&
+ strb->height == height &&
+ state->zsbuf)
+ return;
+
+ if (strb->width != width || strb->height != height)
+ st_renderbuffer_alloc_storage(ctx, strb,
+ width, height);
+
+ if (stfb->dsrb->width != width || stfb->dsrb->height != height)
+ st_renderbuffer_alloc_storage(ctx, stfb->dsrb,
+ width, height);
+
+ {
+ VGuint i;
+
+ memset(state, 0, sizeof(struct pipe_framebuffer_state));
+
+ state->width = width;
+ state->height = height;
+
+ state->nr_cbufs = 1;
+ state->cbufs[0] = strb->surface;
+ for (i = 1; i < PIPE_MAX_COLOR_BUFS; ++i)
+ state->cbufs[i] = 0;
+
+ state->zsbuf = stfb->dsrb->surface;
+
+ cso_set_framebuffer(ctx->cso_context, state);
+ }
+
+ ctx->state.dirty |= VIEWPORT_DIRTY;
+ ctx->state.dirty |= DEPTH_STENCIL_DIRTY; /* to reset the scissors */
+
+ ctx->pipe->clear(ctx->pipe, PIPE_CLEAR_DEPTHSTENCIL,
+ NULL, 0.0, 0);
+
+ /* we need all the other state already set */
+
+ setup_new_alpha_mask(ctx, stfb, width, height);
+
+ pipe_texture_reference( &stfb->blend_texture, NULL );
+ stfb->blend_texture = create_texture(ctx->pipe, PIPE_FORMAT_A8R8G8B8_UNORM,
+ width, height);
+}
+
+void st_set_framebuffer_surface(struct st_framebuffer *stfb,
+ uint surfIndex, struct pipe_surface *surf)
+{
+ struct st_renderbuffer *rb = stfb->strb;
+
+ /* unreference existing surfaces */
+ pipe_surface_reference( &rb->surface, NULL );
+ pipe_texture_reference( &rb->texture, NULL );
+
+ /* reference new ones */
+ pipe_surface_reference( &rb->surface, surf );
+ pipe_texture_reference( &rb->texture, surf->texture );
+
+ rb->width = surf->width;
+ rb->height = surf->height;
+}
+
+int st_get_framebuffer_surface(struct st_framebuffer *stfb,
+ uint surfIndex, struct pipe_surface **surf)
+{
+ struct st_renderbuffer *rb = stfb->strb;
+ *surf = rb->surface;
+ return VG_TRUE;
+}
+
+int st_get_framebuffer_texture(struct st_framebuffer *stfb,
+ uint surfIndex, struct pipe_texture **tex)
+{
+ struct st_renderbuffer *rb = stfb->strb;
+ *tex = rb->texture;
+ return VG_TRUE;
+}
+
+void * st_framebuffer_private(struct st_framebuffer *stfb)
+{
+ return stfb->privateData;
+}
+
+void st_unreference_framebuffer(struct st_framebuffer *stfb)
+{
+ /* FIXME */
+}
+
+void st_make_current(struct vg_context *st,
+ struct st_framebuffer *draw,
+ struct st_framebuffer *read)
+{
+ vg_set_current_context(st);
+ if (st) {
+ st->draw_buffer = draw;
+ }
+}
+
+struct vg_context *st_get_current(void)
+{
+ return vg_current_context();
+}
+
+void st_flush(struct vg_context *st, uint pipeFlushFlags,
+ struct pipe_fence_handle **fence)
+{
+ st->pipe->flush(st->pipe, pipeFlushFlags, fence);
+}
+
+void st_finish(struct vg_context *st)
+{
+ struct pipe_fence_handle *fence = NULL;
+
+ st_flush(st, PIPE_FLUSH_RENDER_CACHE, &fence);
+
+ st->pipe->screen->fence_finish(st->pipe->screen, fence, 0);
+ st->pipe->screen->fence_reference(st->pipe->screen, &fence, NULL);
+}
+
+void st_notify_swapbuffers(struct st_framebuffer *stfb)
+{
+ struct vg_context *ctx = vg_current_context();
+ if (ctx && ctx->draw_buffer == stfb) {
+ st_flush(ctx,
+ PIPE_FLUSH_RENDER_CACHE |
+ PIPE_FLUSH_SWAPBUFFERS |
+ PIPE_FLUSH_FRAME,
+ NULL);
+ }
+}
+
+void st_notify_swapbuffers_complete(struct st_framebuffer *stfb)
+{
+}
+
+int st_bind_texture_surface(struct pipe_surface *ps, int target, int level,
+ enum pipe_format format)
+{
+ return 0;
+}
+
+int st_unbind_texture_surface(struct pipe_surface *ps, int target, int level)
+{
+ return 0;
+}
diff --git a/src/gallium/state_trackers/vega/vg_tracker.h b/src/gallium/state_trackers/vega/vg_tracker.h
new file mode 100644
index 0000000000..5457631106
--- /dev/null
+++ b/src/gallium/state_trackers/vega/vg_tracker.h
@@ -0,0 +1,107 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef VG_TRACKER_H
+#define VG_TRACKER_H
+
+#include "VG/openvg.h"
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_format.h"
+
+#define ST_SURFACE_FRONT_LEFT 0
+#define ST_SURFACE_BACK_LEFT 1
+#define ST_SURFACE_FRONT_RIGHT 2
+#define ST_SURFACE_BACK_RIGHT 3
+#define ST_SURFACE_DEPTH 8
+
+struct vg_context;
+struct st_framebuffer;
+struct pipe_context;
+struct pipe_fence_handle;
+struct pipe_surface;
+
+
+struct vg_context *st_create_context(struct pipe_context *pipe,
+ const void *visual,
+ struct vg_context *share);
+
+void st_destroy_context( struct vg_context *st );
+
+void st_copy_context_state(struct vg_context *dst, struct vg_context *src,
+ uint mask);
+
+struct st_framebuffer *st_create_framebuffer(const void *visual,
+ enum pipe_format colorFormat,
+ enum pipe_format depthFormat,
+ enum pipe_format stencilFormat,
+ uint width, uint height,
+ void *privateData);
+
+void st_resize_framebuffer(struct st_framebuffer *stfb,
+ uint width, uint height);
+
+void st_set_framebuffer_surface(struct st_framebuffer *stfb,
+ uint surfIndex, struct pipe_surface *surf);
+
+void st_get_framebuffer_dimensions( struct st_framebuffer *stfb,
+ uint *width, uint *height);
+
+int st_bind_texture_surface(struct pipe_surface *ps, int target, int level,
+ enum pipe_format format);
+
+int st_unbind_texture_surface(struct pipe_surface *ps, int target, int level);
+
+int st_get_framebuffer_surface(struct st_framebuffer *stfb,
+ uint surfIndex, struct pipe_surface **surf);
+
+int st_get_framebuffer_texture(struct st_framebuffer *stfb,
+ uint surfIndex, struct pipe_texture **tex);
+
+void *st_framebuffer_private(struct st_framebuffer *stfb);
+
+void st_unreference_framebuffer(struct st_framebuffer *stfb);
+
+void st_make_current(struct vg_context *st,
+ struct st_framebuffer *draw,
+ struct st_framebuffer *read);
+
+struct vg_context *st_get_current(void);
+
+void st_flush(struct vg_context *st, uint pipeFlushFlags,
+ struct pipe_fence_handle **fence);
+void st_finish(struct vg_context *st);
+
+void st_notify_swapbuffers(struct st_framebuffer *stfb);
+void st_notify_swapbuffers_complete(struct st_framebuffer *stfb);
+
+
+/** Generic function type */
+typedef void (*st_proc)();
+
+st_proc st_get_proc_address(const char *procname);
+
+#endif
diff --git a/src/gallium/state_trackers/vega/vg_translate.c b/src/gallium/state_trackers/vega/vg_translate.c
new file mode 100644
index 0000000000..00e0764706
--- /dev/null
+++ b/src/gallium/state_trackers/vega/vg_translate.c
@@ -0,0 +1,1030 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vg_translate.h"
+
+#include "pipe/p_format.h"
+#include "util/u_pack_color.h"
+
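+/**
+ * Pack a span of 'n' floating point RGBA colors into 'dstAddr' in the
+ * requested VGImageFormat; unhandled formats hit the assertions below.
+ */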
+void _vega_pack_rgba_span_float(struct vg_context *ctx,
+ VGuint n, VGfloat rgba[][4],
+ VGImageFormat dstFormat,
+ void *dstAddr)
+{
+ VGint i;
+
+ switch (dstFormat) {
+ case VG_sRGBX_8888: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = 255;
+ dst[i] = r << 24 | g << 16 | b << 8 | a;
+ }
+ return;
+ }
+ break;
+ case VG_sRGBA_8888: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ dst[i] = r << 24 | g << 16 | b << 8 | a;
+ }
+ return;
+ }
+ break;
+ case VG_sRGBA_8888_PRE: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ dst[i] = r << 24 | g << 16 | b << 8 | a;
+ }
+ return;
+ }
+ break;
+ case VG_sRGB_565: {
+ VGshort *dst = (VGshort *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ /* quantize 8-bit channels to 5-6-5 */
+ r = r >> 3;
+ g = g >> 2;
+ b = b >> 3;
+
+ dst[i] = b | g << 5 | r << 11;
+ }
+ return;
+ }
+ break;
+ case VG_sRGBA_5551: {
+ VGshort *dst = (VGshort *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b, a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ /* quantize 8-bit channels to 5-5-5-1 */
+ r = r >> 3;
+ g = g >> 3;
+ b = b >> 3;
+ a = a >> 7;
+
+ dst[i] = a | b << 1 | g << 6 | r << 11;
+ }
+ return;
+ }
+ break;
+ case VG_sRGBA_4444: {
+ VGshort *dst = (VGshort *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b, a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ /* quantize 8-bit channels to 4-4-4-4 */
+ r = r >> 4;
+ g = g >> 4;
+ b = b >> 4;
+ a = a >> 4;
+
+ dst[i] = a | b << 4 | g << 8 | r << 12;
+ }
+ return;
+ }
+ break;
+ case VG_sL_8: {
+ VGubyte *dst = (VGubyte *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b, a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+
+ dst[i] = a;
+ }
+ return;
+ }
+ break;
+ case VG_lRGBX_8888: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = 255;
+ dst[i] = r << 24 | g << 16 | b << 8 | a;
+ }
+ return;
+ }
+ break;
+ case VG_lRGBA_8888: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ dst[i] = r << 24 | g << 16 | b << 8 | a;
+ }
+ return;
+ }
+ case VG_lRGBA_8888_PRE: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ dst[i] = r << 24 | g << 16 | b << 8 | a;
+ }
+ return;
+ }
+ break;
+ case VG_lL_8: {
+ VGubyte *dst = (VGubyte *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ dst[i] = a;
+ }
+ return;
+ }
+ break;
+ case VG_A_8: {
+ VGubyte *dst = (VGubyte *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b, a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+
+ dst[i] = a;
+ }
+ return;
+ }
+ break;
+ case VG_BW_1: {
+ VGshort *dst = (VGshort *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b, a;
+ VGubyte res;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+
+ res = (r + g + b + a)/4;
+ dst[i] = (res & (128));
+ }
+ return;
+ }
+ break;
+#ifdef OPENVG_VERSION_1_1
+ case VG_A_1: {
+ VGshort *dst = (VGshort *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b, a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+
+ dst[i] = (a & (128));
+ }
+ return;
+ }
+ break;
+ case VG_A_4: {
+ VGshort *dst = (VGshort *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b, a;
+ VGubyte res;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+
+ res = a/4;
+ dst[i] = (res & (128));
+ }
+ return;
+ }
+ break;
+#endif
+ case VG_sXRGB_8888:
+ break;
+ case VG_sARGB_8888: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ dst[i] = a << 24 | r << 16 | g << 8 | b;
+ }
+ return;
+ }
+ break;
+ case VG_sARGB_8888_PRE: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ dst[i] = a << 24 | r << 16 | g << 8 | b;
+ }
+ return;
+ }
+ break;
+ case VG_sARGB_1555:
+ break;
+ case VG_sARGB_4444:
+ break;
+ case VG_lXRGB_8888:
+ break;
+ case VG_lARGB_8888: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ dst[i] = a << 24 | r << 16 | g << 8 | b;
+ }
+ return;
+ }
+ break;
+ case VG_lARGB_8888_PRE: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ dst[i] = a << 24 | r << 16 | g << 8 | b;
+ }
+ return;
+ }
+ break;
+ case VG_sBGRX_8888: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = 0xff;
+ dst[i] = b << 24 | g << 16 | r << 8 | a;
+ }
+ return;
+ }
+ break;
+ case VG_sBGRA_8888: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ dst[i] = b << 24 | g << 16 | r << 8 | a;
+ }
+ return;
+ }
+ break;
+ case VG_sBGRA_8888_PRE: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ dst[i] = b << 24 | g << 16 | r << 8 | a;
+ }
+ return;
+ }
+ break;
+ case VG_sBGR_565:
+ break;
+ case VG_sBGRA_5551:
+ break;
+ case VG_sBGRA_4444:
+ break;
+ case VG_lBGRX_8888: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = 0xff;
+ dst[i] = b << 24 | g << 16 | r << 8 | a;
+ }
+ return;
+ }
+ break;
+ case VG_lBGRA_8888: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ dst[i] = b << 24 | g << 16 | r << 8 | a;
+ }
+ return;
+ }
+ break;
+ case VG_lBGRA_8888_PRE: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ dst[i] = b << 24 | g << 16 | r << 8 | a;
+ }
+ return;
+ }
+ break;
+ case VG_sXBGR_8888:
+ break;
+ case VG_sABGR_8888: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ dst[i] = a << 24 | b << 16 | g << 8 | r;
+ }
+ return;
+ }
+ break;
+ case VG_sABGR_8888_PRE: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ dst[i] = a << 24 | b << 16 | g << 8 | r;
+ }
+ return;
+ }
+ break;
+ case VG_sABGR_1555:
+ break;
+ case VG_sABGR_4444:
+ break;
+ case VG_lXBGR_8888:
+ break;
+ case VG_lABGR_8888: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ dst[i] = a << 24 | b << 16 | g << 8 | r;
+ }
+ return;
+ }
+ break;
+ case VG_lABGR_8888_PRE: {
+ VGint *dst = (VGint *)dstAddr;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = float_to_ubyte(rgba[i][0]);
+ g = float_to_ubyte(rgba[i][1]);
+ b = float_to_ubyte(rgba[i][2]);
+ a = float_to_ubyte(rgba[i][3]);
+ dst[i] = a << 24 | b << 16 | g << 8 | r;
+ }
+ return;
+ }
+ break;
+ default:
+ assert(!"Unknown ReadPixels format");
+ break;
+ }
+ assert(!"Not implemented ReadPixels format");
+}
+
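+/**
+ * Unpack a span of 'n' pixels stored in 'dataFormat', starting 'offset'
+ * pixels into 'data', into floating point RGBA values.
+ */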
+void _vega_unpack_float_span_rgba(struct vg_context *ctx,
+ VGuint n,
+ VGuint offset,
+ const void * data,
+ VGImageFormat dataFormat,
+ VGfloat rgba[][4])
+{
+ VGint i;
+
+ switch (dataFormat) {
+ case VG_sRGBX_8888: {
+ VGuint *src = (VGuint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = (*src >> 24) & 0xff;
+ g = (*src >> 16) & 0xff;
+ b = (*src >> 8) & 0xff;
+ a = 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ }
+ return;
+ case VG_sRGBA_8888: {
+ VGuint *src = (VGuint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = (*src >> 24) & 0xff;
+ g = (*src >> 16) & 0xff;
+ b = (*src >> 8) & 0xff;
+ a = (*src >> 0) & 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ return;
+ }
+ break;
+ case VG_sRGBA_8888_PRE: {
+ VGint *src = (VGint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = (*src >> 24) & 0xff;
+ g = (*src >> 16) & 0xff;
+ b = (*src >> 8) & 0xff;
+ a = (*src >> 0) & 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ return;
+ }
+ break;
+ case VG_sRGB_565: {
+ VGshort *src = (VGshort *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGfloat clr[4];
+ clr[0] = ((*src >> 11) & 31) / 31.f;
+ clr[1] = ((*src >> 5) & 63) / 63.f;
+ clr[2] = ((*src >> 0) & 31) / 31.f;
+ clr[3] = 1.f;
+
+ util_pack_color(clr, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ }
+ return;
+ case VG_sRGBA_5551: {
+ VGshort *src = (VGshort *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGfloat clr[4];
+ clr[0] = ((*src >> 11) & 31) / 31.f;
+ clr[1] = ((*src >> 6) & 31) / 31.f;
+ clr[2] = ((*src >> 1) & 31) / 31.f;
+ clr[3] = ((*src >> 0) & 1) / 1.f;
+
+ util_pack_color(clr, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ }
+ return;
+ case VG_sRGBA_4444: {
+ VGshort *src = (VGshort *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGfloat clr[4];
+ clr[0] = ((*src >> 12) & 15)/15.;
+ clr[1] = ((*src >> 8) & 15)/15.;
+ clr[2] = ((*src >> 4) & 15)/15.;
+ clr[3] = ((*src >> 0) & 15)/15.;
+
+ util_pack_color(clr, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ }
+ return;
+ case VG_sL_8: {
+ VGubyte *src = (VGubyte *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ util_pack_color_ub(0xff, 0xff, 0xff, *src, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ }
+ return;
+ case VG_lRGBX_8888: {
+ VGuint *src = (VGuint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = (*src >> 24) & 0xff;
+ g = (*src >> 16) & 0xff;
+ b = (*src >> 8) & 0xff;
+ a = 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ }
+ return;
+ case VG_lRGBA_8888: {
+ VGint *src = (VGint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = (*src >> 24) & 0xff;
+ g = (*src >> 16) & 0xff;
+ b = (*src >> 8) & 0xff;
+ a = (*src >> 0) & 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ return;
+ }
+ break;
+ case VG_lRGBA_8888_PRE: {
+ VGint *src = (VGint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ r = (*src >> 24) & 0xff;
+ g = (*src >> 16) & 0xff;
+ b = (*src >> 8) & 0xff;
+ a = (*src >> 0) & 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ return;
+ }
+ break;
+ case VG_lL_8: {
+ VGubyte *src = (VGubyte *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ util_pack_color_ub(0xff, 0xff, 0xff, *src, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ }
+ return;
+ case VG_A_8: {
+ VGubyte *src = (VGubyte *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ util_pack_color_ub(0xff, 0xff, 0xff, *src, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ }
+ return;
+ case VG_BW_1: {
+ VGubyte *src = (VGubyte *)data;
+ src += offset;
+ for (i = 0; i < n; i += 8) {
+ VGfloat clr[4];
+ VGint j;
+ for (j = 0; j < 8 && i + j < n; ++j) {
+ VGint shift = j;
+ clr[0] = (((*src) & (1<<shift)) >> shift);
+ clr[1] = clr[0];
+ clr[2] = clr[0];
+ clr[3] = 1.f;
+
+ util_pack_color(clr, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i+j]);
+ }
+ ++src;
+ }
+ }
+ return;
+#ifdef OPENVG_VERSION_1_1
+ case VG_A_1: {
+ VGubyte *src = (VGubyte *)data;
+ src += offset;
+ for (i = 0; i < n; i += 8) {
+ VGfloat clr[4];
+ VGint j;
+ for (j = 0; j < 8 && i + j < n; ++j) {
+ VGint shift = j;
+ clr[0] = 0.f;
+ clr[1] = 0.f;
+ clr[2] = 0.f;
+ clr[3] = (((*src) & (1<<shift)) >> shift);
+
+ util_pack_color(clr, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i+j]);
+ }
+ ++src;
+ }
+ }
+ return;
+ case VG_A_4: {
+ VGubyte *src = (VGubyte *)data;
+ src += offset/2;
+ for (i = 0; i < n; i += 2) {
+ VGfloat clr[4];
+ VGint j;
+ for (j = 0; j < 2 && i + j < n; ++j) {
+ VGint bitter, shift;
+ if (j == 0) {
+ bitter = 0x0f;
+ shift = 0;
+ } else {
+ bitter = 0xf0;
+ shift = 4;
+ }
+ clr[0] = 0.f;
+ clr[1] = 0.f;
+ clr[2] = 0.f;
+ clr[3] = (((*src) & bitter) >> shift) / 15.f;
+
+ util_pack_color(clr, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i +j]);
+ }
+ ++src;
+ }
+ }
+ return;
+#endif
+ case VG_sXRGB_8888:
+ break;
+ case VG_sARGB_8888: {
+ VGuint *src = (VGuint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ a = (*src >> 24) & 0xff;
+ r = (*src >> 16) & 0xff;
+ g = (*src >> 8) & 0xff;
+ b = (*src >> 0) & 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ return;
+ }
+ break;
+ case VG_sARGB_8888_PRE: {
+ VGuint *src = (VGuint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ a = (*src >> 24) & 0xff;
+ r = (*src >> 16) & 0xff;
+ g = (*src >> 8) & 0xff;
+ b = (*src >> 0) & 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ return;
+ }
+ break;
+ case VG_sARGB_1555:
+ break;
+ case VG_sARGB_4444:
+ break;
+ case VG_lXRGB_8888:
+ break;
+ case VG_lARGB_8888: {
+ VGint *src = (VGint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ a = (*src >> 24) & 0xff;
+ r = (*src >> 16) & 0xff;
+ g = (*src >> 8) & 0xff;
+ b = (*src >> 0) & 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ return;
+ }
+ break;
+ case VG_lARGB_8888_PRE: {
+ VGint *src = (VGint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ a = (*src >> 24) & 0xff;
+ r = (*src >> 16) & 0xff;
+ g = (*src >> 8) & 0xff;
+ b = (*src >> 0) & 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ return;
+ }
+ break;
+ case VG_sBGRX_8888:
+ break;
+ case VG_sBGRA_8888: {
+ VGuint *src = (VGuint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ b = (*src >> 24) & 0xff;
+ g = (*src >> 16) & 0xff;
+ r = (*src >> 8) & 0xff;
+ a = (*src >> 0) & 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ return;
+ }
+ break;
+ case VG_sBGRA_8888_PRE: {
+ VGuint *src = (VGuint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ b = (*src >> 24) & 0xff;
+ g = (*src >> 16) & 0xff;
+ r = (*src >> 8) & 0xff;
+ a = (*src >> 0) & 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ return;
+ }
+ break;
+ case VG_sBGR_565:
+ break;
+ case VG_sBGRA_5551:
+ break;
+ case VG_sBGRA_4444:
+ break;
+ case VG_lBGRX_8888:
+ break;
+ case VG_lBGRA_8888: {
+ VGuint *src = (VGuint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ b = (*src >> 24) & 0xff;
+ g = (*src >> 16) & 0xff;
+ r = (*src >> 8) & 0xff;
+ a = (*src >> 0) & 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ return;
+ }
+ break;
+ case VG_lBGRA_8888_PRE: {
+ VGuint *src = (VGuint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ b = (*src >> 24) & 0xff;
+ g = (*src >> 16) & 0xff;
+ r = (*src >> 8) & 0xff;
+ a = (*src >> 0) & 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ return;
+ }
+ break;
+ case VG_sXBGR_8888:
+ break;
+ case VG_sABGR_8888: {
+ VGuint *src = (VGuint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ a = (*src >> 24) & 0xff;
+ b = (*src >> 16) & 0xff;
+ g = (*src >> 8) & 0xff;
+ r = (*src >> 0) & 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ return;
+ }
+ break;
+ case VG_sABGR_8888_PRE: {
+ VGuint *src = (VGuint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ a = (*src >> 24) & 0xff;
+ b = (*src >> 16) & 0xff;
+ g = (*src >> 8) & 0xff;
+ r = (*src >> 0) & 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ return;
+ }
+ break;
+ case VG_sABGR_1555:
+ break;
+ case VG_sABGR_4444:
+ break;
+ case VG_lXBGR_8888:
+ break;
+ case VG_lABGR_8888: {
+ VGuint *src = (VGuint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ a = (*src >> 24) & 0xff;
+ b = (*src >> 16) & 0xff;
+ g = (*src >> 8) & 0xff;
+ r = (*src >> 0) & 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ return;
+ }
+ break;
+ case VG_lABGR_8888_PRE: {
+ VGuint *src = (VGuint *)data;
+ src += offset;
+ for (i = 0; i < n; ++i) {
+ VGubyte r, g, b ,a;
+ a = (*src >> 24) & 0xff;
+ b = (*src >> 16) & 0xff;
+ g = (*src >> 8) & 0xff;
+ r = (*src >> 0) & 0xff;
+
+ util_pack_color_ub(r, g, b, a, PIPE_FORMAT_R32G32B32A32_FLOAT,
+ rgba[i]);
+ ++src;
+ }
+ return;
+ }
+ break;
+ default:
+ assert(!"Unknown ReadPixels format");
+ break;
+ }
+ assert(!"Not implemented ReadPixels format");
+}
+
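+/**
+ * Return the size in bytes of one pixel of the given VGImageFormat.
+ */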
+VGint _vega_size_for_format(VGImageFormat dataFormat)
+{
+ switch (dataFormat) {
+ case VG_sRGBX_8888:
+ case VG_sRGBA_8888:
+ case VG_sRGBA_8888_PRE:
+ return 4;
+ case VG_sRGB_565:
+ case VG_sRGBA_5551:
+ case VG_sRGBA_4444:
+ return 2;
+ case VG_sL_8:
+ return 1;
+ case VG_lRGBX_8888:
+ case VG_lRGBA_8888:
+ case VG_lRGBA_8888_PRE:
+ return 4;
+ case VG_lL_8:
+ return 1;
+ case VG_A_8:
+ return 1;
+ case VG_BW_1:
+ return 1;
+#ifdef OPENVG_VERSION_1_1
+ case VG_A_1:
+ break;
+ case VG_A_4:
+ break;
+#endif
+ case VG_sXRGB_8888:
+ case VG_sARGB_8888:
+ case VG_sARGB_8888_PRE:
+ return 4;
+ case VG_sARGB_1555:
+ case VG_sARGB_4444:
+ return 2;
+ case VG_lXRGB_8888:
+ case VG_lARGB_8888:
+ case VG_lARGB_8888_PRE:
+ case VG_sBGRX_8888:
+ case VG_sBGRA_8888:
+ case VG_sBGRA_8888_PRE:
+ return 4;
+ case VG_sBGR_565:
+ case VG_sBGRA_5551:
+ case VG_sBGRA_4444:
+ return 2;
+ case VG_lBGRX_8888:
+ case VG_lBGRA_8888:
+ case VG_lBGRA_8888_PRE:
+ case VG_sXBGR_8888:
+ case VG_sABGR_8888:
+ case VG_sABGR_8888_PRE:
+ return 4;
+ case VG_sABGR_1555:
+ case VG_sABGR_4444:
+ return 2;
+ case VG_lXBGR_8888:
+ case VG_lABGR_8888:
+ case VG_lABGR_8888_PRE:
+ return 4;
+ default:
+ assert(!"Unknown ReadPixels format");
+ break;
+ }
+ assert(!"Not implemented ReadPixels format");
+ return 0;
+}
diff --git a/src/gallium/state_trackers/vega/vg_translate.h b/src/gallium/state_trackers/vega/vg_translate.h
new file mode 100644
index 0000000000..70815bacbc
--- /dev/null
+++ b/src/gallium/state_trackers/vega/vg_translate.h
@@ -0,0 +1,49 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef VG_TRANSLATE_H
+#define VG_TRANSLATE_H
+
+#include "VG/openvg.h"
+#include "vg_context.h"
+
+/* FIXME: we really should be using the translate module, but
+ * pipe_format cannot express some of the VG formats (the
+ * premultiplied ones), so currently it will not work. */
+
+void _vega_pack_rgba_span_float(struct vg_context *ctx,
+ VGuint n, VGfloat rgba[][4],
+ VGImageFormat dstFormat,
+ void *dstAddr);
+void _vega_unpack_float_span_rgba(struct vg_context *ctx,
+ VGuint n,
+ VGuint offset,
+ const void * data,
+ VGImageFormat dataFormat,
+ VGfloat rgba[][4]);
+VGint _vega_size_for_format(VGImageFormat format);
+
+#endif
diff --git a/src/gallium/state_trackers/vega/vgu.c b/src/gallium/state_trackers/vega/vgu.c
new file mode 100644
index 0000000000..7dc51c5599
--- /dev/null
+++ b/src/gallium/state_trackers/vega/vgu.c
@@ -0,0 +1,450 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "VG/openvg.h"
+#include "VG/vgu.h"
+
+#include "matrix.h"
+#include "path.h"
+
+#include "util/u_debug.h"
+#include "util/u_pointer.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+static VGboolean is_aligned_to(const void *ptr, VGbyte alignment)
+{
+ void *aligned = align_pointer(ptr, alignment);
+ return (ptr == aligned) ? VG_TRUE : VG_FALSE;
+}
+
+static VGboolean is_aligned(const void *ptr)
+{
+ return is_aligned_to(ptr, 4);
+}
+
+static void vgu_append_float_coords(VGPath path,
+ const VGubyte *cmds,
+ VGint num_cmds,
+ const VGfloat *coords,
+ VGint num_coords)
+{
+ VGubyte common_data[40 * sizeof(VGfloat)];
+ struct path *p = (struct path *)path;
+
+ vg_float_to_datatype(path_datatype(p), common_data, coords, num_coords);
+ vgAppendPathData(path, num_cmds, cmds, common_data);
+}
+
+VGUErrorCode vguLine(VGPath path,
+ VGfloat x0, VGfloat y0,
+ VGfloat x1, VGfloat y1)
+{
+ static const VGubyte cmds[] = {VG_MOVE_TO_ABS, VG_LINE_TO_ABS};
+ VGfloat coords[4];
+ VGbitfield caps;
+
+ if (path == VG_INVALID_HANDLE) {
+ return VGU_BAD_HANDLE_ERROR;
+ }
+ caps = vgGetPathCapabilities(path);
+ if (!(caps & VG_PATH_CAPABILITY_APPEND_TO)) {
+ return VGU_PATH_CAPABILITY_ERROR;
+ }
+
+ coords[0] = x0;
+ coords[1] = y0;
+ coords[2] = x1;
+ coords[3] = y1;
+
+ vgu_append_float_coords(path, cmds, 2, coords, 4);
+
+ return VGU_NO_ERROR;
+}
+
+VGUErrorCode vguPolygon(VGPath path,
+ const VGfloat * points,
+ VGint count,
+ VGboolean closed)
+{
+ VGubyte *cmds;
+ VGfloat *coords;
+ VGbitfield caps;
+ VGint i;
+
+ if (path == VG_INVALID_HANDLE) {
+ return VGU_BAD_HANDLE_ERROR;
+ }
+
+ if (!points || count <= 0 || !is_aligned(points)) {
+ return VGU_ILLEGAL_ARGUMENT_ERROR;
+ }
+
+ caps = vgGetPathCapabilities(path);
+ if (!(caps & VG_PATH_CAPABILITY_APPEND_TO)) {
+ return VGU_PATH_CAPABILITY_ERROR;
+ }
+
+   /* one extra command slot for the optional VG_CLOSE_PATH */
+   cmds = malloc(sizeof(VGubyte) * (count + 1));
+   coords = malloc(sizeof(VGfloat) * count * 2);
+   if (!cmds || !coords) {
+      free(cmds);
+      free(coords);
+      return VGU_OUT_OF_MEMORY_ERROR;
+   }
+
+ cmds[0] = VG_MOVE_TO_ABS;
+ coords[0] = points[0];
+ coords[1] = points[1];
+ for (i = 1; i < count; ++i) {
+ cmds[i] = VG_LINE_TO_ABS;
+ coords[2*i + 0] = points[2*i + 0];
+ coords[2*i + 1] = points[2*i + 1];
+ }
+
+ if (closed) {
+ cmds[i] = VG_CLOSE_PATH;
+ ++i;
+ }
+
+ vgu_append_float_coords(path, cmds, i, coords, 2*i);
+
+ free(cmds);
+ free(coords);
+
+ return VGU_NO_ERROR;
+}
+
+VGUErrorCode vguRect(VGPath path,
+ VGfloat x, VGfloat y,
+ VGfloat width, VGfloat height)
+{
+ static const VGubyte cmds[] = {VG_MOVE_TO_ABS,
+ VG_HLINE_TO_REL,
+ VG_VLINE_TO_REL,
+ VG_HLINE_TO_REL,
+ VG_CLOSE_PATH
+ };
+ VGfloat coords[5];
+ VGbitfield caps;
+
+ if (path == VG_INVALID_HANDLE) {
+ return VGU_BAD_HANDLE_ERROR;
+ }
+ caps = vgGetPathCapabilities(path);
+ if (!(caps & VG_PATH_CAPABILITY_APPEND_TO)) {
+ return VGU_PATH_CAPABILITY_ERROR;
+ }
+ if (width <= 0 || height <= 0) {
+ return VGU_ILLEGAL_ARGUMENT_ERROR;
+ }
+
+ coords[0] = x;
+ coords[1] = y;
+ coords[2] = width;
+ coords[3] = height;
+ coords[4] = -width;
+
+ vgu_append_float_coords(path, cmds, 5, coords, 5);
+
+ return VGU_NO_ERROR;
+}
+
+VGUErrorCode vguRoundRect(VGPath path,
+ VGfloat x, VGfloat y,
+ VGfloat width,
+ VGfloat height,
+ VGfloat arcWidth,
+ VGfloat arcHeight)
+{
+ static const VGubyte cmds[] = {VG_MOVE_TO_ABS,
+ VG_HLINE_TO_REL,
+ VG_SCCWARC_TO_REL,
+ VG_VLINE_TO_REL,
+ VG_SCCWARC_TO_REL,
+ VG_HLINE_TO_REL,
+ VG_SCCWARC_TO_REL,
+ VG_VLINE_TO_REL,
+ VG_SCCWARC_TO_REL,
+ VG_CLOSE_PATH
+ };
+ VGfloat c[26];
+ VGbitfield caps;
+
+ if (path == VG_INVALID_HANDLE) {
+ return VGU_BAD_HANDLE_ERROR;
+ }
+ caps = vgGetPathCapabilities(path);
+ if (!(caps & VG_PATH_CAPABILITY_APPEND_TO)) {
+ return VGU_PATH_CAPABILITY_ERROR;
+ }
+ if (width <= 0 || height <= 0) {
+ return VGU_ILLEGAL_ARGUMENT_ERROR;
+ }
+
+ c[0] = x + arcWidth/2; c[1] = y;
+
+ c[2] = width - arcWidth;
+
+ c[3] = arcWidth/2; c[4] = arcHeight/2; c[5] = 0;
+ c[6] = arcWidth/2; c[7] = arcHeight/2;
+
+ c[8] = height - arcHeight;
+
+ c[9] = arcWidth/2; c[10] = arcHeight/2; c[11] = 0;
+ c[12] = -arcWidth/2; c[13] = arcHeight/2;
+
+ c[14] = -(width - arcWidth);
+
+ c[15] = arcWidth/2; c[16] = arcHeight/2; c[17] = 0;
+ c[18] = -arcWidth/2; c[19] = -arcHeight/2;
+
+ c[20] = -(height - arcHeight);
+
+ c[21] = arcWidth/2; c[22] = arcHeight/2; c[23] = 0;
+ c[24] = arcWidth/2; c[25] = -arcHeight/2;
+
+ vgu_append_float_coords(path, cmds, 10, c, 26);
+
+ return VGU_NO_ERROR;
+}
+
+VGUErrorCode vguEllipse(VGPath path,
+ VGfloat cx, VGfloat cy,
+ VGfloat width,
+ VGfloat height)
+{
+ static const VGubyte cmds[] = {VG_MOVE_TO_ABS,
+ VG_SCCWARC_TO_REL,
+ VG_SCCWARC_TO_REL,
+ VG_CLOSE_PATH
+ };
+ VGfloat coords[12];
+ VGbitfield caps;
+
+ if (path == VG_INVALID_HANDLE) {
+ return VGU_BAD_HANDLE_ERROR;
+ }
+ caps = vgGetPathCapabilities(path);
+ if (!(caps & VG_PATH_CAPABILITY_APPEND_TO)) {
+ return VGU_PATH_CAPABILITY_ERROR;
+ }
+ if (width <= 0 || height <= 0) {
+ return VGU_ILLEGAL_ARGUMENT_ERROR;
+ }
+
+ coords[0] = cx + width/2; coords[1] = cy;
+
+ coords[2] = width/2; coords[3] = height/2; coords[4] = 0;
+ coords[5] = -width; coords[6] = 0;
+
+ coords[7] = width/2; coords[8] = height/2; coords[9] = 0;
+ coords[10] = width; coords[11] = 0;
+
+   vgu_append_float_coords(path, cmds, 4, coords, 12);
+
+ return VGU_NO_ERROR;
+}
+
+VGUErrorCode vguArc(VGPath path,
+ VGfloat x, VGfloat y,
+ VGfloat width, VGfloat height,
+ VGfloat startAngle,
+ VGfloat angleExtent,
+ VGUArcType arcType)
+{
+ VGubyte cmds[11];
+ VGfloat coords[40];
+ VGbitfield caps;
+ VGfloat last = startAngle + angleExtent;
+ VGint i, c = 0;
+
+ if (path == VG_INVALID_HANDLE) {
+ return VGU_BAD_HANDLE_ERROR;
+ }
+ caps = vgGetPathCapabilities(path);
+ if (!(caps & VG_PATH_CAPABILITY_APPEND_TO)) {
+ return VGU_PATH_CAPABILITY_ERROR;
+ }
+ if (width <= 0 || height <= 0) {
+ return VGU_ILLEGAL_ARGUMENT_ERROR;
+ }
+ if (arcType != VGU_ARC_OPEN &&
+ arcType != VGU_ARC_CHORD &&
+ arcType != VGU_ARC_PIE) {
+ return VGU_ILLEGAL_ARGUMENT_ERROR;
+ }
+
+ cmds[c] = VG_MOVE_TO_ABS; ++c;
+ coords[0] = x+cos(DEGREES_TO_RADIANS(startAngle))*width/2;
+ coords[1] = y+sin(DEGREES_TO_RADIANS(startAngle))*height/2;
+#ifdef DEBUG_VGUARC
+ debug_printf("start [%f, %f]\n", coords[0], coords[1]);
+#endif
+ i = 2;
+ if (angleExtent > 0) {
+ VGfloat angle = startAngle + 180;
+ while (angle < last) {
+ cmds[c] = VG_SCCWARC_TO_ABS; ++c;
+ coords[i] = width/2; coords[i+1] = height/2; coords[i+2] = 0;
+ coords[i+3] = x + cos(DEGREES_TO_RADIANS(angle))*width/2;
+ coords[i+4] = y + sin(DEGREES_TO_RADIANS(angle))*height/2;
+#ifdef DEBUG_VGUARC
+ debug_printf("1 [%f, %f]\n", coords[i+3],
+ coords[i+4]);
+#endif
+ i += 5;
+ angle += 180;
+ }
+ cmds[c] = VG_SCCWARC_TO_ABS; ++c;
+ coords[i] = width/2; coords[i+1] = height/2; coords[i+2] = 0;
+ coords[i+3] = x+cos(DEGREES_TO_RADIANS(last))*width/2;
+ coords[i+4] = y+sin(DEGREES_TO_RADIANS(last))*height/2;
+#ifdef DEBUG_VGUARC
+ debug_printf("2 [%f, %f]\n", coords[i+3],
+ coords[i+4]);
+#endif
+ i += 5;
+ } else {
+ VGfloat angle = startAngle - 180;
+ while (angle > last) {
+ cmds[c] = VG_SCWARC_TO_ABS; ++c;
+ coords[i] = width/2; coords[i+1] = height/2; coords[i+2] = 0;
+ coords[i+3] = x + cos(DEGREES_TO_RADIANS(angle)) * width/2;
+ coords[i+4] = y + sin(DEGREES_TO_RADIANS(angle)) * height/2;
+#ifdef DEBUG_VGUARC
+ debug_printf("3 [%f, %f]\n", coords[i+3],
+ coords[i+4]);
+#endif
+ angle -= 180;
+ i += 5;
+ }
+ cmds[c] = VG_SCWARC_TO_ABS; ++c;
+ coords[i] = width/2; coords[i+1] = height/2; coords[i+2] = 0;
+ coords[i+3] = x + cos(DEGREES_TO_RADIANS(last)) * width/2;
+ coords[i+4] = y + sin(DEGREES_TO_RADIANS(last)) * height/2;
+#ifdef DEBUG_VGUARC
+ debug_printf("4 [%f, %f]\n", coords[i+3],
+ coords[i+4]);
+#endif
+ i += 5;
+ }
+
+ if (arcType == VGU_ARC_PIE) {
+ cmds[c] = VG_LINE_TO_ABS; ++c;
+ coords[i] = x; coords[i + 1] = y;
+ i += 2;
+ }
+ if (arcType == VGU_ARC_PIE || arcType == VGU_ARC_CHORD) {
+ cmds[c] = VG_CLOSE_PATH;
+ ++c;
+ }
+
+ assert(c < 11);
+
+ vgu_append_float_coords(path, cmds, c, coords, i);
+
+ return VGU_NO_ERROR;
+}
+
+VGUErrorCode vguComputeWarpQuadToSquare(VGfloat sx0, VGfloat sy0,
+ VGfloat sx1, VGfloat sy1,
+ VGfloat sx2, VGfloat sy2,
+ VGfloat sx3, VGfloat sy3,
+ VGfloat * matrix)
+{
+ struct matrix mat;
+
+ if (!matrix || !is_aligned(matrix))
+ return VGU_ILLEGAL_ARGUMENT_ERROR;
+
+ if (!matrix_quad_to_square(sx0, sy0,
+ sx1, sy1,
+ sx2, sy2,
+ sx3, sy3,
+ &mat))
+ return VGU_BAD_WARP_ERROR;
+
+ if (!matrix_is_invertible(&mat))
+ return VGU_BAD_WARP_ERROR;
+
+ memcpy(matrix, mat.m, sizeof(VGfloat) * 9);
+
+ return VGU_NO_ERROR;
+}
+
+VGUErrorCode vguComputeWarpSquareToQuad(VGfloat dx0, VGfloat dy0,
+ VGfloat dx1, VGfloat dy1,
+ VGfloat dx2, VGfloat dy2,
+ VGfloat dx3, VGfloat dy3,
+ VGfloat * matrix)
+{
+ struct matrix mat;
+
+ if (!matrix || !is_aligned(matrix))
+ return VGU_ILLEGAL_ARGUMENT_ERROR;
+
+ if (!matrix_square_to_quad(dx0, dy0,
+ dx1, dy1,
+ dx2, dy2,
+ dx3, dy3,
+ &mat))
+ return VGU_BAD_WARP_ERROR;
+
+ if (!matrix_is_invertible(&mat))
+ return VGU_BAD_WARP_ERROR;
+
+ memcpy(matrix, mat.m, sizeof(VGfloat) * 9);
+
+ return VGU_NO_ERROR;
+}
+
+VGUErrorCode vguComputeWarpQuadToQuad(VGfloat dx0, VGfloat dy0,
+ VGfloat dx1, VGfloat dy1,
+ VGfloat dx2, VGfloat dy2,
+ VGfloat dx3, VGfloat dy3,
+ VGfloat sx0, VGfloat sy0,
+ VGfloat sx1, VGfloat sy1,
+ VGfloat sx2, VGfloat sy2,
+ VGfloat sx3, VGfloat sy3,
+ VGfloat * matrix)
+{
+ struct matrix mat;
+
+ if (!matrix || !is_aligned(matrix))
+ return VGU_ILLEGAL_ARGUMENT_ERROR;
+
+ if (!matrix_quad_to_quad(dx0, dy0,
+ dx1, dy1,
+ dx2, dy2,
+ dx3, dy3,
+ sx0, sy0,
+ sx1, sy1,
+ sx2, sy2,
+ sx3, sy3,
+ &mat))
+ return VGU_BAD_WARP_ERROR;
+
+ memcpy(matrix, mat.m, sizeof(VGfloat) * 9);
+
+ return VGU_NO_ERROR;
+}
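
For context, a short client-side usage sketch of the VGU helpers implemented above: it creates a float-datatype path through the standard OpenVG entry points and appends a rounded rectangle and an ellipse to it. Error handling is omitted and all coordinate values are illustrative.

/* Illustrative only -- a plain OpenVG client using the VGU helpers. */
#include <VG/openvg.h>
#include <VG/vgu.h>

static VGPath build_example_path(void)
{
   VGPath path = vgCreatePath(VG_PATH_FORMAT_STANDARD,
                              VG_PATH_DATATYPE_F,
                              1.0f, 0.0f,      /* scale, bias */
                              0, 0,            /* capacity hints */
                              VG_PATH_CAPABILITY_ALL);

   /* 100x50 rounded rectangle with 10x10 corner arcs at (10, 10) */
   vguRoundRect(path, 10.0f, 10.0f, 100.0f, 50.0f, 10.0f, 10.0f);

   /* 40x20 ellipse centred at (60, 120) */
   vguEllipse(path, 60.0f, 120.0f, 40.0f, 20.0f);

   return path;
}
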
diff --git a/src/gallium/state_trackers/wgl/SConscript b/src/gallium/state_trackers/wgl/SConscript
new file mode 100644
index 0000000000..69b88618ec
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/SConscript
@@ -0,0 +1,42 @@
+import os
+
+Import('*')
+
+if env['platform'] in ['windows']:
+
+ env = env.Clone()
+
+ env.Append(CPPPATH = [
+ '#src/mesa',
+ '.',
+ ])
+
+ env.Append(CPPDEFINES = [
+ '_GDI32_', # prevent wgl* being declared __declspec(dllimport)
+ 'BUILD_GL32', # declare gl* as __declspec(dllexport) in Mesa headers
+ 'WIN32_THREADS', # use Win32 thread API
+ ])
+
+ sources = [
+ 'icd/stw_icd.c',
+
+ 'wgl/stw_wgl.c',
+
+ 'shared/stw_context.c',
+ 'shared/stw_device.c',
+ 'shared/stw_framebuffer.c',
+ 'shared/stw_pixelformat.c',
+ 'shared/stw_extensionsstring.c',
+ 'shared/stw_extswapinterval.c',
+ 'shared/stw_getprocaddress.c',
+ 'shared/stw_extgallium.c',
+ 'shared/stw_arbpixelformat.c',
+ 'shared/stw_tls.c',
+ ]
+
+ wgl = env.ConvenienceLibrary(
+      target = 'wgl',
+ source = sources,
+ )
+
+ Export('wgl')
diff --git a/src/gallium/state_trackers/wgl/icd/stw_icd.c b/src/gallium/state_trackers/wgl/icd/stw_icd.c
new file mode 100644
index 0000000000..347f40aa06
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/icd/stw_icd.c
@@ -0,0 +1,617 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <windows.h>
+#include <stdio.h>
+
+#include "GL/gl.h"
+
+#include "util/u_debug.h"
+#include "pipe/p_thread.h"
+
+#include "shared/stw_public.h"
+#include "icd/stw_icd.h"
+
+#define DBG 0
+
+
+BOOL APIENTRY
+DrvCopyContext(
+ DHGLRC dhrcSource,
+ DHGLRC dhrcDest,
+ UINT fuMask )
+{
+ return stw_copy_context(dhrcSource, dhrcDest, fuMask);
+}
+
+
+DHGLRC APIENTRY
+DrvCreateLayerContext(
+ HDC hdc,
+ INT iLayerPlane )
+{
+ DHGLRC r;
+
+ r = stw_create_layer_context( hdc, iLayerPlane );
+
+ if (DBG)
+ debug_printf( "%s( %p, %i ) = %u\n",
+ __FUNCTION__, hdc, iLayerPlane, r );
+
+ return r;
+}
+
+DHGLRC APIENTRY
+DrvCreateContext(
+ HDC hdc )
+{
+ return DrvCreateLayerContext( hdc, 0 );
+}
+
+BOOL APIENTRY
+DrvDeleteContext(
+ DHGLRC dhglrc )
+{
+ BOOL r;
+
+ r = stw_delete_context( dhglrc );
+
+ if (DBG)
+ debug_printf( "%s( %u ) = %u\n",
+ __FUNCTION__, dhglrc, r );
+
+ return r;
+}
+
+BOOL APIENTRY
+DrvDescribeLayerPlane(
+ HDC hdc,
+ INT iPixelFormat,
+ INT iLayerPlane,
+ UINT nBytes,
+ LPLAYERPLANEDESCRIPTOR plpd )
+{
+ if (DBG)
+ debug_printf( "%s\n", __FUNCTION__ );
+
+ return FALSE;
+}
+
+LONG APIENTRY
+DrvDescribePixelFormat(
+ HDC hdc,
+ INT iPixelFormat,
+ ULONG cjpfd,
+ PIXELFORMATDESCRIPTOR *ppfd )
+{
+ LONG r;
+
+ r = stw_pixelformat_describe( hdc, iPixelFormat, cjpfd, ppfd );
+
+ if (DBG)
+ debug_printf( "%s( %p, %d, %u, %p ) = %d\n",
+ __FUNCTION__, hdc, iPixelFormat, cjpfd, ppfd, r );
+
+ return r;
+}
+
+int APIENTRY
+DrvGetLayerPaletteEntries(
+ HDC hdc,
+ INT iLayerPlane,
+ INT iStart,
+ INT cEntries,
+ COLORREF *pcr )
+{
+ if (DBG)
+ debug_printf( "%s\n", __FUNCTION__ );
+
+ return 0;
+}
+
+PROC APIENTRY
+DrvGetProcAddress(
+ LPCSTR lpszProc )
+{
+ PROC r;
+
+ r = stw_get_proc_address( lpszProc );
+
+ if (DBG)
+ debug_printf( "%s( \"%s\" ) = %p\n", __FUNCTION__, lpszProc, r );
+
+ return r;
+}
+
+BOOL APIENTRY
+DrvRealizeLayerPalette(
+ HDC hdc,
+ INT iLayerPlane,
+ BOOL bRealize )
+{
+ if (DBG)
+ debug_printf( "%s\n", __FUNCTION__ );
+
+ return FALSE;
+}
+
+BOOL APIENTRY
+DrvReleaseContext(
+ DHGLRC dhglrc )
+{
+ return stw_release_context(dhglrc);
+}
+
+void APIENTRY
+DrvSetCallbackProcs(
+ INT nProcs,
+ PROC *pProcs )
+{
+ if (DBG)
+ debug_printf( "%s( %d, %p )\n", __FUNCTION__, nProcs, pProcs );
+
+ return;
+}
+
+
+/**
+ * Although WGL allows different dispatch entrypoints per context, this
+ * driver returns a single static dispatch table that routes every call
+ * to the gl* entrypoints exported by this module.
+ */
+static const GLCLTPROCTABLE cpt =
+{
+ OPENGL_VERSION_110_ENTRIES,
+ {
+ &glNewList,
+ &glEndList,
+ &glCallList,
+ &glCallLists,
+ &glDeleteLists,
+ &glGenLists,
+ &glListBase,
+ &glBegin,
+ &glBitmap,
+ &glColor3b,
+ &glColor3bv,
+ &glColor3d,
+ &glColor3dv,
+ &glColor3f,
+ &glColor3fv,
+ &glColor3i,
+ &glColor3iv,
+ &glColor3s,
+ &glColor3sv,
+ &glColor3ub,
+ &glColor3ubv,
+ &glColor3ui,
+ &glColor3uiv,
+ &glColor3us,
+ &glColor3usv,
+ &glColor4b,
+ &glColor4bv,
+ &glColor4d,
+ &glColor4dv,
+ &glColor4f,
+ &glColor4fv,
+ &glColor4i,
+ &glColor4iv,
+ &glColor4s,
+ &glColor4sv,
+ &glColor4ub,
+ &glColor4ubv,
+ &glColor4ui,
+ &glColor4uiv,
+ &glColor4us,
+ &glColor4usv,
+ &glEdgeFlag,
+ &glEdgeFlagv,
+ &glEnd,
+ &glIndexd,
+ &glIndexdv,
+ &glIndexf,
+ &glIndexfv,
+ &glIndexi,
+ &glIndexiv,
+ &glIndexs,
+ &glIndexsv,
+ &glNormal3b,
+ &glNormal3bv,
+ &glNormal3d,
+ &glNormal3dv,
+ &glNormal3f,
+ &glNormal3fv,
+ &glNormal3i,
+ &glNormal3iv,
+ &glNormal3s,
+ &glNormal3sv,
+ &glRasterPos2d,
+ &glRasterPos2dv,
+ &glRasterPos2f,
+ &glRasterPos2fv,
+ &glRasterPos2i,
+ &glRasterPos2iv,
+ &glRasterPos2s,
+ &glRasterPos2sv,
+ &glRasterPos3d,
+ &glRasterPos3dv,
+ &glRasterPos3f,
+ &glRasterPos3fv,
+ &glRasterPos3i,
+ &glRasterPos3iv,
+ &glRasterPos3s,
+ &glRasterPos3sv,
+ &glRasterPos4d,
+ &glRasterPos4dv,
+ &glRasterPos4f,
+ &glRasterPos4fv,
+ &glRasterPos4i,
+ &glRasterPos4iv,
+ &glRasterPos4s,
+ &glRasterPos4sv,
+ &glRectd,
+ &glRectdv,
+ &glRectf,
+ &glRectfv,
+ &glRecti,
+ &glRectiv,
+ &glRects,
+ &glRectsv,
+ &glTexCoord1d,
+ &glTexCoord1dv,
+ &glTexCoord1f,
+ &glTexCoord1fv,
+ &glTexCoord1i,
+ &glTexCoord1iv,
+ &glTexCoord1s,
+ &glTexCoord1sv,
+ &glTexCoord2d,
+ &glTexCoord2dv,
+ &glTexCoord2f,
+ &glTexCoord2fv,
+ &glTexCoord2i,
+ &glTexCoord2iv,
+ &glTexCoord2s,
+ &glTexCoord2sv,
+ &glTexCoord3d,
+ &glTexCoord3dv,
+ &glTexCoord3f,
+ &glTexCoord3fv,
+ &glTexCoord3i,
+ &glTexCoord3iv,
+ &glTexCoord3s,
+ &glTexCoord3sv,
+ &glTexCoord4d,
+ &glTexCoord4dv,
+ &glTexCoord4f,
+ &glTexCoord4fv,
+ &glTexCoord4i,
+ &glTexCoord4iv,
+ &glTexCoord4s,
+ &glTexCoord4sv,
+ &glVertex2d,
+ &glVertex2dv,
+ &glVertex2f,
+ &glVertex2fv,
+ &glVertex2i,
+ &glVertex2iv,
+ &glVertex2s,
+ &glVertex2sv,
+ &glVertex3d,
+ &glVertex3dv,
+ &glVertex3f,
+ &glVertex3fv,
+ &glVertex3i,
+ &glVertex3iv,
+ &glVertex3s,
+ &glVertex3sv,
+ &glVertex4d,
+ &glVertex4dv,
+ &glVertex4f,
+ &glVertex4fv,
+ &glVertex4i,
+ &glVertex4iv,
+ &glVertex4s,
+ &glVertex4sv,
+ &glClipPlane,
+ &glColorMaterial,
+ &glCullFace,
+ &glFogf,
+ &glFogfv,
+ &glFogi,
+ &glFogiv,
+ &glFrontFace,
+ &glHint,
+ &glLightf,
+ &glLightfv,
+ &glLighti,
+ &glLightiv,
+ &glLightModelf,
+ &glLightModelfv,
+ &glLightModeli,
+ &glLightModeliv,
+ &glLineStipple,
+ &glLineWidth,
+ &glMaterialf,
+ &glMaterialfv,
+ &glMateriali,
+ &glMaterialiv,
+ &glPointSize,
+ &glPolygonMode,
+ &glPolygonStipple,
+ &glScissor,
+ &glShadeModel,
+ &glTexParameterf,
+ &glTexParameterfv,
+ &glTexParameteri,
+ &glTexParameteriv,
+ &glTexImage1D,
+ &glTexImage2D,
+ &glTexEnvf,
+ &glTexEnvfv,
+ &glTexEnvi,
+ &glTexEnviv,
+ &glTexGend,
+ &glTexGendv,
+ &glTexGenf,
+ &glTexGenfv,
+ &glTexGeni,
+ &glTexGeniv,
+ &glFeedbackBuffer,
+ &glSelectBuffer,
+ &glRenderMode,
+ &glInitNames,
+ &glLoadName,
+ &glPassThrough,
+ &glPopName,
+ &glPushName,
+ &glDrawBuffer,
+ &glClear,
+ &glClearAccum,
+ &glClearIndex,
+ &glClearColor,
+ &glClearStencil,
+ &glClearDepth,
+ &glStencilMask,
+ &glColorMask,
+ &glDepthMask,
+ &glIndexMask,
+ &glAccum,
+ &glDisable,
+ &glEnable,
+ &glFinish,
+ &glFlush,
+ &glPopAttrib,
+ &glPushAttrib,
+ &glMap1d,
+ &glMap1f,
+ &glMap2d,
+ &glMap2f,
+ &glMapGrid1d,
+ &glMapGrid1f,
+ &glMapGrid2d,
+ &glMapGrid2f,
+ &glEvalCoord1d,
+ &glEvalCoord1dv,
+ &glEvalCoord1f,
+ &glEvalCoord1fv,
+ &glEvalCoord2d,
+ &glEvalCoord2dv,
+ &glEvalCoord2f,
+ &glEvalCoord2fv,
+ &glEvalMesh1,
+ &glEvalPoint1,
+ &glEvalMesh2,
+ &glEvalPoint2,
+ &glAlphaFunc,
+ &glBlendFunc,
+ &glLogicOp,
+ &glStencilFunc,
+ &glStencilOp,
+ &glDepthFunc,
+ &glPixelZoom,
+ &glPixelTransferf,
+ &glPixelTransferi,
+ &glPixelStoref,
+ &glPixelStorei,
+ &glPixelMapfv,
+ &glPixelMapuiv,
+ &glPixelMapusv,
+ &glReadBuffer,
+ &glCopyPixels,
+ &glReadPixels,
+ &glDrawPixels,
+ &glGetBooleanv,
+ &glGetClipPlane,
+ &glGetDoublev,
+ &glGetError,
+ &glGetFloatv,
+ &glGetIntegerv,
+ &glGetLightfv,
+ &glGetLightiv,
+ &glGetMapdv,
+ &glGetMapfv,
+ &glGetMapiv,
+ &glGetMaterialfv,
+ &glGetMaterialiv,
+ &glGetPixelMapfv,
+ &glGetPixelMapuiv,
+ &glGetPixelMapusv,
+ &glGetPolygonStipple,
+ &glGetString,
+ &glGetTexEnvfv,
+ &glGetTexEnviv,
+ &glGetTexGendv,
+ &glGetTexGenfv,
+ &glGetTexGeniv,
+ &glGetTexImage,
+ &glGetTexParameterfv,
+ &glGetTexParameteriv,
+ &glGetTexLevelParameterfv,
+ &glGetTexLevelParameteriv,
+ &glIsEnabled,
+ &glIsList,
+ &glDepthRange,
+ &glFrustum,
+ &glLoadIdentity,
+ &glLoadMatrixf,
+ &glLoadMatrixd,
+ &glMatrixMode,
+ &glMultMatrixf,
+ &glMultMatrixd,
+ &glOrtho,
+ &glPopMatrix,
+ &glPushMatrix,
+ &glRotated,
+ &glRotatef,
+ &glScaled,
+ &glScalef,
+ &glTranslated,
+ &glTranslatef,
+ &glViewport,
+ &glArrayElement,
+ &glBindTexture,
+ &glColorPointer,
+ &glDisableClientState,
+ &glDrawArrays,
+ &glDrawElements,
+ &glEdgeFlagPointer,
+ &glEnableClientState,
+ &glIndexPointer,
+ &glIndexub,
+ &glIndexubv,
+ &glInterleavedArrays,
+ &glNormalPointer,
+ &glPolygonOffset,
+ &glTexCoordPointer,
+ &glVertexPointer,
+ &glAreTexturesResident,
+ &glCopyTexImage1D,
+ &glCopyTexImage2D,
+ &glCopyTexSubImage1D,
+ &glCopyTexSubImage2D,
+ &glDeleteTextures,
+ &glGenTextures,
+ &glGetPointerv,
+ &glIsTexture,
+ &glPrioritizeTextures,
+ &glTexSubImage1D,
+ &glTexSubImage2D,
+ &glPopClientAttrib,
+ &glPushClientAttrib
+ }
+};
+
+
+PGLCLTPROCTABLE APIENTRY
+DrvSetContext(
+ HDC hdc,
+ DHGLRC dhglrc,
+ PFN_SETPROCTABLE pfnSetProcTable )
+{
+ PGLCLTPROCTABLE r = (PGLCLTPROCTABLE)&cpt;
+
+ if (!stw_make_current( hdc, dhglrc ))
+ r = NULL;
+
+ if (DBG)
+ debug_printf( "%s( 0x%p, %u, 0x%p ) = %p\n",
+ __FUNCTION__, hdc, dhglrc, pfnSetProcTable, r );
+
+ return r;
+}
+
+int APIENTRY
+DrvSetLayerPaletteEntries(
+ HDC hdc,
+ INT iLayerPlane,
+ INT iStart,
+ INT cEntries,
+ CONST COLORREF *pcr )
+{
+ if (DBG)
+ debug_printf( "%s\n", __FUNCTION__ );
+
+ return 0;
+}
+
+BOOL APIENTRY
+DrvSetPixelFormat(
+ HDC hdc,
+ LONG iPixelFormat )
+{
+ BOOL r;
+
+ r = stw_pixelformat_set( hdc, iPixelFormat );
+
+ if (DBG)
+ debug_printf( "%s( %p, %d ) = %s\n", __FUNCTION__, hdc, iPixelFormat, r ? "TRUE" : "FALSE" );
+
+ return r;
+}
+
+BOOL APIENTRY
+DrvShareLists(
+ DHGLRC dhglrc1,
+ DHGLRC dhglrc2 )
+{
+ if (DBG)
+ debug_printf( "%s\n", __FUNCTION__ );
+
+ return stw_share_lists(dhglrc1, dhglrc2);
+}
+
+BOOL APIENTRY
+DrvSwapBuffers(
+ HDC hdc )
+{
+ if (DBG)
+ debug_printf( "%s( %p )\n", __FUNCTION__, hdc );
+
+ return stw_swap_buffers( hdc );
+}
+
+BOOL APIENTRY
+DrvSwapLayerBuffers(
+ HDC hdc,
+ UINT fuPlanes )
+{
+ if (DBG)
+ debug_printf( "%s\n", __FUNCTION__ );
+
+ return stw_swap_layer_buffers( hdc, fuPlanes );
+}
+
+BOOL APIENTRY
+DrvValidateVersion(
+ ULONG ulVersion )
+{
+ if (DBG)
+ debug_printf( "%s( %u )\n", __FUNCTION__, ulVersion );
+
+ /* TODO: get the expected version from the winsys */
+
+ return ulVersion == 1;
+}
diff --git a/src/gallium/state_trackers/wgl/icd/stw_icd.h b/src/gallium/state_trackers/wgl/icd/stw_icd.h
new file mode 100644
index 0000000000..cbc1a66548
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/icd/stw_icd.h
@@ -0,0 +1,489 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef STW_ICD_H
+#define STW_ICD_H
+
+
+#include <windows.h>
+
+#include "GL/gl.h"
+
+
+typedef ULONG DHGLRC;
+
+#define OPENGL_VERSION_110_ENTRIES 336
+
+struct __GLdispatchTableRec
+{
+ void (GLAPIENTRY * NewList)(GLuint, GLenum);
+ void (GLAPIENTRY * EndList)(void);
+ void (GLAPIENTRY * CallList)(GLuint);
+ void (GLAPIENTRY * CallLists)(GLsizei, GLenum, const GLvoid *);
+ void (GLAPIENTRY * DeleteLists)(GLuint, GLsizei);
+ GLuint (GLAPIENTRY * GenLists)(GLsizei);
+ void (GLAPIENTRY * ListBase)(GLuint);
+ void (GLAPIENTRY * Begin)(GLenum);
+ void (GLAPIENTRY * Bitmap)(GLsizei, GLsizei, GLfloat, GLfloat, GLfloat, GLfloat, const GLubyte *);
+ void (GLAPIENTRY * Color3b)(GLbyte, GLbyte, GLbyte);
+ void (GLAPIENTRY * Color3bv)(const GLbyte *);
+ void (GLAPIENTRY * Color3d)(GLdouble, GLdouble, GLdouble);
+ void (GLAPIENTRY * Color3dv)(const GLdouble *);
+ void (GLAPIENTRY * Color3f)(GLfloat, GLfloat, GLfloat);
+ void (GLAPIENTRY * Color3fv)(const GLfloat *);
+ void (GLAPIENTRY * Color3i)(GLint, GLint, GLint);
+ void (GLAPIENTRY * Color3iv)(const GLint *);
+ void (GLAPIENTRY * Color3s)(GLshort, GLshort, GLshort);
+ void (GLAPIENTRY * Color3sv)(const GLshort *);
+ void (GLAPIENTRY * Color3ub)(GLubyte, GLubyte, GLubyte);
+ void (GLAPIENTRY * Color3ubv)(const GLubyte *);
+ void (GLAPIENTRY * Color3ui)(GLuint, GLuint, GLuint);
+ void (GLAPIENTRY * Color3uiv)(const GLuint *);
+ void (GLAPIENTRY * Color3us)(GLushort, GLushort, GLushort);
+ void (GLAPIENTRY * Color3usv)(const GLushort *);
+ void (GLAPIENTRY * Color4b)(GLbyte, GLbyte, GLbyte, GLbyte);
+ void (GLAPIENTRY * Color4bv)(const GLbyte *);
+ void (GLAPIENTRY * Color4d)(GLdouble, GLdouble, GLdouble, GLdouble);
+ void (GLAPIENTRY * Color4dv)(const GLdouble *);
+ void (GLAPIENTRY * Color4f)(GLfloat, GLfloat, GLfloat, GLfloat);
+ void (GLAPIENTRY * Color4fv)(const GLfloat *);
+ void (GLAPIENTRY * Color4i)(GLint, GLint, GLint, GLint);
+ void (GLAPIENTRY * Color4iv)(const GLint *);
+ void (GLAPIENTRY * Color4s)(GLshort, GLshort, GLshort, GLshort);
+ void (GLAPIENTRY * Color4sv)(const GLshort *);
+ void (GLAPIENTRY * Color4ub)(GLubyte, GLubyte, GLubyte, GLubyte);
+ void (GLAPIENTRY * Color4ubv)(const GLubyte *);
+ void (GLAPIENTRY * Color4ui)(GLuint, GLuint, GLuint, GLuint);
+ void (GLAPIENTRY * Color4uiv)(const GLuint *);
+ void (GLAPIENTRY * Color4us)(GLushort, GLushort, GLushort, GLushort);
+ void (GLAPIENTRY * Color4usv)(const GLushort *);
+ void (GLAPIENTRY * EdgeFlag)(GLboolean);
+ void (GLAPIENTRY * EdgeFlagv)(const GLboolean *);
+ void (GLAPIENTRY * End)(void);
+ void (GLAPIENTRY * Indexd)(GLdouble);
+ void (GLAPIENTRY * Indexdv)(const GLdouble *);
+ void (GLAPIENTRY * Indexf)(GLfloat);
+ void (GLAPIENTRY * Indexfv)(const GLfloat *);
+ void (GLAPIENTRY * Indexi)(GLint);
+ void (GLAPIENTRY * Indexiv)(const GLint *);
+ void (GLAPIENTRY * Indexs)(GLshort);
+ void (GLAPIENTRY * Indexsv)(const GLshort *);
+ void (GLAPIENTRY * Normal3b)(GLbyte, GLbyte, GLbyte);
+ void (GLAPIENTRY * Normal3bv)(const GLbyte *);
+ void (GLAPIENTRY * Normal3d)(GLdouble, GLdouble, GLdouble);
+ void (GLAPIENTRY * Normal3dv)(const GLdouble *);
+ void (GLAPIENTRY * Normal3f)(GLfloat, GLfloat, GLfloat);
+ void (GLAPIENTRY * Normal3fv)(const GLfloat *);
+ void (GLAPIENTRY * Normal3i)(GLint, GLint, GLint);
+ void (GLAPIENTRY * Normal3iv)(const GLint *);
+ void (GLAPIENTRY * Normal3s)(GLshort, GLshort, GLshort);
+ void (GLAPIENTRY * Normal3sv)(const GLshort *);
+ void (GLAPIENTRY * RasterPos2d)(GLdouble, GLdouble);
+ void (GLAPIENTRY * RasterPos2dv)(const GLdouble *);
+ void (GLAPIENTRY * RasterPos2f)(GLfloat, GLfloat);
+ void (GLAPIENTRY * RasterPos2fv)(const GLfloat *);
+ void (GLAPIENTRY * RasterPos2i)(GLint, GLint);
+ void (GLAPIENTRY * RasterPos2iv)(const GLint *);
+ void (GLAPIENTRY * RasterPos2s)(GLshort, GLshort);
+ void (GLAPIENTRY * RasterPos2sv)(const GLshort *);
+ void (GLAPIENTRY * RasterPos3d)(GLdouble, GLdouble, GLdouble);
+ void (GLAPIENTRY * RasterPos3dv)(const GLdouble *);
+ void (GLAPIENTRY * RasterPos3f)(GLfloat, GLfloat, GLfloat);
+ void (GLAPIENTRY * RasterPos3fv)(const GLfloat *);
+ void (GLAPIENTRY * RasterPos3i)(GLint, GLint, GLint);
+ void (GLAPIENTRY * RasterPos3iv)(const GLint *);
+ void (GLAPIENTRY * RasterPos3s)(GLshort, GLshort, GLshort);
+ void (GLAPIENTRY * RasterPos3sv)(const GLshort *);
+ void (GLAPIENTRY * RasterPos4d)(GLdouble, GLdouble, GLdouble, GLdouble);
+ void (GLAPIENTRY * RasterPos4dv)(const GLdouble *);
+ void (GLAPIENTRY * RasterPos4f)(GLfloat, GLfloat, GLfloat, GLfloat);
+ void (GLAPIENTRY * RasterPos4fv)(const GLfloat *);
+ void (GLAPIENTRY * RasterPos4i)(GLint, GLint, GLint, GLint);
+ void (GLAPIENTRY * RasterPos4iv)(const GLint *);
+ void (GLAPIENTRY * RasterPos4s)(GLshort, GLshort, GLshort, GLshort);
+ void (GLAPIENTRY * RasterPos4sv)(const GLshort *);
+ void (GLAPIENTRY * Rectd)(GLdouble, GLdouble, GLdouble, GLdouble);
+ void (GLAPIENTRY * Rectdv)(const GLdouble *, const GLdouble *);
+ void (GLAPIENTRY * Rectf)(GLfloat, GLfloat, GLfloat, GLfloat);
+ void (GLAPIENTRY * Rectfv)(const GLfloat *, const GLfloat *);
+ void (GLAPIENTRY * Recti)(GLint, GLint, GLint, GLint);
+ void (GLAPIENTRY * Rectiv)(const GLint *, const GLint *);
+ void (GLAPIENTRY * Rects)(GLshort, GLshort, GLshort, GLshort);
+ void (GLAPIENTRY * Rectsv)(const GLshort *, const GLshort *);
+ void (GLAPIENTRY * TexCoord1d)(GLdouble);
+ void (GLAPIENTRY * TexCoord1dv)(const GLdouble *);
+ void (GLAPIENTRY * TexCoord1f)(GLfloat);
+ void (GLAPIENTRY * TexCoord1fv)(const GLfloat *);
+ void (GLAPIENTRY * TexCoord1i)(GLint);
+ void (GLAPIENTRY * TexCoord1iv)(const GLint *);
+ void (GLAPIENTRY * TexCoord1s)(GLshort);
+ void (GLAPIENTRY * TexCoord1sv)(const GLshort *);
+ void (GLAPIENTRY * TexCoord2d)(GLdouble, GLdouble);
+ void (GLAPIENTRY * TexCoord2dv)(const GLdouble *);
+ void (GLAPIENTRY * TexCoord2f)(GLfloat, GLfloat);
+ void (GLAPIENTRY * TexCoord2fv)(const GLfloat *);
+ void (GLAPIENTRY * TexCoord2i)(GLint, GLint);
+ void (GLAPIENTRY * TexCoord2iv)(const GLint *);
+ void (GLAPIENTRY * TexCoord2s)(GLshort, GLshort);
+ void (GLAPIENTRY * TexCoord2sv)(const GLshort *);
+ void (GLAPIENTRY * TexCoord3d)(GLdouble, GLdouble, GLdouble);
+ void (GLAPIENTRY * TexCoord3dv)(const GLdouble *);
+ void (GLAPIENTRY * TexCoord3f)(GLfloat, GLfloat, GLfloat);
+ void (GLAPIENTRY * TexCoord3fv)(const GLfloat *);
+ void (GLAPIENTRY * TexCoord3i)(GLint, GLint, GLint);
+ void (GLAPIENTRY * TexCoord3iv)(const GLint *);
+ void (GLAPIENTRY * TexCoord3s)(GLshort, GLshort, GLshort);
+ void (GLAPIENTRY * TexCoord3sv)(const GLshort *);
+ void (GLAPIENTRY * TexCoord4d)(GLdouble, GLdouble, GLdouble, GLdouble);
+ void (GLAPIENTRY * TexCoord4dv)(const GLdouble *);
+ void (GLAPIENTRY * TexCoord4f)(GLfloat, GLfloat, GLfloat, GLfloat);
+ void (GLAPIENTRY * TexCoord4fv)(const GLfloat *);
+ void (GLAPIENTRY * TexCoord4i)(GLint, GLint, GLint, GLint);
+ void (GLAPIENTRY * TexCoord4iv)(const GLint *);
+ void (GLAPIENTRY * TexCoord4s)(GLshort, GLshort, GLshort, GLshort);
+ void (GLAPIENTRY * TexCoord4sv)(const GLshort *);
+ void (GLAPIENTRY * Vertex2d)(GLdouble, GLdouble);
+ void (GLAPIENTRY * Vertex2dv)(const GLdouble *);
+ void (GLAPIENTRY * Vertex2f)(GLfloat, GLfloat);
+ void (GLAPIENTRY * Vertex2fv)(const GLfloat *);
+ void (GLAPIENTRY * Vertex2i)(GLint, GLint);
+ void (GLAPIENTRY * Vertex2iv)(const GLint *);
+ void (GLAPIENTRY * Vertex2s)(GLshort, GLshort);
+ void (GLAPIENTRY * Vertex2sv)(const GLshort *);
+ void (GLAPIENTRY * Vertex3d)(GLdouble, GLdouble, GLdouble);
+ void (GLAPIENTRY * Vertex3dv)(const GLdouble *);
+ void (GLAPIENTRY * Vertex3f)(GLfloat, GLfloat, GLfloat);
+ void (GLAPIENTRY * Vertex3fv)(const GLfloat *);
+ void (GLAPIENTRY * Vertex3i)(GLint, GLint, GLint);
+ void (GLAPIENTRY * Vertex3iv)(const GLint *);
+ void (GLAPIENTRY * Vertex3s)(GLshort, GLshort, GLshort);
+ void (GLAPIENTRY * Vertex3sv)(const GLshort *);
+ void (GLAPIENTRY * Vertex4d)(GLdouble, GLdouble, GLdouble, GLdouble);
+ void (GLAPIENTRY * Vertex4dv)(const GLdouble *);
+ void (GLAPIENTRY * Vertex4f)(GLfloat, GLfloat, GLfloat, GLfloat);
+ void (GLAPIENTRY * Vertex4fv)(const GLfloat *);
+ void (GLAPIENTRY * Vertex4i)(GLint, GLint, GLint, GLint);
+ void (GLAPIENTRY * Vertex4iv)(const GLint *);
+ void (GLAPIENTRY * Vertex4s)(GLshort, GLshort, GLshort, GLshort);
+ void (GLAPIENTRY * Vertex4sv)(const GLshort *);
+ void (GLAPIENTRY * ClipPlane)(GLenum, const GLdouble *);
+ void (GLAPIENTRY * ColorMaterial)(GLenum, GLenum);
+ void (GLAPIENTRY * CullFace)(GLenum);
+ void (GLAPIENTRY * Fogf)(GLenum, GLfloat);
+ void (GLAPIENTRY * Fogfv)(GLenum, const GLfloat *);
+ void (GLAPIENTRY * Fogi)(GLenum, GLint);
+ void (GLAPIENTRY * Fogiv)(GLenum, const GLint *);
+ void (GLAPIENTRY * FrontFace)(GLenum);
+ void (GLAPIENTRY * Hint)(GLenum, GLenum);
+ void (GLAPIENTRY * Lightf)(GLenum, GLenum, GLfloat);
+ void (GLAPIENTRY * Lightfv)(GLenum, GLenum, const GLfloat *);
+ void (GLAPIENTRY * Lighti)(GLenum, GLenum, GLint);
+ void (GLAPIENTRY * Lightiv)(GLenum, GLenum, const GLint *);
+ void (GLAPIENTRY * LightModelf)(GLenum, GLfloat);
+ void (GLAPIENTRY * LightModelfv)(GLenum, const GLfloat *);
+ void (GLAPIENTRY * LightModeli)(GLenum, GLint);
+ void (GLAPIENTRY * LightModeliv)(GLenum, const GLint *);
+ void (GLAPIENTRY * LineStipple)(GLint, GLushort);
+ void (GLAPIENTRY * LineWidth)(GLfloat);
+ void (GLAPIENTRY * Materialf)(GLenum, GLenum, GLfloat);
+ void (GLAPIENTRY * Materialfv)(GLenum, GLenum, const GLfloat *);
+ void (GLAPIENTRY * Materiali)(GLenum, GLenum, GLint);
+ void (GLAPIENTRY * Materialiv)(GLenum, GLenum, const GLint *);
+ void (GLAPIENTRY * PointSize)(GLfloat);
+ void (GLAPIENTRY * PolygonMode)(GLenum, GLenum);
+ void (GLAPIENTRY * PolygonStipple)(const GLubyte *);
+ void (GLAPIENTRY * Scissor)(GLint, GLint, GLsizei, GLsizei);
+ void (GLAPIENTRY * ShadeModel)(GLenum);
+ void (GLAPIENTRY * TexParameterf)(GLenum, GLenum, GLfloat);
+ void (GLAPIENTRY * TexParameterfv)(GLenum, GLenum, const GLfloat *);
+ void (GLAPIENTRY * TexParameteri)(GLenum, GLenum, GLint);
+ void (GLAPIENTRY * TexParameteriv)(GLenum, GLenum, const GLint *);
+ void (GLAPIENTRY * TexImage1D)(GLenum, GLint, GLint, GLsizei, GLint, GLenum, GLenum, const GLvoid *);
+ void (GLAPIENTRY * TexImage2D)(GLenum, GLint, GLint, GLsizei, GLsizei, GLint, GLenum, GLenum, const GLvoid *);
+ void (GLAPIENTRY * TexEnvf)(GLenum, GLenum, GLfloat);
+ void (GLAPIENTRY * TexEnvfv)(GLenum, GLenum, const GLfloat *);
+ void (GLAPIENTRY * TexEnvi)(GLenum, GLenum, GLint);
+ void (GLAPIENTRY * TexEnviv)(GLenum, GLenum, const GLint *);
+ void (GLAPIENTRY * TexGend)(GLenum, GLenum, GLdouble);
+ void (GLAPIENTRY * TexGendv)(GLenum, GLenum, const GLdouble *);
+ void (GLAPIENTRY * TexGenf)(GLenum, GLenum, GLfloat);
+ void (GLAPIENTRY * TexGenfv)(GLenum, GLenum, const GLfloat *);
+ void (GLAPIENTRY * TexGeni)(GLenum, GLenum, GLint);
+ void (GLAPIENTRY * TexGeniv)(GLenum, GLenum, const GLint *);
+ void (GLAPIENTRY * FeedbackBuffer)(GLsizei, GLenum, GLfloat *);
+ void (GLAPIENTRY * SelectBuffer)(GLsizei, GLuint *);
+ GLint (GLAPIENTRY * RenderMode)(GLenum);
+ void (GLAPIENTRY * InitNames)(void);
+ void (GLAPIENTRY * LoadName)(GLuint);
+ void (GLAPIENTRY * PassThrough)(GLfloat);
+ void (GLAPIENTRY * PopName)(void);
+ void (GLAPIENTRY * PushName)(GLuint);
+ void (GLAPIENTRY * DrawBuffer)(GLenum);
+ void (GLAPIENTRY * Clear)(GLbitfield);
+ void (GLAPIENTRY * ClearAccum)(GLfloat, GLfloat, GLfloat, GLfloat);
+ void (GLAPIENTRY * ClearIndex)(GLfloat);
+ void (GLAPIENTRY * ClearColor)(GLclampf, GLclampf, GLclampf, GLclampf);
+ void (GLAPIENTRY * ClearStencil)(GLint);
+ void (GLAPIENTRY * ClearDepth)(GLclampd);
+ void (GLAPIENTRY * StencilMask)(GLuint);
+ void (GLAPIENTRY * ColorMask)(GLboolean, GLboolean, GLboolean, GLboolean);
+ void (GLAPIENTRY * DepthMask)(GLboolean);
+ void (GLAPIENTRY * IndexMask)(GLuint);
+ void (GLAPIENTRY * Accum)(GLenum, GLfloat);
+ void (GLAPIENTRY * Disable)(GLenum);
+ void (GLAPIENTRY * Enable)(GLenum);
+ void (GLAPIENTRY * Finish)(void);
+ void (GLAPIENTRY * Flush)(void);
+ void (GLAPIENTRY * PopAttrib)(void);
+ void (GLAPIENTRY * PushAttrib)(GLbitfield);
+ void (GLAPIENTRY * Map1d)(GLenum, GLdouble, GLdouble, GLint, GLint, const GLdouble *);
+ void (GLAPIENTRY * Map1f)(GLenum, GLfloat, GLfloat, GLint, GLint, const GLfloat *);
+ void (GLAPIENTRY * Map2d)(GLenum, GLdouble, GLdouble, GLint, GLint, GLdouble, GLdouble, GLint, GLint, const GLdouble *);
+ void (GLAPIENTRY * Map2f)(GLenum, GLfloat, GLfloat, GLint, GLint, GLfloat, GLfloat, GLint, GLint, const GLfloat *);
+ void (GLAPIENTRY * MapGrid1d)(GLint, GLdouble, GLdouble);
+ void (GLAPIENTRY * MapGrid1f)(GLint, GLfloat, GLfloat);
+ void (GLAPIENTRY * MapGrid2d)(GLint, GLdouble, GLdouble, GLint, GLdouble, GLdouble);
+ void (GLAPIENTRY * MapGrid2f)(GLint, GLfloat, GLfloat, GLint, GLfloat, GLfloat);
+ void (GLAPIENTRY * EvalCoord1d)(GLdouble);
+ void (GLAPIENTRY * EvalCoord1dv)(const GLdouble *);
+ void (GLAPIENTRY * EvalCoord1f)(GLfloat);
+ void (GLAPIENTRY * EvalCoord1fv)(const GLfloat *);
+ void (GLAPIENTRY * EvalCoord2d)(GLdouble, GLdouble);
+ void (GLAPIENTRY * EvalCoord2dv)(const GLdouble *);
+ void (GLAPIENTRY * EvalCoord2f)(GLfloat, GLfloat);
+ void (GLAPIENTRY * EvalCoord2fv)(const GLfloat *);
+ void (GLAPIENTRY * EvalMesh1)(GLenum, GLint, GLint);
+ void (GLAPIENTRY * EvalPoint1)(GLint);
+ void (GLAPIENTRY * EvalMesh2)(GLenum, GLint, GLint, GLint, GLint);
+ void (GLAPIENTRY * EvalPoint2)(GLint, GLint);
+ void (GLAPIENTRY * AlphaFunc)(GLenum, GLclampf);
+ void (GLAPIENTRY * BlendFunc)(GLenum, GLenum);
+ void (GLAPIENTRY * LogicOp)(GLenum);
+ void (GLAPIENTRY * StencilFunc)(GLenum, GLint, GLuint);
+ void (GLAPIENTRY * StencilOp)(GLenum, GLenum, GLenum);
+ void (GLAPIENTRY * DepthFunc)(GLenum);
+ void (GLAPIENTRY * PixelZoom)(GLfloat, GLfloat);
+ void (GLAPIENTRY * PixelTransferf)(GLenum, GLfloat);
+ void (GLAPIENTRY * PixelTransferi)(GLenum, GLint);
+ void (GLAPIENTRY * PixelStoref)(GLenum, GLfloat);
+ void (GLAPIENTRY * PixelStorei)(GLenum, GLint);
+ void (GLAPIENTRY * PixelMapfv)(GLenum, GLint, const GLfloat *);
+ void (GLAPIENTRY * PixelMapuiv)(GLenum, GLint, const GLuint *);
+ void (GLAPIENTRY * PixelMapusv)(GLenum, GLint, const GLushort *);
+ void (GLAPIENTRY * ReadBuffer)(GLenum);
+ void (GLAPIENTRY * CopyPixels)(GLint, GLint, GLsizei, GLsizei, GLenum);
+ void (GLAPIENTRY * ReadPixels)(GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, GLvoid *);
+ void (GLAPIENTRY * DrawPixels)(GLsizei, GLsizei, GLenum, GLenum, const GLvoid *);
+ void (GLAPIENTRY * GetBooleanv)(GLenum, GLboolean *);
+ void (GLAPIENTRY * GetClipPlane)(GLenum, GLdouble *);
+ void (GLAPIENTRY * GetDoublev)(GLenum, GLdouble *);
+ GLenum (GLAPIENTRY * GetError)(void);
+ void (GLAPIENTRY * GetFloatv)(GLenum, GLfloat *);
+ void (GLAPIENTRY * GetIntegerv)(GLenum, GLint *);
+ void (GLAPIENTRY * GetLightfv)(GLenum, GLenum, GLfloat *);
+ void (GLAPIENTRY * GetLightiv)(GLenum, GLenum, GLint *);
+ void (GLAPIENTRY * GetMapdv)(GLenum, GLenum, GLdouble *);
+ void (GLAPIENTRY * GetMapfv)(GLenum, GLenum, GLfloat *);
+ void (GLAPIENTRY * GetMapiv)(GLenum, GLenum, GLint *);
+ void (GLAPIENTRY * GetMaterialfv)(GLenum, GLenum, GLfloat *);
+ void (GLAPIENTRY * GetMaterialiv)(GLenum, GLenum, GLint *);
+ void (GLAPIENTRY * GetPixelMapfv)(GLenum, GLfloat *);
+ void (GLAPIENTRY * GetPixelMapuiv)(GLenum, GLuint *);
+ void (GLAPIENTRY * GetPixelMapusv)(GLenum, GLushort *);
+ void (GLAPIENTRY * GetPolygonStipple)(GLubyte *);
+ const GLubyte * (GLAPIENTRY * GetString)(GLenum);
+ void (GLAPIENTRY * GetTexEnvfv)(GLenum, GLenum, GLfloat *);
+ void (GLAPIENTRY * GetTexEnviv)(GLenum, GLenum, GLint *);
+ void (GLAPIENTRY * GetTexGendv)(GLenum, GLenum, GLdouble *);
+ void (GLAPIENTRY * GetTexGenfv)(GLenum, GLenum, GLfloat *);
+ void (GLAPIENTRY * GetTexGeniv)(GLenum, GLenum, GLint *);
+ void (GLAPIENTRY * GetTexImage)(GLenum, GLint, GLenum, GLenum, GLvoid *);
+ void (GLAPIENTRY * GetTexParameterfv)(GLenum, GLenum, GLfloat *);
+ void (GLAPIENTRY * GetTexParameteriv)(GLenum, GLenum, GLint *);
+ void (GLAPIENTRY * GetTexLevelParameterfv)(GLenum, GLint, GLenum, GLfloat *);
+ void (GLAPIENTRY * GetTexLevelParameteriv)(GLenum, GLint, GLenum, GLint *);
+ GLboolean (GLAPIENTRY * IsEnabled)(GLenum);
+ GLboolean (GLAPIENTRY * IsList)(GLuint);
+ void (GLAPIENTRY * DepthRange)(GLclampd, GLclampd);
+ void (GLAPIENTRY * Frustum)(GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble);
+ void (GLAPIENTRY * LoadIdentity)(void);
+ void (GLAPIENTRY * LoadMatrixf)(const GLfloat *);
+ void (GLAPIENTRY * LoadMatrixd)(const GLdouble *);
+ void (GLAPIENTRY * MatrixMode)(GLenum);
+ void (GLAPIENTRY * MultMatrixf)(const GLfloat *);
+ void (GLAPIENTRY * MultMatrixd)(const GLdouble *);
+ void (GLAPIENTRY * Ortho)(GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble);
+ void (GLAPIENTRY * PopMatrix)(void);
+ void (GLAPIENTRY * PushMatrix)(void);
+ void (GLAPIENTRY * Rotated)(GLdouble, GLdouble, GLdouble, GLdouble);
+ void (GLAPIENTRY * Rotatef)(GLfloat, GLfloat, GLfloat, GLfloat);
+ void (GLAPIENTRY * Scaled)(GLdouble, GLdouble, GLdouble);
+ void (GLAPIENTRY * Scalef)(GLfloat, GLfloat, GLfloat);
+ void (GLAPIENTRY * Translated)(GLdouble, GLdouble, GLdouble);
+ void (GLAPIENTRY * Translatef)(GLfloat, GLfloat, GLfloat);
+ void (GLAPIENTRY * Viewport)(GLint, GLint, GLsizei, GLsizei);
+ void (GLAPIENTRY * ArrayElement)(GLint);
+ void (GLAPIENTRY * BindTexture)(GLenum, GLuint);
+ void (GLAPIENTRY * ColorPointer)(GLint, GLenum, GLsizei, const GLvoid *);
+ void (GLAPIENTRY * DisableClientState)(GLenum);
+ void (GLAPIENTRY * DrawArrays)(GLenum, GLint, GLsizei);
+ void (GLAPIENTRY * DrawElements)(GLenum, GLsizei, GLenum, const GLvoid *);
+ void (GLAPIENTRY * EdgeFlagPointer)(GLsizei, const GLvoid *);
+ void (GLAPIENTRY * EnableClientState)(GLenum);
+ void (GLAPIENTRY * IndexPointer)(GLenum, GLsizei, const GLvoid *);
+ void (GLAPIENTRY * Indexub)(GLubyte);
+ void (GLAPIENTRY * Indexubv)(const GLubyte *);
+ void (GLAPIENTRY * InterleavedArrays)(GLenum, GLsizei, const GLvoid *);
+ void (GLAPIENTRY * NormalPointer)(GLenum, GLsizei, const GLvoid *);
+ void (GLAPIENTRY * PolygonOffset)(GLfloat, GLfloat);
+ void (GLAPIENTRY * TexCoordPointer)(GLint, GLenum, GLsizei, const GLvoid *);
+ void (GLAPIENTRY * VertexPointer)(GLint, GLenum, GLsizei, const GLvoid *);
+ GLboolean (GLAPIENTRY * AreTexturesResident)(GLsizei, const GLuint *, GLboolean *);
+ void (GLAPIENTRY * CopyTexImage1D)(GLenum, GLint, GLenum, GLint, GLint, GLsizei, GLint);
+ void (GLAPIENTRY * CopyTexImage2D)(GLenum, GLint, GLenum, GLint, GLint, GLsizei, GLsizei, GLint);
+ void (GLAPIENTRY * CopyTexSubImage1D)(GLenum, GLint, GLint, GLint, GLint, GLsizei);
+ void (GLAPIENTRY * CopyTexSubImage2D)(GLenum, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei);
+ void (GLAPIENTRY * DeleteTextures)(GLsizei, const GLuint *);
+ void (GLAPIENTRY * GenTextures)(GLsizei, GLuint *);
+ void (GLAPIENTRY * GetPointerv)(GLenum, GLvoid **);
+ GLboolean (GLAPIENTRY * IsTexture)(GLuint);
+ void (GLAPIENTRY * PrioritizeTextures)(GLsizei, const GLuint *, const GLclampf *);
+ void (GLAPIENTRY * TexSubImage1D)(GLenum, GLint, GLint, GLsizei, GLenum, GLenum, const GLvoid *);
+ void (GLAPIENTRY * TexSubImage2D)(GLenum, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLenum, const GLvoid *);
+ void (GLAPIENTRY * PopClientAttrib)(void);
+ void (GLAPIENTRY * PushClientAttrib)(GLbitfield);
+};
+
+typedef struct __GLdispatchTableRec GLDISPATCHTABLE;
+
+typedef struct _GLCLTPROCTABLE
+{
+ int cEntries;
+ GLDISPATCHTABLE glDispatchTable;
+} GLCLTPROCTABLE, * PGLCLTPROCTABLE;
+
+typedef VOID (APIENTRY * PFN_SETPROCTABLE)(PGLCLTPROCTABLE);
+
+BOOL APIENTRY
+DrvCopyContext(
+ DHGLRC dhrcSource,
+ DHGLRC dhrcDest,
+ UINT fuMask );
+
+DHGLRC APIENTRY
+DrvCreateLayerContext(
+ HDC hdc,
+ INT iLayerPlane );
+
+DHGLRC APIENTRY
+DrvCreateContext(
+ HDC hdc );
+
+BOOL APIENTRY
+DrvDeleteContext(
+ DHGLRC dhglrc );
+
+BOOL APIENTRY
+DrvDescribeLayerPlane(
+ HDC hdc,
+ INT iPixelFormat,
+ INT iLayerPlane,
+ UINT nBytes,
+ LPLAYERPLANEDESCRIPTOR plpd );
+
+LONG APIENTRY
+DrvDescribePixelFormat(
+ HDC hdc,
+ INT iPixelFormat,
+ ULONG cjpfd,
+ PIXELFORMATDESCRIPTOR *ppfd );
+
+int APIENTRY
+DrvGetLayerPaletteEntries(
+ HDC hdc,
+ INT iLayerPlane,
+ INT iStart,
+ INT cEntries,
+ COLORREF *pcr );
+
+PROC APIENTRY
+DrvGetProcAddress(
+ LPCSTR lpszProc );
+
+BOOL APIENTRY
+DrvRealizeLayerPalette(
+ HDC hdc,
+ INT iLayerPlane,
+ BOOL bRealize );
+
+BOOL APIENTRY
+DrvReleaseContext(
+ DHGLRC dhglrc );
+
+void APIENTRY
+DrvSetCallbackProcs(
+ INT nProcs,
+ PROC *pProcs );
+
+PGLCLTPROCTABLE APIENTRY
+DrvSetContext(
+ HDC hdc,
+ DHGLRC dhglrc,
+ PFN_SETPROCTABLE pfnSetProcTable );
+
+int APIENTRY
+DrvSetLayerPaletteEntries(
+ HDC hdc,
+ INT iLayerPlane,
+ INT iStart,
+ INT cEntries,
+ CONST COLORREF *pcr );
+
+BOOL APIENTRY
+DrvSetPixelFormat(
+ HDC hdc,
+ LONG iPixelFormat );
+
+BOOL APIENTRY
+DrvShareLists(
+ DHGLRC dhglrc1,
+ DHGLRC dhglrc2 );
+
+BOOL APIENTRY
+DrvSwapBuffers(
+ HDC hdc );
+
+BOOL APIENTRY
+DrvSwapLayerBuffers(
+ HDC hdc,
+ UINT fuPlanes );
+
+BOOL APIENTRY
+DrvValidateVersion(
+ ULONG ulVersion );
+
+#endif /* STW_ICD_H */
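
A hedged sketch, not part of this commit, of how a loader or standalone test harness might resolve the Drv* entry points declared above from the built driver DLL. The DLL name "opengl32.dll" and the version value 1 accepted by DrvValidateVersion are assumptions taken from the export list and stw_icd.c; everything else here is illustrative.

/* Illustrative ICD loading sketch -- error handling kept minimal. */
#include <windows.h>
#include <stdio.h>

typedef BOOL  (APIENTRY *PFN_DRVVALIDATEVERSION)(ULONG);
typedef ULONG (APIENTRY *PFN_DRVCREATECONTEXT)(HDC);   /* returns a DHGLRC */

int load_icd_example(HDC hdc)
{
   HMODULE icd = LoadLibraryA("opengl32.dll");   /* name is an assumption */
   PFN_DRVVALIDATEVERSION validate;
   PFN_DRVCREATECONTEXT   create;

   if (!icd)
      return 0;

   validate = (PFN_DRVVALIDATEVERSION)GetProcAddress(icd, "DrvValidateVersion");
   create   = (PFN_DRVCREATECONTEXT)GetProcAddress(icd, "DrvCreateContext");

   if (!validate || !create || !validate(1))   /* 1: version accepted above */
      return 0;

   printf("created DHGLRC %lu\n", (unsigned long)create(hdc));
   return 1;
}
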
diff --git a/src/gallium/state_trackers/wgl/opengl32.def b/src/gallium/state_trackers/wgl/opengl32.def
new file mode 100644
index 0000000000..596417ed84
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/opengl32.def
@@ -0,0 +1,388 @@
+EXPORTS
+; GlmfBeginGlsBlock
+; GlmfCloseMetaFile
+; GlmfEndGlsBlock
+; GlmfEndPlayback
+; GlmfInitPlayback
+; GlmfPlayGlsRecord
+ glAccum
+ glAlphaFunc
+ glAreTexturesResident
+ glArrayElement
+ glBegin
+ glBindTexture
+ glBitmap
+ glBlendFunc
+ glCallList
+ glCallLists
+ glClear
+ glClearAccum
+ glClearColor
+ glClearDepth
+ glClearIndex
+ glClearStencil
+ glClipPlane
+ glColor3b
+ glColor3bv
+ glColor3d
+ glColor3dv
+ glColor3f
+ glColor3fv
+ glColor3i
+ glColor3iv
+ glColor3s
+ glColor3sv
+ glColor3ub
+ glColor3ubv
+ glColor3ui
+ glColor3uiv
+ glColor3us
+ glColor3usv
+ glColor4b
+ glColor4bv
+ glColor4d
+ glColor4dv
+ glColor4f
+ glColor4fv
+ glColor4i
+ glColor4iv
+ glColor4s
+ glColor4sv
+ glColor4ub
+ glColor4ubv
+ glColor4ui
+ glColor4uiv
+ glColor4us
+ glColor4usv
+ glColorMask
+ glColorMaterial
+ glColorPointer
+ glCopyPixels
+ glCopyTexImage1D
+ glCopyTexImage2D
+ glCopyTexSubImage1D
+ glCopyTexSubImage2D
+ glCullFace
+; glDebugEntry
+ glDeleteLists
+ glDeleteTextures
+ glDepthFunc
+ glDepthMask
+ glDepthRange
+ glDisable
+ glDisableClientState
+ glDrawArrays
+ glDrawBuffer
+ glDrawElements
+ glDrawPixels
+ glEdgeFlag
+ glEdgeFlagPointer
+ glEdgeFlagv
+ glEnable
+ glEnableClientState
+ glEnd
+ glEndList
+ glEvalCoord1d
+ glEvalCoord1dv
+ glEvalCoord1f
+ glEvalCoord1fv
+ glEvalCoord2d
+ glEvalCoord2dv
+ glEvalCoord2f
+ glEvalCoord2fv
+ glEvalMesh1
+ glEvalMesh2
+ glEvalPoint1
+ glEvalPoint2
+ glFeedbackBuffer
+ glFinish
+ glFlush
+ glFogf
+ glFogfv
+ glFogi
+ glFogiv
+ glFrontFace
+ glFrustum
+ glGenLists
+ glGenTextures
+ glGetBooleanv
+ glGetClipPlane
+ glGetDoublev
+ glGetError
+ glGetFloatv
+ glGetIntegerv
+ glGetLightfv
+ glGetLightiv
+ glGetMapdv
+ glGetMapfv
+ glGetMapiv
+ glGetMaterialfv
+ glGetMaterialiv
+ glGetPixelMapfv
+ glGetPixelMapuiv
+ glGetPixelMapusv
+ glGetPointerv
+ glGetPolygonStipple
+ glGetString
+ glGetTexEnvfv
+ glGetTexEnviv
+ glGetTexGendv
+ glGetTexGenfv
+ glGetTexGeniv
+ glGetTexImage
+ glGetTexLevelParameterfv
+ glGetTexLevelParameteriv
+ glGetTexParameterfv
+ glGetTexParameteriv
+ glHint
+ glIndexMask
+ glIndexPointer
+ glIndexd
+ glIndexdv
+ glIndexf
+ glIndexfv
+ glIndexi
+ glIndexiv
+ glIndexs
+ glIndexsv
+ glIndexub
+ glIndexubv
+ glInitNames
+ glInterleavedArrays
+ glIsEnabled
+ glIsList
+ glIsTexture
+ glLightModelf
+ glLightModelfv
+ glLightModeli
+ glLightModeliv
+ glLightf
+ glLightfv
+ glLighti
+ glLightiv
+ glLineStipple
+ glLineWidth
+ glListBase
+ glLoadIdentity
+ glLoadMatrixd
+ glLoadMatrixf
+ glLoadName
+ glLogicOp
+ glMap1d
+ glMap1f
+ glMap2d
+ glMap2f
+ glMapGrid1d
+ glMapGrid1f
+ glMapGrid2d
+ glMapGrid2f
+ glMaterialf
+ glMaterialfv
+ glMateriali
+ glMaterialiv
+ glMatrixMode
+ glMultMatrixd
+ glMultMatrixf
+ glNewList
+ glNormal3b
+ glNormal3bv
+ glNormal3d
+ glNormal3dv
+ glNormal3f
+ glNormal3fv
+ glNormal3i
+ glNormal3iv
+ glNormal3s
+ glNormal3sv
+ glNormalPointer
+ glOrtho
+ glPassThrough
+ glPixelMapfv
+ glPixelMapuiv
+ glPixelMapusv
+ glPixelStoref
+ glPixelStorei
+ glPixelTransferf
+ glPixelTransferi
+ glPixelZoom
+ glPointSize
+ glPolygonMode
+ glPolygonOffset
+ glPolygonStipple
+ glPopAttrib
+ glPopClientAttrib
+ glPopMatrix
+ glPopName
+ glPrioritizeTextures
+ glPushAttrib
+ glPushClientAttrib
+ glPushMatrix
+ glPushName
+ glRasterPos2d
+ glRasterPos2dv
+ glRasterPos2f
+ glRasterPos2fv
+ glRasterPos2i
+ glRasterPos2iv
+ glRasterPos2s
+ glRasterPos2sv
+ glRasterPos3d
+ glRasterPos3dv
+ glRasterPos3f
+ glRasterPos3fv
+ glRasterPos3i
+ glRasterPos3iv
+ glRasterPos3s
+ glRasterPos3sv
+ glRasterPos4d
+ glRasterPos4dv
+ glRasterPos4f
+ glRasterPos4fv
+ glRasterPos4i
+ glRasterPos4iv
+ glRasterPos4s
+ glRasterPos4sv
+ glReadBuffer
+ glReadPixels
+ glRectd
+ glRectdv
+ glRectf
+ glRectfv
+ glRecti
+ glRectiv
+ glRects
+ glRectsv
+ glRenderMode
+ glRotated
+ glRotatef
+ glScaled
+ glScalef
+ glScissor
+ glSelectBuffer
+ glShadeModel
+ glStencilFunc
+ glStencilMask
+ glStencilOp
+ glTexCoord1d
+ glTexCoord1dv
+ glTexCoord1f
+ glTexCoord1fv
+ glTexCoord1i
+ glTexCoord1iv
+ glTexCoord1s
+ glTexCoord1sv
+ glTexCoord2d
+ glTexCoord2dv
+ glTexCoord2f
+ glTexCoord2fv
+ glTexCoord2i
+ glTexCoord2iv
+ glTexCoord2s
+ glTexCoord2sv
+ glTexCoord3d
+ glTexCoord3dv
+ glTexCoord3f
+ glTexCoord3fv
+ glTexCoord3i
+ glTexCoord3iv
+ glTexCoord3s
+ glTexCoord3sv
+ glTexCoord4d
+ glTexCoord4dv
+ glTexCoord4f
+ glTexCoord4fv
+ glTexCoord4i
+ glTexCoord4iv
+ glTexCoord4s
+ glTexCoord4sv
+ glTexCoordPointer
+ glTexEnvf
+ glTexEnvfv
+ glTexEnvi
+ glTexEnviv
+ glTexGend
+ glTexGendv
+ glTexGenf
+ glTexGenfv
+ glTexGeni
+ glTexGeniv
+ glTexImage1D
+ glTexImage2D
+ glTexParameterf
+ glTexParameterfv
+ glTexParameteri
+ glTexParameteriv
+ glTexSubImage1D
+ glTexSubImage2D
+ glTranslated
+ glTranslatef
+ glVertex2d
+ glVertex2dv
+ glVertex2f
+ glVertex2fv
+ glVertex2i
+ glVertex2iv
+ glVertex2s
+ glVertex2sv
+ glVertex3d
+ glVertex3dv
+ glVertex3f
+ glVertex3fv
+ glVertex3i
+ glVertex3iv
+ glVertex3s
+ glVertex3sv
+ glVertex4d
+ glVertex4dv
+ glVertex4f
+ glVertex4fv
+ glVertex4i
+ glVertex4iv
+ glVertex4s
+ glVertex4sv
+ glVertexPointer
+ glViewport
+ wglChoosePixelFormat
+ wglCopyContext
+ wglCreateContext
+ wglCreateLayerContext
+ wglDeleteContext
+ wglDescribeLayerPlane
+ wglDescribePixelFormat
+ wglGetCurrentContext
+ wglGetCurrentDC
+; wglGetDefaultProcAddress
+ wglGetLayerPaletteEntries
+ wglGetPixelFormat
+ wglGetProcAddress
+ wglMakeCurrent
+ wglRealizeLayerPalette
+ wglSetLayerPaletteEntries
+ wglSetPixelFormat
+ wglShareLists
+ wglSwapBuffers
+ wglSwapLayerBuffers
+; wglSwapMultipleBuffers
+ wglUseFontBitmapsA
+ wglUseFontBitmapsW
+ wglUseFontOutlinesA
+ wglUseFontOutlinesW
+ wglGetExtensionsStringARB
+ DrvCopyContext
+ DrvCreateContext
+ DrvCreateLayerContext
+ DrvDeleteContext
+ DrvDescribeLayerPlane
+ DrvDescribePixelFormat
+ DrvGetLayerPaletteEntries
+ DrvGetProcAddress
+ DrvRealizeLayerPalette
+ DrvReleaseContext
+ DrvSetCallbackProcs
+ DrvSetContext
+ DrvSetLayerPaletteEntries
+ DrvSetPixelFormat
+ DrvShareLists
+ DrvSwapBuffers
+ DrvSwapLayerBuffers
+ DrvValidateVersion
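
Since the export list above includes wglGetExtensionsStringARB alongside the core wgl* entry points, a client can query the driver's WGL extension string in the usual way. A minimal sketch, assuming a current GL context is already bound to hdc and the program links against this opengl32:

/* Illustrative client-side query of the exported WGL extension string. */
#include <windows.h>
#include <stdio.h>

typedef const char * (WINAPI *PFNWGLGETEXTENSIONSSTRINGARBPROC)(HDC hdc);

void print_wgl_extensions(HDC hdc)
{
   PFNWGLGETEXTENSIONSSTRINGARBPROC get_ext =
      (PFNWGLGETEXTENSIONSSTRINGARBPROC)
         wglGetProcAddress("wglGetExtensionsStringARB");

   if (get_ext)
      printf("WGL extensions: %s\n", get_ext(hdc));
}
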
diff --git a/src/gallium/state_trackers/wgl/opengl32.mingw.def b/src/gallium/state_trackers/wgl/opengl32.mingw.def
new file mode 100644
index 0000000000..1f03ea3b37
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/opengl32.mingw.def
@@ -0,0 +1,387 @@
+EXPORTS
+; GlmfBeginGlsBlock = GlmfBeginGlsBlock@4
+; GlmfCloseMetaFile = GlmfCloseMetaFile@4
+; GlmfEndGlsBlock = GlmfEndGlsBlock@4
+; GlmfEndPlayback = GlmfEndPlayback@4
+; GlmfInitPlayback = GlmfInitPlayback@12
+; GlmfPlayGlsRecord = GlmfPlayGlsRecord@16
+ glAccum = glAccum@8
+ glAlphaFunc = glAlphaFunc@8
+ glAreTexturesResident = glAreTexturesResident@12
+ glArrayElement = glArrayElement@4
+ glBegin = glBegin@4
+ glBindTexture = glBindTexture@8
+ glBitmap = glBitmap@28
+ glBlendFunc = glBlendFunc@8
+ glCallList = glCallList@4
+ glCallLists = glCallLists@12
+ glClear = glClear@4
+ glClearAccum = glClearAccum@16
+ glClearColor = glClearColor@16
+ glClearDepth = glClearDepth@8
+ glClearIndex = glClearIndex@4
+ glClearStencil = glClearStencil@4
+ glClipPlane = glClipPlane@8
+ glColor3b = glColor3b@12
+ glColor3bv = glColor3bv@4
+ glColor3d = glColor3d@24
+ glColor3dv = glColor3dv@4
+ glColor3f = glColor3f@12
+ glColor3fv = glColor3fv@4
+ glColor3i = glColor3i@12
+ glColor3iv = glColor3iv@4
+ glColor3s = glColor3s@12
+ glColor3sv = glColor3sv@4
+ glColor3ub = glColor3ub@12
+ glColor3ubv = glColor3ubv@4
+ glColor3ui = glColor3ui@12
+ glColor3uiv = glColor3uiv@4
+ glColor3us = glColor3us@12
+ glColor3usv = glColor3usv@4
+ glColor4b = glColor4b@16
+ glColor4bv = glColor4bv@4
+ glColor4d = glColor4d@32
+ glColor4dv = glColor4dv@4
+ glColor4f = glColor4f@16
+ glColor4fv = glColor4fv@4
+ glColor4i = glColor4i@16
+ glColor4iv = glColor4iv@4
+ glColor4s = glColor4s@16
+ glColor4sv = glColor4sv@4
+ glColor4ub = glColor4ub@16
+ glColor4ubv = glColor4ubv@4
+ glColor4ui = glColor4ui@16
+ glColor4uiv = glColor4uiv@4
+ glColor4us = glColor4us@16
+ glColor4usv = glColor4usv@4
+ glColorMask = glColorMask@16
+ glColorMaterial = glColorMaterial@8
+ glColorPointer = glColorPointer@16
+ glCopyPixels = glCopyPixels@20
+ glCopyTexImage1D = glCopyTexImage1D@28
+ glCopyTexImage2D = glCopyTexImage2D@32
+ glCopyTexSubImage1D = glCopyTexSubImage1D@24
+ glCopyTexSubImage2D = glCopyTexSubImage2D@32
+ glCullFace = glCullFace@4
+; glDebugEntry = glDebugEntry@8
+ glDeleteLists = glDeleteLists@8
+ glDeleteTextures = glDeleteTextures@8
+ glDepthFunc = glDepthFunc@4
+ glDepthMask = glDepthMask@4
+ glDepthRange = glDepthRange@16
+ glDisable = glDisable@4
+ glDisableClientState = glDisableClientState@4
+ glDrawArrays = glDrawArrays@12
+ glDrawBuffer = glDrawBuffer@4
+ glDrawElements = glDrawElements@16
+ glDrawPixels = glDrawPixels@20
+ glEdgeFlag = glEdgeFlag@4
+ glEdgeFlagPointer = glEdgeFlagPointer@8
+ glEdgeFlagv = glEdgeFlagv@4
+ glEnable = glEnable@4
+ glEnableClientState = glEnableClientState@4
+ glEnd = glEnd@0
+ glEndList = glEndList@0
+ glEvalCoord1d = glEvalCoord1d@8
+ glEvalCoord1dv = glEvalCoord1dv@4
+ glEvalCoord1f = glEvalCoord1f@4
+ glEvalCoord1fv = glEvalCoord1fv@4
+ glEvalCoord2d = glEvalCoord2d@16
+ glEvalCoord2dv = glEvalCoord2dv@4
+ glEvalCoord2f = glEvalCoord2f@8
+ glEvalCoord2fv = glEvalCoord2fv@4
+ glEvalMesh1 = glEvalMesh1@12
+ glEvalMesh2 = glEvalMesh2@20
+ glEvalPoint1 = glEvalPoint1@4
+ glEvalPoint2 = glEvalPoint2@8
+ glFeedbackBuffer = glFeedbackBuffer@12
+ glFinish = glFinish@0
+ glFlush = glFlush@0
+ glFogf = glFogf@8
+ glFogfv = glFogfv@8
+ glFogi = glFogi@8
+ glFogiv = glFogiv@8
+ glFrontFace = glFrontFace@4
+ glFrustum = glFrustum@48
+ glGenLists = glGenLists@4
+ glGenTextures = glGenTextures@8
+ glGetBooleanv = glGetBooleanv@8
+ glGetClipPlane = glGetClipPlane@8
+ glGetDoublev = glGetDoublev@8
+ glGetError = glGetError@0
+ glGetFloatv = glGetFloatv@8
+ glGetIntegerv = glGetIntegerv@8
+ glGetLightfv = glGetLightfv@12
+ glGetLightiv = glGetLightiv@12
+ glGetMapdv = glGetMapdv@12
+ glGetMapfv = glGetMapfv@12
+ glGetMapiv = glGetMapiv@12
+ glGetMaterialfv = glGetMaterialfv@12
+ glGetMaterialiv = glGetMaterialiv@12
+ glGetPixelMapfv = glGetPixelMapfv@8
+ glGetPixelMapuiv = glGetPixelMapuiv@8
+ glGetPixelMapusv = glGetPixelMapusv@8
+ glGetPointerv = glGetPointerv@8
+ glGetPolygonStipple = glGetPolygonStipple@4
+ glGetString = glGetString@4
+ glGetTexEnvfv = glGetTexEnvfv@12
+ glGetTexEnviv = glGetTexEnviv@12
+ glGetTexGendv = glGetTexGendv@12
+ glGetTexGenfv = glGetTexGenfv@12
+ glGetTexGeniv = glGetTexGeniv@12
+ glGetTexImage = glGetTexImage@20
+ glGetTexLevelParameterfv = glGetTexLevelParameterfv@16
+ glGetTexLevelParameteriv = glGetTexLevelParameteriv@16
+ glGetTexParameterfv = glGetTexParameterfv@12
+ glGetTexParameteriv = glGetTexParameteriv@12
+ glHint = glHint@8
+ glIndexMask = glIndexMask@4
+ glIndexPointer = glIndexPointer@12
+ glIndexd = glIndexd@8
+ glIndexdv = glIndexdv@4
+ glIndexf = glIndexf@4
+ glIndexfv = glIndexfv@4
+ glIndexi = glIndexi@4
+ glIndexiv = glIndexiv@4
+ glIndexs = glIndexs@4
+ glIndexsv = glIndexsv@4
+ glIndexub = glIndexub@4
+ glIndexubv = glIndexubv@4
+ glInitNames = glInitNames@0
+ glInterleavedArrays = glInterleavedArrays@12
+ glIsEnabled = glIsEnabled@4
+ glIsList = glIsList@4
+ glIsTexture = glIsTexture@4
+ glLightModelf = glLightModelf@8
+ glLightModelfv = glLightModelfv@8
+ glLightModeli = glLightModeli@8
+ glLightModeliv = glLightModeliv@8
+ glLightf = glLightf@12
+ glLightfv = glLightfv@12
+ glLighti = glLighti@12
+ glLightiv = glLightiv@12
+ glLineStipple = glLineStipple@8
+ glLineWidth = glLineWidth@4
+ glListBase = glListBase@4
+ glLoadIdentity = glLoadIdentity@0
+ glLoadMatrixd = glLoadMatrixd@4
+ glLoadMatrixf = glLoadMatrixf@4
+ glLoadName = glLoadName@4
+ glLogicOp = glLogicOp@4
+ glMap1d = glMap1d@32
+ glMap1f = glMap1f@24
+ glMap2d = glMap2d@56
+ glMap2f = glMap2f@40
+ glMapGrid1d = glMapGrid1d@20
+ glMapGrid1f = glMapGrid1f@12
+ glMapGrid2d = glMapGrid2d@40
+ glMapGrid2f = glMapGrid2f@24
+ glMaterialf = glMaterialf@12
+ glMaterialfv = glMaterialfv@12
+ glMateriali = glMateriali@12
+ glMaterialiv = glMaterialiv@12
+ glMatrixMode = glMatrixMode@4
+ glMultMatrixd = glMultMatrixd@4
+ glMultMatrixf = glMultMatrixf@4
+ glNewList = glNewList@8
+ glNormal3b = glNormal3b@12
+ glNormal3bv = glNormal3bv@4
+ glNormal3d = glNormal3d@24
+ glNormal3dv = glNormal3dv@4
+ glNormal3f = glNormal3f@12
+ glNormal3fv = glNormal3fv@4
+ glNormal3i = glNormal3i@12
+ glNormal3iv = glNormal3iv@4
+ glNormal3s = glNormal3s@12
+ glNormal3sv = glNormal3sv@4
+ glNormalPointer = glNormalPointer@12
+ glOrtho = glOrtho@48
+ glPassThrough = glPassThrough@4
+ glPixelMapfv = glPixelMapfv@12
+ glPixelMapuiv = glPixelMapuiv@12
+ glPixelMapusv = glPixelMapusv@12
+ glPixelStoref = glPixelStoref@8
+ glPixelStorei = glPixelStorei@8
+ glPixelTransferf = glPixelTransferf@8
+ glPixelTransferi = glPixelTransferi@8
+ glPixelZoom = glPixelZoom@8
+ glPointSize = glPointSize@4
+ glPolygonMode = glPolygonMode@8
+ glPolygonOffset = glPolygonOffset@8
+ glPolygonStipple = glPolygonStipple@4
+ glPopAttrib = glPopAttrib@0
+ glPopClientAttrib = glPopClientAttrib@0
+ glPopMatrix = glPopMatrix@0
+ glPopName = glPopName@0
+ glPrioritizeTextures = glPrioritizeTextures@12
+ glPushAttrib = glPushAttrib@4
+ glPushClientAttrib = glPushClientAttrib@4
+ glPushMatrix = glPushMatrix@0
+ glPushName = glPushName@4
+ glRasterPos2d = glRasterPos2d@16
+ glRasterPos2dv = glRasterPos2dv@4
+ glRasterPos2f = glRasterPos2f@8
+ glRasterPos2fv = glRasterPos2fv@4
+ glRasterPos2i = glRasterPos2i@8
+ glRasterPos2iv = glRasterPos2iv@4
+ glRasterPos2s = glRasterPos2s@8
+ glRasterPos2sv = glRasterPos2sv@4
+ glRasterPos3d = glRasterPos3d@24
+ glRasterPos3dv = glRasterPos3dv@4
+ glRasterPos3f = glRasterPos3f@12
+ glRasterPos3fv = glRasterPos3fv@4
+ glRasterPos3i = glRasterPos3i@12
+ glRasterPos3iv = glRasterPos3iv@4
+ glRasterPos3s = glRasterPos3s@12
+ glRasterPos3sv = glRasterPos3sv@4
+ glRasterPos4d = glRasterPos4d@32
+ glRasterPos4dv = glRasterPos4dv@4
+ glRasterPos4f = glRasterPos4f@16
+ glRasterPos4fv = glRasterPos4fv@4
+ glRasterPos4i = glRasterPos4i@16
+ glRasterPos4iv = glRasterPos4iv@4
+ glRasterPos4s = glRasterPos4s@16
+ glRasterPos4sv = glRasterPos4sv@4
+ glReadBuffer = glReadBuffer@4
+ glReadPixels = glReadPixels@28
+ glRectd = glRectd@32
+ glRectdv = glRectdv@8
+ glRectf = glRectf@16
+ glRectfv = glRectfv@8
+ glRecti = glRecti@16
+ glRectiv = glRectiv@8
+ glRects = glRects@16
+ glRectsv = glRectsv@8
+ glRenderMode = glRenderMode@4
+ glRotated = glRotated@32
+ glRotatef = glRotatef@16
+ glScaled = glScaled@24
+ glScalef = glScalef@12
+ glScissor = glScissor@16
+ glSelectBuffer = glSelectBuffer@8
+ glShadeModel = glShadeModel@4
+ glStencilFunc = glStencilFunc@12
+ glStencilMask = glStencilMask@4
+ glStencilOp = glStencilOp@12
+ glTexCoord1d = glTexCoord1d@8
+ glTexCoord1dv = glTexCoord1dv@4
+ glTexCoord1f = glTexCoord1f@4
+ glTexCoord1fv = glTexCoord1fv@4
+ glTexCoord1i = glTexCoord1i@4
+ glTexCoord1iv = glTexCoord1iv@4
+ glTexCoord1s = glTexCoord1s@4
+ glTexCoord1sv = glTexCoord1sv@4
+ glTexCoord2d = glTexCoord2d@16
+ glTexCoord2dv = glTexCoord2dv@4
+ glTexCoord2f = glTexCoord2f@8
+ glTexCoord2fv = glTexCoord2fv@4
+ glTexCoord2i = glTexCoord2i@8
+ glTexCoord2iv = glTexCoord2iv@4
+ glTexCoord2s = glTexCoord2s@8
+ glTexCoord2sv = glTexCoord2sv@4
+ glTexCoord3d = glTexCoord3d@24
+ glTexCoord3dv = glTexCoord3dv@4
+ glTexCoord3f = glTexCoord3f@12
+ glTexCoord3fv = glTexCoord3fv@4
+ glTexCoord3i = glTexCoord3i@12
+ glTexCoord3iv = glTexCoord3iv@4
+ glTexCoord3s = glTexCoord3s@12
+ glTexCoord3sv = glTexCoord3sv@4
+ glTexCoord4d = glTexCoord4d@32
+ glTexCoord4dv = glTexCoord4dv@4
+ glTexCoord4f = glTexCoord4f@16
+ glTexCoord4fv = glTexCoord4fv@4
+ glTexCoord4i = glTexCoord4i@16
+ glTexCoord4iv = glTexCoord4iv@4
+ glTexCoord4s = glTexCoord4s@16
+ glTexCoord4sv = glTexCoord4sv@4
+ glTexCoordPointer = glTexCoordPointer@16
+ glTexEnvf = glTexEnvf@12
+ glTexEnvfv = glTexEnvfv@12
+ glTexEnvi = glTexEnvi@12
+ glTexEnviv = glTexEnviv@12
+ glTexGend = glTexGend@16
+ glTexGendv = glTexGendv@12
+ glTexGenf = glTexGenf@12
+ glTexGenfv = glTexGenfv@12
+ glTexGeni = glTexGeni@12
+ glTexGeniv = glTexGeniv@12
+ glTexImage1D = glTexImage1D@32
+ glTexImage2D = glTexImage2D@36
+ glTexParameterf = glTexParameterf@12
+ glTexParameterfv = glTexParameterfv@12
+ glTexParameteri = glTexParameteri@12
+ glTexParameteriv = glTexParameteriv@12
+ glTexSubImage1D = glTexSubImage1D@28
+ glTexSubImage2D = glTexSubImage2D@36
+ glTranslated = glTranslated@24
+ glTranslatef = glTranslatef@12
+ glVertex2d = glVertex2d@16
+ glVertex2dv = glVertex2dv@4
+ glVertex2f = glVertex2f@8
+ glVertex2fv = glVertex2fv@4
+ glVertex2i = glVertex2i@8
+ glVertex2iv = glVertex2iv@4
+ glVertex2s = glVertex2s@8
+ glVertex2sv = glVertex2sv@4
+ glVertex3d = glVertex3d@24
+ glVertex3dv = glVertex3dv@4
+ glVertex3f = glVertex3f@12
+ glVertex3fv = glVertex3fv@4
+ glVertex3i = glVertex3i@12
+ glVertex3iv = glVertex3iv@4
+ glVertex3s = glVertex3s@12
+ glVertex3sv = glVertex3sv@4
+ glVertex4d = glVertex4d@32
+ glVertex4dv = glVertex4dv@4
+ glVertex4f = glVertex4f@16
+ glVertex4fv = glVertex4fv@4
+ glVertex4i = glVertex4i@16
+ glVertex4iv = glVertex4iv@4
+ glVertex4s = glVertex4s@16
+ glVertex4sv = glVertex4sv@4
+ glVertexPointer = glVertexPointer@16
+ glViewport = glViewport@16
+ wglChoosePixelFormat = wglChoosePixelFormat@8
+ wglCopyContext = wglCopyContext@12
+ wglCreateContext = wglCreateContext@4
+ wglCreateLayerContext = wglCreateLayerContext@8
+ wglDeleteContext = wglDeleteContext@4
+ wglDescribeLayerPlane = wglDescribeLayerPlane@20
+ wglDescribePixelFormat = wglDescribePixelFormat@16
+ wglGetCurrentContext = wglGetCurrentContext@0
+ wglGetCurrentDC = wglGetCurrentDC@0
+; wglGetDefaultProcAddress = wglGetDefaultProcAddress@4
+ wglGetLayerPaletteEntries = wglGetLayerPaletteEntries@20
+ wglGetPixelFormat = wglGetPixelFormat@4
+ wglGetProcAddress = wglGetProcAddress@4
+ wglMakeCurrent = wglMakeCurrent@8
+ wglRealizeLayerPalette = wglRealizeLayerPalette@12
+ wglSetLayerPaletteEntries = wglSetLayerPaletteEntries@20
+ wglSetPixelFormat = wglSetPixelFormat@12
+ wglShareLists = wglShareLists@8
+ wglSwapBuffers = wglSwapBuffers@4
+ wglSwapLayerBuffers = wglSwapLayerBuffers@8
+; wglSwapMultipleBuffers = wglSwapMultipleBuffers@8
+ wglUseFontBitmapsA = wglUseFontBitmapsA@16
+ wglUseFontBitmapsW = wglUseFontBitmapsW@16
+ wglUseFontOutlinesA = wglUseFontOutlinesA@32
+ wglUseFontOutlinesW = wglUseFontOutlinesW@32
+ DrvCopyContext = DrvCopyContext@12
+ DrvCreateContext = DrvCreateContext@4
+ DrvCreateLayerContext = DrvCreateLayerContext@8
+ DrvDeleteContext = DrvDeleteContext@4
+ DrvDescribeLayerPlane = DrvDescribeLayerPlane@20
+ DrvDescribePixelFormat = DrvDescribePixelFormat@16
+ DrvGetLayerPaletteEntries = DrvGetLayerPaletteEntries@20
+ DrvGetProcAddress = DrvGetProcAddress@4
+ DrvRealizeLayerPalette = DrvRealizeLayerPalette@12
+ DrvReleaseContext = DrvReleaseContext@4
+ DrvSetCallbackProcs = DrvSetCallbackProcs@8
+ DrvSetContext = DrvSetContext@12
+ DrvSetLayerPaletteEntries = DrvSetLayerPaletteEntries@20
+ DrvSetPixelFormat = DrvSetPixelFormat@8
+ DrvShareLists = DrvShareLists@8
+ DrvSwapBuffers = DrvSwapBuffers@4
+ DrvSwapLayerBuffers = DrvSwapLayerBuffers@8
+ DrvValidateVersion = DrvValidateVersion@4
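The "name = name@N" entries above follow the Win32 __stdcall name decoration: N is the total size, in bytes, of the arguments the caller pushes, and lines starting with ";" are commented out of the export list. A minimal illustration (not part of the patch, assuming the standard GL/gl.h prototypes):

/* Sketch: why glVertex3f decorates to glVertex3f@12 while glVertex3d
 * decorates to glVertex3d@24 -- three 4-byte floats versus three 8-byte
 * doubles. APIENTRY expands to __stdcall on 32-bit Windows. */
WINGDIAPI void APIENTRY glVertex3f(GLfloat x, GLfloat y, GLfloat z);    /* 3 * 4 = 12 */
WINGDIAPI void APIENTRY glVertex3d(GLdouble x, GLdouble y, GLdouble z); /* 3 * 8 = 24 */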
diff --git a/src/gallium/state_trackers/wgl/shared/stw_arbpixelformat.c b/src/gallium/state_trackers/wgl/shared/stw_arbpixelformat.c
new file mode 100644
index 0000000000..0e2d407699
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_arbpixelformat.c
@@ -0,0 +1,483 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ *
+ * WGL_ARB_pixel_format extension implementation.
+ *
+ * @sa http://www.opengl.org/registry/specs/ARB/wgl_pixel_format.txt
+ */
+
+
+#include <windows.h>
+
+#define WGL_WGLEXT_PROTOTYPES
+
+#include <GL/gl.h>
+#include <GL/wglext.h>
+
+#include "pipe/p_compiler.h"
+#include "util/u_memory.h"
+#include "stw_public.h"
+#include "stw_pixelformat.h"
+
+
+static boolean
+stw_query_attrib(
+ int iPixelFormat,
+ int iLayerPlane,
+ int attrib,
+ int *pvalue )
+{
+ uint count;
+ uint index;
+ const struct stw_pixelformat_info *pfi;
+
+ count = stw_pixelformat_get_extended_count();
+
+ if (attrib == WGL_NUMBER_PIXEL_FORMATS_ARB) {
+ *pvalue = (int) count;
+ return TRUE;
+ }
+
+ index = (uint) iPixelFormat - 1;
+ if (index >= count)
+ return FALSE;
+
+ pfi = stw_pixelformat_get_info( index );
+
+ switch (attrib) {
+ case WGL_DRAW_TO_WINDOW_ARB:
+ *pvalue = pfi->pfd.dwFlags & PFD_DRAW_TO_WINDOW ? TRUE : FALSE;
+ return TRUE;
+
+ case WGL_DRAW_TO_BITMAP_ARB:
+ *pvalue = pfi->pfd.dwFlags & PFD_DRAW_TO_BITMAP ? TRUE : FALSE;
+ return TRUE;
+
+ case WGL_NEED_PALETTE_ARB:
+ *pvalue = pfi->pfd.dwFlags & PFD_NEED_PALETTE ? TRUE : FALSE;
+ return TRUE;
+
+ case WGL_NEED_SYSTEM_PALETTE_ARB:
+ *pvalue = pfi->pfd.dwFlags & PFD_NEED_SYSTEM_PALETTE ? TRUE : FALSE;
+ return TRUE;
+
+ case WGL_SWAP_METHOD_ARB:
+ *pvalue = pfi->pfd.dwFlags & PFD_SWAP_COPY ? WGL_SWAP_COPY_ARB : WGL_SWAP_UNDEFINED_ARB;
+ return TRUE;
+
+ case WGL_SWAP_LAYER_BUFFERS_ARB:
+ *pvalue = FALSE;
+ return TRUE;
+
+ case WGL_NUMBER_OVERLAYS_ARB:
+ *pvalue = 0;
+ return TRUE;
+
+ case WGL_NUMBER_UNDERLAYS_ARB:
+ *pvalue = 0;
+ return TRUE;
+ }
+
+ if (iLayerPlane != 0)
+ return FALSE;
+
+ switch (attrib) {
+ case WGL_ACCELERATION_ARB:
+ *pvalue = WGL_FULL_ACCELERATION_ARB;
+ break;
+
+ case WGL_TRANSPARENT_ARB:
+ *pvalue = FALSE;
+ break;
+
+ case WGL_TRANSPARENT_RED_VALUE_ARB:
+ case WGL_TRANSPARENT_GREEN_VALUE_ARB:
+ case WGL_TRANSPARENT_BLUE_VALUE_ARB:
+ case WGL_TRANSPARENT_ALPHA_VALUE_ARB:
+ case WGL_TRANSPARENT_INDEX_VALUE_ARB:
+ break;
+
+ case WGL_SHARE_DEPTH_ARB:
+ case WGL_SHARE_STENCIL_ARB:
+ case WGL_SHARE_ACCUM_ARB:
+ *pvalue = TRUE;
+ break;
+
+ case WGL_SUPPORT_GDI_ARB:
+ *pvalue = pfi->pfd.dwFlags & PFD_SUPPORT_GDI ? TRUE : FALSE;
+ break;
+
+ case WGL_SUPPORT_OPENGL_ARB:
+ *pvalue = pfi->pfd.dwFlags & PFD_SUPPORT_OPENGL ? TRUE : FALSE;
+ break;
+
+ case WGL_DOUBLE_BUFFER_ARB:
+ *pvalue = pfi->pfd.dwFlags & PFD_DOUBLEBUFFER ? TRUE : FALSE;
+ break;
+
+ case WGL_STEREO_ARB:
+ *pvalue = pfi->pfd.dwFlags & PFD_STEREO ? TRUE : FALSE;
+ break;
+
+ case WGL_PIXEL_TYPE_ARB:
+ switch (pfi->pfd.iPixelType) {
+ case PFD_TYPE_RGBA:
+ *pvalue = WGL_TYPE_RGBA_ARB;
+ break;
+ case PFD_TYPE_COLORINDEX:
+ *pvalue = WGL_TYPE_COLORINDEX_ARB;
+ break;
+ default:
+ return FALSE;
+ }
+ break;
+
+ case WGL_COLOR_BITS_ARB:
+ *pvalue = pfi->pfd.cColorBits;
+ break;
+
+ case WGL_RED_BITS_ARB:
+ *pvalue = pfi->pfd.cRedBits;
+ break;
+
+ case WGL_RED_SHIFT_ARB:
+ *pvalue = pfi->pfd.cRedShift;
+ break;
+
+ case WGL_GREEN_BITS_ARB:
+ *pvalue = pfi->pfd.cGreenBits;
+ break;
+
+ case WGL_GREEN_SHIFT_ARB:
+ *pvalue = pfi->pfd.cGreenShift;
+ break;
+
+ case WGL_BLUE_BITS_ARB:
+ *pvalue = pfi->pfd.cBlueBits;
+ break;
+
+ case WGL_BLUE_SHIFT_ARB:
+ *pvalue = pfi->pfd.cBlueShift;
+ break;
+
+ case WGL_ALPHA_BITS_ARB:
+ *pvalue = pfi->pfd.cAlphaBits;
+ break;
+
+ case WGL_ALPHA_SHIFT_ARB:
+ *pvalue = pfi->pfd.cAlphaShift;
+ break;
+
+ case WGL_ACCUM_BITS_ARB:
+ *pvalue = pfi->pfd.cAccumBits;
+ break;
+
+ case WGL_ACCUM_RED_BITS_ARB:
+ *pvalue = pfi->pfd.cAccumRedBits;
+ break;
+
+ case WGL_ACCUM_GREEN_BITS_ARB:
+ *pvalue = pfi->pfd.cAccumGreenBits;
+ break;
+
+ case WGL_ACCUM_BLUE_BITS_ARB:
+ *pvalue = pfi->pfd.cAccumBlueBits;
+ break;
+
+ case WGL_ACCUM_ALPHA_BITS_ARB:
+ *pvalue = pfi->pfd.cAccumAlphaBits;
+ break;
+
+ case WGL_DEPTH_BITS_ARB:
+ *pvalue = pfi->pfd.cDepthBits;
+ break;
+
+ case WGL_STENCIL_BITS_ARB:
+ *pvalue = pfi->pfd.cStencilBits;
+ break;
+
+ case WGL_AUX_BUFFERS_ARB:
+ *pvalue = pfi->pfd.cAuxBuffers;
+ break;
+
+ case WGL_SAMPLE_BUFFERS_ARB:
+ *pvalue = pfi->numSampleBuffers;
+ break;
+
+ case WGL_SAMPLES_ARB:
+ *pvalue = pfi->numSamples;
+ break;
+
+ default:
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+struct attrib_match_info
+{
+ int attribute;
+ int weight;
+ BOOL exact;
+};
+
+static const struct attrib_match_info attrib_match[] = {
+
+ /* WGL_ARB_pixel_format */
+ { WGL_DRAW_TO_WINDOW_ARB, 0, TRUE },
+ { WGL_DRAW_TO_BITMAP_ARB, 0, TRUE },
+ { WGL_ACCELERATION_ARB, 0, TRUE },
+ { WGL_NEED_PALETTE_ARB, 0, TRUE },
+ { WGL_NEED_SYSTEM_PALETTE_ARB, 0, TRUE },
+ { WGL_SWAP_LAYER_BUFFERS_ARB, 0, TRUE },
+ { WGL_SWAP_METHOD_ARB, 0, TRUE },
+ { WGL_NUMBER_OVERLAYS_ARB, 4, FALSE },
+ { WGL_NUMBER_UNDERLAYS_ARB, 4, FALSE },
+ /*{ WGL_SHARE_DEPTH_ARB, 0, TRUE },*/ /* no overlays -- ignore */
+ /*{ WGL_SHARE_STENCIL_ARB, 0, TRUE },*/ /* no overlays -- ignore */
+ /*{ WGL_SHARE_ACCUM_ARB, 0, TRUE },*/ /* no overlays -- ignore */
+ { WGL_SUPPORT_GDI_ARB, 0, TRUE },
+ { WGL_SUPPORT_OPENGL_ARB, 0, TRUE },
+ { WGL_DOUBLE_BUFFER_ARB, 0, TRUE },
+ { WGL_STEREO_ARB, 0, TRUE },
+ { WGL_PIXEL_TYPE_ARB, 0, TRUE },
+ { WGL_COLOR_BITS_ARB, 1, FALSE },
+ { WGL_RED_BITS_ARB, 1, FALSE },
+ { WGL_GREEN_BITS_ARB, 1, FALSE },
+ { WGL_BLUE_BITS_ARB, 1, FALSE },
+ { WGL_ALPHA_BITS_ARB, 1, FALSE },
+ { WGL_ACCUM_BITS_ARB, 1, FALSE },
+ { WGL_ACCUM_RED_BITS_ARB, 1, FALSE },
+ { WGL_ACCUM_GREEN_BITS_ARB, 1, FALSE },
+ { WGL_ACCUM_BLUE_BITS_ARB, 1, FALSE },
+ { WGL_ACCUM_ALPHA_BITS_ARB, 1, FALSE },
+ { WGL_DEPTH_BITS_ARB, 1, FALSE },
+ { WGL_STENCIL_BITS_ARB, 1, FALSE },
+ { WGL_AUX_BUFFERS_ARB, 2, FALSE },
+
+ /* WGL_ARB_multisample */
+ { WGL_SAMPLE_BUFFERS_ARB, 2, FALSE },
+ { WGL_SAMPLES_ARB, 2, FALSE }
+};
+
+struct stw_pixelformat_score
+{
+ int points;
+ uint index;
+};
+
+static BOOL
+score_pixelformats(
+ struct stw_pixelformat_score *scores,
+ uint count,
+ int attribute,
+ int expected_value )
+{
+ uint i;
+ const struct attrib_match_info *ami = NULL;
+ uint index;
+
+ /* Find out if a given attribute should be considered for score calculation.
+ */
+ for (i = 0; i < sizeof( attrib_match ) / sizeof( attrib_match[0] ); i++) {
+ if (attrib_match[i].attribute == attribute) {
+ ami = &attrib_match[i];
+ break;
+ }
+ }
+ if (ami == NULL)
+ return TRUE;
+
+ /* Iterate all pixelformats, query the requested attribute and calculate
+ * score points.
+ */
+ for (index = 0; index < count; index++) {
+ int actual_value;
+
+ if (!stw_query_attrib( index + 1, 0, attribute, &actual_value ))
+ return FALSE;
+
+ if (ami->exact) {
+      /* For an exact match criterion, if the actual and expected values differ,
+       * the score is set to 0 points, effectively removing the pixelformat
+       * from the list of matching pixelformats.
+       */
+ if (actual_value != expected_value)
+ scores[index].points = 0;
+ }
+ else {
+         /* For a minimum match criterion, if the actual value is smaller than the expected
+          * value, the pixelformat is rejected (score set to 0). However, if the actual
+          * value is larger, the pixelformat is given a penalty to favour pixelformats that
+          * more closely match the expected values.
+          */
+ if (actual_value < expected_value)
+ scores[index].points = 0;
+ else if (actual_value > expected_value)
+ scores[index].points -= (actual_value - expected_value) * ami->weight;
+ }
+ }
+
+ return TRUE;
+}
+
+WINGDIAPI BOOL APIENTRY
+wglChoosePixelFormatARB(
+ HDC hdc,
+ const int *piAttribIList,
+ const FLOAT *pfAttribFList,
+ UINT nMaxFormats,
+ int *piFormats,
+ UINT *nNumFormats )
+{
+ uint count;
+ struct stw_pixelformat_score *scores;
+ uint i;
+
+ *nNumFormats = 0;
+
+   /* Allocate and initialize the pixelformat score table -- better matches
+    * have higher scores. Start with a high score and subtract penalty
+    * points for a mismatch when the match does not have to be exact.
+    * Set a score to 0 if there is a mismatch for an exact match criterion.
+    */
+ count = stw_pixelformat_get_extended_count();
+ scores = (struct stw_pixelformat_score *) MALLOC( count * sizeof( struct stw_pixelformat_score ) );
+ if (scores == NULL)
+ return FALSE;
+ for (i = 0; i < count; i++) {
+ scores[i].points = 0x7fffffff;
+ scores[i].index = i;
+ }
+
+ /* Given the attribute list calculate a score for each pixelformat.
+ */
+ if (piAttribIList != NULL) {
+ while (*piAttribIList != 0) {
+ if (!score_pixelformats( scores, count, piAttribIList[0], piAttribIList[1] )) {
+ FREE( scores );
+ return FALSE;
+ }
+ piAttribIList += 2;
+ }
+ }
+ if (pfAttribFList != NULL) {
+ while (*pfAttribFList != 0) {
+ if (!score_pixelformats( scores, count, (int) pfAttribFList[0], (int) pfAttribFList[1] )) {
+ FREE( scores );
+ return FALSE;
+ }
+ pfAttribFList += 2;
+ }
+ }
+
+ /* Bubble-sort the resulting scores. Pixelformats with higher scores go first.
+ * TODO: Find out if there are any patent issues with it.
+ */
+ if (count > 1) {
+ uint n = count;
+ boolean swapped;
+
+ do {
+ swapped = FALSE;
+ for (i = 1; i < n; i++) {
+ if (scores[i - 1].points < scores[i].points) {
+ struct stw_pixelformat_score score = scores[i - 1];
+
+ scores[i - 1] = scores[i];
+ scores[i] = score;
+ swapped = TRUE;
+ }
+ }
+ n--;
+ }
+ while (swapped);
+ }
+
+ /* Return a list of pixelformats that are the best match.
+ * Reject pixelformats with non-positive scores.
+ */
+ for (i = 0; i < count; i++) {
+ if (scores[i].points > 0) {
+ if (*nNumFormats < nMaxFormats)
+ piFormats[*nNumFormats] = scores[i].index + 1;
+ (*nNumFormats)++;
+ }
+ }
+
+ FREE( scores );
+ return TRUE;
+}
+
+WINGDIAPI BOOL APIENTRY
+wglGetPixelFormatAttribfvARB(
+ HDC hdc,
+ int iPixelFormat,
+ int iLayerPlane,
+ UINT nAttributes,
+ const int *piAttributes,
+ FLOAT *pfValues )
+{
+ UINT i;
+
+ (void) hdc;
+
+ for (i = 0; i < nAttributes; i++) {
+ int value;
+
+ if (!stw_query_attrib( iPixelFormat, iLayerPlane, piAttributes[i], &value ))
+ return FALSE;
+ pfValues[i] = (FLOAT) value;
+ }
+
+ return TRUE;
+}
+
+WINGDIAPI BOOL APIENTRY
+wglGetPixelFormatAttribivARB(
+ HDC hdc,
+ int iPixelFormat,
+ int iLayerPlane,
+ UINT nAttributes,
+ const int *piAttributes,
+ int *piValues )
+{
+ UINT i;
+
+ (void) hdc;
+
+ for (i = 0; i < nAttributes; i++) {
+ if (!stw_query_attrib( iPixelFormat, iLayerPlane, piAttributes[i], &piValues[i] ))
+ return FALSE;
+ }
+
+ return TRUE;
+}
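The scoring above distinguishes exact-match attributes (a mismatch zeroes the format's score) from minimum-match attributes (a value below the request rejects the format, a value above it only costs weighted penalty points). A sketch of how an application typically drives this entry point, assuming the GL/wglext.h prototypes and an existing HDC; the helper name choose_rgba_format is illustrative only:

#include <windows.h>
#include <GL/gl.h>
#include <GL/wglext.h>

static int
choose_rgba_format(HDC hdc)
{
   PFNWGLCHOOSEPIXELFORMATARBPROC choose_fmt =
      (PFNWGLCHOOSEPIXELFORMATARBPROC)
      wglGetProcAddress("wglChoosePixelFormatARB");
   /* Exact-match attributes (window, double buffer, pixel type) zero out
    * non-matching formats; minimum-match attributes (color/depth bits)
    * reject formats below the request and penalize formats above it. */
   static const int attribs[] = {
      WGL_DRAW_TO_WINDOW_ARB, TRUE,
      WGL_SUPPORT_OPENGL_ARB, TRUE,
      WGL_DOUBLE_BUFFER_ARB,  TRUE,
      WGL_PIXEL_TYPE_ARB,     WGL_TYPE_RGBA_ARB,
      WGL_COLOR_BITS_ARB,     24,
      WGL_DEPTH_BITS_ARB,     24,
      0
   };
   int format = 0;
   UINT num_formats = 0;

   if (!choose_fmt ||
       !choose_fmt(hdc, attribs, NULL, 1, &format, &num_formats) ||
       num_formats == 0)
      return 0;    /* no acceptable pixel format */
   return format;  /* 1-based pixel format index */
}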
diff --git a/src/gallium/state_trackers/wgl/shared/stw_context.c b/src/gallium/state_trackers/wgl/shared/stw_context.c
new file mode 100644
index 0000000000..4968ecc692
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_context.c
@@ -0,0 +1,382 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <windows.h>
+
+#include "main/mtypes.h"
+#include "main/context.h"
+#include "pipe/p_compiler.h"
+#include "pipe/p_context.h"
+#include "state_tracker/st_context.h"
+#include "state_tracker/st_public.h"
+
+#ifdef DEBUG
+#include "trace/tr_screen.h"
+#include "trace/tr_context.h"
+#endif
+
+#include "shared/stw_device.h"
+#include "shared/stw_winsys.h"
+#include "shared/stw_framebuffer.h"
+#include "shared/stw_pixelformat.h"
+#include "stw_public.h"
+#include "stw_context.h"
+#include "stw_tls.h"
+
+
+static INLINE struct stw_context *
+stw_context(GLcontext *glctx)
+{
+ if(!glctx)
+ return NULL;
+ assert(glctx->DriverCtx);
+ return (struct stw_context *)glctx->DriverCtx;
+}
+
+static INLINE struct stw_context *
+stw_current_context(void)
+{
+   /* We must check if multiple threads are being used or GET_CURRENT_CONTEXT
+    * might return the current context of the first thread seen. */
+ _glapi_check_multithread();
+
+ {
+ GET_CURRENT_CONTEXT( glctx );
+ return stw_context(glctx);
+ }
+}
+
+BOOL
+stw_copy_context(
+ UINT_PTR hglrcSrc,
+ UINT_PTR hglrcDst,
+ UINT mask )
+{
+ struct stw_context *src;
+ struct stw_context *dst;
+ BOOL ret = FALSE;
+
+ pipe_mutex_lock( stw_dev->ctx_mutex );
+
+ src = stw_lookup_context_locked( hglrcSrc );
+ dst = stw_lookup_context_locked( hglrcDst );
+
+ if (src && dst) {
+ /* FIXME */
+ assert(0);
+ (void) src;
+ (void) dst;
+ (void) mask;
+ }
+
+ pipe_mutex_unlock( stw_dev->ctx_mutex );
+
+ return ret;
+}
+
+BOOL
+stw_share_lists(
+ UINT_PTR hglrc1,
+ UINT_PTR hglrc2 )
+{
+ struct stw_context *ctx1;
+ struct stw_context *ctx2;
+ BOOL ret = FALSE;
+
+ pipe_mutex_lock( stw_dev->ctx_mutex );
+
+ ctx1 = stw_lookup_context_locked( hglrc1 );
+ ctx2 = stw_lookup_context_locked( hglrc2 );
+
+ if (ctx1 && ctx2 &&
+ ctx1->iPixelFormat == ctx2->iPixelFormat) {
+ ret = _mesa_share_state(ctx2->st->ctx, ctx1->st->ctx);
+ }
+
+ pipe_mutex_unlock( stw_dev->ctx_mutex );
+
+ return ret;
+}
+
+static void
+stw_viewport(GLcontext * glctx, GLint x, GLint y,
+ GLsizei width, GLsizei height)
+{
+ struct stw_context *ctx = (struct stw_context *)glctx->DriverCtx;
+ struct stw_framebuffer *fb;
+
+ fb = stw_framebuffer_from_hdc( ctx->hdc );
+ if(fb) {
+ stw_framebuffer_update(fb);
+ stw_framebuffer_release(fb);
+ }
+}
+
+UINT_PTR
+stw_create_layer_context(
+ HDC hdc,
+ int iLayerPlane )
+{
+ int iPixelFormat;
+ const struct stw_pixelformat_info *pfi;
+ GLvisual visual;
+ struct stw_context *ctx = NULL;
+ struct pipe_screen *screen = NULL;
+ struct pipe_context *pipe = NULL;
+
+ if(!stw_dev)
+ return 0;
+
+ if (iLayerPlane != 0)
+ return 0;
+
+ iPixelFormat = GetPixelFormat(hdc);
+ if(!iPixelFormat)
+ return 0;
+
+ pfi = stw_pixelformat_get_info( iPixelFormat - 1 );
+ stw_pixelformat_visual(&visual, pfi);
+
+ ctx = CALLOC_STRUCT( stw_context );
+ if (ctx == NULL)
+ goto no_ctx;
+
+ ctx->hdc = hdc;
+ ctx->iPixelFormat = iPixelFormat;
+
+ screen = stw_dev->screen;
+
+#ifdef DEBUG
+ /* Unwrap screen */
+ if(stw_dev->trace_running)
+ screen = trace_screen(screen)->screen;
+#endif
+
+ pipe = stw_dev->stw_winsys->create_context( screen );
+ if (pipe == NULL)
+ goto no_pipe;
+
+#ifdef DEBUG
+ /* Wrap context */
+ if(stw_dev->trace_running)
+ pipe = trace_context_create(stw_dev->screen, pipe);
+#endif
+
+ /* pass to stw_flush_frontbuffer as context_private */
+ assert(!pipe->priv);
+ pipe->priv = hdc;
+
+ ctx->st = st_create_context( pipe, &visual, NULL );
+ if (ctx->st == NULL)
+ goto no_st_ctx;
+
+ ctx->st->ctx->DriverCtx = ctx;
+ ctx->st->ctx->Driver.Viewport = stw_viewport;
+
+ pipe_mutex_lock( stw_dev->ctx_mutex );
+ ctx->hglrc = handle_table_add(stw_dev->ctx_table, ctx);
+ pipe_mutex_unlock( stw_dev->ctx_mutex );
+ if (!ctx->hglrc)
+ goto no_hglrc;
+
+ return ctx->hglrc;
+
+no_hglrc:
+ st_destroy_context(ctx->st);
+   goto no_pipe; /* st_destroy_context already destroys pipe */
+no_st_ctx:
+ pipe->destroy( pipe );
+no_pipe:
+ FREE(ctx);
+no_ctx:
+ return 0;
+}
+
+BOOL
+stw_delete_context(
+ UINT_PTR hglrc )
+{
+ struct stw_context *ctx ;
+ BOOL ret = FALSE;
+
+ if (!stw_dev)
+ return FALSE;
+
+ pipe_mutex_lock( stw_dev->ctx_mutex );
+ ctx = stw_lookup_context_locked(hglrc);
+ handle_table_remove(stw_dev->ctx_table, hglrc);
+ pipe_mutex_unlock( stw_dev->ctx_mutex );
+
+ if (ctx) {
+ struct stw_context *curctx = stw_current_context();
+
+ /* Unbind current if deleting current context. */
+ if (curctx == ctx)
+ st_make_current( NULL, NULL, NULL );
+
+ st_destroy_context(ctx->st);
+ FREE(ctx);
+
+ ret = TRUE;
+ }
+
+ return ret;
+}
+
+BOOL
+stw_release_context(
+ UINT_PTR hglrc )
+{
+ struct stw_context *ctx;
+
+ if (!stw_dev)
+ return FALSE;
+
+ pipe_mutex_lock( stw_dev->ctx_mutex );
+ ctx = stw_lookup_context_locked( hglrc );
+ pipe_mutex_unlock( stw_dev->ctx_mutex );
+
+ if (!ctx)
+ return FALSE;
+
+   /* The expectation is that ctx is the same context which is
+    * currently bound to this thread. Check that and return FALSE
+    * if that is not the case.
+    */
+ if (ctx != stw_current_context())
+ return FALSE;
+
+ if (stw_make_current( NULL, 0 ) == FALSE)
+ return FALSE;
+
+ return TRUE;
+}
+
+
+UINT_PTR
+stw_get_current_context( void )
+{
+ struct stw_context *ctx;
+
+ ctx = stw_current_context();
+ if(!ctx)
+ return 0;
+
+ return ctx->hglrc;
+}
+
+HDC
+stw_get_current_dc( void )
+{
+ struct stw_context *ctx;
+
+ ctx = stw_current_context();
+ if(!ctx)
+ return NULL;
+
+ return ctx->hdc;
+}
+
+BOOL
+stw_make_current(
+ HDC hdc,
+ UINT_PTR hglrc )
+{
+ struct stw_context *curctx = NULL;
+ struct stw_context *ctx = NULL;
+ struct stw_framebuffer *fb = NULL;
+
+ if (!stw_dev)
+ goto fail;
+
+ curctx = stw_current_context();
+ if (curctx != NULL) {
+ if (curctx->hglrc != hglrc)
+ st_flush(curctx->st, PIPE_FLUSH_RENDER_CACHE, NULL);
+
+ /* Return if already current. */
+ if (curctx->hglrc == hglrc && curctx->hdc == hdc) {
+ ctx = curctx;
+ fb = stw_framebuffer_from_hdc( hdc );
+ goto success;
+ }
+ }
+
+ if (hdc == NULL || hglrc == 0) {
+ return st_make_current( NULL, NULL, NULL );
+ }
+
+ pipe_mutex_lock( stw_dev->ctx_mutex );
+ ctx = stw_lookup_context_locked( hglrc );
+ pipe_mutex_unlock( stw_dev->ctx_mutex );
+ if(!ctx)
+ goto fail;
+
+ fb = stw_framebuffer_from_hdc( hdc );
+ if(!fb) {
+ /* Applications should call SetPixelFormat before creating a context,
+ * but not all do, and the opengl32 runtime seems to use a default pixel
+       * format in some cases, so we must create a framebuffer for those here.
+ */
+ int iPixelFormat = GetPixelFormat(hdc);
+ if(iPixelFormat)
+ fb = stw_framebuffer_create( hdc, iPixelFormat );
+ if(!fb)
+ goto fail;
+ }
+
+ if(fb->iPixelFormat != ctx->iPixelFormat)
+ goto fail;
+
+ /* Lazy allocation of the frame buffer */
+ if(!stw_framebuffer_allocate(fb))
+ goto fail;
+
+ /* Bind the new framebuffer */
+ ctx->hdc = hdc;
+
+ /* pass to stw_flush_frontbuffer as context_private */
+ ctx->st->pipe->priv = hdc;
+
+ if(!st_make_current( ctx->st, fb->stfb, fb->stfb ))
+ goto fail;
+
+success:
+ assert(fb);
+ if(fb) {
+ stw_framebuffer_update(fb);
+ stw_framebuffer_release(fb);
+ }
+
+ return TRUE;
+
+fail:
+ if(fb)
+ stw_framebuffer_release(fb);
+ st_make_current( NULL, NULL, NULL );
+ return FALSE;
+}
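stw_create_layer_context and stw_make_current are reached through the ICD entry points listed in the export table earlier (DrvCreateLayerContext, DrvSetContext, DrvDeleteContext). A rough sketch of the application-side sequence that ends up here; the exact opengl32-to-Drv dispatch is assumed, not shown in this patch:

#include <windows.h>

/* Sketch: standard WGL calls and the driver entry points they are assumed
 * to reach (the mapping is approximate). */
static HGLRC
create_and_bind(HDC hdc)
{
   HGLRC ctx = wglCreateContext(hdc);       /* -> DrvCreateLayerContext(hdc, 0) */

   if (ctx && !wglMakeCurrent(hdc, ctx)) {  /* -> DrvSetContext -> stw_make_current */
      wglDeleteContext(ctx);                /* -> DrvDeleteContext -> stw_delete_context */
      ctx = NULL;
   }
   return ctx;
}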
diff --git a/src/gallium/state_trackers/wgl/shared/stw_context.h b/src/gallium/state_trackers/wgl/shared/stw_context.h
new file mode 100644
index 0000000000..166471de5e
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_context.h
@@ -0,0 +1,43 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef STW_CONTEXT_H
+#define STW_CONTEXT_H
+
+#include <windows.h>
+
+struct st_context;
+
+struct stw_context
+{
+ struct st_context *st;
+ UINT_PTR hglrc;
+ int iPixelFormat;
+ HDC hdc;
+};
+
+#endif /* STW_CONTEXT_H */
diff --git a/src/gallium/state_trackers/wgl/shared/stw_device.c b/src/gallium/state_trackers/wgl/shared/stw_device.c
new file mode 100644
index 0000000000..0b6954915a
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_device.c
@@ -0,0 +1,225 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <windows.h>
+
+#include "glapi/glthread.h"
+#include "util/u_debug.h"
+#include "pipe/p_screen.h"
+#include "state_tracker/st_public.h"
+
+#ifdef DEBUG
+#include "trace/tr_screen.h"
+#include "trace/tr_texture.h"
+#endif
+
+#include "shared/stw_device.h"
+#include "shared/stw_winsys.h"
+#include "shared/stw_pixelformat.h"
+#include "shared/stw_public.h"
+#include "shared/stw_tls.h"
+#include "shared/stw_framebuffer.h"
+
+#ifdef WIN32_THREADS
+extern _glthread_Mutex OneTimeLock;
+extern void FreeAllTSD(void);
+#endif
+
+
+struct stw_device *stw_dev = NULL;
+
+
+/**
+ * XXX: Dispatch pipe_screen::flush_frontbuffer to our
+ * stw_winsys::flush_frontbuffer.
+ */
+static void
+stw_flush_frontbuffer(struct pipe_screen *screen,
+ struct pipe_surface *surface,
+ void *context_private )
+{
+ const struct stw_winsys *stw_winsys = stw_dev->stw_winsys;
+ HDC hdc = (HDC)context_private;
+ struct stw_framebuffer *fb;
+
+ fb = stw_framebuffer_from_hdc( hdc );
+   /* fb can be NULL if the window was already destroyed */
+ if (fb) {
+#ifdef DEBUG
+ {
+ struct pipe_surface *surface2;
+
+ if(!st_get_framebuffer_surface( fb->stfb, ST_SURFACE_FRONT_LEFT, &surface2 ))
+ assert(0);
+ else
+ assert(surface2 == surface);
+ }
+#endif
+
+#ifdef DEBUG
+ if(stw_dev->trace_running) {
+ screen = trace_screen(screen)->screen;
+ surface = trace_surface(surface)->surface;
+ }
+#endif
+ }
+
+ stw_winsys->flush_frontbuffer(screen, surface, hdc);
+
+ if(fb) {
+ stw_framebuffer_update(fb);
+ stw_framebuffer_release(fb);
+ }
+}
+
+
+boolean
+stw_init(const struct stw_winsys *stw_winsys)
+{
+ static struct stw_device stw_dev_storage;
+ struct pipe_screen *screen;
+
+ debug_printf("%s\n", __FUNCTION__);
+
+ assert(!stw_dev);
+
+ stw_tls_init();
+
+ stw_dev = &stw_dev_storage;
+ memset(stw_dev, 0, sizeof(*stw_dev));
+
+#ifdef DEBUG
+ stw_dev->memdbg_no = debug_memory_begin();
+#endif
+
+ stw_dev->stw_winsys = stw_winsys;
+
+#ifdef WIN32_THREADS
+ _glthread_INIT_MUTEX(OneTimeLock);
+#endif
+
+ screen = stw_winsys->create_screen();
+ if(!screen)
+ goto error1;
+
+#ifdef DEBUG
+ stw_dev->screen = trace_screen_create(screen);
+ stw_dev->trace_running = stw_dev->screen != screen ? TRUE : FALSE;
+#else
+ stw_dev->screen = screen;
+#endif
+
+ stw_dev->screen->flush_frontbuffer = &stw_flush_frontbuffer;
+
+ pipe_mutex_init( stw_dev->ctx_mutex );
+ pipe_mutex_init( stw_dev->fb_mutex );
+
+ stw_dev->ctx_table = handle_table_create();
+ if (!stw_dev->ctx_table) {
+ goto error1;
+ }
+
+ stw_pixelformat_init();
+
+ return TRUE;
+
+error1:
+ stw_dev = NULL;
+ return FALSE;
+}
+
+
+boolean
+stw_init_thread(void)
+{
+ return stw_tls_init_thread();
+}
+
+
+void
+stw_cleanup_thread(void)
+{
+ stw_tls_cleanup_thread();
+}
+
+
+void
+stw_cleanup(void)
+{
+ unsigned i;
+
+ debug_printf("%s\n", __FUNCTION__);
+
+ if (!stw_dev)
+ return;
+
+ pipe_mutex_lock( stw_dev->ctx_mutex );
+ {
+ /* Ensure all contexts are destroyed */
+ i = handle_table_get_first_handle(stw_dev->ctx_table);
+ while (i) {
+ stw_delete_context(i);
+ i = handle_table_get_next_handle(stw_dev->ctx_table, i);
+ }
+ handle_table_destroy(stw_dev->ctx_table);
+ }
+ pipe_mutex_unlock( stw_dev->ctx_mutex );
+
+ stw_framebuffer_cleanup();
+
+ pipe_mutex_destroy( stw_dev->fb_mutex );
+ pipe_mutex_destroy( stw_dev->ctx_mutex );
+
+ stw_dev->screen->destroy(stw_dev->screen);
+
+#ifdef WIN32_THREADS
+ _glthread_DESTROY_MUTEX(OneTimeLock);
+ FreeAllTSD();
+#endif
+
+#ifdef DEBUG
+ debug_memory_end(stw_dev->memdbg_no);
+#endif
+
+ stw_tls_cleanup();
+
+ stw_dev = NULL;
+}
+
+
+struct stw_context *
+stw_lookup_context_locked( UINT_PTR dhglrc )
+{
+ if (dhglrc == 0)
+ return NULL;
+
+ if (stw_dev == NULL)
+ return NULL;
+
+ return (struct stw_context *) handle_table_get(stw_dev->ctx_table, dhglrc);
+}
+
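stw_init consumes three winsys hooks: create_screen, create_context and flush_frontbuffer. The definition of struct stw_winsys is not part of this hunk, so the following is only an inferred sketch, with signatures guessed from the call sites above and hypothetical my_* placeholder backends:

/* Inferred sketch -- the real struct stw_winsys lives in stw_winsys.h and
 * may carry additional members. */
static struct pipe_screen *
my_create_screen(void)
{
   return NULL;   /* a real backend would create a hardware or software screen */
}

static struct pipe_context *
my_create_context(struct pipe_screen *screen)
{
   (void) screen;
   return NULL;   /* a real backend would create a pipe context on the screen */
}

static void
my_flush_frontbuffer(struct pipe_screen *screen,
                     struct pipe_surface *surface,
                     HDC hdc)
{
   (void) screen; (void) surface; (void) hdc;   /* present surface to hdc */
}

static const struct stw_winsys my_winsys = {
   .create_screen     = my_create_screen,
   .create_context    = my_create_context,
   .flush_frontbuffer = my_flush_frontbuffer
};

/* A backend would then bootstrap the shared code with stw_init(&my_winsys). */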
diff --git a/src/gallium/state_trackers/wgl/shared/stw_device.h b/src/gallium/state_trackers/wgl/shared/stw_device.h
new file mode 100644
index 0000000000..e1bb9518dd
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_device.h
@@ -0,0 +1,77 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef STW_DEVICE_H_
+#define STW_DEVICE_H_
+
+
+#include <windows.h>
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_thread.h"
+#include "util/u_handle_table.h"
+#include "stw_pixelformat.h"
+
+
+#define STW_MAX_PIXELFORMATS 256
+
+
+struct pipe_screen;
+struct stw_framebuffer;
+
+struct stw_device
+{
+ const struct stw_winsys *stw_winsys;
+
+ struct pipe_screen *screen;
+
+#ifdef DEBUG
+ boolean trace_running;
+#endif
+
+ struct stw_pixelformat_info pixelformats[STW_MAX_PIXELFORMATS];
+ unsigned pixelformat_count;
+ unsigned pixelformat_extended_count;
+
+ pipe_mutex ctx_mutex;
+ struct handle_table *ctx_table;
+
+ pipe_mutex fb_mutex;
+ struct stw_framebuffer *fb_head;
+
+#ifdef DEBUG
+ unsigned long memdbg_no;
+#endif
+};
+
+struct stw_context *
+stw_lookup_context_locked( UINT_PTR hglrc );
+
+extern struct stw_device *stw_dev;
+
+
+#endif /* STW_DEVICE_H_ */
diff --git a/src/gallium/state_trackers/wgl/shared/stw_extensionsstring.c b/src/gallium/state_trackers/wgl/shared/stw_extensionsstring.c
new file mode 100644
index 0000000000..62c859e1f9
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_extensionsstring.c
@@ -0,0 +1,59 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <windows.h>
+
+#define WGL_WGLEXT_PROTOTYPES
+
+#include <GL/gl.h>
+#include <GL/wglext.h>
+
+
+static const char *stw_extension_string =
+ "WGL_ARB_extensions_string "
+ "WGL_ARB_multisample "
+ "WGL_ARB_pixel_format "
+/* "WGL_EXT_swap_interval " */
+ "WGL_EXT_extensions_string";
+
+
+WINGDIAPI const char * APIENTRY
+wglGetExtensionsStringARB(
+ HDC hdc )
+{
+ (void) hdc;
+
+ return stw_extension_string;
+}
+
+
+WINGDIAPI const char * APIENTRY
+wglGetExtensionsStringEXT( void )
+{
+ return stw_extension_string;
+}
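Only the extensions named in stw_extension_string are advertised, so applications are expected to check the string before relying on, e.g., WGL_ARB_pixel_format. A small sketch of that check, assuming the GL/wglext.h typedefs (a robust parser would tokenize on spaces rather than rely on strstr):

#include <string.h>
#include <windows.h>
#include <GL/gl.h>
#include <GL/wglext.h>

static BOOL
has_wgl_extension(HDC hdc, const char *name)
{
   PFNWGLGETEXTENSIONSSTRINGARBPROC get_str =
      (PFNWGLGETEXTENSIONSSTRINGARBPROC)
      wglGetProcAddress("wglGetExtensionsStringARB");
   const char *ext = get_str ? get_str(hdc) : NULL;

   /* Naive substring check; adequate for a sketch. */
   return (ext && strstr(ext, name) != NULL) ? TRUE : FALSE;
}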
diff --git a/src/gallium/state_trackers/wgl/shared/stw_extgallium.c b/src/gallium/state_trackers/wgl/shared/stw_extgallium.c
new file mode 100644
index 0000000000..fc22737d7e
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_extgallium.c
@@ -0,0 +1,79 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "pipe/p_screen.h"
+#include "stw_public.h"
+#include "stw_device.h"
+#include "stw_winsys.h"
+
+#ifdef DEBUG
+#include "trace/tr_screen.h"
+#include "trace/tr_context.h"
+#endif
+
+
+struct pipe_screen * APIENTRY
+wglGetGalliumScreenMESA(void)
+{
+ return stw_dev ? stw_dev->screen : NULL;
+}
+
+
+/* XXX: Unify with stw_create_layer_context */
+struct pipe_context * APIENTRY
+wglCreateGalliumContextMESA(void)
+{
+ struct pipe_screen *screen = NULL;
+ struct pipe_context *pipe = NULL;
+
+ if(!stw_dev)
+ return NULL;
+
+ screen = stw_dev->screen;
+
+#ifdef DEBUG
+ /* Unwrap screen */
+ if(stw_dev->trace_running)
+ screen = trace_screen(screen)->screen;
+#endif
+
+ pipe = stw_dev->stw_winsys->create_context( screen );
+ if (pipe == NULL)
+ goto no_pipe;
+
+#ifdef DEBUG
+ /* Wrap context */
+ if(stw_dev->trace_running)
+ pipe = trace_context_create(stw_dev->screen, pipe);
+#endif
+
+ return pipe;
+
+no_pipe:
+ return NULL;
+}
diff --git a/src/gallium/state_trackers/wgl/shared/stw_extgallium.h b/src/gallium/state_trackers/wgl/shared/stw_extgallium.h
new file mode 100644
index 0000000000..cc35f2bb7f
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_extgallium.h
@@ -0,0 +1,47 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef STW_EXTGALLIUM_H_
+#define STW_EXTGALLIUM_H_
+
+
+#include <windows.h>
+
+
+struct pipe_screen;
+struct pipe_context;
+
+
+struct pipe_screen * APIENTRY
+wglGetGalliumScreenMESA(void);
+
+
+struct pipe_context * APIENTRY
+wglCreateGalliumContextMESA(void);
+
+
+#endif /* STW_EXTGALLIUM_H_ */
diff --git a/src/gallium/state_trackers/wgl/shared/stw_extswapinterval.c b/src/gallium/state_trackers/wgl/shared/stw_extswapinterval.c
new file mode 100644
index 0000000000..9eac6a1d09
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_extswapinterval.c
@@ -0,0 +1,57 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <windows.h>
+
+#define WGL_WGLEXT_PROTOTYPES
+
+#include <GL/gl.h>
+#include <GL/wglext.h>
+#include "util/u_debug.h"
+
+/* A dummy implementation of this extension.
+ *
+ * Required because some applications retrieve and call these functions
+ * even though we don't advertise the extension and the results of
+ * wglGetProcAddress are NULL.
+ */
+WINGDIAPI BOOL APIENTRY
+wglSwapIntervalEXT(int interval)
+{
+ (void) interval;
+ debug_printf("%s: %d\n", __FUNCTION__, interval);
+ return TRUE;
+}
+
+WINGDIAPI int APIENTRY
+wglGetSwapIntervalEXT(void)
+{
+ return 0;
+}
+
+
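These stubs exist because some applications call the swap-interval entry points without checking what wglGetProcAddress returned. A well-behaved caller guards the call, as in this sketch (the helper name request_vsync is illustrative):

#include <windows.h>
#include <GL/gl.h>
#include <GL/wglext.h>

static void
request_vsync(void)
{
   PFNWGLSWAPINTERVALEXTPROC swap_interval =
      (PFNWGLSWAPINTERVALEXTPROC) wglGetProcAddress("wglSwapIntervalEXT");

   if (swap_interval)      /* only call through a non-NULL entry point */
      swap_interval(1);
}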
diff --git a/src/gallium/state_trackers/wgl/shared/stw_framebuffer.c b/src/gallium/state_trackers/wgl/shared/stw_framebuffer.c
new file mode 100644
index 0000000000..b8956bb550
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_framebuffer.c
@@ -0,0 +1,493 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <windows.h>
+
+#include "main/context.h"
+#include "pipe/p_format.h"
+#include "pipe/p_screen.h"
+#include "state_tracker/st_context.h"
+#include "state_tracker/st_public.h"
+
+#ifdef DEBUG
+#include "trace/tr_screen.h"
+#include "trace/tr_texture.h"
+#endif
+
+#include "stw_framebuffer.h"
+#include "stw_device.h"
+#include "stw_public.h"
+#include "stw_winsys.h"
+#include "stw_tls.h"
+
+
+/**
+ * Search for the framebuffer with the matching HWND while holding the
+ * stw_dev::fb_mutex global lock.
+ */
+static INLINE struct stw_framebuffer *
+stw_framebuffer_from_hwnd_locked(
+ HWND hwnd )
+{
+ struct stw_framebuffer *fb;
+
+ for (fb = stw_dev->fb_head; fb != NULL; fb = fb->next)
+ if (fb->hWnd == hwnd) {
+ pipe_mutex_lock(fb->mutex);
+ break;
+ }
+
+ return fb;
+}
+
+
+/**
+ * Destroy this framebuffer. Both stw_dev::fb_mutex and stw_framebuffer::mutex
+ * must be held, in that order. Obviously no further access to fb can be done
+ * after this.
+ */
+static INLINE void
+stw_framebuffer_destroy_locked(
+ struct stw_framebuffer *fb )
+{
+ struct stw_framebuffer **link;
+
+ link = &stw_dev->fb_head;
+ while (*link != fb)
+ link = &(*link)->next;
+ assert(*link);
+ *link = fb->next;
+ fb->next = NULL;
+
+ st_unreference_framebuffer(fb->stfb);
+
+ pipe_mutex_unlock( fb->mutex );
+
+ pipe_mutex_destroy( fb->mutex );
+
+ FREE( fb );
+}
+
+
+void
+stw_framebuffer_release(
+ struct stw_framebuffer *fb)
+{
+ assert(fb);
+ pipe_mutex_unlock( fb->mutex );
+}
+
+
+static INLINE void
+stw_framebuffer_get_size( struct stw_framebuffer *fb )
+{
+ unsigned width, height;
+ RECT rect;
+
+ assert(fb->hWnd);
+
+ GetClientRect( fb->hWnd, &rect );
+ width = rect.right - rect.left;
+ height = rect.bottom - rect.top;
+
+ if(width < 1)
+ width = 1;
+ if(height < 1)
+ height = 1;
+
+ if(width != fb->width || height != fb->height) {
+ fb->must_resize = TRUE;
+ fb->width = width;
+ fb->height = height;
+ }
+}
+
+
+/**
+ * @sa http://msdn.microsoft.com/en-us/library/ms644975(VS.85).aspx
+ * @sa http://msdn.microsoft.com/en-us/library/ms644960(VS.85).aspx
+ */
+LRESULT CALLBACK
+stw_call_window_proc(
+ int nCode,
+ WPARAM wParam,
+ LPARAM lParam )
+{
+ struct stw_tls_data *tls_data;
+ PCWPSTRUCT pParams = (PCWPSTRUCT)lParam;
+ struct stw_framebuffer *fb;
+
+ tls_data = stw_tls_get_data();
+ if(!tls_data)
+ return 0;
+
+ if (nCode < 0)
+ return CallNextHookEx(tls_data->hCallWndProcHook, nCode, wParam, lParam);
+
+ if (pParams->message == WM_WINDOWPOSCHANGED) {
+ /* We handle WM_WINDOWPOSCHANGED instead of WM_SIZE because according to
+ * http://blogs.msdn.com/oldnewthing/archive/2008/01/15/7113860.aspx
+ * WM_SIZE is generated from WM_WINDOWPOSCHANGED by DefWindowProc so it
+ * can be masked out by the application. */
+ LPWINDOWPOS lpWindowPos = (LPWINDOWPOS)pParams->lParam;
+ if((lpWindowPos->flags & SWP_SHOWWINDOW) ||
+ !(lpWindowPos->flags & SWP_NOSIZE)) {
+ fb = stw_framebuffer_from_hwnd( pParams->hwnd );
+ if(fb) {
+ /* Size in WINDOWPOS includes the window frame, so get the size
+ * of the client area via GetClientRect. */
+ stw_framebuffer_get_size(fb);
+ stw_framebuffer_release(fb);
+ }
+ }
+ }
+ else if (pParams->message == WM_DESTROY) {
+ pipe_mutex_lock( stw_dev->fb_mutex );
+ fb = stw_framebuffer_from_hwnd_locked( pParams->hwnd );
+ if(fb)
+ stw_framebuffer_destroy_locked(fb);
+ pipe_mutex_unlock( stw_dev->fb_mutex );
+ }
+
+ return CallNextHookEx(tls_data->hCallWndProcHook, nCode, wParam, lParam);
+}
+
+
+struct stw_framebuffer *
+stw_framebuffer_create(
+ HDC hdc,
+ int iPixelFormat )
+{
+ HWND hWnd;
+ struct stw_framebuffer *fb;
+ const struct stw_pixelformat_info *pfi;
+
+ /* We only support drawing to a window. */
+ hWnd = WindowFromDC( hdc );
+ if(!hWnd)
+ return NULL;
+
+ fb = CALLOC_STRUCT( stw_framebuffer );
+ if (fb == NULL)
+ return NULL;
+
+ fb->hDC = hdc;
+ fb->hWnd = hWnd;
+ fb->iPixelFormat = iPixelFormat;
+
+ fb->pfi = pfi = stw_pixelformat_get_info( iPixelFormat - 1 );
+
+ stw_pixelformat_visual(&fb->visual, pfi);
+
+ stw_framebuffer_get_size(fb);
+
+ pipe_mutex_init( fb->mutex );
+
+ /* This is the only case where we lock the stw_framebuffer::mutex before
+ * stw_dev::fb_mutex, since no other thread can know about this framebuffer
+ * and we must prevent any other thread from destroying it before we return.
+ */
+ pipe_mutex_lock( fb->mutex );
+
+ pipe_mutex_lock( stw_dev->fb_mutex );
+ fb->next = stw_dev->fb_head;
+ stw_dev->fb_head = fb;
+ pipe_mutex_unlock( stw_dev->fb_mutex );
+
+ return fb;
+}
+
+
+BOOL
+stw_framebuffer_allocate(
+ struct stw_framebuffer *fb)
+{
+ assert(fb);
+
+ if(!fb->stfb) {
+ const struct stw_pixelformat_info *pfi = fb->pfi;
+ enum pipe_format colorFormat, depthFormat, stencilFormat;
+
+ colorFormat = pfi->color_format;
+
+ assert(pf_layout( pfi->depth_stencil_format ) == PIPE_FORMAT_LAYOUT_RGBAZS );
+
+ if(pf_get_component_bits( pfi->depth_stencil_format, PIPE_FORMAT_COMP_Z ))
+ depthFormat = pfi->depth_stencil_format;
+ else
+ depthFormat = PIPE_FORMAT_NONE;
+
+ if(pf_get_component_bits( pfi->depth_stencil_format, PIPE_FORMAT_COMP_S ))
+ stencilFormat = pfi->depth_stencil_format;
+ else
+ stencilFormat = PIPE_FORMAT_NONE;
+
+ assert(fb->must_resize);
+ assert(fb->width);
+ assert(fb->height);
+
+ fb->stfb = st_create_framebuffer(
+ &fb->visual,
+ colorFormat,
+ depthFormat,
+ stencilFormat,
+ fb->width,
+ fb->height,
+ (void *) fb );
+
+      /* to notify the context */
+ fb->must_resize = TRUE;
+ }
+
+ return fb->stfb ? TRUE : FALSE;
+}
+
+
+/**
+ * Update the framebuffer's size if necessary.
+ */
+void
+stw_framebuffer_update(
+ struct stw_framebuffer *fb)
+{
+ assert(fb->stfb);
+ assert(fb->height);
+ assert(fb->width);
+
+ /* XXX: It would be nice to avoid checking the size again -- in theory
+    * stw_call_window_proc would have caught the resize and stored the right
+    * size already, but unfortunately threads created before DllMain is
+    * called don't get a DLL_THREAD_ATTACH notification, and there is no way
+    * to know of their existence without using the not very portable PSAPI.
+ */
+ stw_framebuffer_get_size(fb);
+
+ if(fb->must_resize) {
+ st_resize_framebuffer(fb->stfb, fb->width, fb->height);
+ fb->must_resize = FALSE;
+ }
+}
+
+
+void
+stw_framebuffer_cleanup( void )
+{
+ struct stw_framebuffer *fb;
+ struct stw_framebuffer *next;
+
+ pipe_mutex_lock( stw_dev->fb_mutex );
+
+ fb = stw_dev->fb_head;
+ while (fb) {
+ next = fb->next;
+
+ pipe_mutex_lock(fb->mutex);
+ stw_framebuffer_destroy_locked(fb);
+
+ fb = next;
+ }
+ stw_dev->fb_head = NULL;
+
+ pipe_mutex_unlock( stw_dev->fb_mutex );
+}
+
+
+/**
+ * Given an hdc, return the corresponding stw_framebuffer.
+ */
+static INLINE struct stw_framebuffer *
+stw_framebuffer_from_hdc_locked(
+ HDC hdc )
+{
+ HWND hwnd;
+ struct stw_framebuffer *fb;
+
+ /*
+ * Some applications create and use several HDCs for the same window, so
+ * looking up the framebuffer by the HDC is not reliable. Use HWND whenever
+ * possible.
+ */
+ hwnd = WindowFromDC(hdc);
+ if(hwnd)
+ return stw_framebuffer_from_hwnd_locked(hwnd);
+
+ for (fb = stw_dev->fb_head; fb != NULL; fb = fb->next)
+ if (fb->hDC == hdc) {
+ pipe_mutex_lock(fb->mutex);
+ break;
+ }
+
+ return fb;
+}
+
+
+/**
+ * Given an hdc, return the corresponding stw_framebuffer.
+ */
+struct stw_framebuffer *
+stw_framebuffer_from_hdc(
+ HDC hdc )
+{
+ struct stw_framebuffer *fb;
+
+ pipe_mutex_lock( stw_dev->fb_mutex );
+ fb = stw_framebuffer_from_hdc_locked(hdc);
+ pipe_mutex_unlock( stw_dev->fb_mutex );
+
+ return fb;
+}
+
+
+/**
+ * Given a window handle, return the corresponding stw_framebuffer.
+ */
+struct stw_framebuffer *
+stw_framebuffer_from_hwnd(
+ HWND hwnd )
+{
+ struct stw_framebuffer *fb;
+
+ pipe_mutex_lock( stw_dev->fb_mutex );
+ fb = stw_framebuffer_from_hwnd_locked(hwnd);
+ pipe_mutex_unlock( stw_dev->fb_mutex );
+
+ return fb;
+}
+
+
+BOOL
+stw_pixelformat_set(
+ HDC hdc,
+ int iPixelFormat )
+{
+ uint count;
+ uint index;
+ struct stw_framebuffer *fb;
+
+ index = (uint) iPixelFormat - 1;
+ count = stw_pixelformat_get_extended_count();
+ if (index >= count)
+ return FALSE;
+
+ fb = stw_framebuffer_from_hdc_locked(hdc);
+ if(fb) {
+ /* SetPixelFormat must be called only once */
+ stw_framebuffer_release( fb );
+ return FALSE;
+ }
+
+ fb = stw_framebuffer_create(hdc, iPixelFormat);
+ if(!fb) {
+ return FALSE;
+ }
+
+ stw_framebuffer_release( fb );
+
+   /* Some applications mistakenly use the undocumented wglSetPixelFormat
+    * function instead of SetPixelFormat, so we call SetPixelFormat here to
+    * prevent opengl32.dll's wglCreateContext from failing. */
+ if (GetPixelFormat(hdc) == 0) {
+ SetPixelFormat(hdc, iPixelFormat, NULL);
+ }
+
+ return TRUE;
+}
+
+
+int
+stw_pixelformat_get(
+ HDC hdc )
+{
+ int iPixelFormat = 0;
+ struct stw_framebuffer *fb;
+
+ fb = stw_framebuffer_from_hdc(hdc);
+ if(fb) {
+ iPixelFormat = fb->iPixelFormat;
+ stw_framebuffer_release(fb);
+ }
+
+ return iPixelFormat;
+}
+
+
+BOOL
+stw_swap_buffers(
+ HDC hdc )
+{
+ struct stw_framebuffer *fb;
+ struct pipe_screen *screen;
+ struct pipe_surface *surface;
+
+ fb = stw_framebuffer_from_hdc( hdc );
+ if (fb == NULL)
+ return FALSE;
+
+ if (!(fb->pfi->pfd.dwFlags & PFD_DOUBLEBUFFER)) {
+ stw_framebuffer_release(fb);
+ return TRUE;
+ }
+
+ /* If we're swapping the buffer associated with the current context
+ * we have to flush any pending rendering commands first.
+ */
+ st_notify_swapbuffers( fb->stfb );
+
+ screen = stw_dev->screen;
+
+ if(!st_get_framebuffer_surface( fb->stfb, ST_SURFACE_BACK_LEFT, &surface )) {
+ /* FIXME: this shouldn't happen, but does on glean */
+ stw_framebuffer_release(fb);
+ return FALSE;
+ }
+
+#ifdef DEBUG
+ if(stw_dev->trace_running) {
+ screen = trace_screen(screen)->screen;
+ surface = trace_surface(surface)->surface;
+ }
+#endif
+
+ stw_dev->stw_winsys->flush_frontbuffer( screen, surface, hdc );
+
+ stw_framebuffer_update(fb);
+ stw_framebuffer_release(fb);
+
+ return TRUE;
+}
+
+
+BOOL
+stw_swap_layer_buffers(
+ HDC hdc,
+ UINT fuPlanes )
+{
+ if(fuPlanes & WGL_SWAP_MAIN_PLANE)
+ return stw_swap_buffers(hdc);
+
+ return FALSE;
+}
diff --git a/src/gallium/state_trackers/wgl/shared/stw_framebuffer.h b/src/gallium/state_trackers/wgl/shared/stw_framebuffer.h
new file mode 100644
index 0000000000..13d29f37e4
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_framebuffer.h
@@ -0,0 +1,148 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef STW_FRAMEBUFFER_H
+#define STW_FRAMEBUFFER_H
+
+#include <windows.h>
+
+#include "main/mtypes.h"
+
+#include "pipe/p_thread.h"
+
+struct stw_pixelformat_info;
+
+/**
+ * Windows framebuffer, derived from gl_framebuffer.
+ */
+struct stw_framebuffer
+{
+ /**
+ * This mutex has two purposes:
+    * - protect access to the mutable data members below
+    * - prevent the framebuffer from being deleted while it is being accessed.
+ *
+ * It is OK to lock this mutex while holding the stw_device::fb_mutex lock,
+ * but the opposite must never happen.
+ */
+ pipe_mutex mutex;
+
+ /*
+ * Immutable members.
+ *
+    * Note that even access to immutable members requires holding the mutex
+    * above, to prevent the framebuffer from being destroyed while in use.
+ */
+
+ HDC hDC;
+ HWND hWnd;
+
+ int iPixelFormat;
+ const struct stw_pixelformat_info *pfi;
+ GLvisual visual;
+
+ /*
+ * Mutable members.
+ */
+
+ struct st_framebuffer *stfb;
+
+ /* FIXME: Make this work for multiple contexts bound to the same framebuffer */
+ boolean must_resize;
+ unsigned width;
+ unsigned height;
+
+ /**
+ * This is protected by stw_device::fb_mutex, not the mutex above.
+ *
+    * Deletions must be done by first acquiring stw_device::fb_mutex, and then
+    * acquiring the stw_framebuffer::mutex of the framebuffer to be deleted.
+    * This ensures that nobody else is reading or writing to the framebuffer
+    * while it is being deleted.
+    *
+    * It is not necessary to acquire the mutex above to navigate the linked
+    * list, given that deletions are done with stw_device::fb_mutex held, so
+    * no other thread can delete entries concurrently.
+ */
+ struct stw_framebuffer *next;
+};
+
+
+/**
+ * Create a new framebuffer object which will correspond to the given HDC.
+ *
+ * This function will acquire stw_framebuffer::mutex. stw_framebuffer_release
+ * must be called when done.
+ */
+struct stw_framebuffer *
+stw_framebuffer_create(
+ HDC hdc,
+ int iPixelFormat );
+
+/**
+ * Search for a framebuffer with a matching HWND.
+ *
+ * This function will acquire stw_framebuffer::mutex. stw_framebuffer_release
+ * must be called when done.
+ */
+struct stw_framebuffer *
+stw_framebuffer_from_hwnd(
+ HWND hwnd );
+
+/**
+ * Search for a framebuffer with a matching HDC.
+ *
+ * This function will acquire stw_framebuffer::mutex. stw_framebuffer_release
+ * must be called when done.
+ */
+struct stw_framebuffer *
+stw_framebuffer_from_hdc(
+ HDC hdc );
+
+BOOL
+stw_framebuffer_allocate(
+ struct stw_framebuffer *fb );
+
+void
+stw_framebuffer_update(
+ struct stw_framebuffer *fb);
+
+/**
+ * Release the stw_framebuffer::mutex lock. The framebuffer must not be
+ * accessed after calling this function, as it may have been deleted by
+ * another thread in the meantime. (See the usage sketch following this
+ * header.)
+ */
+void
+stw_framebuffer_release(
+ struct stw_framebuffer *fb);
+
+/**
+ * Cleanup any existing framebuffers when exiting application.
+ */
+void
+stw_framebuffer_cleanup(void);
+
+#endif /* STW_FRAMEBUFFER_H */
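
The locking contract spelled out in the comments above is easiest to see in a short usage sketch. This is an editorial illustration only, not part of the patch; it assumes nothing beyond the functions declared in this header and the struct members shown above.

/* Illustrative sketch -- not part of the patch. Typical lookup/use/release
 * pattern for the API declared above.
 */
static void
example_query_size(HDC hdc, unsigned *width, unsigned *height)
{
   struct stw_framebuffer *fb;

   /* The lookup returns with fb->mutex held, so the framebuffer cannot be
    * deleted while its members are read below.
    */
   fb = stw_framebuffer_from_hdc(hdc);
   if (!fb)
      return;

   *width = fb->width;
   *height = fb->height;

   /* Always pair the lookup with a release; fb must not be touched after
    * this call, since another thread may delete it.
    */
   stw_framebuffer_release(fb);
}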
diff --git a/src/gallium/state_trackers/wgl/shared/stw_getprocaddress.c b/src/gallium/state_trackers/wgl/shared/stw_getprocaddress.c
new file mode 100644
index 0000000000..879ced925a
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_getprocaddress.c
@@ -0,0 +1,86 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <windows.h>
+
+#define WGL_WGLEXT_PROTOTYPES
+
+#include <GL/gl.h>
+#include <GL/wglext.h>
+
+#include "glapi/glapi.h"
+#include "stw_public.h"
+#include "stw_extgallium.h"
+
+struct stw_extension_entry
+{
+ const char *name;
+ PROC proc;
+};
+
+#define STW_EXTENSION_ENTRY(P) { #P, (PROC) P }
+
+static const struct stw_extension_entry stw_extension_entries[] = {
+
+ /* WGL_ARB_extensions_string */
+ STW_EXTENSION_ENTRY( wglGetExtensionsStringARB ),
+
+ /* WGL_ARB_pixel_format */
+ STW_EXTENSION_ENTRY( wglChoosePixelFormatARB ),
+ STW_EXTENSION_ENTRY( wglGetPixelFormatAttribfvARB ),
+ STW_EXTENSION_ENTRY( wglGetPixelFormatAttribivARB ),
+
+ /* WGL_EXT_extensions_string */
+ STW_EXTENSION_ENTRY( wglGetExtensionsStringEXT ),
+
+ /* WGL_EXT_swap_interval */
+ STW_EXTENSION_ENTRY( wglGetSwapIntervalEXT ),
+ STW_EXTENSION_ENTRY( wglSwapIntervalEXT ),
+
+ /* WGL_EXT_gallium ? */
+ STW_EXTENSION_ENTRY( wglGetGalliumScreenMESA ),
+ STW_EXTENSION_ENTRY( wglCreateGalliumContextMESA ),
+
+ { NULL, NULL }
+};
+
+PROC
+stw_get_proc_address(
+ LPCSTR lpszProc )
+{
+ const struct stw_extension_entry *entry;
+
+ if (lpszProc[0] == 'w' && lpszProc[1] == 'g' && lpszProc[2] == 'l')
+ for (entry = stw_extension_entries; entry->name; entry++)
+ if (strcmp( lpszProc, entry->name ) == 0)
+ return entry->proc;
+
+ if (lpszProc[0] == 'g' && lpszProc[1] == 'l')
+ return (PROC) _glapi_get_proc_address( lpszProc );
+
+ return NULL;
+}
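
As a usage note (editorial, not part of the patch): applications reach the table above through the wglGetProcAddress wrapper in stw_wgl.c, which forwards to stw_get_proc_address. A minimal sketch, assuming <GL/wglext.h> for the PFNWGLGETEXTENSIONSSTRINGARBPROC typedef and <stdio.h> for printf:

/* Illustrative sketch -- not part of the patch. */
static void
example_print_wgl_extensions(HDC hdc)
{
   PFNWGLGETEXTENSIONSSTRINGARBPROC get_extensions_string =
      (PFNWGLGETEXTENSIONSSTRINGARBPROC)
         wglGetProcAddress("wglGetExtensionsStringARB");

   if (get_extensions_string)
      printf("WGL extensions: %s\n", get_extensions_string(hdc));
}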
diff --git a/src/gallium/state_trackers/wgl/shared/stw_pixelformat.c b/src/gallium/state_trackers/wgl/shared/stw_pixelformat.c
new file mode 100644
index 0000000000..c296744838
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_pixelformat.c
@@ -0,0 +1,370 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/mtypes.h"
+#include "main/context.h"
+
+#include "pipe/p_format.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_screen.h"
+
+#include "util/u_debug.h"
+
+#include "stw_device.h"
+#include "stw_pixelformat.h"
+#include "stw_public.h"
+#include "stw_tls.h"
+
+
+struct stw_pf_color_info
+{
+ enum pipe_format format;
+ struct {
+ unsigned char red;
+ unsigned char green;
+ unsigned char blue;
+ unsigned char alpha;
+ } bits;
+ struct {
+ unsigned char red;
+ unsigned char green;
+ unsigned char blue;
+ unsigned char alpha;
+ } shift;
+};
+
+struct stw_pf_depth_info
+{
+ enum pipe_format format;
+ struct {
+ unsigned char depth;
+ unsigned char stencil;
+ } bits;
+};
+
+
+/* NOTE: order matters, since in otherwise equal circumstances the first
+ * format listed will get chosen */
+
+static const struct stw_pf_color_info
+stw_pf_color[] = {
+ /* no-alpha */
+ { PIPE_FORMAT_X8R8G8B8_UNORM, { 8, 8, 8, 0}, {16, 8, 0, 0} },
+ { PIPE_FORMAT_B8G8R8X8_UNORM, { 8, 8, 8, 0}, { 8, 16, 24, 0} },
+ { PIPE_FORMAT_R5G6B5_UNORM, { 5, 6, 5, 0}, {11, 5, 0, 0} },
+ /* alpha */
+ { PIPE_FORMAT_A8R8G8B8_UNORM, { 8, 8, 8, 8}, {16, 8, 0, 24} },
+ { PIPE_FORMAT_B8G8R8A8_UNORM, { 8, 8, 8, 8}, { 8, 16, 24, 0} },
+#if 0
+ { PIPE_FORMAT_A2B10G10R10_UNORM, {10, 10, 10, 2}, { 0, 10, 20, 30} },
+#endif
+ { PIPE_FORMAT_A1R5G5B5_UNORM, { 5, 5, 5, 1}, {10, 5, 0, 15} },
+ { PIPE_FORMAT_A4R4G4B4_UNORM, { 4, 4, 4, 4}, {16, 4, 0, 12} }
+};
+
+
+static const struct stw_pf_depth_info
+stw_pf_depth_stencil[] = {
+ /* pure depth */
+ { PIPE_FORMAT_Z32_UNORM, {32, 0} },
+ { PIPE_FORMAT_Z24X8_UNORM, {24, 0} },
+ { PIPE_FORMAT_X8Z24_UNORM, {24, 0} },
+ { PIPE_FORMAT_Z16_UNORM, {16, 0} },
+ /* pure stencil */
+ { PIPE_FORMAT_S8_UNORM, { 0, 8} },
+ /* combined depth-stencil */
+ { PIPE_FORMAT_S8Z24_UNORM, {24, 8} },
+ { PIPE_FORMAT_Z24S8_UNORM, {24, 8} }
+};
+
+
+static const boolean
+stw_pf_doublebuffer[] = {
+ FALSE,
+ TRUE,
+};
+
+
+const unsigned
+stw_pf_multisample[] = {
+ 0,
+ 4
+};
+
+
+static void
+stw_pixelformat_add(
+ struct stw_device *stw_dev,
+ const struct stw_pf_color_info *color,
+ const struct stw_pf_depth_info *depth,
+ unsigned accum,
+ boolean doublebuffer,
+ unsigned samples )
+{
+ boolean extended = FALSE;
+ struct stw_pixelformat_info *pfi;
+
+ assert(stw_dev->pixelformat_extended_count < STW_MAX_PIXELFORMATS);
+ if(stw_dev->pixelformat_extended_count >= STW_MAX_PIXELFORMATS)
+ return;
+
+ assert(pf_layout( color->format ) == PIPE_FORMAT_LAYOUT_RGBAZS );
+ assert(pf_get_component_bits( color->format, PIPE_FORMAT_COMP_R ) == color->bits.red );
+ assert(pf_get_component_bits( color->format, PIPE_FORMAT_COMP_G ) == color->bits.green );
+ assert(pf_get_component_bits( color->format, PIPE_FORMAT_COMP_B ) == color->bits.blue );
+ assert(pf_get_component_bits( color->format, PIPE_FORMAT_COMP_A ) == color->bits.alpha );
+ assert(pf_layout( depth->format ) == PIPE_FORMAT_LAYOUT_RGBAZS );
+ assert(pf_get_component_bits( depth->format, PIPE_FORMAT_COMP_Z ) == depth->bits.depth );
+ assert(pf_get_component_bits( depth->format, PIPE_FORMAT_COMP_S ) == depth->bits.stencil );
+
+ pfi = &stw_dev->pixelformats[stw_dev->pixelformat_extended_count];
+
+ memset(pfi, 0, sizeof *pfi);
+
+ pfi->color_format = color->format;
+ pfi->depth_stencil_format = depth->format;
+
+ pfi->pfd.nSize = sizeof pfi->pfd;
+ pfi->pfd.nVersion = 1;
+
+ pfi->pfd.dwFlags = PFD_SUPPORT_OPENGL;
+
+ /* TODO: also support non-native pixel formats */
+   pfi->pfd.dwFlags |= PFD_DRAW_TO_WINDOW;
+
+ if (doublebuffer)
+ pfi->pfd.dwFlags |= PFD_DOUBLEBUFFER | PFD_SWAP_COPY;
+
+ pfi->pfd.iPixelType = PFD_TYPE_RGBA;
+
+ pfi->pfd.cColorBits = color->bits.red + color->bits.green + color->bits.blue + color->bits.alpha;
+ pfi->pfd.cRedBits = color->bits.red;
+ pfi->pfd.cRedShift = color->shift.red;
+ pfi->pfd.cGreenBits = color->bits.green;
+ pfi->pfd.cGreenShift = color->shift.green;
+ pfi->pfd.cBlueBits = color->bits.blue;
+ pfi->pfd.cBlueShift = color->shift.blue;
+ pfi->pfd.cAlphaBits = color->bits.alpha;
+ pfi->pfd.cAlphaShift = color->shift.alpha;
+ pfi->pfd.cAccumBits = 4*accum;
+ pfi->pfd.cAccumRedBits = accum;
+ pfi->pfd.cAccumGreenBits = accum;
+ pfi->pfd.cAccumBlueBits = accum;
+ pfi->pfd.cAccumAlphaBits = accum;
+ pfi->pfd.cDepthBits = depth->bits.depth;
+ pfi->pfd.cStencilBits = depth->bits.stencil;
+ pfi->pfd.cAuxBuffers = 0;
+ pfi->pfd.iLayerType = 0;
+ pfi->pfd.bReserved = 0;
+ pfi->pfd.dwLayerMask = 0;
+ pfi->pfd.dwVisibleMask = 0;
+ pfi->pfd.dwDamageMask = 0;
+
+ if(samples) {
+ pfi->numSampleBuffers = 1;
+ pfi->numSamples = samples;
+ extended = TRUE;
+ }
+
+ ++stw_dev->pixelformat_extended_count;
+
+ if(!extended) {
+ ++stw_dev->pixelformat_count;
+ assert(stw_dev->pixelformat_count == stw_dev->pixelformat_extended_count);
+ }
+}
+
+void
+stw_pixelformat_init( void )
+{
+ struct pipe_screen *screen = stw_dev->screen;
+ unsigned i, j, k, l;
+
+ assert( !stw_dev->pixelformat_count );
+ assert( !stw_dev->pixelformat_extended_count );
+
+ for(i = 0; i < Elements(stw_pf_multisample); ++i) {
+ unsigned samples = stw_pf_multisample[i];
+
+      /* FIXME: re-enable MSAA when we can query it */
+ if(samples)
+ continue;
+
+ for(j = 0; j < Elements(stw_pf_color); ++j) {
+ const struct stw_pf_color_info *color = &stw_pf_color[j];
+
+ if(!screen->is_format_supported(screen, color->format, PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_RENDER_TARGET, 0))
+ continue;
+
+ for(k = 0; k < Elements(stw_pf_doublebuffer); ++k) {
+ unsigned doublebuffer = stw_pf_doublebuffer[k];
+
+ for(l = 0; l < Elements(stw_pf_depth_stencil); ++l) {
+ const struct stw_pf_depth_info *depth = &stw_pf_depth_stencil[l];
+
+ if(!screen->is_format_supported(screen, depth->format, PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_DEPTH_STENCIL, 0))
+ continue;
+
+ stw_pixelformat_add( stw_dev, color, depth, 0, doublebuffer, samples );
+ stw_pixelformat_add( stw_dev, color, depth, 16, doublebuffer, samples );
+ }
+ }
+ }
+ }
+
+ assert( stw_dev->pixelformat_count <= stw_dev->pixelformat_extended_count );
+ assert( stw_dev->pixelformat_extended_count <= STW_MAX_PIXELFORMATS );
+}
+
+uint
+stw_pixelformat_get_count( void )
+{
+ return stw_dev->pixelformat_count;
+}
+
+uint
+stw_pixelformat_get_extended_count( void )
+{
+ return stw_dev->pixelformat_extended_count;
+}
+
+const struct stw_pixelformat_info *
+stw_pixelformat_get_info( uint index )
+{
+ assert( index < stw_dev->pixelformat_extended_count );
+
+ return &stw_dev->pixelformats[index];
+}
+
+
+void
+stw_pixelformat_visual(GLvisual *visual,
+ const struct stw_pixelformat_info *pfi )
+{
+ memset(visual, 0, sizeof *visual);
+ _mesa_initialize_visual(
+ visual,
+ (pfi->pfd.iPixelType == PFD_TYPE_RGBA) ? GL_TRUE : GL_FALSE,
+ (pfi->pfd.dwFlags & PFD_DOUBLEBUFFER) ? GL_TRUE : GL_FALSE,
+ (pfi->pfd.dwFlags & PFD_STEREO) ? GL_TRUE : GL_FALSE,
+ pfi->pfd.cRedBits,
+ pfi->pfd.cGreenBits,
+ pfi->pfd.cBlueBits,
+ pfi->pfd.cAlphaBits,
+ (pfi->pfd.iPixelType == PFD_TYPE_COLORINDEX) ? pfi->pfd.cColorBits : 0,
+ pfi->pfd.cDepthBits,
+ pfi->pfd.cStencilBits,
+ pfi->pfd.cAccumRedBits,
+ pfi->pfd.cAccumGreenBits,
+ pfi->pfd.cAccumBlueBits,
+ pfi->pfd.cAccumAlphaBits,
+ pfi->numSamples );
+}
+
+
+int
+stw_pixelformat_describe(
+ HDC hdc,
+ int iPixelFormat,
+ UINT nBytes,
+ LPPIXELFORMATDESCRIPTOR ppfd )
+{
+ uint count;
+ uint index;
+ const struct stw_pixelformat_info *pfi;
+
+ (void) hdc;
+
+ count = stw_pixelformat_get_extended_count();
+ index = (uint) iPixelFormat - 1;
+
+ if (ppfd == NULL)
+ return count;
+ if (index >= count || nBytes != sizeof( PIXELFORMATDESCRIPTOR ))
+ return 0;
+
+ pfi = stw_pixelformat_get_info( index );
+
+ memcpy(ppfd, &pfi->pfd, sizeof( PIXELFORMATDESCRIPTOR ));
+
+ return count;
+}
+
+/* Only used by the wgl code, but kept here to avoid exporting the
+ * pixelformat.h functionality.
+ */
+int stw_pixelformat_choose( HDC hdc,
+ CONST PIXELFORMATDESCRIPTOR *ppfd )
+{
+ uint count;
+ uint index;
+ uint bestindex;
+ uint bestdelta;
+
+ (void) hdc;
+
+ count = stw_pixelformat_get_count();
+ bestindex = count;
+ bestdelta = ~0U;
+
+ for (index = 0; index < count; index++) {
+ uint delta = 0;
+ const struct stw_pixelformat_info *pfi = stw_pixelformat_get_info( index );
+
+ if (!(ppfd->dwFlags & PFD_DOUBLEBUFFER_DONTCARE) &&
+ !!(ppfd->dwFlags & PFD_DOUBLEBUFFER) !=
+ !!(pfi->pfd.dwFlags & PFD_DOUBLEBUFFER))
+ continue;
+
+      /* FIXME: Take into account individual channel bits */
+ if (ppfd->cColorBits != pfi->pfd.cColorBits)
+ delta += 8;
+
+ if (ppfd->cDepthBits != pfi->pfd.cDepthBits)
+ delta += 4;
+
+ if (ppfd->cStencilBits != pfi->pfd.cStencilBits)
+ delta += 2;
+
+ if (ppfd->cAlphaBits != pfi->pfd.cAlphaBits)
+ delta++;
+
+ if (delta < bestdelta) {
+ bestindex = index;
+ bestdelta = delta;
+ if (bestdelta == 0)
+ break;
+ }
+ }
+
+ if (bestindex == count)
+ return 0;
+
+ return bestindex + 1;
+}
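
To make the scoring above concrete (editorial sketch, not part of the patch): color-bit mismatches weigh 8, depth 4, stencil 2 and alpha 1, so a format that only misses the requested alpha bits beats one that misses the color depth. A hypothetical caller might look like this; the helper name is made up for illustration and <string.h> is assumed for memset:

/* Illustrative sketch -- not part of the patch. */
static int
example_choose_format(HDC hdc)
{
   PIXELFORMATDESCRIPTOR pfd;

   memset(&pfd, 0, sizeof pfd);
   pfd.nSize = sizeof pfd;
   pfd.nVersion = 1;
   pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
   pfd.iPixelType = PFD_TYPE_RGBA;
   pfd.cColorBits = 32;
   pfd.cDepthBits = 24;
   pfd.cStencilBits = 8;

   /* Returns a 1-based pixel format index, or 0 if nothing matches. */
   return stw_pixelformat_choose(hdc, &pfd);
}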
diff --git a/src/gallium/state_trackers/wgl/shared/stw_pixelformat.h b/src/gallium/state_trackers/wgl/shared/stw_pixelformat.h
new file mode 100644
index 0000000000..bec429231b
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_pixelformat.h
@@ -0,0 +1,65 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef STW_PIXELFORMAT_H
+#define STW_PIXELFORMAT_H
+
+#include <windows.h>
+
+#include "main/mtypes.h"
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_format.h"
+
+struct stw_pixelformat_info
+{
+ enum pipe_format color_format;
+ enum pipe_format depth_stencil_format;
+
+ PIXELFORMATDESCRIPTOR pfd;
+
+ unsigned numSampleBuffers;
+ unsigned numSamples;
+};
+
+void
+stw_pixelformat_init( void );
+
+uint
+stw_pixelformat_get_count( void );
+
+uint
+stw_pixelformat_get_extended_count( void );
+
+const struct stw_pixelformat_info *
+stw_pixelformat_get_info( uint index );
+
+void
+stw_pixelformat_visual(GLvisual *visual,
+ const struct stw_pixelformat_info *pfi );
+
+#endif /* STW_PIXELFORMAT_H */
diff --git a/src/gallium/state_trackers/wgl/shared/stw_public.h b/src/gallium/state_trackers/wgl/shared/stw_public.h
new file mode 100644
index 0000000000..7fe9cfb356
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_public.h
@@ -0,0 +1,73 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef STW_PUBLIC_H
+#define STW_PUBLIC_H
+
+#include <windows.h>
+
+BOOL stw_copy_context( UINT_PTR hglrcSrc,
+ UINT_PTR hglrcDst,
+ UINT mask );
+
+UINT_PTR stw_create_layer_context( HDC hdc,
+ int iLayerPlane );
+
+BOOL stw_share_lists( UINT_PTR hglrc1, UINT_PTR hglrc2 );
+
+BOOL stw_delete_context( UINT_PTR hglrc );
+
+BOOL
+stw_release_context( UINT_PTR dhglrc );
+
+UINT_PTR stw_get_current_context( void );
+
+HDC stw_get_current_dc( void );
+
+BOOL stw_make_current( HDC hdc, UINT_PTR hglrc );
+
+BOOL stw_swap_buffers( HDC hdc );
+
+BOOL
+stw_swap_layer_buffers( HDC hdc, UINT fuPlanes );
+
+PROC stw_get_proc_address( LPCSTR lpszProc );
+
+int stw_pixelformat_describe( HDC hdc,
+ int iPixelFormat,
+ UINT nBytes,
+ LPPIXELFORMATDESCRIPTOR ppfd );
+
+int stw_pixelformat_get( HDC hdc );
+
+BOOL stw_pixelformat_set( HDC hdc,
+ int iPixelFormat );
+
+int stw_pixelformat_choose( HDC hdc,
+ CONST PIXELFORMATDESCRIPTOR *ppfd );
+
+#endif /* STW_PUBLIC_H */
diff --git a/src/gallium/state_trackers/wgl/shared/stw_tls.c b/src/gallium/state_trackers/wgl/shared/stw_tls.c
new file mode 100644
index 0000000000..4bd6a9289c
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_tls.c
@@ -0,0 +1,139 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <windows.h>
+
+#include "pipe/p_compiler.h"
+#include "util/u_memory.h"
+#include "stw_tls.h"
+
+static DWORD tlsIndex = TLS_OUT_OF_INDEXES;
+
+boolean
+stw_tls_init(void)
+{
+ tlsIndex = TlsAlloc();
+ if (tlsIndex == TLS_OUT_OF_INDEXES) {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static INLINE struct stw_tls_data *
+stw_tls_data_create()
+{
+ struct stw_tls_data *data;
+
+ data = CALLOC_STRUCT(stw_tls_data);
+ if (!data)
+ goto no_data;
+
+ data->hCallWndProcHook = SetWindowsHookEx(WH_CALLWNDPROC,
+ stw_call_window_proc,
+ NULL,
+ GetCurrentThreadId());
+ if(data->hCallWndProcHook == NULL)
+ goto no_hook;
+
+ TlsSetValue(tlsIndex, data);
+
+ return data;
+
+no_hook:
+ FREE(data);
+no_data:
+ return NULL;
+}
+
+boolean
+stw_tls_init_thread(void)
+{
+ struct stw_tls_data *data;
+
+ if (tlsIndex == TLS_OUT_OF_INDEXES) {
+ return FALSE;
+ }
+
+ data = stw_tls_data_create();
+ if(!data)
+ return FALSE;
+
+ return TRUE;
+}
+
+void
+stw_tls_cleanup_thread(void)
+{
+ struct stw_tls_data *data;
+
+ if (tlsIndex == TLS_OUT_OF_INDEXES) {
+ return;
+ }
+
+ data = (struct stw_tls_data *) TlsGetValue(tlsIndex);
+ if(data) {
+ TlsSetValue(tlsIndex, NULL);
+
+ if(data->hCallWndProcHook) {
+ UnhookWindowsHookEx(data->hCallWndProcHook);
+ data->hCallWndProcHook = NULL;
+ }
+
+ FREE(data);
+ }
+}
+
+void
+stw_tls_cleanup(void)
+{
+ if (tlsIndex != TLS_OUT_OF_INDEXES) {
+ TlsFree(tlsIndex);
+ tlsIndex = TLS_OUT_OF_INDEXES;
+ }
+}
+
+struct stw_tls_data *
+stw_tls_get_data(void)
+{
+ struct stw_tls_data *data;
+
+ if (tlsIndex == TLS_OUT_OF_INDEXES) {
+ return NULL;
+ }
+
+ data = (struct stw_tls_data *) TlsGetValue(tlsIndex);
+ if(!data) {
+      /* DllMain is called with DLL_THREAD_ATTACH only for threads created
+       * after the DLL is loaded by the process, so create the data lazily
+       * here for any thread that predates it. */
+ data = stw_tls_data_create();
+ if(!data)
+ return NULL;
+ }
+
+ return data;
+}
diff --git a/src/gallium/state_trackers/wgl/shared/stw_tls.h b/src/gallium/state_trackers/wgl/shared/stw_tls.h
new file mode 100644
index 0000000000..fbf8b1cbee
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_tls.h
@@ -0,0 +1,59 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef STW_TLS_H
+#define STW_TLS_H
+
+#include <windows.h>
+
+struct stw_tls_data
+{
+ HHOOK hCallWndProcHook;
+};
+
+boolean
+stw_tls_init(void);
+
+boolean
+stw_tls_init_thread(void);
+
+void
+stw_tls_cleanup_thread(void);
+
+void
+stw_tls_cleanup(void);
+
+struct stw_tls_data *
+stw_tls_get_data(void);
+
+LRESULT CALLBACK
+stw_call_window_proc(
+ int nCode,
+ WPARAM wParam,
+ LPARAM lParam );
+
+#endif /* STW_TLS_H */
diff --git a/src/gallium/state_trackers/wgl/shared/stw_winsys.h b/src/gallium/state_trackers/wgl/shared/stw_winsys.h
new file mode 100644
index 0000000000..c0bf82c9ed
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/shared/stw_winsys.h
@@ -0,0 +1,65 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef STW_WINSYS_H
+#define STW_WINSYS_H
+
+#include <windows.h> /* for HDC */
+
+#include "pipe/p_compiler.h"
+
+struct pipe_screen;
+struct pipe_context;
+struct pipe_surface;
+
+struct stw_winsys
+{
+ struct pipe_screen *
+ (*create_screen)( void );
+
+ struct pipe_context *
+ (*create_context)( struct pipe_screen *screen );
+
+ void
+ (*flush_frontbuffer)( struct pipe_screen *screen,
+ struct pipe_surface *surf,
+ HDC hDC );
+};
+
+boolean
+stw_init(const struct stw_winsys *stw_winsys);
+
+boolean
+stw_init_thread(void);
+
+void
+stw_cleanup_thread(void);
+
+void
+stw_cleanup(void);
+
+#endif /* STW_WINSYS_H */
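
For orientation (editorial sketch, not part of the patch): a winsys backend is expected to fill in struct stw_winsys with its own callbacks and hand it to stw_init from its initialization path. The my_* names below are placeholders for illustration, not functions that exist anywhere in the tree.

/* Illustrative sketch -- not part of the patch. */
static struct pipe_screen *
my_create_screen(void)
{
   return NULL; /* a real backend creates a pipe_screen for its hardware */
}

static struct pipe_context *
my_create_context(struct pipe_screen *screen)
{
   return NULL; /* and a pipe_context on top of that screen */
}

static void
my_flush_frontbuffer(struct pipe_screen *screen,
                     struct pipe_surface *surf,
                     HDC hDC)
{
   /* present the surface contents to the window identified by hDC */
}

static const struct stw_winsys my_winsys = {
   my_create_screen,
   my_create_context,
   my_flush_frontbuffer
};

boolean
my_dll_init(void)
{
   return stw_init(&my_winsys);
}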
diff --git a/src/gallium/state_trackers/wgl/wgl/stw_wgl.c b/src/gallium/state_trackers/wgl/wgl/stw_wgl.c
new file mode 100644
index 0000000000..a131292f7a
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/wgl/stw_wgl.c
@@ -0,0 +1,329 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <windows.h>
+
+#include "util/u_debug.h"
+#include "shared/stw_public.h"
+#include "stw_wgl.h"
+
+
+WINGDIAPI BOOL APIENTRY
+wglCopyContext(
+ HGLRC hglrcSrc,
+ HGLRC hglrcDst,
+ UINT mask )
+{
+ return stw_copy_context( (UINT_PTR)hglrcSrc,
+ (UINT_PTR)hglrcDst,
+ mask );
+}
+
+WINGDIAPI HGLRC APIENTRY
+wglCreateContext(
+ HDC hdc )
+{
+ return wglCreateLayerContext(hdc, 0);
+}
+
+WINGDIAPI HGLRC APIENTRY
+wglCreateLayerContext(
+ HDC hdc,
+ int iLayerPlane )
+{
+ return (HGLRC) stw_create_layer_context( hdc, iLayerPlane );
+}
+
+WINGDIAPI BOOL APIENTRY
+wglDeleteContext(
+ HGLRC hglrc )
+{
+ return stw_delete_context( (UINT_PTR)hglrc );
+}
+
+
+WINGDIAPI HGLRC APIENTRY
+wglGetCurrentContext( VOID )
+{
+ return (HGLRC)stw_get_current_context();
+}
+
+WINGDIAPI HDC APIENTRY
+wglGetCurrentDC( VOID )
+{
+ return stw_get_current_dc();
+}
+
+WINGDIAPI BOOL APIENTRY
+wglMakeCurrent(
+ HDC hdc,
+ HGLRC hglrc )
+{
+ return stw_make_current( hdc, (UINT_PTR)hglrc );
+}
+
+
+WINGDIAPI BOOL APIENTRY
+wglSwapBuffers(
+ HDC hdc )
+{
+ return stw_swap_buffers( hdc );
+}
+
+
+WINGDIAPI BOOL APIENTRY
+wglSwapLayerBuffers(
+ HDC hdc,
+ UINT fuPlanes )
+{
+ return stw_swap_layer_buffers( hdc, fuPlanes );
+}
+
+WINGDIAPI PROC APIENTRY
+wglGetProcAddress(
+ LPCSTR lpszProc )
+{
+ return stw_get_proc_address( lpszProc );
+}
+
+
+WINGDIAPI int APIENTRY
+wglChoosePixelFormat(
+ HDC hdc,
+ CONST PIXELFORMATDESCRIPTOR *ppfd )
+{
+ if (ppfd->nSize != sizeof( PIXELFORMATDESCRIPTOR ) || ppfd->nVersion != 1)
+ return 0;
+ if (ppfd->iPixelType != PFD_TYPE_RGBA)
+ return 0;
+ if (!(ppfd->dwFlags & PFD_DRAW_TO_WINDOW))
+ return 0;
+ if (!(ppfd->dwFlags & PFD_SUPPORT_OPENGL))
+ return 0;
+ if (ppfd->dwFlags & PFD_DRAW_TO_BITMAP)
+ return 0;
+ if (ppfd->dwFlags & PFD_SUPPORT_GDI)
+ return 0;
+ if (!(ppfd->dwFlags & PFD_STEREO_DONTCARE) && (ppfd->dwFlags & PFD_STEREO))
+ return 0;
+
+ return stw_pixelformat_choose( hdc, ppfd );
+}
+
+WINGDIAPI int APIENTRY
+wglDescribePixelFormat(
+ HDC hdc,
+ int iPixelFormat,
+ UINT nBytes,
+ LPPIXELFORMATDESCRIPTOR ppfd )
+{
+ return stw_pixelformat_describe( hdc, iPixelFormat, nBytes, ppfd );
+}
+
+WINGDIAPI int APIENTRY
+wglGetPixelFormat(
+ HDC hdc )
+{
+ return stw_pixelformat_get( hdc );
+}
+
+WINGDIAPI BOOL APIENTRY
+wglSetPixelFormat(
+ HDC hdc,
+ int iPixelFormat,
+ const PIXELFORMATDESCRIPTOR *ppfd )
+{
+ if (ppfd->nSize != sizeof( PIXELFORMATDESCRIPTOR ))
+ return FALSE;
+
+ return stw_pixelformat_set( hdc, iPixelFormat );
+}
+
+
+WINGDIAPI BOOL APIENTRY
+wglUseFontBitmapsA(
+ HDC hdc,
+ DWORD first,
+ DWORD count,
+ DWORD listBase )
+{
+ (void) hdc;
+ (void) first;
+ (void) count;
+ (void) listBase;
+
+ assert( 0 );
+
+ return FALSE;
+}
+
+WINGDIAPI BOOL APIENTRY
+wglShareLists(
+ HGLRC hglrc1,
+ HGLRC hglrc2 )
+{
+   return stw_share_lists( (UINT_PTR)hglrc1, (UINT_PTR)hglrc2 );
+}
+
+WINGDIAPI BOOL APIENTRY
+wglUseFontBitmapsW(
+ HDC hdc,
+ DWORD first,
+ DWORD count,
+ DWORD listBase )
+{
+ (void) hdc;
+ (void) first;
+ (void) count;
+ (void) listBase;
+
+ assert( 0 );
+
+ return FALSE;
+}
+
+WINGDIAPI BOOL APIENTRY
+wglUseFontOutlinesA(
+ HDC hdc,
+ DWORD first,
+ DWORD count,
+ DWORD listBase,
+ FLOAT deviation,
+ FLOAT extrusion,
+ int format,
+ LPGLYPHMETRICSFLOAT lpgmf )
+{
+ (void) hdc;
+ (void) first;
+ (void) count;
+ (void) listBase;
+ (void) deviation;
+ (void) extrusion;
+ (void) format;
+ (void) lpgmf;
+
+ assert( 0 );
+
+ return FALSE;
+}
+
+WINGDIAPI BOOL APIENTRY
+wglUseFontOutlinesW(
+ HDC hdc,
+ DWORD first,
+ DWORD count,
+ DWORD listBase,
+ FLOAT deviation,
+ FLOAT extrusion,
+ int format,
+ LPGLYPHMETRICSFLOAT lpgmf )
+{
+ (void) hdc;
+ (void) first;
+ (void) count;
+ (void) listBase;
+ (void) deviation;
+ (void) extrusion;
+ (void) format;
+ (void) lpgmf;
+
+ assert( 0 );
+
+ return FALSE;
+}
+
+WINGDIAPI BOOL APIENTRY
+wglDescribeLayerPlane(
+ HDC hdc,
+ int iPixelFormat,
+ int iLayerPlane,
+ UINT nBytes,
+ LPLAYERPLANEDESCRIPTOR plpd )
+{
+ (void) hdc;
+ (void) iPixelFormat;
+ (void) iLayerPlane;
+ (void) nBytes;
+ (void) plpd;
+
+ assert( 0 );
+
+ return FALSE;
+}
+
+WINGDIAPI int APIENTRY
+wglSetLayerPaletteEntries(
+ HDC hdc,
+ int iLayerPlane,
+ int iStart,
+ int cEntries,
+ CONST COLORREF *pcr )
+{
+ (void) hdc;
+ (void) iLayerPlane;
+ (void) iStart;
+ (void) cEntries;
+ (void) pcr;
+
+ assert( 0 );
+
+ return 0;
+}
+
+WINGDIAPI int APIENTRY
+wglGetLayerPaletteEntries(
+ HDC hdc,
+ int iLayerPlane,
+ int iStart,
+ int cEntries,
+ COLORREF *pcr )
+{
+ (void) hdc;
+ (void) iLayerPlane;
+ (void) iStart;
+ (void) cEntries;
+ (void) pcr;
+
+ assert( 0 );
+
+ return 0;
+}
+
+WINGDIAPI BOOL APIENTRY
+wglRealizeLayerPalette(
+ HDC hdc,
+ int iLayerPlane,
+ BOOL bRealize )
+{
+ (void) hdc;
+ (void) iLayerPlane;
+ (void) bRealize;
+
+ assert( 0 );
+
+ return FALSE;
+}
diff --git a/src/gallium/state_trackers/wgl/wgl/stw_wgl.h b/src/gallium/state_trackers/wgl/wgl/stw_wgl.h
new file mode 100644
index 0000000000..a98179944a
--- /dev/null
+++ b/src/gallium/state_trackers/wgl/wgl/stw_wgl.h
@@ -0,0 +1,63 @@
+/**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef STW_WGL_H_
+#define STW_WGL_H_
+
+
+#include <windows.h>
+
+#include <GL/gl.h>
+
+
+/*
+ * Undeclared APIs exported by opengl32.dll
+ */
+
+WINGDIAPI BOOL WINAPI
+wglSwapBuffers(HDC hdc);
+
+WINGDIAPI int WINAPI
+wglChoosePixelFormat(HDC hdc,
+ CONST PIXELFORMATDESCRIPTOR *ppfd);
+
+WINGDIAPI int WINAPI
+wglDescribePixelFormat(HDC hdc,
+ int iPixelFormat,
+ UINT nBytes,
+ LPPIXELFORMATDESCRIPTOR ppfd);
+
+WINGDIAPI int WINAPI
+wglGetPixelFormat(HDC hdc);
+
+WINGDIAPI BOOL WINAPI
+wglSetPixelFormat(HDC hdc,
+ int iPixelFormat,
+ CONST PIXELFORMATDESCRIPTOR *ppfd);
+
+
+#endif /* STW_WGL_H_ */
diff --git a/src/gallium/state_trackers/xorg/Makefile b/src/gallium/state_trackers/xorg/Makefile
new file mode 100644
index 0000000000..27a1990724
--- /dev/null
+++ b/src/gallium/state_trackers/xorg/Makefile
@@ -0,0 +1,18 @@
+TOP = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = xorgtracker
+
+LIBRARY_INCLUDES = \
+ -DHAVE_CONFIG_H \
+ $(shell pkg-config --cflags-only-I pixman-1 xorg-server libdrm xproto) \
+ -I$(TOP)/src/gallium/include \
+ -I$(TOP)/src/gallium/auxiliary \
+ -I$(TOP)/include \
+ -I$(TOP)/src/mesa \
+ -I$(TOP)/src/mesa/drivers/dri/common \
+ -I$(TOP)/src/mesa/main
+
+C_SOURCES = $(wildcard ./*.c)
+
+include ../../Makefile.template
diff --git a/src/gallium/state_trackers/xorg/SConscript b/src/gallium/state_trackers/xorg/SConscript
new file mode 100644
index 0000000000..65f55ea378
--- /dev/null
+++ b/src/gallium/state_trackers/xorg/SConscript
@@ -0,0 +1,27 @@
+#######################################################################
+# SConscript for xorg state_tracker
+
+Import('*')
+
+if 'xorg' in env['statetrackers']:
+
+ env = env.Clone()
+
+ env.Append(CPPPATH = [
+ '#/src/mesa',
+ ])
+
+ env.ParseConfig('pkg-config --cflags --libs xorg-server')
+
+ st_xorg = env.ConvenienceLibrary(
+ target = 'st_xorg',
+ source = [ 'xorg_composite.c',
+ 'xorg_crtc.c',
+ 'xorg_dri2.c',
+ 'xorg_driver.c',
+ 'xorg_exa.c',
+ 'xorg_exa_tgsi.c',
+ 'xorg_output.c',
+ ]
+ )
+ Export('st_xorg')
diff --git a/src/gallium/state_trackers/xorg/xorg_composite.c b/src/gallium/state_trackers/xorg/xorg_composite.c
new file mode 100644
index 0000000000..15c955450d
--- /dev/null
+++ b/src/gallium/state_trackers/xorg/xorg_composite.c
@@ -0,0 +1,1019 @@
+#include "xorg_composite.h"
+
+#include "xorg_exa_tgsi.h"
+
+#include "cso_cache/cso_context.h"
+#include "util/u_draw_quad.h"
+#include "util/u_math.h"
+
+#include "pipe/p_inlines.h"
+
+struct xorg_composite_blend {
+ int op:8;
+
+ unsigned rgb_src_factor:5; /**< PIPE_BLENDFACTOR_x */
+ unsigned rgb_dst_factor:5; /**< PIPE_BLENDFACTOR_x */
+
+ unsigned alpha_src_factor:5; /**< PIPE_BLENDFACTOR_x */
+ unsigned alpha_dst_factor:5; /**< PIPE_BLENDFACTOR_x */
+};
+
+#define BLEND_OP_OVER 3
+static const struct xorg_composite_blend xorg_blends[] = {
+ { PictOpClear,
+ PIPE_BLENDFACTOR_CONST_COLOR, PIPE_BLENDFACTOR_CONST_ALPHA,
+ PIPE_BLENDFACTOR_ZERO, PIPE_BLENDFACTOR_ZERO },
+
+ { PictOpSrc,
+ PIPE_BLENDFACTOR_ONE, PIPE_BLENDFACTOR_ONE,
+ PIPE_BLENDFACTOR_ZERO, PIPE_BLENDFACTOR_ZERO },
+
+ { PictOpDst,
+ PIPE_BLENDFACTOR_ZERO, PIPE_BLENDFACTOR_ZERO,
+ PIPE_BLENDFACTOR_ONE, PIPE_BLENDFACTOR_ONE },
+
+ { PictOpOver,
+ PIPE_BLENDFACTOR_SRC_ALPHA, PIPE_BLENDFACTOR_ONE,
+ PIPE_BLENDFACTOR_INV_SRC_ALPHA, PIPE_BLENDFACTOR_INV_SRC_ALPHA },
+
+ { PictOpOverReverse,
+ PIPE_BLENDFACTOR_SRC_ALPHA, PIPE_BLENDFACTOR_ONE,
+ PIPE_BLENDFACTOR_INV_SRC_ALPHA, PIPE_BLENDFACTOR_INV_SRC_ALPHA },
+};
+
+
+static INLINE void
+pixel_to_float4(Pixel pixel, float *color)
+{
+ CARD32 r, g, b, a;
+
+ a = (pixel >> 24) & 0xff;
+ r = (pixel >> 16) & 0xff;
+ g = (pixel >> 8) & 0xff;
+ b = (pixel >> 0) & 0xff;
+ color[0] = ((float)r) / 255.;
+ color[1] = ((float)g) / 255.;
+ color[2] = ((float)b) / 255.;
+ color[3] = ((float)a) / 255.;
+}
+
+static INLINE void
+render_pixel_to_float4(PictFormatPtr format,
+ CARD32 pixel, float *color)
+{
+ CARD32 r, g, b, a;
+
+ debug_assert(format->type == PictTypeDirect);
+
+ r = (pixel >> format->direct.red) & format->direct.redMask;
+ g = (pixel >> format->direct.green) & format->direct.greenMask;
+ b = (pixel >> format->direct.blue) & format->direct.blueMask;
+ a = (pixel >> format->direct.alpha) & format->direct.alphaMask;
+ color[0] = ((float)r) / ((float)format->direct.redMask);
+ color[1] = ((float)g) / ((float)format->direct.greenMask);
+ color[2] = ((float)b) / ((float)format->direct.blueMask);
+ color[3] = ((float)a) / ((float)format->direct.alphaMask);
+}
+
+struct acceleration_info {
+ int op : 16;
+ int with_mask : 1;
+ int component_alpha : 1;
+};
+static const struct acceleration_info accelerated_ops[] = {
+ {PictOpClear, 1, 0},
+ {PictOpSrc, 1, 0},
+ {PictOpDst, 1, 0},
+ {PictOpOver, 1, 0},
+ {PictOpOverReverse, 1, 0},
+ {PictOpIn, 1, 0},
+ {PictOpInReverse, 1, 0},
+ {PictOpOut, 1, 0},
+ {PictOpOutReverse, 1, 0},
+ {PictOpAtop, 1, 0},
+ {PictOpAtopReverse, 1, 0},
+ {PictOpXor, 1, 0},
+ {PictOpAdd, 1, 0},
+ {PictOpSaturate, 1, 0},
+};
+
+static struct xorg_composite_blend
+blend_for_op(int op)
+{
+ const int num_blends =
+ sizeof(xorg_blends)/sizeof(struct xorg_composite_blend);
+ int i;
+
+ for (i = 0; i < num_blends; ++i) {
+ if (xorg_blends[i].op == op)
+ return xorg_blends[i];
+ }
+ return xorg_blends[BLEND_OP_OVER];
+}
+
+static INLINE int
+render_repeat_to_gallium(int mode)
+{
+ switch(mode) {
+ case RepeatNone:
+ return PIPE_TEX_WRAP_CLAMP;
+ case RepeatNormal:
+ return PIPE_TEX_WRAP_REPEAT;
+ case RepeatReflect:
+ return PIPE_TEX_WRAP_MIRROR_REPEAT;
+ case RepeatPad:
+ return PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ default:
+ debug_printf("Unsupported repeat mode\n");
+ }
+ return PIPE_TEX_WRAP_REPEAT;
+}
+
+
+static INLINE void
+setup_vertex0(float vertex[2][4], float x, float y,
+ float color[4])
+{
+ vertex[0][0] = x;
+ vertex[0][1] = y;
+ vertex[0][2] = 0.f; /*z*/
+ vertex[0][3] = 1.f; /*w*/
+
+ vertex[1][0] = color[0]; /*r*/
+ vertex[1][1] = color[1]; /*g*/
+ vertex[1][2] = color[2]; /*b*/
+ vertex[1][3] = color[3]; /*a*/
+}
+
+static struct pipe_buffer *
+setup_vertex_data0(struct exa_context *ctx,
+ int srcX, int srcY, int maskX, int maskY,
+ int dstX, int dstY, int width, int height)
+{
+ float vertices[4][2][4];
+
+ /* 1st vertex */
+ setup_vertex0(vertices[0], dstX, dstY,
+ ctx->solid_color);
+ /* 2nd vertex */
+ setup_vertex0(vertices[1], dstX + width, dstY,
+ ctx->solid_color);
+ /* 3rd vertex */
+ setup_vertex0(vertices[2], dstX + width, dstY + height,
+ ctx->solid_color);
+ /* 4th vertex */
+ setup_vertex0(vertices[3], dstX, dstY + height,
+ ctx->solid_color);
+
+ return pipe_user_buffer_create(ctx->pipe->screen,
+ vertices,
+ sizeof(vertices));
+}
+
+static INLINE void
+setup_vertex1(float vertex[2][4], float x, float y, float s, float t)
+{
+ vertex[0][0] = x;
+ vertex[0][1] = y;
+ vertex[0][2] = 0.f; /*z*/
+ vertex[0][3] = 1.f; /*w*/
+
+ vertex[1][0] = s; /*s*/
+ vertex[1][1] = t; /*t*/
+ vertex[1][2] = 0.f; /*r*/
+ vertex[1][3] = 1.f; /*q*/
+}
+
+static struct pipe_buffer *
+setup_vertex_data1(struct exa_context *ctx,
+ int srcX, int srcY, int maskX, int maskY,
+ int dstX, int dstY, int width, int height)
+{
+ float vertices[4][2][4];
+ float s0, t0, s1, t1;
+ struct pipe_texture *src = ctx->bound_textures[0];
+
+   s0 = srcX / (float)src->width[0];
+   s1 = (srcX + width) / (float)src->width[0];
+   t0 = srcY / (float)src->height[0];
+   t1 = (srcY + height) / (float)src->height[0];
+
+ /* 1st vertex */
+ setup_vertex1(vertices[0], dstX, dstY,
+ s0, t0);
+ /* 2nd vertex */
+ setup_vertex1(vertices[1], dstX + width, dstY,
+ s1, t0);
+ /* 3rd vertex */
+ setup_vertex1(vertices[2], dstX + width, dstY + height,
+ s1, t1);
+ /* 4th vertex */
+ setup_vertex1(vertices[3], dstX, dstY + height,
+ s0, t1);
+
+ return pipe_user_buffer_create(ctx->pipe->screen,
+ vertices,
+ sizeof(vertices));
+}
+
+static struct pipe_buffer *
+setup_vertex_data_tex(struct exa_context *ctx,
+ float x0, float y0, float x1, float y1,
+ float s0, float t0, float s1, float t1,
+ float z)
+{
+ float vertices[4][2][4];
+
+ /* 1st vertex */
+ setup_vertex1(vertices[0], x0, y0,
+ s0, t0);
+ /* 2nd vertex */
+ setup_vertex1(vertices[1], x1, y0,
+ s1, t0);
+ /* 3rd vertex */
+ setup_vertex1(vertices[2], x1, y1,
+ s1, t1);
+ /* 4th vertex */
+ setup_vertex1(vertices[3], x0, y1,
+ s0, t1);
+
+ return pipe_user_buffer_create(ctx->pipe->screen,
+ vertices,
+ sizeof(vertices));
+}
+
+
+
+static INLINE void
+setup_vertex2(float vertex[3][4], float x, float y,
+ float s0, float t0, float s1, float t1)
+{
+ vertex[0][0] = x;
+ vertex[0][1] = y;
+ vertex[0][2] = 0.f; /*z*/
+ vertex[0][3] = 1.f; /*w*/
+
+ vertex[1][0] = s0; /*s*/
+ vertex[1][1] = t0; /*t*/
+ vertex[1][2] = 0.f; /*r*/
+ vertex[1][3] = 1.f; /*q*/
+
+ vertex[2][0] = s1; /*s*/
+ vertex[2][1] = t1; /*t*/
+ vertex[2][2] = 0.f; /*r*/
+ vertex[2][3] = 1.f; /*q*/
+}
+
+static struct pipe_buffer *
+setup_vertex_data2(struct exa_context *ctx,
+ int srcX, int srcY, int maskX, int maskY,
+ int dstX, int dstY, int width, int height)
+{
+ float vertices[4][3][4];
+ float st0[4], st1[4];
+ struct pipe_texture *src = ctx->bound_textures[0];
+   struct pipe_texture *mask = ctx->bound_textures[1];
+
+   st0[0] = srcX / (float)src->width[0];
+   st0[1] = srcY / (float)src->height[0];
+   st0[2] = (srcX + width) / (float)src->width[0];
+   st0[3] = (srcY + height) / (float)src->height[0];
+
+   st1[0] = maskX / (float)mask->width[0];
+   st1[1] = maskY / (float)mask->height[0];
+   st1[2] = (maskX + width) / (float)mask->width[0];
+   st1[3] = (maskY + height) / (float)mask->height[0];
+
+ /* 1st vertex */
+ setup_vertex2(vertices[0], dstX, dstY,
+ st0[0], st0[1], st1[0], st1[1]);
+ /* 2nd vertex */
+ setup_vertex2(vertices[1], dstX + width, dstY,
+ st0[2], st0[1], st1[2], st1[1]);
+ /* 3rd vertex */
+ setup_vertex2(vertices[2], dstX + width, dstY + height,
+ st0[2], st0[3], st1[2], st1[3]);
+ /* 4th vertex */
+ setup_vertex2(vertices[3], dstX, dstY + height,
+ st0[0], st0[3], st1[0], st1[3]);
+
+ return pipe_user_buffer_create(ctx->pipe->screen,
+ vertices,
+ sizeof(vertices));
+}
+
+boolean xorg_composite_accelerated(int op,
+ PicturePtr pSrcPicture,
+ PicturePtr pMaskPicture,
+ PicturePtr pDstPicture)
+{
+ unsigned i;
+ unsigned accel_ops_count =
+ sizeof(accelerated_ops)/sizeof(struct acceleration_info);
+
+
+   /* FIXME: currently accel is disabled */
+ return FALSE;
+
+ if (pSrcPicture) {
+ /* component alpha not supported */
+ if (pSrcPicture->componentAlpha)
+ return FALSE;
+ /* fills not supported */
+ if (pSrcPicture->pSourcePict)
+ return FALSE;
+ }
+
+ for (i = 0; i < accel_ops_count; ++i) {
+ if (op == accelerated_ops[i].op) {
+ if (pMaskPicture && !accelerated_ops[i].with_mask)
+ return FALSE;
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+static void
+bind_clip_state(struct exa_context *exa)
+{
+}
+
+static void
+bind_framebuffer_state(struct exa_context *exa, struct exa_pixmap_priv *pDst)
+{
+ unsigned i;
+ struct pipe_framebuffer_state state;
+ struct pipe_surface *surface = exa_gpu_surface(exa, pDst);
+ memset(&state, 0, sizeof(struct pipe_framebuffer_state));
+
+ state.width = pDst->tex->width[0];
+ state.height = pDst->tex->height[0];
+
+ state.nr_cbufs = 1;
+ state.cbufs[0] = surface;
+ for (i = 1; i < PIPE_MAX_COLOR_BUFS; ++i)
+ state.cbufs[i] = 0;
+
+ /* currently we don't use depth/stencil */
+ state.zsbuf = 0;
+
+ cso_set_framebuffer(exa->cso, &state);
+}
+
+enum AxisOrientation {
+ Y0_BOTTOM,
+ Y0_TOP
+};
+
+static void
+set_viewport(struct exa_context *exa, int width, int height,
+ enum AxisOrientation orientation)
+{
+ struct pipe_viewport_state viewport;
+ float y_scale = (orientation == Y0_BOTTOM) ? -2.f : 2.f;
+
+ viewport.scale[0] = width / 2.f;
+ viewport.scale[1] = height / y_scale;
+ viewport.scale[2] = 1.0;
+ viewport.scale[3] = 1.0;
+ viewport.translate[0] = width / 2.f;
+ viewport.translate[1] = height / 2.f;
+ viewport.translate[2] = 0.0;
+ viewport.translate[3] = 0.0;
+
+ cso_set_viewport(exa->cso, &viewport);
+}
+
+static void
+bind_viewport_state(struct exa_context *exa, struct exa_pixmap_priv *pDst)
+{
+ int width = pDst->tex->width[0];
+ int height = pDst->tex->height[0];
+
+ debug_printf("Bind viewport (%d, %d)\n", width, height);
+
+ set_viewport(exa, width, height, Y0_TOP);
+}
+
+static void
+bind_blend_state(struct exa_context *exa, int op,
+ PicturePtr pSrcPicture, PicturePtr pMaskPicture)
+{
+ boolean component_alpha = (pSrcPicture) ?
+ pSrcPicture->componentAlpha : FALSE;
+ struct xorg_composite_blend blend_opt;
+ struct pipe_blend_state blend;
+
+ if (component_alpha) {
+ op = PictOpOver;
+ }
+ blend_opt = blend_for_op(op);
+
+ memset(&blend, 0, sizeof(struct pipe_blend_state));
+ blend.blend_enable = 1;
+ blend.colormask |= PIPE_MASK_R;
+ blend.colormask |= PIPE_MASK_G;
+ blend.colormask |= PIPE_MASK_B;
+ blend.colormask |= PIPE_MASK_A;
+
+ blend.rgb_src_factor = blend_opt.rgb_src_factor;
+ blend.alpha_src_factor = blend_opt.alpha_src_factor;
+ blend.rgb_dst_factor = blend_opt.rgb_dst_factor;
+ blend.alpha_dst_factor = blend_opt.alpha_dst_factor;
+
+ cso_set_blend(exa->cso, &blend);
+}
+
+static void
+bind_rasterizer_state(struct exa_context *exa)
+{
+ struct pipe_rasterizer_state raster;
+ memset(&raster, 0, sizeof(struct pipe_rasterizer_state));
+ raster.gl_rasterization_rules = 1;
+ cso_set_rasterizer(exa->cso, &raster);
+}
+
+static void
+bind_shaders(struct exa_context *exa, int op,
+ PicturePtr pSrcPicture, PicturePtr pMaskPicture)
+{
+ unsigned vs_traits = 0, fs_traits = 0;
+ struct xorg_shader shader;
+
+ exa->has_solid_color = FALSE;
+
+ if (pSrcPicture) {
+ if (pSrcPicture->pSourcePict) {
+ if (pSrcPicture->pSourcePict->type == SourcePictTypeSolidFill) {
+ fs_traits |= FS_SOLID_FILL;
+ vs_traits |= VS_SOLID_FILL;
+ render_pixel_to_float4(pSrcPicture->pFormat,
+ pSrcPicture->pSourcePict->solidFill.color,
+ exa->solid_color);
+ exa->has_solid_color = TRUE;
+ } else {
+            debug_assert(!"gradients not supported");
+ }
+ } else {
+ fs_traits |= FS_COMPOSITE;
+ vs_traits |= VS_COMPOSITE;
+ }
+ }
+
+ if (pMaskPicture) {
+ vs_traits |= VS_MASK;
+ fs_traits |= FS_MASK;
+ }
+
+ shader = xorg_shaders_get(exa->shaders, vs_traits, fs_traits);
+ cso_set_vertex_shader_handle(exa->cso, shader.vs);
+ cso_set_fragment_shader_handle(exa->cso, shader.fs);
+}
+
+
+static void
+bind_samplers(struct exa_context *exa, int op,
+ PicturePtr pSrcPicture, PicturePtr pMaskPicture,
+ PicturePtr pDstPicture,
+ struct exa_pixmap_priv *pSrc,
+ struct exa_pixmap_priv *pMask,
+ struct exa_pixmap_priv *pDst)
+{
+ struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
+ struct pipe_sampler_state src_sampler, mask_sampler;
+
+ exa->num_bound_samplers = 0;
+
+ memset(&src_sampler, 0, sizeof(struct pipe_sampler_state));
+ memset(&mask_sampler, 0, sizeof(struct pipe_sampler_state));
+
+ if (pSrcPicture && pSrc) {
+ unsigned src_wrap = render_repeat_to_gallium(
+ pSrcPicture->repeatType);
+ src_sampler.wrap_s = src_wrap;
+ src_sampler.wrap_t = src_wrap;
+      src_sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
+      src_sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
+ src_sampler.normalized_coords = 1;
+ samplers[0] = &src_sampler;
+ exa->bound_textures[0] = pSrc->tex;
+ ++exa->num_bound_samplers;
+ }
+
+ if (pMaskPicture && pMask) {
+ unsigned mask_wrap = render_repeat_to_gallium(
+ pMaskPicture->repeatType);
+ mask_sampler.wrap_s = mask_wrap;
+ mask_sampler.wrap_t = mask_wrap;
+      mask_sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
+      mask_sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
+ mask_sampler.normalized_coords = 1;
+ samplers[1] = &mask_sampler;
+ exa->bound_textures[1] = pMask->tex;
+ ++exa->num_bound_samplers;
+ }
+
+ cso_set_samplers(exa->cso, exa->num_bound_samplers,
+ (const struct pipe_sampler_state **)samplers);
+ cso_set_sampler_textures(exa->cso, exa->num_bound_samplers,
+ exa->bound_textures);
+}
+
+static void
+setup_vs_constant_buffer(struct exa_context *exa,
+ int width, int height)
+{
+ const int param_bytes = 8 * sizeof(float);
+ float vs_consts[8] = {
+ 2.f/width, 2.f/height, 1, 1,
+ -1, -1, 0, 0
+ };
+ struct pipe_constant_buffer *cbuf = &exa->vs_const_buffer;
+
+ pipe_buffer_reference(&cbuf->buffer, NULL);
+ cbuf->buffer = pipe_buffer_create(exa->pipe->screen, 16,
+ PIPE_BUFFER_USAGE_CONSTANT,
+ param_bytes);
+
+ if (cbuf->buffer) {
+ pipe_buffer_write(exa->pipe->screen, cbuf->buffer,
+ 0, param_bytes, vs_consts);
+ }
+ exa->pipe->set_constant_buffer(exa->pipe, PIPE_SHADER_VERTEX, 0, cbuf);
+}
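Not part of the patch: the eight floats above pack a per-component scale in the first vec4 and a bias in the second. Assuming the generated TGSI vertex shader applies them as pos * scale + bias, they map destination pixel coordinates onto clip space; a minimal sketch of that mapping, with a hypothetical helper name:

    /* Sketch only -- mirrors the scale/bias packed into vs_consts above. */
    static void
    example_pixel_to_clip(float px, float py, int width, int height, float out[2])
    {
       out[0] = px * (2.f / width)  - 1.f;   /* 0 -> -1,  width  -> +1 */
       out[1] = py * (2.f / height) - 1.f;   /* 0 -> -1,  height -> +1 */
    }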
+
+
+static void
+setup_fs_constant_buffer(struct exa_context *exa)
+{
+ const int param_bytes = 4 * sizeof(float);
+ float fs_consts[4] = {
+ 0, 0, 0, 1,
+ };
+ struct pipe_constant_buffer *cbuf = &exa->fs_const_buffer;
+
+ pipe_buffer_reference(&cbuf->buffer, NULL);
+ cbuf->buffer = pipe_buffer_create(exa->pipe->screen, 16,
+ PIPE_BUFFER_USAGE_CONSTANT,
+ param_bytes);
+
+ if (cbuf->buffer) {
+ pipe_buffer_write(exa->pipe->screen, cbuf->buffer,
+ 0, param_bytes, fs_consts);
+ }
+ exa->pipe->set_constant_buffer(exa->pipe, PIPE_SHADER_FRAGMENT, 0, cbuf);
+}
+
+static void
+setup_constant_buffers(struct exa_context *exa, struct exa_pixmap_priv *pDst)
+{
+ int width = pDst->tex->width[0];
+ int height = pDst->tex->height[0];
+
+ setup_vs_constant_buffer(exa, width, height);
+ setup_fs_constant_buffer(exa);
+}
+
+boolean xorg_composite_bind_state(struct exa_context *exa,
+ int op,
+ PicturePtr pSrcPicture,
+ PicturePtr pMaskPicture,
+ PicturePtr pDstPicture,
+ struct exa_pixmap_priv *pSrc,
+ struct exa_pixmap_priv *pMask,
+ struct exa_pixmap_priv *pDst)
+{
+ bind_framebuffer_state(exa, pDst);
+ bind_viewport_state(exa, pDst);
+ bind_blend_state(exa, op, pSrcPicture, pMaskPicture);
+ bind_rasterizer_state(exa);
+ bind_shaders(exa, op, pSrcPicture, pMaskPicture);
+ bind_samplers(exa, op, pSrcPicture, pMaskPicture,
+ pDstPicture, pSrc, pMask, pDst);
+ bind_clip_state(exa);
+ setup_constant_buffers(exa, pDst);
+
+ return FALSE;
+}
+
+void xorg_composite(struct exa_context *exa,
+ struct exa_pixmap_priv *dst,
+ int srcX, int srcY, int maskX, int maskY,
+ int dstX, int dstY, int width, int height)
+{
+ struct pipe_context *pipe = exa->pipe;
+ struct pipe_buffer *buf = 0;
+
+ if (exa->num_bound_samplers == 0) { /* solid fill */
+ buf = setup_vertex_data0(exa,
+ srcX, srcY, maskX, maskY,
+ dstX, dstY, width, height);
+ } else if (exa->num_bound_samplers == 1) { /* src */
+ buf = setup_vertex_data1(exa,
+ srcX, srcY, maskX, maskY,
+ dstX, dstY, width, height);
+ } else if (exa->num_bound_samplers == 2) { /* src + mask */
+ buf = setup_vertex_data2(exa,
+ srcX, srcY, maskX, maskY,
+ dstX, dstY, width, height);
+ } else if (exa->num_bound_samplers == 3) { /* src + mask + dst */
+ debug_assert(!"src/mask/dst not handled right now");
+#if 0
+ buf = setup_vertex_data2(exa,
+ srcX, srcY, maskX, maskY,
+ dstX, dstY, width, height);
+#endif
+ }
+
+ if (buf) {
+ int num_attribs = 1; /*pos*/
+ num_attribs += exa->num_bound_samplers;
+ if (exa->has_solid_color)
+ ++num_attribs;
+
+ util_draw_vertex_buffer(pipe, buf, 0,
+ PIPE_PRIM_TRIANGLE_FAN,
+ 4, /* verts */
+ num_attribs); /* attribs/vert */
+
+ pipe_buffer_reference(&buf, NULL);
+ }
+}
+
+boolean xorg_solid_bind_state(struct exa_context *exa,
+ struct exa_pixmap_priv *pixmap,
+ Pixel fg)
+{
+ unsigned vs_traits, fs_traits;
+ struct xorg_shader shader;
+
+ pixel_to_float4(fg, exa->solid_color);
+ exa->has_solid_color = TRUE;
+
+ exa->solid_color[3] = 1.f;
+
+ debug_printf("Color Pixel=(%d, %d, %d, %d), RGBA=(%f, %f, %f, %f)\n",
+ (fg >> 24) & 0xff, (fg >> 16) & 0xff,
+ (fg >> 8) & 0xff, (fg >> 0) & 0xff,
+ exa->solid_color[0], exa->solid_color[1],
+ exa->solid_color[2], exa->solid_color[3]);
+
+#if 0
+ exa->solid_color[0] = 1.f;
+ exa->solid_color[1] = 0.f;
+ exa->solid_color[2] = 0.f;
+ exa->solid_color[3] = 1.f;
+#endif
+
+ vs_traits = VS_SOLID_FILL;
+ fs_traits = FS_SOLID_FILL;
+
+ bind_framebuffer_state(exa, pixmap);
+ bind_viewport_state(exa, pixmap);
+ bind_rasterizer_state(exa);
+ bind_blend_state(exa, PictOpSrc, NULL, NULL);
+ setup_constant_buffers(exa, pixmap);
+ bind_clip_state(exa);
+
+ shader = xorg_shaders_get(exa->shaders, vs_traits, fs_traits);
+ cso_set_vertex_shader_handle(exa->cso, shader.vs);
+ cso_set_fragment_shader_handle(exa->cso, shader.fs);
+
+ return FALSE;
+}
+
+void xorg_solid(struct exa_context *exa,
+ struct exa_pixmap_priv *pixmap,
+ int x0, int y0, int x1, int y1)
+{
+ struct pipe_context *pipe = exa->pipe;
+ struct pipe_buffer *buf = 0;
+ float vertices[4][2][4];
+
+ /* 1st vertex */
+ setup_vertex0(vertices[0], x0, y0,
+ exa->solid_color);
+ /* 2nd vertex */
+ setup_vertex0(vertices[1], x1, y0,
+ exa->solid_color);
+ /* 3rd vertex */
+ setup_vertex0(vertices[2], x1, y1,
+ exa->solid_color);
+ /* 4th vertex */
+ setup_vertex0(vertices[3], x0, y1,
+ exa->solid_color);
+
+ buf = pipe_user_buffer_create(exa->pipe->screen,
+ vertices,
+ sizeof(vertices));
+
+
+ if (buf) {
+ debug_printf("Drawing buf is %p\n", buf);
+ util_draw_vertex_buffer(pipe, buf, 0,
+ PIPE_PRIM_TRIANGLE_FAN,
+ 4, /* verts */
+ 2); /* attribs/vert */
+
+ pipe_buffer_reference(&buf, NULL);
+ }
+}
+
+
+static INLINE void shift_rectx(float coords[4],
+ const float *bounds,
+ const float shift)
+{
+ coords[0] += shift;
+ coords[2] -= shift;
+ if (bounds) {
+ coords[2] = MIN2(coords[2], bounds[2]);
+ /* bound x/y + width/height */
+ if ((coords[0] + coords[2]) > (bounds[0] + bounds[2])) {
+ coords[2] = (bounds[0] + bounds[2]) - coords[0];
+ }
+ }
+}
+
+static INLINE void shift_recty(float coords[4],
+ const float *bounds,
+ const float shift)
+{
+ coords[1] += shift;
+ coords[3] -= shift;
+ if (bounds) {
+ coords[3] = MIN2(coords[3], bounds[3]);
+ if ((coords[1] + coords[3]) > (bounds[1] + bounds[3])) {
+ coords[3] = (bounds[1] + bounds[3]) - coords[1];
+ }
+ }
+}
+
+static INLINE void bound_rect(float coords[4],
+ const float bounds[4],
+ float shift[4])
+{
+ /* if outside the bounds */
+ if (coords[0] > (bounds[0] + bounds[2]) ||
+ coords[1] > (bounds[1] + bounds[3]) ||
+ (coords[0] + coords[2]) < bounds[0] ||
+ (coords[1] + coords[3]) < bounds[1]) {
+ coords[0] = 0.f;
+ coords[1] = 0.f;
+ coords[2] = 0.f;
+ coords[3] = 0.f;
+ shift[0] = 0.f;
+ shift[1] = 0.f;
+ return;
+ }
+
+ /* bound x */
+ if (coords[0] < bounds[0]) {
+ shift[0] = bounds[0] - coords[0];
+ coords[2] -= shift[0];
+ coords[0] = bounds[0];
+ } else
+ shift[0] = 0.f;
+
+ /* bound y */
+ if (coords[1] < bounds[1]) {
+ shift[1] = bounds[1] - coords[1];
+ coords[3] -= shift[1];
+ coords[1] = bounds[1];
+ } else
+ shift[1] = 0.f;
+
+ shift[2] = bounds[2] - coords[2];
+ shift[3] = bounds[3] - coords[3];
+ /* bound width/height */
+ coords[2] = MIN2(coords[2], bounds[2]);
+ coords[3] = MIN2(coords[3], bounds[3]);
+
+ /* bound x/y + width/height */
+ if ((coords[0] + coords[2]) > (bounds[0] + bounds[2])) {
+ coords[2] = (bounds[0] + bounds[2]) - coords[0];
+ }
+ if ((coords[1] + coords[3]) > (bounds[1] + bounds[3])) {
+ coords[3] = (bounds[1] + bounds[3]) - coords[1];
+ }
+
+ /* if outside the bounds */
+ if ((coords[0] + coords[2]) < bounds[0] ||
+ (coords[1] + coords[3]) < bounds[1]) {
+ coords[0] = 0.f;
+ coords[1] = 0.f;
+ coords[2] = 0.f;
+ coords[3] = 0.f;
+ return;
+ }
+}
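Not part of the patch: a worked example of the clamping bound_rect() performs, using hypothetical values. Both coords and bounds are {x, y, width, height}; shift reports how far the rectangle's origin had to move.

    /* Illustrative values only. */
    static void
    example_bound_rect(void)
    {
       /* A 100x100 rect at (-20, 30), clamped against a 64x64 surface. */
       float rect[4]   = { -20.f, 30.f, 100.f, 100.f };
       float bounds[4] = {   0.f,  0.f,  64.f,  64.f };
       float shift[4];

       bound_rect(rect, bounds, shift);
       /* rect is now { 0, 30, 64, 34 }: x clamped to the left edge
        * (shift[0] == 20, shift[1] == 0), width limited to the 64-pixel
        * bound, and height cut so that y + height stays inside it. */
    }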
+
+static INLINE void sync_size(float *src_loc, float *dst_loc)
+{
+ src_loc[2] = MIN2(src_loc[2], dst_loc[2]);
+ src_loc[3] = MIN2(src_loc[3], dst_loc[3]);
+ dst_loc[2] = src_loc[2];
+ dst_loc[3] = src_loc[3];
+}
+
+
+static void renderer_copy_texture(struct exa_context *exa,
+ struct pipe_texture *src,
+ float sx1, float sy1,
+ float sx2, float sy2,
+ struct pipe_texture *dst,
+ float dx1, float dy1,
+ float dx2, float dy2)
+{
+ struct pipe_context *pipe = exa->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ struct pipe_buffer *buf;
+ struct pipe_surface *dst_surf = screen->get_tex_surface(
+ screen, dst, 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+ struct pipe_framebuffer_state fb;
+ float s0, t0, s1, t1;
+ struct xorg_shader shader;
+
+ assert(src->width[0] != 0);
+ assert(src->height[0] != 0);
+ assert(dst->width[0] != 0);
+ assert(dst->height[0] != 0);
+
+#if 0
+ debug_printf("copy texture [%f, %f, %f, %f], [%f, %f, %f, %f]\n",
+ sx1, sy1, sx2, sy2, dx1, dy1, dx2, dy2);
+#endif
+
+#if 1
+ s0 = sx1 / src->width[0];
+ s1 = sx2 / src->width[0];
+ t0 = sy1 / src->height[0];
+ t1 = sy2 / src->height[0];
+#else
+ s0 = 0;
+ s1 = 1;
+ t0 = 0;
+ t1 = 1;
+#endif
+
+ assert(screen->is_format_supported(screen, dst_surf->format, PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_RENDER_TARGET, 0));
+
+ /* save state (restored below) */
+ cso_save_blend(exa->cso);
+ cso_save_samplers(exa->cso);
+ cso_save_sampler_textures(exa->cso);
+ cso_save_framebuffer(exa->cso);
+ cso_save_fragment_shader(exa->cso);
+ cso_save_vertex_shader(exa->cso);
+
+ cso_save_viewport(exa->cso);
+
+
+ /* set misc state we care about */
+ {
+ struct pipe_blend_state blend;
+ memset(&blend, 0, sizeof(blend));
+ blend.rgb_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.alpha_src_factor = PIPE_BLENDFACTOR_ONE;
+ blend.rgb_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ blend.alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
+ blend.colormask = PIPE_MASK_RGBA;
+ cso_set_blend(exa->cso, &blend);
+ }
+
+ /* sampler */
+ {
+ struct pipe_sampler_state sampler;
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
+ sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
+ sampler.normalized_coords = 1;
+ cso_single_sampler(exa->cso, 0, &sampler);
+ cso_single_sampler_done(exa->cso);
+ }
+
+ set_viewport(exa, dst_surf->width, dst_surf->height, Y0_TOP);
+
+ /* texture */
+ cso_set_sampler_textures(exa->cso, 1, &src);
+
+ /* shaders */
+ shader = xorg_shaders_get(exa->shaders,
+ VS_COMPOSITE,
+ FS_COMPOSITE);
+ cso_set_vertex_shader_handle(exa->cso, shader.vs);
+ cso_set_fragment_shader_handle(exa->cso, shader.fs);
+
+ /* drawing dest */
+ memset(&fb, 0, sizeof(fb));
+ fb.width = dst_surf->width;
+ fb.height = dst_surf->height;
+ fb.nr_cbufs = 1;
+ fb.cbufs[0] = dst_surf;
+ {
+ int i;
+ for (i = 1; i < PIPE_MAX_COLOR_BUFS; ++i)
+ fb.cbufs[i] = 0;
+ }
+ cso_set_framebuffer(exa->cso, &fb);
+
+ /* draw quad */
+ buf = setup_vertex_data_tex(exa,
+ dx1, dy1,
+ dx2, dy2,
+ s0, t0, s1, t1,
+ 0.0f);
+
+ if (buf) {
+ util_draw_vertex_buffer(exa->pipe, buf, 0,
+ PIPE_PRIM_TRIANGLE_FAN,
+ 4, /* verts */
+ 2); /* attribs/vert */
+
+ pipe_buffer_reference(&buf, NULL);
+ }
+
+ /* restore state we changed */
+ cso_restore_blend(exa->cso);
+ cso_restore_samplers(exa->cso);
+ cso_restore_sampler_textures(exa->cso);
+ cso_restore_framebuffer(exa->cso);
+ cso_restore_vertex_shader(exa->cso);
+ cso_restore_fragment_shader(exa->cso);
+ cso_restore_viewport(exa->cso);
+
+ pipe_surface_reference(&dst_surf, NULL);
+}
+
+void xorg_copy_pixmap(struct exa_context *ctx,
+ struct exa_pixmap_priv *dst_priv, int dx, int dy,
+ struct exa_pixmap_priv *src_priv, int sx, int sy,
+ int width, int height)
+{
+ float dst_loc[4], src_loc[4];
+ float dst_bounds[4], src_bounds[4];
+ float src_shift[4], dst_shift[4], shift[4];
+ struct pipe_texture *dst = dst_priv->tex;
+ struct pipe_texture *src = src_priv->tex;
+
+ dst_loc[0] = dx;
+ dst_loc[1] = dy;
+ dst_loc[2] = width;
+ dst_loc[3] = height;
+ dst_bounds[0] = 0.f;
+ dst_bounds[1] = 0.f;
+ dst_bounds[2] = dst->width[0];
+ dst_bounds[3] = dst->height[0];
+
+ src_loc[0] = sx;
+ src_loc[1] = sy;
+ src_loc[2] = width;
+ src_loc[3] = height;
+ src_bounds[0] = 0.f;
+ src_bounds[1] = 0.f;
+ src_bounds[2] = src->width[0];
+ src_bounds[3] = src->height[0];
+
+ bound_rect(src_loc, src_bounds, src_shift);
+ bound_rect(dst_loc, dst_bounds, dst_shift);
+ shift[0] = src_shift[0] - dst_shift[0];
+ shift[1] = src_shift[1] - dst_shift[1];
+
+ if (shift[0] < 0)
+ shift_rectx(src_loc, src_bounds, -shift[0]);
+ else
+ shift_rectx(dst_loc, dst_bounds, shift[0]);
+
+ if (shift[1] < 0)
+ shift_recty(src_loc, src_bounds, -shift[1]);
+ else
+ shift_recty(dst_loc, dst_bounds, shift[1]);
+
+ sync_size(src_loc, dst_loc);
+
+ if (src_loc[2] >= 0 && src_loc[3] >= 0 &&
+ dst_loc[2] >= 0 && dst_loc[3] >= 0) {
+ renderer_copy_texture(ctx,
+ src,
+ src_loc[0],
+ src_loc[1] + src_loc[3],
+ src_loc[0] + src_loc[2],
+ src_loc[1],
+ dst,
+ dst_loc[0],
+ dst_loc[1] + dst_loc[3],
+ dst_loc[0] + dst_loc[2],
+ dst_loc[1]);
+ }
+}
+
diff --git a/src/gallium/state_trackers/xorg/xorg_composite.h b/src/gallium/state_trackers/xorg/xorg_composite.h
new file mode 100644
index 0000000000..e73f1c704a
--- /dev/null
+++ b/src/gallium/state_trackers/xorg/xorg_composite.h
@@ -0,0 +1,37 @@
+#ifndef XORG_COMPOSITE_H
+#define XORG_COMPOSITE_H
+
+#include "xorg_exa.h"
+
+boolean xorg_composite_accelerated(int op,
+ PicturePtr pSrcPicture,
+ PicturePtr pMaskPicture,
+ PicturePtr pDstPicture);
+
+boolean xorg_composite_bind_state(struct exa_context *exa,
+ int op,
+ PicturePtr pSrcPicture,
+ PicturePtr pMaskPicture,
+ PicturePtr pDstPicture,
+ struct exa_pixmap_priv *pSrc,
+ struct exa_pixmap_priv *pMask,
+ struct exa_pixmap_priv *pDst);
+
+void xorg_composite(struct exa_context *exa,
+ struct exa_pixmap_priv *dst,
+ int srcX, int srcY, int maskX, int maskY,
+ int dstX, int dstY, int width, int height);
+
+boolean xorg_solid_bind_state(struct exa_context *exa,
+ struct exa_pixmap_priv *pixmap,
+ Pixel fg);
+void xorg_solid(struct exa_context *exa,
+ struct exa_pixmap_priv *pixmap,
+ int x0, int y0, int x1, int y1);
+
+void xorg_copy_pixmap(struct exa_context *ctx,
+ struct exa_pixmap_priv *dst, int dx, int dy,
+ struct exa_pixmap_priv *src, int sx, int sy,
+ int width, int height);
+
+#endif
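Not part of the patch: a minimal sketch of the call order this header implies for an EXA Composite path -- check acceleration, bind state, then emit one quad per composite rectangle. Note that in this revision xorg_composite_bind_state() still returns FALSE, so callers end up taking the software fallback. The example_* names are hypothetical.

    /* Sketch only, under the assumptions stated above. */
    static Bool
    example_prepare_composite(struct exa_context *exa, int op,
                              PicturePtr pSrcPicture, PicturePtr pMaskPicture,
                              PicturePtr pDstPicture,
                              struct exa_pixmap_priv *pSrc,
                              struct exa_pixmap_priv *pMask,
                              struct exa_pixmap_priv *pDst)
    {
       if (!xorg_composite_accelerated(op, pSrcPicture, pMaskPicture, pDstPicture))
          return FALSE;

       /* Binds framebuffer, viewport, blend, rasterizer, shaders, samplers
        * and constant buffers for the xorg_composite() calls that follow. */
       return xorg_composite_bind_state(exa, op, pSrcPicture, pMaskPicture,
                                        pDstPicture, pSrc, pMask, pDst);
    }

    static void
    example_composite_rect(struct exa_context *exa, struct exa_pixmap_priv *dst,
                           int srcX, int srcY, int maskX, int maskY,
                           int dstX, int dstY, int width, int height)
    {
       xorg_composite(exa, dst, srcX, srcY, maskX, maskY,
                      dstX, dstY, width, height);
    }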
diff --git a/src/gallium/state_trackers/xorg/xorg_crtc.c b/src/gallium/state_trackers/xorg/xorg_crtc.c
new file mode 100644
index 0000000000..fe08bde9ef
--- /dev/null
+++ b/src/gallium/state_trackers/xorg/xorg_crtc.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Author: Alan Hourihane <alanh@tungstengraphics.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ */
+
+#include <unistd.h>
+#include <string.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <math.h>
+#include <stdint.h>
+
+#include "xorg-server.h"
+#include <xf86.h>
+#include <xf86i2c.h>
+#include <xf86Crtc.h>
+#include "xorg_tracker.h"
+#include "xf86Modes.h"
+
+#define DPMS_SERVER
+#include <X11/extensions/dpms.h>
+
+#include "pipe/p_inlines.h"
+#include "util/u_rect.h"
+
+struct crtc_private
+{
+ drmModeCrtcPtr drm_crtc;
+
+ /* hwcursor */
+ struct pipe_texture *cursor_tex;
+ unsigned cursor_handle;
+};
+
+static void
+crtc_dpms(xf86CrtcPtr crtc, int mode)
+{
+ //ScrnInfoPtr pScrn = crtc->scrn;
+
+ switch (mode) {
+ case DPMSModeOn:
+ case DPMSModeStandby:
+ case DPMSModeSuspend:
+ break;
+ case DPMSModeOff:
+ break;
+ }
+}
+
+static Bool
+crtc_lock(xf86CrtcPtr crtc)
+{
+ return FALSE;
+}
+
+static void
+crtc_unlock(xf86CrtcPtr crtc)
+{
+}
+
+static void
+crtc_prepare(xf86CrtcPtr crtc)
+{
+}
+
+static void
+crtc_commit(xf86CrtcPtr crtc)
+{
+}
+
+static Bool
+crtc_mode_fixup(xf86CrtcPtr crtc, DisplayModePtr mode,
+ DisplayModePtr adjusted_mode)
+{
+ return TRUE;
+}
+
+static void
+crtc_mode_set(xf86CrtcPtr crtc, DisplayModePtr mode,
+ DisplayModePtr adjusted_mode, int x, int y)
+{
+ xf86CrtcConfigPtr config = XF86_CRTC_CONFIG_PTR(crtc->scrn);
+ modesettingPtr ms = modesettingPTR(crtc->scrn);
+ xf86OutputPtr output = config->output[config->compat_output];
+ drmModeConnectorPtr drm_connector = output->driver_private;
+ struct crtc_private *crtcp = crtc->driver_private;
+ drmModeCrtcPtr drm_crtc = crtcp->drm_crtc;
+ drmModeModeInfo drm_mode;
+
+ drm_mode.clock = mode->Clock;
+ drm_mode.hdisplay = mode->HDisplay;
+ drm_mode.hsync_start = mode->HSyncStart;
+ drm_mode.hsync_end = mode->HSyncEnd;
+ drm_mode.htotal = mode->HTotal;
+ drm_mode.vdisplay = mode->VDisplay;
+ drm_mode.vsync_start = mode->VSyncStart;
+ drm_mode.vsync_end = mode->VSyncEnd;
+ drm_mode.vtotal = mode->VTotal;
+ drm_mode.flags = mode->Flags;
+ drm_mode.hskew = mode->HSkew;
+ drm_mode.vscan = mode->VScan;
+ drm_mode.vrefresh = mode->VRefresh;
+ if (!mode->name)
+ xf86SetModeDefaultName(mode);
+ strncpy(drm_mode.name, mode->name, DRM_DISPLAY_MODE_LEN);
+
+ drmModeSetCrtc(ms->fd, drm_crtc->crtc_id, ms->fb_id, x, y,
+ &drm_connector->connector_id, 1, &drm_mode);
+}
+
+#if 0
+static void
+crtc_load_lut(xf86CrtcPtr crtc)
+{
+ //ScrnInfoPtr pScrn = crtc->scrn;
+}
+#endif
+
+static void
+crtc_gamma_set(xf86CrtcPtr crtc, CARD16 * red, CARD16 * green, CARD16 * blue,
+ int size)
+{
+}
+
+static void *
+crtc_shadow_allocate(xf86CrtcPtr crtc, int width, int height)
+{
+ //ScrnInfoPtr pScrn = crtc->scrn;
+
+ return NULL;
+}
+
+static PixmapPtr
+crtc_shadow_create(xf86CrtcPtr crtc, void *data, int width, int height)
+{
+ //ScrnInfoPtr pScrn = crtc->scrn;
+
+ return NULL;
+}
+
+static void
+crtc_shadow_destroy(xf86CrtcPtr crtc, PixmapPtr rotate_pixmap, void *data)
+{
+ //ScrnInfoPtr pScrn = crtc->scrn;
+}
+
+static void
+crtc_destroy(xf86CrtcPtr crtc)
+{
+ struct crtc_private *crtcp = crtc->driver_private;
+
+ if (crtcp->cursor_tex)
+ pipe_texture_reference(&crtcp->cursor_tex, NULL);
+
+ drmModeFreeCrtc(crtcp->drm_crtc);
+ xfree(crtcp);
+}
+
+static void
+crtc_load_cursor_argb(xf86CrtcPtr crtc, CARD32 * image)
+{
+ unsigned char *ptr;
+ modesettingPtr ms = modesettingPTR(crtc->scrn);
+ struct crtc_private *crtcp = crtc->driver_private;
+ struct pipe_transfer *transfer;
+
+ if (!crtcp->cursor_tex) {
+ struct pipe_texture templat;
+ unsigned pitch;
+
+ memset(&templat, 0, sizeof(templat));
+ templat.tex_usage |= PIPE_TEXTURE_USAGE_RENDER_TARGET;
+ templat.tex_usage |= PIPE_TEXTURE_USAGE_PRIMARY;
+ templat.target = PIPE_TEXTURE_2D;
+ templat.last_level = 0;
+ templat.depth[0] = 1;
+ templat.format = PIPE_FORMAT_A8R8G8B8_UNORM;
+ templat.width[0] = 64;
+ templat.height[0] = 64;
+ pf_get_block(templat.format, &templat.block);
+
+ crtcp->cursor_tex = ms->screen->texture_create(ms->screen,
+ &templat);
+ ms->api->local_handle_from_texture(ms->api,
+ ms->screen,
+ crtcp->cursor_tex,
+ &pitch,
+ &crtcp->cursor_handle);
+ }
+
+ transfer = ms->screen->get_tex_transfer(ms->screen, crtcp->cursor_tex,
+ 0, 0, 0,
+ PIPE_TRANSFER_WRITE,
+ 0, 0, 64, 64);
+ ptr = ms->screen->transfer_map(ms->screen, transfer);
+ util_copy_rect(ptr, &crtcp->cursor_tex->block,
+ transfer->stride, 0, 0,
+ 64, 64, (void*)image, 64 * 4, 0, 0);
+ ms->screen->transfer_unmap(ms->screen, transfer);
+ ms->screen->tex_transfer_destroy(transfer);
+}
+
+static void
+crtc_set_cursor_position(xf86CrtcPtr crtc, int x, int y)
+{
+ modesettingPtr ms = modesettingPTR(crtc->scrn);
+ struct crtc_private *crtcp = crtc->driver_private;
+
+ drmModeMoveCursor(ms->fd, crtcp->drm_crtc->crtc_id, x, y);
+}
+
+static void
+crtc_show_cursor(xf86CrtcPtr crtc)
+{
+ modesettingPtr ms = modesettingPTR(crtc->scrn);
+ struct crtc_private *crtcp = crtc->driver_private;
+
+ if (crtcp->cursor_tex)
+ drmModeSetCursor(ms->fd, crtcp->drm_crtc->crtc_id,
+ crtcp->cursor_handle, 64, 64);
+}
+
+static void
+crtc_hide_cursor(xf86CrtcPtr crtc)
+{
+ modesettingPtr ms = modesettingPTR(crtc->scrn);
+ struct crtc_private *crtcp = crtc->driver_private;
+
+ drmModeSetCursor(ms->fd, crtcp->drm_crtc->crtc_id, 0, 0, 0);
+}
+
+static const xf86CrtcFuncsRec crtc_funcs = {
+ .dpms = crtc_dpms,
+ .save = NULL,
+ .restore = NULL,
+ .lock = crtc_lock,
+ .unlock = crtc_unlock,
+ .mode_fixup = crtc_mode_fixup,
+ .prepare = crtc_prepare,
+ .mode_set = crtc_mode_set,
+ .commit = crtc_commit,
+ .gamma_set = crtc_gamma_set,
+ .shadow_create = crtc_shadow_create,
+ .shadow_allocate = crtc_shadow_allocate,
+ .shadow_destroy = crtc_shadow_destroy,
+ .set_cursor_position = crtc_set_cursor_position,
+ .show_cursor = crtc_show_cursor,
+ .hide_cursor = crtc_hide_cursor,
+ .load_cursor_image = NULL, /* let's convert to argb only */
+ .set_cursor_colors = NULL, /* using argb only */
+ .load_cursor_argb = crtc_load_cursor_argb,
+ .destroy = crtc_destroy,
+};
+
+void
+crtc_cursor_destroy(xf86CrtcPtr crtc)
+{
+ struct crtc_private *crtcp = crtc->driver_private;
+
+ if (crtcp->cursor_tex) {
+ pipe_texture_reference(&crtcp->cursor_tex, NULL);
+ }
+}
+
+void
+crtc_init(ScrnInfoPtr pScrn)
+{
+ modesettingPtr ms = modesettingPTR(pScrn);
+ xf86CrtcPtr crtc;
+ drmModeResPtr res;
+ drmModeCrtcPtr drm_crtc = NULL;
+ struct crtc_private *crtcp;
+ int c;
+
+ res = drmModeGetResources(ms->fd);
+ if (res == 0) {
+ ErrorF("Failed drmModeGetResources %d\n", errno);
+ return;
+ }
+
+ for (c = 0; c < res->count_crtcs; c++) {
+ drm_crtc = drmModeGetCrtc(ms->fd, res->crtcs[c]);
+ if (!drm_crtc)
+ continue;
+
+ crtc = xf86CrtcCreate(pScrn, &crtc_funcs);
+ if (crtc == NULL)
+ goto out;
+
+ crtcp = xcalloc(1, sizeof(struct crtc_private));
+ if (!crtcp) {
+ xf86CrtcDestroy(crtc);
+ goto out;
+ }
+
+ crtcp->drm_crtc = drm_crtc;
+
+ crtc->driver_private = crtcp;
+
+ }
+
+ out:
+ drmModeFreeResources(res);
+}
+
+/* vim: set sw=4 ts=8 sts=4: */
diff --git a/src/gallium/state_trackers/xorg/xorg_dri2.c b/src/gallium/state_trackers/xorg/xorg_dri2.c
new file mode 100644
index 0000000000..8a362596c7
--- /dev/null
+++ b/src/gallium/state_trackers/xorg/xorg_dri2.c
@@ -0,0 +1,380 @@
+/*
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Author: Alan Hourihane <alanh@tungstengraphics.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ */
+
+#include "xorg-server.h"
+#include "xf86.h"
+#include "xf86_OSproc.h"
+
+#include "xorg_tracker.h"
+#include "xorg_exa.h"
+
+#include "dri2.h"
+
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+
+#include "util/u_rect.h"
+
+typedef struct {
+ PixmapPtr pPixmap;
+ struct pipe_texture *tex;
+ struct pipe_fence_handle *fence;
+} *BufferPrivatePtr;
+
+static Bool
+driDoCreateBuffer(DrawablePtr pDraw, DRI2BufferPtr buffer, unsigned int format)
+{
+ struct pipe_texture *tex = NULL;
+ ScreenPtr pScreen = pDraw->pScreen;
+ ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ struct exa_pixmap_priv *exa_priv;
+ BufferPrivatePtr private = buffer->driverPrivate;
+ PixmapPtr pPixmap;
+ unsigned stride, handle;
+
+ if (pDraw->type == DRAWABLE_PIXMAP)
+ pPixmap = (PixmapPtr) pDraw;
+ else
+ pPixmap = (*pScreen->GetWindowPixmap)((WindowPtr) pDraw);
+ exa_priv = exaGetPixmapDriverPrivate(pPixmap);
+
+ switch (buffer->attachment) {
+ default:
+ if (buffer->attachment != DRI2BufferFakeFrontLeft ||
+ pDraw->type != DRAWABLE_PIXMAP) {
+ private->pPixmap = (*pScreen->CreatePixmap)(pScreen, pDraw->width,
+ pDraw->height,
+ pDraw->depth,
+ 0);
+ }
+ break;
+ case DRI2BufferFrontLeft:
+ break;
+ case DRI2BufferStencil:
+#if defined(DRI2INFOREC_VERSION) && DRI2INFOREC_VERSION > 2
+ case DRI2BufferDepthStencil:
+ if (exa_priv->depth_stencil_tex &&
+ !pf_is_depth_stencil(exa_priv->depth_stencil_tex->format))
+ exa_priv->depth_stencil_tex = NULL;
+ /* Fall through */
+#endif
+ case DRI2BufferDepth:
+ if (exa_priv->depth_stencil_tex)
+ pipe_texture_reference(&tex, exa_priv->depth_stencil_tex);
+ else {
+ struct pipe_texture template;
+ memset(&template, 0, sizeof(template));
+ template.target = PIPE_TEXTURE_2D;
+ if (buffer->attachment == DRI2BufferDepth)
+ template.format = ms->ds_depth_bits_last ?
+ PIPE_FORMAT_X8Z24_UNORM : PIPE_FORMAT_Z24X8_UNORM;
+ else
+ template.format = ms->ds_depth_bits_last ?
+ PIPE_FORMAT_S8Z24_UNORM : PIPE_FORMAT_Z24S8_UNORM;
+ pf_get_block(template.format, &template.block);
+ template.width[0] = pDraw->width;
+ template.height[0] = pDraw->height;
+ template.depth[0] = 1;
+ template.last_level = 0;
+ template.tex_usage = PIPE_TEXTURE_USAGE_DEPTH_STENCIL |
+ PIPE_TEXTURE_USAGE_DISPLAY_TARGET;
+ tex = ms->screen->texture_create(ms->screen, &template);
+ pipe_texture_reference(&exa_priv->depth_stencil_tex, tex);
+ }
+ break;
+ }
+
+ if (!private->pPixmap) {
+ private->pPixmap = pPixmap;
+ pPixmap->refcnt++;
+ }
+
+ if (!tex) {
+ exaMoveInPixmap(private->pPixmap);
+ xorg_exa_set_shared_usage(private->pPixmap);
+ pScreen->ModifyPixmapHeader(private->pPixmap, 0, 0, 0, 0, 0, NULL);
+ tex = xorg_exa_get_texture(private->pPixmap);
+ }
+
+ if (!tex)
+ FatalError("NO TEXTURE IN DRI2\n");
+
+ ms->api->shared_handle_from_texture(ms->api, ms->screen, tex, &stride, &handle);
+
+ buffer->name = handle;
+ buffer->pitch = stride;
+ buffer->cpp = 4;
+ buffer->driverPrivate = private;
+ buffer->flags = 0; /* not tiled */
+ private->tex = tex;
+
+ return TRUE;
+}
+
+static void
+driDoDestroyBuffer(DrawablePtr pDraw, DRI2BufferPtr buffer)
+{
+ ScreenPtr pScreen = pDraw->pScreen;
+ ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ BufferPrivatePtr private = buffer->driverPrivate;
+ struct exa_pixmap_priv *exa_priv = exaGetPixmapDriverPrivate(private->pPixmap);
+
+ pipe_texture_reference(&private->tex, NULL);
+ ms->screen->fence_reference(ms->screen, &private->fence, NULL);
+ pipe_texture_reference(&exa_priv->depth_stencil_tex, NULL);
+ (*pScreen->DestroyPixmap)(private->pPixmap);
+}
+
+#if defined(DRI2INFOREC_VERSION) && DRI2INFOREC_VERSION > 2
+
+static DRI2BufferPtr
+driCreateBuffer(DrawablePtr pDraw, unsigned int attachment, unsigned int format)
+{
+ DRI2BufferPtr buffer;
+ BufferPrivatePtr private;
+
+ buffer = xcalloc(1, sizeof *buffer);
+ if (!buffer)
+ return NULL;
+
+ private = xcalloc(1, sizeof *private);
+ if (!private) {
+ goto fail;
+ }
+
+ buffer->attachment = attachment;
+ buffer->driverPrivate = private;
+
+ if (driDoCreateBuffer(pDraw, buffer, format))
+ return buffer;
+
+ xfree(private);
+fail:
+ xfree(buffer);
+ return NULL;
+}
+
+static void
+driDestroyBuffer(DrawablePtr pDraw, DRI2BufferPtr buffer)
+{
+ driDoDestroyBuffer(pDraw, buffer);
+
+ xfree(buffer->driverPrivate);
+ xfree(buffer);
+}
+
+#else /* DRI2INFOREC_VERSION <= 2 */
+
+static DRI2BufferPtr
+driCreateBuffers(DrawablePtr pDraw, unsigned int *attachments, int count)
+{
+ BufferPrivatePtr privates;
+ DRI2BufferPtr buffers;
+ int i;
+
+ buffers = xcalloc(count, sizeof *buffers);
+ if (!buffers)
+ goto fail_buffers;
+
+ privates = xcalloc(count, sizeof *privates);
+ if (!privates)
+ goto fail_privates;
+
+ for (i = 0; i < count; i++) {
+ buffers[i].attachment = attachments[i];
+ buffers[i].driverPrivate = &privates[i];
+
+ if (!driDoCreateBuffer(pDraw, &buffers[i], 0))
+ goto fail;
+ }
+
+ return buffers;
+
+fail:
+ xfree(privates);
+fail_privates:
+ xfree(buffers);
+fail_buffers:
+ return NULL;
+}
+
+static void
+driDestroyBuffers(DrawablePtr pDraw, DRI2BufferPtr buffers, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ driDoDestroyBuffer(pDraw, &buffers[i]);
+ }
+
+ if (buffers) {
+ xfree(buffers[0].driverPrivate);
+ xfree(buffers);
+ }
+}
+
+#endif /* DRI2INFOREC_VERSION */
+
+static void
+driCopyRegion(DrawablePtr pDraw, RegionPtr pRegion,
+ DRI2BufferPtr pDestBuffer, DRI2BufferPtr pSrcBuffer)
+{
+ ScreenPtr pScreen = pDraw->pScreen;
+ ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ BufferPrivatePtr dst_priv = pDestBuffer->driverPrivate;
+ BufferPrivatePtr src_priv = pSrcBuffer->driverPrivate;
+ PixmapPtr src_pixmap;
+ PixmapPtr dst_pixmap;
+ GCPtr gc;
+ RegionPtr copy_clip;
+
+ /*
+ * In driCreateBuffers we dewrap windows into the
+ * backing pixmaps in order to get to the texture.
+ * We need to use the real drawable in CopyArea
+ * so that cliprects and offsets are correct.
+ */
+ src_pixmap = src_priv->pPixmap;
+ dst_pixmap = dst_priv->pPixmap;
+ if (pSrcBuffer->attachment == DRI2BufferFrontLeft)
+ src_pixmap = (PixmapPtr)pDraw;
+ if (pDestBuffer->attachment == DRI2BufferFrontLeft)
+ dst_pixmap = (PixmapPtr)pDraw;
+
+ /*
+ * The client implements glXWaitX as a copy from front to fake and then
+ * waits for the server to signal that the copy has completed, while
+ * glXWaitGL is a client-side flush plus a copy from fake to front.
+ * That is how the DRI2 protocol describes it; however, depending on the
+ * drawable type the server does things a bit differently from what the
+ * protocol says, since the fake and front can be the same:
+ *
+ * for pixmaps, glXWaitX is a server flush.
+ * for pixmaps, glXWaitGL is a client flush.
+ * for windows, glXWaitX is a copy from front to fake, then a server flush.
+ * for windows, glXWaitGL is a client flush, then a copy from fake to front.
+ *
+ * XXX in the window case this code always flushes; that is not strictly
+ * required for glXWaitGL, but we cannot tell whether this is a glXWaitGL
+ * or a glFlush/glFinish call.
+ */
+ if (dst_pixmap == src_pixmap) {
+ /* pixmap glXWaitX */
+ if (pSrcBuffer->attachment == DRI2BufferFrontLeft &&
+ pDestBuffer->attachment == DRI2BufferFakeFrontLeft) {
+ ms->ctx->flush(ms->ctx, PIPE_FLUSH_SWAPBUFFERS, NULL);
+ return;
+ }
+ /* pixmap glXWaitGL */
+ if (pDestBuffer->attachment == DRI2BufferFrontLeft &&
+ pSrcBuffer->attachment == DRI2BufferFakeFrontLeft) {
+ return;
+ } else {
+ xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
+ "copying between the same pixmap\n");
+ }
+ }
+
+ gc = GetScratchGC(pDraw->depth, pScreen);
+ copy_clip = REGION_CREATE(pScreen, NULL, 0);
+ REGION_COPY(pScreen, copy_clip, pRegion);
+ (*gc->funcs->ChangeClip) (gc, CT_REGION, copy_clip, 0);
+ ValidateGC(&dst_pixmap->drawable, gc);
+
+ /* If this is a full buffer swap, throttle on the previous one */
+ if (dst_priv->fence && REGION_NUM_RECTS(pRegion) == 1) {
+ BoxPtr extents = REGION_EXTENTS(pScreen, pRegion);
+
+ if (extents->x1 == 0 && extents->y1 == 0 &&
+ extents->x2 == pDraw->width && extents->y2 == pDraw->height) {
+ ms->screen->fence_finish(ms->screen, dst_priv->fence, 0);
+ ms->screen->fence_reference(ms->screen, &dst_priv->fence, NULL);
+ }
+ }
+
+ (*gc->ops->CopyArea)(&src_pixmap->drawable, &dst_pixmap->drawable, gc,
+ 0, 0, pDraw->width, pDraw->height, 0, 0);
+
+ FreeScratchGC(gc);
+
+ ms->ctx->flush(ms->ctx, PIPE_FLUSH_SWAPBUFFERS,
+ pDestBuffer->attachment == DRI2BufferFrontLeft ?
+ &dst_priv->fence : NULL);
+}
+
+Bool
+driScreenInit(ScreenPtr pScreen)
+{
+ ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ DRI2InfoRec dri2info;
+
+#if defined(DRI2INFOREC_VERSION)
+ dri2info.version = DRI2INFOREC_VERSION;
+#else
+ dri2info.version = 1;
+#endif
+ dri2info.fd = ms->fd;
+
+ dri2info.driverName = pScrn->driverName;
+ dri2info.deviceName = "/dev/dri/card0"; /* FIXME */
+
+#if defined(DRI2INFOREC_VERSION) && DRI2INFOREC_VERSION > 2
+ dri2info.CreateBuffer = driCreateBuffer;
+ dri2info.DestroyBuffer = driDestroyBuffer;
+#else
+ dri2info.CreateBuffers = driCreateBuffers;
+ dri2info.DestroyBuffers = driDestroyBuffers;
+#endif
+ dri2info.CopyRegion = driCopyRegion;
+ dri2info.Wait = NULL;
+
+ ms->d_depth_bits_last =
+ ms->screen->is_format_supported(ms->screen, PIPE_FORMAT_X8Z24_UNORM,
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_DEPTH_STENCIL, 0);
+ ms->ds_depth_bits_last =
+ ms->screen->is_format_supported(ms->screen, PIPE_FORMAT_S8Z24_UNORM,
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_USAGE_DEPTH_STENCIL, 0);
+
+ return DRI2ScreenInit(pScreen, &dri2info);
+}
+
+void
+driCloseScreen(ScreenPtr pScreen)
+{
+ DRI2CloseScreen(pScreen);
+}
+
+/* vim: set sw=4 ts=8 sts=4: */
diff --git a/src/gallium/state_trackers/xorg/xorg_driver.c b/src/gallium/state_trackers/xorg/xorg_driver.c
new file mode 100644
index 0000000000..643b6b3b9e
--- /dev/null
+++ b/src/gallium/state_trackers/xorg/xorg_driver.c
@@ -0,0 +1,744 @@
+/*
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Author: Alan Hourihane <alanh@tungstengraphics.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ */
+
+
+#include "xorg-server.h"
+#include "xf86.h"
+#include "xf86_OSproc.h"
+#include "compiler.h"
+#include "xf86PciInfo.h"
+#include "xf86Pci.h"
+#include "mipointer.h"
+#include "micmap.h"
+#include <X11/extensions/randr.h>
+#include "fb.h"
+#include "edid.h"
+#include "xf86i2c.h"
+#include "xf86Crtc.h"
+#include "miscstruct.h"
+#include "dixstruct.h"
+#include "xf86xv.h"
+#include <X11/extensions/Xv.h>
+#ifndef XSERVER_LIBPCIACCESS
+#error "libpciaccess needed"
+#endif
+
+#include <pciaccess.h>
+
+#include "pipe/p_context.h"
+#include "xorg_tracker.h"
+#include "xorg_winsys.h"
+
+static void AdjustFrame(int scrnIndex, int x, int y, int flags);
+static Bool CloseScreen(int scrnIndex, ScreenPtr pScreen);
+static Bool EnterVT(int scrnIndex, int flags);
+static Bool SaveHWState(ScrnInfoPtr pScrn);
+static Bool RestoreHWState(ScrnInfoPtr pScrn);
+
+
+static ModeStatus ValidMode(int scrnIndex, DisplayModePtr mode, Bool verbose,
+ int flags);
+static void FreeScreen(int scrnIndex, int flags);
+static void LeaveVT(int scrnIndex, int flags);
+static Bool SwitchMode(int scrnIndex, DisplayModePtr mode, int flags);
+static Bool ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc,
+ char **argv);
+static Bool PreInit(ScrnInfoPtr pScrn, int flags);
+
+typedef enum
+{
+ OPTION_SW_CURSOR,
+} modesettingOpts;
+
+static const OptionInfoRec Options[] = {
+ {OPTION_SW_CURSOR, "SWcursor", OPTV_BOOLEAN, {0}, FALSE},
+ {-1, NULL, OPTV_NONE, {0}, FALSE}
+};
+
+/*
+ * Exported Xorg driver functions to winsys
+ */
+
+const OptionInfoRec *
+xorg_tracker_available_options(int chipid, int busid)
+{
+ return Options;
+}
+
+void
+xorg_tracker_set_functions(ScrnInfoPtr scrn)
+{
+ scrn->PreInit = PreInit;
+ scrn->ScreenInit = ScreenInit;
+ scrn->SwitchMode = SwitchMode;
+ scrn->AdjustFrame = AdjustFrame;
+ scrn->EnterVT = EnterVT;
+ scrn->LeaveVT = LeaveVT;
+ scrn->FreeScreen = FreeScreen;
+ scrn->ValidMode = ValidMode;
+}
+
+/*
+ * Static Xorg functions
+ */
+
+static Bool
+GetRec(ScrnInfoPtr pScrn)
+{
+ if (pScrn->driverPrivate)
+ return TRUE;
+
+ pScrn->driverPrivate = xnfcalloc(sizeof(modesettingRec), 1);
+
+ return TRUE;
+}
+
+static void
+FreeRec(ScrnInfoPtr pScrn)
+{
+ if (!pScrn)
+ return;
+
+ if (!pScrn->driverPrivate)
+ return;
+
+ xfree(pScrn->driverPrivate);
+
+ pScrn->driverPrivate = NULL;
+}
+
+static void
+ProbeDDC(ScrnInfoPtr pScrn, int index)
+{
+ ConfiguredMonitor = NULL;
+}
+
+static Bool
+CreateFrontBuffer(ScrnInfoPtr pScrn)
+{
+ modesettingPtr ms = modesettingPTR(pScrn);
+ ScreenPtr pScreen = pScrn->pScreen;
+ PixmapPtr rootPixmap = pScreen->GetScreenPixmap(pScreen);
+ unsigned handle, stride;
+
+ ms->noEvict = TRUE;
+ xorg_exa_set_displayed_usage(rootPixmap);
+ pScreen->ModifyPixmapHeader(rootPixmap,
+ pScrn->virtualX, pScrn->virtualY,
+ pScrn->depth, pScrn->bitsPerPixel,
+ pScrn->displayWidth * pScrn->bitsPerPixel / 8,
+ NULL);
+ ms->noEvict = FALSE;
+
+ handle = xorg_exa_get_pixmap_handle(rootPixmap, &stride);
+
+ drmModeAddFB(ms->fd,
+ pScrn->virtualX,
+ pScrn->virtualY,
+ pScrn->depth,
+ pScrn->bitsPerPixel,
+ stride,
+ handle,
+ &ms->fb_id);
+
+ pScrn->frameX0 = 0;
+ pScrn->frameY0 = 0;
+ AdjustFrame(pScrn->scrnIndex, pScrn->frameX0, pScrn->frameY0, 0);
+
+ return TRUE;
+}
+
+static Bool
+crtc_resize(ScrnInfoPtr pScrn, int width, int height)
+{
+ modesettingPtr ms = modesettingPTR(pScrn);
+ //ScreenPtr pScreen = pScrn->pScreen;
+ //PixmapPtr rootPixmap = pScreen->GetScreenPixmap(pScreen);
+ //Bool fbAccessDisabled;
+ //CARD8 *fbstart;
+
+ if (width == pScrn->virtualX && height == pScrn->virtualY)
+ return TRUE;
+
+ ErrorF("RESIZING TO %dx%d\n", width, height);
+
+ pScrn->virtualX = width;
+ pScrn->virtualY = height;
+
+ /* HW dependent - FIXME */
+ pScrn->displayWidth = pScrn->virtualX;
+
+ drmModeRmFB(ms->fd, ms->fb_id);
+
+ /* now create new frontbuffer */
+ return CreateFrontBuffer(pScrn);
+}
+
+static const xf86CrtcConfigFuncsRec crtc_config_funcs = {
+ crtc_resize
+};
+
+static Bool
+PreInit(ScrnInfoPtr pScrn, int flags)
+{
+ xf86CrtcConfigPtr xf86_config;
+ modesettingPtr ms;
+ rgb defaultWeight = { 0, 0, 0 };
+ EntityInfoPtr pEnt;
+ EntPtr msEnt = NULL;
+ char *BusID;
+ int max_width, max_height;
+
+ if (pScrn->numEntities != 1)
+ return FALSE;
+
+ pEnt = xf86GetEntityInfo(pScrn->entityList[0]);
+
+ if (flags & PROBE_DETECT) {
+ ProbeDDC(pScrn, pEnt->index);
+ return TRUE;
+ }
+
+ /* Allocate driverPrivate */
+ if (!GetRec(pScrn))
+ return FALSE;
+
+ ms = modesettingPTR(pScrn);
+ ms->SaveGeneration = -1;
+ ms->pEnt = pEnt;
+
+ pScrn->displayWidth = 640; /* default it */
+
+ if (ms->pEnt->location.type != BUS_PCI)
+ return FALSE;
+
+ ms->PciInfo = xf86GetPciInfoForEntity(ms->pEnt->index);
+
+ /* Allocate an entity private if necessary */
+ if (xf86IsEntityShared(pScrn->entityList[0])) {
+ FatalError("Entity");
+#if 0
+ msEnt = xf86GetEntityPrivate(pScrn->entityList[0],
+ modesettingEntityIndex)->ptr;
+ ms->entityPrivate = msEnt;
+#else
+ (void)msEnt;
+#endif
+ } else
+ ms->entityPrivate = NULL;
+
+ if (xf86IsEntityShared(pScrn->entityList[0])) {
+ if (xf86IsPrimInitDone(pScrn->entityList[0])) {
+ /* do something */
+ } else {
+ xf86SetPrimInitDone(pScrn->entityList[0]);
+ }
+ }
+
+ BusID = xalloc(64);
+ sprintf(BusID, "PCI:%d:%d:%d",
+ ((ms->PciInfo->domain << 8) | ms->PciInfo->bus),
+ ms->PciInfo->dev, ms->PciInfo->func
+ );
+
+ ms->api = drm_api_create();
+ ms->fd = drmOpen(NULL, BusID);
+
+ if (ms->fd < 0)
+ return FALSE;
+
+ pScrn->monitor = pScrn->confScreen->monitor;
+ pScrn->progClock = TRUE;
+ pScrn->rgbBits = 8;
+
+ if (!xf86SetDepthBpp
+ (pScrn, 0, 0, 0,
+ PreferConvert24to32 | SupportConvert24to32 | Support32bppFb))
+ return FALSE;
+
+ switch (pScrn->depth) {
+ case 15:
+ case 16:
+ case 24:
+ break;
+ default:
+ xf86DrvMsg(pScrn->scrnIndex, X_ERROR,
+ "Given depth (%d) is not supported by the driver\n",
+ pScrn->depth);
+ return FALSE;
+ }
+ xf86PrintDepthBpp(pScrn);
+
+ if (!xf86SetWeight(pScrn, defaultWeight, defaultWeight))
+ return FALSE;
+ if (!xf86SetDefaultVisual(pScrn, -1))
+ return FALSE;
+
+ /* Process the options */
+ xf86CollectOptions(pScrn, NULL);
+ if (!(ms->Options = xalloc(sizeof(Options))))
+ return FALSE;
+ memcpy(ms->Options, Options, sizeof(Options));
+ xf86ProcessOptions(pScrn->scrnIndex, pScrn->options, ms->Options);
+
+ /* Allocate an xf86CrtcConfig */
+ xf86CrtcConfigInit(pScrn, &crtc_config_funcs);
+ xf86_config = XF86_CRTC_CONFIG_PTR(pScrn);
+
+ max_width = 8192;
+ max_height = 8192;
+ xf86CrtcSetSizeRange(pScrn, 320, 200, max_width, max_height);
+
+ if (xf86ReturnOptValBool(ms->Options, OPTION_SW_CURSOR, FALSE)) {
+ ms->SWCursor = TRUE;
+ }
+
+ SaveHWState(pScrn);
+
+ crtc_init(pScrn);
+ output_init(pScrn);
+
+ if (!xf86InitialConfiguration(pScrn, TRUE)) {
+ xf86DrvMsg(pScrn->scrnIndex, X_ERROR, "No valid modes.\n");
+ RestoreHWState(pScrn);
+ return FALSE;
+ }
+
+ RestoreHWState(pScrn);
+
+ /*
+ * If the driver can do gamma correction, it should call xf86SetGamma() here.
+ */
+ {
+ Gamma zeros = { 0.0, 0.0, 0.0 };
+
+ if (!xf86SetGamma(pScrn, zeros)) {
+ return FALSE;
+ }
+ }
+
+ if (pScrn->modes == NULL) {
+ xf86DrvMsg(pScrn->scrnIndex, X_ERROR, "No modes.\n");
+ return FALSE;
+ }
+
+ pScrn->currentMode = pScrn->modes;
+
+ /* Set display resolution */
+ xf86SetDpi(pScrn, 0, 0);
+
+ /* Load the required sub modules */
+ if (!xf86LoadSubModule(pScrn, "fb")) {
+ return FALSE;
+ }
+
+ xf86LoadSubModule(pScrn, "exa");
+
+#ifdef DRI2
+ xf86LoadSubModule(pScrn, "dri2");
+#endif
+
+ return TRUE;
+}
+
+static Bool
+SaveHWState(ScrnInfoPtr pScrn)
+{
+ /*xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(pScrn);*/
+
+ return TRUE;
+}
+
+static Bool
+RestoreHWState(ScrnInfoPtr pScrn)
+{
+ /*xf86CrtcConfigPtr config = XF86_CRTC_CONFIG_PTR(pScrn);*/
+
+ return TRUE;
+}
+
+static void xorgBlockHandler(int i, pointer blockData, pointer pTimeout,
+ pointer pReadmask)
+{
+ ScreenPtr pScreen = screenInfo.screens[i];
+ modesettingPtr ms = modesettingPTR(xf86Screens[pScreen->myNum]);
+
+ pScreen->BlockHandler = ms->blockHandler;
+ pScreen->BlockHandler(i, blockData, pTimeout, pReadmask);
+ pScreen->BlockHandler = xorgBlockHandler;
+
+ ms->ctx->flush(ms->ctx, PIPE_FLUSH_RENDER_CACHE, NULL);
+
+#ifdef DRM_MODE_FEATURE_DIRTYFB
+ {
+ RegionPtr dirty = DamageRegion(ms->damage);
+ unsigned num_cliprects = REGION_NUM_RECTS(dirty);
+
+ if (num_cliprects) {
+ drmModeClip *clip = alloca(num_cliprects * sizeof(drmModeClip));
+ BoxPtr rect = REGION_RECTS(dirty);
+ int i;
+
+ for (i = 0; i < num_cliprects; i++, rect++) {
+ clip[i].x = rect->x1;
+ clip[i].y = rect->y1;
+ clip[i].width = rect->x2 - rect->x1;
+ clip[i].height = rect->y2 - rect->y1;
+ }
+
+ /* TODO query connector property to see if this is needed */
+ drmModeDirtyFB(ms->fd, ms->fb_id, clip, num_cliprects);
+
+ DamageEmpty(ms->damage);
+ }
+ }
+#endif
+}
+
+static Bool
+CreateScreenResources(ScreenPtr pScreen)
+{
+ ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ PixmapPtr rootPixmap;
+ Bool ret;
+ unsigned handle, stride;
+
+ ms->noEvict = TRUE;
+
+ pScreen->CreateScreenResources = ms->createScreenResources;
+ ret = pScreen->CreateScreenResources(pScreen);
+ pScreen->CreateScreenResources = CreateScreenResources;
+
+ rootPixmap = pScreen->GetScreenPixmap(pScreen);
+
+ xorg_exa_set_displayed_usage(rootPixmap);
+ xorg_exa_set_shared_usage(rootPixmap);
+ if (!pScreen->ModifyPixmapHeader(rootPixmap, -1, -1, -1, -1, -1, NULL))
+ FatalError("Couldn't adjust screen pixmap\n");
+
+ ms->noEvict = FALSE;
+
+ handle = xorg_exa_get_pixmap_handle(rootPixmap, &stride);
+
+ drmModeAddFB(ms->fd,
+ pScrn->virtualX,
+ pScrn->virtualY,
+ pScrn->depth,
+ pScrn->bitsPerPixel,
+ stride,
+ handle,
+ &ms->fb_id);
+
+ AdjustFrame(pScrn->scrnIndex, pScrn->frameX0, pScrn->frameY0, 0);
+
+#ifdef DRM_MODE_FEATURE_DIRTYFB
+ ms->damage = DamageCreate(NULL, NULL, DamageReportNone, TRUE,
+ pScreen, rootPixmap);
+
+ if (ms->damage) {
+ DamageRegister(&rootPixmap->drawable, ms->damage);
+
+ xf86DrvMsg(pScrn->scrnIndex, X_INFO, "Damage tracking initialized\n");
+ } else {
+ xf86DrvMsg(pScrn->scrnIndex, X_ERROR,
+ "Failed to create screen damage record\n");
+ return FALSE;
+ }
+#endif
+
+ return ret;
+}
+
+static Bool
+ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
+{
+ ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ VisualPtr visual;
+
+ /* deal with server regeneration */
+ if (ms->fd < 0) {
+ char *BusID;
+
+ BusID = xalloc(64);
+ sprintf(BusID, "PCI:%d:%d:%d",
+ ((ms->PciInfo->domain << 8) | ms->PciInfo->bus),
+ ms->PciInfo->dev, ms->PciInfo->func
+ );
+
+ ms->fd = drmOpen(NULL, BusID);
+
+ if (ms->fd < 0)
+ return FALSE;
+ }
+
+ if (!ms->screen) {
+ ms->screen = ms->api->create_screen(ms->api, ms->fd, NULL);
+
+ if (!ms->screen) {
+ FatalError("Could not init pipe_screen\n");
+ return FALSE;
+ }
+ }
+
+ pScrn->pScreen = pScreen;
+
+ /* HW dependent - FIXME */
+ pScrn->displayWidth = pScrn->virtualX;
+
+ miClearVisualTypes();
+
+ if (!miSetVisualTypes(pScrn->depth,
+ miGetDefaultVisualMask(pScrn->depth),
+ pScrn->rgbBits, pScrn->defaultVisual))
+ return FALSE;
+
+ if (!miSetPixmapDepths())
+ return FALSE;
+
+ pScrn->memPhysBase = 0;
+ pScrn->fbOffset = 0;
+
+ if (!fbScreenInit(pScreen, NULL,
+ pScrn->virtualX, pScrn->virtualY,
+ pScrn->xDpi, pScrn->yDpi,
+ pScrn->displayWidth, pScrn->bitsPerPixel))
+ return FALSE;
+
+ if (pScrn->bitsPerPixel > 8) {
+ /* Fixup RGB ordering */
+ visual = pScreen->visuals + pScreen->numVisuals;
+ while (--visual >= pScreen->visuals) {
+ if ((visual->class | DynamicClass) == DirectColor) {
+ visual->offsetRed = pScrn->offset.red;
+ visual->offsetGreen = pScrn->offset.green;
+ visual->offsetBlue = pScrn->offset.blue;
+ visual->redMask = pScrn->mask.red;
+ visual->greenMask = pScrn->mask.green;
+ visual->blueMask = pScrn->mask.blue;
+ }
+ }
+ }
+
+ fbPictureInit(pScreen, NULL, 0);
+
+ ms->blockHandler = pScreen->BlockHandler;
+ pScreen->BlockHandler = xorgBlockHandler;
+ ms->createScreenResources = pScreen->CreateScreenResources;
+ pScreen->CreateScreenResources = CreateScreenResources;
+
+ xf86SetBlackWhitePixels(pScreen);
+
+ ms->exa = xorg_exa_init(pScrn);
+
+ miInitializeBackingStore(pScreen);
+ xf86SetBackingStore(pScreen);
+ xf86SetSilkenMouse(pScreen);
+ miDCInitialize(pScreen, xf86GetPointerScreenFuncs());
+
+ /* Need to extend HWcursor support to handle mask interleave */
+ if (!ms->SWCursor)
+ xf86_cursors_init(pScreen, 64, 64,
+ HARDWARE_CURSOR_SOURCE_MASK_INTERLEAVE_64 |
+ HARDWARE_CURSOR_ARGB);
+
+ /* Must force it before EnterVT, so we are in control of VT and
+ * later memory should be bound when allocating, e.g rotate_mem */
+ pScrn->vtSema = TRUE;
+
+ pScreen->SaveScreen = xf86SaveScreen;
+ ms->CloseScreen = pScreen->CloseScreen;
+ pScreen->CloseScreen = CloseScreen;
+
+ if (!xf86CrtcScreenInit(pScreen))
+ return FALSE;
+
+ if (!miCreateDefColormap(pScreen))
+ return FALSE;
+
+ xf86DPMSInit(pScreen, xf86DPMSSet, 0);
+
+ if (serverGeneration == 1)
+ xf86ShowUnusedOptions(pScrn->scrnIndex, pScrn->options);
+
+#if 1
+#ifdef DRI2
+ driScreenInit(pScreen);
+#endif
+#endif
+
+ return EnterVT(scrnIndex, 1);
+}
+
+static void
+AdjustFrame(int scrnIndex, int x, int y, int flags)
+{
+ ScrnInfoPtr pScrn = xf86Screens[scrnIndex];
+ xf86CrtcConfigPtr config = XF86_CRTC_CONFIG_PTR(pScrn);
+ xf86OutputPtr output = config->output[config->compat_output];
+ xf86CrtcPtr crtc = output->crtc;
+
+ if (crtc && crtc->enabled) {
+ crtc->funcs->mode_set(crtc, pScrn->currentMode, pScrn->currentMode, x,
+ y);
+ crtc->x = output->initial_x + x;
+ crtc->y = output->initial_y + y;
+ }
+}
+
+static void
+FreeScreen(int scrnIndex, int flags)
+{
+ FreeRec(xf86Screens[scrnIndex]);
+}
+
+static void
+LeaveVT(int scrnIndex, int flags)
+{
+ ScrnInfoPtr pScrn = xf86Screens[scrnIndex];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ xf86CrtcConfigPtr config = XF86_CRTC_CONFIG_PTR(pScrn);
+ int o;
+
+ for (o = 0; o < config->num_crtc; o++) {
+ xf86CrtcPtr crtc = config->crtc[o];
+
+ crtc_cursor_destroy(crtc);
+
+ if (crtc->rotatedPixmap || crtc->rotatedData) {
+ crtc->funcs->shadow_destroy(crtc, crtc->rotatedPixmap,
+ crtc->rotatedData);
+ crtc->rotatedPixmap = NULL;
+ crtc->rotatedData = NULL;
+ }
+ }
+
+ drmModeRmFB(ms->fd, ms->fb_id);
+
+ RestoreHWState(pScrn);
+
+ if (drmDropMaster(ms->fd))
+ xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
+ "drmDropMaster failed: %s\n", strerror(errno));
+
+ pScrn->vtSema = FALSE;
+}
+
+/*
+ * This gets called when gaining control of the VT, and from ScreenInit().
+ */
+static Bool
+EnterVT(int scrnIndex, int flags)
+{
+ ScrnInfoPtr pScrn = xf86Screens[scrnIndex];
+ modesettingPtr ms = modesettingPTR(pScrn);
+
+ if (drmSetMaster(ms->fd)) {
+ if (errno == EINVAL) {
+ xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
+ "drmSetMaster failed: 2.6.29 or newer kernel required for "
+ "multi-server DRI\n");
+ } else {
+ xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
+ "drmSetMaster failed: %s\n", strerror(errno));
+ }
+ }
+
+ /*
+ * Only save state once per server generation since that's what most
+ * drivers do. Could change this to save state at each VT enter.
+ */
+ if (ms->SaveGeneration != serverGeneration) {
+ ms->SaveGeneration = serverGeneration;
+ SaveHWState(pScrn);
+ }
+
+ if (!flags) /* signals startup as we'll do this in CreateScreenResources */
+ CreateFrontBuffer(pScrn);
+
+ if (!xf86SetDesiredModes(pScrn))
+ return FALSE;
+
+ return TRUE;
+}
+
+static Bool
+SwitchMode(int scrnIndex, DisplayModePtr mode, int flags)
+{
+ ScrnInfoPtr pScrn = xf86Screens[scrnIndex];
+
+ return xf86SetSingleMode(pScrn, mode, RR_Rotate_0);
+}
+
+static Bool
+CloseScreen(int scrnIndex, ScreenPtr pScreen)
+{
+ ScrnInfoPtr pScrn = xf86Screens[scrnIndex];
+ modesettingPtr ms = modesettingPTR(pScrn);
+
+ if (pScrn->vtSema) {
+ LeaveVT(scrnIndex, 0);
+ }
+#ifdef DRI2
+ driCloseScreen(pScreen);
+#endif
+
+ pScreen->BlockHandler = ms->blockHandler;
+ pScreen->CreateScreenResources = ms->createScreenResources;
+
+#ifdef DRM_MODE_FEATURE_DIRTYFB
+ if (ms->damage) {
+ DamageUnregister(&pScreen->GetScreenPixmap(pScreen)->drawable, ms->damage);
+ DamageDestroy(ms->damage);
+ ms->damage = NULL;
+ }
+#endif
+
+ if (ms->exa)
+ xorg_exa_close(pScrn);
+
+ ms->api->destroy(ms->api);
+ ms->api = NULL;
+ drmClose(ms->fd);
+ ms->fd = -1;
+
+ pScrn->vtSema = FALSE;
+ pScreen->CloseScreen = ms->CloseScreen;
+ return (*pScreen->CloseScreen) (scrnIndex, pScreen);
+}
+
+static ModeStatus
+ValidMode(int scrnIndex, DisplayModePtr mode, Bool verbose, int flags)
+{
+ return MODE_OK;
+}
+
+/* vim: set sw=4 ts=8 sts=4: */
diff --git a/src/gallium/state_trackers/xorg/xorg_exa.c b/src/gallium/state_trackers/xorg/xorg_exa.c
new file mode 100644
index 0000000000..312dab1544
--- /dev/null
+++ b/src/gallium/state_trackers/xorg/xorg_exa.c
@@ -0,0 +1,771 @@
+/*
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Author: Alan Hourihane <alanh@tungstengraphics.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ */
+
+#include "xorg_exa.h"
+#include "xorg_tracker.h"
+#include "xorg_composite.h"
+#include "xorg_exa_tgsi.h"
+
+#include <xorg-server.h>
+#include <xf86.h>
+#include <picturestr.h>
+#include <picture.h>
+
+#include "pipe/p_format.h"
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+
+#include "cso_cache/cso_context.h"
+
+#include "util/u_rect.h"
+
+/*
+ * Helper functions
+ */
+
+static void
+exa_get_pipe_format(int depth, enum pipe_format *format, int *bbp)
+{
+ switch (depth) {
+ case 32:
+ *format = PIPE_FORMAT_A8R8G8B8_UNORM;
+ assert(*bbp == 32);
+ break;
+ case 24:
+ *format = PIPE_FORMAT_X8R8G8B8_UNORM;
+ assert(*bbp == 32);
+ break;
+ case 16:
+ *format = PIPE_FORMAT_R5G6B5_UNORM;
+ assert(*bbp == 16);
+ break;
+ case 15:
+ *format = PIPE_FORMAT_A1R5G5B5_UNORM;
+ assert(*bbp == 16);
+ break;
+ case 8:
+ case 4:
+ case 1:
+ *format = PIPE_FORMAT_A8R8G8B8_UNORM; /* bad bad bad */
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
+
+static void
+xorg_exa_init_state(struct exa_context *exa)
+{
+ struct pipe_depth_stencil_alpha_state dsa;
+
+ /* set common initial clip state */
+ memset(&dsa, 0, sizeof(struct pipe_depth_stencil_alpha_state));
+ cso_set_depth_stencil_alpha(exa->cso, &dsa);
+}
+
+static void
+xorg_exa_common_done(struct exa_context *exa)
+{
+ exa->copy.src = NULL;
+ exa->copy.dst = NULL;
+ exa->has_solid_color = FALSE;
+ exa->num_bound_samplers = 0;
+}
+
+/*
+ * Static exported EXA functions
+ */
+
+static void
+ExaWaitMarker(ScreenPtr pScreen, int marker)
+{
+}
+
+static int
+ExaMarkSync(ScreenPtr pScreen)
+{
+ return 1;
+}
+
+static Bool
+ExaDownloadFromScreen(PixmapPtr pPix, int x, int y, int w, int h, char *dst,
+ int dst_pitch)
+{
+ ScreenPtr pScreen = pPix->drawable.pScreen;
+ ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ struct exa_context *exa = ms->exa;
+ struct exa_pixmap_priv *priv = exaGetPixmapDriverPrivate(pPix);
+ struct pipe_transfer *transfer;
+
+ if (!priv || !priv->tex)
+ return FALSE;
+
+ if (exa->pipe->is_texture_referenced(exa->pipe, priv->tex, 0, 0) &
+ PIPE_REFERENCED_FOR_WRITE)
+ exa->pipe->flush(exa->pipe, 0, NULL);
+
+ transfer = exa->scrn->get_tex_transfer(exa->scrn, priv->tex, 0, 0, 0,
+ PIPE_TRANSFER_READ, x, y, w, h);
+ if (!transfer)
+ return FALSE;
+
+ util_copy_rect((unsigned char*)dst, &priv->tex->block, dst_pitch, 0, 0,
+ w, h, exa->scrn->transfer_map(exa->scrn, transfer),
+ transfer->stride, 0, 0);
+
+ exa->scrn->transfer_unmap(exa->scrn, transfer);
+ exa->scrn->tex_transfer_destroy(transfer);
+
+ return TRUE;
+}
+
+static Bool
+ExaUploadToScreen(PixmapPtr pPix, int x, int y, int w, int h, char *src,
+ int src_pitch)
+{
+ ScreenPtr pScreen = pPix->drawable.pScreen;
+ ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ struct exa_context *exa = ms->exa;
+ struct exa_pixmap_priv *priv = exaGetPixmapDriverPrivate(pPix);
+ struct pipe_transfer *transfer;
+
+ if (!priv || !priv->tex)
+ return FALSE;
+
+ transfer = exa->scrn->get_tex_transfer(exa->scrn, priv->tex, 0, 0, 0,
+ PIPE_TRANSFER_WRITE, x, y, w, h);
+ if (!transfer)
+ return FALSE;
+
+ util_copy_rect(exa->scrn->transfer_map(exa->scrn, transfer),
+ &priv->tex->block, transfer->stride, 0, 0, w, h,
+ (unsigned char*)src, src_pitch, 0, 0);
+
+ exa->scrn->transfer_unmap(exa->scrn, transfer);
+ exa->scrn->tex_transfer_destroy(transfer);
+
+ return TRUE;
+}
+
+static Bool
+ExaPrepareAccess(PixmapPtr pPix, int index)
+{
+ ScreenPtr pScreen = pPix->drawable.pScreen;
+ ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ struct exa_context *exa = ms->exa;
+ struct exa_pixmap_priv *priv;
+
+ priv = exaGetPixmapDriverPrivate(pPix);
+
+ if (!priv)
+ return FALSE;
+
+ if (!priv->tex)
+ return FALSE;
+
+ if (priv->map_count++ == 0)
+ {
+ if (exa->pipe->is_texture_referenced(exa->pipe, priv->tex, 0, 0) &
+ PIPE_REFERENCED_FOR_WRITE)
+ exa->pipe->flush(exa->pipe, 0, NULL);
+
+ priv->map_transfer =
+ exa->scrn->get_tex_transfer(exa->scrn, priv->tex, 0, 0, 0,
+ PIPE_TRANSFER_READ_WRITE,
+ 0, 0, priv->tex->width[0], priv->tex->height[0]);
+
+ pPix->devPrivate.ptr =
+ exa->scrn->transfer_map(exa->scrn, priv->map_transfer);
+ pPix->devKind = priv->map_transfer->stride;
+ }
+
+ return TRUE;
+}
+
+static void
+ExaFinishAccess(PixmapPtr pPix, int index)
+{
+ ScreenPtr pScreen = pPix->drawable.pScreen;
+ ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ struct exa_context *exa = ms->exa;
+ struct exa_pixmap_priv *priv;
+ priv = exaGetPixmapDriverPrivate(pPix);
+
+ if (!priv)
+ return;
+
+ if (!priv->map_transfer)
+ return;
+
+ if (--priv->map_count == 0) {
+ assert(priv->map_transfer);
+ exa->scrn->transfer_unmap(exa->scrn, priv->map_transfer);
+ exa->scrn->tex_transfer_destroy(priv->map_transfer);
+ priv->map_transfer = NULL;
+ pPix->devPrivate.ptr = NULL;
+ }
+}
+
+static void
+ExaDone(PixmapPtr pPixmap)
+{
+ ScrnInfoPtr pScrn = xf86Screens[pPixmap->drawable.pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ struct exa_pixmap_priv *priv = exaGetPixmapDriverPrivate(pPixmap);
+ struct exa_context *exa = ms->exa;
+
+ if (!priv)
+ return;
+
+#if 1
+ xorg_exa_flush(exa, PIPE_FLUSH_RENDER_CACHE, NULL);
+#else
+ xorg_finish(exa);
+#endif
+ xorg_exa_common_done(exa);
+}
+
+static void
+ExaDoneComposite(PixmapPtr pPixmap)
+{
+ ScrnInfoPtr pScrn = xf86Screens[pPixmap->drawable.pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ struct exa_context *exa = ms->exa;
+
+ xorg_exa_common_done(exa);
+}
+
+static Bool
+ExaPrepareSolid(PixmapPtr pPixmap, int alu, Pixel planeMask, Pixel fg)
+{
+ ScrnInfoPtr pScrn = xf86Screens[pPixmap->drawable.pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ struct exa_pixmap_priv *priv = exaGetPixmapDriverPrivate(pPixmap);
+ struct exa_context *exa = ms->exa;
+
+ debug_printf("ExaPrepareSolid - test\n");
+ if (pPixmap->drawable.depth < 15)
+ return FALSE;
+
+ if (!EXA_PM_IS_SOLID(&pPixmap->drawable, planeMask))
+ return FALSE;
+
+ if (!priv || !priv->tex)
+ return FALSE;
+
+ if (alu != GXcopy)
+ return FALSE;
+
+ if (!exa->pipe)
+ return FALSE;
+
+ debug_printf(" ExaPrepareSolid(0x%x)\n", fg);
+ return xorg_solid_bind_state(exa, priv, fg);
+}
+
+static void
+ExaSolid(PixmapPtr pPixmap, int x0, int y0, int x1, int y1)
+{
+ ScrnInfoPtr pScrn = xf86Screens[pPixmap->drawable.pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ struct exa_context *exa = ms->exa;
+ struct exa_pixmap_priv *priv = exaGetPixmapDriverPrivate(pPixmap);
+
+ debug_printf("\tExaSolid(%d, %d, %d, %d)\n", x0, y0, x1, y1);
+
+#if 0
+ if (x0 == 0 && y0 == 0 &&
+ x1 == priv->tex->width[0] &&
+ y1 == priv->tex->height[0]) {
+ exa->ctx->clear(exa->ctx, PIPE_CLEAR_COLOR | PIPE_CLEAR_DEPTHSTENCIL,
+ exa->solid_color, 1., 0);
+ } else
+#endif
+      xorg_solid(exa, priv, x0, y0, x1, y1);
+}
+
+static Bool
+ExaPrepareCopy(PixmapPtr pSrcPixmap, PixmapPtr pDstPixmap, int xdir,
+ int ydir, int alu, Pixel planeMask)
+{
+ ScrnInfoPtr pScrn = xf86Screens[pDstPixmap->drawable.pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ struct exa_context *exa = ms->exa;
+ struct exa_pixmap_priv *priv = exaGetPixmapDriverPrivate(pDstPixmap);
+ struct exa_pixmap_priv *src_priv = exaGetPixmapDriverPrivate(pSrcPixmap);
+
+ debug_printf("ExaPrepareCopy\n");
+
+ if (alu != GXcopy)
+ return FALSE;
+
+ if (pSrcPixmap->drawable.depth < 15 || pDstPixmap->drawable.depth < 15)
+ return FALSE;
+
+ if (!EXA_PM_IS_SOLID(&pSrcPixmap->drawable, planeMask))
+ return FALSE;
+
+ if (!priv || !src_priv)
+ return FALSE;
+
+ if (!priv->tex || !src_priv->tex)
+ return FALSE;
+
+ if (!exa->pipe)
+ return FALSE;
+
+ exa->copy.src = src_priv;
+ exa->copy.dst = priv;
+
+   /* XXX: disabled until some issues with syncing are fixed */
+ return FALSE;
+}
+
+static void
+ExaCopy(PixmapPtr pDstPixmap, int srcX, int srcY, int dstX, int dstY,
+ int width, int height)
+{
+ ScrnInfoPtr pScrn = xf86Screens[pDstPixmap->drawable.pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ struct exa_context *exa = ms->exa;
+ struct exa_pixmap_priv *priv = exaGetPixmapDriverPrivate(pDstPixmap);
+
+ debug_printf("\tExaCopy(srcx=%d, srcy=%d, dstX=%d, dstY=%d, w=%d, h=%d)\n",
+ srcX, srcY, dstX, dstY, width, height);
+
+ debug_assert(priv == exa->copy.dst);
+
+ xorg_copy_pixmap(exa, exa->copy.dst, dstX, dstY,
+ exa->copy.src, srcX, srcY,
+ width, height);
+}
+
+static Bool
+ExaPrepareComposite(int op, PicturePtr pSrcPicture,
+ PicturePtr pMaskPicture, PicturePtr pDstPicture,
+ PixmapPtr pSrc, PixmapPtr pMask, PixmapPtr pDst)
+{
+ ScrnInfoPtr pScrn = xf86Screens[pDst->drawable.pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ struct exa_context *exa = ms->exa;
+
+ debug_printf("ExaPrepareComposite\n");
+
+ return xorg_composite_bind_state(exa, op, pSrcPicture, pMaskPicture,
+ pDstPicture,
+ exaGetPixmapDriverPrivate(pSrc),
+ exaGetPixmapDriverPrivate(pMask),
+ exaGetPixmapDriverPrivate(pDst));
+}
+
+static void
+ExaComposite(PixmapPtr pDst, int srcX, int srcY, int maskX, int maskY,
+ int dstX, int dstY, int width, int height)
+{
+ ScrnInfoPtr pScrn = xf86Screens[pDst->drawable.pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ struct exa_context *exa = ms->exa;
+ struct exa_pixmap_priv *priv = exaGetPixmapDriverPrivate(pDst);
+
+ debug_printf("\tExaComposite\n");
+
+ xorg_composite(exa, priv, srcX, srcY, maskX, maskY,
+ dstX, dstY, width, height);
+}
+
+static Bool
+ExaCheckComposite(int op,
+ PicturePtr pSrcPicture, PicturePtr pMaskPicture,
+ PicturePtr pDstPicture)
+{
+ return xorg_composite_accelerated(op,
+ pSrcPicture,
+ pMaskPicture,
+ pDstPicture);
+}
+
+static void *
+ExaCreatePixmap(ScreenPtr pScreen, int size, int align)
+{
+ struct exa_pixmap_priv *priv;
+
+ priv = xcalloc(1, sizeof(struct exa_pixmap_priv));
+ if (!priv)
+ return NULL;
+
+ return priv;
+}
+
+static void
+ExaDestroyPixmap(ScreenPtr pScreen, void *dPriv)
+{
+ struct exa_pixmap_priv *priv = (struct exa_pixmap_priv *)dPriv;
+ ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+
+ if (!priv)
+ return;
+
+ if (priv->tex)
+ ms->screen->texture_destroy(priv->tex);
+
+ xfree(priv);
+}
+
+static Bool
+ExaPixmapIsOffscreen(PixmapPtr pPixmap)
+{
+ struct exa_pixmap_priv *priv;
+
+ priv = exaGetPixmapDriverPrivate(pPixmap);
+
+ if (!priv)
+ return FALSE;
+
+ if (priv->tex)
+ return TRUE;
+
+ return FALSE;
+}
+
+int
+xorg_exa_set_displayed_usage(PixmapPtr pPixmap)
+{
+ struct exa_pixmap_priv *priv;
+ priv = exaGetPixmapDriverPrivate(pPixmap);
+
+ if (!priv) {
+ FatalError("NO PIXMAP PRIVATE\n");
+ return 0;
+ }
+
+ priv->flags |= PIPE_TEXTURE_USAGE_PRIMARY;
+
+ return 0;
+}
+
+int
+xorg_exa_set_shared_usage(PixmapPtr pPixmap)
+{
+ struct exa_pixmap_priv *priv;
+ priv = exaGetPixmapDriverPrivate(pPixmap);
+
+ if (!priv) {
+ FatalError("NO PIXMAP PRIVATE\n");
+ return 0;
+ }
+
+ priv->flags |= PIPE_TEXTURE_USAGE_DISPLAY_TARGET;
+
+ return 0;
+}
+
+unsigned
+xorg_exa_get_pixmap_handle(PixmapPtr pPixmap, unsigned *stride_out)
+{
+ ScreenPtr pScreen = pPixmap->drawable.pScreen;
+ ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+ modesettingPtr ms = modesettingPTR(pScrn);
+ struct exa_pixmap_priv *priv;
+ unsigned handle;
+ unsigned stride;
+
+ if (!ms->exa) {
+ FatalError("NO MS->EXA\n");
+ return 0;
+ }
+
+ priv = exaGetPixmapDriverPrivate(pPixmap);
+
+ if (!priv) {
+ FatalError("NO PIXMAP PRIVATE\n");
+ return 0;
+ }
+
+ ms->api->local_handle_from_texture(ms->api, ms->screen, priv->tex, &stride, &handle);
+ if (stride_out)
+ *stride_out = stride;
+
+ return handle;
+}
+
+static Bool
+ExaModifyPixmapHeader(PixmapPtr pPixmap, int width, int height,
+ int depth, int bitsPerPixel, int devKind,
+ pointer pPixData)
+{
+ ScreenPtr pScreen = pPixmap->drawable.pScreen;
+ ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+ struct exa_pixmap_priv *priv = exaGetPixmapDriverPrivate(pPixmap);
+ modesettingPtr ms = modesettingPTR(pScrn);
+ struct exa_context *exa = ms->exa;
+
+ if (!priv || pPixData)
+ return FALSE;
+
+ if (depth <= 0)
+ depth = pPixmap->drawable.depth;
+
+ if (bitsPerPixel <= 0)
+ bitsPerPixel = pPixmap->drawable.bitsPerPixel;
+
+ if (width <= 0)
+ width = pPixmap->drawable.width;
+
+ if (height <= 0)
+ height = pPixmap->drawable.height;
+
+ if (width <= 0 || height <= 0 || depth <= 0)
+ return FALSE;
+
+ miModifyPixmapHeader(pPixmap, width, height, depth,
+ bitsPerPixel, devKind, NULL);
+
+ /* Deal with screen resize */
+ if (!priv->tex ||
+ (priv->tex->width[0] != width ||
+ priv->tex->height[0] != height ||
+ priv->tex_flags != priv->flags)) {
+ struct pipe_texture *texture = NULL;
+
+#ifdef DRM_MODE_FEATURE_DIRTYFB
+ if (priv->flags)
+#endif
+ {
+ struct pipe_texture template;
+
+ memset(&template, 0, sizeof(template));
+ template.target = PIPE_TEXTURE_2D;
+ exa_get_pipe_format(depth, &template.format, &bitsPerPixel);
+ pf_get_block(template.format, &template.block);
+ template.width[0] = width;
+ template.height[0] = height;
+ template.depth[0] = 1;
+ template.last_level = 0;
+ template.tex_usage = PIPE_TEXTURE_USAGE_RENDER_TARGET | priv->flags;
+ priv->tex_flags = priv->flags;
+ texture = exa->scrn->texture_create(exa->scrn, &template);
+
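+         /* Preserve the existing contents: blit from the old texture if there
+          * was one, otherwise upload the pixmap's system-memory bits into the
+          * new texture. */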
+ if (priv->tex) {
+ struct pipe_surface *dst_surf;
+ struct pipe_surface *src_surf;
+
+ dst_surf = exa->scrn->get_tex_surface(exa->scrn, texture, 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+ src_surf = exa_gpu_surface(exa, priv);
+ exa->pipe->surface_copy(exa->pipe, dst_surf, 0, 0, src_surf,
+ 0, 0, min(width, texture->width[0]),
+ min(height, texture->height[0]));
+ exa->scrn->tex_surface_destroy(dst_surf);
+ exa->scrn->tex_surface_destroy(src_surf);
+ } else if (pPixmap->devPrivate.ptr) {
+ struct pipe_transfer *transfer;
+
+ if (priv->map_count != 0)
+ FatalError("doing ExaModifyPixmapHeader on mapped buffer\n");
+
+ transfer =
+ exa->scrn->get_tex_transfer(exa->scrn, texture, 0, 0, 0,
+ PIPE_TRANSFER_WRITE,
+ 0, 0, width, height);
+ util_copy_rect(exa->scrn->transfer_map(exa->scrn, transfer),
+ &texture->block, transfer->stride, 0, 0,
+ width, height, pPixmap->devPrivate.ptr,
+ pPixmap->devKind, 0, 0);
+ exa->scrn->transfer_unmap(exa->scrn, transfer);
+ exa->scrn->tex_transfer_destroy(transfer);
+
+ xfree(pPixmap->devPrivate.ptr);
+ pPixmap->devPrivate.ptr = NULL;
+ }
+ }
+#ifdef DRM_MODE_FEATURE_DIRTYFB
+ else {
+ xfree(pPixmap->devPrivate.ptr);
+ pPixmap->devPrivate.ptr = xalloc(pPixmap->drawable.height *
+ pPixmap->devKind);
+ }
+#endif
+
+ pipe_texture_reference(&priv->tex, texture);
+ }
+
+ return TRUE;
+}
+
+struct pipe_texture *
+xorg_exa_get_texture(PixmapPtr pPixmap)
+{
+ struct exa_pixmap_priv *priv = exaGetPixmapDriverPrivate(pPixmap);
+ struct pipe_texture *tex = NULL;
+ pipe_texture_reference(&tex, priv->tex);
+ return tex;
+}
+
+void
+xorg_exa_close(ScrnInfoPtr pScrn)
+{
+ modesettingPtr ms = modesettingPTR(pScrn);
+ struct exa_context *exa = ms->exa;
+ struct pipe_constant_buffer *vsbuf = &exa->vs_const_buffer;
+ struct pipe_constant_buffer *fsbuf = &exa->fs_const_buffer;
+
+ if (exa->shaders) {
+ xorg_shaders_destroy(exa->shaders);
+ }
+
+ if (vsbuf && vsbuf->buffer)
+ pipe_buffer_reference(&vsbuf->buffer, NULL);
+
+ if (fsbuf && fsbuf->buffer)
+ pipe_buffer_reference(&fsbuf->buffer, NULL);
+
+ if (exa->cso) {
+ cso_release_all(exa->cso);
+ cso_destroy_context(exa->cso);
+ }
+
+ if (exa->pipe)
+ exa->pipe->destroy(exa->pipe);
+
+ exaDriverFini(pScrn->pScreen);
+ xfree(exa);
+ ms->exa = NULL;
+}
+
+void *
+xorg_exa_init(ScrnInfoPtr pScrn)
+{
+ modesettingPtr ms = modesettingPTR(pScrn);
+ struct exa_context *exa;
+ ExaDriverPtr pExa;
+
+ exa = xcalloc(1, sizeof(struct exa_context));
+ if (!exa)
+ return NULL;
+
+ pExa = exaDriverAlloc();
+ if (!pExa) {
+ goto out_err;
+ }
+
+ memset(pExa, 0, sizeof(*pExa));
+
+ pExa->exa_major = 2;
+ pExa->exa_minor = 2;
+ pExa->memoryBase = 0;
+ pExa->memorySize = 0;
+ pExa->offScreenBase = 0;
+ pExa->pixmapOffsetAlign = 0;
+ pExa->pixmapPitchAlign = 1;
+ pExa->flags = EXA_OFFSCREEN_PIXMAPS | EXA_HANDLES_PIXMAPS;
+#ifdef EXA_SUPPORTS_PREPARE_AUX
+ pExa->flags |= EXA_SUPPORTS_PREPARE_AUX;
+#endif
+#ifdef EXA_MIXED_PIXMAPS
+ pExa->flags |= EXA_MIXED_PIXMAPS;
+#endif
+ pExa->maxX = 8191; /* FIXME */
+ pExa->maxY = 8191; /* FIXME */
+
+ pExa->WaitMarker = ExaWaitMarker;
+ pExa->MarkSync = ExaMarkSync;
+ pExa->PrepareSolid = ExaPrepareSolid;
+ pExa->Solid = ExaSolid;
+ pExa->DoneSolid = ExaDone;
+ pExa->PrepareCopy = ExaPrepareCopy;
+ pExa->Copy = ExaCopy;
+ pExa->DoneCopy = ExaDone;
+ pExa->CheckComposite = ExaCheckComposite;
+ pExa->PrepareComposite = ExaPrepareComposite;
+ pExa->Composite = ExaComposite;
+ pExa->DoneComposite = ExaDoneComposite;
+ pExa->PixmapIsOffscreen = ExaPixmapIsOffscreen;
+ pExa->DownloadFromScreen = ExaDownloadFromScreen;
+ pExa->UploadToScreen = ExaUploadToScreen;
+ pExa->PrepareAccess = ExaPrepareAccess;
+ pExa->FinishAccess = ExaFinishAccess;
+ pExa->CreatePixmap = ExaCreatePixmap;
+ pExa->DestroyPixmap = ExaDestroyPixmap;
+ pExa->ModifyPixmapHeader = ExaModifyPixmapHeader;
+
+ if (!exaDriverInit(pScrn->pScreen, pExa)) {
+ goto out_err;
+ }
+
+ exa->scrn = ms->screen;
+ exa->pipe = ms->api->create_context(ms->api, exa->scrn);
+ /* Share context with DRI */
+ ms->ctx = exa->pipe;
+
+ exa->cso = cso_create_context(exa->pipe);
+ exa->shaders = xorg_shaders_create(exa);
+
+ xorg_exa_init_state(exa);
+
+ return (void *)exa;
+
+out_err:
+ xorg_exa_close(pScrn);
+
+ return NULL;
+}
+
+struct pipe_surface *
+exa_gpu_surface(struct exa_context *exa, struct exa_pixmap_priv *priv)
+{
+ return exa->scrn->get_tex_surface(exa->scrn, priv->tex, 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_READ |
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+}
+
+void xorg_exa_flush(struct exa_context *exa, uint pipeFlushFlags,
+ struct pipe_fence_handle **fence)
+{
+ exa->pipe->flush(exa->pipe, pipeFlushFlags, fence);
+}
+
+void xorg_exa_finish(struct exa_context *exa)
+{
+ struct pipe_fence_handle *fence = NULL;
+
+ xorg_exa_flush(exa, PIPE_FLUSH_RENDER_CACHE, &fence);
+
+ exa->pipe->screen->fence_finish(exa->pipe->screen, fence, 0);
+ exa->pipe->screen->fence_reference(exa->pipe->screen, &fence, NULL);
+}
+
diff --git a/src/gallium/state_trackers/xorg/xorg_exa.h b/src/gallium/state_trackers/xorg/xorg_exa.h
new file mode 100644
index 0000000000..43949b04a4
--- /dev/null
+++ b/src/gallium/state_trackers/xorg/xorg_exa.h
@@ -0,0 +1,57 @@
+#ifndef XORG_EXA_H
+#define XORG_EXA_H
+
+#include "xorg_tracker.h"
+
+#include <pipe/p_state.h>
+
+struct cso_context;
+struct xorg_shaders;
+
+/* src + mask + dst */
+#define MAX_EXA_SAMPLERS 3
+
+struct exa_context
+{
+ ExaDriverPtr pExa;
+ struct pipe_context *pipe;
+ struct pipe_screen *scrn;
+ struct cso_context *cso;
+ struct xorg_shaders *shaders;
+
+ struct pipe_constant_buffer vs_const_buffer;
+ struct pipe_constant_buffer fs_const_buffer;
+
+ struct pipe_texture *bound_textures[MAX_EXA_SAMPLERS];
+ int num_bound_samplers;
+
+ float solid_color[4];
+ boolean has_solid_color;
+
+ struct {
+ struct exa_pixmap_priv *src;
+ struct exa_pixmap_priv *dst;
+ } copy;
+};
+
+
+struct exa_pixmap_priv
+{
+ int flags;
+ int tex_flags;
+
+ struct pipe_texture *tex;
+ struct pipe_texture *depth_stencil_tex;
+
+ struct pipe_transfer *map_transfer;
+ unsigned map_count;
+};
+
+struct pipe_surface *
+exa_gpu_surface(struct exa_context *exa, struct exa_pixmap_priv *priv);
+
+void xorg_exa_flush(struct exa_context *exa, uint pipeFlushFlags,
+ struct pipe_fence_handle **fence);
+void xorg_exa_finish(struct exa_context *exa);
+
+#endif
diff --git a/src/gallium/state_trackers/xorg/xorg_exa_tgsi.c b/src/gallium/state_trackers/xorg/xorg_exa_tgsi.c
new file mode 100644
index 0000000000..694eded09a
--- /dev/null
+++ b/src/gallium/state_trackers/xorg/xorg_exa_tgsi.c
@@ -0,0 +1,489 @@
+#include "xorg_exa_tgsi.h"
+
+/* 'Absolute' is defined as a macro in X11/extensions/XI.h and would clash
+ * with the TGSI headers included below, so undefine it. */
+#undef Absolute
+
+#include "pipe/p_format.h"
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+#include "pipe/p_shader_tokens.h"
+
+#include "util/u_memory.h"
+#include "util/u_simple_shaders.h"
+
+#include "tgsi/tgsi_ureg.h"
+
+#include "cso_cache/cso_context.h"
+#include "cso_cache/cso_hash.h"
+
+/* Vertex shader:
+ * IN[0] = vertex pos
+ * IN[1] = src tex coord | solid fill color
+ * IN[2] = mask tex coord
+ * IN[3] = dst tex coord
+ * CONST[0] = (2/dst_width, 2/dst_height, 1, 1)
+ * CONST[1] = (-1, -1, 0, 0)
+ *
+ * OUT[0] = vertex pos
+ * OUT[1] = src tex coord | solid fill color
+ * OUT[2] = mask tex coord
+ * OUT[3] = dst tex coord
+ */
+
+/* Fragment shader:
+ * SAMP[0] = src
+ * SAMP[1] = mask
+ * SAMP[2] = dst
+ * IN[0] = pos src | solid fill color
+ * IN[1] = pos mask
+ * IN[2] = pos dst
+ * CONST[0] = (0, 0, 0, 1)
+ *
+ * OUT[0] = color
+ */
+
+struct xorg_shaders {
+ struct exa_context *exa;
+
+ struct cso_hash *vs_hash;
+ struct cso_hash *fs_hash;
+};
+
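+/* With CONST[0].w == 1 (see the fragment shader layout above) this snippet
+ * expands to: TEMP[0] += TEMP[0] * (1 - TEMP[1].a).
+ */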
+static const char over_op[] =
+ "SUB TEMP[3], CONST[0].wwww, TEMP[1].wwww\n"
+ "MUL TEMP[3], TEMP[0], TEMP[3]\n"
+ "ADD TEMP[0], TEMP[3], TEMP[0]\n";
+
+
+static INLINE void
+create_preamble(struct ureg_program *ureg)
+{
+}
+
+
+static INLINE void
+src_in_mask(struct ureg_program *ureg,
+ struct ureg_dst dst,
+ struct ureg_src src,
+ struct ureg_src mask)
+{
+ /* MUL dst, src, mask.wwww */
+ ureg_MUL(ureg, dst, src,
+ ureg_scalar(mask, TGSI_SWIZZLE_W));
+}
+
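+/* Map pixel-space coordinates into normalized device coordinates:
+ * out = coords * const0 + const1, with const0 = (2/dst_width, 2/dst_height, 1, 1)
+ * and const1 = (-1, -1, 0, 0) as described in the vertex shader layout above.
+ * For example (assuming a 640x480 destination), pixel (320, 240) maps to (0, 0).
+ */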
+static struct ureg_src
+vs_normalize_coords(struct ureg_program *ureg, struct ureg_src coords,
+ struct ureg_src const0, struct ureg_src const1)
+{
+ struct ureg_dst tmp = ureg_DECL_temporary(ureg);
+ struct ureg_src ret;
+ ureg_MUL(ureg, tmp, coords, const0);
+ ureg_ADD(ureg, tmp, ureg_src(tmp), const1);
+ ret = ureg_src(tmp);
+ ureg_release_temporary(ureg, tmp);
+ return ret;
+}
+
+static void
+linear_gradient(struct ureg_program *ureg,
+ struct ureg_dst out,
+ struct ureg_src pos,
+ struct ureg_src sampler,
+ struct ureg_src coords,
+ struct ureg_src const0124,
+ struct ureg_src matrow0,
+ struct ureg_src matrow1,
+ struct ureg_src matrow2)
+{
+ struct ureg_dst temp0 = ureg_DECL_temporary(ureg);
+ struct ureg_dst temp1 = ureg_DECL_temporary(ureg);
+ struct ureg_dst temp2 = ureg_DECL_temporary(ureg);
+ struct ureg_dst temp3 = ureg_DECL_temporary(ureg);
+ struct ureg_dst temp4 = ureg_DECL_temporary(ureg);
+ struct ureg_dst temp5 = ureg_DECL_temporary(ureg);
+
+ ureg_MOV(ureg,
+ ureg_writemask(temp0, TGSI_WRITEMASK_XY), pos);
+ ureg_MOV(ureg,
+ ureg_writemask(temp0, TGSI_WRITEMASK_Z),
+ ureg_scalar(const0124, TGSI_SWIZZLE_Y));
+
+ ureg_DP3(ureg, temp1, matrow0, ureg_src(temp0));
+ ureg_DP3(ureg, temp2, matrow1, ureg_src(temp0));
+ ureg_DP3(ureg, temp3, matrow2, ureg_src(temp0));
+ ureg_RCP(ureg, temp3, ureg_src(temp3));
+ ureg_MUL(ureg, temp1, ureg_src(temp1), ureg_src(temp3));
+ ureg_MUL(ureg, temp2, ureg_src(temp2), ureg_src(temp3));
+
+ ureg_MOV(ureg, ureg_writemask(temp4, TGSI_WRITEMASK_X),
+ ureg_src(temp1));
+ ureg_MOV(ureg, ureg_writemask(temp4, TGSI_WRITEMASK_Y),
+ ureg_src(temp2));
+
+ ureg_MUL(ureg, temp0,
+ ureg_scalar(coords, TGSI_SWIZZLE_Y),
+ ureg_scalar(ureg_src(temp4), TGSI_SWIZZLE_Y));
+ ureg_MAD(ureg, temp1,
+ ureg_scalar(coords, TGSI_SWIZZLE_X),
+ ureg_scalar(ureg_src(temp4), TGSI_SWIZZLE_X),
+ ureg_src(temp0));
+
+ ureg_MUL(ureg, temp2,
+ ureg_src(temp1),
+ ureg_scalar(coords, TGSI_SWIZZLE_Z));
+
+ ureg_TEX(ureg, out,
+ TGSI_TEXTURE_1D, ureg_src(temp2), sampler);
+
+ ureg_release_temporary(ureg, temp0);
+ ureg_release_temporary(ureg, temp1);
+ ureg_release_temporary(ureg, temp2);
+ ureg_release_temporary(ureg, temp3);
+ ureg_release_temporary(ureg, temp4);
+ ureg_release_temporary(ureg, temp5);
+}
+
+
+static void
+radial_gradient(struct ureg_program *ureg,
+ struct ureg_dst out,
+ struct ureg_src pos,
+ struct ureg_src sampler,
+ struct ureg_src coords,
+ struct ureg_src const0124,
+ struct ureg_src matrow0,
+ struct ureg_src matrow1,
+ struct ureg_src matrow2)
+{
+ struct ureg_dst temp0 = ureg_DECL_temporary(ureg);
+ struct ureg_dst temp1 = ureg_DECL_temporary(ureg);
+ struct ureg_dst temp2 = ureg_DECL_temporary(ureg);
+ struct ureg_dst temp3 = ureg_DECL_temporary(ureg);
+ struct ureg_dst temp4 = ureg_DECL_temporary(ureg);
+ struct ureg_dst temp5 = ureg_DECL_temporary(ureg);
+
+ ureg_MOV(ureg,
+ ureg_writemask(temp0, TGSI_WRITEMASK_XY),
+ pos);
+ ureg_MOV(ureg,
+ ureg_writemask(temp0, TGSI_WRITEMASK_Z),
+ ureg_scalar(const0124, TGSI_SWIZZLE_Y));
+
+ ureg_DP3(ureg, temp1, matrow0, ureg_src(temp0));
+ ureg_DP3(ureg, temp2, matrow1, ureg_src(temp0));
+ ureg_DP3(ureg, temp3, matrow2, ureg_src(temp0));
+ ureg_RCP(ureg, temp3, ureg_src(temp3));
+ ureg_MUL(ureg, temp1, ureg_src(temp1), ureg_src(temp3));
+ ureg_MUL(ureg, temp2, ureg_src(temp2), ureg_src(temp3));
+
+ ureg_MOV(ureg, ureg_writemask(temp5, TGSI_WRITEMASK_X),
+ ureg_src(temp1));
+ ureg_MOV(ureg, ureg_writemask(temp5, TGSI_WRITEMASK_Y),
+ ureg_src(temp2));
+
+ ureg_MUL(ureg, temp0, ureg_scalar(coords, TGSI_SWIZZLE_Y),
+ ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_Y));
+ ureg_MAD(ureg, temp1,
+ ureg_scalar(coords, TGSI_SWIZZLE_X),
+ ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_X),
+ ureg_src(temp0));
+ ureg_ADD(ureg, temp1,
+ ureg_src(temp1), ureg_src(temp1));
+ ureg_MUL(ureg, temp3,
+ ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_Y),
+ ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_Y));
+ ureg_MAD(ureg, temp4,
+ ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_X),
+ ureg_scalar(ureg_src(temp5), TGSI_SWIZZLE_X),
+ ureg_src(temp3));
+ ureg_MOV(ureg, temp4, ureg_negate(ureg_src(temp4)));
+ ureg_MUL(ureg, temp2,
+ ureg_scalar(coords, TGSI_SWIZZLE_Z),
+ ureg_src(temp4));
+ ureg_MUL(ureg, temp0,
+ ureg_scalar(const0124, TGSI_SWIZZLE_W),
+ ureg_src(temp2));
+ ureg_MUL(ureg, temp3,
+ ureg_src(temp1), ureg_src(temp1));
+ ureg_SUB(ureg, temp2,
+ ureg_src(temp3), ureg_src(temp0));
+ ureg_RSQ(ureg, temp2, ureg_abs(ureg_src(temp2)));
+ ureg_RCP(ureg, temp2, ureg_src(temp2));
+ ureg_SUB(ureg, temp1,
+ ureg_src(temp2), ureg_src(temp1));
+ ureg_ADD(ureg, temp0,
+ ureg_scalar(coords, TGSI_SWIZZLE_Z),
+ ureg_scalar(coords, TGSI_SWIZZLE_Z));
+ ureg_RCP(ureg, temp0, ureg_src(temp0));
+ ureg_MUL(ureg, temp2,
+ ureg_src(temp1), ureg_src(temp0));
+ ureg_TEX(ureg, out, TGSI_TEXTURE_1D,
+ ureg_src(temp2), sampler);
+
+ ureg_release_temporary(ureg, temp0);
+ ureg_release_temporary(ureg, temp1);
+ ureg_release_temporary(ureg, temp2);
+ ureg_release_temporary(ureg, temp3);
+ ureg_release_temporary(ureg, temp4);
+ ureg_release_temporary(ureg, temp5);
+}
+
+static void *
+create_vs(struct pipe_context *pipe,
+ unsigned vs_traits)
+{
+ struct ureg_program *ureg;
+ struct ureg_src src;
+ struct ureg_dst dst;
+ struct ureg_src const0, const1;
+ boolean is_fill = vs_traits & VS_FILL;
+ boolean is_composite = vs_traits & VS_COMPOSITE;
+ boolean has_mask = vs_traits & VS_MASK;
+
+ ureg = ureg_create(TGSI_PROCESSOR_VERTEX);
+ if (ureg == NULL)
+ return 0;
+
+ const0 = ureg_DECL_constant(ureg);
+ const1 = ureg_DECL_constant(ureg);
+
+ /* it has to be either a fill or a composite op */
+ debug_assert(is_fill ^ is_composite);
+
+ src = ureg_DECL_vs_input(ureg,
+ TGSI_SEMANTIC_POSITION, 0);
+ dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
+ src = vs_normalize_coords(ureg, src,
+ const0, const1);
+ ureg_MOV(ureg, dst, src);
+
+
+ if (is_composite) {
+ src = ureg_DECL_vs_input(ureg,
+ TGSI_SEMANTIC_GENERIC, 1);
+ dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 1);
+ ureg_MOV(ureg, dst, src);
+ }
+ if (is_fill) {
+ src = ureg_DECL_vs_input(ureg,
+ TGSI_SEMANTIC_COLOR, 0);
+ dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_COLOR, 0);
+ ureg_MOV(ureg, dst, src);
+ }
+
+ if (has_mask) {
+ src = ureg_DECL_vs_input(ureg,
+ TGSI_SEMANTIC_GENERIC, 2);
+ dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 2);
+ ureg_MOV(ureg, dst, src);
+ }
+
+ ureg_END(ureg);
+
+ return ureg_create_shader_and_destroy(ureg, pipe);
+}
+
+static void *
+create_fs(struct pipe_context *pipe,
+ unsigned fs_traits)
+{
+ struct ureg_program *ureg;
+ struct ureg_src /*dst_sampler,*/ src_sampler, mask_sampler;
+ struct ureg_src /*dst_pos,*/ src_input, mask_pos;
+ struct ureg_dst src, mask;
+ struct ureg_dst out;
+ boolean has_mask = fs_traits & FS_MASK;
+ boolean is_fill = fs_traits & FS_FILL;
+ boolean is_composite = fs_traits & FS_COMPOSITE;
+ boolean is_solid = fs_traits & FS_SOLID_FILL;
+ boolean is_lingrad = fs_traits & FS_LINGRAD_FILL;
+ boolean is_radgrad = fs_traits & FS_RADGRAD_FILL;
+
+ ureg = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (ureg == NULL)
+ return 0;
+
+ /* it has to be either a fill or a composite op */
+ debug_assert(is_fill ^ is_composite);
+
+ out = ureg_DECL_output(ureg,
+ TGSI_SEMANTIC_COLOR,
+ 0);
+
+ if (is_composite) {
+ src_sampler = ureg_DECL_sampler(ureg, 0);
+ src_input = ureg_DECL_fs_input(ureg,
+ TGSI_SEMANTIC_POSITION,
+ 0,
+ TGSI_INTERPOLATE_PERSPECTIVE);
+ } else {
+ debug_assert(is_fill);
+ if (is_solid)
+ src_input = ureg_DECL_fs_input(ureg,
+ TGSI_SEMANTIC_COLOR,
+ 0,
+ TGSI_INTERPOLATE_PERSPECTIVE);
+ else
+ src_input = ureg_DECL_fs_input(ureg,
+ TGSI_SEMANTIC_POSITION,
+ 0,
+ TGSI_INTERPOLATE_PERSPECTIVE);
+ }
+
+ if (has_mask) {
+ mask_sampler = ureg_DECL_sampler(ureg, 1);
+ mask_pos = ureg_DECL_fs_input(ureg,
+ TGSI_SEMANTIC_POSITION,
+ 1,
+ TGSI_INTERPOLATE_PERSPECTIVE);
+ }
+
+#if 0 /* unused right now */
+ dst_sampler = ureg_DECL_sampler(ureg, 2);
+ dst_pos = ureg_DECL_fs_input(ureg,
+ TGSI_SEMANTIC_POSITION,
+ 2,
+ TGSI_INTERPOLATE_PERSPECTIVE);
+#endif
+
+ if (is_composite) {
+ if (has_mask)
+ src = ureg_DECL_temporary(ureg);
+ else
+ src = out;
+ ureg_TEX(ureg, src,
+ TGSI_TEXTURE_2D, src_input, src_sampler);
+ } else if (is_fill) {
+ if (is_solid) {
+ if (has_mask)
+ src = ureg_dst(src_input);
+ else
+ ureg_MOV(ureg, out, src_input);
+ } else if (is_lingrad || is_radgrad) {
+ struct ureg_src coords, const0124,
+ matrow0, matrow1, matrow2;
+
+ if (has_mask)
+ src = ureg_DECL_temporary(ureg);
+ else
+ src = out;
+
+ coords = ureg_DECL_constant(ureg);
+ const0124 = ureg_DECL_constant(ureg);
+ matrow0 = ureg_DECL_constant(ureg);
+ matrow1 = ureg_DECL_constant(ureg);
+ matrow2 = ureg_DECL_constant(ureg);
+
+ if (is_lingrad) {
+ linear_gradient(ureg, src,
+ src_input, src_sampler,
+ coords, const0124,
+ matrow0, matrow1, matrow2);
+ } else if (is_radgrad) {
+ radial_gradient(ureg, src,
+ src_input, src_sampler,
+ coords, const0124,
+ matrow0, matrow1, matrow2);
+ }
+ } else
+ debug_assert(!"Unknown fill type!");
+ }
+
+ if (has_mask) {
+ mask = ureg_DECL_temporary(ureg);
+ ureg_TEX(ureg, mask,
+ TGSI_TEXTURE_2D, mask_pos, mask_sampler);
+ /* src IN mask */
+ src_in_mask(ureg, out, ureg_src(src), ureg_src(mask));
+ ureg_release_temporary(ureg, mask);
+ }
+
+ ureg_END(ureg);
+
+ return ureg_create_shader_and_destroy(ureg, pipe);
+}
+
+struct xorg_shaders * xorg_shaders_create(struct exa_context *exa)
+{
+ struct xorg_shaders *sc = CALLOC_STRUCT(xorg_shaders);
+
+ sc->exa = exa;
+ sc->vs_hash = cso_hash_create();
+ sc->fs_hash = cso_hash_create();
+
+ return sc;
+}
+
+static void
+cache_destroy(struct cso_context *cso,
+ struct cso_hash *hash,
+ unsigned processor)
+{
+ struct cso_hash_iter iter = cso_hash_first_node(hash);
+ while (!cso_hash_iter_is_null(iter)) {
+ void *shader = (void *)cso_hash_iter_data(iter);
+ if (processor == PIPE_SHADER_FRAGMENT) {
+ cso_delete_fragment_shader(cso, shader);
+ } else if (processor == PIPE_SHADER_VERTEX) {
+ cso_delete_vertex_shader(cso, shader);
+ }
+ iter = cso_hash_erase(hash, iter);
+ }
+ cso_hash_delete(hash);
+}
+
+void xorg_shaders_destroy(struct xorg_shaders *sc)
+{
+ cache_destroy(sc->exa->cso, sc->vs_hash,
+ PIPE_SHADER_VERTEX);
+ cache_destroy(sc->exa->cso, sc->fs_hash,
+ PIPE_SHADER_FRAGMENT);
+
+ free(sc);
+}
+
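+/* Look up a shader by its trait bitmask, which doubles as the cso_hash key;
+ * on a miss, build it with create_vs()/create_fs() and cache the result.
+ */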
+static INLINE void *
+shader_from_cache(struct pipe_context *pipe,
+ unsigned type,
+ struct cso_hash *hash,
+ unsigned key)
+{
+ void *shader = 0;
+
+ struct cso_hash_iter iter = cso_hash_find(hash, key);
+
+ if (cso_hash_iter_is_null(iter)) {
+ if (type == PIPE_SHADER_VERTEX)
+ shader = create_vs(pipe, key);
+ else
+ shader = create_fs(pipe, key);
+ cso_hash_insert(hash, key, shader);
+ } else
+ shader = (void *)cso_hash_iter_data(iter);
+
+ return shader;
+}
+
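+/* Resolve the vertex/fragment shader pair for the given trait masks; the
+ * returned struct is zeroed if either stage could not be built.
+ */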
+struct xorg_shader xorg_shaders_get(struct xorg_shaders *sc,
+ unsigned vs_traits,
+ unsigned fs_traits)
+{
+ struct xorg_shader shader = {0};
+ void *vs, *fs;
+
+ vs = shader_from_cache(sc->exa->pipe, PIPE_SHADER_VERTEX,
+ sc->vs_hash, vs_traits);
+ fs = shader_from_cache(sc->exa->pipe, PIPE_SHADER_FRAGMENT,
+ sc->fs_hash, fs_traits);
+
+ debug_assert(vs && fs);
+ if (!vs || !fs)
+ return shader;
+
+ shader.vs = vs;
+ shader.fs = fs;
+
+ return shader;
+}
diff --git a/src/gallium/state_trackers/xorg/xorg_exa_tgsi.h b/src/gallium/state_trackers/xorg/xorg_exa_tgsi.h
new file mode 100644
index 0000000000..1535a0c8c3
--- /dev/null
+++ b/src/gallium/state_trackers/xorg/xorg_exa_tgsi.h
@@ -0,0 +1,43 @@
+#ifndef XORG_EXA_TGSI_H
+#define XORG_EXA_TGSI_H
+
+#include "xorg_exa.h"
+
+enum xorg_vs_traits {
+ VS_COMPOSITE = 1 << 0,
+ VS_MASK = 1 << 1,
+ VS_SOLID_FILL = 1 << 2,
+ VS_LINGRAD_FILL = 1 << 3,
+ VS_RADGRAD_FILL = 1 << 4,
+ VS_FILL = (VS_SOLID_FILL |
+ VS_LINGRAD_FILL |
+ VS_RADGRAD_FILL)
+ /*VS_TRANSFORM = 1 << 5*/
+};
+
+enum xorg_fs_traits {
+ FS_COMPOSITE = 1 << 0,
+ FS_MASK = 1 << 1,
+ FS_SOLID_FILL = 1 << 2,
+ FS_LINGRAD_FILL = 1 << 3,
+ FS_RADGRAD_FILL = 1 << 4,
+ FS_FILL = (FS_SOLID_FILL |
+ FS_LINGRAD_FILL |
+ FS_RADGRAD_FILL)
+};
+
+struct xorg_shader {
+ void *fs;
+ void *vs;
+};
+
+struct xorg_shaders;
+
+struct xorg_shaders *xorg_shaders_create(struct exa_context *exa);
+void xorg_shaders_destroy(struct xorg_shaders *shaders);
+
+struct xorg_shader xorg_shaders_get(struct xorg_shaders *shaders,
+ unsigned vs_traits,
+ unsigned fs_traits);
+
+#endif
diff --git a/src/gallium/state_trackers/xorg/xorg_output.c b/src/gallium/state_trackers/xorg/xorg_output.c
new file mode 100644
index 0000000000..950af942f5
--- /dev/null
+++ b/src/gallium/state_trackers/xorg/xorg_output.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Author: Alan Hourihane <alanh@tungstengraphics.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ */
+
+#include "xorg-server.h"
+#include <xf86.h>
+#include <xf86i2c.h>
+#include <xf86Crtc.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#define DPMS_SERVER
+#include <X11/extensions/dpms.h>
+
+#include "X11/Xatom.h"
+
+#include "xorg_tracker.h"
+
+static char *connector_enum_list[] = {
+ "Unknown",
+ "VGA",
+ "DVI-I",
+ "DVI-D",
+ "DVI-A",
+ "Composite",
+ "SVIDEO",
+ "LVDS",
+ "Component",
+ "9-pin DIN",
+ "DisplayPort",
+ "HDMI Type A",
+ "HDMI Type B",
+};
+
+static void
+dpms(xf86OutputPtr output, int mode)
+{
+}
+
+static void
+save(xf86OutputPtr output)
+{
+}
+
+static void
+restore(xf86OutputPtr output)
+{
+}
+
+static int
+mode_valid(xf86OutputPtr output, DisplayModePtr pMode)
+{
+ return MODE_OK;
+}
+
+static Bool
+mode_fixup(xf86OutputPtr output, DisplayModePtr mode,
+ DisplayModePtr adjusted_mode)
+{
+ return TRUE;
+}
+
+static void
+prepare(xf86OutputPtr output)
+{
+ dpms(output, DPMSModeOff);
+}
+
+static void
+mode_set(xf86OutputPtr output, DisplayModePtr mode,
+ DisplayModePtr adjusted_mode)
+{
+}
+
+static void
+commit(xf86OutputPtr output)
+{
+ dpms(output, DPMSModeOn);
+
+ if (output->scrn->pScreen != NULL)
+ xf86_reload_cursors(output->scrn->pScreen);
+}
+
+static xf86OutputStatus
+detect(xf86OutputPtr output)
+{
+ drmModeConnectorPtr drm_connector = output->driver_private;
+
+ switch (drm_connector->connection) {
+ case DRM_MODE_CONNECTED:
+ return XF86OutputStatusConnected;
+ case DRM_MODE_DISCONNECTED:
+ return XF86OutputStatusDisconnected;
+ default:
+ return XF86OutputStatusUnknown;
+ }
+}
+
+static DisplayModePtr
+get_modes(xf86OutputPtr output)
+{
+ drmModeConnectorPtr drm_connector = output->driver_private;
+ drmModeModeInfoPtr drm_mode = NULL;
+ DisplayModePtr modes = NULL, mode = NULL;
+ int i;
+
+ for (i = 0; i < drm_connector->count_modes; i++) {
+ drm_mode = &drm_connector->modes[i];
+ if (drm_mode) {
+ mode = xcalloc(1, sizeof(DisplayModeRec));
+ if (!mode)
+ continue;
+ mode->type = 0;
+ mode->Clock = drm_mode->clock;
+ mode->HDisplay = drm_mode->hdisplay;
+ mode->HSyncStart = drm_mode->hsync_start;
+ mode->HSyncEnd = drm_mode->hsync_end;
+ mode->HTotal = drm_mode->htotal;
+ mode->VDisplay = drm_mode->vdisplay;
+ mode->VSyncStart = drm_mode->vsync_start;
+ mode->VSyncEnd = drm_mode->vsync_end;
+ mode->VTotal = drm_mode->vtotal;
+ mode->Flags = drm_mode->flags;
+ mode->HSkew = drm_mode->hskew;
+ mode->VScan = drm_mode->vscan;
+ mode->VRefresh = xf86ModeVRefresh(mode);
+ mode->Private = (void *)drm_mode;
+ xf86SetModeDefaultName(mode);
+ modes = xf86ModesAdd(modes, mode);
+ xf86PrintModeline(0, mode);
+ }
+ }
+
+ return modes;
+}
+
+static void
+destroy(xf86OutputPtr output)
+{
+ drmModeFreeConnector(output->driver_private);
+}
+
+static void
+create_resources(xf86OutputPtr output)
+{
+#ifdef RANDR_12_INTERFACE
+#endif /* RANDR_12_INTERFACE */
+}
+
+#ifdef RANDR_12_INTERFACE
+static Bool
+set_property(xf86OutputPtr output, Atom property, RRPropertyValuePtr value)
+{
+ return TRUE;
+}
+#endif /* RANDR_12_INTERFACE */
+
+#ifdef RANDR_13_INTERFACE
+static Bool
+get_property(xf86OutputPtr output, Atom property)
+{
+ return TRUE;
+}
+#endif /* RANDR_13_INTERFACE */
+
+#ifdef RANDR_GET_CRTC_INTERFACE
+static xf86CrtcPtr
+get_crtc(xf86OutputPtr output)
+{
+ return NULL;
+}
+#endif
+
+static const xf86OutputFuncsRec output_funcs = {
+ .create_resources = create_resources,
+ .dpms = dpms,
+ .save = save,
+ .restore = restore,
+ .mode_valid = mode_valid,
+ .mode_fixup = mode_fixup,
+ .prepare = prepare,
+ .mode_set = mode_set,
+ .commit = commit,
+ .detect = detect,
+ .get_modes = get_modes,
+#ifdef RANDR_12_INTERFACE
+ .set_property = set_property,
+#endif
+#ifdef RANDR_13_INTERFACE
+ .get_property = get_property,
+#endif
+ .destroy = destroy,
+#ifdef RANDR_GET_CRTC_INTERFACE
+ .get_crtc = get_crtc,
+#endif
+};
+
+void
+output_init(ScrnInfoPtr pScrn)
+{
+ modesettingPtr ms = modesettingPTR(pScrn);
+ xf86OutputPtr output;
+ drmModeResPtr res;
+ drmModeConnectorPtr drm_connector = NULL;
+ drmModeEncoderPtr drm_encoder = NULL;
+ char *name;
+ int c, v, p;
+
+ res = drmModeGetResources(ms->fd);
+ if (res == 0) {
+ DRV_ERROR("Failed drmModeGetResources\n");
+ return;
+ }
+
+ for (c = 0; c < res->count_connectors; c++) {
+ drm_connector = drmModeGetConnector(ms->fd, res->connectors[c]);
+ if (!drm_connector)
+ goto out;
+
+#if 0
+ for (p = 0; p < drm_connector->count_props; p++) {
+ drmModePropertyPtr prop;
+
+ prop = drmModeGetProperty(ms->fd, drm_connector->props[p]);
+
+ name = NULL;
+ if (prop) {
+ ErrorF("VALUES %d\n", prop->count_values);
+
+ for (v = 0; v < prop->count_values; v++)
+ ErrorF("%s %lld\n", prop->name, prop->values[v]);
+ }
+ }
+#else
+ (void)p;
+ (void)v;
+#endif
+
+ name = connector_enum_list[drm_connector->connector_type];
+
+ output = xf86OutputCreate(pScrn, &output_funcs, name);
+ if (!output)
+ continue;
+
+ drm_encoder = drmModeGetEncoder(ms->fd, drm_connector->encoders[0]);
+ if (drm_encoder) {
+ output->possible_crtcs = drm_encoder->possible_crtcs;
+ output->possible_clones = drm_encoder->possible_clones;
+ } else {
+ output->possible_crtcs = 0;
+ output->possible_clones = 0;
+ }
+ output->driver_private = drm_connector;
+ output->subpixel_order = SubPixelHorizontalRGB;
+ output->interlaceAllowed = FALSE;
+ output->doubleScanAllowed = FALSE;
+ }
+
+ out:
+ drmModeFreeResources(res);
+}
+
+/* vim: set sw=4 ts=8 sts=4: */
diff --git a/src/gallium/state_trackers/xorg/xorg_tracker.h b/src/gallium/state_trackers/xorg/xorg_tracker.h
new file mode 100644
index 0000000000..b1ab783a15
--- /dev/null
+++ b/src/gallium/state_trackers/xorg/xorg_tracker.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Author: Alan Hourihane <alanh@tungstengraphics.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ */
+
+#ifndef _XORG_TRACKER_H_
+#define _XORG_TRACKER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <errno.h>
+#include <drm.h>
+#include <xf86drm.h>
+#include <xf86drmMode.h>
+#include <xorg-server.h>
+#include <xf86.h>
+#include "xf86Crtc.h"
+#include <exa.h>
+
+#ifdef DRM_MODE_FEATURE_DIRTYFB
+#include <damage.h>
+#endif
+
+#include "pipe/p_screen.h"
+#include "state_tracker/drm_api.h"
+
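+/* Note: relies on a ScrnInfoPtr named pScrn being in scope at the call site. */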
+#define DRV_ERROR(msg) xf86DrvMsg(pScrn->scrnIndex, X_ERROR, msg);
+
+typedef struct
+{
+ int lastInstance;
+ int refCount;
+ ScrnInfoPtr pScrn_1;
+ ScrnInfoPtr pScrn_2;
+} EntRec, *EntPtr;
+
+typedef struct _modesettingRec
+{
+ /* drm */
+ int fd;
+ unsigned fb_id;
+
+ /* X */
+ EntPtr entityPrivate;
+
+ int Chipset;
+ EntityInfoPtr pEnt;
+ struct pci_device *PciInfo;
+
+ Bool noAccel;
+ Bool SWCursor;
+ CloseScreenProcPtr CloseScreen;
+
+ /* Broken-out options. */
+ OptionInfoPtr Options;
+
+ unsigned int SaveGeneration;
+
+ void (*blockHandler)(int, pointer, pointer, pointer);
+ CreateScreenResourcesProcPtr createScreenResources;
+
+ /* gallium */
+ struct drm_api *api;
+ struct pipe_screen *screen;
+ struct pipe_context *ctx;
+ boolean d_depth_bits_last;
+ boolean ds_depth_bits_last;
+
+ /* exa */
+ void *exa;
+ Bool noEvict;
+
+#ifdef DRM_MODE_FEATURE_DIRTYFB
+ DamagePtr damage;
+#endif
+} modesettingRec, *modesettingPtr;
+
+#define modesettingPTR(p) ((modesettingPtr)((p)->driverPrivate))
+
+
+/***********************************************************************
+ * xorg_exa.c
+ */
+struct pipe_texture *
+xorg_exa_get_texture(PixmapPtr pPixmap);
+
+unsigned
+xorg_exa_get_pixmap_handle(PixmapPtr pPixmap, unsigned *stride);
+
+int
+xorg_exa_set_displayed_usage(PixmapPtr pPixmap);
+
+int
+xorg_exa_set_shared_usage(PixmapPtr pPixmap);
+
+void *
+xorg_exa_init(ScrnInfoPtr pScrn);
+
+void
+xorg_exa_close(ScrnInfoPtr pScrn);
+
+
+/***********************************************************************
+ * xorg_dri2.c
+ */
+Bool
+driScreenInit(ScreenPtr pScreen);
+
+void
+driCloseScreen(ScreenPtr pScreen);
+
+
+/***********************************************************************
+ * xorg_crtc.c
+ */
+void
+crtc_init(ScrnInfoPtr pScrn);
+
+void
+crtc_cursor_destroy(xf86CrtcPtr crtc);
+
+
+/***********************************************************************
+ * xorg_output.c
+ */
+void
+output_init(ScrnInfoPtr pScrn);
+
+
+#endif /* _XORG_TRACKER_H_ */
diff --git a/src/gallium/state_trackers/xorg/xorg_winsys.h b/src/gallium/state_trackers/xorg/xorg_winsys.h
new file mode 100644
index 0000000000..47ee4b9ffd
--- /dev/null
+++ b/src/gallium/state_trackers/xorg/xorg_winsys.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Author: Alan Hourihane <alanh@tungstengraphics.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ */
+
+/*
+ * File with all the junk needed to personalize the xorg driver.
+ */
+
+#ifndef _XORG_WINSYS_H_
+#define _XORG_WINSYS_H_
+
+#include "xorg-server.h"
+#include "xf86.h"
+#include "pciaccess.h"
+
+#ifndef XSERVER_LIBPCIACCESS
+#error "libpciaccess needed"
+#endif
+
+void xorg_tracker_set_functions(ScrnInfoPtr scrn);
+const OptionInfoRec * xorg_tracker_available_options(int chipid, int busid);
+
+#endif
diff --git a/src/gallium/winsys/Makefile b/src/gallium/winsys/Makefile
new file mode 100644
index 0000000000..bce5b3f9e0
--- /dev/null
+++ b/src/gallium/winsys/Makefile
@@ -0,0 +1,12 @@
+# src/gallium/winsys/Makefile
+TOP = ../../..
+include $(TOP)/configs/current
+
+SUBDIRS = $(GALLIUM_WINSYS_DIRS)
+
+default install clean:
+ @for dir in $(SUBDIRS) ; do \
+ if [ -d $$dir ] ; then \
+ (cd $$dir && $(MAKE) $@) || exit 1; \
+ fi \
+ done
diff --git a/src/gallium/winsys/SConscript b/src/gallium/winsys/SConscript
new file mode 100644
index 0000000000..30c3378dff
--- /dev/null
+++ b/src/gallium/winsys/SConscript
@@ -0,0 +1,16 @@
+Import('*')
+
+if env['dri']:
+ SConscript([
+ 'drm/SConscript',
+ ])
+
+if 'xlib' in env['winsys']:
+ SConscript([
+ 'xlib/SConscript',
+ ])
+
+if 'gdi' in env['winsys']:
+ SConscript([
+ 'gdi/SConscript',
+ ])
diff --git a/src/gallium/winsys/drm/Makefile b/src/gallium/winsys/drm/Makefile
new file mode 100644
index 0000000000..fee0191643
--- /dev/null
+++ b/src/gallium/winsys/drm/Makefile
@@ -0,0 +1,12 @@
+# src/gallium/winsys/drm/Makefile
+TOP = ../../../..
+include $(TOP)/configs/current
+
+SUBDIRS = $(GALLIUM_WINSYS_DRM_DIRS)
+
+default install clean:
+ @for dir in $(SUBDIRS) ; do \
+ if [ -d $$dir ] ; then \
+ (cd $$dir && $(MAKE) $@) || exit 1; \
+ fi \
+ done
diff --git a/src/gallium/winsys/drm/Makefile.template b/src/gallium/winsys/drm/Makefile.template
new file mode 100644
index 0000000000..9635c3c50e
--- /dev/null
+++ b/src/gallium/winsys/drm/Makefile.template
@@ -0,0 +1,126 @@
+# -*-makefile-*-
+
+MESA_MODULES = \
+ $(TOP)/src/mesa/libmesagallium.a \
+ $(GALLIUM_AUXILIARIES)
+
+COMMON_GALLIUM_SOURCES = \
+ $(TOP)/src/mesa/drivers/dri/common/utils.c \
+ $(TOP)/src/mesa/drivers/dri/common/vblank.c \
+ $(TOP)/src/mesa/drivers/dri/common/dri_util.c \
+ $(TOP)/src/mesa/drivers/dri/common/xmlconfig.c
+
+COMMON_SOURCES = $(COMMON_GALLIUM_SOURCES) \
+ $(TOP)/src/mesa/drivers/common/driverfuncs.c \
+ $(TOP)/src/mesa/drivers/dri/common/texmem.c \
+ $(TOP)/src/mesa/drivers/dri/common/drirenderbuffer.c
+
+COMMON_BM_SOURCES = \
+ $(TOP)/src/mesa/drivers/dri/common/dri_bufmgr.c \
+ $(TOP)/src/mesa/drivers/dri/common/dri_drmpool.c
+
+
+ifeq ($(WINDOW_SYSTEM),dri)
+WINOBJ=
+WINLIB=
+INCLUDES = $(SHARED_INCLUDES) $(EXPAT_INCLUDES)
+
+OBJECTS = \
+ $(C_SOURCES:.c=.o) \
+ $(ASM_SOURCES:.S=.o)
+
+else
+# miniglx
+WINOBJ=
+WINLIB=-L$(MESA)/src/glx/mini
+MINIGLX_INCLUDES = -I$(TOP)/src/glx/mini
+INCLUDES = $(MINIGLX_INCLUDES) \
+ $(SHARED_INCLUDES) \
+ $(PCIACCESS_CFLAGS)
+
+OBJECTS = $(C_SOURCES:.c=.o) \
+ $(MINIGLX_SOURCES:.c=.o) \
+ $(ASM_SOURCES:.S=.o)
+endif
+
+
+### Include directories
+SHARED_INCLUDES = \
+ -I. \
+ -I$(TOP)/src/mesa/drivers/dri/common \
+ -Iserver \
+ -I$(TOP)/include \
+ -I$(TOP)/include/GL/internal \
+ -I$(TOP)/src/gallium/include \
+ -I$(TOP)/src/gallium/auxiliary \
+ -I$(TOP)/src/gallium/drivers \
+ -I$(TOP)/src/gallium/winsys/common \
+ -I$(TOP)/src/mesa \
+ -I$(TOP)/src/mesa/main \
+ -I$(TOP)/src/mesa/glapi \
+ -I$(TOP)/src/mesa/math \
+ -I$(TOP)/src/mesa/transform \
+ -I$(TOP)/src/mesa/shader \
+ -I$(TOP)/src/mesa/swrast \
+ -I$(TOP)/src/mesa/swrast_setup \
+ -I$(TOP)/src/egl/main \
+ -I$(TOP)/src/egl/drivers/dri \
+ $(LIBDRM_CFLAGS)
+
+
+##### RULES #####
+
+.c.o:
+ $(CC) -c $(INCLUDES) $(CFLAGS) $(DRIVER_DEFINES) $< -o $@
+
+.S.o:
+ $(CC) -c $(INCLUDES) $(CFLAGS) $(DRIVER_DEFINES) $< -o $@
+
+
+##### TARGETS #####
+
+default: depend symlinks $(TOP)/$(LIB_DIR)/gallium/$(LIBNAME)
+
+$(LIBNAME): $(OBJECTS) $(MESA_MODULES) $(PIPE_DRIVERS) $(WINOBJ) Makefile $(TOP)/src/mesa/drivers/dri/Makefile.template
+ $(MKLIB) -noprefix -o $@ \
+ $(OBJECTS) $(PIPE_DRIVERS) \
+ -Wl,--start-group $(MESA_MODULES) -Wl,--end-group \
+ $(WINOBJ) $(DRI_LIB_DEPS) $(DRIVER_EXTRAS)
+
+$(LIBNAME_EGL): $(WINSYS_OBJECTS) $(LIBS)
+ $(MKLIB) -o $(LIBNAME_EGL) \
+ -linker "$(CC)" \
+ -noprefix \
+ $(OBJECTS) $(MKLIB_OPTIONS) $(WINSYS_OBJECTS) $(PIPE_DRIVERS) $(WINOBJ) $(DRI_LIB_DEPS) \
+ --whole-archive $(LIBS) $(GALLIUM_AUXILIARIES) --no-whole-archive $(DRIVER_EXTRAS)
+
+$(TOP)/$(LIB_DIR)/gallium:
+ mkdir -p $@
+
+$(TOP)/$(LIB_DIR)/gallium/$(LIBNAME): $(LIBNAME) $(TOP)/$(LIB_DIR)/gallium
+ $(INSTALL) $(LIBNAME) $(TOP)/$(LIB_DIR)/gallium
+
+depend: $(C_SOURCES) $(ASM_SOURCES) $(SYMLINKS)
+ rm -f depend
+ touch depend
+ $(MKDEP) $(MKDEP_OPTIONS) $(DRIVER_DEFINES) $(INCLUDES) $(C_SOURCES) \
+ $(ASM_SOURCES) 2> /dev/null
+
+
+# Emacs tags
+tags:
+ etags `find . -name \*.[ch]` `find ../include`
+
+
+# Remove .o and backup files
+clean:
+ -rm -f *.o */*.o *~ *.so *~ server/*.o $(SYMLINKS)
+ -rm -f depend depend.bak
+
+
+install: $(LIBNAME)
+ $(INSTALL) -d $(DESTDIR)$(DRI_DRIVER_INSTALL_DIR)
+ $(MINSTALL) -m 755 $(LIBNAME) $(DESTDIR)$(DRI_DRIVER_INSTALL_DIR)
+
+
+include depend
diff --git a/src/gallium/winsys/drm/SConscript b/src/gallium/winsys/drm/SConscript
new file mode 100644
index 0000000000..a9e9f2682a
--- /dev/null
+++ b/src/gallium/winsys/drm/SConscript
@@ -0,0 +1,59 @@
+Import('*')
+
+if env['dri']:
+
+ drienv = env.Clone()
+
+ drienv.Replace(CPPPATH = [
+ '#src/mesa/drivers/dri/common',
+ '#include',
+ '#include/GL/internal',
+ '#src/gallium/include',
+ '#src/gallium/auxiliary',
+ '#src/gallium/drivers',
+ '#src/mesa',
+ '#src/mesa/main',
+ '#src/mesa/glapi',
+ '#src/mesa/math',
+ '#src/mesa/transform',
+ '#src/mesa/shader',
+ '#src/mesa/swrast',
+ '#src/mesa/swrast_setup',
+ '#src/egl/main',
+ '#src/egl/drivers/dri',
+ ])
+
+ drienv.ParseConfig('pkg-config --cflags --libs libdrm')
+
+ COMMON_GALLIUM_SOURCES = [
+ '#src/mesa/drivers/dri/common/utils.c',
+ '#src/mesa/drivers/dri/common/vblank.c',
+ '#src/mesa/drivers/dri/common/dri_util.c',
+ '#src/mesa/drivers/dri/common/xmlconfig.c',
+ ]
+
+ COMMON_BM_SOURCES = [
+ '#src/mesa/drivers/dri/common/dri_bufmgr.c',
+ '#src/mesa/drivers/dri/common/dri_drmpool.c',
+ ]
+
+ Export([
+ 'drienv',
+ 'COMMON_GALLIUM_SOURCES',
+ 'COMMON_BM_SOURCES',
+ ])
+
+ # TODO: Installation
+ #install: $(LIBNAME)
+ # $(INSTALL) -d $(DRI_DRIVER_INSTALL_DIR)
+ # $(INSTALL) -m 755 $(LIBNAME) $(DRI_DRIVER_INSTALL_DIR)
+
+ if 'intel' in env['winsys']:
+ SConscript([
+ 'intel/SConscript',
+ ])
+
+ if 'radeon' in env['winsys']:
+ SConscript([
+ 'radeon/SConscript',
+ ])
diff --git a/src/gallium/winsys/drm/intel/Makefile b/src/gallium/winsys/drm/intel/Makefile
new file mode 100644
index 0000000000..d8feef6824
--- /dev/null
+++ b/src/gallium/winsys/drm/intel/Makefile
@@ -0,0 +1,12 @@
+# src/gallium/winsys/drm/intel/Makefile
+TOP = ../../../../..
+include $(TOP)/configs/current
+
+SUBDIRS = gem $(GALLIUM_STATE_TRACKERS_DIRS)
+
+default install clean:
+ @for dir in $(SUBDIRS) ; do \
+ if [ -d $$dir ] ; then \
+ (cd $$dir && $(MAKE) $@) || exit 1; \
+ fi \
+ done
diff --git a/src/gallium/winsys/drm/intel/SConscript b/src/gallium/winsys/drm/intel/SConscript
new file mode 100644
index 0000000000..50d7b75ed6
--- /dev/null
+++ b/src/gallium/winsys/drm/intel/SConscript
@@ -0,0 +1,7 @@
+Import('*')
+
+SConscript(['gem/SConscript',])
+
+if 'mesa' in env['statetrackers']:
+
+ SConscript(['dri/SConscript'])
diff --git a/src/gallium/winsys/drm/intel/dri/Makefile b/src/gallium/winsys/drm/intel/dri/Makefile
new file mode 100644
index 0000000000..5e212b62a4
--- /dev/null
+++ b/src/gallium/winsys/drm/intel/dri/Makefile
@@ -0,0 +1,27 @@
+TOP = ../../../../../..
+include $(TOP)/configs/current
+
+LIBNAME = i915_dri.so
+
+PIPE_DRIVERS = \
+ $(TOP)/src/gallium/state_trackers/dri/libdridrm.a \
+ $(TOP)/src/gallium/winsys/drm/intel/gem/libinteldrm.a \
+ $(TOP)/src/gallium/drivers/trace/libtrace.a \
+ $(TOP)/src/gallium/drivers/softpipe/libsoftpipe.a \
+ $(TOP)/src/gallium/drivers/identity/libidentity.a \
+ $(TOP)/src/gallium/drivers/i915simple/libi915simple.a
+
+
+DRIVER_SOURCES =
+
+C_SOURCES = \
+ $(COMMON_GALLIUM_SOURCES) \
+ $(DRIVER_SOURCES)
+
+include ../../Makefile.template
+
+DRI_LIB_DEPS += -ldrm_intel
+
+symlinks: $(TOP)/$(LIB_DIR)/gallium
+ @rm -f $(TOP)/$(LIB_DIR)/gallium/i965_dri.so
+ ln -s i915_dri.so $(TOP)/$(LIB_DIR)/gallium/i965_dri.so
diff --git a/src/gallium/winsys/drm/intel/dri/SConscript b/src/gallium/winsys/drm/intel/dri/SConscript
new file mode 100644
index 0000000000..f973811072
--- /dev/null
+++ b/src/gallium/winsys/drm/intel/dri/SConscript
@@ -0,0 +1,20 @@
+Import('*')
+
+env = drienv.Clone()
+
+env.ParseConfig('pkg-config --cflags --libs libdrm_intel')
+
+drivers = [
+ st_dri,
+ inteldrm,
+ softpipe,
+ i915simple,
+ trace,
+]
+
+env.LoadableModule(
+ target ='i915_dri.so',
+ source = COMMON_GALLIUM_SOURCES,
+ LIBS = drivers + mesa + auxiliaries + env['LIBS'],
+ SHLIBPREFIX = '',
+)
diff --git a/src/gallium/winsys/drm/intel/egl/Makefile b/src/gallium/winsys/drm/intel/egl/Makefile
new file mode 100644
index 0000000000..490baded66
--- /dev/null
+++ b/src/gallium/winsys/drm/intel/egl/Makefile
@@ -0,0 +1,29 @@
+TOP = ../../../../../..
+GALLIUMDIR = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = EGL_i915.so
+
+PIPE_DRIVERS = \
+ $(TOP)/src/gallium/state_trackers/egl/libegldrm.a \
+ $(GALLIUMDIR)/winsys/drm/intel/gem/libinteldrm.a \
+ $(TOP)/src/gallium/drivers/softpipe/libsoftpipe.a \
+ $(TOP)/src/gallium/drivers/trace/libtrace.a \
+ $(TOP)/src/gallium/drivers/i915simple/libi915simple.a
+
+DRIVER_SOURCES =
+
+C_SOURCES = \
+ $(COMMON_GALLIUM_SOURCES) \
+ $(DRIVER_SOURCES)
+
+DRIVER_EXTRAS = -ldrm_intel
+
+ASM_SOURCES =
+
+DRIVER_DEFINES = -I../gem $(shell pkg-config libdrm --atleast-version=2.3.1 \
+ && echo "-DDRM_VBLANK_FLIP=DRM_VBLANK_FLIP")
+
+include ../../Makefile.template
+
+symlinks:
diff --git a/src/gallium/winsys/drm/intel/gem/Makefile b/src/gallium/winsys/drm/intel/gem/Makefile
new file mode 100644
index 0000000000..0d6d4e37db
--- /dev/null
+++ b/src/gallium/winsys/drm/intel/gem/Makefile
@@ -0,0 +1,16 @@
+TOP = ../../../../../..
+include $(TOP)/configs/current
+
+LIBNAME = inteldrm
+
+C_SOURCES = \
+ intel_drm_batchbuffer.c \
+ intel_drm_buffer.c \
+ intel_drm_fence.c \
+ intel_drm_api.c
+
+LIBRARY_INCLUDES = $(shell pkg-config libdrm --cflags-only-I)
+
+LIBRARY_DEFINES = $(shell pkg-config libdrm --cflags-only-other)
+
+include ../../../../Makefile.template
diff --git a/src/gallium/winsys/drm/intel/gem/SConscript b/src/gallium/winsys/drm/intel/gem/SConscript
new file mode 100644
index 0000000000..26717f391f
--- /dev/null
+++ b/src/gallium/winsys/drm/intel/gem/SConscript
@@ -0,0 +1,17 @@
+Import('*')
+
+env = drienv.Clone()
+
+inteldrm_sources = [
+ 'intel_drm_api.c',
+ 'intel_drm_batchbuffer.c',
+ 'intel_drm_buffer.c',
+ 'intel_drm_fence.c',
+]
+
+inteldrm = env.ConvenienceLibrary(
+ target ='inteldrm',
+ source = inteldrm_sources,
+)
+
+Export('inteldrm')
diff --git a/src/gallium/winsys/drm/intel/gem/intel_drm_api.c b/src/gallium/winsys/drm/intel/gem/intel_drm_api.c
new file mode 100644
index 0000000000..4c5a1d2ea8
--- /dev/null
+++ b/src/gallium/winsys/drm/intel/gem/intel_drm_api.c
@@ -0,0 +1,202 @@
+
+#include "state_tracker/drm_api.h"
+
+#include "intel_drm_winsys.h"
+#include "util/u_memory.h"
+
+#include "i915simple/i915_context.h"
+#include "i915simple/i915_screen.h"
+
+
+/*
+ * Helper functions
+ */
+
+
+static void
+intel_drm_get_device_id(unsigned int *device_id)
+{
+ char path[512];
+ FILE *file;
+ void *shutup_gcc;
+
+ /*
+ * FIXME: Fix this up to use a drm ioctl or whatever.
+ */
+
+ snprintf(path, sizeof(path), "/sys/class/drm/card0/device/device");
+ file = fopen(path, "r");
+ if (!file) {
+ return;
+ }
+
+ shutup_gcc = fgets(path, sizeof(path), file);
+ sscanf(path, "%x", device_id);
+ fclose(file);
+}
+
+static struct intel_buffer *
+intel_drm_buffer_from_handle(struct intel_drm_winsys *idws,
+ const char* name, unsigned handle)
+{
+ struct intel_drm_buffer *buf = CALLOC_STRUCT(intel_drm_buffer);
+
+ if (!buf)
+ return NULL;
+
+ buf->magic = 0xDEAD1337;
+ buf->bo = drm_intel_bo_gem_create_from_name(idws->pools.gem, name, handle);
+ buf->flinked = TRUE;
+ buf->flink = handle;
+
+ if (!buf->bo)
+ goto err;
+
+ return (struct intel_buffer *)buf;
+
+err:
+ FREE(buf);
+ return NULL;
+}
+
+
+/*
+ * Exported functions
+ */
+
+
+static struct pipe_texture *
+intel_drm_texture_from_shared_handle(struct drm_api *api,
+ struct pipe_screen *screen,
+ struct pipe_texture *templ,
+ const char* name,
+ unsigned pitch,
+ unsigned handle)
+{
+ struct intel_drm_winsys *idws = intel_drm_winsys(i915_screen(screen)->iws);
+ struct intel_buffer *buffer;
+
+ buffer = intel_drm_buffer_from_handle(idws, name, handle);
+ if (!buffer)
+ return NULL;
+
+ return i915_texture_blanket_intel(screen, templ, pitch, buffer);
+}
+
+static boolean
+intel_drm_shared_handle_from_texture(struct drm_api *api,
+ struct pipe_screen *screen,
+ struct pipe_texture *texture,
+ unsigned *pitch,
+ unsigned *handle)
+{
+ struct intel_drm_buffer *buf = NULL;
+ struct intel_buffer *buffer = NULL;
+ if (!i915_get_texture_buffer_intel(texture, &buffer, pitch))
+ return FALSE;
+
+ buf = intel_drm_buffer(buffer);
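+   /* Flink the bo lazily: create a global name only the first time a shared
+    * handle is requested. */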
+ if (!buf->flinked) {
+ if (drm_intel_bo_flink(buf->bo, &buf->flink))
+ return FALSE;
+ buf->flinked = TRUE;
+ }
+
+ *handle = buf->flink;
+
+ return TRUE;
+}
+
+static boolean
+intel_drm_local_handle_from_texture(struct drm_api *api,
+ struct pipe_screen *screen,
+ struct pipe_texture *texture,
+ unsigned *pitch,
+ unsigned *handle)
+{
+ struct intel_buffer *buffer = NULL;
+ if (!i915_get_texture_buffer_intel(texture, &buffer, pitch))
+ return FALSE;
+
+ *handle = intel_drm_buffer(buffer)->bo->handle;
+
+ return TRUE;
+}
+
+static void
+intel_drm_winsys_destroy(struct intel_winsys *iws)
+{
+ struct intel_drm_winsys *idws = intel_drm_winsys(iws);
+
+ drm_intel_bufmgr_destroy(idws->pools.gem);
+
+ FREE(idws);
+}
+
+static struct pipe_screen *
+intel_drm_create_screen(struct drm_api *api, int drmFD,
+ struct drm_create_screen_arg *arg)
+{
+ struct intel_drm_winsys *idws;
+ unsigned int deviceID;
+
+ if (arg != NULL) {
+ switch(arg->mode) {
+ case DRM_CREATE_NORMAL:
+ break;
+ default:
+ return NULL;
+ }
+ }
+
+ idws = CALLOC_STRUCT(intel_drm_winsys);
+ if (!idws)
+ return NULL;
+
+ intel_drm_get_device_id(&deviceID);
+
+ intel_drm_winsys_init_batchbuffer_functions(idws);
+ intel_drm_winsys_init_buffer_functions(idws);
+ intel_drm_winsys_init_fence_functions(idws);
+
+ idws->fd = drmFD;
+ idws->id = deviceID;
+ idws->max_batch_size = 16 * 4096;
+
+ idws->base.destroy = intel_drm_winsys_destroy;
+
+ idws->pools.gem = drm_intel_bufmgr_gem_init(idws->fd, idws->max_batch_size);
+
+ idws->softpipe = FALSE;
+ idws->dump_cmd = debug_get_bool_option("INTEL_DUMP_CMD", FALSE);
+
+ return i915_create_screen(&idws->base, deviceID);
+}
+
+static struct pipe_context *
+intel_drm_create_context(struct drm_api *api, struct pipe_screen *screen)
+{
+ return i915_create_context(screen);
+}
+
+static void
+destroy(struct drm_api *api)
+{
+
+}
+
+struct drm_api intel_drm_api =
+{
+ .create_context = intel_drm_create_context,
+ .create_screen = intel_drm_create_screen,
+ .texture_from_shared_handle = intel_drm_texture_from_shared_handle,
+ .shared_handle_from_texture = intel_drm_shared_handle_from_texture,
+ .local_handle_from_texture = intel_drm_local_handle_from_texture,
+ .destroy = destroy,
+};
+
+struct drm_api *
+drm_api_create()
+{
+ return &intel_drm_api;
+}
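
A minimal sketch, for illustration only, of the ioctl-based device-id query that the FIXME in intel_drm_get_device_id() above alludes to. It assumes libdrm's drmCommandWriteRead() and the I915_GETPARAM interface from i915_drm.h; the helper name is hypothetical and is not part of this patch.

#include <string.h>
#include "xf86drm.h"
#include "i915_drm.h"

/* Hypothetical helper: ask the kernel for the PCI chipset id instead of
 * parsing sysfs. Returns 0 on failure. */
static unsigned int
intel_drm_get_device_id_ioctl(int fd)
{
   struct drm_i915_getparam gp;
   int devid = 0;

   memset(&gp, 0, sizeof(gp));
   gp.param = I915_PARAM_CHIPSET_ID;
   gp.value = &devid;

   if (drmCommandWriteRead(fd, DRM_I915_GETPARAM, &gp, sizeof(gp)) != 0)
      return 0;

   return (unsigned int)devid;
}
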
diff --git a/src/gallium/winsys/drm/intel/gem/intel_drm_batchbuffer.c b/src/gallium/winsys/drm/intel/gem/intel_drm_batchbuffer.c
new file mode 100644
index 0000000000..ebd1b607b7
--- /dev/null
+++ b/src/gallium/winsys/drm/intel/gem/intel_drm_batchbuffer.c
@@ -0,0 +1,229 @@
+
+#include "intel_drm_winsys.h"
+#include "util/u_memory.h"
+
+#include "i915_drm.h"
+
+#define BATCH_RESERVED 16
+
+#define INTEL_DEFAULT_RELOCS 100
+#define INTEL_MAX_RELOCS 400
+
+#define INTEL_BATCH_NO_CLIPRECTS 0x1
+#define INTEL_BATCH_CLIPRECTS 0x2
+
+#undef INTEL_RUN_SYNC
+#undef INTEL_MAP_BATCHBUFFER
+
+struct intel_drm_batchbuffer
+{
+ struct intel_batchbuffer base;
+
+ size_t actual_size;
+
+ drm_intel_bo *bo;
+};
+
+static INLINE struct intel_drm_batchbuffer *
+intel_drm_batchbuffer(struct intel_batchbuffer *batch)
+{
+ return (struct intel_drm_batchbuffer *)batch;
+}
+
+static void
+intel_drm_batchbuffer_reset(struct intel_drm_batchbuffer *batch)
+{
+ struct intel_drm_winsys *idws = intel_drm_winsys(batch->base.iws);
+
+ if (batch->bo)
+ drm_intel_bo_unreference(batch->bo);
+ batch->bo = drm_intel_bo_alloc(idws->pools.gem,
+ "gallium3d_batchbuffer",
+ batch->actual_size,
+ 4096);
+
+#ifdef INTEL_MAP_BATCHBUFFER
+ drm_intel_bo_map(batch->bo, TRUE);
+ batch->base.map = batch->bo->virtual;
+#endif
+
+ memset(batch->base.map, 0, batch->actual_size);
+ batch->base.ptr = batch->base.map;
+ batch->base.size = batch->actual_size - BATCH_RESERVED;
+ batch->base.relocs = 0;
+}
+
+static struct intel_batchbuffer *
+intel_drm_batchbuffer_create(struct intel_winsys *iws)
+{
+ struct intel_drm_winsys *idws = intel_drm_winsys(iws);
+   struct intel_drm_batchbuffer *batch = CALLOC_STRUCT(intel_drm_batchbuffer);
+
+   if (!batch)
+      return NULL;
+
+   batch->actual_size = idws->max_batch_size;
+
+#ifdef INTEL_MAP_BATCHBUFFER
+ batch->base.map = NULL;
+#else
+ batch->base.map = MALLOC(batch->actual_size);
+#endif
+ batch->base.ptr = NULL;
+ batch->base.size = 0;
+
+ batch->base.relocs = 0;
+   batch->base.max_relocs = 300; /* INTEL_DEFAULT_RELOCS */
+
+ batch->base.iws = iws;
+
+ intel_drm_batchbuffer_reset(batch);
+
+ return &batch->base;
+}
+
+static int
+intel_drm_batchbuffer_reloc(struct intel_batchbuffer *ibatch,
+ struct intel_buffer *buffer,
+ enum intel_buffer_usage usage,
+ unsigned pre_add)
+{
+ struct intel_drm_batchbuffer *batch = intel_drm_batchbuffer(ibatch);
+ unsigned write_domain = 0;
+ unsigned read_domain = 0;
+ unsigned offset;
+ int ret = 0;
+
+ assert(batch->base.relocs < batch->base.max_relocs);
+
+ if (usage == INTEL_USAGE_SAMPLER) {
+ write_domain = 0;
+ read_domain = I915_GEM_DOMAIN_SAMPLER;
+
+ } else if (usage == INTEL_USAGE_RENDER) {
+ write_domain = I915_GEM_DOMAIN_RENDER;
+ read_domain = I915_GEM_DOMAIN_RENDER;
+
+ } else if (usage == INTEL_USAGE_2D_TARGET) {
+ write_domain = I915_GEM_DOMAIN_RENDER;
+ read_domain = I915_GEM_DOMAIN_RENDER;
+
+ } else if (usage == INTEL_USAGE_2D_SOURCE) {
+ write_domain = 0;
+ read_domain = I915_GEM_DOMAIN_RENDER;
+
+ } else if (usage == INTEL_USAGE_VERTEX) {
+ write_domain = 0;
+ read_domain = I915_GEM_DOMAIN_VERTEX;
+
+ } else {
+ assert(0);
+ return -1;
+ }
+
+ offset = (unsigned)(batch->base.ptr - batch->base.map);
+
+ ret = drm_intel_bo_emit_reloc(batch->bo, offset,
+ intel_bo(buffer), pre_add,
+ read_domain,
+ write_domain);
+
+ ((uint32_t*)batch->base.ptr)[0] = intel_bo(buffer)->offset + pre_add;
+ batch->base.ptr += 4;
+
+ if (!ret)
+ batch->base.relocs++;
+
+ return ret;
+}
+
+static void
+intel_drm_batchbuffer_flush(struct intel_batchbuffer *ibatch,
+ struct pipe_fence_handle **fence)
+{
+ struct intel_drm_batchbuffer *batch = intel_drm_batchbuffer(ibatch);
+ unsigned used = 0;
+ int ret = 0;
+ int i;
+
+ assert(intel_batchbuffer_space(ibatch) >= 0);
+
+ used = batch->base.ptr - batch->base.map;
+ assert((used & 3) == 0);
+
+ if (used & 4) {
+ // MI_FLUSH | FLUSH_MAP_CACHE;
+ intel_batchbuffer_dword(ibatch, (0x0<<29)|(0x4<<23)|(1<<0));
+ // MI_NOOP
+ intel_batchbuffer_dword(ibatch, (0x0<<29)|(0x0<<23));
+ // MI_BATCH_BUFFER_END;
+ intel_batchbuffer_dword(ibatch, (0x0<<29)|(0xA<<23));
+ } else {
+ //MI_FLUSH | FLUSH_MAP_CACHE;
+ intel_batchbuffer_dword(ibatch, (0x0<<29)|(0x4<<23)|(1<<0));
+ // MI_BATCH_BUFFER_END;
+ intel_batchbuffer_dword(ibatch, (0x0<<29)|(0xA<<23));
+ }
+
+ used = batch->base.ptr - batch->base.map;
+
+#ifdef INTEL_MAP_BATCHBUFFER
+ drm_intel_bo_unmap(batch->bo);
+#else
+ drm_intel_bo_subdata(batch->bo, 0, used, batch->base.map);
+#endif
+
+   /* Submit the batch to the hardware. */
+ ret = drm_intel_bo_exec(batch->bo, used, NULL, 0, 0);
+ assert(ret == 0);
+
+ if (intel_drm_winsys(ibatch->iws)->dump_cmd) {
+ unsigned *ptr;
+ drm_intel_bo_map(batch->bo, FALSE);
+ ptr = (unsigned*)batch->bo->virtual;
+
+ debug_printf("%s:\n", __func__);
+ for (i = 0; i < used / 4; i++, ptr++) {
+ debug_printf("\t%08x: %08x\n", i*4, *ptr);
+ }
+
+ drm_intel_bo_unmap(batch->bo);
+ } else {
+#ifdef INTEL_RUN_SYNC
+ drm_intel_bo_map(batch->bo, FALSE);
+ drm_intel_bo_unmap(batch->bo);
+#endif
+ }
+
+ if (fence) {
+ ibatch->iws->fence_reference(ibatch->iws, fence, NULL);
+
+#ifdef INTEL_RUN_SYNC
+      /* we run synchronously with the GPU, so just pass NULL */
+ (*fence) = intel_drm_fence_create(NULL);
+#else
+ (*fence) = intel_drm_fence_create(batch->bo);
+#endif
+ }
+
+ intel_drm_batchbuffer_reset(batch);
+}
+
+static void
+intel_drm_batchbuffer_destroy(struct intel_batchbuffer *ibatch)
+{
+ struct intel_drm_batchbuffer *batch = intel_drm_batchbuffer(ibatch);
+
+ if (batch->bo)
+ drm_intel_bo_unreference(batch->bo);
+
+#ifndef INTEL_MAP_BATCHBUFFER
+ FREE(batch->base.map);
+#endif
+ FREE(batch);
+}
+
+void intel_drm_winsys_init_batchbuffer_functions(struct intel_drm_winsys *idws)
+{
+ idws->base.batchbuffer_create = intel_drm_batchbuffer_create;
+ idws->base.batchbuffer_reloc = intel_drm_batchbuffer_reloc;
+ idws->base.batchbuffer_flush = intel_drm_batchbuffer_flush;
+ idws->base.batchbuffer_destroy = intel_drm_batchbuffer_destroy;
+}
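
For illustration, a small sketch of how a caller would drive the batchbuffer hooks wired up above; the function name and the single MI_NOOP payload are hypothetical, and intel_batchbuffer_dword() is the i915simple helper already used in the flush path.

/* Hypothetical caller: build and submit a trivial batch. */
static void
submit_noop_batch(struct intel_winsys *iws)
{
   struct intel_batchbuffer *batch = iws->batchbuffer_create(iws);

   intel_batchbuffer_dword(batch, 0x0); /* MI_NOOP */

   /* flush pads the batch, submits it and resets it for reuse */
   iws->batchbuffer_flush(batch, NULL);

   iws->batchbuffer_destroy(batch);
}
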
diff --git a/src/gallium/winsys/drm/intel/gem/intel_drm_buffer.c b/src/gallium/winsys/drm/intel/gem/intel_drm_buffer.c
new file mode 100644
index 0000000000..0030f915a3
--- /dev/null
+++ b/src/gallium/winsys/drm/intel/gem/intel_drm_buffer.c
@@ -0,0 +1,148 @@
+
+#include "intel_drm_winsys.h"
+#include "util/u_memory.h"
+
+#include "i915_drm.h"
+
+static struct intel_buffer *
+intel_drm_buffer_create(struct intel_winsys *iws,
+ unsigned size, unsigned alignment,
+ enum intel_buffer_type type)
+{
+ struct intel_drm_buffer *buf = CALLOC_STRUCT(intel_drm_buffer);
+ struct intel_drm_winsys *idws = intel_drm_winsys(iws);
+ drm_intel_bufmgr *pool;
+ char *name;
+
+ if (!buf)
+ return NULL;
+
+ buf->magic = 0xDEAD1337;
+ buf->flinked = FALSE;
+ buf->flink = 0;
+ buf->map_gtt = FALSE;
+
+ if (type == INTEL_NEW_TEXTURE) {
+ name = "gallium3d_texture";
+ pool = idws->pools.gem;
+ } else if (type == INTEL_NEW_VERTEX) {
+ name = "gallium3d_vertex";
+ pool = idws->pools.gem;
+ buf->map_gtt = TRUE;
+ } else if (type == INTEL_NEW_SCANOUT) {
+ name = "gallium3d_scanout";
+ pool = idws->pools.gem;
+ buf->map_gtt = TRUE;
+ } else {
+ assert(0);
+ name = "gallium3d_unknown";
+ pool = idws->pools.gem;
+ }
+
+ buf->bo = drm_intel_bo_alloc(pool, name, size, alignment);
+
+ if (!buf->bo)
+ goto err;
+
+ return (struct intel_buffer *)buf;
+
+err:
+ assert(0);
+ FREE(buf);
+ return NULL;
+}
+
+static int
+intel_drm_buffer_set_fence_reg(struct intel_winsys *iws,
+ struct intel_buffer *buffer,
+ unsigned stride,
+ enum intel_buffer_tile tile)
+{
+ assert(I915_TILING_NONE == INTEL_TILE_NONE);
+ assert(I915_TILING_X == INTEL_TILE_X);
+ assert(I915_TILING_Y == INTEL_TILE_Y);
+
+ return drm_intel_bo_set_tiling(intel_bo(buffer), &tile, stride);
+}
+
+static void *
+intel_drm_buffer_map(struct intel_winsys *iws,
+ struct intel_buffer *buffer,
+ boolean write)
+{
+ struct intel_drm_buffer *buf = intel_drm_buffer(buffer);
+ drm_intel_bo *bo = intel_bo(buffer);
+ int ret = 0;
+
+ assert(bo);
+
+ if (buf->map_count)
+ goto out;
+
+ if (buf->map_gtt)
+ ret = drm_intel_gem_bo_map_gtt(bo);
+ else
+ ret = drm_intel_bo_map(bo, write);
+
+ buf->ptr = bo->virtual;
+
+ assert(ret == 0);
+out:
+ if (ret)
+ return NULL;
+
+ buf->map_count++;
+ return buf->ptr;
+}
+
+static void
+intel_drm_buffer_unmap(struct intel_winsys *iws,
+ struct intel_buffer *buffer)
+{
+ struct intel_drm_buffer *buf = intel_drm_buffer(buffer);
+
+ if (--buf->map_count)
+ return;
+
+ if (buf->map_gtt)
+ drm_intel_gem_bo_unmap_gtt(intel_bo(buffer));
+ else
+ drm_intel_bo_unmap(intel_bo(buffer));
+}
+
+static int
+intel_drm_buffer_write(struct intel_winsys *iws,
+ struct intel_buffer *buffer,
+ const void *data,
+ size_t size,
+ size_t offset)
+{
+ struct intel_drm_buffer *buf = intel_drm_buffer(buffer);
+
+ return drm_intel_bo_subdata(buf->bo, offset, size, (void*)data);
+}
+
+static void
+intel_drm_buffer_destroy(struct intel_winsys *iws,
+ struct intel_buffer *buffer)
+{
+ drm_intel_bo_unreference(intel_bo(buffer));
+
+#ifdef DEBUG
+ intel_drm_buffer(buffer)->magic = 0;
+ intel_drm_buffer(buffer)->bo = NULL;
+#endif
+
+ FREE(buffer);
+}
+
+void
+intel_drm_winsys_init_buffer_functions(struct intel_drm_winsys *idws)
+{
+ idws->base.buffer_create = intel_drm_buffer_create;
+ idws->base.buffer_set_fence_reg = intel_drm_buffer_set_fence_reg;
+ idws->base.buffer_map = intel_drm_buffer_map;
+ idws->base.buffer_unmap = intel_drm_buffer_unmap;
+ idws->base.buffer_write = intel_drm_buffer_write;
+ idws->base.buffer_destroy = intel_drm_buffer_destroy;
+}
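
A sketch of how the buffer hooks installed above compose; upload_vertices() and the 64-byte alignment are illustrative choices, not part of the patch.

#include <string.h>

/* Hypothetical caller: allocate a vertex buffer and fill it from memory. */
static struct intel_buffer *
upload_vertices(struct intel_winsys *iws, const void *data, unsigned size)
{
   struct intel_buffer *buf;
   void *ptr;

   buf = iws->buffer_create(iws, size, 64, INTEL_NEW_VERTEX);
   if (!buf)
      return NULL;

   ptr = iws->buffer_map(iws, buf, TRUE); /* TRUE: map for writing */
   if (!ptr) {
      iws->buffer_destroy(iws, buf);
      return NULL;
   }

   memcpy(ptr, data, size);
   iws->buffer_unmap(iws, buf);

   return buf;
}
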
diff --git a/src/gallium/winsys/drm/intel/gem/intel_drm_fence.c b/src/gallium/winsys/drm/intel/gem/intel_drm_fence.c
new file mode 100644
index 0000000000..e70bfe7b44
--- /dev/null
+++ b/src/gallium/winsys/drm/intel/gem/intel_drm_fence.c
@@ -0,0 +1,81 @@
+
+#include "intel_drm_winsys.h"
+#include "util/u_memory.h"
+#include "pipe/p_refcnt.h"
+
+/**
+ * Because GEM does not have fences we have to create our own.
+ *
+ * They work by keeping the batchbuffer bo around and checking whether it
+ * has gone idle. If bo is NULL the fence has already expired.
+ */
+struct intel_drm_fence
+{
+ struct pipe_reference reference;
+ drm_intel_bo *bo;
+};
+
+
+struct pipe_fence_handle *
+intel_drm_fence_create(drm_intel_bo *bo)
+{
+ struct intel_drm_fence *fence = CALLOC_STRUCT(intel_drm_fence);
+
+ pipe_reference_init(&fence->reference, 1);
+   /* bo is NULL if the fence has already expired */
+ if (bo) {
+ drm_intel_bo_reference(bo);
+ fence->bo = bo;
+ }
+
+ return (struct pipe_fence_handle *)fence;
+}
+
+static void
+intel_drm_fence_reference(struct intel_winsys *iws,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+ struct intel_drm_fence *old = (struct intel_drm_fence *)*ptr;
+ struct intel_drm_fence *f = (struct intel_drm_fence *)fence;
+
+ if (pipe_reference((struct pipe_reference**)ptr, &f->reference)) {
+ if (old->bo)
+ drm_intel_bo_unreference(old->bo);
+ FREE(old);
+ }
+}
+
+static int
+intel_drm_fence_signalled(struct intel_winsys *iws,
+ struct pipe_fence_handle *fence)
+{
+ assert(0);
+
+ return 0;
+}
+
+static int
+intel_drm_fence_finish(struct intel_winsys *iws,
+ struct pipe_fence_handle *fence)
+{
+ struct intel_drm_fence *f = (struct intel_drm_fence *)fence;
+
+ /* fence already expired */
+ if (!f->bo)
+ return 0;
+
+ drm_intel_bo_wait_rendering(f->bo);
+ drm_intel_bo_unreference(f->bo);
+ f->bo = NULL;
+
+ return 0;
+}
+
+void
+intel_drm_winsys_init_fence_functions(struct intel_drm_winsys *idws)
+{
+ idws->base.fence_reference = intel_drm_fence_reference;
+ idws->base.fence_signalled = intel_drm_fence_signalled;
+ idws->base.fence_finish = intel_drm_fence_finish;
+}
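
To show how the three fence hooks compose, a hypothetical caller that flushes a batch and blocks until the GPU has consumed it; wait_for_batch() is a made-up name used only for illustration.

/* Hypothetical caller: submit a batch and wait for it to go idle. */
static void
wait_for_batch(struct intel_winsys *iws, struct intel_batchbuffer *batch)
{
   struct pipe_fence_handle *fence = NULL;

   /* the flush hands back a fence holding a reference on the batch bo */
   iws->batchbuffer_flush(batch, &fence);

   if (fence) {
      iws->fence_finish(iws, fence);           /* waits for rendering */
      iws->fence_reference(iws, &fence, NULL); /* drops our reference */
   }
}
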
diff --git a/src/gallium/winsys/drm/intel/gem/intel_drm_winsys.h b/src/gallium/winsys/drm/intel/gem/intel_drm_winsys.h
new file mode 100644
index 0000000000..415c45feea
--- /dev/null
+++ b/src/gallium/winsys/drm/intel/gem/intel_drm_winsys.h
@@ -0,0 +1,78 @@
+
+#ifndef INTEL_DRM_WINSYS_H
+#define INTEL_DRM_WINSYS_H
+
+#include "i915simple/intel_batchbuffer.h"
+
+#include "drm.h"
+#include "intel_bufmgr.h"
+
+
+/*
+ * Winsys
+ */
+
+
+struct intel_drm_winsys
+{
+ struct intel_winsys base;
+
+ boolean softpipe;
+ boolean dump_cmd;
+
+   int fd; /**< DRM file descriptor */
+
+ unsigned id;
+
+ size_t max_batch_size;
+
+ struct {
+ drm_intel_bufmgr *gem;
+ } pools;
+};
+
+static INLINE struct intel_drm_winsys *
+intel_drm_winsys(struct intel_winsys *iws)
+{
+ return (struct intel_drm_winsys *)iws;
+}
+
+struct intel_drm_winsys * intel_drm_winsys_create(int fd, unsigned pci_id);
+struct pipe_fence_handle * intel_drm_fence_create(drm_intel_bo *bo);
+
+void intel_drm_winsys_init_batchbuffer_functions(struct intel_drm_winsys *idws);
+void intel_drm_winsys_init_buffer_functions(struct intel_drm_winsys *idws);
+void intel_drm_winsys_init_fence_functions(struct intel_drm_winsys *idws);
+
+
+/*
+ * Buffer
+ */
+
+
+struct intel_drm_buffer {
+ unsigned magic;
+
+ drm_intel_bo *bo;
+
+ void *ptr;
+ unsigned map_count;
+ boolean map_gtt;
+
+ boolean flinked;
+ unsigned flink;
+};
+
+static INLINE struct intel_drm_buffer *
+intel_drm_buffer(struct intel_buffer *buffer)
+{
+ return (struct intel_drm_buffer *)buffer;
+}
+
+static INLINE drm_intel_bo *
+intel_bo(struct intel_buffer *buffer)
+{
+ return intel_drm_buffer(buffer)->bo;
+}
+
+#endif
diff --git a/src/gallium/winsys/drm/intel/xorg/Makefile b/src/gallium/winsys/drm/intel/xorg/Makefile
new file mode 100644
index 0000000000..9e56853b02
--- /dev/null
+++ b/src/gallium/winsys/drm/intel/xorg/Makefile
@@ -0,0 +1,57 @@
+TARGET = modesetting_drv.so
+CFILES = $(wildcard ./*.c)
+OBJECTS = $(patsubst ./%.c,./%.o,$(CFILES))
+TOP = ../../../../../..
+
+include $(TOP)/configs/current
+
+INCLUDES = \
+ $(shell pkg-config --cflags-only-I pixman-1 xorg-server libdrm xproto) \
+ -I../gem \
+ -I$(TOP)/src/gallium/include \
+ -I$(TOP)/src/gallium/drivers \
+ -I$(TOP)/src/gallium/auxiliary \
+ -I$(TOP)/src/mesa \
+ -I$(TOP)/include \
+ -I$(TOP)/src/egl/main
+
+LIBS = \
+ $(TOP)/src/gallium/state_trackers/xorg/libxorgtracker.a \
+ $(TOP)/src/gallium/winsys/drm/intel/gem/libinteldrm.a \
+ $(TOP)/src/gallium/drivers/i915simple/libi915simple.a \
+ $(TOP)/src/gallium/drivers/trace/libtrace.a \
+ $(TOP)/src/gallium/drivers/softpipe/libsoftpipe.a \
+ $(GALLIUM_AUXILIARIES)
+
+DRIVER_DEFINES = \
+ -DHAVE_CONFIG_H
+
+
+#############################################
+
+
+
+all default: $(TARGET)
+
+$(TARGET): $(OBJECTS) Makefile $(TOP)/src/gallium/state_trackers/xorg/libxorgtracker.a $(LIBS)
+ $(TOP)/bin/mklib -noprefix -o $@ \
+ $(OBJECTS) $(LIBS) $(shell pkg-config --libs libdrm) -ldrm_intel
+
+clean:
+ rm -rf $(OBJECTS) $(TARGET)
+
+install:
+ $(INSTALL) -d $(DESTDIR)/$(XORG_DRIVER_INSTALL_DIR)
+ $(MINSTALL) -m 755 $(TARGET) $(DESTDIR)/$(XORG_DRIVER_INSTALL_DIR)
+
+
+##############################################
+
+
+.c.o:
+ $(CC) -c $(CFLAGS) $(INCLUDES) $(DRIVER_DEFINES) $< -o $@
+
+
+##############################################
+
+.PHONY: all clean install
diff --git a/src/gallium/winsys/drm/intel/xorg/intel_xorg.c b/src/gallium/winsys/drm/intel/xorg/intel_xorg.c
new file mode 100644
index 0000000000..369dc356cf
--- /dev/null
+++ b/src/gallium/winsys/drm/intel/xorg/intel_xorg.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Author: Alan Hourihane <alanh@tungstengraphics.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ *
+ */
+
+#include "../../../../state_trackers/xorg/xorg_winsys.h"
+
+static void intel_xorg_identify(int flags);
+static Bool intel_xorg_pci_probe(DriverPtr driver,
+ int entity_num,
+ struct pci_device *device,
+ intptr_t match_data);
+
+static const struct pci_id_match intel_xorg_device_match[] = {
+ {0x8086, PCI_MATCH_ANY, PCI_MATCH_ANY, PCI_MATCH_ANY, 0, 0, 0},
+ {0, 0, 0},
+};
+
+static SymTabRec intel_xorg_chipsets[] = {
+ {PCI_MATCH_ANY, "Intel Graphics Device"},
+ {-1, NULL}
+};
+
+static PciChipsets intel_xorg_pci_devices[] = {
+ {PCI_MATCH_ANY, PCI_MATCH_ANY, NULL},
+ {-1, -1, NULL}
+};
+
+static XF86ModuleVersionInfo intel_xorg_version = {
+ "modesetting",
+ MODULEVENDORSTRING,
+ MODINFOSTRING1,
+ MODINFOSTRING2,
+ XORG_VERSION_CURRENT,
+ 0, 1, 0, /* major, minor, patch */
+ ABI_CLASS_VIDEODRV,
+ ABI_VIDEODRV_VERSION,
+ MOD_CLASS_VIDEODRV,
+ {0, 0, 0, 0}
+};
+
+/*
+ * Xorg driver exported structures
+ */
+
+_X_EXPORT DriverRec modesetting = {
+ 1,
+ "modesetting",
+ intel_xorg_identify,
+ NULL,
+ xorg_tracker_available_options,
+ NULL,
+ 0,
+ NULL,
+ intel_xorg_device_match,
+ intel_xorg_pci_probe
+};
+
+static MODULESETUPPROTO(intel_xorg_setup);
+
+_X_EXPORT XF86ModuleData modesettingModuleData = {
+ &intel_xorg_version,
+ intel_xorg_setup,
+ NULL
+};
+
+/*
+ * Xorg driver functions
+ */
+
+static pointer
+intel_xorg_setup(pointer module, pointer opts, int *errmaj, int *errmin)
+{
+ static Bool setupDone = 0;
+
+ /* This module should be loaded only once, but check to be sure.
+ */
+ if (!setupDone) {
+ setupDone = 1;
+ xf86AddDriver(&modesetting, module, HaveDriverFuncs);
+
+ /*
+ * The return value must be non-NULL on success even though there
+ * is no TearDownProc.
+ */
+ return (pointer) 1;
+ } else {
+ if (errmaj)
+ *errmaj = LDR_ONCEONLY;
+ return NULL;
+ }
+}
+
+static void
+intel_xorg_identify(int flags)
+{
+ xf86PrintChipsets("modesetting", "Driver for Modesetting Kernel Drivers",
+ intel_xorg_chipsets);
+}
+
+static Bool
+intel_xorg_pci_probe(DriverPtr driver,
+ int entity_num, struct pci_device *device, intptr_t match_data)
+{
+ ScrnInfoPtr scrn = NULL;
+ EntityInfoPtr entity;
+
+ scrn = xf86ConfigPciEntity(scrn, 0, entity_num, intel_xorg_pci_devices,
+ NULL, NULL, NULL, NULL, NULL);
+ if (scrn != NULL) {
+ scrn->driverVersion = 1;
+ scrn->driverName = "i915";
+ scrn->name = "modesetting";
+ scrn->Probe = NULL;
+
+ entity = xf86GetEntityInfo(entity_num);
+
+ /* Use all the functions from the xorg tracker */
+ xorg_tracker_set_functions(scrn);
+ }
+ return scrn != NULL;
+}
diff --git a/src/gallium/winsys/drm/nouveau/Makefile b/src/gallium/winsys/drm/nouveau/Makefile
new file mode 100644
index 0000000000..6c9cbef26d
--- /dev/null
+++ b/src/gallium/winsys/drm/nouveau/Makefile
@@ -0,0 +1,12 @@
+# src/gallium/winsys/drm/nouveau/Makefile
+TOP = ../../../../..
+include $(TOP)/configs/current
+
+SUBDIRS = drm $(GALLIUM_STATE_TRACKERS_DIRS)
+
+default install clean:
+ @for dir in $(SUBDIRS) ; do \
+ if [ -d $$dir ] ; then \
+ (cd $$dir && $(MAKE) $@) || exit 1; \
+ fi \
+ done
diff --git a/src/gallium/winsys/drm/nouveau/dri/Makefile b/src/gallium/winsys/drm/nouveau/dri/Makefile
new file mode 100644
index 0000000000..0937f68c34
--- /dev/null
+++ b/src/gallium/winsys/drm/nouveau/dri/Makefile
@@ -0,0 +1,27 @@
+TOP = ../../../../../..
+include $(TOP)/configs/current
+
+LIBNAME = nouveau_dri.so
+
+PIPE_DRIVERS = \
+ $(TOP)/src/gallium/state_trackers/dri/libdridrm.a \
+ $(TOP)/src/gallium/winsys/drm/nouveau/drm/libnouveaudrm.a \
+ $(TOP)/src/gallium/drivers/nv04/libnv04.a \
+ $(TOP)/src/gallium/drivers/nv10/libnv10.a \
+ $(TOP)/src/gallium/drivers/nv20/libnv20.a \
+ $(TOP)/src/gallium/drivers/nv30/libnv30.a \
+ $(TOP)/src/gallium/drivers/nv40/libnv40.a \
+ $(TOP)/src/gallium/drivers/nv50/libnv50.a \
+ $(TOP)/src/gallium/drivers/nouveau/libnouveau.a
+
+DRIVER_SOURCES =
+
+C_SOURCES = \
+ $(COMMON_GALLIUM_SOURCES) \
+ $(DRIVER_SOURCES)
+
+include ../../Makefile.template
+
+DRI_LIB_DEPS += $(shell pkg-config libdrm_nouveau --libs)
+
+symlinks:
diff --git a/src/gallium/winsys/drm/nouveau/drm/Makefile b/src/gallium/winsys/drm/nouveau/drm/Makefile
new file mode 100644
index 0000000000..54c3b26c75
--- /dev/null
+++ b/src/gallium/winsys/drm/nouveau/drm/Makefile
@@ -0,0 +1,11 @@
+TOP = ../../../../../..
+include $(TOP)/configs/current
+
+LIBNAME = nouveaudrm
+
+C_SOURCES = nouveau_drm_api.c
+
+LIBRARY_INCLUDES = $(shell pkg-config libdrm libdrm_nouveau --cflags-only-I)
+LIBRARY_DEFINES = $(shell pkg-config libdrm libdrm_nouveau --cflags-only-other)
+
+include ../../../../Makefile.template
diff --git a/src/gallium/winsys/drm/nouveau/drm/nouveau_dri.h b/src/gallium/winsys/drm/nouveau/drm/nouveau_dri.h
new file mode 100644
index 0000000000..1207c2d609
--- /dev/null
+++ b/src/gallium/winsys/drm/nouveau/drm/nouveau_dri.h
@@ -0,0 +1,28 @@
+#ifndef _NOUVEAU_DRI_
+#define _NOUVEAU_DRI_
+
+#include "xf86drm.h"
+#include "drm.h"
+#include "nouveau_drm.h"
+
+struct nouveau_dri {
+ uint32_t device_id; /**< \brief PCI device ID */
+ uint32_t width; /**< \brief width in pixels of display */
+ uint32_t height; /**< \brief height in scanlines of display */
+ uint32_t depth; /**< \brief depth of display (8, 15, 16, 24) */
+ uint32_t bpp; /**< \brief bit depth of display (8, 16, 24, 32) */
+
+	uint32_t bus_type;	/**< \brief the bus type */
+ uint32_t bus_mode; /**< \brief bus mode (used for AGP, maybe also for PCI-E ?) */
+
+ uint32_t front_offset; /**< \brief front buffer offset */
+ uint32_t front_pitch; /**< \brief front buffer pitch */
+ uint32_t back_offset; /**< \brief private back buffer offset */
+ uint32_t back_pitch; /**< \brief private back buffer pitch */
+ uint32_t depth_offset; /**< \brief private depth buffer offset */
+ uint32_t depth_pitch; /**< \brief private depth buffer pitch */
+
+};
+
+#endif
+
diff --git a/src/gallium/winsys/drm/nouveau/drm/nouveau_drm_api.c b/src/gallium/winsys/drm/nouveau/drm/nouveau_drm_api.c
new file mode 100644
index 0000000000..091cbbcfed
--- /dev/null
+++ b/src/gallium/winsys/drm/nouveau/drm/nouveau_drm_api.c
@@ -0,0 +1,263 @@
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "util/u_memory.h"
+
+#include "nouveau_drm_api.h"
+
+#include "nouveau_drmif.h"
+#include "nouveau_channel.h"
+#include "nouveau_bo.h"
+
+#include "nouveau/nouveau_winsys.h"
+#include "nouveau/nouveau_screen.h"
+
+static struct pipe_surface *
+dri_surface_from_handle(struct drm_api *api, struct pipe_screen *pscreen,
+ unsigned handle, enum pipe_format format,
+ unsigned width, unsigned height, unsigned pitch)
+{
+ struct pipe_surface *ps = NULL;
+ struct pipe_texture *pt = NULL;
+ struct pipe_texture tmpl;
+
+ memset(&tmpl, 0, sizeof(tmpl));
+ tmpl.tex_usage = PIPE_TEXTURE_USAGE_PRIMARY |
+ NOUVEAU_TEXTURE_USAGE_LINEAR;
+ tmpl.target = PIPE_TEXTURE_2D;
+ tmpl.last_level = 0;
+ tmpl.depth[0] = 1;
+ tmpl.format = format;
+ tmpl.width[0] = width;
+ tmpl.height[0] = height;
+ pf_get_block(tmpl.format, &tmpl.block);
+
+ pt = api->texture_from_shared_handle(api, pscreen, &tmpl,
+ "front buffer", pitch, handle);
+ if (!pt)
+ return NULL;
+
+ ps = pscreen->get_tex_surface(pscreen, pt, 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_READ |
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+
+ /* we don't need the texture from this point on */
+ pipe_texture_reference(&pt, NULL);
+ return ps;
+}
+
+static struct pipe_surface *
+nouveau_dri1_front_surface(struct pipe_context *pipe)
+{
+ return nouveau_winsys_screen(pipe->screen)->front;
+}
+
+static struct dri1_api nouveau_dri1_api = {
+ nouveau_dri1_front_surface,
+};
+
+static struct pipe_screen *
+nouveau_drm_create_screen(struct drm_api *api, int fd,
+ struct drm_create_screen_arg *arg)
+{
+ struct dri1_create_screen_arg *dri1 = (void *)arg;
+ struct nouveau_winsys *nvws;
+ struct pipe_winsys *ws;
+ struct nouveau_device *dev = NULL;
+ struct pipe_screen *(*init)(struct pipe_winsys *,
+ struct nouveau_device *);
+ int ret;
+
+ ret = nouveau_device_open_existing(&dev, 0, fd, 0);
+ if (ret)
+ return NULL;
+
+ switch (dev->chipset & 0xf0) {
+ case 0x00:
+ init = nv04_screen_create;
+ break;
+ case 0x10:
+ init = nv10_screen_create;
+ break;
+ case 0x20:
+ init = nv20_screen_create;
+ break;
+ case 0x30:
+ init = nv30_screen_create;
+ break;
+ case 0x40:
+ case 0x60:
+ init = nv40_screen_create;
+ break;
+ case 0x80:
+ case 0x90:
+ case 0xa0:
+ init = nv50_screen_create;
+ break;
+ default:
+ debug_printf("%s: unknown chipset nv%02x\n", __func__,
+ dev->chipset);
+ return NULL;
+ }
+
+ nvws = CALLOC_STRUCT(nouveau_winsys);
+ if (!nvws) {
+ nouveau_device_close(&dev);
+ return NULL;
+ }
+ ws = &nvws->base;
+
+ nvws->pscreen = init(ws, dev);
+ if (!nvws->pscreen) {
+ ws->destroy(ws);
+ return NULL;
+ }
+
+ if (arg->mode == DRM_CREATE_DRI1) {
+ struct nouveau_dri *nvdri = dri1->ddx_info;
+ enum pipe_format format;
+
+ if (nvdri->bpp == 16)
+ format = PIPE_FORMAT_R5G6B5_UNORM;
+ else
+ format = PIPE_FORMAT_A8R8G8B8_UNORM;
+
+ nvws->front = dri_surface_from_handle(api, nvws->pscreen,
+ nvdri->front_offset,
+ format, nvdri->width,
+ nvdri->height,
+ nvdri->front_pitch *
+ (nvdri->bpp / 8));
+ if (!nvws->front) {
+ debug_printf("%s: error referencing front buffer\n",
+ __func__);
+ ws->destroy(ws);
+ return NULL;
+ }
+
+ dri1->api = &nouveau_dri1_api;
+ }
+
+ return nvws->pscreen;
+}
+
+static struct pipe_context *
+nouveau_drm_create_context(struct drm_api *api, struct pipe_screen *pscreen)
+{
+ struct nouveau_winsys *nvws = nouveau_winsys_screen(pscreen);
+ struct pipe_context *(*init)(struct pipe_screen *, unsigned);
+ unsigned chipset = nouveau_screen(pscreen)->device->chipset;
+ int i;
+
+ switch (chipset & 0xf0) {
+ case 0x00:
+ init = nv04_create;
+ break;
+ case 0x10:
+ init = nv10_create;
+ break;
+ case 0x20:
+ init = nv20_create;
+ break;
+ case 0x30:
+ init = nv30_create;
+ break;
+ case 0x40:
+ case 0x60:
+ init = nv40_create;
+ break;
+ case 0x80:
+ case 0x90:
+ case 0xa0:
+ init = nv50_create;
+ break;
+ default:
+ debug_printf("%s: unknown chipset nv%02x\n", __func__, chipset);
+ return NULL;
+ }
+
+ /* Find a free slot for a pipe context, allocate a new one if needed */
+ for (i = 0; i < nvws->nr_pctx; i++) {
+ if (nvws->pctx[i] == NULL)
+ break;
+ }
+
+ if (i == nvws->nr_pctx) {
+ nvws->nr_pctx++;
+ nvws->pctx = realloc(nvws->pctx,
+ sizeof(*nvws->pctx) * nvws->nr_pctx);
+ }
+
+ nvws->pctx[i] = init(pscreen, i);
+ return nvws->pctx[i];
+}
+
+static struct pipe_texture *
+nouveau_drm_pt_from_name(struct drm_api *api, struct pipe_screen *pscreen,
+ struct pipe_texture *templ, const char *name,
+ unsigned stride, unsigned handle)
+{
+ struct nouveau_device *dev = nouveau_screen(pscreen)->device;
+ struct pipe_buffer *pb;
+ int ret;
+
+ pb = CALLOC(1, sizeof(struct pipe_buffer) + sizeof(struct nouveau_bo*));
+ if (!pb)
+ return NULL;
+
+ ret = nouveau_bo_handle_ref(dev, handle, (struct nouveau_bo**)(pb+1));
+ if (ret) {
+ debug_printf("%s: ref name 0x%08x failed with %d\n",
+ __func__, handle, ret);
+ FREE(pb);
+ return NULL;
+ }
+
+ pipe_reference_init(&pb->reference, 1);
+ pb->screen = pscreen;
+ pb->alignment = 0;
+ pb->usage = PIPE_BUFFER_USAGE_GPU_READ_WRITE |
+ PIPE_BUFFER_USAGE_CPU_READ_WRITE;
+ pb->size = nouveau_bo(pb)->size;
+ return pscreen->texture_blanket(pscreen, templ, &stride, pb);
+}
+
+static boolean
+nouveau_drm_name_from_pt(struct drm_api *api, struct pipe_screen *pscreen,
+ struct pipe_texture *pt, unsigned *stride,
+ unsigned *handle)
+{
+ struct nouveau_miptree *mt = nouveau_miptree(pt);
+
+ if (!mt || !mt->bo)
+		return FALSE;
+
+ return nouveau_bo_handle_get(mt->bo, handle) == 0;
+}
+
+static boolean
+nouveau_drm_handle_from_pt(struct drm_api *api, struct pipe_screen *pscreen,
+ struct pipe_texture *pt, unsigned *stride,
+ unsigned *handle)
+{
+ struct nouveau_miptree *mt = nouveau_miptree(pt);
+
+ if (!mt || !mt->bo)
+		return FALSE;
+
+	*handle = mt->bo->handle;
+	return TRUE;
+}
+
+struct drm_api drm_api_hooks = {
+ .create_screen = nouveau_drm_create_screen,
+ .create_context = nouveau_drm_create_context,
+ .texture_from_shared_handle = nouveau_drm_pt_from_name,
+ .shared_handle_from_texture = nouveau_drm_name_from_pt,
+ .local_handle_from_texture = nouveau_drm_handle_from_pt,
+};
+
+struct drm_api *
+drm_api_create() {
+ return &drm_api_hooks;
+}
+
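
For context, a sketch of how a state tracker reaches this code through the generic drm_api entry point; create_gallium_context() is a hypothetical wrapper, and the DRM_CREATE_NORMAL argument mirrors the non-DRI1 path above.

#include <string.h>
#include "state_tracker/drm_api.h"

/* Hypothetical user of drm_api_create(): open a screen and a context
 * on an already-open DRM file descriptor. */
static struct pipe_context *
create_gallium_context(int fd)
{
   struct drm_api *api = drm_api_create();
   struct drm_create_screen_arg arg;
   struct pipe_screen *screen;

   memset(&arg, 0, sizeof(arg));
   arg.mode = DRM_CREATE_NORMAL;

   screen = api->create_screen(api, fd, &arg);
   if (!screen)
      return NULL;

   return api->create_context(api, screen);
}
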
diff --git a/src/gallium/winsys/drm/nouveau/drm/nouveau_drm_api.h b/src/gallium/winsys/drm/nouveau/drm/nouveau_drm_api.h
new file mode 100644
index 0000000000..e61e0e0957
--- /dev/null
+++ b/src/gallium/winsys/drm/nouveau/drm/nouveau_drm_api.h
@@ -0,0 +1,34 @@
+#ifndef __NOUVEAU_DRM_API_H__
+#define __NOUVEAU_DRM_API_H__
+
+#include "state_tracker/drm_api.h"
+#include "state_tracker/dri1_api.h"
+
+#include "pipe/internal/p_winsys_screen.h"
+
+#include "nouveau_dri.h"
+
+struct nouveau_winsys {
+ struct pipe_winsys base;
+
+ struct pipe_screen *pscreen;
+
+ unsigned nr_pctx;
+ struct pipe_context **pctx;
+
+ struct pipe_surface *front;
+};
+
+static INLINE struct nouveau_winsys *
+nouveau_winsys(struct pipe_winsys *ws)
+{
+ return (struct nouveau_winsys *)ws;
+}
+
+static INLINE struct nouveau_winsys *
+nouveau_winsys_screen(struct pipe_screen *pscreen)
+{
+ return nouveau_winsys(pscreen->winsys);
+}
+
+#endif
diff --git a/src/gallium/winsys/drm/radeon/Makefile b/src/gallium/winsys/drm/radeon/Makefile
new file mode 100644
index 0000000000..bacdf3de28
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/Makefile
@@ -0,0 +1,12 @@
+# src/gallium/winsys/drm/radeon/Makefile
+TOP = ../../../../..
+include $(TOP)/configs/current
+
+SUBDIRS = core $(GALLIUM_STATE_TRACKERS_DIRS)
+
+default install clean:
+ @for dir in $(SUBDIRS) ; do \
+ if [ -d $$dir ] ; then \
+ (cd $$dir && $(MAKE) $@) || exit 1; \
+ fi \
+ done
diff --git a/src/gallium/winsys/drm/radeon/SConscript b/src/gallium/winsys/drm/radeon/SConscript
new file mode 100644
index 0000000000..b2dfd504d4
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/SConscript
@@ -0,0 +1,7 @@
+Import('*')
+
+SConscript(['core/SConscript',])
+
+if 'mesa' in env['statetrackers']:
+
+ SConscript(['dri/SConscript'])
diff --git a/src/gallium/winsys/drm/radeon/core/Makefile b/src/gallium/winsys/drm/radeon/core/Makefile
new file mode 100644
index 0000000000..42a6f4abc2
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/core/Makefile
@@ -0,0 +1,18 @@
+
+TOP = ../../../../../..
+include $(TOP)/configs/current
+
+LIBNAME = radeonwinsys
+
+C_SOURCES = \
+ radeon_buffer.c \
+ radeon_drm.c \
+ radeon_r300.c \
+ radeon_winsys_softpipe.c
+
+LIBRARY_INCLUDES = -I$(TOP)/src/gallium/drivers/r300 \
+ $(shell pkg-config libdrm --cflags-only-I)
+
+include ../../../../Makefile.template
+
+symlinks:
diff --git a/src/gallium/winsys/drm/radeon/core/SConscript b/src/gallium/winsys/drm/radeon/core/SConscript
new file mode 100644
index 0000000000..2ad68e403f
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/core/SConscript
@@ -0,0 +1,19 @@
+Import('*')
+
+env = drienv.Clone()
+
+radeon_sources = [
+ 'radeon_buffer.c',
+ 'radeon_drm.c',
+ 'radeon_r300.c',
+ 'radeon_winsys_softpipe.c',
+]
+
+env.Append(CPPPATH = '#/src/gallium/drivers/r300')
+
+radeonwinsys = env.ConvenienceLibrary(
+ target ='radeonwinsys',
+ source = radeon_sources,
+)
+
+Export('radeonwinsys')
diff --git a/src/gallium/winsys/drm/radeon/core/radeon_buffer.c b/src/gallium/winsys/drm/radeon/core/radeon_buffer.c
new file mode 100644
index 0000000000..7bf23cba23
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/core/radeon_buffer.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright © 2008 Jérôme Glisse
+ * 2009 Corbin Simpson
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Jérôme Glisse <glisse@freedesktop.org>
+ * Corbin Simpson <MostAwesomeDude@gmail.com>
+ */
+
+#include "radeon_buffer.h"
+
+#include "radeon_bo_gem.h"
+
+static const char *radeon_get_name(struct pipe_winsys *ws)
+{
+ return "Radeon/GEM+KMS";
+}
+
+static struct pipe_buffer *radeon_buffer_create(struct pipe_winsys *ws,
+ unsigned alignment,
+ unsigned usage,
+ unsigned size)
+{
+ struct radeon_winsys *radeon_ws = (struct radeon_winsys *)ws;
+ struct radeon_pipe_buffer *radeon_buffer;
+ uint32_t domain;
+
+ radeon_buffer = CALLOC_STRUCT(radeon_pipe_buffer);
+ if (radeon_buffer == NULL) {
+ return NULL;
+ }
+
+ pipe_reference_init(&radeon_buffer->base.reference, 1);
+ radeon_buffer->base.alignment = alignment;
+ radeon_buffer->base.usage = usage;
+ radeon_buffer->base.size = size;
+
+ domain = 0;
+
+ if (usage & PIPE_BUFFER_USAGE_PIXEL) {
+ domain |= RADEON_GEM_DOMAIN_VRAM;
+ }
+ if (usage & PIPE_BUFFER_USAGE_VERTEX) {
+ domain |= RADEON_GEM_DOMAIN_GTT;
+ }
+ if (usage & PIPE_BUFFER_USAGE_INDEX) {
+ domain |= RADEON_GEM_DOMAIN_GTT;
+ }
+
+ radeon_buffer->bo = radeon_bo_open(radeon_ws->priv->bom, 0, size,
+ alignment, domain, 0);
+ if (radeon_buffer->bo == NULL) {
+ FREE(radeon_buffer);
+ return NULL;
+ }
+ return &radeon_buffer->base;
+}
+
+static struct pipe_buffer *radeon_buffer_user_create(struct pipe_winsys *ws,
+ void *ptr,
+ unsigned bytes)
+{
+ struct radeon_pipe_buffer *radeon_buffer;
+
+ radeon_buffer =
+ (struct radeon_pipe_buffer*)radeon_buffer_create(ws, 0, 0, bytes);
+ if (radeon_buffer == NULL) {
+ return NULL;
+ }
+ radeon_bo_map(radeon_buffer->bo, 1);
+ memcpy(radeon_buffer->bo->ptr, ptr, bytes);
+ radeon_bo_unmap(radeon_buffer->bo);
+ return &radeon_buffer->base;
+}
+
+static struct pipe_buffer *radeon_surface_buffer_create(struct pipe_winsys *ws,
+ unsigned width,
+ unsigned height,
+ enum pipe_format format,
+ unsigned usage,
+ unsigned tex_usage,
+ unsigned *stride)
+{
+ struct pipe_format_block block;
+ unsigned nblocksx, nblocksy, size;
+
+ pf_get_block(format, &block);
+
+ nblocksx = pf_get_nblocksx(&block, width);
+ nblocksy = pf_get_nblocksy(&block, height);
+
+    /* Radeon wants the stride aligned; round it up to 64 bytes. */
+    /* XXX this can be 32 when the width is a power of two */
+ *stride = (nblocksx * block.size + 63) & ~63;
+ size = *stride * nblocksy;
+
+ return radeon_buffer_create(ws, 64, usage, size);
+}
+
+static void radeon_buffer_del(struct pipe_buffer *buffer)
+{
+ struct radeon_pipe_buffer *radeon_buffer =
+ (struct radeon_pipe_buffer*)buffer;
+
+ radeon_bo_unref(radeon_buffer->bo);
+    FREE(radeon_buffer);
+}
+
+static void *radeon_buffer_map(struct pipe_winsys *ws,
+ struct pipe_buffer *buffer,
+ unsigned flags)
+{
+ struct radeon_pipe_buffer *radeon_buffer =
+ (struct radeon_pipe_buffer*)buffer;
+ int write = 0;
+
+ if (flags & PIPE_BUFFER_USAGE_DONTBLOCK) {
+ uint32_t domain;
+
+ if (radeon_bo_is_busy(radeon_buffer->bo, &domain))
+ return NULL;
+ }
+ if (flags & PIPE_BUFFER_USAGE_CPU_WRITE) {
+ write = 1;
+ }
+
+ if (radeon_bo_map(radeon_buffer->bo, write)) {
+ return NULL;
+ }
+
+ return radeon_buffer->bo->ptr;
+}
+
+static void radeon_buffer_unmap(struct pipe_winsys *ws,
+ struct pipe_buffer *buffer)
+{
+ struct radeon_pipe_buffer *radeon_buffer =
+ (struct radeon_pipe_buffer*)buffer;
+
+ radeon_bo_unmap(radeon_buffer->bo);
+}
+
+static void radeon_fence_reference(struct pipe_winsys *ws,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *pfence)
+{
+}
+
+static int radeon_fence_signalled(struct pipe_winsys *ws,
+ struct pipe_fence_handle *pfence,
+ unsigned flag)
+{
+ return 1;
+}
+
+static int radeon_fence_finish(struct pipe_winsys *ws,
+ struct pipe_fence_handle *pfence,
+ unsigned flag)
+{
+ return 0;
+}
+
+static void radeon_flush_frontbuffer(struct pipe_winsys *pipe_winsys,
+ struct pipe_surface *pipe_surface,
+ void *context_private)
+{
+ /* XXX TODO: call dri2CopyRegion */
+}
+
+struct radeon_winsys* radeon_pipe_winsys(int fd)
+{
+ struct radeon_winsys* radeon_ws;
+
+ radeon_ws = CALLOC_STRUCT(radeon_winsys);
+ if (radeon_ws == NULL) {
+ return NULL;
+ }
+
+ radeon_ws->priv = CALLOC_STRUCT(radeon_winsys_priv);
+ if (radeon_ws->priv == NULL) {
+ FREE(radeon_ws);
+ return NULL;
+ }
+
+ radeon_ws->priv->fd = fd;
+ radeon_ws->priv->bom = radeon_bo_manager_gem_ctor(fd);
+
+ radeon_ws->base.flush_frontbuffer = radeon_flush_frontbuffer;
+
+ radeon_ws->base.buffer_create = radeon_buffer_create;
+ radeon_ws->base.user_buffer_create = radeon_buffer_user_create;
+ radeon_ws->base.surface_buffer_create = radeon_surface_buffer_create;
+ radeon_ws->base.buffer_map = radeon_buffer_map;
+ radeon_ws->base.buffer_unmap = radeon_buffer_unmap;
+ radeon_ws->base.buffer_destroy = radeon_buffer_del;
+
+ radeon_ws->base.fence_reference = radeon_fence_reference;
+ radeon_ws->base.fence_signalled = radeon_fence_signalled;
+ radeon_ws->base.fence_finish = radeon_fence_finish;
+
+ radeon_ws->base.get_name = radeon_get_name;
+
+ return radeon_ws;
+}
+#if 0
+static struct pipe_buffer *radeon_buffer_from_handle(struct radeon_screen *radeon_screen,
+ uint32_t handle)
+{
+ struct radeon_pipe_buffer *radeon_buffer;
+ struct radeon_bo *bo = NULL;
+
+ bo = radeon_bo_open(radeon_screen->bom, handle, 0, 0, 0, 0);
+ if (bo == NULL) {
+ return NULL;
+ }
+ radeon_buffer = calloc(1, sizeof(struct radeon_pipe_buffer));
+ if (radeon_buffer == NULL) {
+ radeon_bo_unref(bo);
+ return NULL;
+ }
+ pipe_reference_init(&radeon_buffer->base.reference, 1);
+ radeon_buffer->base.usage = PIPE_BUFFER_USAGE_PIXEL;
+ radeon_buffer->bo = bo;
+ return &radeon_buffer->base;
+}
+
+struct pipe_surface *radeon_surface_from_handle(struct radeon_context *radeon_context,
+ uint32_t handle,
+ enum pipe_format format,
+ int w, int h, int pitch)
+{
+ struct pipe_screen *pipe_screen = radeon_context->pipe_screen;
+ struct pipe_winsys *pipe_winsys = radeon_context->pipe_winsys;
+ struct pipe_texture tmpl;
+ struct pipe_surface *ps;
+ struct pipe_texture *pt;
+ struct pipe_buffer *pb;
+
+ pb = radeon_buffer_from_handle(radeon_context->radeon_screen, handle);
+ if (pb == NULL) {
+ return NULL;
+ }
+ memset(&tmpl, 0, sizeof(tmpl));
+ tmpl.tex_usage = PIPE_TEXTURE_USAGE_DISPLAY_TARGET;
+ tmpl.target = PIPE_TEXTURE_2D;
+ tmpl.width[0] = w;
+ tmpl.height[0] = h;
+ tmpl.depth[0] = 1;
+ tmpl.format = format;
+ pf_get_block(tmpl.format, &tmpl.block);
+ tmpl.nblocksx[0] = pf_get_nblocksx(&tmpl.block, w);
+ tmpl.nblocksy[0] = pf_get_nblocksy(&tmpl.block, h);
+
+ pt = pipe_screen->texture_blanket(pipe_screen, &tmpl, &pitch, pb);
+ if (pt == NULL) {
+ pipe_buffer_reference(&pb, NULL);
+ }
+ ps = pipe_screen->get_tex_surface(pipe_screen, pt, 0, 0, 0,
+ PIPE_BUFFER_USAGE_GPU_WRITE);
+ return ps;
+}
+#endif
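
As a worked example of the stride computation in radeon_surface_buffer_create() above: for a hypothetical 640x480 PIPE_FORMAT_A8R8G8B8_UNORM surface, block.size is 4 and nblocksx is 640, so the stride becomes (640 * 4 + 63) & ~63 = 2560 bytes (already a multiple of 64) and the backing buffer is 2560 * 480 = 1228800 bytes, allocated with the 64-byte alignment passed to radeon_buffer_create().
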
diff --git a/src/gallium/winsys/drm/radeon/core/radeon_buffer.h b/src/gallium/winsys/drm/radeon/core/radeon_buffer.h
new file mode 100644
index 0000000000..f5153b06af
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/core/radeon_buffer.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_BUFFER_H
+#define RADEON_BUFFER_H
+
+#include <stdio.h>
+
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_defines.h"
+#include "pipe/p_inlines.h"
+
+//#include "state_tracker/st_public.h"
+
+#include "util/u_memory.h"
+
+#include "radeon_bo.h"
+#include "radeon_cs.h"
+
+#include "radeon_drm.h"
+
+struct radeon_pipe_buffer {
+ struct pipe_buffer base;
+ struct radeon_bo *bo;
+ boolean flinked;
+ uint32_t flink;
+};
+
+#define RADEON_MAX_BOS 24
+
+struct radeon_winsys_priv {
+ /* DRM FD */
+ int fd;
+
+ /* Radeon BO manager. */
+ struct radeon_bo_manager* bom;
+
+ /* Radeon CS manager. */
+ struct radeon_cs_manager* csm;
+
+ /* Current CS. */
+ struct radeon_cs* cs;
+};
+
+struct radeon_winsys {
+ /* Parent class. */
+ struct pipe_winsys base;
+
+ /* This corresponds to void* radeon_winsys in r300_winsys. */
+ struct radeon_winsys_priv* priv;
+};
+
+struct radeon_winsys* radeon_pipe_winsys(int fb);
+#if 0
+struct pipe_surface *radeon_surface_from_handle(struct radeon_context *radeon_context,
+ uint32_t handle,
+ enum pipe_format format,
+ int w, int h, int pitch);
+#endif
+#endif
diff --git a/src/gallium/winsys/drm/radeon/core/radeon_drm.c b/src/gallium/winsys/drm/radeon/core/radeon_drm.c
new file mode 100644
index 0000000000..a4011db0b8
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/core/radeon_drm.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright © 2009 Corbin Simpson
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Corbin Simpson <MostAwesomeDude@gmail.com>
+ * Joakim Sindholt <opensource@zhasha.com>
+ */
+
+#include "radeon_drm.h"
+
+/* Create a pipe_screen. */
+struct pipe_screen* radeon_create_screen(struct drm_api* api,
+ int drmFB,
+ struct drm_create_screen_arg *arg)
+{
+ struct radeon_winsys* winsys = radeon_pipe_winsys(drmFB);
+
+ if (getenv("RADEON_SOFTPIPE")) {
+ return softpipe_create_screen((struct pipe_winsys*)winsys);
+ } else {
+ struct r300_winsys* r300 = radeon_create_r300_winsys(drmFB, winsys);
+ FREE(winsys);
+ return r300_create_screen(r300);
+ }
+}
+
+/* Create a pipe_context. */
+struct pipe_context* radeon_create_context(struct drm_api* api,
+ struct pipe_screen* screen)
+{
+ if (getenv("RADEON_SOFTPIPE")) {
+ return radeon_create_softpipe(screen->winsys);
+ } else {
+ return r300_create_context(screen,
+ (struct r300_winsys*)screen->winsys);
+ }
+}
+
+boolean radeon_buffer_from_texture(struct drm_api* api,
+ struct pipe_texture* texture,
+ struct pipe_buffer** buffer,
+ unsigned* stride)
+{
+ /* XXX fix this */
+ return r300_get_texture_buffer(texture, buffer, stride);
+}
+
+/* Create a buffer from a handle. */
+/* XXX the name argument is currently unused here */
+struct pipe_buffer* radeon_buffer_from_handle(struct drm_api* api,
+ struct pipe_screen* screen,
+ const char* name,
+ unsigned handle)
+{
+ struct radeon_bo_manager* bom =
+ ((struct radeon_winsys*)screen->winsys)->priv->bom;
+ struct radeon_pipe_buffer* radeon_buffer;
+ struct radeon_bo* bo = NULL;
+
+ bo = radeon_bo_open(bom, handle, 0, 0, 0, 0);
+ if (bo == NULL) {
+ return NULL;
+ }
+
+ radeon_buffer = CALLOC_STRUCT(radeon_pipe_buffer);
+ if (radeon_buffer == NULL) {
+ radeon_bo_unref(bo);
+ return NULL;
+ }
+
+ pipe_reference_init(&radeon_buffer->base.reference, 1);
+ radeon_buffer->base.screen = screen;
+ radeon_buffer->base.usage = PIPE_BUFFER_USAGE_PIXEL;
+ radeon_buffer->bo = bo;
+ return &radeon_buffer->base;
+}
+
+static struct pipe_texture*
+radeon_texture_from_shared_handle(struct drm_api *api,
+ struct pipe_screen *screen,
+ struct pipe_texture *templ,
+ const char *name,
+ unsigned stride,
+ unsigned handle)
+{
+ struct pipe_buffer *buffer;
+
+ buffer = radeon_buffer_from_handle(api, screen, name, handle);
+ if (!buffer) {
+ return NULL;
+ }
+
+ return screen->texture_blanket(screen, templ, &stride, buffer);
+}
+
+static boolean radeon_shared_handle_from_texture(struct drm_api *api,
+ struct pipe_screen *screen,
+ struct pipe_texture *texture,
+ unsigned *stride,
+ unsigned *handle)
+{
+ int retval, fd;
+ struct drm_gem_flink flink;
+ struct radeon_pipe_buffer* radeon_buffer;
+ struct pipe_buffer *buffer;
+
+ if (!radeon_buffer_from_texture(api, texture, &buffer, stride)) {
+ return FALSE;
+ }
+
+ radeon_buffer = (struct radeon_pipe_buffer*)buffer;
+ if (!radeon_buffer->flinked) {
+ fd = ((struct radeon_winsys*)screen->winsys)->priv->fd;
+
+ flink.handle = radeon_buffer->bo->handle;
+
+ retval = ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
+ if (retval) {
+ debug_printf("radeon: DRM_IOCTL_GEM_FLINK failed, error %d\n",
+ retval);
+ return FALSE;
+ }
+
+ radeon_buffer->flink = flink.name;
+ radeon_buffer->flinked = TRUE;
+ }
+
+ *handle = radeon_buffer->flink;
+ return TRUE;
+}
+
+static boolean radeon_local_handle_from_texture(struct drm_api *api,
+ struct pipe_screen *screen,
+ struct pipe_texture *texture,
+ unsigned *stride,
+ unsigned *handle)
+{
+ struct pipe_buffer *buffer;
+ if (!radeon_buffer_from_texture(api, texture, &buffer, stride)) {
+ return FALSE;
+ }
+
+ *handle = ((struct radeon_pipe_buffer*)buffer)->bo->handle;
+
+ pipe_buffer_reference(&buffer, NULL);
+
+ return TRUE;
+}
+
+struct drm_api drm_api_hooks = {
+ .create_screen = radeon_create_screen,
+ .create_context = radeon_create_context,
+ .texture_from_shared_handle = radeon_texture_from_shared_handle,
+ .shared_handle_from_texture = radeon_shared_handle_from_texture,
+ .local_handle_from_texture = radeon_local_handle_from_texture,
+};
+
+struct drm_api* drm_api_create()
+{
+#ifdef DEBUG
+ return trace_drm_create(&drm_api_hooks);
+#else
+ return &drm_api_hooks;
+#endif
+}
diff --git a/src/gallium/winsys/drm/radeon/core/radeon_drm.h b/src/gallium/winsys/drm/radeon/core/radeon_drm.h
new file mode 100644
index 0000000000..88a5c82b28
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/core/radeon_drm.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright © 2009 Corbin Simpson
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Corbin Simpson <MostAwesomeDude@gmail.com>
+ */
+#ifndef RADEON_DRM_H
+#define RADEON_DRM_H
+
+#include <sys/ioctl.h>
+
+#include "xf86drm.h"
+
+#include "pipe/p_screen.h"
+
+#include "trace/tr_drm.h"
+#include "util/u_memory.h"
+
+#include "state_tracker/drm_api.h"
+
+#include "radeon_buffer.h"
+#include "radeon_r300.h"
+#include "radeon_winsys_softpipe.h"
+
+/* XXX */
+#include "r300_screen.h"
+
+struct pipe_screen* radeon_create_screen(struct drm_api* api,
+ int drmFB,
+ struct drm_create_screen_arg *arg);
+
+struct pipe_context* radeon_create_context(struct drm_api* api,
+ struct pipe_screen* screen);
+
+boolean radeon_buffer_from_texture(struct drm_api* api,
+ struct pipe_texture* texture,
+ struct pipe_buffer** buffer,
+ unsigned* stride);
+
+struct pipe_buffer* radeon_buffer_from_handle(struct drm_api* api,
+ struct pipe_screen* screen,
+ const char* name,
+ unsigned handle);
+
+boolean radeon_handle_from_buffer(struct drm_api* api,
+ struct pipe_screen* screen,
+ struct pipe_buffer* buffer,
+ unsigned* handle);
+
+boolean radeon_global_handle_from_buffer(struct drm_api* api,
+ struct pipe_screen* screen,
+ struct pipe_buffer* buffer,
+ unsigned* handle);
+
+void radeon_destroy_drm_api(struct drm_api* api);
+#endif
diff --git a/src/gallium/winsys/drm/radeon/core/radeon_r300.c b/src/gallium/winsys/drm/radeon/core/radeon_r300.c
new file mode 100644
index 0000000000..d2d84f1a8f
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/core/radeon_r300.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "radeon_r300.h"
+
+static boolean radeon_r300_add_buffer(struct r300_winsys* winsys,
+ struct pipe_buffer* pbuffer,
+ uint32_t rd,
+ uint32_t wd)
+{
+ struct radeon_winsys_priv* priv =
+ (struct radeon_winsys_priv*)winsys->radeon_winsys;
+ struct radeon_bo* bo = ((struct radeon_pipe_buffer*)pbuffer)->bo;
+
+ radeon_cs_space_add_persistent_bo(priv->cs, bo, rd, wd);
+ return TRUE;
+}
+
+static boolean radeon_r300_validate(struct r300_winsys* winsys)
+{
+ struct radeon_winsys_priv* priv =
+ (struct radeon_winsys_priv*)winsys->radeon_winsys;
+
+ if (radeon_cs_space_check(priv->cs) < 0) {
+ return FALSE;
+ }
+
+ /* Things are fine, we can proceed as normal. */
+ return TRUE;
+}
+
+static boolean radeon_r300_check_cs(struct r300_winsys* winsys, int size)
+{
+    /* XXX actually check the requested size here */
+ /* XXX also validate buffers */
+ return TRUE;
+}
+
+static void radeon_r300_begin_cs(struct r300_winsys* winsys,
+ int size,
+ const char* file,
+ const char* function,
+ int line)
+{
+ struct radeon_winsys_priv* priv =
+ (struct radeon_winsys_priv*)winsys->radeon_winsys;
+
+ radeon_cs_begin(priv->cs, size, file, function, line);
+}
+
+static void radeon_r300_write_cs_dword(struct r300_winsys* winsys,
+ uint32_t dword)
+{
+ struct radeon_winsys_priv* priv =
+ (struct radeon_winsys_priv*)winsys->radeon_winsys;
+
+ radeon_cs_write_dword(priv->cs, dword);
+}
+
+static void radeon_r300_write_cs_reloc(struct r300_winsys* winsys,
+ struct pipe_buffer* pbuffer,
+ uint32_t rd,
+ uint32_t wd,
+ uint32_t flags)
+{
+ struct radeon_winsys_priv* priv =
+ (struct radeon_winsys_priv*)winsys->radeon_winsys;
+ int retval = 0;
+
+ retval = radeon_cs_write_reloc(priv->cs,
+ ((struct radeon_pipe_buffer*)pbuffer)->bo, rd, wd, flags);
+
+ if (retval) {
+ debug_printf("radeon: Relocation of %p (%d, %d, %d) failed!\n",
+ pbuffer, rd, wd, flags);
+ }
+}
+
+static void radeon_r300_end_cs(struct r300_winsys* winsys,
+ const char* file,
+ const char* function,
+ int line)
+{
+ struct radeon_winsys_priv* priv =
+ (struct radeon_winsys_priv*)winsys->radeon_winsys;
+
+ radeon_cs_end(priv->cs, file, function, line);
+}
+
+static void radeon_r300_flush_cs(struct r300_winsys* winsys)
+{
+ struct radeon_winsys_priv* priv =
+ (struct radeon_winsys_priv*)winsys->radeon_winsys;
+ int retval;
+
+ /* Emit the CS. */
+ retval = radeon_cs_emit(priv->cs);
+ if (retval) {
+ debug_printf("radeon: Bad CS, dumping...\n");
+ radeon_cs_print(priv->cs, stderr);
+ }
+
+ /* Clean out BOs. */
+ radeon_cs_space_reset_bos(priv->cs);
+
+ /* Reset CS.
+ * Someday, when we care about performance, we should really find a way
+ * to rotate between two or three CS objects so that the GPU can be
+ * spinning through one CS while another one is being filled. */
+ radeon_cs_erase(priv->cs);
+}
+
+/* Helper function to do the ioctls needed for setup and init. */
+static void do_ioctls(struct r300_winsys* winsys, int fd)
+{
+ struct drm_radeon_gem_info gem_info = {0};
+ struct drm_radeon_info info = {0};
+ int target = 0;
+ int retval;
+
+ info.value = (unsigned long)&target;
+
+ /* First, get the number of pixel pipes */
+ info.request = RADEON_INFO_NUM_GB_PIPES;
+ retval = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
+ if (retval) {
+ fprintf(stderr, "%s: Failed to get GB pipe count, "
+ "error number %d\n", __FUNCTION__, retval);
+ exit(1);
+ }
+ winsys->gb_pipes = target;
+
+ /* Then, get PCI ID */
+ info.request = RADEON_INFO_DEVICE_ID;
+ retval = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
+ if (retval) {
+ fprintf(stderr, "%s: Failed to get PCI ID, "
+ "error number %d\n", __FUNCTION__, retval);
+ exit(1);
+ }
+ winsys->pci_id = target;
+
+ /* Finally, retrieve MM info */
+ retval = drmCommandWriteRead(fd, DRM_RADEON_GEM_INFO,
+ &gem_info, sizeof(gem_info));
+ if (retval) {
+ fprintf(stderr, "%s: Failed to get MM info, error number %d\n",
+ __FUNCTION__, retval);
+ exit(1);
+ }
+ winsys->gart_size = gem_info.gart_size;
+ /* XXX: vram_visible is only the CPU-visible aperture; use it as a conservative VRAM size for now. */
+ winsys->vram_size = gem_info.vram_visible;
+}
+
+struct r300_winsys*
+radeon_create_r300_winsys(int fd, struct radeon_winsys* old_winsys)
+{
+ struct r300_winsys* winsys = CALLOC_STRUCT(r300_winsys);
+ struct radeon_winsys_priv* priv;
+
+ if (winsys == NULL) {
+ return NULL;
+ }
+
+ priv = old_winsys->priv;
+
+ do_ioctls(winsys, fd);
+
+ priv->csm = radeon_cs_manager_gem_ctor(fd);
+
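+ /* The CS size is given in dwords; 1024 * 64 / 4 corresponds to a 64 KiB command stream. */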
+ priv->cs = radeon_cs_create(priv->csm, 1024 * 64 / 4);
+ radeon_cs_set_limit(priv->cs,
+ RADEON_GEM_DOMAIN_GTT, winsys->gart_size);
+ radeon_cs_set_limit(priv->cs,
+ RADEON_GEM_DOMAIN_VRAM, winsys->vram_size);
+
+ winsys->add_buffer = radeon_r300_add_buffer;
+ winsys->validate = radeon_r300_validate;
+
+ winsys->check_cs = radeon_r300_check_cs;
+ winsys->begin_cs = radeon_r300_begin_cs;
+ winsys->write_cs_dword = radeon_r300_write_cs_dword;
+ winsys->write_cs_reloc = radeon_r300_write_cs_reloc;
+ winsys->end_cs = radeon_r300_end_cs;
+ winsys->flush_cs = radeon_r300_flush_cs;
+
+ memcpy(winsys, old_winsys, sizeof(struct radeon_winsys));
+
+ return winsys;
+}
diff --git a/src/gallium/winsys/drm/radeon/core/radeon_r300.h b/src/gallium/winsys/drm/radeon/core/radeon_r300.h
new file mode 100644
index 0000000000..775d7937fd
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/core/radeon_r300.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef RADEON_R300_H
+#define RADEON_R300_H
+
+/* XXX These standard and drm headers should not have to be included here;
+ * they are only pulled in to satisfy the libdrm headers below. */
+#include <stdint.h>
+#include <stdlib.h>
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_cs_gem.h"
+
+#include "r300_winsys.h"
+
+#include "radeon_buffer.h"
+
+struct radeon_winsys;
+
+struct r300_winsys*
+radeon_create_r300_winsys(int fd, struct radeon_winsys* old_winsys);
+
+#endif /* RADEON_R300_H */
diff --git a/src/gallium/winsys/drm/radeon/core/radeon_winsys_softpipe.c b/src/gallium/winsys/drm/radeon/core/radeon_winsys_softpipe.c
new file mode 100644
index 0000000000..f038bfa40e
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/core/radeon_winsys_softpipe.c
@@ -0,0 +1,41 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ */
+
+#include "radeon_winsys_softpipe.h"
+
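+/* Create a softpipe screen on top of the given winsys and return a context for it. */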
+struct pipe_context *radeon_create_softpipe(struct pipe_winsys* winsys)
+{
+ struct pipe_screen *pipe_screen;
+
+ pipe_screen = softpipe_create_screen(winsys);
+
+ return softpipe_create(pipe_screen);
+}
diff --git a/src/gallium/winsys/drm/radeon/core/radeon_winsys_softpipe.h b/src/gallium/winsys/drm/radeon/core/radeon_winsys_softpipe.h
new file mode 100644
index 0000000000..04740e41a5
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/core/radeon_winsys_softpipe.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_WINSYS_SOFTPIPE_H
+#define RADEON_WINSYS_SOFTPIPE_H
+
+#include <stdio.h>
+
+#include "pipe/p_defines.h"
+#include "pipe/p_format.h"
+
+#include "softpipe/sp_winsys.h"
+
+#include "util/u_memory.h"
+
+struct pipe_context *radeon_create_softpipe(struct pipe_winsys* winsys);
+
+#endif
diff --git a/src/gallium/winsys/drm/radeon/dri/Makefile b/src/gallium/winsys/drm/radeon/dri/Makefile
new file mode 100644
index 0000000000..a9889444de
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/dri/Makefile
@@ -0,0 +1,26 @@
+
+TOP = ../../../../../..
+include $(TOP)/configs/current
+
+LIBNAME = radeon_dri.so
+
+MINIGLX_SOURCES =
+
+PIPE_DRIVERS = \
+ $(TOP)/src/gallium/state_trackers/dri/libdridrm.a \
+ $(TOP)/src/gallium/winsys/drm/radeon/core/libradeonwinsys.a \
+ $(TOP)/src/gallium/drivers/softpipe/libsoftpipe.a \
+ $(TOP)/src/gallium/drivers/trace/libtrace.a \
+ $(TOP)/src/gallium/drivers/r300/libr300.a
+
+C_SOURCES = \
+ $(COMMON_GALLIUM_SOURCES) \
+ $(DRIVER_SOURCES)
+
+ASM_SOURCES =
+
+include ../../Makefile.template
+
+DRI_LIB_DEPS += -ldrm_radeon
+
+symlinks:
diff --git a/src/gallium/winsys/drm/radeon/dri/SConscript b/src/gallium/winsys/drm/radeon/dri/SConscript
new file mode 100644
index 0000000000..aea987a3ac
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/dri/SConscript
@@ -0,0 +1,17 @@
+Import('*')
+
+env = drienv.Clone()
+
+env.ParseConfig('pkg-config --cflags --libs libdrm_radeon')
+
+drivers = [
+ trace,
+ softpipe,
+ r300
+]
+
+env.SharedLibrary(
+ target = 'radeon_dri.so',
+ source = COMMON_GALLIUM_SOURCES,
+ LIBS = st_dri + radeonwinsys + mesa + drivers + auxiliaries + env['LIBS'],
+)
diff --git a/src/gallium/winsys/drm/radeon/egl/Makefile b/src/gallium/winsys/drm/radeon/egl/Makefile
new file mode 100644
index 0000000000..6a1448d1b9
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/egl/Makefile
@@ -0,0 +1,26 @@
+TOP = ../../../../../..
+GALLIUMDIR = ../../../..
+include $(TOP)/configs/current
+
+LIBNAME = EGL_r300.so
+
+PIPE_DRIVERS = \
+ $(TOP)/src/gallium/state_trackers/egl/libegldrm.a \
+ $(GALLIUMDIR)/winsys/drm/radeon/core/libradeonwinsys.a \
+ $(TOP)/src/gallium/drivers/softpipe/libsoftpipe.a \
+ $(TOP)/src/gallium/drivers/trace/libtrace.a \
+ $(TOP)/src/gallium/drivers/r300/libr300.a
+
+DRIVER_SOURCES =
+
+C_SOURCES = \
+ $(COMMON_GALLIUM_SOURCES) \
+ $(DRIVER_SOURCES)
+
+DRIVER_EXTRAS = -ldrm_radeon
+
+ASM_SOURCES =
+
+include ../../Makefile.template
+
+symlinks:
diff --git a/src/gallium/winsys/drm/radeon/python/README b/src/gallium/winsys/drm/radeon/python/README
new file mode 100644
index 0000000000..d9e49bce67
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/python/README
@@ -0,0 +1,15 @@
+Python bindings for the radeon gallium driver.
+
+
+See gallium/src/gallium/state_trackers/python/README for more information.
+
+
+Build as:
+
+ scons debug=1 statetrackers=python winsys=drm/radeon/python
+
+Run as:
+
+ export PYTHONPATH=$PWD/build/linux-x86-debug/gallium/winsys/drm/radeon/python:$PWD/build/linux-x86-debug/gallium/state_trackers/python
+
+ python src/gallium/state_trackers/python/samples/tri.py
diff --git a/src/gallium/winsys/drm/radeon/python/SConscript b/src/gallium/winsys/drm/radeon/python/SConscript
new file mode 100644
index 0000000000..3200fd8d1b
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/python/SConscript
@@ -0,0 +1,33 @@
+import os.path
+
+Import('*')
+
+if env['platform'] == 'linux':
+
+ env = env.Clone()
+
+ env.Tool('python')
+
+ env.ParseConfig('pkg-config --cflags --libs libdrm')
+
+ env.Prepend(CPPPATH = [
+ '#src/gallium/state_trackers/python',
+ '../core',
+ ])
+
+ drivers = [
+ softpipe,
+ radeon,
+ trace,
+ ]
+
+ sources = [
+ 'radeon_hardpipe_winsys.c',
+ 'xf86dri.c',
+ ]
+
+ env.SharedLibrary(
+ target = '_gallium',
+ source = sources,
+ LIBS = [pyst] + drivers + auxiliaries + env['LIBS'],
+ )
diff --git a/src/gallium/winsys/drm/radeon/python/radeon_hardpipe_winsys.c b/src/gallium/winsys/drm/radeon/python/radeon_hardpipe_winsys.c
new file mode 100644
index 0000000000..c3ec24aaf7
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/python/radeon_hardpipe_winsys.c
@@ -0,0 +1,140 @@
+ /**************************************************************************
+ *
+ * Copyright 2009 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include <X11/X.h>
+#include <X11/Xlib.h>
+#include <drm/drm.h>
+
+#include "pipe/p_screen.h"
+#include "pipe/p_context.h"
+
+#include "st_winsys.h"
+
+#include "radeon_winsys.h"
+
+#include "xf86dri.h"
+
+
+/* XXX: Force init_gallium symbol to be linked */
+extern void init_gallium(void);
+void (*force_init_gallium_linkage)(void) = &init_gallium;
+
+
+static struct pipe_screen *
+radeon_hardpipe_screen_create(void)
+{
+ Display *dpy;
+ Window rootWin;
+ XWindowAttributes winAttr;
+ int isCapable;
+ int screen;
+ char *driverName;
+ char *curBusID;
+ unsigned magic;
+ int ddxDriverMajor;
+ int ddxDriverMinor;
+ int ddxDriverPatch;
+ drm_handle_t sAreaOffset;
+ int ret;
+ int drmFD;
+ drm_context_t hHWContext;
+ XID id;
+
+ dpy = XOpenDisplay(":0");
+ if (!dpy) {
+ fprintf(stderr, "Open Display Failed\n");
+ return NULL;
+ }
+
+ screen = DefaultScreen(dpy);
+ rootWin = RootWindow(dpy, screen);
+ XGetWindowAttributes(dpy, rootWin, &winAttr);
+
+ ret = uniDRIQueryDirectRenderingCapable(dpy, screen, &isCapable);
+ if (!ret || !isCapable) {
+ fprintf(stderr, "No DRI on this display:sceen\n");
+ goto error;
+ }
+
+ if (!uniDRIOpenConnection(dpy, screen, &sAreaOffset,
+ &curBusID)) {
+ fprintf(stderr, "Could not open DRI connection.\n");
+ goto error;
+ }
+
+ if (!uniDRIGetClientDriverName(dpy, screen, &ddxDriverMajor,
+ &ddxDriverMinor, &ddxDriverPatch,
+ &driverName)) {
+ fprintf(stderr, "Could not get DRI driver name.\n");
+ goto error;
+ }
+
+ if ((drmFD = drmOpen(NULL, curBusID)) < 0) {
+ perror("DRM Device could not be opened");
+ goto error;
+ }
+
+ drmGetMagic(drmFD, &magic);
+ if (!uniDRIAuthConnection(dpy, screen, magic)) {
+ fprintf(stderr, "Could not get X server to authenticate us.\n");
+ goto error;
+ }
+
+ if (!uniDRICreateContext(dpy, screen, winAttr.visual,
+ &id, &hHWContext)) {
+ fprintf(stderr, "Could not create DRI context.\n");
+ goto error;
+ }
+
+ /* FIXME: create a radeon pipe_screen from drmFD and hHWContext */
+
+ return NULL;
+
+error:
+ return NULL;
+}
+
+
+static struct pipe_context *
+radeon_hardpipe_context_create(struct pipe_screen *screen)
+{
+ /* FIXME: create a radeon pipe_context from screen */
+
+ return NULL;
+}
+
+
+const struct st_winsys st_hardpipe_winsys = {
+ &radeon_hardpipe_screen_create,
+ &radeon_hardpipe_context_create,
+};
+
diff --git a/src/gallium/winsys/drm/radeon/python/xf86dri.c b/src/gallium/winsys/drm/radeon/python/xf86dri.c
new file mode 100644
index 0000000000..1736f1e54f
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/python/xf86dri.c
@@ -0,0 +1,605 @@
+/**************************************************************************
+
+Copyright 1998-1999 Precision Insight, Inc., Cedar Park, Texas.
+Copyright 2000 VA Linux Systems, Inc.
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+/*
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Jens Owen <jens@tungstengraphics.com>
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ */
+
+/* THIS IS NOT AN X CONSORTIUM STANDARD */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define NEED_REPLIES
+#include <X11/Xlibint.h>
+#include <X11/extensions/Xext.h>
+#include <X11/extensions/extutil.h>
+#include "xf86dristr.h"
+
+static XExtensionInfo _xf86dri_info_data;
+static XExtensionInfo *xf86dri_info = &_xf86dri_info_data;
+static char xf86dri_extension_name[] = XF86DRINAME;
+
+#define uniDRICheckExtension(dpy,i,val) \
+ XextCheckExtension (dpy, i, xf86dri_extension_name, val)
+
+/*****************************************************************************
+ * *
+ * private utility routines *
+ * *
+ *****************************************************************************/
+
+static int close_display(Display * dpy, XExtCodes * extCodes);
+static /* const */ XExtensionHooks xf86dri_extension_hooks = {
+ NULL, /* create_gc */
+ NULL, /* copy_gc */
+ NULL, /* flush_gc */
+ NULL, /* free_gc */
+ NULL, /* create_font */
+ NULL, /* free_font */
+ close_display, /* close_display */
+ NULL, /* wire_to_event */
+ NULL, /* event_to_wire */
+ NULL, /* error */
+ NULL, /* error_string */
+};
+
+static
+XEXT_GENERATE_FIND_DISPLAY(find_display, xf86dri_info,
+ xf86dri_extension_name, &xf86dri_extension_hooks,
+ 0, NULL)
+
+ static XEXT_GENERATE_CLOSE_DISPLAY(close_display, xf86dri_info)
+
+/*****************************************************************************
+ * *
+ * public XFree86-DRI Extension routines *
+ * *
+ *****************************************************************************/
+#if 0
+#include <stdio.h>
+#define TRACE(msg) fprintf(stderr,"uniDRI%s\n", msg);
+#else
+#define TRACE(msg)
+#endif
+ Bool uniDRIQueryExtension(dpy, event_basep, error_basep)
+ Display *dpy;
+ int *event_basep, *error_basep;
+{
+ XExtDisplayInfo *info = find_display(dpy);
+
+ TRACE("QueryExtension...");
+ if (XextHasExtension(info)) {
+ *event_basep = info->codes->first_event;
+ *error_basep = info->codes->first_error;
+ TRACE("QueryExtension... return True");
+ return True;
+ } else {
+ TRACE("QueryExtension... return False");
+ return False;
+ }
+}
+
+Bool
+uniDRIQueryVersion(dpy, majorVersion, minorVersion, patchVersion)
+ Display *dpy;
+ int *majorVersion;
+ int *minorVersion;
+ int *patchVersion;
+{
+ XExtDisplayInfo *info = find_display(dpy);
+ xXF86DRIQueryVersionReply rep;
+ xXF86DRIQueryVersionReq *req;
+
+ TRACE("QueryVersion...");
+ uniDRICheckExtension(dpy, info, False);
+
+ LockDisplay(dpy);
+ GetReq(XF86DRIQueryVersion, req);
+ req->reqType = info->codes->major_opcode;
+ req->driReqType = X_XF86DRIQueryVersion;
+ if (!_XReply(dpy, (xReply *) & rep, 0, xFalse)) {
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("QueryVersion... return False");
+ return False;
+ }
+ *majorVersion = rep.majorVersion;
+ *minorVersion = rep.minorVersion;
+ *patchVersion = rep.patchVersion;
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("QueryVersion... return True");
+ return True;
+}
+
+Bool
+uniDRIQueryDirectRenderingCapable(dpy, screen, isCapable)
+ Display *dpy;
+ int screen;
+ Bool *isCapable;
+{
+ XExtDisplayInfo *info = find_display(dpy);
+ xXF86DRIQueryDirectRenderingCapableReply rep;
+ xXF86DRIQueryDirectRenderingCapableReq *req;
+
+ TRACE("QueryDirectRenderingCapable...");
+ uniDRICheckExtension(dpy, info, False);
+
+ LockDisplay(dpy);
+ GetReq(XF86DRIQueryDirectRenderingCapable, req);
+ req->reqType = info->codes->major_opcode;
+ req->driReqType = X_XF86DRIQueryDirectRenderingCapable;
+ req->screen = screen;
+ if (!_XReply(dpy, (xReply *) & rep, 0, xFalse)) {
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("QueryDirectRenderingCapable... return False");
+ return False;
+ }
+ *isCapable = rep.isCapable;
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("QueryDirectRenderingCapable... return True");
+ return True;
+}
+
+Bool
+uniDRIOpenConnection(dpy, screen, hSAREA, busIdString)
+ Display *dpy;
+ int screen;
+ drm_handle_t *hSAREA;
+ char **busIdString;
+{
+ XExtDisplayInfo *info = find_display(dpy);
+ xXF86DRIOpenConnectionReply rep;
+ xXF86DRIOpenConnectionReq *req;
+
+ TRACE("OpenConnection...");
+ uniDRICheckExtension(dpy, info, False);
+
+ LockDisplay(dpy);
+ GetReq(XF86DRIOpenConnection, req);
+ req->reqType = info->codes->major_opcode;
+ req->driReqType = X_XF86DRIOpenConnection;
+ req->screen = screen;
+ if (!_XReply(dpy, (xReply *) & rep, 0, xFalse)) {
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("OpenConnection... return False");
+ return False;
+ }
+
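+ /* The SAREA handle arrives as two 32-bit halves; reassemble it on 64-bit builds. */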
+ *hSAREA = rep.hSAREALow;
+#ifdef LONG64
+ if (sizeof(drm_handle_t) == 8) {
+ *hSAREA |= ((unsigned long)rep.hSAREAHigh) << 32;
+ }
+#endif
+ if (rep.length) {
+ if (!(*busIdString = (char *)Xcalloc(rep.busIdStringLength + 1, 1))) {
+ _XEatData(dpy, ((rep.busIdStringLength + 3) & ~3));
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("OpenConnection... return False");
+ return False;
+ }
+ _XReadPad(dpy, *busIdString, rep.busIdStringLength);
+ } else {
+ *busIdString = NULL;
+ }
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("OpenConnection... return True");
+ return True;
+}
+
+Bool
+uniDRIAuthConnection(dpy, screen, magic)
+ Display *dpy;
+ int screen;
+ drm_magic_t magic;
+{
+ XExtDisplayInfo *info = find_display(dpy);
+ xXF86DRIAuthConnectionReq *req;
+ xXF86DRIAuthConnectionReply rep;
+
+ TRACE("AuthConnection...");
+ uniDRICheckExtension(dpy, info, False);
+
+ LockDisplay(dpy);
+ GetReq(XF86DRIAuthConnection, req);
+ req->reqType = info->codes->major_opcode;
+ req->driReqType = X_XF86DRIAuthConnection;
+ req->screen = screen;
+ req->magic = magic;
+ rep.authenticated = 0;
+ if (!_XReply(dpy, (xReply *) & rep, 0, xFalse) || !rep.authenticated) {
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("AuthConnection... return False");
+ return False;
+ }
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("AuthConnection... return True");
+ return True;
+}
+
+Bool
+uniDRICloseConnection(dpy, screen)
+ Display *dpy;
+ int screen;
+{
+ XExtDisplayInfo *info = find_display(dpy);
+ xXF86DRICloseConnectionReq *req;
+
+ TRACE("CloseConnection...");
+
+ uniDRICheckExtension(dpy, info, False);
+
+ LockDisplay(dpy);
+ GetReq(XF86DRICloseConnection, req);
+ req->reqType = info->codes->major_opcode;
+ req->driReqType = X_XF86DRICloseConnection;
+ req->screen = screen;
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("CloseConnection... return True");
+ return True;
+}
+
+Bool
+uniDRIGetClientDriverName(dpy, screen, ddxDriverMajorVersion,
+ ddxDriverMinorVersion, ddxDriverPatchVersion,
+ clientDriverName)
+ Display *dpy;
+ int screen;
+ int *ddxDriverMajorVersion;
+ int *ddxDriverMinorVersion;
+ int *ddxDriverPatchVersion;
+ char **clientDriverName;
+{
+ XExtDisplayInfo *info = find_display(dpy);
+ xXF86DRIGetClientDriverNameReply rep;
+ xXF86DRIGetClientDriverNameReq *req;
+
+ TRACE("GetClientDriverName...");
+ uniDRICheckExtension(dpy, info, False);
+
+ LockDisplay(dpy);
+ GetReq(XF86DRIGetClientDriverName, req);
+ req->reqType = info->codes->major_opcode;
+ req->driReqType = X_XF86DRIGetClientDriverName;
+ req->screen = screen;
+ if (!_XReply(dpy, (xReply *) & rep, 0, xFalse)) {
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("GetClientDriverName... return False");
+ return False;
+ }
+
+ *ddxDriverMajorVersion = rep.ddxDriverMajorVersion;
+ *ddxDriverMinorVersion = rep.ddxDriverMinorVersion;
+ *ddxDriverPatchVersion = rep.ddxDriverPatchVersion;
+
+ if (rep.length) {
+ if (!(*clientDriverName =
+ (char *)Xcalloc(rep.clientDriverNameLength + 1, 1))) {
+ _XEatData(dpy, ((rep.clientDriverNameLength + 3) & ~3));
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("GetClientDriverName... return False");
+ return False;
+ }
+ _XReadPad(dpy, *clientDriverName, rep.clientDriverNameLength);
+ } else {
+ *clientDriverName = NULL;
+ }
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("GetClientDriverName... return True");
+ return True;
+}
+
+Bool
+uniDRICreateContextWithConfig(dpy, screen, configID, context, hHWContext)
+ Display *dpy;
+ int screen;
+ int configID;
+ XID *context;
+ drm_context_t *hHWContext;
+{
+ XExtDisplayInfo *info = find_display(dpy);
+ xXF86DRICreateContextReply rep;
+ xXF86DRICreateContextReq *req;
+
+ TRACE("CreateContext...");
+ uniDRICheckExtension(dpy, info, False);
+
+ LockDisplay(dpy);
+ GetReq(XF86DRICreateContext, req);
+ req->reqType = info->codes->major_opcode;
+ req->driReqType = X_XF86DRICreateContext;
+ req->visual = configID;
+ req->screen = screen;
+ *context = XAllocID(dpy);
+ req->context = *context;
+ if (!_XReply(dpy, (xReply *) & rep, 0, xFalse)) {
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("CreateContext... return False");
+ return False;
+ }
+ *hHWContext = rep.hHWContext;
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("CreateContext... return True");
+ return True;
+}
+
+Bool
+uniDRICreateContext(dpy, screen, visual, context, hHWContext)
+ Display *dpy;
+ int screen;
+ Visual *visual;
+ XID *context;
+ drm_context_t *hHWContext;
+{
+ return uniDRICreateContextWithConfig(dpy, screen, visual->visualid,
+ context, hHWContext);
+}
+
+Bool
+uniDRIDestroyContext(Display * ndpy, int screen, XID context)
+{
+ Display *const dpy = (Display *) ndpy;
+ XExtDisplayInfo *info = find_display(dpy);
+ xXF86DRIDestroyContextReq *req;
+
+ TRACE("DestroyContext...");
+ uniDRICheckExtension(dpy, info, False);
+
+ LockDisplay(dpy);
+ GetReq(XF86DRIDestroyContext, req);
+ req->reqType = info->codes->major_opcode;
+ req->driReqType = X_XF86DRIDestroyContext;
+ req->screen = screen;
+ req->context = context;
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("DestroyContext... return True");
+ return True;
+}
+
+Bool
+uniDRICreateDrawable(Display * ndpy, int screen,
+ Drawable drawable, drm_drawable_t * hHWDrawable)
+{
+ Display *const dpy = (Display *) ndpy;
+ XExtDisplayInfo *info = find_display(dpy);
+ xXF86DRICreateDrawableReply rep;
+ xXF86DRICreateDrawableReq *req;
+
+ TRACE("CreateDrawable...");
+ uniDRICheckExtension(dpy, info, False);
+
+ LockDisplay(dpy);
+ GetReq(XF86DRICreateDrawable, req);
+ req->reqType = info->codes->major_opcode;
+ req->driReqType = X_XF86DRICreateDrawable;
+ req->screen = screen;
+ req->drawable = drawable;
+ if (!_XReply(dpy, (xReply *) & rep, 0, xFalse)) {
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("CreateDrawable... return False");
+ return False;
+ }
+ *hHWDrawable = rep.hHWDrawable;
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("CreateDrawable... return True");
+ return True;
+}
+
+Bool
+uniDRIDestroyDrawable(Display * ndpy, int screen, Drawable drawable)
+{
+ Display *const dpy = (Display *) ndpy;
+ XExtDisplayInfo *info = find_display(dpy);
+ xXF86DRIDestroyDrawableReq *req;
+
+ TRACE("DestroyDrawable...");
+ uniDRICheckExtension(dpy, info, False);
+
+ LockDisplay(dpy);
+ GetReq(XF86DRIDestroyDrawable, req);
+ req->reqType = info->codes->major_opcode;
+ req->driReqType = X_XF86DRIDestroyDrawable;
+ req->screen = screen;
+ req->drawable = drawable;
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("DestroyDrawable... return True");
+ return True;
+}
+
+Bool
+uniDRIGetDrawableInfo(Display * dpy, int screen, Drawable drawable,
+ unsigned int *index, unsigned int *stamp,
+ int *X, int *Y, int *W, int *H,
+ int *numClipRects, drm_clip_rect_t ** pClipRects,
+ int *backX, int *backY,
+ int *numBackClipRects,
+ drm_clip_rect_t ** pBackClipRects)
+{
+ XExtDisplayInfo *info = find_display(dpy);
+ xXF86DRIGetDrawableInfoReply rep;
+ xXF86DRIGetDrawableInfoReq *req;
+ int total_rects;
+
+ TRACE("GetDrawableInfo...");
+ uniDRICheckExtension(dpy, info, False);
+
+ LockDisplay(dpy);
+ GetReq(XF86DRIGetDrawableInfo, req);
+ req->reqType = info->codes->major_opcode;
+ req->driReqType = X_XF86DRIGetDrawableInfo;
+ req->screen = screen;
+ req->drawable = drawable;
+
+ if (!_XReply(dpy, (xReply *) & rep, 1, xFalse)) {
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("GetDrawableInfo... return False");
+ return False;
+ }
+ *index = rep.drawableTableIndex;
+ *stamp = rep.drawableTableStamp;
+ *X = (int)rep.drawableX;
+ *Y = (int)rep.drawableY;
+ *W = (int)rep.drawableWidth;
+ *H = (int)rep.drawableHeight;
+ *numClipRects = rep.numClipRects;
+ total_rects = *numClipRects;
+
+ *backX = rep.backX;
+ *backY = rep.backY;
+ *numBackClipRects = rep.numBackClipRects;
+ total_rects += *numBackClipRects;
+
+#if 0
+ /* Because of the fix in Xserver/GL/dri/xf86dri.c, this check breaks
+ * backwards compatibility (due to the >> 2 shift), but the fix
+ * enables multi-threaded apps to work.
+ */
+ if (rep.length != ((((SIZEOF(xXF86DRIGetDrawableInfoReply) -
+ SIZEOF(xGenericReply) +
+ total_rects * sizeof(drm_clip_rect_t)) +
+ 3) & ~3) >> 2)) {
+ _XEatData(dpy, rep.length);
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("GetDrawableInfo... return False");
+ return False;
+ }
+#endif
+
+ if (*numClipRects) {
+ int len = sizeof(drm_clip_rect_t) * (*numClipRects);
+
+ *pClipRects = (drm_clip_rect_t *) Xcalloc(len, 1);
+ if (*pClipRects)
+ _XRead(dpy, (char *)*pClipRects, len);
+ } else {
+ *pClipRects = NULL;
+ }
+
+ if (*numBackClipRects) {
+ int len = sizeof(drm_clip_rect_t) * (*numBackClipRects);
+
+ *pBackClipRects = (drm_clip_rect_t *) Xcalloc(len, 1);
+ if (*pBackClipRects)
+ _XRead(dpy, (char *)*pBackClipRects, len);
+ } else {
+ *pBackClipRects = NULL;
+ }
+
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("GetDrawableInfo... return True");
+ return True;
+}
+
+Bool
+uniDRIGetDeviceInfo(dpy, screen, hFrameBuffer,
+ fbOrigin, fbSize, fbStride, devPrivateSize, pDevPrivate)
+ Display *dpy;
+ int screen;
+ drm_handle_t *hFrameBuffer;
+ int *fbOrigin;
+ int *fbSize;
+ int *fbStride;
+ int *devPrivateSize;
+ void **pDevPrivate;
+{
+ XExtDisplayInfo *info = find_display(dpy);
+ xXF86DRIGetDeviceInfoReply rep;
+ xXF86DRIGetDeviceInfoReq *req;
+
+ TRACE("GetDeviceInfo...");
+ uniDRICheckExtension(dpy, info, False);
+
+ LockDisplay(dpy);
+ GetReq(XF86DRIGetDeviceInfo, req);
+ req->reqType = info->codes->major_opcode;
+ req->driReqType = X_XF86DRIGetDeviceInfo;
+ req->screen = screen;
+ if (!_XReply(dpy, (xReply *) & rep, 0, xFalse)) {
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("GetDeviceInfo... return False");
+ return False;
+ }
+
+ *hFrameBuffer = rep.hFrameBufferLow;
+#ifdef LONG64
+ if (sizeof(drm_handle_t) == 8) {
+ *hFrameBuffer |= ((unsigned long)rep.hFrameBufferHigh) << 32;
+ }
+#endif
+
+ *fbOrigin = rep.framebufferOrigin;
+ *fbSize = rep.framebufferSize;
+ *fbStride = rep.framebufferStride;
+ *devPrivateSize = rep.devPrivateSize;
+
+ if (rep.length) {
+ if (!(*pDevPrivate = (void *)Xcalloc(rep.devPrivateSize, 1))) {
+ _XEatData(dpy, ((rep.devPrivateSize + 3) & ~3));
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("GetDeviceInfo... return False");
+ return False;
+ }
+ _XRead(dpy, (char *)*pDevPrivate, rep.devPrivateSize);
+ } else {
+ *pDevPrivate = NULL;
+ }
+
+ UnlockDisplay(dpy);
+ SyncHandle();
+ TRACE("GetDeviceInfo... return True");
+ return True;
+}
diff --git a/src/gallium/winsys/drm/radeon/python/xf86dri.h b/src/gallium/winsys/drm/radeon/python/xf86dri.h
new file mode 100644
index 0000000000..bf6de37d9d
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/python/xf86dri.h
@@ -0,0 +1,123 @@
+/**************************************************************************
+
+Copyright 1998-1999 Precision Insight, Inc., Cedar Park, Texas.
+Copyright 2000 VA Linux Systems, Inc.
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+/**
+ * \file xf86dri.h
+ * Protocol numbers and function prototypes for DRI X protocol.
+ *
+ * \author Kevin E. Martin <martin@valinux.com>
+ * \author Jens Owen <jens@tungstengraphics.com>
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ */
+
+#ifndef _XF86DRI_H_
+#define _XF86DRI_H_
+
+#include <stdint.h>
+#include <X11/Xfuncproto.h>
+#include <drm/drm.h>
+
+#define X_XF86DRIQueryVersion 0
+#define X_XF86DRIQueryDirectRenderingCapable 1
+#define X_XF86DRIOpenConnection 2
+#define X_XF86DRICloseConnection 3
+#define X_XF86DRIGetClientDriverName 4
+#define X_XF86DRICreateContext 5
+#define X_XF86DRIDestroyContext 6
+#define X_XF86DRICreateDrawable 7
+#define X_XF86DRIDestroyDrawable 8
+#define X_XF86DRIGetDrawableInfo 9
+#define X_XF86DRIGetDeviceInfo 10
+#define X_XF86DRIAuthConnection 11
+#define X_XF86DRIOpenFullScreen 12 /* Deprecated */
+#define X_XF86DRICloseFullScreen 13 /* Deprecated */
+
+#define XF86DRINumberEvents 0
+
+#define XF86DRIClientNotLocal 0
+#define XF86DRIOperationNotSupported 1
+#define XF86DRINumberErrors (XF86DRIOperationNotSupported + 1)
+
+#ifndef _XF86DRI_SERVER_
+
+_XFUNCPROTOBEGIN
+ Bool uniDRIQueryExtension(Display * dpy, int *event_base,
+ int *error_base);
+
+Bool uniDRIQueryVersion(Display * dpy, int *majorVersion, int *minorVersion,
+ int *patchVersion);
+
+Bool uniDRIQueryDirectRenderingCapable(Display * dpy, int screen,
+ Bool * isCapable);
+
+Bool uniDRIOpenConnection(Display * dpy, int screen, drm_handle_t * hSAREA,
+ char **busIDString);
+
+Bool uniDRIAuthConnection(Display * dpy, int screen, drm_magic_t magic);
+
+Bool uniDRICloseConnection(Display * dpy, int screen);
+
+Bool uniDRIGetClientDriverName(Display * dpy, int screen,
+ int *ddxDriverMajorVersion,
+ int *ddxDriverMinorVersion,
+ int *ddxDriverPatchVersion,
+ char **clientDriverName);
+
+Bool uniDRICreateContext(Display * dpy, int screen, Visual * visual,
+ XID * ptr_to_returned_context_id,
+ drm_context_t * hHWContext);
+
+Bool uniDRICreateContextWithConfig(Display * dpy, int screen, int configID,
+ XID * ptr_to_returned_context_id,
+ drm_context_t * hHWContext);
+
+extern Bool uniDRIDestroyContext(Display * dpy, int screen, XID context_id);
+
+extern Bool uniDRICreateDrawable(Display * dpy, int screen,
+ Drawable drawable,
+ drm_drawable_t * hHWDrawable);
+
+extern Bool uniDRIDestroyDrawable(Display * dpy, int screen,
+ Drawable drawable);
+
+Bool uniDRIGetDrawableInfo(Display * dpy, int screen, Drawable drawable,
+ unsigned int *index, unsigned int *stamp,
+ int *X, int *Y, int *W, int *H,
+ int *numClipRects, drm_clip_rect_t ** pClipRects,
+ int *backX, int *backY,
+ int *numBackClipRects,
+ drm_clip_rect_t ** pBackClipRects);
+
+Bool uniDRIGetDeviceInfo(Display * dpy, int screen,
+ drm_handle_t * hFrameBuffer, int *fbOrigin,
+ int *fbSize, int *fbStride, int *devPrivateSize,
+ void **pDevPrivate);
+
+_XFUNCPROTOEND
+#endif /* _XF86DRI_SERVER_ */
+#endif /* _XF86DRI_H_ */
diff --git a/src/gallium/winsys/drm/radeon/python/xf86dristr.h b/src/gallium/winsys/drm/radeon/python/xf86dristr.h
new file mode 100644
index 0000000000..d898996360
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/python/xf86dristr.h
@@ -0,0 +1,389 @@
+/**************************************************************************
+
+Copyright 1998-1999 Precision Insight, Inc., Cedar Park, Texas.
+Copyright 2000 VA Linux Systems, Inc.
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+/*
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Jens Owen <jens@tungstengraphics.com>
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ */
+
+#ifndef _XF86DRISTR_H_
+#define _XF86DRISTR_H_
+
+#include "xf86dri.h"
+
+#define XF86DRINAME "XFree86-DRI"
+
+/* The DRI version number. This was originally set to be the same as the
+ * XFree86 version number. However, this version is really independent of
+ * the XFree86 version.
+ *
+ * Version History:
+ * 4.0.0: Original
+ * 4.0.1: Patch to bump clipstamp when windows are destroyed, 28 May 02
+ * 4.1.0: Add transition from single to multi in DRMInfo rec, 24 Jun 02
+ */
+#define XF86DRI_MAJOR_VERSION 4
+#define XF86DRI_MINOR_VERSION 1
+#define XF86DRI_PATCH_VERSION 0
+
+typedef struct _XF86DRIQueryVersion
+{
+ CARD8 reqType; /* always DRIReqCode */
+ CARD8 driReqType; /* always X_DRIQueryVersion */
+ CARD16 length B16;
+} xXF86DRIQueryVersionReq;
+
+#define sz_xXF86DRIQueryVersionReq 4
+
+typedef struct
+{
+ BYTE type; /* X_Reply */
+ BOOL pad1;
+ CARD16 sequenceNumber B16;
+ CARD32 length B32;
+ CARD16 majorVersion B16; /* major version of DRI protocol */
+ CARD16 minorVersion B16; /* minor version of DRI protocol */
+ CARD32 patchVersion B32; /* patch version of DRI protocol */
+ CARD32 pad3 B32;
+ CARD32 pad4 B32;
+ CARD32 pad5 B32;
+ CARD32 pad6 B32;
+} xXF86DRIQueryVersionReply;
+
+#define sz_xXF86DRIQueryVersionReply 32
+
+typedef struct _XF86DRIQueryDirectRenderingCapable
+{
+ CARD8 reqType; /* always DRIReqCode */
+ CARD8 driReqType; /* X_DRIQueryDirectRenderingCapable */
+ CARD16 length B16;
+ CARD32 screen B32;
+} xXF86DRIQueryDirectRenderingCapableReq;
+
+#define sz_xXF86DRIQueryDirectRenderingCapableReq 8
+
+typedef struct
+{
+ BYTE type; /* X_Reply */
+ BOOL pad1;
+ CARD16 sequenceNumber B16;
+ CARD32 length B32;
+ BOOL isCapable;
+ BOOL pad2;
+ BOOL pad3;
+ BOOL pad4;
+ CARD32 pad5 B32;
+ CARD32 pad6 B32;
+ CARD32 pad7 B32;
+ CARD32 pad8 B32;
+ CARD32 pad9 B32;
+} xXF86DRIQueryDirectRenderingCapableReply;
+
+#define sz_xXF86DRIQueryDirectRenderingCapableReply 32
+
+typedef struct _XF86DRIOpenConnection
+{
+ CARD8 reqType; /* always DRIReqCode */
+ CARD8 driReqType; /* always X_DRIOpenConnection */
+ CARD16 length B16;
+ CARD32 screen B32;
+} xXF86DRIOpenConnectionReq;
+
+#define sz_xXF86DRIOpenConnectionReq 8
+
+typedef struct
+{
+ BYTE type; /* X_Reply */
+ BOOL pad1;
+ CARD16 sequenceNumber B16;
+ CARD32 length B32;
+ CARD32 hSAREALow B32;
+ CARD32 hSAREAHigh B32;
+ CARD32 busIdStringLength B32;
+ CARD32 pad6 B32;
+ CARD32 pad7 B32;
+ CARD32 pad8 B32;
+} xXF86DRIOpenConnectionReply;
+
+#define sz_xXF86DRIOpenConnectionReply 32
+
+typedef struct _XF86DRIAuthConnection
+{
+ CARD8 reqType; /* always DRIReqCode */
+ CARD8 driReqType; /* always X_DRICloseConnection */
+ CARD16 length B16;
+ CARD32 screen B32;
+ CARD32 magic B32;
+} xXF86DRIAuthConnectionReq;
+
+#define sz_xXF86DRIAuthConnectionReq 12
+
+typedef struct
+{
+ BYTE type;
+ BOOL pad1;
+ CARD16 sequenceNumber B16;
+ CARD32 length B32;
+ CARD32 authenticated B32;
+ CARD32 pad2 B32;
+ CARD32 pad3 B32;
+ CARD32 pad4 B32;
+ CARD32 pad5 B32;
+ CARD32 pad6 B32;
+} xXF86DRIAuthConnectionReply;
+
+#define sz_xXF86DRIAuthConnectionReply 32
+
+typedef struct _XF86DRICloseConnection
+{
+ CARD8 reqType; /* always DRIReqCode */
+ CARD8 driReqType; /* always X_DRICloseConnection */
+ CARD16 length B16;
+ CARD32 screen B32;
+} xXF86DRICloseConnectionReq;
+
+#define sz_xXF86DRICloseConnectionReq 8
+
+typedef struct _XF86DRIGetClientDriverName
+{
+ CARD8 reqType; /* always DRIReqCode */
+ CARD8 driReqType; /* always X_DRIGetClientDriverName */
+ CARD16 length B16;
+ CARD32 screen B32;
+} xXF86DRIGetClientDriverNameReq;
+
+#define sz_xXF86DRIGetClientDriverNameReq 8
+
+typedef struct
+{
+ BYTE type; /* X_Reply */
+ BOOL pad1;
+ CARD16 sequenceNumber B16;
+ CARD32 length B32;
+ CARD32 ddxDriverMajorVersion B32;
+ CARD32 ddxDriverMinorVersion B32;
+ CARD32 ddxDriverPatchVersion B32;
+ CARD32 clientDriverNameLength B32;
+ CARD32 pad5 B32;
+ CARD32 pad6 B32;
+} xXF86DRIGetClientDriverNameReply;
+
+#define sz_xXF86DRIGetClientDriverNameReply 32
+
+typedef struct _XF86DRICreateContext
+{
+ CARD8 reqType; /* always DRIReqCode */
+ CARD8 driReqType; /* always X_DRICreateContext */
+ CARD16 length B16;
+ CARD32 screen B32;
+ CARD32 visual B32;
+ CARD32 context B32;
+} xXF86DRICreateContextReq;
+
+#define sz_xXF86DRICreateContextReq 16
+
+typedef struct
+{
+ BYTE type; /* X_Reply */
+ BOOL pad1;
+ CARD16 sequenceNumber B16;
+ CARD32 length B32;
+ CARD32 hHWContext B32;
+ CARD32 pad2 B32;
+ CARD32 pad3 B32;
+ CARD32 pad4 B32;
+ CARD32 pad5 B32;
+ CARD32 pad6 B32;
+} xXF86DRICreateContextReply;
+
+#define sz_xXF86DRICreateContextReply 32
+
+typedef struct _XF86DRIDestroyContext
+{
+ CARD8 reqType; /* always DRIReqCode */
+ CARD8 driReqType; /* always X_DRIDestroyContext */
+ CARD16 length B16;
+ CARD32 screen B32;
+ CARD32 context B32;
+} xXF86DRIDestroyContextReq;
+
+#define sz_xXF86DRIDestroyContextReq 12
+
+typedef struct _XF86DRICreateDrawable
+{
+ CARD8 reqType; /* always DRIReqCode */
+ CARD8 driReqType; /* always X_DRICreateDrawable */
+ CARD16 length B16;
+ CARD32 screen B32;
+ CARD32 drawable B32;
+} xXF86DRICreateDrawableReq;
+
+#define sz_xXF86DRICreateDrawableReq 12
+
+typedef struct
+{
+ BYTE type; /* X_Reply */
+ BOOL pad1;
+ CARD16 sequenceNumber B16;
+ CARD32 length B32;
+ CARD32 hHWDrawable B32;
+ CARD32 pad2 B32;
+ CARD32 pad3 B32;
+ CARD32 pad4 B32;
+ CARD32 pad5 B32;
+ CARD32 pad6 B32;
+} xXF86DRICreateDrawableReply;
+
+#define sz_xXF86DRICreateDrawableReply 32
+
+typedef struct _XF86DRIDestroyDrawable
+{
+ CARD8 reqType; /* always DRIReqCode */
+ CARD8 driReqType; /* always X_DRIDestroyDrawable */
+ CARD16 length B16;
+ CARD32 screen B32;
+ CARD32 drawable B32;
+} xXF86DRIDestroyDrawableReq;
+
+#define sz_xXF86DRIDestroyDrawableReq 12
+
+typedef struct _XF86DRIGetDrawableInfo
+{
+ CARD8 reqType; /* always DRIReqCode */
+ CARD8 driReqType; /* always X_DRIGetDrawableInfo */
+ CARD16 length B16;
+ CARD32 screen B32;
+ CARD32 drawable B32;
+} xXF86DRIGetDrawableInfoReq;
+
+#define sz_xXF86DRIGetDrawableInfoReq 12
+
+typedef struct
+{
+ BYTE type; /* X_Reply */
+ BOOL pad1;
+ CARD16 sequenceNumber B16;
+ CARD32 length B32;
+ CARD32 drawableTableIndex B32;
+ CARD32 drawableTableStamp B32;
+ INT16 drawableX B16;
+ INT16 drawableY B16;
+ INT16 drawableWidth B16;
+ INT16 drawableHeight B16;
+ CARD32 numClipRects B32;
+ INT16 backX B16;
+ INT16 backY B16;
+ CARD32 numBackClipRects B32;
+} xXF86DRIGetDrawableInfoReply;
+
+#define sz_xXF86DRIGetDrawableInfoReply 36
+
+typedef struct _XF86DRIGetDeviceInfo
+{
+ CARD8 reqType; /* always DRIReqCode */
+ CARD8 driReqType; /* always X_DRIGetDeviceInfo */
+ CARD16 length B16;
+ CARD32 screen B32;
+} xXF86DRIGetDeviceInfoReq;
+
+#define sz_xXF86DRIGetDeviceInfoReq 8
+
+typedef struct
+{
+ BYTE type; /* X_Reply */
+ BOOL pad1;
+ CARD16 sequenceNumber B16;
+ CARD32 length B32;
+ CARD32 hFrameBufferLow B32;
+ CARD32 hFrameBufferHigh B32;
+ CARD32 framebufferOrigin B32;
+ CARD32 framebufferSize B32;
+ CARD32 framebufferStride B32;
+ CARD32 devPrivateSize B32;
+} xXF86DRIGetDeviceInfoReply;
+
+#define sz_xXF86DRIGetDeviceInfoReply 32
+
+typedef struct _XF86DRIOpenFullScreen
+{
+ CARD8 reqType; /* always DRIReqCode */
+ CARD8 driReqType; /* always X_DRIOpenFullScreen */
+ CARD16 length B16;
+ CARD32 screen B32;
+ CARD32 drawable B32;
+} xXF86DRIOpenFullScreenReq;
+
+#define sz_xXF86DRIOpenFullScreenReq 12
+
+typedef struct
+{
+ BYTE type;
+ BOOL pad1;
+ CARD16 sequenceNumber B16;
+ CARD32 length B32;
+ CARD32 isFullScreen B32;
+ CARD32 pad2 B32;
+ CARD32 pad3 B32;
+ CARD32 pad4 B32;
+ CARD32 pad5 B32;
+ CARD32 pad6 B32;
+} xXF86DRIOpenFullScreenReply;
+
+#define sz_xXF86DRIOpenFullScreenReply 32
+
+typedef struct _XF86DRICloseFullScreen
+{
+ CARD8 reqType; /* always DRIReqCode */
+ CARD8 driReqType; /* always X_DRICloseFullScreen */
+ CARD16 length B16;
+ CARD32 screen B32;
+ CARD32 drawable B32;
+} xXF86DRICloseFullScreenReq;
+
+#define sz_xXF86DRICloseFullScreenReq 12
+
+typedef struct
+{
+ BYTE type;
+ BOOL pad1;
+ CARD16 sequenceNumber B16;
+ CARD32 length B32;
+ CARD32 pad2 B32;
+ CARD32 pad3 B32;
+ CARD32 pad4 B32;
+ CARD32 pad5 B32;
+ CARD32 pad6 B32;
+ CARD32 pad7 B32;
+} xXF86DRICloseFullScreenReply;
+
+#define sz_xXF86DRICloseFullScreenReply 32
+
+#endif /* _XF86DRISTR_H_ */
diff --git a/src/gallium/winsys/drm/radeon/xorg/Makefile b/src/gallium/winsys/drm/radeon/xorg/Makefile
new file mode 100644
index 0000000000..0241625f69
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/xorg/Makefile
@@ -0,0 +1,42 @@
+TARGET = modesetting_drv.so
+CFILES = $(wildcard ./*.c)
+OBJECTS = $(patsubst ./%.c,./%.o,$(CFILES))
+GALLIUMDIR = ../../../..
+TOP = ../../../../../..
+
+include ${TOP}/configs/current
+
+CFLAGS = -DHAVE_CONFIG_H \
+ -g -Wall -Wimplicit-function-declaration -fPIC \
+ $(shell pkg-config --cflags pixman-1 xorg-server libdrm xproto) \
+ -I${GALLIUMDIR}/include \
+ -I${GALLIUMDIR}/drivers \
+ -I${GALLIUMDIR}/auxiliary \
+ -I${TOP}/src/mesa \
+ -I$(TOP)/include \
+ -I$(TOP)/src/egl/main
+
+LIBS = \
+ $(GALLIUMDIR)/state_trackers/xorg/libxorgtracker.a \
+ $(GALLIUMDIR)/winsys/drm/radeon/core/libradeonwinsys.a \
+ $(TOP)/src/gallium/drivers/r300/libr300.a \
+ $(GALLIUM_AUXILIARIES)
+
+#############################################
+
+
+
+all default: $(TARGET)
+
+$(TARGET): $(OBJECTS) Makefile $(GALLIUMDIR)/state_trackers/xorg/libxorgtracker.a
+ $(TOP)/bin/mklib -noprefix -o $@ \
+ $(OBJECTS) $(LIBS) $(shell pkg-config --libs libdrm) -ldrm_radeon
+
+clean:
+ rm -rf $(OBJECTS) $(TARGET)
+
+install:
+ $(INSTALL) -d $(DESTDIR)/$(XORG_DRIVER_INSTALL_DIR)
+ $(MINSTALL) -m 755 $(TARGET) $(DESTDIR)/$(XORG_DRIVER_INSTALL_DIR)
+
+.PHONY: all clean install
diff --git a/src/gallium/winsys/drm/radeon/xorg/radeon_xorg.c b/src/gallium/winsys/drm/radeon/xorg/radeon_xorg.c
new file mode 100644
index 0000000000..837f2aa8fe
--- /dev/null
+++ b/src/gallium/winsys/drm/radeon/xorg/radeon_xorg.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Author: Alan Hourihane <alanh@tungstengraphics.com>
+ * Author: Jakob Bornecrantz <wallbraker@gmail.com>
+ * Author: Corbin Simpson <MostAwesomedude@gmail.com>
+ *
+ */
+
+#include "../../../../state_trackers/xorg/xorg_winsys.h"
+
+static void radeon_xorg_identify(int flags);
+static Bool radeon_xorg_pci_probe(DriverPtr driver,
+ int entity_num,
+ struct pci_device *device,
+ intptr_t match_data);
+
+static const struct pci_id_match radeon_xorg_device_match[] = {
+ {0x1002, PCI_MATCH_ANY, PCI_MATCH_ANY, PCI_MATCH_ANY, 0, 0, 0},
+ {0, 0, 0},
+};
+
+static SymTabRec radeon_xorg_chipsets[] = {
+ {PCI_MATCH_ANY, "ATI/AMD Radeon Graphics Chipset"},
+ {-1, NULL}
+};
+
+static PciChipsets radeon_xorg_pci_devices[] = {
+ {PCI_MATCH_ANY, PCI_MATCH_ANY, NULL},
+ {-1, -1, NULL}
+};
+
+static XF86ModuleVersionInfo radeon_xorg_version = {
+ "modesetting",
+ MODULEVENDORSTRING,
+ MODINFOSTRING1,
+ MODINFOSTRING2,
+ XORG_VERSION_CURRENT,
+ 0, 1, 0, /* major, minor, patch */
+ ABI_CLASS_VIDEODRV,
+ ABI_VIDEODRV_VERSION,
+ MOD_CLASS_VIDEODRV,
+ {0, 0, 0, 0}
+};
+
+/*
+ * Xorg driver exported structures
+ */
+
+_X_EXPORT DriverRec modesetting = {
+ 1,
+ "modesetting",
+ radeon_xorg_identify,
+ NULL,
+ xorg_tracker_available_options,
+ NULL,
+ 0,
+ NULL,
+ radeon_xorg_device_match,
+ radeon_xorg_pci_probe
+};
+
+static MODULESETUPPROTO(radeon_xorg_setup);
+
+_X_EXPORT XF86ModuleData modesettingModuleData = {
+ &radeon_xorg_version,
+ radeon_xorg_setup,
+ NULL
+};
+
+/*
+ * Xorg driver functions
+ */
+
+static pointer
+radeon_xorg_setup(pointer module, pointer opts, int *errmaj, int *errmin)
+{
+ static Bool setupDone = 0;
+
+ /* This module should be loaded only once, but check to be sure.
+ */
+ if (!setupDone) {
+ setupDone = 1;
+ xf86AddDriver(&modesetting, module, HaveDriverFuncs);
+
+ /*
+ * The return value must be non-NULL on success even though there
+ * is no TearDownProc.
+ */
+ return (pointer) 1;
+ } else {
+ if (errmaj)
+ *errmaj = LDR_ONCEONLY;
+ return NULL;
+ }
+}
+
+static void
+radeon_xorg_identify(int flags)
+{
+ xf86PrintChipsets("modesetting", "Driver for Modesetting Kernel Drivers",
+ radeon_xorg_chipsets);
+}
+
+static Bool
+radeon_xorg_pci_probe(DriverPtr driver,
+ int entity_num, struct pci_device *device, intptr_t match_data)
+{
+ ScrnInfoPtr scrn = NULL;
+ EntityInfoPtr entity;
+
+ scrn = xf86ConfigPciEntity(scrn, 0, entity_num, radeon_xorg_pci_devices,
+ NULL, NULL, NULL, NULL, NULL);
+ if (scrn != NULL) {
+ scrn->driverVersion = 1;
+ scrn->driverName = "radeon";
+ scrn->name = "modesetting";
+ scrn->Probe = NULL;
+
+ entity = xf86GetEntityInfo(entity_num);
+
+ /* Use all the functions from the xorg tracker */
+ xorg_tracker_set_functions(scrn);
+ }
+ return scrn != NULL;
+}
diff --git a/src/gallium/winsys/egl_xlib/Makefile b/src/gallium/winsys/egl_xlib/Makefile
new file mode 100644
index 0000000000..3efb7ed4af
--- /dev/null
+++ b/src/gallium/winsys/egl_xlib/Makefile
@@ -0,0 +1,89 @@
+# src/gallium/winsys/egl_xlib/Makefile
+
+# Build softpipe/xlib/EGL driver library/object: "egl_softpipe.so"
+
+
+TOP = ../../../..
+include $(TOP)/configs/current
+
+
+DRIVER_NAME = egl_softpipe.so
+
+
+INCLUDE_DIRS = \
+ -I$(TOP)/include \
+ -I$(TOP)/src/egl/main \
+ -I$(TOP)/src/mesa \
+ -I$(TOP)/src/mesa/main \
+ -I$(TOP)/src/gallium/include \
+ -I$(TOP)/src/gallium/drivers \
+ -I$(TOP)/src/gallium/auxiliary
+
+WINSYS_SOURCES = \
+ egl_xlib.c \
+ sw_winsys.c
+
+WINSYS_OBJECTS = $(WINSYS_SOURCES:.c=.o)
+
+
+LIBS = \
+ $(GALLIUM_DRIVERS) \
+ $(GALLIUM_AUXILIARIES)
+
+# XXX temporary (should create a separate lib with the GL API funcs and
+# mesa code, as done for ES 1.x, 2.x, OpenVG, etc)
+UNUSED_LIBS = \
+ $(TOP)/src/mesa/libglapi.a \
+ $(TOP)/src/mesa/libmesagallium.a \
+
+
+LOCAL_CFLAGS =
+
+
+.c.o:
+ $(CC) -c $(INCLUDE_DIRS) $(CFLAGS) $(LOCAL_CFLAGS) $< -o $@
+
+
+.PHONY: library
+
+
+default: depend library Makefile
+
+
+library: $(TOP)/$(LIB_DIR)/$(DRIVER_NAME)
+
+
+# Make the egl_softpipe.so library
+$(TOP)/$(LIB_DIR)/$(DRIVER_NAME): $(WINSYS_OBJECTS) $(LIBS)
+ $(TOP)/bin/mklib -o $(DRIVER_NAME) \
+ -linker "$(CC)" \
+ -noprefix \
+ -install $(TOP)/$(LIB_DIR) \
+ $(MKLIB_OPTIONS) $(WINSYS_OBJECTS) \
+ -Wl,--whole-archive $(LIBS) -Wl,--no-whole-archive
+
+
+depend: $(ALL_SOURCES)
+ @ echo "running $(MKDEP)"
+ @ rm -f depend # work around a failure seen on Ubuntu Gutsy
+ @ touch depend
+ @ $(MKDEP) $(MKDEP_OPTIONS) $(DEFINES) $(INCLUDE_DIRS) $(ALL_SOURCES) \
+ > /dev/null 2>/dev/null
+
+
+install: default
+ $(INSTALL) -d $(INSTALL_DIR)/$(LIB_DIR)
+ @if [ -e $(TOP)/$(LIB_DIR) ]; then \
+ $(MINSTALL) $(TOP)/$(LIB_DIR)/$(DRIVER_NAME) $(INSTALL_DIR)/$(LIB_DIR); \
+ fi
+
+
+# Emacs tags
+tags:
+ etags `find . -name \*.[ch]` $(TOP)/include/GL/*.h
+
+clean:
+ -rm -f *.o *~ *.bak
+
+
+include depend
diff --git a/src/gallium/winsys/egl_xlib/egl_xlib.c b/src/gallium/winsys/egl_xlib/egl_xlib.c
new file mode 100644
index 0000000000..d02f825047
--- /dev/null
+++ b/src/gallium/winsys/egl_xlib/egl_xlib.c
@@ -0,0 +1,869 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * EGL / softpipe / xlib winsys module
+ *
+ * Authors: Brian Paul
+ */
+
+
+#include <dlfcn.h>
+#include <X11/Xlib.h>
+#include <X11/Xutil.h>
+
+#include "pipe/p_compiler.h"
+#include "pipe/p_format.h"
+#include "pipe/p_state.h"
+#include "pipe/internal/p_winsys_screen.h"
+#include "util/u_memory.h"
+#include "softpipe/sp_winsys.h"
+#include "softpipe/sp_texture.h"
+
+#include "eglconfig.h"
+#include "eglconfigutil.h"
+#include "eglcontext.h"
+#include "egldisplay.h"
+#include "egldriver.h"
+#include "eglglobals.h"
+#include "egllog.h"
+#include "eglsurface.h"
+
+#include "state_tracker/st_public.h"
+
+#include "sw_winsys.h"
+
+
+/** subclass of _EGLDriver */
+struct xlib_egl_driver
+{
+ _EGLDriver Base; /**< base class */
+ EGLint apis;
+};
+
+
+/** driver data of _EGLDisplay */
+struct xlib_egl_display
+{
+ Display *Dpy;
+
+ struct pipe_winsys *winsys;
+ struct pipe_screen *screen;
+};
+
+
+/** subclass of _EGLContext */
+struct xlib_egl_context
+{
+ _EGLContext Base; /**< base class */
+
+ struct pipe_context *pipe; /**< Gallium driver context */
+ struct st_context *Context; /**< Mesa/gallium state tracker context */
+};
+
+
+/** subclass of _EGLSurface */
+struct xlib_egl_surface
+{
+ _EGLSurface Base; /**< base class */
+
+ /* These are set for window surface */
+ Display *Dpy; /**< The X Display of the window */
+ Window Win; /**< The user-created window ID */
+ GC Gc;
+ XVisualInfo VisInfo;
+
+ struct pipe_winsys *winsys;
+
+ struct st_framebuffer *Framebuffer;
+};
+
+
+static void
+flush_frontbuffer(struct pipe_winsys *pws,
+ struct pipe_surface *psurf,
+ void *context_private);
+
+
+/** cast wrapper */
+static INLINE struct xlib_egl_driver *
+xlib_egl_driver(_EGLDriver *drv)
+{
+ return (struct xlib_egl_driver *) drv;
+}
+
+
+static INLINE struct xlib_egl_display *
+xlib_egl_display(_EGLDisplay *dpy)
+{
+ return (struct xlib_egl_display *) dpy->DriverData;
+}
+
+
+static INLINE struct xlib_egl_surface *
+lookup_surface(_EGLSurface *surf)
+{
+ return (struct xlib_egl_surface *) surf;
+}
+
+
+static INLINE struct xlib_egl_context *
+lookup_context(_EGLContext *ctx)
+{
+ return (struct xlib_egl_context *) ctx;
+}
+
+
+static unsigned int
+bitcount(unsigned int n)
+{
+ unsigned int bits;
+ for (bits = 0; n > 0; n = n >> 1) {
+ bits += (n & 1);
+ }
+ return bits;
+}
+
+
+/**
+ * Create the EGLConfigs (one per X visual).
+ */
+static void
+create_configs(struct xlib_egl_display *xdpy, _EGLDisplay *disp)
+{
+ static const EGLint all_apis = (EGL_OPENGL_ES_BIT |
+ EGL_OPENGL_ES2_BIT |
+ EGL_OPENVG_BIT |
+ EGL_OPENGL_BIT);
+ XVisualInfo *visInfo, visTemplate;
+ int num_visuals, i;
+
+ /* get list of all X visuals, create an EGL config for each */
+ visTemplate.screen = DefaultScreen(xdpy->Dpy);
+ visInfo = XGetVisualInfo(xdpy->Dpy, VisualScreenMask,
+ &visTemplate, &num_visuals);
+ if (!visInfo) {
+ printf("egl_xlib.c: couldn't get any X visuals\n");
+ abort();
+ }
+
+ for (i = 0; i < num_visuals; i++) {
+ _EGLConfig *config = calloc(1, sizeof(_EGLConfig));
+ int id = i + 1;
+ int rbits = bitcount(visInfo[i].red_mask);
+ int gbits = bitcount(visInfo[i].green_mask);
+ int bbits = bitcount(visInfo[i].blue_mask);
+ int abits = bbits == 8 ? 8 : 0;
+ int zbits = 24;
+ int sbits = 8;
+ int visid = visInfo[i].visualid;
+#if defined(__cplusplus) || defined(c_plusplus)
+ int vistype = visInfo[i].c_class;
+#else
+ int vistype = visInfo[i].class;
+#endif
+
+ _eglInitConfig(config, id);
+ SET_CONFIG_ATTRIB(config, EGL_BUFFER_SIZE, rbits + gbits + bbits + abits);
+ SET_CONFIG_ATTRIB(config, EGL_RED_SIZE, rbits);
+ SET_CONFIG_ATTRIB(config, EGL_GREEN_SIZE, gbits);
+ SET_CONFIG_ATTRIB(config, EGL_BLUE_SIZE, bbits);
+ SET_CONFIG_ATTRIB(config, EGL_ALPHA_SIZE, abits);
+ SET_CONFIG_ATTRIB(config, EGL_DEPTH_SIZE, zbits);
+ SET_CONFIG_ATTRIB(config, EGL_STENCIL_SIZE, sbits);
+ SET_CONFIG_ATTRIB(config, EGL_NATIVE_VISUAL_ID, visid);
+ SET_CONFIG_ATTRIB(config, EGL_NATIVE_VISUAL_TYPE, vistype);
+ SET_CONFIG_ATTRIB(config, EGL_NATIVE_RENDERABLE, EGL_FALSE);
+ SET_CONFIG_ATTRIB(config, EGL_CONFORMANT, all_apis);
+ SET_CONFIG_ATTRIB(config, EGL_RENDERABLE_TYPE, all_apis);
+ SET_CONFIG_ATTRIB(config, EGL_SURFACE_TYPE, EGL_WINDOW_BIT | EGL_PBUFFER_BIT);
+ SET_CONFIG_ATTRIB(config, EGL_BIND_TO_TEXTURE_RGBA, EGL_TRUE);
+ SET_CONFIG_ATTRIB(config, EGL_BIND_TO_TEXTURE_RGB, EGL_TRUE);
+
+ _eglAddConfig(disp, config);
+ }
+
+ XFree(visInfo);
+}
+
+
+/**
+ * Called via eglInitialize(), drv->API.Initialize().
+ */
+static EGLBoolean
+xlib_eglInitialize(_EGLDriver *drv, _EGLDisplay *dpy,
+ EGLint *major, EGLint *minor)
+{
+ struct xlib_egl_driver *xdrv = xlib_egl_driver(drv);
+ struct xlib_egl_display *xdpy;
+
+ xdpy = CALLOC_STRUCT(xlib_egl_display);
+ if (!xdpy)
+ return _eglError(EGL_BAD_ALLOC, "eglInitialize");
+
+ xdpy->Dpy = (Display *) dpy->NativeDisplay;
+ if (!xdpy->Dpy) {
+ xdpy->Dpy = XOpenDisplay(NULL);
+ if (!xdpy->Dpy) {
+ free(xdpy);
+ return EGL_FALSE;
+ }
+ }
+
+ /* create winsys and pipe screen */
+ xdpy->winsys = create_sw_winsys();
+ if (!xdpy->winsys) {
+ free(xdpy);
+ return _eglError(EGL_BAD_ALLOC, "eglInitialize");
+ }
+ xdpy->winsys->flush_frontbuffer = flush_frontbuffer;
+ xdpy->screen = softpipe_create_screen(xdpy->winsys);
+ if (!xdpy->screen) {
+ free(xdpy->winsys);
+ free(xdpy);
+ return _eglError(EGL_BAD_ALLOC, "eglInitialize");
+ }
+
+ dpy->DriverData = (void *) xdpy;
+ dpy->ClientAPIsMask = xdrv->apis;
+
+ create_configs(xdpy, dpy);
+
+ /* we're supporting EGL 1.4 */
+ *major = 1;
+ *minor = 4;
+
+ return EGL_TRUE;
+}
+
+
+/**
+ * Called via eglTerminate(), drv->API.Terminate().
+ */
+static EGLBoolean
+xlib_eglTerminate(_EGLDriver *drv, _EGLDisplay *dpy)
+{
+ struct xlib_egl_display *xdpy = xlib_egl_display(dpy);
+
+ _eglReleaseDisplayResources(drv, dpy);
+ _eglCleanupDisplay(dpy);
+
+ xdpy->screen->destroy(xdpy->screen);
+ free(xdpy->winsys);
+
+ if (!dpy->NativeDisplay)
+ XCloseDisplay(xdpy->Dpy);
+ free(xdpy);
+
+ return EGL_TRUE;
+}
+
+
+static _EGLProc
+xlib_eglGetProcAddress(const char *procname)
+{
+ return (_EGLProc) st_get_proc_address(procname);
+}
+
+
+static void
+get_drawable_visual_info(Display *dpy, Drawable d, XVisualInfo *visInfo)
+{
+ XWindowAttributes attr;
+ XVisualInfo visTemp, *vis;
+ int num_visuals;
+
+ XGetWindowAttributes(dpy, d, &attr);
+
+ visTemp.screen = DefaultScreen(dpy);
+ visTemp.visualid = attr.visual->visualid;
+ vis = XGetVisualInfo(dpy,
+ (VisualScreenMask | VisualIDMask),
+ &visTemp, &num_visuals);
+ if (vis) {
+ *visInfo = *vis;
+ XFree(vis);
+ }
+}
+
+
+
+/** Get size of given window */
+static Status
+get_drawable_size(Display *dpy, Drawable d, uint *width, uint *height)
+{
+ Window root;
+ Status stat;
+ int xpos, ypos;
+ unsigned int w, h, bw, depth;
+ stat = XGetGeometry(dpy, d, &root, &xpos, &ypos, &w, &h, &bw, &depth);
+ *width = w;
+ *height = h;
+ return stat;
+}
+
+
+static void
+check_and_update_buffer_size(struct xlib_egl_surface *surface)
+{
+ uint width, height;
+ if (surface->Base.Type == EGL_PBUFFER_BIT) {
+ width = surface->Base.Width;
+ height = surface->Base.Height;
+ }
+ else {
+ get_drawable_size(surface->Dpy, surface->Win, &width, &height);
+ }
+ st_resize_framebuffer(surface->Framebuffer, width, height);
+ surface->Base.Width = width;
+ surface->Base.Height = height;
+}
+
+
+
+static void
+display_surface(struct pipe_winsys *pws,
+ struct pipe_surface *psurf,
+ struct xlib_egl_surface *xsurf)
+{
+ struct softpipe_texture *spt = softpipe_texture(psurf->texture);
+ XImage *ximage;
+ void *data;
+
+ if (xsurf->Base.Type == EGL_PBUFFER_BIT)
+ return;
+
+ ximage = XCreateImage(xsurf->Dpy,
+ xsurf->VisInfo.visual,
+ xsurf->VisInfo.depth,
+ ZPixmap, 0, /* format, offset */
+ NULL, /* data */
+ 0, 0, /* size */
+ 32, /* bitmap_pad */
+ 0); /* bytes_per_line */
+
+
+ assert(ximage->format);
+ assert(ximage->bitmap_unit);
+
+ data = pws->buffer_map(pws, spt->buffer, 0);
+
+ /* update XImage's fields */
+ ximage->data = data;
+ ximage->width = psurf->width;
+ ximage->height = psurf->height;
+ ximage->bytes_per_line = spt->stride[psurf->level];
+
+ XPutImage(xsurf->Dpy, xsurf->Win, xsurf->Gc,
+ ximage, 0, 0, 0, 0, psurf->width, psurf->height);
+
+ XSync(xsurf->Dpy, 0);
+
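+ /* Detach our mapped buffer before XDestroyImage(), which would
+ * otherwise try to free() pixel data it does not own.
+ */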
+ ximage->data = NULL;
+ XDestroyImage(ximage);
+
+ pws->buffer_unmap(pws, spt->buffer);
+}
+
+
+
+/** Display gallium surface in X window */
+static void
+flush_frontbuffer(struct pipe_winsys *pws,
+ struct pipe_surface *psurf,
+ void *context_private)
+{
+ struct xlib_egl_surface *xsurf = (struct xlib_egl_surface *) context_private;
+ display_surface(pws, psurf, xsurf);
+}
+
+
+
+/**
+ * Called via eglCreateContext(), drv->API.CreateContext().
+ */
+static _EGLContext *
+xlib_eglCreateContext(_EGLDriver *drv, _EGLDisplay *dpy, _EGLConfig *conf,
+ _EGLContext *share_list, const EGLint *attrib_list)
+{
+ struct xlib_egl_display *xdpy = xlib_egl_display(dpy);
+ struct xlib_egl_context *ctx;
+ struct st_context *share_ctx = NULL; /* XXX fix */
+ __GLcontextModes visual;
+
+ ctx = CALLOC_STRUCT(xlib_egl_context);
+ if (!ctx)
+ return NULL;
+
+ /* let EGL lib init the common stuff */
+ if (!_eglInitContext(drv, &ctx->Base, conf, attrib_list)) {
+ free(ctx);
+ return NULL;
+ }
+
+ /* API-dependent context creation */
+ switch (ctx->Base.ClientAPI) {
+ case EGL_OPENVG_API:
+ case EGL_OPENGL_ES_API:
+ _eglLog(_EGL_DEBUG, "Create Context for ES version %d\n",
+ ctx->Base.ClientVersion);
+ /* fall-through */
+ case EGL_OPENGL_API:
+ /* create a softpipe context */
+ ctx->pipe = softpipe_create(xdpy->screen);
+ /* Now do xlib / state tracker inits here */
+ _eglConfigToContextModesRec(conf, &visual);
+ ctx->Context = st_create_context(ctx->pipe, &visual, share_ctx);
+ break;
+ default:
+ _eglError(EGL_BAD_MATCH, "eglCreateContext(unsupported API)");
+ free(ctx);
+ return NULL;
+ }
+
+ return &ctx->Base;
+}
+
+
+static EGLBoolean
+xlib_eglDestroyContext(_EGLDriver *drv, _EGLDisplay *dpy, _EGLContext *ctx)
+{
+ struct xlib_egl_context *context = lookup_context(ctx);
+
+ if (!_eglIsContextBound(&context->Base)) {
+ /* API-dependent clean-up */
+ switch (context->Base.ClientAPI) {
+ case EGL_OPENGL_ES_API:
+ case EGL_OPENVG_API:
+ /* fall-through */
+ case EGL_OPENGL_API:
+ st_destroy_context(context->Context);
+ break;
+ default:
+ assert(0);
+ }
+ free(context);
+ }
+ return EGL_TRUE;
+}
+
+
+/**
+ * Called via eglMakeCurrent(), drv->API.MakeCurrent().
+ */
+static EGLBoolean
+xlib_eglMakeCurrent(_EGLDriver *drv, _EGLDisplay *dpy,
+ _EGLSurface *draw, _EGLSurface *read, _EGLContext *ctx)
+{
+ struct xlib_egl_context *context = lookup_context(ctx);
+ struct xlib_egl_surface *draw_surf = lookup_surface(draw);
+ struct xlib_egl_surface *read_surf = lookup_surface(read);
+ struct st_context *oldcontext = NULL;
+ _EGLContext *oldctx;
+
+ oldctx = _eglGetCurrentContext();
+ if (oldctx && _eglIsContextLinked(oldctx))
+ oldcontext = st_get_current();
+
+ if (!_eglMakeCurrent(drv, dpy, draw, read, ctx))
+ return EGL_FALSE;
+
+ /* Flush before switching context. Check client API? */
+ if (oldcontext)
+ st_flush(oldcontext, PIPE_FLUSH_RENDER_CACHE | PIPE_FLUSH_FRAME, NULL);
+ st_make_current((context ? context->Context : NULL),
+ (draw_surf ? draw_surf->Framebuffer : NULL),
+ (read_surf ? read_surf->Framebuffer : NULL));
+
+ if (draw_surf)
+ check_and_update_buffer_size(draw_surf);
+ if (read_surf && read_surf != draw_surf)
+ check_and_update_buffer_size(read_surf);
+
+ return EGL_TRUE;
+}
+
+
+static enum pipe_format
+choose_color_format(const __GLcontextModes *visual)
+{
+ if (visual->redBits == 8 &&
+ visual->greenBits == 8 &&
+ visual->blueBits == 8 &&
+ visual->alphaBits == 8) {
+ /* XXX this really also depends on the ordering of R,G,B,A */
+ return PIPE_FORMAT_A8R8G8B8_UNORM;
+ }
+ else {
+ assert(0);
+ return PIPE_FORMAT_NONE;
+ }
+}
+
+
+static enum pipe_format
+choose_depth_format(const __GLcontextModes *visual)
+{
+ if (visual->depthBits > 0)
+ return PIPE_FORMAT_S8Z24_UNORM;
+ else
+ return PIPE_FORMAT_NONE;
+}
+
+
+static enum pipe_format
+choose_stencil_format(const __GLcontextModes *visual)
+{
+ if (visual->stencilBits > 0)
+ return PIPE_FORMAT_S8Z24_UNORM;
+ else
+ return PIPE_FORMAT_NONE;
+}
+
+
+/**
+ * Called via eglCreateWindowSurface(), drv->API.CreateWindowSurface().
+ */
+static _EGLSurface *
+xlib_eglCreateWindowSurface(_EGLDriver *drv, _EGLDisplay *disp, _EGLConfig *conf,
+ NativeWindowType window, const EGLint *attrib_list)
+{
+ struct xlib_egl_display *xdpy = xlib_egl_display(disp);
+ struct xlib_egl_surface *surf;
+ __GLcontextModes visual;
+ uint width, height;
+
+ surf = CALLOC_STRUCT(xlib_egl_surface);
+ if (!surf)
+ return NULL;
+
+ /* Let EGL lib init the common stuff */
+ if (!_eglInitSurface(drv, &surf->Base, EGL_WINDOW_BIT,
+ conf, attrib_list)) {
+ free(surf);
+ return NULL;
+ }
+
+ /*
+ * Now init the Xlib and gallium stuff
+ */
+ surf->Win = (Window) window; /* The X window ID */
+ surf->Dpy = xdpy->Dpy; /* The X display */
+ surf->Gc = XCreateGC(surf->Dpy, surf->Win, 0, NULL);
+
+ surf->winsys = xdpy->winsys;
+
+ _eglConfigToContextModesRec(conf, &visual);
+ get_drawable_size(surf->Dpy, surf->Win, &width, &height);
+ get_drawable_visual_info(surf->Dpy, surf->Win, &surf->VisInfo);
+
+ surf->Base.Width = width;
+ surf->Base.Height = height;
+
+ /* Create GL statetracker framebuffer */
+ surf->Framebuffer = st_create_framebuffer(&visual,
+ choose_color_format(&visual),
+ choose_depth_format(&visual),
+ choose_stencil_format(&visual),
+ width, height,
+ (void *) surf);
+
+ st_resize_framebuffer(surf->Framebuffer, width, height);
+
+ return &surf->Base;
+}
+
+
+static _EGLSurface *
+xlib_eglCreatePbufferSurface(_EGLDriver *drv, _EGLDisplay *disp, _EGLConfig *conf,
+ const EGLint *attrib_list)
+{
+ struct xlib_egl_display *xdpy = xlib_egl_display(disp);
+ struct xlib_egl_surface *surf;
+ __GLcontextModes visual;
+ uint width, height;
+ EGLBoolean bind_texture;
+
+ surf = CALLOC_STRUCT(xlib_egl_surface);
+ if (!surf) {
+ _eglError(EGL_BAD_ALLOC, "eglCreatePbufferSurface");
+ return NULL;
+ }
+
+ if (!_eglInitSurface(drv, &surf->Base, EGL_PBUFFER_BIT,
+ conf, attrib_list)) {
+ free(surf);
+ return NULL;
+ }
+ if (surf->Base.Width < 0 || surf->Base.Height < 0) {
+ _eglError(EGL_BAD_PARAMETER, "eglCreatePbufferSurface");
+ free(surf);
+ return NULL;
+ }
+
+ bind_texture = (surf->Base.TextureFormat != EGL_NO_TEXTURE);
+ width = (uint) surf->Base.Width;
+ height = (uint) surf->Base.Height;
+ if ((surf->Base.TextureTarget == EGL_NO_TEXTURE && bind_texture) ||
+ (surf->Base.TextureTarget != EGL_NO_TEXTURE && !bind_texture)) {
+ _eglError(EGL_BAD_MATCH, "eglCreatePbufferSurface");
+ free(surf);
+ return NULL;
+ }
+ /* a framebuffer of zero width or height confuses st */
+ if (width == 0 || height == 0) {
+ _eglError(EGL_BAD_MATCH, "eglCreatePbufferSurface");
+ free(surf);
+ return NULL;
+ }
+ /* no mipmap generation */
+ if (surf->Base.MipmapTexture) {
+ _eglError(EGL_BAD_MATCH, "eglCreatePbufferSurface");
+ free(surf);
+ return NULL;
+ }
+
+ surf->winsys = xdpy->winsys;
+
+ _eglConfigToContextModesRec(conf, &visual);
+
+ /* Create GL statetracker framebuffer */
+ surf->Framebuffer = st_create_framebuffer(&visual,
+ choose_color_format(&visual),
+ choose_depth_format(&visual),
+ choose_stencil_format(&visual),
+ width, height,
+ (void *) surf);
+ st_resize_framebuffer(surf->Framebuffer, width, height);
+
+ return &surf->Base;
+}
+
+
+static EGLBoolean
+xlib_eglDestroySurface(_EGLDriver *drv, _EGLDisplay *dpy, _EGLSurface *surface)
+{
+ struct xlib_egl_surface *surf = lookup_surface(surface);
+ if (!_eglIsSurfaceBound(&surf->Base)) {
+ if (surf->Base.Type != EGL_PBUFFER_BIT)
+ XFreeGC(surf->Dpy, surf->Gc);
+ st_unreference_framebuffer(surf->Framebuffer);
+ free(surf);
+ }
+ return EGL_TRUE;
+}
+
+
+static EGLBoolean
+xlib_eglBindTexImage(_EGLDriver *drv, _EGLDisplay *dpy,
+ _EGLSurface *surface, EGLint buffer)
+{
+ struct xlib_egl_surface *xsurf = lookup_surface(surface);
+ struct xlib_egl_context *xctx;
+ struct pipe_surface *psurf;
+ enum pipe_format format;
+ int target;
+
+ if (!xsurf || xsurf->Base.Type != EGL_PBUFFER_BIT)
+ return _eglError(EGL_BAD_SURFACE, "eglBindTexImage");
+ if (buffer != EGL_BACK_BUFFER)
+ return _eglError(EGL_BAD_PARAMETER, "eglBindTexImage");
+ if (xsurf->Base.BoundToTexture)
+ return _eglError(EGL_BAD_ACCESS, "eglBindTexImage");
+
+ /* this should be kept in sync with choose_color_format() */
+ switch (xsurf->Base.TextureFormat) {
+ case EGL_TEXTURE_RGB:
+ format = PIPE_FORMAT_R8G8B8_UNORM;
+ break;
+ case EGL_TEXTURE_RGBA:
+ format = PIPE_FORMAT_A8R8G8B8_UNORM;
+ break;
+ default:
+ return _eglError(EGL_BAD_MATCH, "eglBindTexImage");
+ }
+
+ switch (xsurf->Base.TextureTarget) {
+ case EGL_TEXTURE_2D:
+ target = ST_TEXTURE_2D;
+ break;
+ default:
+ return _eglError(EGL_BAD_MATCH, "eglBindTexImage");
+ }
+
+ /* flush properly */
+ if (eglGetCurrentSurface(EGL_DRAW) == surface) {
+ xctx = lookup_context(_eglGetCurrentContext());
+ st_flush(xctx->Context, PIPE_FLUSH_RENDER_CACHE | PIPE_FLUSH_FRAME,
+ NULL);
+ }
+ else if (_eglIsSurfaceBound(&xsurf->Base)) {
+ xctx = lookup_context(xsurf->Base.Binding);
+ if (xctx)
+ st_finish(xctx->Context);
+ }
+
+ st_get_framebuffer_surface(xsurf->Framebuffer, ST_SURFACE_BACK_LEFT,
+ &psurf);
+ st_bind_texture_surface(psurf, target, xsurf->Base.MipmapLevel, format);
+ xsurf->Base.BoundToTexture = EGL_TRUE;
+
+ return EGL_TRUE;
+}
+
+
+static EGLBoolean
+xlib_eglReleaseTexImage(_EGLDriver *drv, _EGLDisplay *dpy, _EGLSurface *surface,
+ EGLint buffer)
+{
+ struct xlib_egl_surface *xsurf = lookup_surface(surface);
+ struct pipe_surface *psurf;
+
+ if (!xsurf || xsurf->Base.Type != EGL_PBUFFER_BIT ||
+ !xsurf->Base.BoundToTexture)
+ return _eglError(EGL_BAD_SURFACE, "eglReleaseTexImage");
+ if (buffer != EGL_BACK_BUFFER)
+ return _eglError(EGL_BAD_PARAMETER, "eglReleaseTexImage");
+
+ st_get_framebuffer_surface(xsurf->Framebuffer, ST_SURFACE_BACK_LEFT,
+ &psurf);
+ st_unbind_texture_surface(psurf, ST_TEXTURE_2D, xsurf->Base.MipmapLevel);
+ xsurf->Base.BoundToTexture = EGL_FALSE;
+
+ return EGL_TRUE;
+}
+
+
+static EGLBoolean
+xlib_eglSwapBuffers(_EGLDriver *drv, _EGLDisplay *dpy, _EGLSurface *draw)
+{
+ /* error checking step: */
+ if (!_eglSwapBuffers(drv, dpy, draw))
+ return EGL_FALSE;
+
+ {
+ struct xlib_egl_surface *xsurf = lookup_surface(draw);
+ struct pipe_winsys *pws = xsurf->winsys;
+ struct pipe_surface *psurf;
+
+ st_get_framebuffer_surface(xsurf->Framebuffer, ST_SURFACE_BACK_LEFT,
+ &psurf);
+
+ st_notify_swapbuffers(xsurf->Framebuffer);
+
+ display_surface(pws, psurf, xsurf);
+
+ check_and_update_buffer_size(xsurf);
+ }
+
+ return EGL_TRUE;
+}
+
+
+/**
+ * Determine which APIs are present by looking for specific global
+ * symbols.
+ */
+static EGLint
+find_supported_apis(void)
+{
+ EGLint mask = 0;
+ void *handle;
+
+ handle = dlopen(NULL, RTLD_LAZY | RTLD_LOCAL);
+ if(!handle)
+ return mask;
+
+ if (dlsym(handle, "st_api_OpenGL_ES1"))
+ mask |= EGL_OPENGL_ES_BIT;
+
+ if (dlsym(handle, "st_api_OpenGL_ES2"))
+ mask |= EGL_OPENGL_ES2_BIT;
+
+ if (dlsym(handle, "st_api_OpenGL"))
+ mask |= EGL_OPENGL_BIT;
+
+ if (dlsym(handle, "st_api_OpenVG"))
+ mask |= EGL_OPENVG_BIT;
+
+ dlclose(handle);
+
+ return mask;
+}
+
+
+static void
+xlib_Unload(_EGLDriver *drv)
+{
+ struct xlib_egl_driver *xdrv = xlib_egl_driver(drv);
+ free(xdrv);
+}
+
+
+/**
+ * This is the main entrypoint into the driver.
+ * Called by libEGL to instantiate an _EGLDriver object.
+ */
+_EGLDriver *
+_eglMain(const char *args)
+{
+ struct xlib_egl_driver *xdrv;
+
+ _eglLog(_EGL_INFO, "Entering EGL/Xlib _eglMain(%s)", args);
+
+ xdrv = CALLOC_STRUCT(xlib_egl_driver);
+ if (!xdrv)
+ return NULL;
+
+ _eglInitDriverFallbacks(&xdrv->Base);
+ xdrv->Base.API.Initialize = xlib_eglInitialize;
+ xdrv->Base.API.Terminate = xlib_eglTerminate;
+ xdrv->Base.API.GetProcAddress = xlib_eglGetProcAddress;
+ xdrv->Base.API.CreateContext = xlib_eglCreateContext;
+ xdrv->Base.API.DestroyContext = xlib_eglDestroyContext;
+ xdrv->Base.API.CreateWindowSurface = xlib_eglCreateWindowSurface;
+ xdrv->Base.API.CreatePbufferSurface = xlib_eglCreatePbufferSurface;
+ xdrv->Base.API.DestroySurface = xlib_eglDestroySurface;
+ xdrv->Base.API.BindTexImage = xlib_eglBindTexImage;
+ xdrv->Base.API.ReleaseTexImage = xlib_eglReleaseTexImage;
+ xdrv->Base.API.MakeCurrent = xlib_eglMakeCurrent;
+ xdrv->Base.API.SwapBuffers = xlib_eglSwapBuffers;
+
+ xdrv->apis = find_supported_apis();
+ if (xdrv->apis == 0x0) {
+ /* the app isn't directly linked with any EGL-supported API
+ * (such as libGLESv2.so), so use an EGL utility to see what
+ * APIs might be loaded dynamically on this system.
+ */
+ xdrv->apis = _eglFindAPIs();
+ }
+
+ xdrv->Base.Name = "Xlib/softpipe";
+ xdrv->Base.Unload = xlib_Unload;
+
+ return &xdrv->Base;
+}
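A minimal client-side sketch (not part of the patch) of how an application reaches the entry points this driver installs in _eglMain(), using only standard EGL 1.4 and Xlib calls. Window setup, config selection and error handling are simplified for illustration; EGLNativeDisplayType/EGLNativeWindowType are the EGL 1.3+ names for the Native*Type typedefs used in the driver code above.

/* client.c -- illustrative only; links against libEGL and libX11 */
#include <EGL/egl.h>
#include <X11/Xlib.h>

int
main(void)
{
   static const EGLint attribs[] = {
      EGL_RED_SIZE, 8, EGL_GREEN_SIZE, 8, EGL_BLUE_SIZE, 8, EGL_NONE
   };
   Display *xdpy;
   Window win;
   EGLDisplay dpy;
   EGLConfig config;
   EGLContext ctx;
   EGLSurface surf;
   EGLint major, minor, num_config;

   xdpy = XOpenDisplay(NULL);
   win = XCreateSimpleWindow(xdpy, DefaultRootWindow(xdpy),
                             0, 0, 300, 300, 0, 0, 0);
   XMapWindow(xdpy, win);

   dpy = eglGetDisplay((EGLNativeDisplayType) xdpy);
   eglInitialize(dpy, &major, &minor);              /* -> xlib_eglInitialize() */
   eglChooseConfig(dpy, attribs, &config, 1, &num_config);

   eglBindAPI(EGL_OPENGL_API);
   ctx = eglCreateContext(dpy, config, EGL_NO_CONTEXT, NULL);
   surf = eglCreateWindowSurface(dpy, config,
                                 (EGLNativeWindowType) win, NULL);
   eglMakeCurrent(dpy, surf, surf, ctx);            /* -> xlib_eglMakeCurrent() */

   /* ... render with OpenGL through the softpipe context ... */

   eglSwapBuffers(dpy, surf);                       /* -> xlib_eglSwapBuffers() */

   eglMakeCurrent(dpy, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
   eglDestroySurface(dpy, surf);
   eglDestroyContext(dpy, ctx);
   eglTerminate(dpy);                               /* -> xlib_eglTerminate() */
   XDestroyWindow(xdpy, win);
   XCloseDisplay(xdpy);
   return 0;
}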
diff --git a/src/gallium/winsys/egl_xlib/sw_winsys.c b/src/gallium/winsys/egl_xlib/sw_winsys.c
new file mode 100644
index 0000000000..79ff2cc985
--- /dev/null
+++ b/src/gallium/winsys/egl_xlib/sw_winsys.c
@@ -0,0 +1,243 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Totally software-based winsys layer.
+ * Note that the one winsys function that we can't implement here
+ * is flush_frontbuffer().
+ * Whoever uses this code will have to provide that.
+ *
+ * Authors: Brian Paul
+ */
+
+
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_state.h"
+#include "pipe/p_inlines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+#include "sw_winsys.h"
+
+
+
+/** Subclass of pipe_winsys */
+struct sw_pipe_winsys
+{
+ struct pipe_winsys Base;
+ /* no extra fields for now */
+};
+
+
+/** subclass of pipe_buffer */
+struct sw_pipe_buffer
+{
+ struct pipe_buffer Base;
+ boolean UserBuffer; /** Is this a user-space buffer? */
+ void *Data;
+ void *Mapped;
+};
+
+
+/** cast wrapper */
+static INLINE struct sw_pipe_buffer *
+sw_pipe_buffer(struct pipe_buffer *b)
+{
+ return (struct sw_pipe_buffer *) b;
+}
+
+
+/**
+ * Round n up to the next multiple (multiple must be a power of two).
+ */
+static INLINE unsigned
+round_up(unsigned n, unsigned multiple)
+{
+ return (n + multiple - 1) & ~(multiple - 1);
+}
+
+
+static const char *
+get_name(struct pipe_winsys *pws)
+{
+ return "software";
+}
+
+
+/** Create new pipe_buffer and allocate storage of given size */
+static struct pipe_buffer *
+buffer_create(struct pipe_winsys *pws,
+ unsigned alignment,
+ unsigned usage,
+ unsigned size)
+{
+ struct sw_pipe_buffer *buffer = CALLOC_STRUCT(sw_pipe_buffer);
+ if (!buffer)
+ return NULL;
+
+ pipe_reference_init(&buffer->Base.reference, 1);
+ buffer->Base.alignment = alignment;
+ buffer->Base.usage = usage;
+ buffer->Base.size = size;
+
+ /* align to 16-byte multiple for Cell */
+ buffer->Data = align_malloc(size, MAX2(alignment, 16));
+
+ return &buffer->Base;
+}
+
+
+/**
+ * Create buffer which wraps user-space data.
+ */
+static struct pipe_buffer *
+user_buffer_create(struct pipe_winsys *pws, void *ptr, unsigned bytes)
+{
+ struct sw_pipe_buffer *buffer = CALLOC_STRUCT(sw_pipe_buffer);
+ if (!buffer)
+ return NULL;
+
+ pipe_reference_init(&buffer->Base.reference, 1);
+ buffer->Base.size = bytes;
+ buffer->UserBuffer = TRUE;
+ buffer->Data = ptr;
+
+ return &buffer->Base;
+}
+
+
+static void *
+buffer_map(struct pipe_winsys *pws, struct pipe_buffer *buf, unsigned flags)
+{
+ struct sw_pipe_buffer *buffer = sw_pipe_buffer(buf);
+ buffer->Mapped = buffer->Data;
+ return buffer->Mapped;
+}
+
+
+static void
+buffer_unmap(struct pipe_winsys *pws, struct pipe_buffer *buf)
+{
+ struct sw_pipe_buffer *buffer = sw_pipe_buffer(buf);
+ buffer->Mapped = NULL;
+}
+
+
+static void
+buffer_destroy(struct pipe_buffer *buf)
+{
+ struct sw_pipe_buffer *buffer = sw_pipe_buffer(buf);
+
+ if (buffer->Data && !buffer->UserBuffer) {
+ align_free(buffer->Data);
+ buffer->Data = NULL;
+ }
+
+ free(buffer);
+}
+
+
+static struct pipe_buffer *
+surface_buffer_create(struct pipe_winsys *winsys,
+ unsigned width, unsigned height,
+ enum pipe_format format,
+ unsigned usage,
+ unsigned tex_usage,
+ unsigned *stride)
+{
+ const unsigned alignment = 64;
+ struct pipe_format_block block;
+ unsigned nblocksx, nblocksy;
+
+ pf_get_block(format, &block);
+ nblocksx = pf_get_nblocksx(&block, width);
+ nblocksy = pf_get_nblocksy(&block, height);
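+ /* e.g. a 300 x 200 PIPE_FORMAT_A8R8G8B8_UNORM surface: block.size = 4,
+ * nblocksx = 300, so *stride = round_up(1200, 64) = 1216 bytes
+ */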
+ *stride = round_up(nblocksx * block.size, alignment);
+
+ return winsys->buffer_create(winsys, alignment,
+ usage,
+ *stride * nblocksy);
+}
+
+
+static void
+fence_reference(struct pipe_winsys *sws, struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+ /* no-op */
+}
+
+
+static int
+fence_signalled(struct pipe_winsys *sws, struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ /* no-op */
+ return 0;
+}
+
+
+static int
+fence_finish(struct pipe_winsys *sws, struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ /* no-op */
+ return 0;
+}
+
+
+/**
+ * Create/return a new pipe_winsys object.
+ */
+struct pipe_winsys *
+create_sw_winsys(void)
+{
+ struct sw_pipe_winsys *ws = CALLOC_STRUCT(sw_pipe_winsys);
+ if (!ws)
+ return NULL;
+
+ /* Fill in this struct with callbacks that pipe will need to
+ * communicate with the window system, buffer manager, etc.
+ */
+ ws->Base.buffer_create = buffer_create;
+ ws->Base.user_buffer_create = user_buffer_create;
+ ws->Base.buffer_map = buffer_map;
+ ws->Base.buffer_unmap = buffer_unmap;
+ ws->Base.buffer_destroy = buffer_destroy;
+
+ ws->Base.surface_buffer_create = surface_buffer_create;
+
+ ws->Base.fence_reference = fence_reference;
+ ws->Base.fence_signalled = fence_signalled;
+ ws->Base.fence_finish = fence_finish;
+
+ ws->Base.flush_frontbuffer = NULL; /* not implemented here! */
+
+ ws->Base.get_name = get_name;
+
+ return &ws->Base;
+}
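Since sw_winsys deliberately leaves flush_frontbuffer NULL, each consumer must supply its own hook, exactly as egl_xlib.c above does. A minimal sketch under that assumption follows; my_flush_frontbuffer and setup_softpipe_screen are hypothetical names, and the headers are the same ones this patch already uses.

#include "pipe/internal/p_winsys_screen.h"
#include "pipe/p_state.h"
#include "softpipe/sp_winsys.h"
#include "sw_winsys.h"

/* caller-provided presentation hook (XPutImage, memcpy to a DIB, ...) */
static void
my_flush_frontbuffer(struct pipe_winsys *pws,
                     struct pipe_surface *psurf,
                     void *context_private)
{
   /* copy psurf's pixels to the native window system here */
}

static struct pipe_screen *
setup_softpipe_screen(void)
{
   struct pipe_winsys *ws = create_sw_winsys();
   if (!ws)
      return NULL;
   ws->flush_frontbuffer = my_flush_frontbuffer;  /* fill in the missing hook */
   return softpipe_create_screen(ws);
}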
diff --git a/src/gallium/winsys/egl_xlib/sw_winsys.h b/src/gallium/winsys/egl_xlib/sw_winsys.h
new file mode 100644
index 0000000000..f96c5a14b0
--- /dev/null
+++ b/src/gallium/winsys/egl_xlib/sw_winsys.h
@@ -0,0 +1,40 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef SW_WINSYS_H
+#define SW_WINSYS_H
+
+
+struct pipe_winsys;
+
+
+extern struct pipe_winsys *
+create_sw_winsys(void);
+
+
+#endif /* SW_WINSYS_H */
diff --git a/src/gallium/winsys/g3dvl/nouveau/Makefile b/src/gallium/winsys/g3dvl/nouveau/Makefile
new file mode 100644
index 0000000000..2997f6b79c
--- /dev/null
+++ b/src/gallium/winsys/g3dvl/nouveau/Makefile
@@ -0,0 +1,50 @@
+TARGET = libnouveau_dri.so
+GALLIUMDIR = ../../..
+DRMDIR ?= /usr
+DRIDIR = ../../../../driclient
+
+OBJECTS = nouveau_screen_vl.o nouveau_context_vl.o nouveau_swapbuffers.o
+
+CFLAGS += -g -Wall -Werror=implicit-function-declaration -fPIC \
+ -I${GALLIUMDIR}/include \
+ -I${GALLIUMDIR}/winsys/g3dvl \
+ -I${GALLIUMDIR}/winsys/drm/nouveau \
+ -I${DRMDIR}/include \
+ -I${DRMDIR}/include/drm \
+ -I${DRMDIR}/include/nouveau \
+ -I${GALLIUMDIR}/drivers \
+ -I${GALLIUMDIR}/auxiliary \
+ -I${DRIDIR}/include
+
+LDFLAGS += -L${DRMDIR}/lib \
+ -L${DRIDIR}/lib \
+ -L${GALLIUMDIR}/winsys/drm/nouveau/common \
+ -L${GALLIUMDIR}/auxiliary/draw \
+ -L${GALLIUMDIR}/auxiliary/tgsi \
+ -L${GALLIUMDIR}/auxiliary/translate \
+ -L${GALLIUMDIR}/auxiliary/rtasm \
+ -L${GALLIUMDIR}/auxiliary/cso_cache \
+ -L${GALLIUMDIR}/drivers/nv04 \
+ -L${GALLIUMDIR}/drivers/nv10 \
+ -L${GALLIUMDIR}/drivers/nv20 \
+ -L${GALLIUMDIR}/drivers/nv30 \
+ -L${GALLIUMDIR}/drivers/nv40 \
+ -L${GALLIUMDIR}/drivers/nv50
+
+LIBS += -lnouveaudrm -ldriclient -ldrm_nouveau -ldrm -lnv04 -lnv10 -lnv20 -lnv30 -lnv40 -lnv50 -ldraw -ltgsi -ltranslate -lrtasm -lcso_cache -lm
+
+#############################################
+
+.PHONY: all clean libdriclient
+
+all: ${TARGET}
+
+${TARGET}: ${OBJECTS} libdriclient
+ $(CC) ${LDFLAGS} -shared -o $@ ${OBJECTS} ${LIBS}
+
+libdriclient:
+ cd ${DRIDIR}/src; ${MAKE}
+
+clean:
+ cd ${DRIDIR}/src; ${MAKE} clean
+ rm -rf ${OBJECTS} ${TARGET}
diff --git a/src/gallium/winsys/g3dvl/nouveau/nouveau_context_vl.c b/src/gallium/winsys/g3dvl/nouveau/nouveau_context_vl.c
new file mode 100644
index 0000000000..dfc4905bc0
--- /dev/null
+++ b/src/gallium/winsys/g3dvl/nouveau/nouveau_context_vl.c
@@ -0,0 +1,172 @@
+#include "nouveau_context_vl.h"
+#include <pipe/p_defines.h>
+#include <pipe/p_context.h>
+#include <pipe/p_screen.h>
+#include <util/u_memory.h>
+#include <common/nouveau_dri.h>
+#include <common/nouveau_local.h>
+#include <common/nouveau_winsys_pipe.h>
+#include "nouveau_screen_vl.h"
+
+/*
+#ifdef DEBUG
+static const struct dri_debug_control debug_control[] = {
+ { "bo", DEBUG_BO },
+ { NULL, 0 }
+};
+int __nouveau_debug = 0;
+#endif
+*/
+
+int
+nouveau_context_create(dri_context_t *dri_context)
+{
+ dri_screen_t *dri_screen;
+ struct nouveau_screen_vl *nv_screen;
+ struct nouveau_context_vl *nv;
+
+ assert (dri_context);
+
+ dri_screen = dri_context->dri_screen;
+ nv_screen = dri_screen->private;
+ nv = CALLOC_STRUCT(nouveau_context_vl);
+
+ if (!nv)
+ return 1;
+
+ if (nouveau_context_init(&nv_screen->base, dri_context->drm_context,
+ (drmLock*)&dri_screen->sarea->lock, NULL, &nv->base))
+ {
+ FREE(nv);
+ return 1;
+ }
+
+ dri_context->private = (void*)nv;
+ nv->dri_context = dri_context;
+ nv->nv_screen = nv_screen;
+
+ /*
+ driParseConfigFiles(&nv->dri_option_cache, &nv_screen->option_cache,
+ nv->dri_screen->myNum, "nouveau");
+#ifdef DEBUG
+ __nouveau_debug = driParseDebugString(getenv("NOUVEAU_DEBUG"),
+ debug_control);
+#endif
+ */
+
+ nv->base.nvc->pctx[nv->base.pctx_id]->priv = nv;
+
+ return 0;
+}
+
+void
+nouveau_context_destroy(dri_context_t *dri_context)
+{
+ struct nouveau_context_vl *nv = dri_context->private;
+
+ assert(dri_context);
+
+ nouveau_context_cleanup(&nv->base);
+
+ FREE(nv);
+}
+
+int
+nouveau_context_bind(struct nouveau_context_vl *nv, dri_drawable_t *dri_drawable)
+{
+ assert(nv);
+ assert(dri_drawable);
+
+ if (nv->dri_drawable != dri_drawable)
+ {
+ nv->dri_drawable = dri_drawable;
+ dri_drawable->private = nv;
+ }
+
+ return 0;
+}
+
+int
+nouveau_context_unbind(struct nouveau_context_vl *nv)
+{
+ assert(nv);
+
+ nv->dri_drawable = NULL;
+
+ return 0;
+}
+
+/* Show starts here */
+
+int bind_pipe_drawable(struct pipe_context *pipe, Drawable drawable)
+{
+ struct nouveau_context_vl *nv;
+ dri_drawable_t *dri_drawable;
+
+ assert(pipe);
+
+ nv = pipe->priv;
+
+ driCreateDrawable(nv->nv_screen->dri_screen, drawable, &dri_drawable);
+
+ nouveau_context_bind(nv, dri_drawable);
+
+ return 0;
+}
+
+int unbind_pipe_drawable(struct pipe_context *pipe)
+{
+ assert (pipe);
+
+ nouveau_context_unbind(pipe->priv);
+
+ return 0;
+}
+
+struct pipe_context* create_pipe_context(Display *display, int screen)
+{
+ dri_screen_t *dri_screen;
+ dri_framebuffer_t dri_framebuf;
+ dri_context_t *dri_context;
+ struct nouveau_context_vl *nv;
+
+ assert(display);
+
+ driCreateScreen(display, screen, &dri_screen, &dri_framebuf);
+ driCreateContext(dri_screen, XDefaultVisual(display, screen), &dri_context);
+
+ nouveau_screen_create(dri_screen, &dri_framebuf);
+ nouveau_context_create(dri_context);
+
+ nv = dri_context->private;
+
+ return nv->base.nvc->pctx[nv->base.pctx_id];
+}
+
+int destroy_pipe_context(struct pipe_context *pipe)
+{
+ struct pipe_screen *screen;
+ struct pipe_winsys *winsys;
+ struct nouveau_context_vl *nv;
+ dri_screen_t *dri_screen;
+ dri_context_t *dri_context;
+
+ assert(pipe);
+
+ screen = pipe->screen;
+ winsys = pipe->winsys;
+ nv = pipe->priv;
+ dri_context = nv->dri_context;
+ dri_screen = dri_context->dri_screen;
+
+ pipe->destroy(pipe);
+ screen->destroy(screen);
+ FREE(winsys);
+
+ nouveau_context_destroy(dri_context);
+ nouveau_screen_destroy(dri_screen);
+ driDestroyContext(dri_context);
+ driDestroyScreen(dri_screen);
+
+ return 0;
+}
diff --git a/src/gallium/winsys/g3dvl/nouveau/nouveau_context_vl.h b/src/gallium/winsys/g3dvl/nouveau/nouveau_context_vl.h
new file mode 100644
index 0000000000..1115c3130c
--- /dev/null
+++ b/src/gallium/winsys/g3dvl/nouveau/nouveau_context_vl.h
@@ -0,0 +1,39 @@
+#ifndef __NOUVEAU_CONTEXT_VL_H__
+#define __NOUVEAU_CONTEXT_VL_H__
+
+#include <driclient.h>
+#include <nouveau/nouveau_winsys.h>
+#include <common/nouveau_context.h>
+
+/*#include "xmlconfig.h"*/
+
+struct nouveau_context_vl {
+ struct nouveau_context base;
+ struct nouveau_screen_vl *nv_screen;
+ dri_context_t *dri_context;
+ dri_drawable_t *dri_drawable;
+ unsigned int last_stamp;
+ /*driOptionCache dri_option_cache;*/
+ drm_context_t drm_context;
+ drmLock drm_lock;
+};
+
+extern int nouveau_context_create(dri_context_t *);
+extern void nouveau_context_destroy(dri_context_t *);
+extern int nouveau_context_bind(struct nouveau_context_vl *, dri_drawable_t *);
+extern int nouveau_context_unbind(struct nouveau_context_vl *);
+
+#ifdef DEBUG
+extern int __nouveau_debug;
+
+#define DEBUG_BO (1 << 0)
+
+#define DBG(flag, ...) do { \
+ if (__nouveau_debug & (DEBUG_##flag)) \
+ NOUVEAU_ERR(__VA_ARGS__); \
+} while(0)
+#else
+#define DBG(flag, ...)
+#endif
+
+#endif
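A hypothetical call site for the DBG macro above; it only prints once the commented-out NOUVEAU_DEBUG parsing in nouveau_context_vl.c is re-enabled so that __nouveau_debug can have DEBUG_BO set ("size" here is an illustrative local, not from the patch):

/* forwards to NOUVEAU_ERR only when the DEBUG_BO bit is set */
DBG(BO, "allocated bo of %u bytes\n", (unsigned) size);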
diff --git a/src/gallium/winsys/g3dvl/nouveau/nouveau_screen_vl.c b/src/gallium/winsys/g3dvl/nouveau/nouveau_screen_vl.c
new file mode 100644
index 0000000000..b7c74f8299
--- /dev/null
+++ b/src/gallium/winsys/g3dvl/nouveau/nouveau_screen_vl.c
@@ -0,0 +1,88 @@
+#include "nouveau_screen_vl.h"
+#include <util/u_memory.h>
+#include <nouveau_drm.h>
+#include <common/nouveau_dri.h>
+#include <common/nouveau_local.h>
+
+#if NOUVEAU_DRM_HEADER_PATCHLEVEL != 12
+#error nouveau_drm.h version does not match expected version
+#endif
+
+/*
+PUBLIC const char __driConfigOptions[] =
+DRI_CONF_BEGIN
+DRI_CONF_END;
+static const GLuint __driNConfigOptions = 0;
+*/
+
+int nouveau_check_dri_drm_ddx(dri_version_t *dri, dri_version_t *drm, dri_version_t *ddx)
+{
+ static const dri_version_t ddx_expected = {0, 0, NOUVEAU_DRM_HEADER_PATCHLEVEL};
+ static const dri_version_t dri_expected = {4, 0, 0};
+ static const dri_version_t drm_expected = {0, 0, NOUVEAU_DRM_HEADER_PATCHLEVEL};
+
+ assert(dri);
+ assert(drm);
+ assert(ddx);
+
+ if (dri->major != dri_expected.major || dri->minor < dri_expected.minor)
+ {
+ NOUVEAU_ERR("Unexpected DRI version.\n");
+ return 1;
+ }
+ if (drm->major != drm_expected.major || drm->minor < drm_expected.minor)
+ {
+ NOUVEAU_ERR("Unexpected DRM version.\n");
+ return 1;
+ }
+ if (ddx->major != ddx_expected.major || ddx->minor < ddx_expected.minor)
+ {
+ NOUVEAU_ERR("Unexpected DDX version.\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+nouveau_screen_create(dri_screen_t *dri_screen, dri_framebuffer_t *dri_framebuf)
+{
+ struct nouveau_dri *nv_dri = dri_framebuf->private;
+ struct nouveau_screen_vl *nv_screen;
+
+ assert(dri_screen);
+ assert(dri_framebuf);
+
+ if (nouveau_check_dri_drm_ddx(&dri_screen->dri, &dri_screen->drm, &dri_screen->ddx))
+ return 1;
+
+ nv_screen = CALLOC_STRUCT(nouveau_screen_vl);
+
+ if (!nv_screen)
+ return 1;
+
+ if (nouveau_screen_init(nv_dri, dri_screen->fd, &nv_screen->base))
+ {
+ FREE(nv_screen);
+ return 1;
+ }
+
+ /*
+ driParseOptionInfo(&nv_screen->option_cache,
+ __driConfigOptions, __driNConfigOptions);
+ */
+
+ nv_screen->dri_screen = dri_screen;
+ dri_screen->private = (void*)nv_screen;
+
+ return 0;
+}
+
+void
+nouveau_screen_destroy(dri_screen_t *dri_screen)
+{
+ struct nouveau_screen_vl *nv_screen = dri_screen->private;
+
+ nouveau_screen_cleanup(&nv_screen->base);
+ FREE(nv_screen);
+}
diff --git a/src/gallium/winsys/g3dvl/nouveau/nouveau_screen_vl.h b/src/gallium/winsys/g3dvl/nouveau/nouveau_screen_vl.h
new file mode 100644
index 0000000000..0c1ceca6de
--- /dev/null
+++ b/src/gallium/winsys/g3dvl/nouveau/nouveau_screen_vl.h
@@ -0,0 +1,20 @@
+#ifndef __NOUVEAU_SCREEN_VL_H__
+#define __NOUVEAU_SCREEN_VL_H__
+
+#include <driclient.h>
+#include <common/nouveau_screen.h>
+
+/* TODO: Investigate using DRI options for interesting things */
+/*#include "xmlconfig.h"*/
+
+struct nouveau_screen_vl
+{
+ struct nouveau_screen base;
+ dri_screen_t *dri_screen;
+ /*driOptionCache option_cache;*/
+};
+
+int nouveau_screen_create(dri_screen_t *dri_screen, dri_framebuffer_t *dri_framebuf);
+void nouveau_screen_destroy(dri_screen_t *dri_screen);
+
+#endif
diff --git a/src/gallium/winsys/g3dvl/nouveau/nouveau_swapbuffers.c b/src/gallium/winsys/g3dvl/nouveau/nouveau_swapbuffers.c
new file mode 100644
index 0000000000..77e46a2054
--- /dev/null
+++ b/src/gallium/winsys/g3dvl/nouveau/nouveau_swapbuffers.c
@@ -0,0 +1,94 @@
+#include <driclient.h>
+#include <common/nouveau_local.h>
+#include <common/nouveau_screen.h>
+#include "nouveau_context_vl.h"
+#include "nouveau_swapbuffers.h"
+
+void
+nouveau_copy_buffer(dri_drawable_t *dri_drawable, struct pipe_surface *surf,
+ const drm_clip_rect_t *rect)
+{
+ struct nouveau_context_vl *nv = dri_drawable->private;
+ struct pipe_context *pipe = nv->base.nvc->pctx[nv->base.pctx_id];
+ drm_clip_rect_t *pbox;
+ int nbox, i;
+
+ LOCK_HARDWARE(&nv->base);
+ if (!dri_drawable->num_cliprects) {
+ UNLOCK_HARDWARE(&nv->base);
+ return;
+ }
+ pbox = dri_drawable->cliprects;
+ nbox = dri_drawable->num_cliprects;
+
+ for (i = 0; i < nbox; i++, pbox++) {
+ int sx, sy, dx, dy, w, h;
+
+ sx = pbox->x1 - dri_drawable->x;
+ sy = pbox->y1 - dri_drawable->y;
+ dx = pbox->x1;
+ dy = pbox->y1;
+ w = pbox->x2 - pbox->x1;
+ h = pbox->y2 - pbox->y1;
+
+ pipe->surface_copy(pipe, nv->base.frontbuffer,
+ dx, dy, surf, sx, sy, w, h);
+ }
+
+ FIRE_RING(nv->base.nvc->channel);
+ UNLOCK_HARDWARE(&nv->base);
+}
+
+void
+nouveau_copy_sub_buffer(dri_drawable_t *dri_drawable, struct pipe_surface *surf, int x, int y, int w, int h)
+{
+ if (surf) {
+ drm_clip_rect_t rect;
+ rect.x1 = x;
+ rect.y1 = y;
+ rect.x2 = x + w;
+ rect.y2 = y + h;
+
+ nouveau_copy_buffer(dri_drawable, surf, &rect);
+ }
+}
+
+void
+nouveau_swap_buffers(dri_drawable_t *dri_drawable, struct pipe_surface *surf)
+{
+ if (surf)
+ nouveau_copy_buffer(dri_drawable, surf, NULL);
+}
+
+void
+nouveau_flush_frontbuffer(struct pipe_winsys *pws, struct pipe_surface *surf,
+ void *context_private)
+{
+ struct nouveau_context_vl *nv;
+ dri_drawable_t *dri_drawable;
+
+ assert(pws);
+ assert(surf);
+ assert(context_private);
+
+ nv = context_private;
+ dri_drawable = nv->dri_drawable;
+
+ nouveau_copy_buffer(dri_drawable, surf, NULL);
+}
+
+void
+nouveau_contended_lock(struct nouveau_context *nv)
+{
+ struct nouveau_context_vl *nv_vl = (struct nouveau_context_vl*)nv;
+ dri_drawable_t *dri_drawable = nv_vl->dri_drawable;
+ dri_screen_t *dri_screen = nv_vl->dri_context->dri_screen;
+
+ /* If the window moved, we may need to set a new cliprect now.
+ *
+ * NOTE: This releases and regains the hw lock, so all state
+ * checking must be done *after* this call:
+ */
+ if (dri_drawable)
+ DRI_VALIDATE_DRAWABLE_INFO(dri_screen, dri_drawable);
+}
diff --git a/src/gallium/winsys/g3dvl/nouveau/nouveau_swapbuffers.h b/src/gallium/winsys/g3dvl/nouveau/nouveau_swapbuffers.h
new file mode 100644
index 0000000000..35e934adba
--- /dev/null
+++ b/src/gallium/winsys/g3dvl/nouveau/nouveau_swapbuffers.h
@@ -0,0 +1,10 @@
+#ifndef __NOUVEAU_SWAPBUFFERS_H__
+#define __NOUVEAU_SWAPBUFFERS_H__
+
+extern void nouveau_copy_buffer(dri_drawable_t *, struct pipe_surface *,
+ const drm_clip_rect_t *);
+extern void nouveau_copy_sub_buffer(dri_drawable_t *, struct pipe_surface *,
+ int x, int y, int w, int h);
+extern void nouveau_swap_buffers(dri_drawable_t *, struct pipe_surface *);
+
+#endif
diff --git a/src/gallium/winsys/g3dvl/vl_winsys.h b/src/gallium/winsys/g3dvl/vl_winsys.h
new file mode 100644
index 0000000000..c83db28dd9
--- /dev/null
+++ b/src/gallium/winsys/g3dvl/vl_winsys.h
@@ -0,0 +1,14 @@
+#ifndef vl_winsys_h
+#define vl_winsys_h
+
+#include <X11/Xlib.h>
+
+struct pipe_context;
+
+struct pipe_context* create_pipe_context(Display *display, int screen);
+int destroy_pipe_context(struct pipe_context *pipe);
+int bind_pipe_drawable(struct pipe_context *pipe, Drawable drawable);
+int unbind_pipe_drawable(struct pipe_context *pipe);
+
+#endif
+
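The header above defines the whole surface of the g3dvl winsys. A hypothetical caller (a video state tracker or a small test program; all names below are illustrative, not from the patch) would use it roughly as follows:

#include <X11/Xlib.h>
#include "vl_winsys.h"

int
main(void)
{
   Display *dpy = XOpenDisplay(NULL);
   int scr = DefaultScreen(dpy);
   Window win = XCreateSimpleWindow(dpy, RootWindow(dpy, scr),
                                    0, 0, 320, 240, 0, 0, 0);
   struct pipe_context *pipe;

   XMapWindow(dpy, win);

   pipe = create_pipe_context(dpy, scr);
   bind_pipe_drawable(pipe, win);

   /* ... issue gallium rendering through pipe; the winsys'
    * flush_frontbuffer implementation presents into the bound drawable ...
    */

   unbind_pipe_drawable(pipe);
   destroy_pipe_context(pipe);
   XDestroyWindow(dpy, win);
   XCloseDisplay(dpy);
   return 0;
}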
diff --git a/src/gallium/winsys/g3dvl/xsp_winsys.c b/src/gallium/winsys/g3dvl/xsp_winsys.c
new file mode 100644
index 0000000000..698c2856a4
--- /dev/null
+++ b/src/gallium/winsys/g3dvl/xsp_winsys.c
@@ -0,0 +1,290 @@
+#include "vl_winsys.h"
+#include <X11/Xutil.h>
+#include <pipe/internal/p_winsys_screen.h>
+#include <pipe/p_state.h>
+#include <pipe/p_inlines.h>
+#include <util/u_memory.h>
+#include <util/u_math.h>
+#include <softpipe/sp_winsys.h>
+#include <softpipe/sp_texture.h>
+
+/* pipe_winsys implementation */
+
+struct xsp_pipe_winsys
+{
+ struct pipe_winsys base;
+ XImage fbimage;
+};
+
+struct xsp_context
+{
+ Display *display;
+ int screen;
+ Drawable drawable;
+ int drawable_bound;
+};
+
+struct xsp_buffer
+{
+ struct pipe_buffer base;
+ boolean is_user_buffer;
+ void *data;
+ void *mapped_data;
+};
+
+static struct pipe_buffer* xsp_buffer_create(struct pipe_winsys *pws, unsigned alignment, unsigned usage, unsigned size)
+{
+ struct xsp_buffer *buffer;
+
+ assert(pws);
+
+ buffer = calloc(1, sizeof(struct xsp_buffer));
+ pipe_reference_init(&buffer->base.reference, 1);
+ buffer->base.alignment = alignment;
+ buffer->base.usage = usage;
+ buffer->base.size = size;
+ buffer->data = align_malloc(size, alignment);
+
+ return (struct pipe_buffer*)buffer;
+}
+
+static struct pipe_buffer* xsp_user_buffer_create(struct pipe_winsys *pws, void *data, unsigned size)
+{
+ struct xsp_buffer *buffer;
+
+ assert(pws);
+
+ buffer = calloc(1, sizeof(struct xsp_buffer));
+ pipe_reference_init(&buffer->base.reference, 1);
+ buffer->base.size = size;
+ buffer->is_user_buffer = TRUE;
+ buffer->data = data;
+
+ return (struct pipe_buffer*)buffer;
+}
+
+static void* xsp_buffer_map(struct pipe_winsys *pws, struct pipe_buffer *buffer, unsigned flags)
+{
+ struct xsp_buffer *xsp_buf = (struct xsp_buffer*)buffer;
+
+ assert(pws);
+ assert(buffer);
+
+ xsp_buf->mapped_data = xsp_buf->data;
+
+ return xsp_buf->mapped_data;
+}
+
+static void xsp_buffer_unmap(struct pipe_winsys *pws, struct pipe_buffer *buffer)
+{
+ struct xsp_buffer *xsp_buf = (struct xsp_buffer*)buffer;
+
+ assert(pws);
+ assert(buffer);
+
+ xsp_buf->mapped_data = NULL;
+}
+
+static void xsp_buffer_destroy(struct pipe_winsys *pws, struct pipe_buffer *buffer)
+{
+ struct xsp_buffer *xsp_buf = (struct xsp_buffer*)buffer;
+
+ assert(pws);
+ assert(buffer);
+
+ if (!xsp_buf->is_user_buffer)
+ align_free(xsp_buf->data);
+
+ free(xsp_buf);
+}
+
+static struct pipe_buffer* xsp_surface_buffer_create
+(
+ struct pipe_winsys *pws,
+ unsigned width,
+ unsigned height,
+ enum pipe_format format,
+ unsigned usage,
+ unsigned *stride
+)
+{
+ const unsigned int ALIGNMENT = 1;
+ struct pipe_format_block block;
+ unsigned nblocksx, nblocksy;
+
+ pf_get_block(format, &block);
+ nblocksx = pf_get_nblocksx(&block, width);
+ nblocksy = pf_get_nblocksy(&block, height);
+ *stride = align(nblocksx * block.size, ALIGNMENT);
+
+ return pws->buffer_create(pws, ALIGNMENT,
+ usage,
+ *stride * nblocksy);
+}
+
+static void xsp_fence_reference(struct pipe_winsys *pws, struct pipe_fence_handle **ptr, struct pipe_fence_handle *fence)
+{
+ assert(pws);
+ assert(ptr);
+ assert(fence);
+}
+
+static int xsp_fence_signalled(struct pipe_winsys *pws, struct pipe_fence_handle *fence, unsigned flag)
+{
+ assert(pws);
+ assert(fence);
+
+ return 0;
+}
+
+static int xsp_fence_finish(struct pipe_winsys *pws, struct pipe_fence_handle *fence, unsigned flag)
+{
+ assert(pws);
+ assert(fence);
+
+ return 0;
+}
+
+static void xsp_flush_frontbuffer(struct pipe_winsys *pws, struct pipe_surface *surface, void *context_private)
+{
+ struct xsp_pipe_winsys *xsp_winsys;
+ struct xsp_context *xsp_context;
+
+ assert(pws);
+ assert(surface);
+ assert(context_private);
+
+ xsp_winsys = (struct xsp_pipe_winsys*)pws;
+ xsp_context = (struct xsp_context*)context_private;
+
+ if (!xsp_context->drawable_bound)
+ return;
+
+ xsp_winsys->fbimage.width = surface->width;
+ xsp_winsys->fbimage.height = surface->height;
+ xsp_winsys->fbimage.bytes_per_line = surface->width * (xsp_winsys->fbimage.bits_per_pixel >> 3);
+ xsp_winsys->fbimage.data = ((struct xsp_buffer *)softpipe_texture(surface->texture)->buffer)->data + surface->offset;
+
+ XPutImage
+ (
+ xsp_context->display,
+ xsp_context->drawable,
+ XDefaultGC(xsp_context->display, xsp_context->screen),
+ &xsp_winsys->fbimage,
+ 0,
+ 0,
+ 0,
+ 0,
+ surface->width,
+ surface->height
+ );
+ XFlush(xsp_context->display);
+}
+
+static const char* xsp_get_name(struct pipe_winsys *pws)
+{
+ assert(pws);
+ return "X11 SoftPipe";
+}
+
+/* Show starts here */
+
+int bind_pipe_drawable(struct pipe_context *pipe, Drawable drawable)
+{
+ struct xsp_context *xsp_context;
+
+ assert(pipe);
+
+ xsp_context = pipe->priv;
+ xsp_context->drawable = drawable;
+ xsp_context->drawable_bound = 1;
+
+ return 0;
+}
+
+int unbind_pipe_drawable(struct pipe_context *pipe)
+{
+ struct xsp_context *xsp_context;
+
+ assert(pipe);
+
+ xsp_context = pipe->priv;
+ xsp_context->drawable_bound = 0;
+
+ return 0;
+}
+
+struct pipe_context* create_pipe_context(Display *display, int screen)
+{
+ struct xsp_pipe_winsys *xsp_winsys;
+ struct xsp_context *xsp_context;
+ struct pipe_screen *sp_screen;
+ struct pipe_context *sp_pipe;
+
+ assert(display);
+
+ xsp_winsys = calloc(1, sizeof(struct xsp_pipe_winsys));
+ xsp_winsys->base.buffer_create = xsp_buffer_create;
+ xsp_winsys->base.user_buffer_create = xsp_user_buffer_create;
+ xsp_winsys->base.buffer_map = xsp_buffer_map;
+ xsp_winsys->base.buffer_unmap = xsp_buffer_unmap;
+ xsp_winsys->base.buffer_destroy = xsp_buffer_destroy;
+ xsp_winsys->base.surface_buffer_create = xsp_surface_buffer_create;
+ xsp_winsys->base.fence_reference = xsp_fence_reference;
+ xsp_winsys->base.fence_signalled = xsp_fence_signalled;
+ xsp_winsys->base.fence_finish = xsp_fence_finish;
+ xsp_winsys->base.flush_frontbuffer = xsp_flush_frontbuffer;
+ xsp_winsys->base.get_name = xsp_get_name;
+
+ {
+ /* XXX: Can't use the returned XImage* directly,
+ since we don't have control over winsys destruction
+ and we wouldn't be able to free it */
+ XImage *template = XCreateImage
+ (
+ display,
+ XDefaultVisual(display, XDefaultScreen(display)),
+ XDefaultDepth(display, XDefaultScreen(display)),
+ ZPixmap,
+ 0,
+ NULL,
+ 0, /* Don't know the width and height until flush_frontbuffer */
+ 0,
+ 32,
+ 0
+ );
+
+ memcpy(&xsp_winsys->fbimage, template, sizeof(XImage));
+ XInitImage(&xsp_winsys->fbimage);
+
+ XDestroyImage(template);
+ }
+
+ sp_screen = softpipe_create_screen((struct pipe_winsys*)xsp_winsys);
+ sp_pipe = softpipe_create(sp_screen);
+
+ xsp_context = calloc(1, sizeof(struct xsp_context));
+ xsp_context->display = display;
+ xsp_context->screen = screen;
+
+ sp_pipe->priv = xsp_context;
+
+ return sp_pipe;
+}
+
+int destroy_pipe_context(struct pipe_context *pipe)
+{
+ struct pipe_screen *screen;
+ struct pipe_winsys *winsys;
+
+ assert(pipe);
+
+ screen = pipe->screen;
+ winsys = pipe->winsys;
+ free(pipe->priv);
+ pipe->destroy(pipe);
+ screen->destroy(screen);
+ free(winsys);
+
+ return 0;
+}
diff --git a/src/gallium/winsys/gdi/SConscript b/src/gallium/winsys/gdi/SConscript
new file mode 100644
index 0000000000..86eb9ef55e
--- /dev/null
+++ b/src/gallium/winsys/gdi/SConscript
@@ -0,0 +1,39 @@
+#######################################################################
+# SConscript for gdi winsys
+
+Import('*')
+
+if env['platform'] == 'windows':
+
+ env = env.Clone()
+
+ env.Append(CPPPATH = [
+ '#src/gallium/state_trackers/wgl',
+ ])
+
+ env.Append(LIBS = [
+ 'gdi32',
+ 'user32',
+ 'kernel32',
+ 'ws2_32',
+ ])
+
+ sources = [
+ 'gdi_softpipe_winsys.c',
+ ]
+
+ if env['gcc']:
+ sources += ['#src/gallium/state_trackers/wgl/opengl32.mingw.def']
+ else:
+ sources += ['#src/gallium/state_trackers/wgl/opengl32.def']
+
+ drivers = [
+ trace,
+ softpipe,
+ ]
+
+ env.SharedLibrary(
+ target ='opengl32',
+ source = sources,
+ LIBS = wgl + glapi + mesa + drivers + auxiliaries + env['LIBS'],
+ )
diff --git a/src/gallium/winsys/gdi/gdi_softpipe_winsys.c b/src/gallium/winsys/gdi/gdi_softpipe_winsys.c
new file mode 100644
index 0000000000..66120a6a98
--- /dev/null
+++ b/src/gallium/winsys/gdi/gdi_softpipe_winsys.c
@@ -0,0 +1,334 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Softpipe support.
+ *
+ * @author Keith Whitwell
+ * @author Brian Paul
+ * @author Jose Fonseca
+ */
+
+
+#include <windows.h>
+
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_format.h"
+#include "pipe/p_context.h"
+#include "pipe/p_inlines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "softpipe/sp_winsys.h"
+#include "softpipe/sp_texture.h"
+#include "shared/stw_winsys.h"
+
+
+struct gdi_softpipe_buffer
+{
+ struct pipe_buffer base;
+ boolean userBuffer; /** Is this a user-space buffer? */
+ void *data;
+ void *mapped;
+};
+
+
+/** Cast wrapper */
+static INLINE struct gdi_softpipe_buffer *
+gdi_softpipe_buffer( struct pipe_buffer *buf )
+{
+ return (struct gdi_softpipe_buffer *)buf;
+}
+
+
+static void *
+gdi_softpipe_buffer_map(struct pipe_winsys *winsys,
+ struct pipe_buffer *buf,
+ unsigned flags)
+{
+ struct gdi_softpipe_buffer *gdi_softpipe_buf = gdi_softpipe_buffer(buf);
+ gdi_softpipe_buf->mapped = gdi_softpipe_buf->data;
+ return gdi_softpipe_buf->mapped;
+}
+
+
+static void
+gdi_softpipe_buffer_unmap(struct pipe_winsys *winsys,
+ struct pipe_buffer *buf)
+{
+ struct gdi_softpipe_buffer *gdi_softpipe_buf = gdi_softpipe_buffer(buf);
+ gdi_softpipe_buf->mapped = NULL;
+}
+
+
+static void
+gdi_softpipe_buffer_destroy(struct pipe_buffer *buf)
+{
+ struct gdi_softpipe_buffer *oldBuf = gdi_softpipe_buffer(buf);
+
+ if (oldBuf->data) {
+ if (!oldBuf->userBuffer)
+ align_free(oldBuf->data);
+
+ oldBuf->data = NULL;
+ }
+
+ FREE(oldBuf);
+}
+
+
+static const char *
+gdi_softpipe_get_name(struct pipe_winsys *winsys)
+{
+ return "softpipe";
+}
+
+
+static struct pipe_buffer *
+gdi_softpipe_buffer_create(struct pipe_winsys *winsys,
+ unsigned alignment,
+ unsigned usage,
+ unsigned size)
+{
+ struct gdi_softpipe_buffer *buffer = CALLOC_STRUCT(gdi_softpipe_buffer);
+
+ pipe_reference_init(&buffer->base.reference, 1);
+ buffer->base.alignment = alignment;
+ buffer->base.usage = usage;
+ buffer->base.size = size;
+
+ buffer->data = align_malloc(size, alignment);
+
+ return &buffer->base;
+}
+
+
+/**
+ * Create buffer which wraps user-space data.
+ */
+static struct pipe_buffer *
+gdi_softpipe_user_buffer_create(struct pipe_winsys *winsys,
+ void *ptr,
+ unsigned bytes)
+{
+ struct gdi_softpipe_buffer *buffer;
+
+ buffer = CALLOC_STRUCT(gdi_softpipe_buffer);
+ if(!buffer)
+ return NULL;
+
+ pipe_reference_init(&buffer->base.reference, 1);
+ buffer->base.size = bytes;
+ buffer->userBuffer = TRUE;
+ buffer->data = ptr;
+
+ return &buffer->base;
+}
+
+
+/**
+ * Round n up to the next multiple (multiple must be a power of two).
+ */
+static INLINE unsigned
+round_up(unsigned n, unsigned multiple)
+{
+ return (n + multiple - 1) & ~(multiple - 1);
+}
+
+
+static struct pipe_buffer *
+gdi_softpipe_surface_buffer_create(struct pipe_winsys *winsys,
+ unsigned width, unsigned height,
+ enum pipe_format format,
+ unsigned usage,
+ unsigned tex_usage,
+ unsigned *stride)
+{
+ const unsigned alignment = 64;
+ struct pipe_format_block block;
+ unsigned nblocksx, nblocksy;
+
+ pf_get_block(format, &block);
+ nblocksx = pf_get_nblocksx(&block, width);
+ nblocksy = pf_get_nblocksy(&block, height);
+ *stride = round_up(nblocksx * block.size, alignment);
+
+ return winsys->buffer_create(winsys, alignment,
+ usage,
+ *stride * nblocksy);
+}
+
+
+static void
+gdi_softpipe_dummy_flush_frontbuffer(struct pipe_winsys *winsys,
+ struct pipe_surface *surface,
+ void *context_private)
+{
+ assert(0);
+}
+
+
+static void
+gdi_softpipe_fence_reference(struct pipe_winsys *winsys,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+}
+
+
+static int
+gdi_softpipe_fence_signalled(struct pipe_winsys *winsys,
+ struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ return 0;
+}
+
+
+static int
+gdi_softpipe_fence_finish(struct pipe_winsys *winsys,
+ struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ return 0;
+}
+
+
+static void
+gdi_softpipe_destroy(struct pipe_winsys *winsys)
+{
+ FREE(winsys);
+}
+
+
+static struct pipe_screen *
+gdi_softpipe_screen_create(void)
+{
+ struct pipe_winsys *winsys;
+ struct pipe_screen *screen;
+
+ winsys = CALLOC_STRUCT(pipe_winsys);
+ if(!winsys)
+ return NULL;
+
+ winsys->destroy = gdi_softpipe_destroy;
+
+ winsys->buffer_create = gdi_softpipe_buffer_create;
+ winsys->user_buffer_create = gdi_softpipe_user_buffer_create;
+ winsys->buffer_map = gdi_softpipe_buffer_map;
+ winsys->buffer_unmap = gdi_softpipe_buffer_unmap;
+ winsys->buffer_destroy = gdi_softpipe_buffer_destroy;
+
+ winsys->surface_buffer_create = gdi_softpipe_surface_buffer_create;
+
+ winsys->fence_reference = gdi_softpipe_fence_reference;
+ winsys->fence_signalled = gdi_softpipe_fence_signalled;
+ winsys->fence_finish = gdi_softpipe_fence_finish;
+
+ winsys->flush_frontbuffer = gdi_softpipe_dummy_flush_frontbuffer;
+ winsys->get_name = gdi_softpipe_get_name;
+
+ screen = softpipe_create_screen(winsys);
+ if(!screen)
+ gdi_softpipe_destroy(winsys);
+
+ return screen;
+}
+
+
+static struct pipe_context *
+gdi_softpipe_context_create(struct pipe_screen *screen)
+{
+ return softpipe_create(screen);
+}
+
+
+static void
+gdi_softpipe_flush_frontbuffer(struct pipe_screen *screen,
+ struct pipe_surface *surface,
+ HDC hDC)
+{
+ struct softpipe_texture *texture;
+ struct gdi_softpipe_buffer *buffer;
+ BITMAPINFO bmi;
+
+ texture = softpipe_texture(surface->texture);
+
+ buffer = gdi_softpipe_buffer(texture->buffer);
+
+ memset(&bmi, 0, sizeof(BITMAPINFO));
+ bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
+ bmi.bmiHeader.biWidth = texture->stride[surface->level] / pf_get_size(surface->format);
+ bmi.bmiHeader.biHeight= -(long)surface->height;
+ bmi.bmiHeader.biPlanes = 1;
+ bmi.bmiHeader.biBitCount = pf_get_bits(surface->format);
+ bmi.bmiHeader.biCompression = BI_RGB;
+ bmi.bmiHeader.biSizeImage = 0;
+ bmi.bmiHeader.biXPelsPerMeter = 0;
+ bmi.bmiHeader.biYPelsPerMeter = 0;
+ bmi.bmiHeader.biClrUsed = 0;
+ bmi.bmiHeader.biClrImportant = 0;
+
+ StretchDIBits(hDC,
+ 0, 0, surface->width, surface->height,
+ 0, 0, surface->width, surface->height,
+ buffer->data, &bmi, 0, SRCCOPY);
+}
+
+
+static const struct stw_winsys stw_winsys = {
+ &gdi_softpipe_screen_create,
+ &gdi_softpipe_context_create,
+ &gdi_softpipe_flush_frontbuffer
+};
+
+
+BOOL WINAPI
+DllMain(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpReserved)
+{
+ switch (fdwReason) {
+ case DLL_PROCESS_ATTACH:
+ if (!stw_init(&stw_winsys)) {
+ return FALSE;
+ }
+ return stw_init_thread();
+
+ case DLL_THREAD_ATTACH:
+ return stw_init_thread();
+
+ case DLL_THREAD_DETACH:
+ stw_cleanup_thread();
+ break;
+
+ case DLL_PROCESS_DETACH:
+ stw_cleanup_thread();
+ stw_cleanup();
+ break;
+ }
+ return TRUE;
+}
diff --git a/src/gallium/winsys/xlib/Makefile b/src/gallium/winsys/xlib/Makefile
new file mode 100644
index 0000000000..522f6dc5ae
--- /dev/null
+++ b/src/gallium/winsys/xlib/Makefile
@@ -0,0 +1,105 @@
+# src/gallium/winsys/xlib/Makefile
+
+# This makefile produces a "stand-alone" libGL.so which is based on
+# Xlib (no DRI HW acceleration)
+
+
+TOP = ../../../..
+include $(TOP)/configs/current
+
+
+GL_MAJOR = 1
+GL_MINOR = 5
+GL_TINY = 0$(MESA_MAJOR)0$(MESA_MINOR)0$(MESA_TINY)
+
+
+INCLUDE_DIRS = \
+ -I$(TOP)/include \
+ -I$(TOP)/src/mesa \
+ -I$(TOP)/src/mesa/main \
+ -I$(TOP)/src/gallium/include \
+ -I$(TOP)/src/gallium/drivers \
+ -I$(TOP)/src/gallium/state_trackers/glx/xlib \
+ -I$(TOP)/src/gallium/auxiliary
+
+DEFINES += \
+ -DGALLIUM_SOFTPIPE \
+ -DGALLIUM_TRACE \
+ -DGALLIUM_BRW
+#-DGALLIUM_CELL will be defined by the config
+
+XLIB_WINSYS_SOURCES = \
+ xlib.c \
+ xlib_cell.c \
+ xlib_brw_aub.c \
+ xlib_brw_context.c \
+ xlib_brw_screen.c \
+ xlib_softpipe.c \
+ xlib_trace.c
+
+
+XLIB_WINSYS_OBJECTS = $(XLIB_WINSYS_SOURCES:.c=.o)
+
+
+# Note: CELL_SPU_LIB is only defined for cell configs
+
+LIBS = \
+ $(GALLIUM_DRIVERS) \
+ $(TOP)/src/gallium/state_trackers/glx/xlib/libxlib.a \
+ $(TOP)/src/mesa/libglapi.a \
+ $(TOP)/src/mesa/libmesagallium.a \
+ $(GALLIUM_AUXILIARIES) \
+ $(CELL_SPU_LIB) \
+
+
+.SUFFIXES : .cpp
+
+.c.o:
+ $(CC) -c $(INCLUDE_DIRS) $(DEFINES) $(CFLAGS) $< -o $@
+
+.cpp.o:
+ $(CXX) -c $(INCLUDE_DIRS) $(DEFINES) $(CXXFLAGS) $< -o $@
+
+
+
+default: $(TOP)/$(LIB_DIR)/gallium $(TOP)/$(LIB_DIR)/gallium/$(GL_LIB_NAME)
+
+$(TOP)/$(LIB_DIR)/gallium:
+ @ mkdir -p $(TOP)/$(LIB_DIR)/gallium
+
+# Make the libGL.so library
+$(TOP)/$(LIB_DIR)/gallium/$(GL_LIB_NAME): $(XLIB_WINSYS_OBJECTS) $(LIBS) Makefile
+ $(TOP)/bin/mklib -o $(GL_LIB) \
+ -linker "$(CC)" \
+ -major $(GL_MAJOR) -minor $(GL_MINOR) -patch $(GL_TINY) \
+ -install $(TOP)/$(LIB_DIR)/gallium \
+ $(MKLIB_OPTIONS) $(XLIB_WINSYS_OBJECTS) \
+ -Wl,--start-group $(LIBS) -Wl,--end-group $(GL_LIB_DEPS)
+
+
+depend: $(XLIB_WINSYS_SOURCES)
+ @ echo "running $(MKDEP)"
+ @ rm -f depend # workaround oops on gutsy?!?
+ @ touch depend
+ $(MKDEP) $(MKDEP_OPTIONS) $(DEFINES) $(INCLUDE_DIRS) $(XLIB_WINSYS_SOURCES) \
+ > /dev/null 2>/dev/null
+
+
+install: default
+ $(INSTALL) -d $(INSTALL_DIR)/include/GL
+ $(INSTALL) -d $(INSTALL_DIR)/$(LIB_DIR)
+ $(INSTALL) -m 644 $(TOP)/include/GL/*.h $(INSTALL_DIR)/include/GL
+ @if [ -e $(TOP)/$(LIB_DIR)/$(GL_LIB_NAME) ]; then \
+ $(MINSTALL) $(TOP)/$(LIB_DIR)/libGL* $(INSTALL_DIR)/$(LIB_DIR); \
+ fi
+
+
+# Emacs tags
+tags:
+ etags `find . -name \*.[ch]` $(TOP)/include/GL/*.h
+
+clean:
+ -rm -f *.o
+
+
+include depend
diff --git a/src/gallium/winsys/xlib/SConscript b/src/gallium/winsys/xlib/SConscript
new file mode 100644
index 0000000000..467d595d33
--- /dev/null
+++ b/src/gallium/winsys/xlib/SConscript
@@ -0,0 +1,66 @@
+#######################################################################
+# SConscript for xlib winsys
+
+Import('*')
+
+if env['platform'] == 'linux' \
+ and 'mesa' in env['statetrackers'] \
+ and set(('softpipe', 'llvmpipe', 'i915simple', 'trace')).intersection(env['drivers']) \
+ and not env['dri']:
+
+ env = env.Clone()
+
+ env.Append(CPPPATH = [
+ '#/src/mesa',
+ '#/src/mesa/main',
+ '#/src/gallium/state_trackers/glx/xlib',
+ ])
+
+ env.Append(CPPDEFINES = ['USE_XSHM'])
+
+ sources = [
+ 'xlib.c',
+ ]
+
+ drivers = []
+
+ if 'softpipe' in env['drivers']:
+ env.Append(CPPDEFINES = 'GALLIUM_SOFTPIPE')
+ sources += ['xlib_softpipe.c']
+ drivers += [softpipe]
+
+ if 'llvmpipe' in env['drivers']:
+ env.Tool('llvm')
+ if 'LLVM_VERSION' in env:
+ env.Append(CPPDEFINES = 'GALLIUM_LLVMPIPE')
+ env.Tool('udis86')
+ sources += ['xlib_llvmpipe.c']
+ drivers += [llvmpipe]
+
+ if 'i965simple' in env['drivers']:
+ env.Append(CPPDEFINES = 'GALLIUM_I965SIMPLE')
+ sources += [
+ 'xlib_brw_aub.c',
+ 'xlib_brw_context.c',
+ 'xlib_brw_screen.c',
+ ]
+ drivers += [i965simple]
+
+ if 'cell' in env['drivers']:
+ env.Append(CPPDEFINES = 'GALLIUM_CELL')
+ sources += ['xlib_cell.c']
+ drivers += [cell]
+
+ if 'trace' in env['drivers']:
+ env.Append(CPPDEFINES = 'GALLIUM_TRACE')
+ sources += ['xlib_trace.c']
+ drivers += [trace]
+
+ # TODO: write a wrapper function http://www.scons.org/wiki/WrapperFunctions
+ libgl = env.SharedLibrary(
+ target ='GL',
+ source = sources,
+ LIBS = st_xlib + glapi + mesa + drivers + auxiliaries + env['LIBS'],
+ )
+
+ env.InstallSharedLibrary(libgl, version=(1, 5))
diff --git a/src/gallium/winsys/xlib/xlib.c b/src/gallium/winsys/xlib/xlib.c
new file mode 100644
index 0000000000..4b71cf7ec3
--- /dev/null
+++ b/src/gallium/winsys/xlib/xlib.c
@@ -0,0 +1,123 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Authors:
+ * Keith Whitwell
+ */
+
+#include "xlib.h"
+#include "xm_winsys.h"
+
+#include <stdlib.h>
+#include <assert.h>
+
+/* Todo, replace all this with callback-structs provided by the
+ * individual implementations.
+ */
+
+enum mode {
+ MODE_TRACE,
+ MODE_BRW,
+ MODE_CELL,
+ MODE_LLVMPIPE,
+ MODE_SOFTPIPE
+};
+
+
+static enum mode get_mode()
+{
+ if (getenv("XMESA_TRACE"))
+ return MODE_TRACE;
+
+ if (getenv("XMESA_BRW"))
+ return MODE_BRW;
+
+#ifdef GALLIUM_CELL
+ if (!getenv("GALLIUM_NOCELL"))
+ return MODE_CELL;
+#endif
+
+#if defined(GALLIUM_LLVMPIPE)
+ return MODE_LLVMPIPE;
+#else
+ return MODE_SOFTPIPE;
+#endif
+}
+
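+/* Runs automatically when the shared library is loaded, thanks to the constructor attribute. */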
+static void _init( void ) __attribute__((constructor));
+
+static void _init( void )
+{
+ enum mode xlib_mode = get_mode();
+
+ switch (xlib_mode) {
+ case MODE_TRACE:
+#if defined(GALLIUM_TRACE) && defined(GALLIUM_SOFTPIPE)
+ xmesa_set_driver( &xlib_trace_driver );
+#endif
+ break;
+ case MODE_BRW:
+#if defined(GALLIUM_BRW)
+ xmesa_set_driver( &xlib_brw_driver );
+#endif
+ break;
+ case MODE_CELL:
+#if defined(GALLIUM_CELL)
+ xmesa_set_driver( &xlib_cell_driver );
+#endif
+ break;
+ case MODE_LLVMPIPE:
+#if defined(GALLIUM_LLVMPIPE)
+ xmesa_set_driver( &xlib_llvmpipe_driver );
+#endif
+ break;
+ case MODE_SOFTPIPE:
+#if defined(GALLIUM_SOFTPIPE)
+ xmesa_set_driver( &xlib_softpipe_driver );
+#endif
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
+
+
+/***********************************************************************
+ *
+ * Butt-ugly hack to convince the linker not to throw away public GL
+ * symbols (they are all referenced from getprocaddress, I guess).
+ */
+extern void (*linker_foo(const unsigned char *procName))();
+extern void (*glXGetProcAddress(const unsigned char *procName))();
+
+extern void (*linker_foo(const unsigned char *procName))()
+{
+ return glXGetProcAddress(procName);
+}
diff --git a/src/gallium/winsys/xlib/xlib.h b/src/gallium/winsys/xlib/xlib.h
new file mode 100644
index 0000000000..347d45f4d6
--- /dev/null
+++ b/src/gallium/winsys/xlib/xlib.h
@@ -0,0 +1,15 @@
+
+#ifndef XLIB_H
+#define XLIB_H
+
+#include "pipe/p_compiler.h"
+#include "xm_winsys.h"
+
+extern struct xm_driver xlib_trace_driver;
+extern struct xm_driver xlib_softpipe_driver;
+extern struct xm_driver xlib_llvmpipe_driver;
+extern struct xm_driver xlib_cell_driver;
+extern struct xm_driver xlib_brw_driver;
+
+
+#endif
diff --git a/src/gallium/winsys/xlib/xlib_brw.h b/src/gallium/winsys/xlib/xlib_brw.h
new file mode 100644
index 0000000000..be2dd147db
--- /dev/null
+++ b/src/gallium/winsys/xlib/xlib_brw.h
@@ -0,0 +1,30 @@
+#ifndef XLIB_BRW_H
+#define XLIB_BRW_H
+
+struct pipe_winsys;
+struct pipe_buffer;
+struct pipe_surface;
+struct xmesa_buffer;
+
+unsigned xlib_brw_get_buffer_offset( struct pipe_winsys *pws,
+ struct pipe_buffer *buf,
+ unsigned access_flags );
+
+void xlib_brw_buffer_subdata_typed( struct pipe_winsys *pws,
+ struct pipe_buffer *buf,
+ unsigned long offset,
+ unsigned long size,
+ const void *data,
+ unsigned data_type );
+
+
+
+void xlib_brw_commands_aub(struct pipe_winsys *winsys,
+ unsigned *cmds,
+ unsigned nr_dwords);
+
+struct pipe_context *
+xlib_create_brw_context( struct pipe_screen *screen,
+ void *unused );
+
+#endif
diff --git a/src/gallium/winsys/xlib/xlib_brw_aub.c b/src/gallium/winsys/xlib/xlib_brw_aub.c
new file mode 100644
index 0000000000..b6bd849ef2
--- /dev/null
+++ b/src/gallium/winsys/xlib/xlib_brw_aub.c
@@ -0,0 +1,399 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "xlib_brw_aub.h"
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+#include "softpipe/sp_texture.h"
+
+
+struct brw_aubfile {
+ FILE *file;
+ unsigned next_free_page;
+};
+
+
+extern char *__progname;
+
+
+struct aub_file_header {
+ unsigned int instruction_type;
+ unsigned int pad0:16;
+ unsigned int minor:8;
+ unsigned int major:8;
+ unsigned char application[8*4];
+ unsigned int day:8;
+ unsigned int month:8;
+ unsigned int year:16;
+ unsigned int timezone:8;
+ unsigned int second:8;
+ unsigned int minute:8;
+ unsigned int hour:8;
+ unsigned int comment_length:16;
+ unsigned int pad1:16;
+};
+
+struct aub_block_header {
+ unsigned int instruction_type;
+ unsigned int operation:8;
+ unsigned int type:8;
+ unsigned int address_space:8;
+ unsigned int pad0:8;
+ unsigned int general_state_type:8;
+ unsigned int surface_state_type:8;
+ unsigned int pad1:16;
+ unsigned int address;
+ unsigned int length;
+};
+
+struct aub_dump_bmp {
+ unsigned int instruction_type;
+ unsigned int xmin:16;
+ unsigned int ymin:16;
+ unsigned int pitch:16;
+ unsigned int bpp:8;
+ unsigned int format:8;
+ unsigned int xsize:16;
+ unsigned int ysize:16;
+ unsigned int addr;
+ unsigned int unknown;
+};
+
+enum bh_operation {
+ BH_COMMENT,
+ BH_DATA_WRITE,
+ BH_COMMAND_WRITE,
+ BH_MMI0_WRITE32,
+ BH_END_SCENE,
+ BH_CONFIG_MEMORY_MAP,
+ BH_MAX_OPERATION
+};
+
+enum command_write_type {
+ CW_HWB_RING = 1,
+ CW_PRIMARY_RING_A,
+ CW_PRIMARY_RING_B, /* XXX - disagreement with listaub! */
+ CW_PRIMARY_RING_C,
+ CW_MAX_TYPE
+};
+
+enum memory_map_type {
+ MM_DEFAULT,
+ MM_DYNAMIC,
+ MM_MAX_TYPE
+};
+
+enum address_space {
+ ADDR_GTT,
+ ADDR_LOCAL,
+ ADDR_MAIN,
+ ADDR_MAX
+};
+
+
+#define AUB_FILE_HEADER 0xe085000b
+#define AUB_BLOCK_HEADER 0xe0c10003
+#define AUB_DUMP_BMP 0xe09e0004
+
+/* Registers to control page table
+ */
+#define PGETBL_CTL 0x2020
+#define PGETBL_ENABLED 0x1
+
+#define NR_GTT_ENTRIES 65536 /* 256 mb */
+
+#define FAIL \
+do { \
+ fprintf(stderr, "failed to write aub data at %s/%d\n", __FUNCTION__, __LINE__); \
+ exit(1); \
+} while (0)
+
+
+/* Emit the headers at the top of each aubfile. Initialize the GTT.
+ */
+static void init_aubfile( FILE *aub_file )
+{
+ struct aub_file_header fh;
+ struct aub_block_header bh;
+ unsigned int data;
+
+ static int nr;
+
+ nr++;
+
+ /* Emit the aub header:
+ */
+ memset(&fh, 0, sizeof(fh));
+
+ fh.instruction_type = AUB_FILE_HEADER;
+ fh.minor = 0x0;
+ fh.major = 0x7;
+ memcpy(fh.application, __progname, sizeof(fh.application));
+ fh.day = (nr>>24) & 0xff;
+ fh.month = 0x0;
+ fh.year = 0x0;
+ fh.timezone = 0x0;
+ fh.second = nr & 0xff;
+ fh.minute = (nr>>8) & 0xff;
+ fh.hour = (nr>>16) & 0xff;
+ fh.comment_length = 0x0;
+
+ if (fwrite(&fh, sizeof(fh), 1, aub_file) != 1)
+ FAIL;
+
+ /* Setup the GTT starting at main memory address zero (!):
+ */
+ memset(&bh, 0, sizeof(bh));
+
+ bh.instruction_type = AUB_BLOCK_HEADER;
+ bh.operation = BH_MMI0_WRITE32;
+ bh.type = 0x0;
+ bh.address_space = ADDR_GTT; /* ??? */
+ bh.general_state_type = 0x0;
+ bh.surface_state_type = 0x0;
+ bh.address = PGETBL_CTL;
+ bh.length = 0x4;
+
+ if (fwrite(&bh, sizeof(bh), 1, aub_file) != 1)
+ FAIL;
+
+ data = 0x0 | PGETBL_ENABLED;
+
+ if (fwrite(&data, sizeof(data), 1, aub_file) != 1)
+ FAIL;
+}
+
+
+static void init_aub_gtt( struct brw_aubfile *aubfile,
+ unsigned start_offset,
+ unsigned size )
+{
+ FILE *aub_file = aubfile->file;
+ struct aub_block_header bh;
+ unsigned int i;
+
+ assert(start_offset + size < NR_GTT_ENTRIES * 4096);
+
+
+ memset(&bh, 0, sizeof(bh));
+
+ bh.instruction_type = AUB_BLOCK_HEADER;
+ bh.operation = BH_DATA_WRITE;
+ bh.type = 0x0;
+ bh.address_space = ADDR_MAIN;
+ bh.general_state_type = 0x0;
+ bh.surface_state_type = 0x0;
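+ /* one 4-byte GTT entry describes each 4096-byte page */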
+ bh.address = start_offset / 4096 * 4;
+ bh.length = size / 4096 * 4;
+
+ if (fwrite(&bh, sizeof(bh), 1, aub_file) != 1)
+ FAIL;
+
+ for (i = 0; i < size / 4096; i++) {
+ unsigned data = aubfile->next_free_page | 1;
+
+ aubfile->next_free_page += 4096;
+
+ if (fwrite(&data, sizeof(data), 1, aub_file) != 1)
+ FAIL;
+ }
+
+}
+
+static void write_block_header( FILE *aub_file,
+ struct aub_block_header *bh,
+ const unsigned *data,
+ unsigned sz )
+{
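+ /* round the payload size up to a whole number of dwords */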
+ sz = (sz + 3) & ~3;
+
+ if (fwrite(bh, sizeof(*bh), 1, aub_file) != 1)
+ FAIL;
+
+ if (fwrite(data, sz, 1, aub_file) != 1)
+ FAIL;
+
+ fflush(aub_file);
+}
+
+
+static void write_dump_bmp( FILE *aub_file,
+ struct aub_dump_bmp *db )
+{
+ if (fwrite(db, sizeof(*db), 1, aub_file) != 1)
+ FAIL;
+
+ fflush(aub_file);
+}
+
+
+
+void brw_aub_gtt_data( struct brw_aubfile *aubfile,
+ unsigned offset,
+ const void *data,
+ unsigned sz,
+ unsigned type,
+ unsigned state_type )
+{
+ struct aub_block_header bh;
+
+ bh.instruction_type = AUB_BLOCK_HEADER;
+ bh.operation = BH_DATA_WRITE;
+ bh.type = type;
+ bh.address_space = ADDR_GTT;
+ bh.pad0 = 0;
+
+ if (type == DW_GENERAL_STATE) {
+ bh.general_state_type = state_type;
+ bh.surface_state_type = 0;
+ }
+ else {
+ bh.general_state_type = 0;
+ bh.surface_state_type = state_type;
+ }
+
+ bh.pad1 = 0;
+ bh.address = offset;
+ bh.length = sz;
+
+ write_block_header(aubfile->file, &bh, data, sz);
+}
+
+
+
+void brw_aub_gtt_cmds( struct brw_aubfile *aubfile,
+ unsigned offset,
+ const void *data,
+ unsigned sz )
+{
+ struct aub_block_header bh;
+ unsigned type = CW_PRIMARY_RING_A;
+
+
+ bh.instruction_type = AUB_BLOCK_HEADER;
+ bh.operation = BH_COMMAND_WRITE;
+ bh.type = type;
+ bh.address_space = ADDR_GTT;
+ bh.pad0 = 0;
+ bh.general_state_type = 0;
+ bh.surface_state_type = 0;
+ bh.pad1 = 0;
+ bh.address = offset;
+ bh.length = sz;
+
+ write_block_header(aubfile->file, &bh, data, sz);
+}
+
+void brw_aub_dump_bmp( struct brw_aubfile *aubfile,
+ struct pipe_surface *surface,
+ unsigned gtt_offset )
+{
+ struct aub_dump_bmp db;
+ unsigned format;
+
+ assert(surface->texture->block.width == 1);
+ assert(surface->texture->block.height == 1);
+
+ if (surface->texture->block.size == 4)
+ format = 0x7;
+ else
+ format = 0x3;
+
+ db.instruction_type = AUB_DUMP_BMP;
+ db.xmin = 0;
+ db.ymin = 0;
+ db.format = format;
+ db.bpp = surface->texture->block.size * 8;
+ db.pitch = softpipe_texture(surface->texture)->stride[surface->level] /
+ surface->texture->block.size;
+ db.xsize = surface->width;
+ db.ysize = surface->height;
+ db.addr = gtt_offset;
+ db.unknown = /* surface->tiled ? 0x4 : */ 0x0;
+
+ write_dump_bmp(aubfile->file, &db);
+}
+
+
+
+struct brw_aubfile *brw_aubfile_create( void )
+{
+ struct brw_aubfile *aubfile = CALLOC_STRUCT(brw_aubfile);
+ char filename[80];
+ int val;
+ static int i = 0;
+
+ i++;
+
+ if (getenv("INTEL_AUBFILE")) {
+ val = snprintf(filename, sizeof(filename), "%s%d.aub", getenv("INTEL_AUBFILE"), i%4);
+ debug_printf("--> Aub file: %s\n", filename);
+ aubfile->file = fopen(filename, "w");
+ }
+ else {
+ val = snprintf(filename, sizeof(filename), "%s.aub", __progname);
+ if (val < 0 || (size_t) val >= sizeof(filename))
+ strcpy(filename, "default.aub");
+
+ debug_printf("--> Aub file: %s\n", filename);
+ aubfile->file = fopen(filename, "w");
+ }
+
+ if (!aubfile->file) {
+ debug_printf("couldn't open aubfile\n");
+ exit(1);
+ }
+
+ init_aubfile(aubfile->file);
+
+ /* The GTT is located starting at address zero in main memory. Pages
+ * to populate the GTT start after this point.
+ */
+ aubfile->next_free_page = (NR_GTT_ENTRIES * 4 + 4095) & ~4095;
+
+ /* These more or less correspond to the AGP regions mapped by the
+ * driver:
+ */
+ init_aub_gtt(aubfile, 0, 4096*4);
+ init_aub_gtt(aubfile, AUB_BUF_START, AUB_BUF_SIZE);
+
+ return aubfile;
+}
+
+void brw_aub_destroy( struct brw_aubfile *aubfile )
+{
+ fclose(aubfile->file);
+ FREE(aubfile);
+}
diff --git a/src/gallium/winsys/xlib/xlib_brw_aub.h b/src/gallium/winsys/xlib/xlib_brw_aub.h
new file mode 100644
index 0000000000..f5c60c7be2
--- /dev/null
+++ b/src/gallium/winsys/xlib/xlib_brw_aub.h
@@ -0,0 +1,114 @@
+/*
+ Copyright (C) Intel Corp. 2006. All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef BRW_AUB_H
+#define BRW_AUB_H
+
+/* We set up this region, buffers may be allocated here:
+ */
+#define AUB_BUF_START (4096*4)
+#define AUB_BUF_SIZE (8*1024*1024)
+
+struct intel_context;
+struct pipe_surface;
+
+struct brw_aubfile *brw_aubfile_create( void );
+
+void brw_aub_destroy( struct brw_aubfile *aubfile );
+
+void brw_aub_gtt_data( struct brw_aubfile *aubfile,
+ unsigned offset,
+ const void *data,
+ unsigned sz,
+ unsigned type,
+ unsigned state_type );
+
+void brw_aub_gtt_cmds( struct brw_aubfile *aubfile,
+ unsigned offset,
+ const void *data,
+ unsigned sz );
+
+void brw_aub_dump_bmp( struct brw_aubfile *aubfile,
+ struct pipe_surface *surface,
+ unsigned gtt_offset );
+
+
+enum data_write_type {
+ DW_NOTYPE,
+ DW_BATCH_BUFFER,
+ DW_BIN_BUFFER,
+ DW_BIN_POINTER_LIST,
+ DW_SLOW_STATE_BUFFER,
+ DW_VERTEX_BUFFER,
+ DW_2D_MAP,
+ DW_CUBE_MAP,
+ DW_INDIRECT_STATE_BUFFER,
+ DW_VOLUME_MAP,
+ DW_1D_MAP,
+ DW_CONSTANT_BUFFER,
+ DW_CONSTANT_URB_ENTRY,
+ DW_INDEX_BUFFER,
+ DW_GENERAL_STATE,
+ DW_SURFACE_STATE,
+ DW_MEDIA_OBJECT_INDIRECT_DATA,
+ DW_MAX_TYPE
+};
+
+enum data_write_general_state_type {
+ DWGS_NOTYPE,
+ DWGS_VERTEX_SHADER_STATE,
+ DWGS_GEOMETRY_SHADER_STATE ,
+ DWGS_CLIPPER_STATE,
+ DWGS_STRIPS_FANS_STATE,
+ DWGS_WINDOWER_IZ_STATE,
+ DWGS_COLOR_CALC_STATE,
+ DWGS_CLIPPER_VIEWPORT_STATE, /* was 0x7 */
+ DWGS_STRIPS_FANS_VIEWPORT_STATE,
+ DWGS_COLOR_CALC_VIEWPORT_STATE, /* was 0x9 */
+ DWGS_SAMPLER_STATE,
+ DWGS_KERNEL_INSTRUCTIONS,
+ DWGS_SCRATCH_SPACE,
+ DWGS_SAMPLER_DEFAULT_COLOR,
+ DWGS_INTERFACE_DESCRIPTOR,
+ DWGS_VLD_STATE,
+ DWGS_VFE_STATE,
+ DWGS_MAX_TYPE
+};
+
+enum data_write_surface_state_type {
+ DWSS_NOTYPE,
+ DWSS_BINDING_TABLE_STATE,
+ DWSS_SURFACE_STATE,
+ DWSS_MAX_TYPE
+};
+
+
+#endif
diff --git a/src/gallium/winsys/xlib/xlib_brw_context.c b/src/gallium/winsys/xlib/xlib_brw_context.c
new file mode 100644
index 0000000000..09599507f4
--- /dev/null
+++ b/src/gallium/winsys/xlib/xlib_brw_context.c
@@ -0,0 +1,209 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Authors:
+ * Keith Whitwell
+ * Brian Paul
+ */
+
+
+//#include "glxheader.h"
+//#include "xmesaP.h"
+
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_inlines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "i965simple/brw_winsys.h"
+#include "xlib_brw_aub.h"
+#include "xlib_brw.h"
+
+
+
+
+#define XBCWS_BATCHBUFFER_SIZE 1024
+
+
+/* The backend to the brw driver (ie struct brw_winsys) is actually a
+ * per-context entity.
+ */
+struct xlib_brw_context_winsys {
+ struct brw_winsys brw_context_winsys; /**< batch buffer funcs */
+ struct aub_context *aub;
+
+ struct pipe_winsys *pipe_winsys;
+
+ unsigned batch_data[XBCWS_BATCHBUFFER_SIZE];
+ unsigned batch_nr;
+ unsigned batch_size;
+ unsigned batch_alloc;
+};
+
+
+/* Turn a brw_winsys into an xlib_brw_context_winsys:
+ */
+static inline struct xlib_brw_context_winsys *
+xlib_brw_context_winsys( struct brw_winsys *sws )
+{
+ return (struct xlib_brw_context_winsys *)sws;
+}
+
+
+/* Simple batchbuffer interface:
+ */
+
+static unsigned *xbcws_batch_start( struct brw_winsys *sws,
+ unsigned dwords,
+ unsigned relocs )
+{
+ struct xlib_brw_context_winsys *xbcws = xlib_brw_context_winsys(sws);
+
+ if (xbcws->batch_size < xbcws->batch_nr + dwords)
+ return NULL;
+
+ xbcws->batch_alloc = xbcws->batch_nr + dwords;
+ return (void *)1; /* not a valid pointer! */
+}
+
+static void xbcws_batch_dword( struct brw_winsys *sws,
+ unsigned dword )
+{
+ struct xlib_brw_context_winsys *xbcws = xlib_brw_context_winsys(sws);
+
+ assert(xbcws->batch_nr < xbcws->batch_alloc);
+ xbcws->batch_data[xbcws->batch_nr++] = dword;
+}
+
+static void xbcws_batch_reloc( struct brw_winsys *sws,
+ struct pipe_buffer *buf,
+ unsigned access_flags,
+ unsigned delta )
+{
+ struct xlib_brw_context_winsys *xbcws = xlib_brw_context_winsys(sws);
+
+ assert(xbcws->batch_nr < xbcws->batch_alloc);
+ xbcws->batch_data[xbcws->batch_nr++] =
+ ( xlib_brw_get_buffer_offset( NULL, buf, access_flags ) +
+ delta );
+}
+
+static void xbcws_batch_end( struct brw_winsys *sws )
+{
+ struct xlib_brw_context_winsys *xbcws = xlib_brw_context_winsys(sws);
+
+ assert(xbcws->batch_nr <= xbcws->batch_alloc);
+ xbcws->batch_alloc = 0;
+}
+
+static void xbcws_batch_flush( struct brw_winsys *sws,
+ struct pipe_fence_handle **fence )
+{
+ struct xlib_brw_context_winsys *xbcws = xlib_brw_context_winsys(sws);
+ assert(xbcws->batch_nr <= xbcws->batch_size);
+
+ if (xbcws->batch_nr) {
+ xlib_brw_commands_aub( xbcws->pipe_winsys,
+ xbcws->batch_data,
+ xbcws->batch_nr );
+ }
+
+ xbcws->batch_nr = 0;
+}
+
+
+
+/* Really a per-device function, just pass through:
+ */
+static unsigned xbcws_get_buffer_offset( struct brw_winsys *sws,
+ struct pipe_buffer *buf,
+ unsigned access_flags )
+{
+ struct xlib_brw_context_winsys *xbcws = xlib_brw_context_winsys(sws);
+
+ return xlib_brw_get_buffer_offset( xbcws->pipe_winsys,
+ buf,
+ access_flags );
+}
+
+
+/* Really a per-device function, just pass through:
+ */
+static void xbcws_buffer_subdata_typed( struct brw_winsys *sws,
+ struct pipe_buffer *buf,
+ unsigned long offset,
+ unsigned long size,
+ const void *data,
+ unsigned data_type )
+{
+ struct xlib_brw_context_winsys *xbcws = xlib_brw_context_winsys(sws);
+
+ xlib_brw_buffer_subdata_typed( xbcws->pipe_winsys,
+ buf,
+ offset,
+ size,
+ data,
+ data_type );
+}
+
+
+/**
+ * Create i965 hardware rendering context, but plugged into a
+ * dump-to-aubfile backend.
+ */
+struct pipe_context *
+xlib_create_brw_context( struct pipe_screen *screen,
+ void *unused )
+{
+ struct xlib_brw_context_winsys *xbcws = CALLOC_STRUCT( xlib_brw_context_winsys );
+
+ /* Fill in this struct with callbacks that i965simple will need to
+ * communicate with the window system, buffer manager, etc.
+ */
+ xbcws->brw_context_winsys.batch_start = xbcws_batch_start;
+ xbcws->brw_context_winsys.batch_dword = xbcws_batch_dword;
+ xbcws->brw_context_winsys.batch_reloc = xbcws_batch_reloc;
+ xbcws->brw_context_winsys.batch_end = xbcws_batch_end;
+ xbcws->brw_context_winsys.batch_flush = xbcws_batch_flush;
+ xbcws->brw_context_winsys.buffer_subdata_typed = xbcws_buffer_subdata_typed;
+ xbcws->brw_context_winsys.get_buffer_offset = xbcws_get_buffer_offset;
+
+ xbcws->pipe_winsys = screen->winsys; /* redundant */
+
+ xbcws->batch_size = XBCWS_BATCHBUFFER_SIZE;
+
+ /* Create the i965simple context:
+ */
+#ifdef GALLIUM_CELL
+ return NULL;
+#else
+ return brw_create( screen,
+ &xbcws->brw_context_winsys,
+ 0 );
+#endif
+}
diff --git a/src/gallium/winsys/xlib/xlib_brw_screen.c b/src/gallium/winsys/xlib/xlib_brw_screen.c
new file mode 100644
index 0000000000..ef545796f3
--- /dev/null
+++ b/src/gallium/winsys/xlib/xlib_brw_screen.c
@@ -0,0 +1,469 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Authors:
+ * Keith Whitwell
+ * Brian Paul
+ */
+
+
+//#include "state_trackers/xlib/glxheader.h"
+//#include "state_trackers/xlib/xmesaP.h"
+
+#include "pipe/internal/p_winsys_screen.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "i965simple/brw_winsys.h"
+#include "i965simple/brw_screen.h"
+#include "i965simple/brw_context.h"
+
+
+#include "xlib_brw_aub.h"
+#include "xlib_brw.h"
+#include "xlib.h"
+
+static struct pipe_buffer *
+buffer_from_surface(struct pipe_surface *surface)
+{
+ struct brw_texture *texture = (struct brw_texture *)surface->texture;
+ return texture->buffer;
+}
+
+struct aub_buffer {
+ struct pipe_reference reference;
+ char *data;
+ unsigned offset;
+ unsigned size;
+ unsigned map_count;
+ boolean dump_on_unmap;
+};
+
+
+
+struct aub_pipe_winsys {
+ struct pipe_winsys winsys;
+
+ struct brw_aubfile *aubfile;
+
+ /* This is simple, isn't it:
+ */
+ char *pool;
+ unsigned size;
+ unsigned used;
+};
+
+
+/* Turn a pipe winsys into an aub/pipe winsys:
+ */
+static inline struct aub_pipe_winsys *
+aub_pipe_winsys( struct pipe_winsys *winsys )
+{
+ return (struct aub_pipe_winsys *)winsys;
+}
+
+
+
+static INLINE struct aub_buffer *
+aub_bo( struct pipe_buffer *bo )
+{
+ return (struct aub_buffer *)bo;
+}
+
+static INLINE struct pipe_buffer *
+pipe_bo( struct aub_buffer *bo )
+{
+ return (struct pipe_buffer *)bo;
+}
+
+
+
+
+static void *aub_buffer_map(struct pipe_winsys *winsys,
+ struct pipe_buffer *buf,
+ unsigned flags )
+{
+ struct aub_buffer *sbo = aub_bo(buf);
+
+ assert(sbo->data);
+
+ if (flags & PIPE_BUFFER_USAGE_CPU_WRITE)
+ sbo->dump_on_unmap = 1;
+
+ sbo->map_count++;
+ return sbo->data;
+}
+
+static void aub_buffer_unmap(struct pipe_winsys *winsys,
+ struct pipe_buffer *buf)
+{
+ struct aub_pipe_winsys *iws = aub_pipe_winsys(winsys);
+ struct aub_buffer *sbo = aub_bo(buf);
+
+ sbo->map_count--;
+
+ if (sbo->map_count == 0 &&
+ sbo->dump_on_unmap) {
+
+ sbo->dump_on_unmap = 0;
+
+ brw_aub_gtt_data( iws->aubfile,
+ sbo->offset,
+ sbo->data,
+ sbo->size,
+ 0,
+ 0);
+ }
+}
+
+
+static void
+aub_buffer_destroy(struct pipe_buffer *buf)
+{
+ free(buf);
+}
+
+
+
+void xlib_brw_commands_aub(struct pipe_winsys *winsys,
+ unsigned *cmds,
+ unsigned nr_dwords)
+{
+ struct aub_pipe_winsys *iws = aub_pipe_winsys(winsys);
+ unsigned size = nr_dwords * 4;
+
+ assert(iws->used + size < iws->size);
+
+ brw_aub_gtt_cmds( iws->aubfile,
+ AUB_BUF_START + iws->used,
+ cmds,
+ nr_dwords * sizeof(int) );
+
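+ /* command data consumes space from the same bump-allocated pool as buffers */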
+ iws->used += align(size, 4096);
+}
+
+
+/* XXX: fix me:
+ */
+static struct aub_pipe_winsys *global_winsys = NULL;
+
+
+
+
+/* Pipe has no concept of pools. We choose the tex/region pool
+ * for all buffers.
+ */
+static struct pipe_buffer *
+aub_buffer_create(struct pipe_winsys *winsys,
+ unsigned alignment,
+ unsigned usage,
+ unsigned size)
+{
+ struct aub_pipe_winsys *iws = aub_pipe_winsys(winsys);
+ struct aub_buffer *sbo = CALLOC_STRUCT(aub_buffer);
+
+ pipe_reference_init(&sbo->reference, 1);
+
+ /* Could reuse buffers that are not referenced in current
+ * batchbuffer. Can't do that atm, so always reallocate:
+ */
+ assert(iws->used + size < iws->size);
+ sbo->data = iws->pool + iws->used;
+ sbo->offset = AUB_BUF_START + iws->used;
+ iws->used += align(size, 4096);
+
+ sbo->size = size;
+
+ return pipe_bo(sbo);
+}
+
+
+static struct pipe_buffer *
+aub_user_buffer_create(struct pipe_winsys *winsys, void *ptr, unsigned bytes)
+{
+ struct aub_buffer *sbo;
+
+ /* Let's hope this is meant for upload, not as a result!
+ */
+ sbo = aub_bo(aub_buffer_create( winsys, 0, 0, 0 ));
+
+ sbo->data = ptr;
+ sbo->size = bytes;
+
+ return pipe_bo(sbo);
+}
+
+
+/* The state tracker (should!) keep track of whether the fake
+ * frontbuffer has been touched by any rendering since the last time
+ * we copied its contents to the real frontbuffer. Our task is easy:
+ */
+static void
+aub_flush_frontbuffer( struct pipe_winsys *winsys,
+ struct pipe_surface *surface,
+ void *context_private)
+{
+// struct aub_pipe_winsys *iws = aub_pipe_winsys(winsys);
+ brw_aub_dump_bmp( global_winsys->aubfile,
+ surface,
+ aub_bo(buffer_from_surface(surface))->offset );
+}
+
+
+/**
+ * Round n up to next multiple.
+ */
+static INLINE unsigned
+round_up(unsigned n, unsigned multiple)
+{
+ return (n + multiple - 1) & ~(multiple - 1);
+}
+
+static struct pipe_buffer *
+aub_i915_surface_buffer_create(struct pipe_winsys *winsys,
+ unsigned width, unsigned height,
+ enum pipe_format format,
+ unsigned usage,
+ unsigned tex_usage,
+ unsigned *stride)
+{
+ const unsigned alignment = 64;
+ struct pipe_format_block block;
+ unsigned nblocksx, nblocksy;
+
+ pf_get_block(format, &block);
+ nblocksx = pf_get_nblocksx(&block, width);
+ nblocksy = pf_get_nblocksy(&block, height);
+ *stride = round_up(nblocksx * block.size, alignment);
+
+ return winsys->buffer_create(winsys, alignment,
+ usage,
+ *stride * nblocksy);
+}
+
+
+static const char *
+aub_get_name( struct pipe_winsys *winsys )
+{
+ return "Aub/xlib";
+}
+
+static void
+xlib_brw_destroy_pipe_winsys_aub( struct pipe_winsys *winsys )
+
+{
+ struct aub_pipe_winsys *iws = aub_pipe_winsys(winsys);
+ brw_aub_destroy(iws->aubfile);
+ free(iws->pool);
+ free(iws);
+}
+
+
+
+static struct pipe_winsys *
+xlib_create_brw_winsys( void )
+{
+ struct aub_pipe_winsys *iws = CALLOC_STRUCT( aub_pipe_winsys );
+
+ /* Fill in this struct with callbacks that pipe will need to
+ * communicate with the window system, buffer manager, etc.
+ *
+ * Pipe would be happy with a malloc based memory manager, but
+ * the SwapBuffers implementation in this winsys driver requires
+ * that rendering be done to an appropriate _DriBufferObject.
+ */
+ iws->winsys.buffer_create = aub_buffer_create;
+ iws->winsys.user_buffer_create = aub_user_buffer_create;
+ iws->winsys.buffer_map = aub_buffer_map;
+ iws->winsys.buffer_unmap = aub_buffer_unmap;
+ iws->winsys.buffer_destroy = aub_buffer_destroy;
+ iws->winsys.flush_frontbuffer = aub_flush_frontbuffer;
+ iws->winsys.get_name = aub_get_name;
+ iws->winsys.destroy = xlib_brw_destroy_pipe_winsys_aub;
+
+ iws->winsys.surface_buffer_create = aub_i915_surface_buffer_create;
+
+ iws->aubfile = brw_aubfile_create();
+ iws->size = AUB_BUF_SIZE;
+ iws->pool = malloc(AUB_BUF_SIZE);
+
+ /* HACK: static copy of this pointer:
+ */
+ assert(global_winsys == NULL);
+ global_winsys = iws;
+
+ return &iws->winsys;
+}
+
+
+static struct pipe_screen *
+xlib_create_brw_screen( void )
+{
+#ifndef GALLIUM_CELL
+ struct pipe_winsys *winsys;
+ struct pipe_screen *screen;
+
+ winsys = xlib_create_brw_winsys();
+ if (winsys == NULL)
+ return NULL;
+
+ screen = brw_create_screen(winsys, 0/* XXX pci_id */);
+ if (screen == NULL)
+ goto fail;
+
+ return screen;
+
+fail:
+ if (winsys)
+ winsys->destroy( winsys );
+
+#endif
+ return NULL;
+}
+
+
+/* These per-screen functions are actually made available to the driver
+ * through the brw_winsys (per-context) entity.
+ */
+unsigned xlib_brw_get_buffer_offset( struct pipe_winsys *pws,
+ struct pipe_buffer *buf,
+ unsigned access_flags )
+{
+ return aub_bo(buf)->offset;
+}
+
+void xlib_brw_buffer_subdata_typed( struct pipe_winsys *pws,
+ struct pipe_buffer *buf,
+ unsigned long offset,
+ unsigned long size,
+ const void *data,
+ unsigned data_type )
+{
+ unsigned aub_type = DW_GENERAL_STATE;
+ unsigned aub_sub_type = 0;
+
+ switch (data_type) {
+ case BRW_CC_VP:
+ aub_sub_type = DWGS_COLOR_CALC_VIEWPORT_STATE;
+ break;
+ case BRW_CC_UNIT:
+ aub_sub_type = DWGS_COLOR_CALC_STATE;
+ break;
+ case BRW_WM_PROG:
+ aub_sub_type = DWGS_KERNEL_INSTRUCTIONS;
+ break;
+ case BRW_SAMPLER_DEFAULT_COLOR:
+ aub_sub_type = DWGS_SAMPLER_DEFAULT_COLOR;
+ break;
+ case BRW_SAMPLER:
+ aub_sub_type = DWGS_SAMPLER_STATE;
+ break;
+ case BRW_WM_UNIT:
+ aub_sub_type = DWGS_WINDOWER_IZ_STATE;
+ break;
+ case BRW_SF_PROG:
+ aub_sub_type = DWGS_KERNEL_INSTRUCTIONS;
+ break;
+ case BRW_SF_VP:
+ aub_sub_type = DWGS_STRIPS_FANS_VIEWPORT_STATE;
+ break;
+ case BRW_SF_UNIT:
+ aub_sub_type = DWGS_STRIPS_FANS_STATE;
+ break;
+ case BRW_VS_UNIT:
+ aub_sub_type = DWGS_VERTEX_SHADER_STATE;
+ break;
+ case BRW_VS_PROG:
+ aub_sub_type = DWGS_KERNEL_INSTRUCTIONS;
+ break;
+ case BRW_GS_UNIT:
+ aub_sub_type = DWGS_GEOMETRY_SHADER_STATE;
+ break;
+ case BRW_GS_PROG:
+ aub_sub_type = DWGS_KERNEL_INSTRUCTIONS;
+ break;
+ case BRW_CLIP_VP:
+ aub_sub_type = DWGS_CLIPPER_VIEWPORT_STATE;
+ break;
+ case BRW_CLIP_UNIT:
+ aub_sub_type = DWGS_CLIPPER_STATE;
+ break;
+ case BRW_CLIP_PROG:
+ aub_sub_type = DWGS_KERNEL_INSTRUCTIONS;
+ break;
+ case BRW_SS_SURFACE:
+ aub_type = DW_SURFACE_STATE;
+ aub_sub_type = DWSS_SURFACE_STATE;
+ break;
+ case BRW_SS_SURF_BIND:
+ aub_type = DW_SURFACE_STATE;
+ aub_sub_type = DWSS_BINDING_TABLE_STATE;
+ break;
+ case BRW_CONSTANT_BUFFER:
+ aub_type = DW_CONSTANT_URB_ENTRY;
+ aub_sub_type = 0;
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+
+ {
+ struct aub_pipe_winsys *iws = aub_pipe_winsys(pws);
+ struct aub_buffer *sbo = aub_bo(buf);
+
+ assert(sbo->size > offset + size);
+ memcpy(sbo->data + offset, data, size);
+
+ brw_aub_gtt_data( iws->aubfile,
+ sbo->offset + offset,
+ sbo->data + offset,
+ size,
+ aub_type,
+ aub_sub_type );
+ }
+}
+
+
+static void
+xlib_brw_display_surface(struct xmesa_buffer *b,
+ struct pipe_surface *surf)
+{
+ brw_aub_dump_bmp( global_winsys->aubfile,
+ surf,
+ aub_bo(buffer_from_surface(surf))->offset );
+}
+
+
+struct xm_driver xlib_brw_driver =
+{
+ .create_pipe_screen = xlib_create_brw_screen,
+ .create_pipe_context = xlib_create_brw_context,
+ .display_surface = xlib_brw_display_surface,
+};
diff --git a/src/gallium/winsys/xlib/xlib_cell.c b/src/gallium/winsys/xlib/xlib_cell.c
new file mode 100644
index 0000000000..13e609f58f
--- /dev/null
+++ b/src/gallium/winsys/xlib/xlib_cell.c
@@ -0,0 +1,437 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Authors:
+ * Keith Whitwell
+ * Brian Paul
+ */
+
+#include "xlib.h"
+
+#ifdef GALLIUM_CELL
+
+#include "xm_api.h"
+
+#undef ASSERT
+#undef Elements
+
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_format.h"
+#include "pipe/p_context.h"
+#include "pipe/p_inlines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+
+#include "cell/ppu/cell_context.h"
+#include "cell/ppu/cell_screen.h"
+#include "cell/ppu/cell_winsys.h"
+#include "cell/ppu/cell_texture.h"
+
+
+/**
+ * Subclass of pipe_buffer for Xlib winsys.
+ * Low-level OS/window system memory buffer
+ */
+struct xm_buffer
+{
+ struct pipe_buffer base;
+ boolean userBuffer; /** Is this a user-space buffer? */
+ void *data;
+ void *mapped;
+
+ XImage *tempImage;
+ int shm;
+};
+
+
+/**
+ * Subclass of pipe_winsys for Xlib winsys
+ */
+struct xmesa_pipe_winsys
+{
+ struct pipe_winsys base;
+};
+
+
+
+/** Cast wrapper */
+static INLINE struct xm_buffer *
+xm_buffer( struct pipe_buffer *buf )
+{
+ return (struct xm_buffer *)buf;
+}
+
+
+/* Most callbacks map directly onto dri_bufmgr operations:
+ */
+static void *
+xm_buffer_map(struct pipe_winsys *pws, struct pipe_buffer *buf,
+ unsigned flags)
+{
+ struct xm_buffer *xm_buf = xm_buffer(buf);
+ xm_buf->mapped = xm_buf->data;
+ return xm_buf->mapped;
+}
+
+static void
+xm_buffer_unmap(struct pipe_winsys *pws, struct pipe_buffer *buf)
+{
+ struct xm_buffer *xm_buf = xm_buffer(buf);
+ xm_buf->mapped = NULL;
+}
+
+static void
+xm_buffer_destroy(/*struct pipe_winsys *pws,*/
+ struct pipe_buffer *buf)
+{
+ struct xm_buffer *oldBuf = xm_buffer(buf);
+
+ if (oldBuf) {
+ if (oldBuf->data) {
+ if (!oldBuf->userBuffer) {
+ align_free(oldBuf->data);
+ }
+
+ oldBuf->data = NULL;
+ }
+ free(oldBuf);
+ }
+}
+
+
+/**
+ * For Cell. Basically, rearrange the pixels/quads from this layout:
+ * +--+--+--+--+
+ * |p0|p1|p2|p3|....
+ * +--+--+--+--+
+ *
+ * to this layout:
+ * +--+--+
+ * |p0|p1|....
+ * +--+--+
+ * |p2|p3|
+ * +--+--+
+ */
+static void
+twiddle_tile(const uint *tileIn, uint *tileOut)
+{
+ int y, x;
+
+ for (y = 0; y < TILE_SIZE; y+=2) {
+ for (x = 0; x < TILE_SIZE; x+=2) {
+ int k = 4 * (y/2 * TILE_SIZE/2 + x/2);
+ tileOut[y * TILE_SIZE + (x + 0)] = tileIn[k];
+ tileOut[y * TILE_SIZE + (x + 1)] = tileIn[k+1];
+ tileOut[(y + 1) * TILE_SIZE + (x + 0)] = tileIn[k+2];
+ tileOut[(y + 1) * TILE_SIZE + (x + 1)] = tileIn[k+3];
+ }
+ }
+}
+
+
+
+/**
+ * Display a surface that's in a tiled configuration. That is, all the
+ * pixels for a TILE_SIZExTILE_SIZE block are contiguous in memory.
+ */
+static void
+xlib_cell_display_surface(struct xmesa_buffer *b, struct pipe_surface *surf)
+{
+ XImage *ximage;
+ struct xm_buffer *xm_buf = xm_buffer(
+ cell_texture(surf->texture)->buffer);
+ const uint tilesPerRow = (surf->width + TILE_SIZE - 1) / TILE_SIZE;
+ uint x, y;
+
+ ximage = b->tempImage;
+
+ /* check that the XImage has been previously initialized */
+ assert(ximage->format);
+ assert(ximage->bitmap_unit);
+
+ /* update XImage's fields */
+ ximage->width = TILE_SIZE;
+ ximage->height = TILE_SIZE;
+ ximage->bytes_per_line = TILE_SIZE * 4;
+
+ for (y = 0; y < surf->height; y += TILE_SIZE) {
+ for (x = 0; x < surf->width; x += TILE_SIZE) {
+ uint tmpTile[TILE_SIZE * TILE_SIZE];
+ int tx = x / TILE_SIZE;
+ int ty = y / TILE_SIZE;
+ int offset = ty * tilesPerRow + tx;
+ int w = TILE_SIZE;
+ int h = TILE_SIZE;
+
+ if (y + h > surf->height)
+ h = surf->height - y;
+ if (x + w > surf->width)
+ w = surf->width - x;
+
+ /* offset in pixels */
+ offset *= TILE_SIZE * TILE_SIZE;
+
+ /* twiddle from ximage buffer to temp tile */
+ twiddle_tile((uint *) xm_buf->data + offset, tmpTile);
+ /* display temp tile data */
+ ximage->data = (char *) tmpTile;
+ XPutImage(b->xm_visual->display, b->drawable, b->gc,
+ ximage, 0, 0, x, y, w, h);
+ }
+ }
+}
+
+
+
+
+
+static void
+xm_flush_frontbuffer(struct pipe_winsys *pws,
+ struct pipe_surface *surf,
+ void *context_private)
+{
+ /*
+ * The front color buffer is actually just another XImage buffer.
+ * This function copies that XImage to the actual X Window.
+ */
+ XMesaContext xmctx = (XMesaContext) context_private;
+ if (xmctx)
+ xlib_cell_display_surface(xmctx->xm_buffer, surf);
+}
+
+
+
+static const char *
+xm_get_name(struct pipe_winsys *pws)
+{
+ return "Xlib/Cell";
+}
+
+
+static struct pipe_buffer *
+xm_buffer_create(struct pipe_winsys *pws,
+ unsigned alignment,
+ unsigned usage,
+ unsigned size)
+{
+ struct xm_buffer *buffer = CALLOC_STRUCT(xm_buffer);
+
+ pipe_reference_init(&buffer->base.reference, 1);
+ buffer->base.alignment = alignment;
+ buffer->base.usage = usage;
+ buffer->base.size = size;
+
+
+ if (buffer->data == NULL) {
+ buffer->shm = 0;
+
+ /* align to 16-byte multiple for Cell */
+ buffer->data = align_malloc(size, max(alignment, 16));
+ }
+
+ return &buffer->base;
+}
+
+
+/**
+ * Create buffer which wraps user-space data.
+ */
+static struct pipe_buffer *
+xm_user_buffer_create(struct pipe_winsys *pws, void *ptr, unsigned bytes)
+{
+ struct xm_buffer *buffer = CALLOC_STRUCT(xm_buffer);
+ pipe_reference_init(&buffer->base.reference, 1);
+ buffer->base.size = bytes;
+ buffer->userBuffer = TRUE;
+ buffer->data = ptr;
+ buffer->shm = 0;
+
+ return &buffer->base;
+}
+
+
+
+/**
+ * Round n up to next multiple.
+ */
+static INLINE unsigned
+round_up(unsigned n, unsigned multiple)
+{
+ return (n + multiple - 1) & ~(multiple - 1);
+}
+
+static struct pipe_buffer *
+xm_surface_buffer_create(struct pipe_winsys *winsys,
+ unsigned width, unsigned height,
+ enum pipe_format format,
+ unsigned usage,
+ unsigned *stride)
+{
+ const unsigned alignment = 64;
+ struct pipe_format_block block;
+ unsigned nblocksx, nblocksy;
+
+ pf_get_block(format, &block);
+ nblocksx = pf_get_nblocksx(&block, width);
+ nblocksy = pf_get_nblocksy(&block, height);
+ *stride = round_up(nblocksx * block.size, alignment);
+
+ return winsys->buffer_create(winsys, alignment,
+ usage,
+ /* XXX a bit of a hack */
+ *stride * round_up(nblocksy, TILE_SIZE));
+}
+
+
+/*
+ * Fence functions - basically nothing to do, as we don't create any actual
+ * fence objects.
+ */
+
+static void
+xm_fence_reference(struct pipe_winsys *sws, struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+}
+
+
+static int
+xm_fence_signalled(struct pipe_winsys *sws, struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ return 0;
+}
+
+
+static int
+xm_fence_finish(struct pipe_winsys *sws, struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ return 0;
+}
+
+
+
+static struct pipe_winsys *
+xlib_create_cell_winsys( void )
+{
+ static struct xmesa_pipe_winsys *ws = NULL;
+
+ if (!ws) {
+ ws = CALLOC_STRUCT(xmesa_pipe_winsys);
+
+ /* Fill in this struct with callbacks that pipe will need to
+ * communicate with the window system, buffer manager, etc.
+ */
+ ws->base.buffer_create = xm_buffer_create;
+ ws->base.user_buffer_create = xm_user_buffer_create;
+ ws->base.buffer_map = xm_buffer_map;
+ ws->base.buffer_unmap = xm_buffer_unmap;
+ ws->base.buffer_destroy = xm_buffer_destroy;
+
+ ws->base.surface_buffer_create = xm_surface_buffer_create;
+
+ ws->base.fence_reference = xm_fence_reference;
+ ws->base.fence_signalled = xm_fence_signalled;
+ ws->base.fence_finish = xm_fence_finish;
+
+ ws->base.flush_frontbuffer = xm_flush_frontbuffer;
+ ws->base.get_name = xm_get_name;
+ }
+
+ return &ws->base;
+}
+
+
+static struct pipe_screen *
+xlib_create_cell_screen( void )
+{
+ struct pipe_winsys *winsys;
+ struct pipe_screen *screen;
+
+ winsys = xlib_create_cell_winsys();
+ if (winsys == NULL)
+ return NULL;
+
+ screen = cell_create_screen(winsys);
+ if (screen == NULL)
+ goto fail;
+
+ return screen;
+
+fail:
+ if (winsys)
+ winsys->destroy( winsys );
+
+ return NULL;
+}
+
+
+static struct pipe_context *
+xlib_create_cell_context( struct pipe_screen *screen,
+ void *priv )
+{
+ struct pipe_context *pipe;
+
+
+ /* This takes a cell_winsys pointer, but probably that should be
+ * created and stored at screen creation, not context creation.
+ *
+ * The actual cell_winsys value isn't used for anything, so just
+ * passing NULL for now.
+ */
+ pipe = cell_create_context( screen, NULL);
+ if (pipe == NULL)
+ goto fail;
+
+ pipe->priv = priv;
+
+ return pipe;
+
+fail:
+ return NULL;
+}
+
+struct xm_driver xlib_cell_driver =
+{
+ .create_pipe_screen = xlib_create_cell_screen,
+ .create_pipe_context = xlib_create_cell_context,
+ .display_surface = xlib_cell_display_surface,
+};
+
+#else
+
+struct xm_driver xlib_cell_driver =
+{
+ .create_pipe_screen = NULL,
+ .create_pipe_context = NULL,
+ .display_surface = NULL,
+};
+
+#endif
diff --git a/src/gallium/winsys/xlib/xlib_llvmpipe.c b/src/gallium/winsys/xlib/xlib_llvmpipe.c
new file mode 100644
index 0000000000..bc876591c0
--- /dev/null
+++ b/src/gallium/winsys/xlib/xlib_llvmpipe.c
@@ -0,0 +1,461 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Authors:
+ * Keith Whitwell
+ * Brian Paul
+ */
+
+
+#include "xm_api.h"
+
+#undef ASSERT
+#undef Elements
+
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_format.h"
+#include "pipe/p_context.h"
+#include "pipe/p_inlines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "llvmpipe/lp_winsys.h"
+#include "llvmpipe/lp_texture.h"
+
+#include "xlib.h"
+
+/**
+ * Subclass of pipe_buffer for Xlib winsys.
+ * Low-level OS/window system memory buffer
+ */
+struct xm_displaytarget
+{
+ enum pipe_format format;
+ struct pipe_format_block block;
+ unsigned width;
+ unsigned height;
+ unsigned stride;
+
+ void *data;
+ void *mapped;
+
+ XImage *tempImage;
+#ifdef USE_XSHM
+ int shm;
+ XShmSegmentInfo shminfo;
+#endif
+};
+
+
+/**
+ * Subclass of llvmpipe_winsys for Xlib winsys
+ */
+struct xmesa_llvmpipe_winsys
+{
+ struct llvmpipe_winsys base;
+/* struct xmesa_visual *xm_visual; */
+};
+
+
+
+/** Cast wrapper */
+static INLINE struct xm_displaytarget *
+xm_displaytarget( struct llvmpipe_displaytarget *dt )
+{
+ return (struct xm_displaytarget *)dt;
+}
+
+
+/**
+ * X Shared Memory Image extension code
+ */
+
+#ifdef USE_XSHM
+
+static volatile int mesaXErrorFlag = 0;
+
+/**
+ * Catches potential Xlib errors.
+ */
+static int
+mesaHandleXError(Display *dpy, XErrorEvent *event)
+{
+ (void) dpy;
+ (void) event;
+ mesaXErrorFlag = 1;
+ return 0;
+}
+
+
+static char *alloc_shm(struct xm_displaytarget *buf, unsigned size)
+{
+ XShmSegmentInfo *const shminfo = & buf->shminfo;
+
+ shminfo->shmid = shmget(IPC_PRIVATE, size, IPC_CREAT|0777);
+ if (shminfo->shmid < 0) {
+ return NULL;
+ }
+
+ shminfo->shmaddr = (char *) shmat(shminfo->shmid, 0, 0);
+ if (shminfo->shmaddr == (char *) -1) {
+ shmctl(shminfo->shmid, IPC_RMID, 0);
+ return NULL;
+ }
+
+ shminfo->readOnly = False;
+ return shminfo->shmaddr;
+}
+
+
+/**
+ * Allocate a shared memory XImage back buffer for the given XMesaBuffer.
+ */
+static void
+alloc_shm_ximage(struct xm_displaytarget *xm_buffer,
+ struct xmesa_buffer *xmb,
+ unsigned width, unsigned height)
+{
+ /*
+ * We have to do a _lot_ of error checking here to be sure we can
+ * really use the XSHM extension. It seems different servers trigger
+ * errors at different points if the extension won't work. Therefore
+ * we have to be very careful...
+ */
+ int (*old_handler)(Display *, XErrorEvent *);
+
+ xm_buffer->tempImage = XShmCreateImage(xmb->xm_visual->display,
+ xmb->xm_visual->visinfo->visual,
+ xmb->xm_visual->visinfo->depth,
+ ZPixmap,
+ NULL,
+ &xm_buffer->shminfo,
+ width, height);
+ if (xm_buffer->tempImage == NULL) {
+ xm_buffer->shm = 0;
+ return;
+ }
+
+
+ mesaXErrorFlag = 0;
+ old_handler = XSetErrorHandler(mesaHandleXError);
+ /* This may trigger the X protocol error we're ready to catch: */
+ XShmAttach(xmb->xm_visual->display, &xm_buffer->shminfo);
+ XSync(xmb->xm_visual->display, False);
+
+ if (mesaXErrorFlag) {
+ /* we are on a remote display, this error is normal, don't print it */
+ XFlush(xmb->xm_visual->display);
+ mesaXErrorFlag = 0;
+ XDestroyImage(xm_buffer->tempImage);
+ xm_buffer->tempImage = NULL;
+ xm_buffer->shm = 0;
+ (void) XSetErrorHandler(old_handler);
+ return;
+ }
+
+ xm_buffer->shm = 1;
+}
+
+#endif /* USE_XSHM */
+
+static boolean
+xm_is_displaytarget_format_supported( struct llvmpipe_winsys *ws,
+ enum pipe_format format )
+{
+ /* TODO: check visuals or other sensible thing here */
+ return TRUE;
+}
+
+
+static void *
+xm_displaytarget_map(struct llvmpipe_winsys *ws,
+ struct llvmpipe_displaytarget *dt,
+ unsigned flags)
+{
+ struct xm_displaytarget *xm_dt = xm_displaytarget(dt);
+ xm_dt->mapped = xm_dt->data;
+ return xm_dt->mapped;
+}
+
+static void
+xm_displaytarget_unmap(struct llvmpipe_winsys *ws,
+ struct llvmpipe_displaytarget *dt)
+{
+ struct xm_displaytarget *xm_dt = xm_displaytarget(dt);
+ xm_dt->mapped = NULL;
+}
+
+static void
+xm_displaytarget_destroy(struct llvmpipe_winsys *ws,
+ struct llvmpipe_displaytarget *dt)
+{
+ struct xm_displaytarget *xm_dt = xm_displaytarget(dt);
+
+ if (xm_dt->data) {
+#ifdef USE_XSHM
+ if (xm_dt->shminfo.shmid >= 0) {
+ shmdt(xm_dt->shminfo.shmaddr);
+ shmctl(xm_dt->shminfo.shmid, IPC_RMID, 0);
+
+ xm_dt->shminfo.shmid = -1;
+ xm_dt->shminfo.shmaddr = (char *) -1;
+ }
+ else
+#endif
+ FREE(xm_dt->data);
+ }
+
+ FREE(xm_dt);
+}
+
+
+/**
+ * Display/copy the image in the surface into the X window specified
+ * by the XMesaBuffer.
+ */
+static void
+xm_llvmpipe_display(struct xmesa_buffer *xm_buffer,
+ struct llvmpipe_displaytarget *dt)
+{
+ XImage *ximage;
+ struct xm_displaytarget *xm_dt = xm_displaytarget(dt);
+ static boolean no_swap = 0;
+ static boolean firsttime = 1;
+
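+ /* SP_NO_RAST, checked once on first use, disables the copy to the window */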
+ if (firsttime) {
+ no_swap = getenv("SP_NO_RAST") != NULL;
+ firsttime = 0;
+ }
+
+ if (no_swap)
+ return;
+
+#ifdef USE_XSHM
+ if (xm_dt->shm)
+ {
+ if (xm_dt->tempImage == NULL)
+ {
+ assert(xm_dt->block.width == 1);
+ assert(xm_dt->block.height == 1);
+ alloc_shm_ximage(xm_dt, xm_buffer,
+ xm_dt->stride / xm_dt->block.size,
+ xm_dt->height);
+ }
+
+ ximage = xm_dt->tempImage;
+ ximage->data = xm_dt->data;
+
+ /* _debug_printf("XSHM\n"); */
+ XShmPutImage(xm_buffer->xm_visual->display, xm_buffer->drawable, xm_buffer->gc,
+ ximage, 0, 0, 0, 0, xm_dt->width, xm_dt->height, False);
+ }
+ else
+#endif
+ {
+ /* display image in Window */
+ ximage = xm_dt->tempImage;
+ ximage->data = xm_dt->data;
+
+ /* check that the XImage has been previously initialized */
+ assert(ximage->format);
+ assert(ximage->bitmap_unit);
+
+ /* update XImage's fields */
+ ximage->width = xm_dt->width;
+ ximage->height = xm_dt->height;
+ ximage->bytes_per_line = xm_dt->stride;
+
+ /* _debug_printf("XPUT\n"); */
+ XPutImage(xm_buffer->xm_visual->display, xm_buffer->drawable, xm_buffer->gc,
+ ximage, 0, 0, 0, 0, xm_dt->width, xm_dt->height);
+ }
+}
+
+/**
+ * Display/copy the image in the surface into the X window specified
+ * by the XMesaBuffer.
+ */
+static void
+xm_displaytarget_display(struct llvmpipe_winsys *ws,
+ struct llvmpipe_displaytarget *dt,
+ void *context_private)
+{
+ XMesaContext xmctx = (XMesaContext) context_private;
+ struct xmesa_buffer *xm_buffer = xmctx->xm_buffer;
+ xm_llvmpipe_display(xm_buffer, dt);
+}
+
+
+static struct llvmpipe_displaytarget *
+xm_displaytarget_create(struct llvmpipe_winsys *winsys,
+ enum pipe_format format,
+ unsigned width, unsigned height,
+ unsigned alignment,
+ unsigned *stride)
+{
+   struct xm_displaytarget *xm_dt;
+ unsigned nblocksx, nblocksy, size;
+
+ xm_dt = CALLOC_STRUCT(xm_displaytarget);
+ if(!xm_dt)
+ goto no_xm_dt;
+
+ xm_dt->format = format;
+ xm_dt->width = width;
+ xm_dt->height = height;
+
+ pf_get_block(format, &xm_dt->block);
+ nblocksx = pf_get_nblocksx(&xm_dt->block, width);
+ nblocksy = pf_get_nblocksy(&xm_dt->block, height);
+ xm_dt->stride = align(nblocksx * xm_dt->block.size, alignment);
+ size = xm_dt->stride * nblocksy;
+
+#ifdef USE_XSHM
+ if (!debug_get_bool_option("XLIB_NO_SHM", FALSE))
+ {
+ xm_dt->shminfo.shmid = -1;
+ xm_dt->shminfo.shmaddr = (char *) -1;
+ xm_dt->shm = TRUE;
+
+ xm_dt->data = alloc_shm(xm_dt, size);
+ if(!xm_dt->data)
+ goto no_data;
+ }
+#endif
+
+ if(!xm_dt->data) {
+ xm_dt->data = align_malloc(size, alignment);
+ if(!xm_dt->data)
+ goto no_data;
+ }
+
+ *stride = xm_dt->stride;
+ return (struct llvmpipe_displaytarget *)xm_dt;
+
+no_data:
+ FREE(xm_dt);
+no_xm_dt:
+ return NULL;
+}
+
+
+static void
+xm_destroy( struct llvmpipe_winsys *ws )
+{
+ FREE(ws);
+}
+
+
+static struct llvmpipe_winsys *
+xlib_create_llvmpipe_winsys( void )
+{
+ struct xmesa_llvmpipe_winsys *ws;
+
+ ws = CALLOC_STRUCT(xmesa_llvmpipe_winsys);
+ if (!ws)
+ return NULL;
+
+ ws->base.destroy = xm_destroy;
+
+ ws->base.is_displaytarget_format_supported = xm_is_displaytarget_format_supported;
+
+ ws->base.displaytarget_create = xm_displaytarget_create;
+ ws->base.displaytarget_map = xm_displaytarget_map;
+ ws->base.displaytarget_unmap = xm_displaytarget_unmap;
+ ws->base.displaytarget_destroy = xm_displaytarget_destroy;
+
+ ws->base.displaytarget_display = xm_displaytarget_display;
+
+ return &ws->base;
+}
+
+
+static struct pipe_screen *
+xlib_create_llvmpipe_screen( void )
+{
+ struct llvmpipe_winsys *winsys;
+ struct pipe_screen *screen;
+
+ winsys = xlib_create_llvmpipe_winsys();
+ if (winsys == NULL)
+ return NULL;
+
+ screen = llvmpipe_create_screen(winsys);
+ if (screen == NULL)
+ goto fail;
+
+ return screen;
+
+fail:
+ if (winsys)
+ winsys->destroy( winsys );
+
+ return NULL;
+}
+
+
+static struct pipe_context *
+xlib_create_llvmpipe_context( struct pipe_screen *screen,
+ void *context_private )
+{
+ struct pipe_context *pipe;
+
+ pipe = llvmpipe_create(screen);
+ if (pipe == NULL)
+ goto fail;
+
+ pipe->priv = context_private;
+ return pipe;
+
+fail:
+ /* Free stuff here */
+ return NULL;
+}
+
+
+static void
+xlib_llvmpipe_display_surface(struct xmesa_buffer *xm_buffer,
+ struct pipe_surface *surf)
+{
+ struct llvmpipe_texture *texture = llvmpipe_texture(surf->texture);
+
+ assert(texture->dt);
+ if (texture->dt)
+ xm_llvmpipe_display(xm_buffer, texture->dt);
+}
+
+
+struct xm_driver xlib_llvmpipe_driver =
+{
+ .create_pipe_screen = xlib_create_llvmpipe_screen,
+ .create_pipe_context = xlib_create_llvmpipe_context,
+ .display_surface = xlib_llvmpipe_display_surface
+};
+
+
+
diff --git a/src/gallium/winsys/xlib/xlib_softpipe.c b/src/gallium/winsys/xlib/xlib_softpipe.c
new file mode 100644
index 0000000000..67fea023a3
--- /dev/null
+++ b/src/gallium/winsys/xlib/xlib_softpipe.c
@@ -0,0 +1,507 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Authors:
+ * Keith Whitwell
+ * Brian Paul
+ */
+
+
+#include "xm_api.h"
+
+#undef ASSERT
+#undef Elements
+
+#include "pipe/internal/p_winsys_screen.h"
+#include "pipe/p_format.h"
+#include "pipe/p_context.h"
+#include "pipe/p_inlines.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "softpipe/sp_winsys.h"
+#include "softpipe/sp_texture.h"
+
+#include "xlib.h"
+
+/**
+ * Subclass of pipe_buffer for Xlib winsys.
+ * Low-level OS/window system memory buffer
+ */
+struct xm_buffer
+{
+ struct pipe_buffer base;
+   boolean userBuffer;   /**< Is this a user-space buffer? */
+ void *data;
+ void *mapped;
+
+ XImage *tempImage;
+#ifdef USE_XSHM
+ int shm;
+ XShmSegmentInfo shminfo;
+#endif
+};
+
+
+/**
+ * Subclass of pipe_winsys for Xlib winsys
+ */
+struct xmesa_pipe_winsys
+{
+ struct pipe_winsys base;
+/* struct xmesa_visual *xm_visual; */
+#ifdef USE_XSHM
+ int shm;
+#endif
+};
+
+
+
+/** Cast wrapper */
+static INLINE struct xm_buffer *
+xm_buffer( struct pipe_buffer *buf )
+{
+ return (struct xm_buffer *)buf;
+}
+
+
+/**
+ * X Shared Memory Image extension code
+ */
+#ifdef USE_XSHM
+#define XSHM_ENABLED(b) ((b)->shm)
+#else
+#define XSHM_ENABLED(b) 0
+#endif
+
+#ifdef USE_XSHM
+
+static volatile int mesaXErrorFlag = 0;
+
+/**
+ * Catches potential Xlib errors.
+ */
+static int
+mesaHandleXError(Display *dpy, XErrorEvent *event)
+{
+ (void) dpy;
+ (void) event;
+ mesaXErrorFlag = 1;
+ return 0;
+}
+
+
+static GLboolean alloc_shm(struct xm_buffer *buf, unsigned size)
+{
+ XShmSegmentInfo *const shminfo = & buf->shminfo;
+
+ shminfo->shmid = shmget(IPC_PRIVATE, size, IPC_CREAT|0777);
+ if (shminfo->shmid < 0) {
+ return GL_FALSE;
+ }
+
+ shminfo->shmaddr = (char *) shmat(shminfo->shmid, 0, 0);
+ if (shminfo->shmaddr == (char *) -1) {
+ shmctl(shminfo->shmid, IPC_RMID, 0);
+ return GL_FALSE;
+ }
+
+ shminfo->readOnly = False;
+ return GL_TRUE;
+}
+
+
+/**
+ * Allocate a shared memory XImage back buffer for the given XMesaBuffer.
+ */
+static void
+alloc_shm_ximage(struct xm_buffer *b, struct xmesa_buffer *xmb,
+ unsigned width, unsigned height)
+{
+ /*
+ * We have to do a _lot_ of error checking here to be sure we can
+ * really use the XSHM extension. It seems different servers trigger
+ * errors at different points if the extension won't work. Therefore
+ * we have to be very careful...
+ */
+ int (*old_handler)(Display *, XErrorEvent *);
+
+ b->tempImage = XShmCreateImage(xmb->xm_visual->display,
+ xmb->xm_visual->visinfo->visual,
+ xmb->xm_visual->visinfo->depth,
+ ZPixmap,
+ NULL,
+ &b->shminfo,
+ width, height);
+ if (b->tempImage == NULL) {
+ b->shm = 0;
+ return;
+ }
+
+
+ mesaXErrorFlag = 0;
+ old_handler = XSetErrorHandler(mesaHandleXError);
+ /* This may trigger the X protocol error we're ready to catch: */
+ XShmAttach(xmb->xm_visual->display, &b->shminfo);
+ XSync(xmb->xm_visual->display, False);
+
+ if (mesaXErrorFlag) {
+      /* This error is normal when the display is remote; don't print it. */
+ XFlush(xmb->xm_visual->display);
+ mesaXErrorFlag = 0;
+ XDestroyImage(b->tempImage);
+ b->tempImage = NULL;
+ b->shm = 0;
+ (void) XSetErrorHandler(old_handler);
+ return;
+ }
+
+ b->shm = 1;
+}
+
+#endif /* USE_XSHM */
+
+
+
+/* The buffer callbacks below operate directly on the malloc'd (or SHM)
+ * memory behind each xm_buffer:
+ */
+static void *
+xm_buffer_map(struct pipe_winsys *pws, struct pipe_buffer *buf,
+ unsigned flags)
+{
+ struct xm_buffer *xm_buf = xm_buffer(buf);
+ xm_buf->mapped = xm_buf->data;
+ return xm_buf->mapped;
+}
+
+static void
+xm_buffer_unmap(struct pipe_winsys *pws, struct pipe_buffer *buf)
+{
+ struct xm_buffer *xm_buf = xm_buffer(buf);
+ xm_buf->mapped = NULL;
+}
+
+static void
+xm_buffer_destroy(struct pipe_buffer *buf)
+{
+ struct xm_buffer *oldBuf = xm_buffer(buf);
+
+ if (oldBuf->data) {
+#ifdef USE_XSHM
+ if (oldBuf->shminfo.shmid >= 0) {
+ shmdt(oldBuf->shminfo.shmaddr);
+ shmctl(oldBuf->shminfo.shmid, IPC_RMID, 0);
+
+ oldBuf->shminfo.shmid = -1;
+ oldBuf->shminfo.shmaddr = (char *) -1;
+ }
+ else
+#endif
+ {
+ if (!oldBuf->userBuffer) {
+ align_free(oldBuf->data);
+ }
+ }
+
+ oldBuf->data = NULL;
+ }
+
+ free(oldBuf);
+}
+
+
+/**
+ * Display/copy the image in the surface into the X window specified
+ * by the XMesaBuffer.
+ */
+static void
+xlib_softpipe_display_surface(struct xmesa_buffer *b,
+ struct pipe_surface *surf)
+{
+ XImage *ximage;
+ struct softpipe_texture *spt = softpipe_texture(surf->texture);
+ struct xm_buffer *xm_buf = xm_buffer(spt->buffer);
+ static boolean no_swap = 0;
+ static boolean firsttime = 1;
+
+ if (firsttime) {
+ no_swap = getenv("SP_NO_RAST") != NULL;
+ firsttime = 0;
+ }
+
+ if (no_swap)
+ return;
+
+#ifdef USE_XSHM
+ if (XSHM_ENABLED(xm_buf) && (xm_buf->tempImage == NULL)) {
+ assert(surf->texture->block.width == 1);
+ assert(surf->texture->block.height == 1);
+ alloc_shm_ximage(xm_buf, b, spt->stride[surf->level] /
+ surf->texture->block.size, surf->height);
+ }
+#endif
+
+ ximage = (XSHM_ENABLED(xm_buf)) ? xm_buf->tempImage : b->tempImage;
+ ximage->data = xm_buf->data;
+
+ /* display image in Window */
+#ifdef USE_XSHM
+ if (XSHM_ENABLED(xm_buf)) {
+ XShmPutImage(b->xm_visual->display, b->drawable, b->gc,
+ ximage, 0, 0, 0, 0, surf->width, surf->height, False);
+ } else
+#endif
+ {
+ /* check that the XImage has been previously initialized */
+ assert(ximage->format);
+ assert(ximage->bitmap_unit);
+
+ /* update XImage's fields */
+ ximage->width = surf->width;
+ ximage->height = surf->height;
+ ximage->bytes_per_line = spt->stride[surf->level];
+
+ XPutImage(b->xm_visual->display, b->drawable, b->gc,
+ ximage, 0, 0, 0, 0, surf->width, surf->height);
+ }
+}
+
+
+static void
+xm_flush_frontbuffer(struct pipe_winsys *pws,
+ struct pipe_surface *surf,
+ void *context_private)
+{
+ /*
+ * The front color buffer is actually just another XImage buffer.
+ * This function copies that XImage to the actual X Window.
+ */
+ XMesaContext xmctx = (XMesaContext) context_private;
+ xlib_softpipe_display_surface(xmctx->xm_buffer, surf);
+ xmesa_check_and_update_buffer_size(xmctx, xmctx->xm_buffer);
+}
+
+
+
+static const char *
+xm_get_name(struct pipe_winsys *pws)
+{
+ return "Xlib";
+}
+
+
+static struct pipe_buffer *
+xm_buffer_create(struct pipe_winsys *pws,
+ unsigned alignment,
+ unsigned usage,
+ unsigned size)
+{
+ struct xm_buffer *buffer = CALLOC_STRUCT(xm_buffer);
+#ifdef USE_XSHM
+ struct xmesa_pipe_winsys *xpws = (struct xmesa_pipe_winsys *) pws;
+
+ buffer->shminfo.shmid = -1;
+ buffer->shminfo.shmaddr = (char *) -1;
+
+ if (xpws->shm && (usage & PIPE_BUFFER_USAGE_PIXEL) != 0) {
+ buffer->shm = xpws->shm;
+
+ if (alloc_shm(buffer, size)) {
+ buffer->data = buffer->shminfo.shmaddr;
+ buffer->shm = 1;
+ }
+ }
+#endif
+
+ pipe_reference_init(&buffer->base.reference, 1);
+ buffer->base.alignment = alignment;
+ buffer->base.usage = usage;
+ buffer->base.size = size;
+
+ if (buffer->data == NULL) {
+ /* align to 16-byte multiple for Cell */
+ buffer->data = align_malloc(size, max(alignment, 16));
+ }
+
+ return &buffer->base;
+}
+
+
+/**
+ * Create buffer which wraps user-space data.
+ */
+static struct pipe_buffer *
+xm_user_buffer_create(struct pipe_winsys *pws, void *ptr, unsigned bytes)
+{
+ struct xm_buffer *buffer = CALLOC_STRUCT(xm_buffer);
+ pipe_reference_init(&buffer->base.reference, 1);
+ buffer->base.size = bytes;
+ buffer->userBuffer = TRUE;
+ buffer->data = ptr;
+#ifdef USE_XSHM
+ buffer->shm = 0;
+#endif
+
+ return &buffer->base;
+}
+
+
+static struct pipe_buffer *
+xm_surface_buffer_create(struct pipe_winsys *winsys,
+ unsigned width, unsigned height,
+ enum pipe_format format,
+ unsigned usage,
+ unsigned tex_usage,
+ unsigned *stride)
+{
+ const unsigned alignment = 64;
+ struct pipe_format_block block;
+ unsigned nblocksx, nblocksy;
+
+ pf_get_block(format, &block);
+ nblocksx = pf_get_nblocksx(&block, width);
+ nblocksy = pf_get_nblocksy(&block, height);
+ *stride = align(nblocksx * block.size, alignment);
+
+ return winsys->buffer_create(winsys, alignment,
+ usage,
+ *stride * nblocksy);
+}
+
+
+/*
+ * Fence functions - basically nothing to do, as we don't create any actual
+ * fence objects.
+ */
+
+static void
+xm_fence_reference(struct pipe_winsys *sws, struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+}
+
+
+static int
+xm_fence_signalled(struct pipe_winsys *sws, struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ return 0;
+}
+
+
+static int
+xm_fence_finish(struct pipe_winsys *sws, struct pipe_fence_handle *fence,
+ unsigned flag)
+{
+ return 0;
+}
+
+
+
+static struct pipe_winsys *
+xlib_create_softpipe_winsys( void )
+{
+ static struct xmesa_pipe_winsys *ws = NULL;
+
+   if (!ws) {
+      ws = CALLOC_STRUCT(xmesa_pipe_winsys);
+      if (!ws)
+         return NULL;
+
+ /* Fill in this struct with callbacks that pipe will need to
+ * communicate with the window system, buffer manager, etc.
+ */
+ ws->base.buffer_create = xm_buffer_create;
+ ws->base.user_buffer_create = xm_user_buffer_create;
+ ws->base.buffer_map = xm_buffer_map;
+ ws->base.buffer_unmap = xm_buffer_unmap;
+ ws->base.buffer_destroy = xm_buffer_destroy;
+
+ ws->base.surface_buffer_create = xm_surface_buffer_create;
+
+ ws->base.fence_reference = xm_fence_reference;
+ ws->base.fence_signalled = xm_fence_signalled;
+ ws->base.fence_finish = xm_fence_finish;
+
+ ws->base.flush_frontbuffer = xm_flush_frontbuffer;
+ ws->base.get_name = xm_get_name;
+ }
+
+ return &ws->base;
+}
+
+
+static struct pipe_screen *
+xlib_create_softpipe_screen( void )
+{
+ struct pipe_winsys *winsys;
+ struct pipe_screen *screen;
+
+ winsys = xlib_create_softpipe_winsys();
+ if (winsys == NULL)
+ return NULL;
+
+ screen = softpipe_create_screen(winsys);
+ if (screen == NULL)
+ goto fail;
+
+ return screen;
+
+fail:
+ if (winsys)
+ winsys->destroy( winsys );
+
+ return NULL;
+}
+
+
+static struct pipe_context *
+xlib_create_softpipe_context( struct pipe_screen *screen,
+ void *context_private )
+{
+ struct pipe_context *pipe;
+
+ pipe = softpipe_create(screen);
+ if (pipe == NULL)
+ goto fail;
+
+ pipe->priv = context_private;
+ return pipe;
+
+fail:
+ /* Free stuff here */
+ return NULL;
+}
+
+struct xm_driver xlib_softpipe_driver =
+{
+ .create_pipe_screen = xlib_create_softpipe_screen,
+ .create_pipe_context = xlib_create_softpipe_context,
+ .display_surface = xlib_softpipe_display_surface
+};
+
+
+
diff --git a/src/gallium/winsys/xlib/xlib_trace.c b/src/gallium/winsys/xlib/xlib_trace.c
new file mode 100644
index 0000000000..dbea655ab4
--- /dev/null
+++ b/src/gallium/winsys/xlib/xlib_trace.c
@@ -0,0 +1,113 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Authors:
+ * Keith Whitwell
+ * Brian Paul
+ */
+
+
+#include "xlib.h"
+
+#include "trace/tr_screen.h"
+#include "trace/tr_context.h"
+#include "trace/tr_texture.h"
+
+#include "pipe/p_screen.h"
+
+
+
+static struct pipe_screen *
+xlib_create_trace_screen( void )
+{
+ struct pipe_screen *screen, *trace_screen;
+
+ screen = xlib_softpipe_driver.create_pipe_screen();
+ if (screen == NULL)
+ goto fail;
+
+ /* Wrap it:
+ */
+ trace_screen = trace_screen_create(screen);
+ if (trace_screen == NULL)
+ goto fail;
+
+ return trace_screen;
+
+fail:
+ if (screen)
+ screen->destroy( screen );
+ return NULL;
+}
+
+static struct pipe_context *
+xlib_create_trace_context( struct pipe_screen *_screen,
+ void *priv )
+{
+ struct trace_screen *tr_scr = trace_screen( _screen );
+ struct pipe_screen *screen = tr_scr->screen;
+ struct pipe_context *pipe, *trace_pipe;
+
+ pipe = xlib_softpipe_driver.create_pipe_context( screen, priv );
+ if (pipe == NULL)
+ goto fail;
+
+ /* Wrap it:
+ */
+ trace_pipe = trace_context_create(_screen, pipe);
+ if (trace_pipe == NULL)
+ goto fail;
+
+ trace_pipe->priv = priv;
+
+ return trace_pipe;
+
+fail:
+ if (pipe)
+ pipe->destroy( pipe );
+ return NULL;
+}
+
+static void
+xlib_trace_display_surface( struct xmesa_buffer *buffer,
+ struct pipe_surface *_surf )
+{
+ struct trace_surface *tr_surf = trace_surface( _surf );
+ struct pipe_surface *surf = tr_surf->surface;
+
+ xlib_softpipe_driver.display_surface( buffer, surf );
+}
+
+
+struct xm_driver xlib_trace_driver =
+{
+ .create_pipe_screen = xlib_create_trace_screen,
+ .create_pipe_context = xlib_create_trace_context,
+ .display_surface = xlib_trace_display_surface,
+};
diff --git a/src/gallium/winsys/xlib/xmesa.h b/src/gallium/winsys/xlib/xmesa.h
new file mode 100644
index 0000000000..98139af833
--- /dev/null
+++ b/src/gallium/winsys/xlib/xmesa.h
@@ -0,0 +1,424 @@
+/*
+ * Mesa 3-D graphics library
+ * Version: 7.1
+ *
+ * Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*
+ * Mesa/X11 interface. This header file serves as the documentation for
+ * the Mesa/X11 interface functions.
+ *
+ * Note: this interface isn't intended for user programs. It's primarily
+ * just for implementing the pseudo-GLX interface.
+ */
+
+
+/* Sample Usage:
+
+In addition to the usual X calls to select a visual, create a colormap
+and create a window, you must do the following to use the X/Mesa interface:
+
+1. Call XMesaCreateVisual() to make an XMesaVisual from an XVisualInfo.
+
+2. Call XMesaCreateContext() to create an X/Mesa rendering context, given
+ the XMesaVisual.
+
+3. Call XMesaCreateWindowBuffer() to create an XMesaBuffer from an X window
+ and XMesaVisual.
+
+4. Call XMesaMakeCurrent() to bind the XMesaBuffer to an XMesaContext and
+ to make the context the current one.
+
+5. Make gl* calls to render your graphics.
+
+6. Use XMesaSwapBuffers() when double buffering to swap front/back buffers.
+
+7. Before the X window is destroyed, call XMesaDestroyBuffer().
+
+8. Before exiting, call XMesaDestroyVisual() and XMesaDestroyContext().
+
+*/
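+
+/* A minimal sketch of that sequence (illustrative only, not part of the
+ * interface; `dpy', `visinfo' and `win' are assumed to come from the usual
+ * XOpenDisplay(), XGetVisualInfo() and XCreateWindow() calls).  It asks for
+ * an RGB, double-buffered, XImage-backed visual with 16-bit depth and
+ * 8-bit stencil and no accum or multisample buffers:
+ *
+ *    XMesaVisual  v = XMesaCreateVisual(dpy, visinfo,
+ *                                       GL_TRUE, GL_FALSE, GL_TRUE, GL_FALSE,
+ *                                       GL_TRUE, 16, 8, 0, 0, 0, 0, 0, 0, 0);
+ *    XMesaContext c = XMesaCreateContext(v, NULL);
+ *    XMesaBuffer  b = XMesaCreateWindowBuffer(v, win);
+ *
+ *    XMesaMakeCurrent(c, b);
+ *    ... gl* rendering calls ...
+ *    XMesaSwapBuffers(b);
+ *
+ *    XMesaDestroyBuffer(b);
+ *    XMesaDestroyContext(c);
+ *    XMesaDestroyVisual(v);
+ */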
+
+
+
+
+#ifndef XMESA_H
+#define XMESA_H
+
+#ifdef __VMS
+#include <GL/vms_x_fix.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef XFree86Server
+#include "xmesa_xf86.h"
+#else
+#include <X11/Xlib.h>
+#include <X11/Xutil.h>
+#include "xmesa_x.h"
+#endif
+#include "GL/gl.h"
+
+#ifdef AMIWIN
+#include <pragmas/xlib_pragmas.h>
+extern struct Library *XLibBase;
+#endif
+
+
+#define XMESA_MAJOR_VERSION 6
+#define XMESA_MINOR_VERSION 3
+
+
+
+/*
+ * Values passed to XMesaGetString:
+ */
+#define XMESA_VERSION 1
+#define XMESA_EXTENSIONS 2
+
+
+/*
+ * Values passed to XMesaSetFXmode:
+ */
+#define XMESA_FX_WINDOW 1
+#define XMESA_FX_FULLSCREEN 2
+
+
+
+typedef struct xmesa_context *XMesaContext;
+
+typedef struct xmesa_visual *XMesaVisual;
+
+typedef struct xmesa_buffer *XMesaBuffer;
+
+
+
+/*
+ * Create a new X/Mesa visual.
+ * Input: display - X11 display
+ * visinfo - an XVisualInfo pointer
+ * rgb_flag - GL_TRUE = RGB mode,
+ * GL_FALSE = color index mode
+ * alpha_flag - alpha buffer requested?
+ * db_flag - GL_TRUE = double-buffered,
+ * GL_FALSE = single buffered
+ * stereo_flag - stereo visual?
+ * ximage_flag - GL_TRUE = use an XImage for back buffer,
+ * GL_FALSE = use an off-screen pixmap for back buffer
+ * depth_size - requested bits/depth values, or zero
+ * stencil_size - requested bits/stencil values, or zero
+ * accum_red_size - requested bits/red accum values, or zero
+ * accum_green_size - requested bits/green accum values, or zero
+ * accum_blue_size - requested bits/blue accum values, or zero
+ * accum_alpha_size - requested bits/alpha accum values, or zero
+ * num_samples - number of samples/pixel if multisampling, or zero
+ * level - visual level, usually 0
+ *         visualCaveat - as in the GLX extension, usually GLX_NONE_EXT
+ * Return: a new XMesaVisual or NULL if error.
+ */
+extern XMesaVisual XMesaCreateVisual( XMesaDisplay *display,
+ XMesaVisualInfo visinfo,
+ GLboolean rgb_flag,
+ GLboolean alpha_flag,
+ GLboolean db_flag,
+ GLboolean stereo_flag,
+ GLboolean ximage_flag,
+ GLint depth_size,
+ GLint stencil_size,
+ GLint accum_red_size,
+ GLint accum_green_size,
+ GLint accum_blue_size,
+ GLint accum_alpha_size,
+ GLint num_samples,
+ GLint level,
+ GLint visualCaveat );
+
+/*
+ * Destroy an XMesaVisual, but not the associated XVisualInfo.
+ */
+extern void XMesaDestroyVisual( XMesaVisual v );
+
+
+
+/*
+ * Create a new XMesaContext for rendering into an X11 window.
+ *
+ * Input: visual - an XMesaVisual
+ * share_list - another XMesaContext with which to share display
+ * lists or NULL if no sharing is wanted.
+ * Return: an XMesaContext or NULL if error.
+ */
+extern XMesaContext XMesaCreateContext( XMesaVisual v,
+ XMesaContext share_list );
+
+
+/*
+ * Destroy a rendering context as returned by XMesaCreateContext()
+ */
+extern void XMesaDestroyContext( XMesaContext c );
+
+
+#ifdef XFree86Server
+/*
+ * These are the extra routines required for integration with XFree86.
+ * None of these routines should be user visible. -KEM
+ */
+extern GLboolean XMesaForceCurrent( XMesaContext c );
+
+extern GLboolean XMesaLoseCurrent( XMesaContext c );
+
+extern GLboolean XMesaCopyContext( XMesaContext src,
+ XMesaContext dst,
+ GLuint mask );
+#endif /* XFree86Server */
+
+
+/*
+ * Create an XMesaBuffer from an X window.
+ */
+extern XMesaBuffer XMesaCreateWindowBuffer( XMesaVisual v, XMesaWindow w );
+
+
+/*
+ * Create an XMesaBuffer from an X pixmap.
+ */
+extern XMesaBuffer XMesaCreatePixmapBuffer( XMesaVisual v,
+ XMesaPixmap p,
+ XMesaColormap cmap );
+
+
+/*
+ * Destroy an XMesaBuffer, but not the corresponding window or pixmap.
+ */
+extern void XMesaDestroyBuffer( XMesaBuffer b );
+
+
+/*
+ * Return the XMesaBuffer handle which corresponds to an X drawable, if any.
+ *
+ * New in Mesa 2.3.
+ */
+extern XMesaBuffer XMesaFindBuffer( XMesaDisplay *dpy,
+ XMesaDrawable d );
+
+
+
+/*
+ * Bind a buffer to a context and make the context the current one.
+ */
+extern GLboolean XMesaMakeCurrent( XMesaContext c,
+ XMesaBuffer b );
+
+
+/*
+ * Bind two buffers (read and draw) to a context and make the
+ * context the current one.
+ * New in Mesa 3.3
+ */
+extern GLboolean XMesaMakeCurrent2( XMesaContext c,
+ XMesaBuffer drawBuffer,
+ XMesaBuffer readBuffer );
+
+
+/*
+ * Unbind the current context from its buffer.
+ */
+extern GLboolean XMesaUnbindContext( XMesaContext c );
+
+
+/*
+ * Return a handle to the current context.
+ */
+extern XMesaContext XMesaGetCurrentContext( void );
+
+
+/*
+ * Return handle to the current (draw) buffer.
+ */
+extern XMesaBuffer XMesaGetCurrentBuffer( void );
+
+
+/*
+ * Return handle to the current read buffer.
+ * New in Mesa 3.3
+ */
+extern XMesaBuffer XMesaGetCurrentReadBuffer( void );
+
+
+/*
+ * Swap the front and back buffers for the given buffer. No action is
+ * taken if the buffer is not double buffered.
+ */
+extern void XMesaSwapBuffers( XMesaBuffer b );
+
+
+/*
+ * Copy a sub-region of the back buffer to the front buffer.
+ *
+ * New in Mesa 2.6
+ */
+extern void XMesaCopySubBuffer( XMesaBuffer b,
+ int x,
+ int y,
+ int width,
+ int height );
+
+
+/*
+ * Return a pointer to the Pixmap or XImage being used as the back
+ * color buffer of an XMesaBuffer. This function is a way to get "under
+ * the hood" of X/Mesa so one can manipulate the back buffer directly.
+ * Input: b - the XMesaBuffer
+ * Output: pixmap - pointer to back buffer's Pixmap, or 0
+ * ximage - pointer to back buffer's XImage, or NULL
+ * Return: GL_TRUE = context is double buffered
+ * GL_FALSE = context is single buffered
+ */
+extern GLboolean XMesaGetBackBuffer( XMesaBuffer b,
+ XMesaPixmap *pixmap,
+ XMesaImage **ximage );
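+
+/* Illustrative sketch (assumes a double-buffered, XImage-backed XMesaBuffer
+ * `b' and in-range window coordinates `x', `y'):
+ *
+ *    XMesaPixmap pixmap;
+ *    XMesaImage *backimage;
+ *    if (XMesaGetBackBuffer(b, &pixmap, &backimage) && backimage) {
+ *       unsigned long pixel = XMesaGetPixel(backimage, x, y);
+ *       XMesaPutPixel(backimage, x, y, ~pixel);
+ *    }
+ */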
+
+
+
+/*
+ * Return the depth buffer associated with an XMesaBuffer.
+ * Input: b - the XMesa buffer handle
+ * Output: width, height - size of buffer in pixels
+ * bytesPerValue - bytes per depth value (2 or 4)
+ * buffer - pointer to depth buffer values
+ * Return: GL_TRUE or GL_FALSE to indicate success or failure.
+ *
+ * New in Mesa 2.4.
+ */
+extern GLboolean XMesaGetDepthBuffer( XMesaBuffer b,
+ GLint *width,
+ GLint *height,
+ GLint *bytesPerValue,
+ void **buffer );
+
+
+
+/*
+ * Flush/sync a context
+ */
+extern void XMesaFlush( XMesaContext c );
+
+
+
+/*
+ * Get an X/Mesa-specific string.
+ * Input: name - either XMESA_VERSION or XMESA_EXTENSIONS
+ */
+extern const char *XMesaGetString( XMesaContext c, int name );
+
+
+
+/*
+ * Scan for XMesaBuffers whose window/pixmap has been destroyed, then free
+ * any memory used by that buffer.
+ *
+ * New in Mesa 2.3.
+ */
+extern void XMesaGarbageCollect( void );
+
+
+
+/*
+ * Return a dithered pixel value.
+ * Input: c - XMesaContext
+ * x, y - window coordinate
+ * red, green, blue, alpha - color components in [0,1]
+ * Return: pixel value
+ *
+ * New in Mesa 2.3.
+ */
+extern unsigned long XMesaDitherColor( XMesaContext xmesa,
+ GLint x,
+ GLint y,
+ GLfloat red,
+ GLfloat green,
+ GLfloat blue,
+ GLfloat alpha );
+
+
+
+/*
+ * 3Dfx Glide driver only!
+ * Set 3Dfx/Glide full-screen or window rendering mode.
+ * Input: mode - either XMESA_FX_WINDOW (window rendering mode) or
+ * XMESA_FX_FULLSCREEN (full-screen rendering mode)
+ * Return: GL_TRUE if success
+ * GL_FALSE if invalid mode or if not using 3Dfx driver
+ *
+ * New in Mesa 2.6.
+ */
+extern GLboolean XMesaSetFXmode( GLint mode );
+
+
+
+/*
+ * Reallocate the back/depth/stencil/accum/etc. buffers associated with
+ * buffer <b> if its size has changed.
+ *
+ * New in Mesa 4.0.2
+ */
+extern void XMesaResizeBuffers( XMesaBuffer b );
+
+
+
+/*
+ * Create a pbuffer.
+ * New in Mesa 4.1
+ */
+extern XMesaBuffer XMesaCreatePBuffer(XMesaVisual v, XMesaColormap cmap,
+ unsigned int width, unsigned int height);
+
+
+
+/*
+ * Texture from Pixmap
+ * New in Mesa 7.1
+ */
+extern void
+XMesaBindTexImage(XMesaDisplay *dpy, XMesaBuffer drawable, int buffer,
+ const int *attrib_list);
+
+extern void
+XMesaReleaseTexImage(XMesaDisplay *dpy, XMesaBuffer drawable, int buffer);
+
+
+extern XMesaBuffer
+XMesaCreatePixmapTextureBuffer(XMesaVisual v, XMesaPixmap p,
+ XMesaColormap cmap,
+ int format, int target, int mipmap);
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif
diff --git a/src/gallium/winsys/xlib/xmesa_x.h b/src/gallium/winsys/xlib/xmesa_x.h
new file mode 100644
index 0000000000..865bab4313
--- /dev/null
+++ b/src/gallium/winsys/xlib/xmesa_x.h
@@ -0,0 +1,86 @@
+
+/**************************************************************************
+
+Copyright 1998-1999 Precision Insight, Inc., Cedar Park, Texas.
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+/*
+ * Authors:
+ * Kevin E. Martin <kevin@precisioninsight.com>
+ *
+ * When building stand-alone Mesa, this file is included by the xm_*.c
+ * files of the XMesa driver.  Some types and macros must be defined
+ * differently when building inside the Xserver vs. stand-alone Mesa.
+ */
+
+#ifndef _XMESA_X_H_
+#define _XMESA_X_H_
+
+typedef Display XMesaDisplay;
+typedef Pixmap XMesaPixmap;
+typedef Colormap XMesaColormap;
+typedef Drawable XMesaDrawable;
+typedef Window XMesaWindow;
+typedef GC XMesaGC;
+typedef XVisualInfo *XMesaVisualInfo;
+typedef XImage XMesaImage;
+typedef XPoint XMesaPoint;
+typedef XColor XMesaColor;
+
+#define XMesaDestroyImage XDestroyImage
+
+#define XMesaPutPixel XPutPixel
+#define XMesaGetPixel XGetPixel
+
+#define XMesaSetForeground XSetForeground
+#define XMesaSetBackground XSetBackground
+#define XMesaSetPlaneMask XSetPlaneMask
+#define XMesaSetFunction XSetFunction
+#define XMesaSetFillStyle XSetFillStyle
+#define XMesaSetTile XSetTile
+
+#define XMesaDrawPoint XDrawPoint
+#define XMesaDrawPoints XDrawPoints
+#define XMesaDrawLine XDrawLine
+#define XMesaFillRectangle XFillRectangle
+#define XMesaGetImage XGetImage
+#define XMesaPutImage XPutImage
+#define XMesaCopyArea XCopyArea
+
+#define XMesaCreatePixmap XCreatePixmap
+#define XMesaFreePixmap XFreePixmap
+#define XMesaFreeGC XFreeGC
+
+#define GET_COLORMAP_SIZE(__v) __v->visinfo->colormap_size
+#define GET_REDMASK(__v) __v->mesa_visual.redMask
+#define GET_GREENMASK(__v) __v->mesa_visual.greenMask
+#define GET_BLUEMASK(__v) __v->mesa_visual.blueMask
+#define GET_VISUAL_DEPTH(__v) __v->visinfo->depth
+#define GET_BLACK_PIXEL(__v) BlackPixel(__v->display, __v->mesa_visual.screen)
+#define CHECK_BYTE_ORDER(__v) (host_byte_order() == ImageByteOrder((__v)->display))
+#define CHECK_FOR_HPCR(__v) XInternAtom(__v->display, "_HP_RGB_SMOOTH_MAP_LIST", True)
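+
+/* Illustrative sketch (not part of this header): the xm_*.c files are
+ * written against the XMesa* names above, so the same drawing code builds
+ * against plain Xlib here or against the Xserver variants in xmesa_xf86.h.
+ * The parameters below are hypothetical:
+ *
+ *    static void clear_rect(XMesaDisplay *dpy, XMesaDrawable d, XMesaGC gc,
+ *                           int x, int y, unsigned w, unsigned h)
+ *    {
+ *       XMesaSetForeground(dpy, gc, 0);
+ *       XMesaFillRectangle(dpy, d, gc, x, y, w, h);
+ *    }
+ */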
+
+#endif